pgtable.h

#ifndef _ASM_POWERPC_PGTABLE_H
#define _ASM_POWERPC_PGTABLE_H
#ifdef __KERNEL__

#ifndef __ASSEMBLY__
#include <asm/processor.h>	/* For TASK_SIZE */
#include <asm/mmu.h>
#include <asm/page.h>
struct mm_struct;
#endif /* !__ASSEMBLY__ */

#if defined(CONFIG_PPC64)
# include <asm/pgtable-ppc64.h>
#else
# include <asm/pgtable-ppc32.h>
#endif

#ifndef __ASSEMBLY__
/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc.
 */
extern unsigned long empty_zero_page[];
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
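
/*
 * Illustrative sketch, not part of the original header: a read fault on
 * an anonymous page that has never been written can be satisfied by
 * mapping the shared zero page instead of allocating fresh memory,
 * roughly:
 *
 *	struct page *page = ZERO_PAGE(address);
 *	pte_t entry = mk_pte(page, vma->vm_page_prot);
 *	set_pte_at(mm, address, ptep, entry);
 */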
extern pgd_t swapper_pg_dir[];

extern void paging_init(void);

/*
 * kern_addr_valid is intended to indicate whether an address is a valid
 * kernel address. Most 32-bit archs define it as always true (like this)
 * but most 64-bit archs actually perform a test. What should we do here?
 */
#define kern_addr_valid(addr)	(1)
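
/*
 * Illustrative use, not part of the original header: callers such as the
 * /proc/kcore read path use this to guard dereferences of kernel
 * addresses, along the lines of:
 *
 *	if (!kern_addr_valid(addr))
 *		return -EFAULT;
 */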
#define io_remap_pfn_range(vma, vaddr, pfn, size, prot)	\
	remap_pfn_range(vma, vaddr, pfn, size, prot)
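
/*
 * Illustrative use, not part of the original header: a driver's mmap
 * handler typically forwards a device MMIO region to user space with
 * this macro. "foo_mmap" and "foo_phys_base" below are hypothetical
 * names:
 *
 *	static int foo_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		unsigned long size = vma->vm_end - vma->vm_start;
 *
 *		return io_remap_pfn_range(vma, vma->vm_start,
 *					  foo_phys_base >> PAGE_SHIFT,
 *					  size, vma->vm_page_prot);
 *	}
 */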
#include <asm-generic/pgtable.h>

/*
 * This gets called at the end of handling a page fault, when
 * the kernel has put a new PTE into the page table for the process.
 * We use it to ensure coherency between the i-cache and d-cache
 * for the page which has just been mapped in.
 * On machines which use an MMU hash table, we use this to put a
 * corresponding HPTE into the hash table ahead of time, instead of
 * waiting for the inevitable extra hash-table miss exception.
 */
extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t);
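
/*
 * Illustrative call site, not part of the original header: generic mm
 * code invokes this hook right after installing a new PTE at the end of
 * fault handling, roughly:
 *
 *	set_pte_at(mm, address, ptep, entry);
 *	update_mmu_cache(vma, address, entry);
 */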

#endif /* __ASSEMBLY__ */
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_PGTABLE_H */