/* arch/powerpc/include/asm/pgtable.h */
  1. #ifndef _ASM_POWERPC_PGTABLE_H
  2. #define _ASM_POWERPC_PGTABLE_H
  3. #ifdef __KERNEL__
  4. #ifndef __ASSEMBLY__
  5. #include <asm/processor.h> /* For TASK_SIZE */
  6. #include <asm/mmu.h>
  7. #include <asm/page.h>
  8. struct mm_struct;
  9. #endif /* !__ASSEMBLY__ */
  10. #if defined(CONFIG_PPC64)
  11. # include <asm/pgtable-ppc64.h>
  12. #else
  13. # include <asm/pgtable-ppc32.h>
  14. #endif
  15. #ifndef __ASSEMBLY__
  16. /*
  17. * Macro to mark a page protection value as "uncacheable".
  18. */
  19. #define _PAGE_CACHE_CTL (_PAGE_COHERENT | _PAGE_GUARDED | _PAGE_NO_CACHE | \
  20. _PAGE_WRITETHRU)
  21. #define pgprot_noncached(prot) (__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \
  22. _PAGE_NO_CACHE | _PAGE_GUARDED))
  23. #define pgprot_noncached_wc(prot) (__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \
  24. _PAGE_NO_CACHE))
  25. #define pgprot_cached(prot) (__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \
  26. _PAGE_COHERENT))
  27. #define pgprot_cached_wthru(prot) (__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \
  28. _PAGE_COHERENT | _PAGE_WRITETHRU))
  29. struct file;
  30. extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
  31. unsigned long size, pgprot_t vma_prot);
  32. #define __HAVE_PHYS_MEM_ACCESS_PROT
  33. /*
  34. * ZERO_PAGE is a global shared page that is always zero: used
  35. * for zero-mapped memory areas etc..
  36. */
  37. extern unsigned long empty_zero_page[];
  38. #define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
  39. extern pgd_t swapper_pg_dir[];
  40. extern void paging_init(void);
  41. /*
  42. * kern_addr_valid is intended to indicate whether an address is a valid
  43. * kernel address. Most 32-bit archs define it as always true (like this)
  44. * but most 64-bit archs actually perform a test. What should we do here?
  45. */
  46. #define kern_addr_valid(addr) (1)
  47. #define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
  48. remap_pfn_range(vma, vaddr, pfn, size, prot)
  49. #include <asm-generic/pgtable.h>
  50. /*
  51. * This gets called at the end of handling a page fault, when
  52. * the kernel has put a new PTE into the page table for the process.
  53. * We use it to ensure coherency between the i-cache and d-cache
  54. * for the page which has just been mapped in.
  55. * On machines which use an MMU hash table, we use this to put a
  56. * corresponding HPTE into the hash table ahead of time, instead of
  57. * waiting for the inevitable extra hash-table miss exception.
  58. */
  59. extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t);
  60. #endif /* __ASSEMBLY__ */
  61. #endif /* __KERNEL__ */
  62. #endif /* _ASM_POWERPC_PGTABLE_H */