pgtable.h

#ifndef _ASM_POWERPC_PGTABLE_H
#define _ASM_POWERPC_PGTABLE_H
#ifdef __KERNEL__

#ifndef __ASSEMBLY__
#include <asm/processor.h>		/* For TASK_SIZE */
#include <asm/mmu.h>
#include <asm/page.h>
struct mm_struct;

#ifdef CONFIG_DEBUG_VM
extern void assert_pte_locked(struct mm_struct *mm, unsigned long addr);
#else /* CONFIG_DEBUG_VM */
static inline void assert_pte_locked(struct mm_struct *mm, unsigned long addr)
{
}
#endif /* !CONFIG_DEBUG_VM */

#endif /* !__ASSEMBLY__ */

#if defined(CONFIG_PPC64)
# include <asm/pgtable-ppc64.h>
#else
# include <asm/pgtable-ppc32.h>
#endif

/* Special mapping for AGP */
#define PAGE_AGP		(PAGE_KERNEL_NC)
#define HAVE_PAGE_AGP

#ifndef __ASSEMBLY__
/* Insert a PTE, top-level function is out of line. It uses an inline
 * low level function in the respective pgtable-* files
 */
extern void set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep,
		       pte_t pte);

/* This low level function performs the actual PTE insertion.
 * Setting the PTE depends on the MMU type and other factors. It's
 * a horrible mess that I'm not going to try to clean up now but
 * I'm keeping it in one place rather than spread around
 */
static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
				pte_t *ptep, pte_t pte, int percpu)
{
#if defined(CONFIG_PPC_STD_MMU_32) && defined(CONFIG_SMP) && !defined(CONFIG_PTE_64BIT)
	/* First case is 32-bit Hash MMU in SMP mode with 32-bit PTEs. We use the
	 * helper pte_update() which does an atomic update. We need to do that
	 * because a concurrent invalidation can clear _PAGE_HASHPTE. If it's a
	 * per-CPU PTE such as a kmap_atomic, we do a simple update preserving
	 * the hash bits instead (ie, same as the non-SMP case)
	 */
	if (percpu)
		*ptep = __pte((pte_val(*ptep) & _PAGE_HASHPTE)
			      | (pte_val(pte) & ~_PAGE_HASHPTE));
	else
		pte_update(ptep, ~_PAGE_HASHPTE, pte_val(pte));

#elif defined(CONFIG_PPC32) && defined(CONFIG_PTE_64BIT) && defined(CONFIG_SMP)
	/* Second case is 32-bit with 64-bit PTE in SMP mode. In this case, we
	 * can just store as long as we do the two halves in the right order
	 * with a barrier in between. This is possible because we take care,
	 * in the hash code, to pre-invalidate if the PTE was already hashed,
	 * which synchronizes us with any concurrent invalidation.
	 * In the percpu case, we also fall back to the simple update preserving
	 * the hash bits
	 */
	if (percpu) {
		*ptep = __pte((pte_val(*ptep) & _PAGE_HASHPTE)
			      | (pte_val(pte) & ~_PAGE_HASHPTE));
		return;
	}
#if _PAGE_HASHPTE != 0
	if (pte_val(*ptep) & _PAGE_HASHPTE)
		flush_hash_entry(mm, ptep, addr);
#endif
	__asm__ __volatile__("\
		stw%U0%X0 %2,%0\n\
		eieio\n\
		stw%U0%X0 %L2,%1"
	: "=m" (*ptep), "=m" (*((unsigned char *)ptep+4))
	: "r" (pte) : "memory");

#elif defined(CONFIG_PPC_STD_MMU_32)
	/* Third case is 32-bit hash table in UP mode, we need to preserve
	 * the _PAGE_HASHPTE bit since we may not have invalidated the previous
	 * translation in the hash yet (done in a subsequent flush_tlb_xxx())
	 * and we need to keep track that this PTE needs invalidating
	 */
	*ptep = __pte((pte_val(*ptep) & _PAGE_HASHPTE)
		      | (pte_val(pte) & ~_PAGE_HASHPTE));

#else
	/* Anything else just stores the PTE normally. That covers all 64-bit
	 * cases, and 32-bit non-hash with 64-bit PTEs in UP mode
	 */
	*ptep = pte;
#endif
}
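
/*
 * Illustrative usage (a sketch, not part of this header): callers normally
 * build a PTE with mk_pte() and install it through the out-of-line
 * set_pte_at(); __set_pte_at() with percpu != 0 is reserved for per-CPU
 * mappings such as kmap_atomic slots.
 *
 *	pte_t entry = mk_pte(page, vma->vm_page_prot);
 *	entry = pte_mkyoung(entry);
 *	set_pte_at(vma->vm_mm, address, ptep, entry);
 */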

#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
extern int ptep_set_access_flags(struct vm_area_struct *vma, unsigned long address,
				 pte_t *ptep, pte_t entry, int dirty);

/*
 * Macro to mark a page protection value as "uncacheable".
 */
#define _PAGE_CACHE_CTL	(_PAGE_COHERENT | _PAGE_GUARDED | _PAGE_NO_CACHE | \
			 _PAGE_WRITETHRU)

#define pgprot_noncached(prot)	  (__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \
					    _PAGE_NO_CACHE | _PAGE_GUARDED))

#define pgprot_noncached_wc(prot) (__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \
					    _PAGE_NO_CACHE))

#define pgprot_cached(prot)	  (__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \
					    _PAGE_COHERENT))

#define pgprot_cached_wthru(prot) (__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \
					    _PAGE_COHERENT | _PAGE_WRITETHRU))
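
/*
 * Illustrative usage (a sketch, not part of this header): a device driver's
 * mmap() method typically strips the cache-control bits and maps MMIO
 * registers uncached and guarded, e.g.:
 *
 *	static int foo_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 *		return io_remap_pfn_range(vma, vma->vm_start, foo_mmio_pfn,
 *					  vma->vm_end - vma->vm_start,
 *					  vma->vm_page_prot);
 *	}
 *
 * foo_mmap() and foo_mmio_pfn are hypothetical names used only for this
 * example.
 */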

struct file;
extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
				     unsigned long size, pgprot_t vma_prot);
#define __HAVE_PHYS_MEM_ACCESS_PROT

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc.
 */
extern unsigned long empty_zero_page[];
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
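
/*
 * Illustrative usage (a sketch, not part of this header): generic mm code
 * can back a read fault on anonymous memory with the shared zero page
 * instead of allocating a fresh page, for example:
 *
 *	struct page *page = ZERO_PAGE(address);
 *	pte_t entry = pte_wrprotect(mk_pte(page, vma->vm_page_prot));
 *	set_pte_at(vma->vm_mm, address, ptep, entry);
 */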

extern pgd_t swapper_pg_dir[];

extern void paging_init(void);

/*
 * kern_addr_valid is intended to indicate whether an address is a valid
 * kernel address.  Most 32-bit archs define it as always true (like this)
 * but most 64-bit archs actually perform a test.  What should we do here?
 */
#define kern_addr_valid(addr)	(1)

#define io_remap_pfn_range(vma, vaddr, pfn, size, prot)		\
		remap_pfn_range(vma, vaddr, pfn, size, prot)

#include <asm-generic/pgtable.h>

/*
 * This gets called at the end of handling a page fault, when
 * the kernel has put a new PTE into the page table for the process.
 * We use it to ensure coherency between the i-cache and d-cache
 * for the page which has just been mapped in.
 * On machines which use an MMU hash table, we use this to put a
 * corresponding HPTE into the hash table ahead of time, instead of
 * waiting for the inevitable extra hash-table miss exception.
 */
extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t);
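
/*
 * Illustrative call sequence (a sketch of the generic fault path, not part
 * of this header): after the fault handler installs the new PTE it lets the
 * architecture preload the HPTE or flush the caches as needed:
 *
 *	set_pte_at(vma->vm_mm, address, ptep, entry);
 *	update_mmu_cache(vma, address, entry);
 */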

#endif /* __ASSEMBLY__ */
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_PGTABLE_H */