#ifndef _ASM_POWERPC_PGTABLE_64K_H
#define _ASM_POWERPC_PGTABLE_64K_H

#include <asm-generic/pgtable-nopud.h>

#define PTE_INDEX_SIZE  12
#define PMD_INDEX_SIZE  12
#define PUD_INDEX_SIZE  0
#define PGD_INDEX_SIZE  4

#ifndef __ASSEMBLY__
#define PTE_TABLE_SIZE  (sizeof(real_pte_t) << PTE_INDEX_SIZE)
#define PMD_TABLE_SIZE  (sizeof(pmd_t) << PMD_INDEX_SIZE)
#define PGD_TABLE_SIZE  (sizeof(pgd_t) << PGD_INDEX_SIZE)

#define PTRS_PER_PTE    (1 << PTE_INDEX_SIZE)
#define PTRS_PER_PMD    (1 << PMD_INDEX_SIZE)
#define PTRS_PER_PGD    (1 << PGD_INDEX_SIZE)
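/*
 * For reference, with the 64K base page (PAGE_SHIFT == 16) the index sizes
 * above give 4096 PTEs per PTE page, 4096 entries per PMD page and 16 PGD
 * entries.  Note that PTE_TABLE_SIZE is sized in units of real_pte_t rather
 * than pte_t; see the __real_pte() machinery further down.
 */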
#ifdef CONFIG_PPC_SUBPAGE_PROT
/*
 * For the sub-page protection option, we extend the PGD with one of
 * these.  Basically we have a 3-level tree, with the top level being
 * the protptrs array.  To optimize speed and memory consumption when
 * only addresses < 4GB are being protected, pointers to the first
 * four pages of sub-page protection words are stored in the low_prot
 * array.
 * Each page of sub-page protection words protects 1GB (4 bytes
 * protects 64k).  For the 3-level tree, each page of pointers then
 * protects 8TB.
 */
struct subpage_prot_table {
        unsigned long maxaddr;  /* only addresses < this are protected */
        unsigned int **protptrs[2];
        unsigned int *low_prot[4];
};
#undef PGD_TABLE_SIZE
#define PGD_TABLE_SIZE ((sizeof(pgd_t) << PGD_INDEX_SIZE) + \
                        sizeof(struct subpage_prot_table))

#define SBP_L1_BITS     (PAGE_SHIFT - 2)
#define SBP_L2_BITS     (PAGE_SHIFT - 3)
#define SBP_L1_COUNT    (1 << SBP_L1_BITS)
#define SBP_L2_COUNT    (1 << SBP_L2_BITS)
#define SBP_L2_SHIFT    (PAGE_SHIFT + SBP_L1_BITS)
#define SBP_L3_SHIFT    (SBP_L2_SHIFT + SBP_L2_BITS)
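/*
 * Working the arithmetic through with PAGE_SHIFT == 16: SBP_L1_BITS is 14,
 * so one 64K page holds 16384 4-byte protection words, each covering 64K,
 * i.e. 1GB per page and SBP_L2_SHIFT == 30.  SBP_L2_BITS is 13, so one 64K
 * page of 8-byte pointers holds 8192 of them, i.e. 8TB per pointer page and
 * SBP_L3_SHIFT == 43, matching the comment above.
 */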
extern void subpage_prot_free(pgd_t *pgd);
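/*
 * The subpage_prot_table is carved out of the same allocation as the PGD:
 * the PGD_TABLE_SIZE override above reserves sizeof(struct subpage_prot_table)
 * bytes immediately after the 16 PGD entries, and that is where the helper
 * below finds it.
 */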
static inline struct subpage_prot_table *pgd_subpage_prot(pgd_t *pgd)
{
        return (struct subpage_prot_table *)(pgd + PTRS_PER_PGD);
}
#endif /* CONFIG_PPC_SUBPAGE_PROT */
#endif /* __ASSEMBLY__ */
/* With 4k base page size, hugepage PTEs go at the PMD level */
#define MIN_HUGEPTE_SHIFT       PAGE_SHIFT

/* PMD_SHIFT determines what a second-level page table entry can map */
#define PMD_SHIFT       (PAGE_SHIFT + PTE_INDEX_SIZE)
#define PMD_SIZE        (1UL << PMD_SHIFT)
#define PMD_MASK        (~(PMD_SIZE-1))

/* PGDIR_SHIFT determines what a third-level page table entry can map */
#define PGDIR_SHIFT     (PMD_SHIFT + PMD_INDEX_SIZE)
#define PGDIR_SIZE      (1UL << PGDIR_SHIFT)
#define PGDIR_MASK      (~(PGDIR_SIZE-1))
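/*
 * With PAGE_SHIFT == 16 this works out to PMD_SHIFT == 28 (each PMD entry
 * maps a 256MB region of PTEs) and PGDIR_SHIFT == 40 (each PGD entry maps
 * 1TB).
 */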
/* Additional PTE bits (don't change without checking asm in hash_low.S) */
#define _PAGE_HPTE_SUB  0x0ffff000 /* combo only: sub pages HPTE bits */
#define _PAGE_HPTE_SUB0 0x08000000 /* combo only: first sub page */
#define _PAGE_COMBO     0x10000000 /* this is a combo 4k page */
#define _PAGE_4K_PFN    0x20000000 /* PFN is for a single 4k page */
/* Note the full page bits must be in the same location as for normal
 * 4k pages as the same assembly will be used to insert 64K pages
 * whether the kernel has CONFIG_PPC_64K_PAGES or not
 */
#define _PAGE_F_SECOND  0x00008000 /* full page: hidx bits */
#define _PAGE_F_GIX     0x00007000 /* full page: hidx bits */

/* PTE flags to conserve for HPTE identification */
#define _PAGE_HPTEFLAGS (_PAGE_BUSY | _PAGE_HASHPTE | _PAGE_HPTE_SUB |\
                         _PAGE_COMBO)
/* Shift to put page number into pte.
 *
 * That gives us a max RPN of 34 bits, which means a max of 50 bits
 * of addressable physical space, or 46 bits for the special 4k PFNs.
 */
#define PTE_RPN_SHIFT   (30)
#define PTE_RPN_MAX     (1UL << (64 - PTE_RPN_SHIFT))
#define PTE_RPN_MASK    (~((1UL<<PTE_RPN_SHIFT)-1))
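/*
 * Spelling out the numbers in the comment above: reserving the low 30 bits
 * of a 64-bit PTE for flags leaves 64 - 30 = 34 bits of real page number.
 * With a 64K (2^16) page that addresses 34 + 16 = 50 bits of physical space;
 * for _PAGE_4K_PFN mappings the RPN is a 4K frame number, giving
 * 34 + 12 = 46 bits.
 */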
/* _PAGE_CHG_MASK masks the bits that are to be preserved across
 * pgprot changes
 */
#define _PAGE_CHG_MASK  (PTE_RPN_MASK | _PAGE_HPTEFLAGS | _PAGE_DIRTY | \
                         _PAGE_ACCESSED)
/* Bits to mask out from a PMD to get to the PTE page */
#define PMD_MASKED_BITS 0x1ff
/* Bits to mask out from a PGD/PUD to get to the PMD page */
#define PUD_MASKED_BITS 0x1ff

/* Manipulate "rpte" values */
#define __real_pte(e,p)         ((real_pte_t) { \
        (e), pte_val(*((p) + PTRS_PER_PTE)) })
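/*
 * A note on the layout assumed here: because PTE_TABLE_SIZE is sized in
 * real_pte_t units, each PTE page has room for a second array of words
 * starting at offset PTRS_PER_PTE, and __real_pte() reads the "hidx" word
 * for a PTE from that second half.  For a combo page the hidx word packs
 * one 4-bit hash slot index per 4K subpage, which is what __rpte_to_hidx()
 * extracts below.
 */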
#define __rpte_to_hidx(r,index) ((pte_val((r).pte) & _PAGE_COMBO) ? \
        (((r).hidx >> ((index)<<2)) & 0xf) : ((pte_val((r).pte) >> 12) & 0xf))
#define __rpte_to_pte(r)        ((r).pte)
#define __rpte_sub_valid(rpte, index) \
        (pte_val(rpte.pte) & (_PAGE_HPTE_SUB0 >> (index)))
/* Trick: we set __end to va + 64k, which happens to work for
 * a 16M page as well since we want only one iteration
 */
#define pte_iterate_hashed_subpages(rpte, psize, va, index, shift)         \
        do {                                                                \
                unsigned long __end = va + PAGE_SIZE;                       \
                unsigned __split = (psize == MMU_PAGE_4K ||                 \
                                    psize == MMU_PAGE_64K_AP);              \
                shift = mmu_psize_defs[psize].shift;                        \
                for (index = 0; va < __end; index++, va += (1 << shift)) { \
                        if (!__split || __rpte_sub_valid(rpte, index)) do { \

#define pte_iterate_hashed_end() } while(0); } } while(0)
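/*
 * The two macros above form a bracketed iterator.  A rough usage sketch
 * (the body shown is purely illustrative):
 *
 *      pte_iterate_hashed_subpages(rpte, psize, va, index, shift) {
 *              hidx = __rpte_to_hidx(rpte, index);
 *              ... invalidate the HPTE for this (va, hidx) ...
 *      } pte_iterate_hashed_end();
 *
 * For a full 64K page the loop runs once; for a combo page it visits each
 * 4K subpage whose _PAGE_HPTE_SUB bit indicates an HPTE exists.
 */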
#define pte_pagesize_index(mm, addr, pte)       \
        (((pte) & _PAGE_COMBO)? MMU_PAGE_4K: MMU_PAGE_64K)

#define remap_4k_pfn(vma, addr, pfn, prot)                              \
        remap_pfn_range((vma), (addr), (pfn), PAGE_SIZE,                \
                        __pgprot(pgprot_val((prot)) | _PAGE_4K_PFN))
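/*
 * remap_4k_pfn() is a thin wrapper around remap_pfn_range() that tags the
 * mapping with _PAGE_4K_PFN so the PFN is treated as a single 4K frame even
 * though the base page is 64K.  A hypothetical driver mmap handler might
 * call it roughly as:
 *
 *      remap_4k_pfn(vma, vma->vm_start, pfn, vma->vm_page_prot);
 */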
#endif /* _ASM_POWERPC_PGTABLE_64K_H */