hugepage-hash64.c

/*
 * Copyright IBM Corporation, 2013
 * Author Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2.1 of the GNU Lesser General Public License
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 */

/*
 * PPC64 THP Support for hash based MMUs
 */
#include <linux/mm.h>
#include <asm/machdep.h>
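
/*
 * __hash_page_thp() is called from the hash MMU fault path when the
 * faulting address is mapped by a transparent-huge-page PMD: it
 * creates or updates the hardware hash page table entry (HPTE)
 * backing the faulting base page within the 16M huge page.
 */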
int __hash_page_thp(unsigned long ea, unsigned long access, unsigned long vsid,
                    pmd_t *pmdp, unsigned long trap, int local, int ssize,
                    unsigned int psize)
{
        unsigned int index, valid;
        unsigned char *hpte_slot_array;
        unsigned long rflags, pa, hidx;
        unsigned long old_pmd, new_pmd;
        int ret, lpsize = MMU_PAGE_16M;
        unsigned long vpn, hash, shift, slot;
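
        /*
         * Return convention, matching the other __hash_page_* helpers:
         *   0  - did not handle the fault (PMD busy or mid-split);
         *        the caller retries the access
         *   1  - access and PMD permissions disagree; take a page fault
         *  -1  - the hypervisor refused the HPTE insert
         */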
        /*
         * atomically mark the linux large page PMD busy and dirty
         */
        do {
                old_pmd = pmd_val(*pmdp);
                /* If PMD busy, retry the access */
                if (unlikely(old_pmd & _PAGE_BUSY))
                        return 0;
                /* If PMD is trans splitting retry the access */
                if (unlikely(old_pmd & _PAGE_SPLITTING))
                        return 0;
                /* If PMD permissions don't match, take page fault */
                if (unlikely(access & ~old_pmd))
                        return 1;
                /*
                 * Try to lock the PTE, add ACCESSED and DIRTY if it was
                 * a write access
                 */
                new_pmd = old_pmd | _PAGE_BUSY | _PAGE_ACCESSED;
                if (access & _PAGE_RW)
                        new_pmd |= _PAGE_DIRTY;
        } while (old_pmd != __cmpxchg_u64((unsigned long *)pmdp,
                                          old_pmd, new_pmd));
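
        /*
         * From here on we own the PMD: _PAGE_BUSY is set, so no other
         * CPU will change the PMD or look at hpte_slot_array until we
         * store new_pmd with _PAGE_BUSY cleared at the end.
         */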
        /*
         * PP bits. _PAGE_USER is already PP bit 0x2, so we only
         * need to add in 0x1 if it's a read-only user page
         */
        rflags = new_pmd & _PAGE_USER;
        if ((new_pmd & _PAGE_USER) && !((new_pmd & _PAGE_RW) &&
                                        (new_pmd & _PAGE_DIRTY)))
                rflags |= 0x1;
        /*
         * _PAGE_EXEC -> HW_NO_EXEC since it's inverted
         */
        rflags |= ((new_pmd & _PAGE_EXEC) ? 0 : HPTE_R_N);
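
        /*
         * That is, per the PP encoding described above:
         *   kernel page                  -> PP = 0x0 (kernel read/write)
         *   user page, writable + dirty  -> PP = 0x2 (user read/write)
         *   any other user page          -> PP = 0x3 (user read-only)
         */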
#if 0
        if (!cpu_has_feature(CPU_FTR_COHERENT_ICACHE)) {
                /*
                 * No CPU has hugepages but lacks no execute, so we
                 * don't need to worry about that case
                 */
                rflags = hash_page_do_lazy_icache(rflags, __pte(old_pmd), trap);
        }
#endif
        /*
         * Find the slot index details for this ea, using base page size.
         */
        shift = mmu_psize_defs[psize].shift;
        index = (ea & ~HPAGE_PMD_MASK) >> shift;
        BUG_ON(index >= 4096);

        vpn = hpt_vpn(ea, vsid, ssize);
        hash = hpt_hash(vpn, shift, ssize);
        hpte_slot_array = get_hpte_slot_array(pmdp);
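
        /*
         * A 16M huge page is backed by up to 4096 base-page HPTEs (the
         * 4K base page case, hence the BUG_ON above). hpte_slot_array
         * keeps one entry per base-page index, recording whether an
         * HPTE exists and where it sits (_PTEIDX_SECONDARY plus the
         * slot within its group), so a repeat fault can update the
         * existing HPTE instead of inserting a duplicate.
         */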
        valid = hpte_valid(hpte_slot_array, index);
        if (valid) {
                /* update the hpte bits */
                hidx = hpte_hash_index(hpte_slot_array, index);
                if (hidx & _PTEIDX_SECONDARY)
                        hash = ~hash;
                slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
                slot += hidx & _PTEIDX_GROUP_IX;

                ret = ppc_md.hpte_updatepp(slot, rflags, vpn,
                                           psize, lpsize, ssize, local);
                /*
                 * We failed to update, try to insert a new entry.
                 */
                if (ret == -1) {
                        /*
                         * large pte is marked busy, so we can be sure
                         * nobody is looking at hpte_slot_array. hence we can
                         * safely update this here.
                         */
                        valid = 0;
                        new_pmd &= ~_PAGE_HPTEFLAGS;
                        hpte_slot_array[index] = 0;
                } else
                        /* clear the busy bits and set the hash pte bits */
                        new_pmd = (new_pmd & ~_PAGE_HPTEFLAGS) | _PAGE_HASHPTE;
        }
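
        /*
         * Either no HPTE existed for this index, or the update above
         * failed because the HPTE had been evicted; in both cases fall
         * through and insert a fresh entry.
         */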
        if (!valid) {
                unsigned long hpte_group;

                /* insert new entry */
                pa = pmd_pfn(__pmd(old_pmd)) << PAGE_SHIFT;
repeat:
                hpte_group = ((hash & htab_hash_mask) * HPTES_PER_GROUP) & ~0x7UL;

                /* clear the busy bits and set the hash pte bits */
                new_pmd = (new_pmd & ~_PAGE_HPTEFLAGS) | _PAGE_HASHPTE;

                /* Add in WIMG bits */
                rflags |= (new_pmd & (_PAGE_WRITETHRU | _PAGE_NO_CACHE |
                                      _PAGE_COHERENT | _PAGE_GUARDED));

                /* Insert into the hash table, primary slot */
                slot = ppc_md.hpte_insert(hpte_group, vpn, pa, rflags, 0,
                                          psize, lpsize, ssize);
                /*
                 * Primary is full, try the secondary
                 */
                if (unlikely(slot == -1)) {
                        hpte_group = ((~hash & htab_hash_mask) *
                                      HPTES_PER_GROUP) & ~0x7UL;
                        slot = ppc_md.hpte_insert(hpte_group, vpn, pa,
                                                  rflags, HPTE_V_SECONDARY,
                                                  psize, lpsize, ssize);
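                        /*
                         * If the secondary group is full too, evict a
                         * random entry from one of the two groups (the
                         * timebase low bit picks which) and retry the
                         * insert from the top.
                         */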
                        if (slot == -1) {
                                if (mftb() & 0x1)
                                        hpte_group = ((hash & htab_hash_mask) *
                                                      HPTES_PER_GROUP) & ~0x7UL;
                                ppc_md.hpte_remove(hpte_group);
                                goto repeat;
                        }
                }
                /*
                 * Hypervisor failure. Restore old pmd and return -1
                 * similar to __hash_page_*
                 */
                if (unlikely(slot == -2)) {
                        *pmdp = __pmd(old_pmd);
                        hash_failure_debug(ea, access, vsid, trap, ssize,
                                           psize, lpsize, old_pmd);
                        return -1;
                }
                /*
                 * large pte is marked busy, so we can be sure
                 * nobody is looking at hpte_slot_array. hence we can
                 * safely update this here.
                 */
                mark_hpte_slot_valid(hpte_slot_array, index, slot);
        }
        /*
         * No need to use ldarx/stdcx here: we still hold _PAGE_BUSY,
         * so no other CPU can race with this plain store, which both
         * clears the busy bit and publishes the new flags.
         */
        *pmdp = __pmd(new_pmd & ~_PAGE_BUSY);
        return 0;
}