htab.c

/*
 * iSeries hashtable management.
 * Derived from pSeries_htab.c
 *
 * SMP scalability work:
 *    Copyright (C) 2001 Anton Blanchard <anton@au.ibm.com>, IBM
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <asm/machdep.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>
#include <asm/abs_addr.h>
#include <linux/spinlock.h>

#include "call_hpt.h"

static spinlock_t iSeries_hlocks[64] __cacheline_aligned_in_smp =
        { [0 ... 63] = SPIN_LOCK_UNLOCKED };

/*
 * Very primitive algorithm for picking up a lock
 */
static inline void iSeries_hlock(unsigned long slot)
{
        if (slot & 0x8)
                slot = ~slot;
        spin_lock(&iSeries_hlocks[(slot >> 4) & 0x3f]);
}

static inline void iSeries_hunlock(unsigned long slot)
{
        if (slot & 0x8)
                slot = ~slot;
        spin_unlock(&iSeries_hlocks[(slot >> 4) & 0x3f]);
}
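
/*
 * Insert an HPTE via the hypervisor.  Returns the slot index within the
 * group, with bit 3 set when the entry lands in the secondary group, or
 * -1 if no free slot was found in either group.
 */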
static long iSeries_hpte_insert(unsigned long hpte_group, unsigned long va,
                                unsigned long pa, unsigned long rflags,
                                unsigned long vflags, int psize, int ssize)
{
        long slot;
        struct hash_pte lhpte;
        int secondary = 0;

        BUG_ON(psize != MMU_PAGE_4K);

        /*
         * The hypervisor tries both primary and secondary.
         * If we are being called to insert in the secondary,
         * it means we have already tried both primary and secondary,
         * so we return failure immediately.
         */
        if (vflags & HPTE_V_SECONDARY)
                return -1;

        iSeries_hlock(hpte_group);

        slot = HvCallHpt_findValid(&lhpte, va >> HW_PAGE_SHIFT);
        if (unlikely(lhpte.v & HPTE_V_VALID)) {
                if (vflags & HPTE_V_BOLTED) {
                        HvCallHpt_setSwBits(slot, 0x10, 0);
                        HvCallHpt_setPp(slot, PP_RWXX);
                        iSeries_hunlock(hpte_group);
                        if (slot < 0)
                                return 0x8 | (slot & 7);
                        else
                                return slot & 7;
                }
                BUG();
        }

        if (slot == -1) {       /* No available entry found in either group */
                iSeries_hunlock(hpte_group);
                return -1;
        }

        if (slot < 0) {         /* MSB set means secondary group */
                vflags |= HPTE_V_SECONDARY;
                secondary = 1;
                slot &= 0x7fffffffffffffff;
        }

        lhpte.v = hpte_encode_v(va, MMU_PAGE_4K, MMU_SEGSIZE_256M) |
                vflags | HPTE_V_VALID;
        lhpte.r = hpte_encode_r(phys_to_abs(pa), MMU_PAGE_4K) | rflags;

        /* Now fill in the actual HPTE */
        HvCallHpt_addValidate(slot, secondary, &lhpte);

        iSeries_hunlock(hpte_group);

        return (secondary << 3) | (slot & 7);
}
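
/*
 * Read back the HPTE at the given slot from the hypervisor and return
 * its first doubleword (the valid/AVPN word).
 */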
static unsigned long iSeries_hpte_getword0(unsigned long slot)
{
        struct hash_pte hpte;

        HvCallHpt_get(&hpte, slot);

        return hpte.v;
}
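
/*
 * Make room in a full group by invalidating one of its non-bolted
 * entries, starting the search at a pseudo-random slot.  Returns a
 * non-negative value on success, or -1 if every slot is bolted.
 */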
static long iSeries_hpte_remove(unsigned long hpte_group)
{
        unsigned long slot_offset;
        int i;
        unsigned long hpte_v;

        /* Pick a random slot to start at */
        slot_offset = mftb() & 0x7;

        iSeries_hlock(hpte_group);

        for (i = 0; i < HPTES_PER_GROUP; i++) {
                hpte_v = iSeries_hpte_getword0(hpte_group + slot_offset);

                if (! (hpte_v & HPTE_V_BOLTED)) {
                        HvCallHpt_invalidateSetSwBitsGet(hpte_group +
                                                         slot_offset, 0, 0);
                        iSeries_hunlock(hpte_group);
                        return i;
                }

                slot_offset++;
                slot_offset &= 0x7;
        }

        iSeries_hunlock(hpte_group);

        return -1;
}

/*
 * Update the protection bits of an existing HPTE, but only if it is
 * still valid and still maps the supplied virtual address.
 *
 * The HyperVisor expects the "flags" argument in this form:
 *      bits 0..59  : reserved
 *      bit  60     : N
 *      bits 61..63 : PP2,PP1,PP0
 */
static long iSeries_hpte_updatepp(unsigned long slot, unsigned long newpp,
                                  unsigned long va, int psize, int ssize,
                                  int local)
{
        struct hash_pte hpte;
        unsigned long want_v;

        iSeries_hlock(slot);

        HvCallHpt_get(&hpte, slot);
        want_v = hpte_encode_v(va, MMU_PAGE_4K, MMU_SEGSIZE_256M);

        if (HPTE_V_COMPARE(hpte.v, want_v) && (hpte.v & HPTE_V_VALID)) {
                /*
                 * Hypervisor expects bits as NPPP, which is
                 * different from how they are mapped in our PP.
                 */
                HvCallHpt_setPp(slot, (newpp & 0x3) | ((newpp & 0x4) << 1));
                iSeries_hunlock(slot);
                return 0;
        }
        iSeries_hunlock(slot);

        return -1;
}

/*
 * Functions used to find the PTE for a particular virtual address.
 * Only used during boot when bolting pages.
 *
 * Input : vpn : virtual page number
 * Output: PTE index within the page table of the entry
 *         -1 on failure
 */
static long iSeries_hpte_find(unsigned long vpn)
{
        struct hash_pte hpte;
        long slot;

        /*
         * The HvCallHpt_findValid interface is as follows:
         * 0xffffffffffffffff : No entry found.
         * 0x00000000xxxxxxxx : Entry found in primary group, slot x
         * 0x80000000xxxxxxxx : Entry found in secondary group, slot x
         */
        slot = HvCallHpt_findValid(&hpte, vpn);
        if (hpte.v & HPTE_V_VALID) {
                if (slot < 0) {
                        slot &= 0x7fffffffffffffff;
                        slot = -slot;
                }
        } else
                slot = -1;

        return slot;
}

/*
 * Update the page protection bits. Intended to be used to create
 * guard pages for kernel data structures on pages which are bolted
 * in the HPT. Assumes pages being operated on will not be stolen.
 * Does not work on large pages.
 *
 * No need to lock here because we should be the only user.
 */
static void iSeries_hpte_updateboltedpp(unsigned long newpp, unsigned long ea,
                                        int psize, int ssize)
{
        unsigned long vsid, va, vpn;
        long slot;

        BUG_ON(psize != MMU_PAGE_4K);

        vsid = get_kernel_vsid(ea, MMU_SEGSIZE_256M);
        va = (vsid << 28) | (ea & 0x0fffffff);
        vpn = va >> HW_PAGE_SHIFT;
        slot = iSeries_hpte_find(vpn);
        if (slot == -1)
                panic("updateboltedpp: Could not find page to bolt\n");
        HvCallHpt_setPp(slot, newpp);
}
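
/*
 * Invalidate the HPTE at the given slot, provided it is still valid
 * and its AVPN still matches the supplied virtual address.
 */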
static void iSeries_hpte_invalidate(unsigned long slot, unsigned long va,
                                    int psize, int ssize, int local)
{
        unsigned long hpte_v;
        unsigned long avpn = va >> 23;
        unsigned long flags;

        local_irq_save(flags);

        iSeries_hlock(slot);

        hpte_v = iSeries_hpte_getword0(slot);

        if ((HPTE_V_AVPN_VAL(hpte_v) == avpn) && (hpte_v & HPTE_V_VALID))
                HvCallHpt_invalidateSetSwBitsGet(slot, 0, 0);

        iSeries_hunlock(slot);

        local_irq_restore(flags);
}
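
/*
 * Wire the iSeries hypervisor-backed HPTE operations into the machine
 * descriptor at boot.
 */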
void __init hpte_init_iSeries(void)
{
        ppc_md.hpte_invalidate = iSeries_hpte_invalidate;
        ppc_md.hpte_updatepp = iSeries_hpte_updatepp;
        ppc_md.hpte_updateboltedpp = iSeries_hpte_updateboltedpp;
        ppc_md.hpte_insert = iSeries_hpte_insert;
        ppc_md.hpte_remove = iSeries_hpte_remove;
}