htab.c

/*
 * iSeries hashtable management.
 *	Derived from pSeries_htab.c
 *
 * SMP scalability work:
 *    Copyright (C) 2001 Anton Blanchard <anton@au.ibm.com>, IBM
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <asm/machdep.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>
#include <asm/abs_addr.h>
#include <linux/spinlock.h>

#include "call_hpt.h"

static spinlock_t iSeries_hlocks[64] __cacheline_aligned_in_smp =
	{ [0 ... 63] = SPIN_LOCK_UNLOCKED};
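
/*
 * Each of the 64 locks covers 16 consecutive HPTE slots (two 8-entry
 * HPTE groups); iSeries_hlock() below hashes a slot number to a lock.
 */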

/*
 * Very primitive algorithm for picking up a lock
 */
static inline void iSeries_hlock(unsigned long slot)
{
	if (slot & 0x8)
		slot = ~slot;
	spin_lock(&iSeries_hlocks[(slot >> 4) & 0x3f]);
}

static inline void iSeries_hunlock(unsigned long slot)
{
	if (slot & 0x8)
		slot = ~slot;
	spin_unlock(&iSeries_hlocks[(slot >> 4) & 0x3f]);
}
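
/*
 * Insert an HPTE mapping va -> pa into the group chosen by the caller.
 * Returns the slot index within the group, with bit 3 set if the entry
 * landed in the secondary group, or -1 if no slot was available.
 */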
long iSeries_hpte_insert(unsigned long hpte_group, unsigned long va,
			 unsigned long pa, unsigned long rflags,
			 unsigned long vflags, int psize)
{
	long slot;
	hpte_t lhpte;
	int secondary = 0;

	BUG_ON(psize != MMU_PAGE_4K);

	/*
	 * The hypervisor tries both primary and secondary.
	 * If we are being called to insert in the secondary,
	 * it means we have already tried both primary and secondary,
	 * so we return failure immediately.
	 */
	if (vflags & HPTE_V_SECONDARY)
		return -1;

	iSeries_hlock(hpte_group);

	slot = HvCallHpt_findValid(&lhpte, va >> HW_PAGE_SHIFT);
	if (unlikely(lhpte.v & HPTE_V_VALID)) {
		if (vflags & HPTE_V_BOLTED) {
			HvCallHpt_setSwBits(slot, 0x10, 0);
			HvCallHpt_setPp(slot, PP_RWXX);
			iSeries_hunlock(hpte_group);
			if (slot < 0)
				return 0x8 | (slot & 7);
			else
				return slot & 7;
		}
		BUG();
	}

	if (slot == -1) { /* No available entry found in either group */
		iSeries_hunlock(hpte_group);
		return -1;
	}

	if (slot < 0) {		/* MSB set means secondary group */
		vflags |= HPTE_V_SECONDARY;
		secondary = 1;
		slot &= 0x7fffffffffffffff;
	}

	lhpte.v = hpte_encode_v(va, MMU_PAGE_4K) | vflags | HPTE_V_VALID;
	lhpte.r = hpte_encode_r(phys_to_abs(pa), MMU_PAGE_4K) | rflags;

	/* Now fill in the actual HPTE */
	HvCallHpt_addValidate(slot, secondary, &lhpte);

	iSeries_hunlock(hpte_group);

	return (secondary << 3) | (slot & 7);
}
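
/* Read the HPTE at @slot and return its first doubleword (the "v" word). */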
static unsigned long iSeries_hpte_getword0(unsigned long slot)
{
	hpte_t hpte;

	HvCallHpt_get(&hpte, slot);

	return hpte.v;
}
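
/*
 * Evict a non-bolted entry from the given HPTE group to make room for
 * a new insertion.  Returns a non-negative value on success, or -1 if
 * every slot in the group is bolted.
 */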
static long iSeries_hpte_remove(unsigned long hpte_group)
{
	unsigned long slot_offset;
	int i;
	unsigned long hpte_v;

	/* Pick a random slot to start at */
	slot_offset = mftb() & 0x7;

	iSeries_hlock(hpte_group);

	for (i = 0; i < HPTES_PER_GROUP; i++) {
		hpte_v = iSeries_hpte_getword0(hpte_group + slot_offset);

		if (! (hpte_v & HPTE_V_BOLTED)) {
			HvCallHpt_invalidateSetSwBitsGet(hpte_group +
							 slot_offset, 0, 0);
			iSeries_hunlock(hpte_group);
			return i;
		}

		slot_offset++;
		slot_offset &= 0x7;
	}

	iSeries_hunlock(hpte_group);

	return -1;
}

/*
 * The HyperVisor expects the "flags" argument in this form:
 *	bits  0..59 : reserved
 *	bit      60 : N
 *	bits 61..63 : PP2,PP1,PP0
 */
static long iSeries_hpte_updatepp(unsigned long slot, unsigned long newpp,
				  unsigned long va, int psize, int local)
{
	hpte_t hpte;
	unsigned long want_v;

	iSeries_hlock(slot);

	HvCallHpt_get(&hpte, slot);
	want_v = hpte_encode_v(va, MMU_PAGE_4K);

	if (HPTE_V_COMPARE(hpte.v, want_v) && (hpte.v & HPTE_V_VALID)) {
		/*
		 * Hypervisor expects bits as NPPP, which is
		 * different from how they are mapped in our PP.
		 */
		HvCallHpt_setPp(slot, (newpp & 0x3) | ((newpp & 0x4) << 1));
		iSeries_hunlock(slot);
		return 0;
	}
	iSeries_hunlock(slot);

	return -1;
}

/*
 * Functions used to find the PTE for a particular virtual address.
 * Only used during boot when bolting pages.
 *
 * Input : vpn : virtual page number
 * Output: PTE index within the page table of the entry
 *         -1 on failure
 */
static long iSeries_hpte_find(unsigned long vpn)
{
	hpte_t hpte;
	long slot;

	/*
	 * The HvCallHpt_findValid interface is as follows:
	 * 0xffffffffffffffff : No entry found.
	 * 0x00000000xxxxxxxx : Entry found in primary group, slot x
	 * 0x80000000xxxxxxxx : Entry found in secondary group, slot x
	 */
	slot = HvCallHpt_findValid(&hpte, vpn);
	if (hpte.v & HPTE_V_VALID) {
		if (slot < 0) {
			slot &= 0x7fffffffffffffff;
			slot = -slot;
		}
	} else
		slot = -1;
	return slot;
}

/*
 * Update the page protection bits. Intended to be used to create
 * guard pages for kernel data structures on pages which are bolted
 * in the HPT. Assumes pages being operated on will not be stolen.
 * Does not work on large pages.
 *
 * No need to lock here because we should be the only user.
 */
static void iSeries_hpte_updateboltedpp(unsigned long newpp, unsigned long ea,
					int psize)
{
	unsigned long vsid, va, vpn;
	long slot;

	BUG_ON(psize != MMU_PAGE_4K);

	vsid = get_kernel_vsid(ea);
	va = (vsid << 28) | (ea & 0x0fffffff);
	vpn = va >> HW_PAGE_SHIFT;
	slot = iSeries_hpte_find(vpn);
	if (slot == -1)
		panic("updateboltedpp: Could not find page to bolt\n");
	HvCallHpt_setPp(slot, newpp);
}
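
/*
 * Invalidate the HPTE at @slot, but only if it is still valid and its
 * AVPN still matches the given virtual address.
 */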
static void iSeries_hpte_invalidate(unsigned long slot, unsigned long va,
				    int psize, int local)
{
	unsigned long hpte_v;
	unsigned long avpn = va >> 23;
	unsigned long flags;

	local_irq_save(flags);

	iSeries_hlock(slot);

	hpte_v = iSeries_hpte_getword0(slot);

	if ((HPTE_V_AVPN_VAL(hpte_v) == avpn) && (hpte_v & HPTE_V_VALID))
		HvCallHpt_invalidateSetSwBitsGet(slot, 0, 0);

	iSeries_hunlock(slot);

	local_irq_restore(flags);
}
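
/*
 * Wire the iSeries HPTE operations into the ppc_md machine descriptor.
 */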
void hpte_init_iSeries(void)
{
	ppc_md.hpte_invalidate = iSeries_hpte_invalidate;
	ppc_md.hpte_updatepp = iSeries_hpte_updatepp;
	ppc_md.hpte_updateboltedpp = iSeries_hpte_updateboltedpp;
	ppc_md.hpte_insert = iSeries_hpte_insert;
	ppc_md.hpte_remove = iSeries_hpte_remove;

	htab_finish_init();
}