/*
 * native hashtable management.
 *
 * SMP scalability work:
 *    Copyright (C) 2001 Anton Blanchard <anton@au.ibm.com>, IBM
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#undef DEBUG_LOW

#include <linux/spinlock.h>
#include <linux/bitops.h>
#include <linux/threads.h>
#include <linux/smp.h>

#include <asm/abs_addr.h>
#include <asm/machdep.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/tlb.h>
#include <asm/cputable.h>
#include <asm/udbg.h>
#include <asm/kexec.h>

#ifdef DEBUG_LOW
#define DBG_LOW(fmt...) udbg_printf(fmt)
#else
#define DBG_LOW(fmt...)
#endif

#define HPTE_LOCK_BIT 3

static DEFINE_SPINLOCK(native_tlbie_lock);
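
/*
 * Broadcast TLB invalidate for a single entry. For 4K pages the low
 * 12 bits of the VA are cleared and the L=0 form of tlbie is used;
 * for large pages the page size encoding (penc) is folded into the
 * low-order VA bits and the L=1 form is used.
 */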
static inline void __tlbie(unsigned long va, unsigned int psize)
{
	unsigned int penc;

	/* clear top 16 bits, non SLS segment */
	va &= ~(0xffffULL << 48);

	switch (psize) {
	case MMU_PAGE_4K:
		va &= ~0xffful;
		asm volatile("tlbie %0,0" : : "r" (va) : "memory");
		break;
	default:
		penc = mmu_psize_defs[psize].penc;
		va &= ~((1ul << mmu_psize_defs[psize].shift) - 1);
		va |= penc << 12;
		asm volatile("tlbie %0,1" : : "r" (va) : "memory");
		break;
	}
}
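
/*
 * Local (non-broadcast) form, tlbiel. The instruction is emitted as a
 * raw .long opcode, presumably so the file still assembles with
 * toolchains that do not know the tlbiel mnemonic; %0 supplies the RB
 * register and bit 21 of the literal is the L (large page) flag.
 */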
static inline void __tlbiel(unsigned long va, unsigned int psize)
{
	unsigned int penc;

	/* clear top 16 bits, non SLS segment */
	va &= ~(0xffffULL << 48);

	switch (psize) {
	case MMU_PAGE_4K:
		va &= ~0xffful;
		asm volatile(".long 0x7c000224 | (%0 << 11) | (0 << 21)"
			     : : "r"(va) : "memory");
		break;
	default:
		penc = mmu_psize_defs[psize].penc;
		va &= ~((1ul << mmu_psize_defs[psize].shift) - 1);
		va |= penc << 12;
		asm volatile(".long 0x7c000224 | (%0 << 11) | (1 << 21)"
			     : : "r"(va) : "memory");
		break;
	}
}
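
/*
 * Invalidate one translation, choosing the cheapest safe mechanism:
 * tlbiel when the flush is CPU-local, the CPU has CPU_FTR_TLBIEL and
 * the page size supports it; otherwise broadcast tlbie, serialized by
 * native_tlbie_lock on hardware without CPU_FTR_LOCKLESS_TLBIE.
 */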
static inline void tlbie(unsigned long va, int psize, int local)
{
	unsigned int use_local = local && cpu_has_feature(CPU_FTR_TLBIEL);
	int lock_tlbie = !cpu_has_feature(CPU_FTR_LOCKLESS_TLBIE);

	if (use_local)
		use_local = mmu_psize_defs[psize].tlbiel;
	if (lock_tlbie && !use_local)
		spin_lock(&native_tlbie_lock);
	asm volatile("ptesync": : :"memory");
	if (use_local) {
		__tlbiel(va, psize);
		asm volatile("ptesync": : :"memory");
	} else {
		__tlbie(va, psize);
		asm volatile("eieio; tlbsync; ptesync": : :"memory");
	}
	if (lock_tlbie && !use_local)
		spin_unlock(&native_tlbie_lock);
}
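
/*
 * Software lock for an individual HPTE, taken on a software-reserved
 * bit (HPTE_LOCK_BIT) of the entry's first doubleword. The unlock side
 * publishes prior stores with lwsync before clearing the bit; note
 * that storing a whole new first doubleword (e.g. hptep->v = 0) also
 * releases the lock.
 */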
static inline void native_lock_hpte(struct hash_pte *hptep)
{
	unsigned long *word = &hptep->v;

	while (1) {
		if (!test_and_set_bit(HPTE_LOCK_BIT, word))
			break;
		while (test_bit(HPTE_LOCK_BIT, word))
			cpu_relax();
	}
}

static inline void native_unlock_hpte(struct hash_pte *hptep)
{
	unsigned long *word = &hptep->v;

	asm volatile("lwsync":::"memory");
	clear_bit(HPTE_LOCK_BIT, word);
}
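
/*
 * Attempt to install a new HPTE in the given group of 8 slots. On
 * success the slot index within the group is returned, with bit 3 of
 * the return value set when the caller asked for the secondary hash
 * (HPTE_V_SECONDARY); -1 means the group was full. A caller is
 * expected to retry on the secondary hash and finally fall back to
 * eviction, roughly (a sketch, not the actual call site):
 *
 *	if ((slot = ppc_md.hpte_insert(grp, va, pa, rflags, 0, psize)) == -1)
 *		slot = ppc_md.hpte_insert(grp2, va, pa, rflags,
 *					  HPTE_V_SECONDARY, psize);
 *	if (slot == -1)
 *		ppc_md.hpte_remove(grp);
 */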
static long native_hpte_insert(unsigned long hpte_group, unsigned long va,
			       unsigned long pa, unsigned long rflags,
			       unsigned long vflags, int psize)
{
	struct hash_pte *hptep = htab_address + hpte_group;
	unsigned long hpte_v, hpte_r;
	int i;

	if (!(vflags & HPTE_V_BOLTED)) {
		DBG_LOW(" insert(group=%lx, va=%016lx, pa=%016lx,"
			" rflags=%lx, vflags=%lx, psize=%d)\n",
			hpte_group, va, pa, rflags, vflags, psize);
	}

	for (i = 0; i < HPTES_PER_GROUP; i++) {
		if (!(hptep->v & HPTE_V_VALID)) {
			/* retry with lock held */
			native_lock_hpte(hptep);
			if (!(hptep->v & HPTE_V_VALID))
				break;
			native_unlock_hpte(hptep);
		}
		hptep++;
	}

	if (i == HPTES_PER_GROUP)
		return -1;

	hpte_v = hpte_encode_v(va, psize) | vflags | HPTE_V_VALID;
	hpte_r = hpte_encode_r(pa, psize) | rflags;

	if (!(vflags & HPTE_V_BOLTED)) {
		DBG_LOW(" i=%x hpte_v=%016lx, hpte_r=%016lx\n",
			i, hpte_v, hpte_r);
	}

	hptep->r = hpte_r;
	/* Guarantee the second dword is visible before the valid bit */
	eieio();
	/*
	 * Now set the first dword including the valid bit
	 * NOTE: this also unlocks the hpte
	 */
	hptep->v = hpte_v;

	__asm__ __volatile__ ("ptesync" : : : "memory");

	return i | (!!(vflags & HPTE_V_SECONDARY) << 3);
}
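
/*
 * Make room in a full group by evicting an existing entry. The start
 * slot is pseudo-randomized from the timebase so victims spread across
 * the group; bolted entries are never evicted. Returns the victim's
 * slot offset within the group, or -1 if all eight entries are bolted.
 */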
static long native_hpte_remove(unsigned long hpte_group)
{
	struct hash_pte *hptep;
	int i;
	int slot_offset;
	unsigned long hpte_v;

	DBG_LOW(" remove(group=%lx)\n", hpte_group);

	/* pick a random entry to start at */
	slot_offset = mftb() & 0x7;

	for (i = 0; i < HPTES_PER_GROUP; i++) {
		hptep = htab_address + hpte_group + slot_offset;
		hpte_v = hptep->v;

		if ((hpte_v & HPTE_V_VALID) && !(hpte_v & HPTE_V_BOLTED)) {
			/* retry with lock held */
			native_lock_hpte(hptep);
			hpte_v = hptep->v;
			if ((hpte_v & HPTE_V_VALID)
			    && !(hpte_v & HPTE_V_BOLTED))
				break;
			native_unlock_hpte(hptep);
		}

		slot_offset++;
		slot_offset &= 0x7;
	}

	if (i == HPTES_PER_GROUP)
		return -1;

	/* Invalidate the hpte. NOTE: this also unlocks it */
	hptep->v = 0;

	return i;
}
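
/*
 * Update the protection bits of the HPTE at @slot, provided it is
 * still valid and still maps @va. Returns 0 on success and -1 when
 * the entry no longer matches (it may have been evicted); the TLB
 * entry is flushed in both cases.
 */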
static long native_hpte_updatepp(unsigned long slot, unsigned long newpp,
				 unsigned long va, int psize, int local)
{
	struct hash_pte *hptep = htab_address + slot;
	unsigned long hpte_v, want_v;
	int ret = 0;

	want_v = hpte_encode_v(va, psize);

	DBG_LOW(" update(va=%016lx, avpnv=%016lx, hash=%016lx, newpp=%x)",
		va, want_v & HPTE_V_AVPN, slot, newpp);

	native_lock_hpte(hptep);

	hpte_v = hptep->v;

	/* Even if we miss, we need to invalidate the TLB */
	if (!HPTE_V_COMPARE(hpte_v, want_v) || !(hpte_v & HPTE_V_VALID)) {
		DBG_LOW(" -> miss\n");
		ret = -1;
	} else {
		DBG_LOW(" -> hit\n");
		/* Update the HPTE */
		hptep->r = (hptep->r & ~(HPTE_R_PP | HPTE_R_N)) |
			(newpp & (HPTE_R_PP | HPTE_R_N | HPTE_R_C));
	}
	native_unlock_hpte(hptep);

	/* Ensure it is out of the tlb too. */
	tlbie(va, psize, local);

	return ret;
}
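
/*
 * Look an entry up by VA, scanning the primary and then the secondary
 * hash group. Returns the global slot number, negated when the match
 * was found through the secondary hash, or -1 if no entry matches.
 */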
static long native_hpte_find(unsigned long va, int psize)
{
	struct hash_pte *hptep;
	unsigned long hash;
	unsigned long i, j;
	long slot;
	unsigned long want_v, hpte_v;

	hash = hpt_hash(va, mmu_psize_defs[psize].shift);
	want_v = hpte_encode_v(va, psize);

	for (j = 0; j < 2; j++) {
		slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
		for (i = 0; i < HPTES_PER_GROUP; i++) {
			hptep = htab_address + slot;
			hpte_v = hptep->v;

			if (HPTE_V_COMPARE(hpte_v, want_v)
			    && (hpte_v & HPTE_V_VALID)
			    && (!!(hpte_v & HPTE_V_SECONDARY) == j)) {
				/* HPTE matches */
				if (j)
					slot = -slot;
				return slot;
			}
			++slot;
		}
		hash = ~hash;
	}

	return -1;
}

/*
 * Update the page protection bits. Intended to be used to create
 * guard pages for kernel data structures on pages which are bolted
 * in the HPT. Assumes pages being operated on will not be stolen.
 *
 * No need to lock here because we should be the only user.
 */
static void native_hpte_updateboltedpp(unsigned long newpp, unsigned long ea,
				       int psize)
{
	unsigned long vsid, va;
	long slot;
	struct hash_pte *hptep;

	vsid = get_kernel_vsid(ea);
	va = (vsid << 28) | (ea & 0x0fffffff);

	slot = native_hpte_find(va, psize);
	if (slot == -1)
		panic("could not find page to bolt\n");
	hptep = htab_address + slot;

	/* Update the HPTE */
	hptep->r = (hptep->r & ~(HPTE_R_PP | HPTE_R_N)) |
		(newpp & (HPTE_R_PP | HPTE_R_N));

	/* Ensure it is out of the tlb too. */
	tlbie(va, psize, 0);
}
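
/*
 * Invalidate one mapping: clear the HPTE if it still matches @va, then
 * flush the corresponding TLB entry. Interrupts stay disabled across
 * the whole sequence.
 */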
static void native_hpte_invalidate(unsigned long slot, unsigned long va,
				   int psize, int local)
{
	struct hash_pte *hptep = htab_address + slot;
	unsigned long hpte_v;
	unsigned long want_v;
	unsigned long flags;

	local_irq_save(flags);

	DBG_LOW(" invalidate(va=%016lx, hash: %x)\n", va, slot);

	want_v = hpte_encode_v(va, psize);
	native_lock_hpte(hptep);
	hpte_v = hptep->v;

	/* Even if we miss, we need to invalidate the TLB */
	if (!HPTE_V_COMPARE(hpte_v, want_v) || !(hpte_v & HPTE_V_VALID))
		native_unlock_hpte(hptep);
	else
		/* Invalidate the hpte. NOTE: this also unlocks it */
		hptep->v = 0;

	/* Invalidate the TLB */
	tlbie(va, psize, local);

	local_irq_restore(flags);
}
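
/*
 * Recover the page size and virtual address from a raw HPTE, for
 * native_hpte_clear() below. Large page sizes are identified by the LP
 * field in the second doubleword; VA bits that are not kept in the
 * AVPN are reconstructed from the entry's PTEG index.
 */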
#define LP_SHIFT 12
#define LP_BITS 8
#define LP_MASK(i) ((0xFF >> (i)) << LP_SHIFT)

static void hpte_decode(struct hash_pte *hpte, unsigned long slot,
			int *psize, unsigned long *va)
{
	unsigned long hpte_r = hpte->r;
	unsigned long hpte_v = hpte->v;
	unsigned long avpn;
	int i, size, shift, penc;

	if (!(hpte_v & HPTE_V_LARGE))
		size = MMU_PAGE_4K;
	else {
		for (i = 0; i < LP_BITS; i++) {
			if ((hpte_r & LP_MASK(i+1)) == LP_MASK(i+1))
				break;
		}
		penc = LP_MASK(i+1) >> LP_SHIFT;
		for (size = 0; size < MMU_PAGE_COUNT; size++) {
			/* 4K pages are not represented by LP */
			if (size == MMU_PAGE_4K)
				continue;

			/* valid entries have a shift value */
			if (!mmu_psize_defs[size].shift)
				continue;

			if (penc == mmu_psize_defs[size].penc)
				break;
		}
	}

	/* This works for all page sizes, and for 256M and 1T segments */
	shift = mmu_psize_defs[size].shift;
	avpn = (HPTE_V_AVPN_VAL(hpte_v) & ~mmu_psize_defs[size].avpnm) << 23;

	if (shift < 23) {
		unsigned long vpi, vsid, pteg;

		pteg = slot / HPTES_PER_GROUP;
		if (hpte_v & HPTE_V_SECONDARY)
			pteg = ~pteg;
		switch (hpte_v >> HPTE_V_SSIZE_SHIFT) {
		case MMU_SEGSIZE_256M:
			vpi = ((avpn >> 28) ^ pteg) & htab_hash_mask;
			break;
		case MMU_SEGSIZE_1T:
			vsid = avpn >> 40;
			vpi = (vsid ^ (vsid << 25) ^ pteg) & htab_hash_mask;
			break;
		default:
			avpn = vpi = size = 0;
		}
		avpn |= (vpi << mmu_psize_defs[size].shift);
	}

	*va = avpn;
	*psize = size;
}

/*
 * clear all mappings on kexec. All cpus are in real mode (or they will
 * be when they isi), and we are the only one left. We rely on our kernel
 * mapping being 0xC0's and the hardware ignoring those two real bits.
 *
 * TODO: add batching support when enabled. remember, no dynamic memory here,
 * although there is the control page available...
 */
static void native_hpte_clear(void)
{
	unsigned long slot, slots, flags;
	struct hash_pte *hptep = htab_address;
	unsigned long hpte_v, va;
	unsigned long pteg_count;
	int psize;

	pteg_count = htab_hash_mask + 1;

	local_irq_save(flags);

	/* we take the tlbie lock and hold it.  Some hardware will
	 * deadlock if we try to tlbie from two processors at once.
	 */
	spin_lock(&native_tlbie_lock);

	slots = pteg_count * HPTES_PER_GROUP;

	for (slot = 0; slot < slots; slot++, hptep++) {
		/*
		 * we could lock the pte here, but we are the only cpu
		 * running, right? and for crash dump, we probably
		 * don't want to wait for a maybe bad cpu.
		 */
		hpte_v = hptep->v;

		/*
		 * Call __tlbie() here rather than tlbie() since we
		 * already hold the native_tlbie_lock.
		 */
		if (hpte_v & HPTE_V_VALID) {
			hpte_decode(hptep, slot, &psize, &va);

			/* Invalidate the hpte. NOTE: this also unlocks it */
			hptep->v = 0;

			/* Invalidate the TLB */
			__tlbie(va, psize);
		}
	}

	asm volatile("eieio; tlbsync; ptesync":::"memory");
	spin_unlock(&native_tlbie_lock);
	local_irq_restore(flags);
}

/*
 * Batched hash table flush, we batch the tlbie's to avoid taking/releasing
 * the lock all the time
 */
static void native_flush_hash_range(unsigned long number, int local)
{
	unsigned long va, hash, index, hidx, shift, slot;
	struct hash_pte *hptep;
	unsigned long hpte_v;
	unsigned long want_v;
	unsigned long flags;
	real_pte_t pte;
	struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch);
	unsigned long psize = batch->psize;
	int i;

	local_irq_save(flags);

	for (i = 0; i < number; i++) {
		va = batch->vaddr[i];
		pte = batch->pte[i];

		pte_iterate_hashed_subpages(pte, psize, va, index, shift) {
			hash = hpt_hash(va, shift);
			hidx = __rpte_to_hidx(pte, index);
			if (hidx & _PTEIDX_SECONDARY)
				hash = ~hash;
			slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
			slot += hidx & _PTEIDX_GROUP_IX;

			hptep = htab_address + slot;
			want_v = hpte_encode_v(va, psize);

			native_lock_hpte(hptep);
			hpte_v = hptep->v;
			if (!HPTE_V_COMPARE(hpte_v, want_v) ||
			    !(hpte_v & HPTE_V_VALID))
				native_unlock_hpte(hptep);
			else
				hptep->v = 0;
		} pte_iterate_hashed_end();
	}

	if (cpu_has_feature(CPU_FTR_TLBIEL) &&
	    mmu_psize_defs[psize].tlbiel && local) {
		asm volatile("ptesync":::"memory");
		for (i = 0; i < number; i++) {
			va = batch->vaddr[i];
			pte = batch->pte[i];

			pte_iterate_hashed_subpages(pte, psize, va, index,
						    shift) {
				__tlbiel(va, psize);
			} pte_iterate_hashed_end();
		}
		asm volatile("ptesync":::"memory");
	} else {
		int lock_tlbie = !cpu_has_feature(CPU_FTR_LOCKLESS_TLBIE);

		if (lock_tlbie)
			spin_lock(&native_tlbie_lock);

		asm volatile("ptesync":::"memory");
		for (i = 0; i < number; i++) {
			va = batch->vaddr[i];
			pte = batch->pte[i];

			pte_iterate_hashed_subpages(pte, psize, va, index,
						    shift) {
				__tlbie(va, psize);
			} pte_iterate_hashed_end();
		}
		asm volatile("eieio; tlbsync; ptesync":::"memory");

		if (lock_tlbie)
			spin_unlock(&native_tlbie_lock);
	}

	local_irq_restore(flags);
}
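
/*
 * TLB batching is reportedly unsafe on the SP "Nighthawk" node (model
 * IBM,9076-N81), presumably a hardware erratum, so flush_hash_range is
 * only installed elsewhere; that machine falls back to the generic
 * one-entry-at-a-time flush path.
 */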
#ifdef CONFIG_PPC_PSERIES
/* Disable TLB batching on nighthawk */
static inline int tlb_batching_enabled(void)
{
	struct device_node *root = of_find_node_by_path("/");
	int enabled = 1;

	if (root) {
		const char *model = of_get_property(root, "model", NULL);
		if (model && !strcmp(model, "IBM,9076-N81"))
			enabled = 0;
		of_node_put(root);
	}

	return enabled;
}
#else
static inline int tlb_batching_enabled(void)
{
	return 1;
}
#endif
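
/*
 * Register the bare-metal hash MMU operations in the ppc_md machine
 * vector, used when the kernel owns the hash table directly rather
 * than going through a hypervisor.
 */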
void __init hpte_init_native(void)
{
	ppc_md.hpte_invalidate = native_hpte_invalidate;
	ppc_md.hpte_updatepp = native_hpte_updatepp;
	ppc_md.hpte_updateboltedpp = native_hpte_updateboltedpp;
	ppc_md.hpte_insert = native_hpte_insert;
	ppc_md.hpte_remove = native_hpte_remove;
	ppc_md.hpte_clear_all = native_hpte_clear;

	if (tlb_batching_enabled())
		ppc_md.flush_hash_range = native_flush_hash_range;
}