hash_native_64.c

/*
 * native hashtable management.
 *
 * SMP scalability work:
 * Copyright (C) 2001 Anton Blanchard <anton@au.ibm.com>, IBM
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#undef DEBUG_LOW

#include <linux/spinlock.h>
#include <linux/bitops.h>
#include <linux/of.h>
#include <linux/threads.h>
#include <linux/smp.h>

#include <asm/machdep.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/tlb.h>
#include <asm/cputable.h>
#include <asm/udbg.h>
#include <asm/kexec.h>
#include <asm/ppc-opcode.h>

#ifdef DEBUG_LOW
#define DBG_LOW(fmt...) udbg_printf(fmt)
#else
#define DBG_LOW(fmt...)
#endif

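/*
 * The HPTE lock is a software bit in the HPTE's first doubleword
 * (bit 3, matching HPTE_V_LOCK in mmu-hash64.h).  The HPTE is always
 * stored big-endian, so on a little-endian kernel the byte holding that
 * bit lands in the top byte of the native 64-bit word the bitops
 * operate on, hence the 56+3 below.
 */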
#ifdef __BIG_ENDIAN__
#define HPTE_LOCK_BIT 3
#else
#define HPTE_LOCK_BIT (56+3)
#endif

DEFINE_RAW_SPINLOCK(native_tlbie_lock);

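/*
 * Issue a broadcast (global) tlbie for one page.  The function composes
 * the VA/AVA operand from the page bits of the va, the segment-size field
 * and, for large pages, the page-size encoding.  The feature section picks
 * the ISA 2.06 form of tlbie (via PPC_TLBIE) on CPUs with
 * CPU_FTR_ARCH_206 and falls back to the older form otherwise.
 */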
static inline void __tlbie(unsigned long vpn, int psize, int apsize, int ssize)
{
	unsigned long va;
	unsigned int penc;
	unsigned long sllp;

	/*
	 * We need 14 to 65 bits of va for a tlbie of a 4K page.
	 * With vpn we ignore the lower VPN_SHIFT bits already.
	 * And the top two bits are already ignored, because we can
	 * only accommodate 76 bits in a 64 bit vpn with a VPN_SHIFT
	 * of 12.
	 */
	va = vpn << VPN_SHIFT;
	/*
	 * clear top 16 bits of 64bit va, non SLS segment
	 * Older versions of the architecture (2.02 and earlier) require the
	 * masking of the top 16 bits.
	 */
	va &= ~(0xffffULL << 48);

	switch (psize) {
	case MMU_PAGE_4K:
		/* clear out bits after (52) [0....52.....63] */
		va &= ~((1ul << (64 - 52)) - 1);
		va |= ssize << 8;
		sllp = ((mmu_psize_defs[apsize].sllp & SLB_VSID_L) >> 6) |
			((mmu_psize_defs[apsize].sllp & SLB_VSID_LP) >> 4);
		va |= sllp << 5;
		asm volatile(ASM_FTR_IFCLR("tlbie %0,0", PPC_TLBIE(%1,%0), %2)
			     : : "r" (va), "r"(0), "i" (CPU_FTR_ARCH_206)
			     : "memory");
		break;
	default:
		/* We need 14 to 14 + i bits of va */
		penc = mmu_psize_defs[psize].penc[apsize];
		va &= ~((1ul << mmu_psize_defs[apsize].shift) - 1);
		va |= penc << 12;
		va |= ssize << 8;
		/* Add AVAL part */
		if (psize != apsize) {
			/*
			 * MPSS, 64K base page size and 16MB large page size
			 * We don't need all the bits, but the rest of the
			 * bits must be ignored by the processor.
			 * vpn covers up to 65 bits of va (0...65) and we
			 * need bits 58..64 of va.
			 */
			va |= (vpn & 0xfe);
		}
		va |= 1; /* L */
		asm volatile(ASM_FTR_IFCLR("tlbie %0,1", PPC_TLBIE(%1,%0), %2)
			     : : "r" (va), "r"(0), "i" (CPU_FTR_ARCH_206)
			     : "memory");
		break;
	}
}

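/*
 * Local form of the above: tlbiel only invalidates the TLB of the issuing
 * CPU, so no tlbsync or global serialization is needed.  The instruction
 * is hand-encoded (.long 0x7c000224) rather than written as a mnemonic,
 * presumably so it assembles on toolchains that don't know tlbiel.
 */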
static inline void __tlbiel(unsigned long vpn, int psize, int apsize, int ssize)
{
	unsigned long va;
	unsigned int penc;
	unsigned long sllp;

	/* VPN_SHIFT can be at most 12 */
	va = vpn << VPN_SHIFT;
	/*
	 * clear top 16 bits of 64 bit va, non SLS segment
	 * Older versions of the architecture (2.02 and earlier) require the
	 * masking of the top 16 bits.
	 */
	va &= ~(0xffffULL << 48);

	switch (psize) {
	case MMU_PAGE_4K:
		/* clear out bits after (52) [0....52.....63] */
		va &= ~((1ul << (64 - 52)) - 1);
		va |= ssize << 8;
		sllp = ((mmu_psize_defs[apsize].sllp & SLB_VSID_L) >> 6) |
			((mmu_psize_defs[apsize].sllp & SLB_VSID_LP) >> 4);
		va |= sllp << 5;
		asm volatile(".long 0x7c000224 | (%0 << 11) | (0 << 21)"
			     : : "r"(va) : "memory");
		break;
	default:
		/* We need 14 to 14 + i bits of va */
		penc = mmu_psize_defs[psize].penc[apsize];
		va &= ~((1ul << mmu_psize_defs[apsize].shift) - 1);
		va |= penc << 12;
		va |= ssize << 8;
		/* Add AVAL part */
		if (psize != apsize) {
			/*
			 * MPSS, 64K base page size and 16MB large page size
			 * We don't need all the bits, but the rest of the
			 * bits must be ignored by the processor.
			 * vpn covers up to 65 bits of va (0...65) and we
			 * need bits 58..64 of va.
			 */
			va |= (vpn & 0xfe);
		}
		va |= 1; /* L */
		asm volatile(".long 0x7c000224 | (%0 << 11) | (1 << 21)"
			     : : "r"(va) : "memory");
		break;
	}
}

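/*
 * Flush one translation, choosing between the local and global forms.
 * tlbiel is only used when the caller asked for a local flush, the CPU
 * supports it (MMU_FTR_TLBIEL) and the page size permits it.  Global
 * tlbies are serialized by native_tlbie_lock unless the MMU is known to
 * cope with concurrent tlbie (MMU_FTR_LOCKLESS_TLBIE).
 */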
static inline void tlbie(unsigned long vpn, int psize, int apsize,
			 int ssize, int local)
{
	unsigned int use_local = local && mmu_has_feature(MMU_FTR_TLBIEL);
	int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);

	if (use_local)
		use_local = mmu_psize_defs[psize].tlbiel;
	if (lock_tlbie && !use_local)
		raw_spin_lock(&native_tlbie_lock);
	asm volatile("ptesync": : :"memory");
	if (use_local) {
		__tlbiel(vpn, psize, apsize, ssize);
		asm volatile("ptesync": : :"memory");
	} else {
		__tlbie(vpn, psize, apsize, ssize);
		asm volatile("eieio; tlbsync; ptesync": : :"memory");
	}
	if (lock_tlbie && !use_local)
		raw_spin_unlock(&native_tlbie_lock);
}

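/*
 * Per-HPTE locking: take the software lock bit in the valid word with an
 * atomic test-and-set, spinning (cpu_relax) while another CPU holds it.
 * Note that any store that rewrites hptep->v wholesale, as in insert,
 * remove and invalidate below, also releases the lock implicitly.
 */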
static inline void native_lock_hpte(struct hash_pte *hptep)
{
	unsigned long *word = (unsigned long *)&hptep->v;

	while (1) {
		if (!test_and_set_bit_lock(HPTE_LOCK_BIT, word))
			break;
		while (test_bit(HPTE_LOCK_BIT, word))
			cpu_relax();
	}
}

static inline void native_unlock_hpte(struct hash_pte *hptep)
{
	unsigned long *word = (unsigned long *)&hptep->v;

	clear_bit_unlock(HPTE_LOCK_BIT, word);
}

static long native_hpte_insert(unsigned long hpte_group, unsigned long vpn,
			       unsigned long pa, unsigned long rflags,
			       unsigned long vflags, int psize, int apsize,
			       int ssize)
{
	struct hash_pte *hptep = htab_address + hpte_group;
	unsigned long hpte_v, hpte_r;
	int i;

	if (!(vflags & HPTE_V_BOLTED)) {
		DBG_LOW(" insert(group=%lx, vpn=%016lx, pa=%016lx,"
			" rflags=%lx, vflags=%lx, psize=%d)\n",
			hpte_group, vpn, pa, rflags, vflags, psize);
	}

	for (i = 0; i < HPTES_PER_GROUP; i++) {
		if (!(be64_to_cpu(hptep->v) & HPTE_V_VALID)) {
			/* retry with lock held */
			native_lock_hpte(hptep);
			if (!(be64_to_cpu(hptep->v) & HPTE_V_VALID))
				break;
			native_unlock_hpte(hptep);
		}
		hptep++;
	}

	if (i == HPTES_PER_GROUP)
		return -1;

	hpte_v = hpte_encode_v(vpn, psize, apsize, ssize) | vflags | HPTE_V_VALID;
	hpte_r = hpte_encode_r(pa, psize, apsize) | rflags;

	if (!(vflags & HPTE_V_BOLTED)) {
		DBG_LOW(" i=%x hpte_v=%016lx, hpte_r=%016lx\n",
			i, hpte_v, hpte_r);
	}

	hptep->r = cpu_to_be64(hpte_r);
	/* Guarantee the second dword is visible before the valid bit */
	eieio();
	/*
	 * Now set the first dword including the valid bit
	 * NOTE: this also unlocks the hpte
	 */
	hptep->v = cpu_to_be64(hpte_v);

	__asm__ __volatile__ ("ptesync" : : : "memory");

	return i | (!!(vflags & HPTE_V_SECONDARY) << 3);
}

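/*
 * The slot value returned above packs the index within the group in the
 * low three bits and flags the secondary bucket in bit 3.  For instance
 * (illustrative values only), a return of 0xb means entry 3 of the
 * secondary group; callers that cache the slot rederive the absolute HPT
 * index from the hash and these four bits.
 */
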
static long native_hpte_remove(unsigned long hpte_group)
{
	struct hash_pte *hptep;
	int i;
	int slot_offset;
	unsigned long hpte_v;

	DBG_LOW(" remove(group=%lx)\n", hpte_group);

	/* pick a random entry to start at */
	slot_offset = mftb() & 0x7;

	for (i = 0; i < HPTES_PER_GROUP; i++) {
		hptep = htab_address + hpte_group + slot_offset;
		hpte_v = be64_to_cpu(hptep->v);

		if ((hpte_v & HPTE_V_VALID) && !(hpte_v & HPTE_V_BOLTED)) {
			/* retry with lock held */
			native_lock_hpte(hptep);
			hpte_v = be64_to_cpu(hptep->v);
			if ((hpte_v & HPTE_V_VALID)
			    && !(hpte_v & HPTE_V_BOLTED))
				break;
			native_unlock_hpte(hptep);
		}

		slot_offset++;
		slot_offset &= 0x7;
	}

	if (i == HPTES_PER_GROUP)
		return -1;

	/* Invalidate the hpte. NOTE: this also unlocks it */
	hptep->v = 0;

	return i;
}

static long native_hpte_updatepp(unsigned long slot, unsigned long newpp,
				 unsigned long vpn, int bpsize,
				 int apsize, int ssize, int local)
{
	struct hash_pte *hptep = htab_address + slot;
	unsigned long hpte_v, want_v;
	int ret = 0;

	want_v = hpte_encode_avpn(vpn, bpsize, ssize);

	DBG_LOW(" update(vpn=%016lx, avpnv=%016lx, group=%lx, newpp=%lx)",
		vpn, want_v & HPTE_V_AVPN, slot, newpp);

	native_lock_hpte(hptep);

	hpte_v = be64_to_cpu(hptep->v);
	/*
	 * We need to invalidate the TLB always because hpte_remove doesn't do
	 * a tlb invalidate. If a hash bucket gets full, we "evict" a more/less
	 * random entry from it. When we do that we don't invalidate the TLB
	 * (hpte_remove) because we assume the old translation is still
	 * technically "valid".
	 */
	if (!HPTE_V_COMPARE(hpte_v, want_v) || !(hpte_v & HPTE_V_VALID)) {
		DBG_LOW(" -> miss\n");
		ret = -1;
	} else {
		DBG_LOW(" -> hit\n");
		/* Update the HPTE */
		hptep->r = cpu_to_be64((be64_to_cpu(hptep->r) &
					~(HPTE_R_PP | HPTE_R_N)) |
				       (newpp & (HPTE_R_PP | HPTE_R_N |
						 HPTE_R_C)));
	}
	native_unlock_hpte(hptep);

	/* Ensure it is out of the tlb too. */
	tlbie(vpn, bpsize, apsize, ssize, local);

	return ret;
}

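/*
 * Look up the slot of an existing HPTE by its vpn.  Only the primary
 * bucket is searched, which is sufficient for the bolted mappings this
 * helper is used for; returns -1 if no matching valid entry is found.
 */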
static long native_hpte_find(unsigned long vpn, int psize, int ssize)
{
	struct hash_pte *hptep;
	unsigned long hash;
	unsigned long i;
	long slot;
	unsigned long want_v, hpte_v;

	hash = hpt_hash(vpn, mmu_psize_defs[psize].shift, ssize);
	want_v = hpte_encode_avpn(vpn, psize, ssize);

	/* Bolted mappings are only ever in the primary group */
	slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
	for (i = 0; i < HPTES_PER_GROUP; i++) {
		hptep = htab_address + slot;
		hpte_v = be64_to_cpu(hptep->v);

		if (HPTE_V_COMPARE(hpte_v, want_v) && (hpte_v & HPTE_V_VALID))
			/* HPTE matches */
			return slot;
		++slot;
	}

	return -1;
}

/*
 * Update the page protection bits. Intended to be used to create
 * guard pages for kernel data structures on pages which are bolted
 * in the HPT. Assumes pages being operated on will not be stolen.
 *
 * No need to lock here because we should be the only user.
 */
static void native_hpte_updateboltedpp(unsigned long newpp, unsigned long ea,
				       int psize, int ssize)
{
	unsigned long vpn;
	unsigned long vsid;
	long slot;
	struct hash_pte *hptep;

	vsid = get_kernel_vsid(ea, ssize);
	vpn = hpt_vpn(ea, vsid, ssize);

	slot = native_hpte_find(vpn, psize, ssize);
	if (slot == -1)
		panic("could not find page to bolt\n");
	hptep = htab_address + slot;

	/* Update the HPTE */
	hptep->r = cpu_to_be64((be64_to_cpu(hptep->r) &
				~(HPTE_R_PP | HPTE_R_N)) |
			       (newpp & (HPTE_R_PP | HPTE_R_N)));
	/*
	 * Ensure it is out of the tlb too. Bolted entries' base and
	 * actual page sizes will be the same.
	 */
	tlbie(vpn, psize, psize, ssize, 0);
}

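/*
 * Note the tlbie above is always global (local == 0): a bolted kernel
 * translation may be cached in any CPU's TLB, so a broadcast
 * invalidation is required.
 */
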
static void native_hpte_invalidate(unsigned long slot, unsigned long vpn,
				   int bpsize, int apsize, int ssize, int local)
{
	struct hash_pte *hptep = htab_address + slot;
	unsigned long hpte_v;
	unsigned long want_v;
	unsigned long flags;

	local_irq_save(flags);

	DBG_LOW(" invalidate(vpn=%016lx, hash: %lx)\n", vpn, slot);

	want_v = hpte_encode_avpn(vpn, bpsize, ssize);
	native_lock_hpte(hptep);
	hpte_v = be64_to_cpu(hptep->v);

	/*
	 * We need to invalidate the TLB always because hpte_remove doesn't do
	 * a tlb invalidate. If a hash bucket gets full, we "evict" a more/less
	 * random entry from it. When we do that we don't invalidate the TLB
	 * (hpte_remove) because we assume the old translation is still
	 * technically "valid".
	 */
	if (!HPTE_V_COMPARE(hpte_v, want_v) || !(hpte_v & HPTE_V_VALID))
		native_unlock_hpte(hptep);
	else
		/* Invalidate the hpte. NOTE: this also unlocks it */
		hptep->v = 0;

	/* Invalidate the TLB */
	tlbie(vpn, bpsize, apsize, ssize, local);

	local_irq_restore(flags);
}

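/*
 * Invalidate every HPTE backing one hugepage.  With MPSS a 16M page is
 * mapped by many base-page-size HPTEs; hpte_slot_array records, for each
 * sub-page, whether an HPTE exists and which slot it hashed to.  Each
 * valid entry is cleared under its lock, then a single global tlbie with
 * the 16M actual page size flushes the whole range from the TLB.
 */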
static void native_hugepage_invalidate(struct mm_struct *mm,
				       unsigned char *hpte_slot_array,
				       unsigned long addr, int psize)
{
	int ssize = 0, i;
	int lock_tlbie;
	struct hash_pte *hptep;
	int actual_psize = MMU_PAGE_16M;
	unsigned int max_hpte_count, valid;
	unsigned long flags, s_addr = addr;
	unsigned long hpte_v, want_v, shift;
	unsigned long hidx, vpn = 0, vsid, hash, slot;

	shift = mmu_psize_defs[psize].shift;
	max_hpte_count = 1U << (PMD_SHIFT - shift);

	local_irq_save(flags);
	for (i = 0; i < max_hpte_count; i++) {
		valid = hpte_valid(hpte_slot_array, i);
		if (!valid)
			continue;
		hidx = hpte_hash_index(hpte_slot_array, i);

		/* get the vpn */
		addr = s_addr + (i * (1ul << shift));
		if (!is_kernel_addr(addr)) {
			ssize = user_segment_size(addr);
			vsid = get_vsid(mm->context.id, addr, ssize);
			WARN_ON(vsid == 0);
		} else {
			vsid = get_kernel_vsid(addr, mmu_kernel_ssize);
			ssize = mmu_kernel_ssize;
		}

		vpn = hpt_vpn(addr, vsid, ssize);
		hash = hpt_hash(vpn, shift, ssize);
		if (hidx & _PTEIDX_SECONDARY)
			hash = ~hash;

		slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
		slot += hidx & _PTEIDX_GROUP_IX;

		hptep = htab_address + slot;
		want_v = hpte_encode_avpn(vpn, psize, ssize);
		native_lock_hpte(hptep);
		hpte_v = be64_to_cpu(hptep->v);

		/* Even if we miss, we need to invalidate the TLB */
		if (!HPTE_V_COMPARE(hpte_v, want_v) || !(hpte_v & HPTE_V_VALID))
			native_unlock_hpte(hptep);
		else
			/* Invalidate the hpte. NOTE: this also unlocks it */
			hptep->v = 0;
	}
	/*
	 * Since this is a hugepage, we just need a single tlbie.
	 * Use the last vpn.
	 */
	lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);
	if (lock_tlbie)
		raw_spin_lock(&native_tlbie_lock);

	asm volatile("ptesync":::"memory");
	__tlbie(vpn, psize, actual_psize, ssize);
	asm volatile("eieio; tlbsync; ptesync":::"memory");

	if (lock_tlbie)
		raw_spin_unlock(&native_tlbie_lock);

	local_irq_restore(flags);
}

static inline int __hpte_actual_psize(unsigned int lp, int psize)
{
	int i, shift;
	unsigned int mask;

	/* start from 1 ignoring MMU_PAGE_4K */
	for (i = 1; i < MMU_PAGE_COUNT; i++) {

		/* invalid penc */
		if (mmu_psize_defs[psize].penc[i] == -1)
			continue;
		/*
		 * encoding bits per actual page size
		 *        PTE LP     actual page size
		 *    rrrr rrrz	     >= 8KB
		 *    rrrr rrzz	     >= 16KB
		 *    rrrr rzzz	     >= 32KB
		 *    rrrr zzzz	     >= 64KB
		 * .......
		 */
		shift = mmu_psize_defs[i].shift - LP_SHIFT;
		if (shift > LP_BITS)
			shift = LP_BITS;
		mask = (1 << shift) - 1;
		if ((lp & mask) == mmu_psize_defs[psize].penc[i])
			return i;
	}
	return -1;
}

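/*
 * Illustrative example (assuming the usual LP_SHIFT of 12): a 64K actual
 * page has mmu_psize_defs[MMU_PAGE_64K].shift = 16, so only the low
 * 16 - 12 = 4 LP bits ("zzzz" above) are compared against penc; the
 * remaining LP bits belong to the AVA and are ignored by the match.
 *
 * hpte_decode() below uses this to reverse a raw HPTE back into psize,
 * apsize, ssize and vpn, for native_hpte_clear().
 */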
static void hpte_decode(struct hash_pte *hpte, unsigned long slot,
			int *psize, int *apsize, int *ssize, unsigned long *vpn)
{
	unsigned long avpn, pteg, vpi;
	unsigned long hpte_v = be64_to_cpu(hpte->v);
	unsigned long hpte_r = be64_to_cpu(hpte->r);
	unsigned long vsid, seg_off;
	int size, a_size, shift;
	/* Look at the 8 bit LP value */
	unsigned int lp = (hpte_r >> LP_SHIFT) & ((1 << LP_BITS) - 1);

	if (!(hpte_v & HPTE_V_LARGE)) {
		size   = MMU_PAGE_4K;
		a_size = MMU_PAGE_4K;
	} else {
		for (size = 0; size < MMU_PAGE_COUNT; size++) {

			/* valid entries have a shift value */
			if (!mmu_psize_defs[size].shift)
				continue;

			a_size = __hpte_actual_psize(lp, size);
			if (a_size != -1)
				break;
		}
	}
	/* This works for all page sizes, and for 256M and 1T segments */
	*ssize = hpte_v >> HPTE_V_SSIZE_SHIFT;
	shift = mmu_psize_defs[size].shift;

	avpn = (HPTE_V_AVPN_VAL(hpte_v) & ~mmu_psize_defs[size].avpnm);
	pteg = slot / HPTES_PER_GROUP;
	if (hpte_v & HPTE_V_SECONDARY)
		pteg = ~pteg;

	switch (*ssize) {
	case MMU_SEGSIZE_256M:
		/* We only have 28 - 23 bits of seg_off in avpn */
		seg_off = (avpn & 0x1f) << 23;
		vsid    = avpn >> 5;
		/* We can find more bits from the pteg value */
		if (shift < 23) {
			vpi = (vsid ^ pteg) & htab_hash_mask;
			seg_off |= vpi << shift;
		}
		*vpn = vsid << (SID_SHIFT - VPN_SHIFT) | seg_off >> VPN_SHIFT;
		break;
	case MMU_SEGSIZE_1T:
		/* We only have 40 - 23 bits of seg_off in avpn */
		seg_off = (avpn & 0x1ffff) << 23;
		vsid    = avpn >> 17;
		if (shift < 23) {
			vpi = (vsid ^ (vsid << 25) ^ pteg) & htab_hash_mask;
			seg_off |= vpi << shift;
		}
		*vpn = vsid << (SID_SHIFT_1T - VPN_SHIFT) | seg_off >> VPN_SHIFT;
		break;
	default:
		*vpn = size = 0;
	}
	*psize  = size;
	*apsize = a_size;
}

/*
 * clear all mappings on kexec.  All cpus are in real mode (or they will
 * be when they isi), and we are the only one left.  We rely on our kernel
 * mapping being 0xC0's and the hardware ignoring those two real bits.
 *
 * TODO: add batching support when enabled.  remember, no dynamic memory here,
 * although there is the control page available...
 */
static void native_hpte_clear(void)
{
	unsigned long vpn = 0;
	unsigned long slot, slots, flags;
	struct hash_pte *hptep = htab_address;
	unsigned long hpte_v;
	unsigned long pteg_count;
	int psize, apsize, ssize;

	pteg_count = htab_hash_mask + 1;

	local_irq_save(flags);

	/*
	 * We take the tlbie lock and hold it.  Some hardware will
	 * deadlock if we try to tlbie from two processors at once.
	 */
	raw_spin_lock(&native_tlbie_lock);

	slots = pteg_count * HPTES_PER_GROUP;

	for (slot = 0; slot < slots; slot++, hptep++) {
		/*
		 * We could lock the pte here, but we are the only cpu
		 * running, right?  And for crash dump, we probably
		 * don't want to wait for a maybe bad cpu.
		 */
		hpte_v = be64_to_cpu(hptep->v);

		/*
		 * Call __tlbie() here rather than tlbie() since we
		 * already hold the native_tlbie_lock.
		 */
		if (hpte_v & HPTE_V_VALID) {
			hpte_decode(hptep, slot, &psize, &apsize, &ssize, &vpn);
			hptep->v = 0;
			__tlbie(vpn, psize, apsize, ssize);
		}
	}

	asm volatile("eieio; tlbsync; ptesync":::"memory");
	raw_spin_unlock(&native_tlbie_lock);
	local_irq_restore(flags);
}

/*
 * Batched hash table flush, we batch the tlbie's to avoid taking/releasing
 * the lock all the time
 */
static void native_flush_hash_range(unsigned long number, int local)
{
	unsigned long vpn;
	unsigned long hash, index, hidx, shift, slot;
	struct hash_pte *hptep;
	unsigned long hpte_v;
	unsigned long want_v;
	unsigned long flags;
	real_pte_t pte;
	struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch);
	unsigned long psize = batch->psize;
	int ssize = batch->ssize;
	int i;

	local_irq_save(flags);

	for (i = 0; i < number; i++) {
		vpn = batch->vpn[i];
		pte = batch->pte[i];

		pte_iterate_hashed_subpages(pte, psize, vpn, index, shift) {
			hash = hpt_hash(vpn, shift, ssize);
			hidx = __rpte_to_hidx(pte, index);
			if (hidx & _PTEIDX_SECONDARY)
				hash = ~hash;
			slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
			slot += hidx & _PTEIDX_GROUP_IX;
			hptep = htab_address + slot;
			want_v = hpte_encode_avpn(vpn, psize, ssize);
			native_lock_hpte(hptep);
			hpte_v = be64_to_cpu(hptep->v);
			if (!HPTE_V_COMPARE(hpte_v, want_v) ||
			    !(hpte_v & HPTE_V_VALID))
				native_unlock_hpte(hptep);
			else
				hptep->v = 0;
		} pte_iterate_hashed_end();
	}

	if (mmu_has_feature(MMU_FTR_TLBIEL) &&
	    mmu_psize_defs[psize].tlbiel && local) {
		asm volatile("ptesync":::"memory");
		for (i = 0; i < number; i++) {
			vpn = batch->vpn[i];
			pte = batch->pte[i];

			pte_iterate_hashed_subpages(pte, psize,
						    vpn, index, shift) {
				__tlbiel(vpn, psize, psize, ssize);
			} pte_iterate_hashed_end();
		}
		asm volatile("ptesync":::"memory");
	} else {
		int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);

		if (lock_tlbie)
			raw_spin_lock(&native_tlbie_lock);

		asm volatile("ptesync":::"memory");
		for (i = 0; i < number; i++) {
			vpn = batch->vpn[i];
			pte = batch->pte[i];

			pte_iterate_hashed_subpages(pte, psize,
						    vpn, index, shift) {
				__tlbie(vpn, psize, psize, ssize);
			} pte_iterate_hashed_end();
		}
		asm volatile("eieio; tlbsync; ptesync":::"memory");

		if (lock_tlbie)
			raw_spin_unlock(&native_tlbie_lock);
	}

	local_irq_restore(flags);
}

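/*
 * Install the native (bare-metal) hash-table operations in the machine
 * descriptor.  Platforms that run under a hypervisor hook up their own
 * implementations of these callbacks instead.
 */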
void __init hpte_init_native(void)
{
	ppc_md.hpte_invalidate = native_hpte_invalidate;
	ppc_md.hpte_updatepp = native_hpte_updatepp;
	ppc_md.hpte_updateboltedpp = native_hpte_updateboltedpp;
	ppc_md.hpte_insert = native_hpte_insert;
	ppc_md.hpte_remove = native_hpte_remove;
	ppc_md.hpte_clear_all = native_hpte_clear;
	ppc_md.flush_hash_range = native_flush_hash_range;
	ppc_md.hugepage_invalidate = native_hugepage_invalidate;
}