hash_native_64.c

/*
 * native hashtable management.
 *
 * SMP scalability work:
 *    Copyright (C) 2001 Anton Blanchard <anton@au.ibm.com>, IBM
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#undef DEBUG_LOW

#include <linux/spinlock.h>
#include <linux/bitops.h>
#include <linux/of.h>
#include <linux/threads.h>
#include <linux/smp.h>

#include <asm/machdep.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/tlb.h>
#include <asm/cputable.h>
#include <asm/udbg.h>
#include <asm/kexec.h>
#include <asm/ppc-opcode.h>

#ifdef DEBUG_LOW
#define DBG_LOW(fmt...) udbg_printf(fmt)
#else
#define DBG_LOW(fmt...)
#endif

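/*
 * Bit 3 of the HPTE's first ("valid") doubleword is one of the bits the
 * architecture reserves for software use; we use it as a per-slot lock,
 * see native_lock_hpte()/native_unlock_hpte() below.
 */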
#define HPTE_LOCK_BIT 3

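/*
 * Serializes broadcast tlbie on machines without MMU_FTR_LOCKLESS_TLBIE:
 * some hardware will deadlock if two processors issue tlbie at once (see
 * the comments in native_hpte_clear(), which holds this lock across the
 * whole flush).
 */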
DEFINE_RAW_SPINLOCK(native_tlbie_lock);

static inline void __tlbie(unsigned long vpn, int psize, int apsize, int ssize)
{
	unsigned long va;
	unsigned int penc;

	/*
	 * We need 14 to 65 bits of va for a tlbie of 4K page
	 * With vpn we ignore the lower VPN_SHIFT bits already.
	 * And top two bits are already ignored because we can
	 * only accommodate 76 bits in a 64 bit vpn with a VPN_SHIFT
	 * of 12.
	 */
	va = vpn << VPN_SHIFT;
	/*
	 * clear top 16 bits of 64bit va, non SLS segment
	 * Older versions of the architecture (2.02 and earlier) require the
	 * masking of the top 16 bits.
	 */
	va &= ~(0xffffULL << 48);

	switch (psize) {
	case MMU_PAGE_4K:
		va |= ssize << 8;
		asm volatile(ASM_FTR_IFCLR("tlbie %0,0", PPC_TLBIE(%1,%0), %2)
			     : : "r" (va), "r"(0), "i" (CPU_FTR_ARCH_206)
			     : "memory");
		break;
	default:
		/* We need 14 to 14 + i bits of va */
		penc = mmu_psize_defs[psize].penc[apsize];
		va &= ~((1ul << mmu_psize_defs[psize].shift) - 1);
		va |= penc << 12;
		va |= ssize << 8;
		va |= 1; /* L */
		asm volatile(ASM_FTR_IFCLR("tlbie %0,1", PPC_TLBIE(%1,%0), %2)
			     : : "r" (va), "r"(0), "i" (CPU_FTR_ARCH_206)
			     : "memory");
		break;
	}
}

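/*
 * tlbiel is the local form of tlbie: it invalidates the translation only on
 * the executing processor, so there is no broadcast and no need for
 * native_tlbie_lock.
 */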
static inline void __tlbiel(unsigned long vpn, int psize, int apsize, int ssize)
{
	unsigned long va;
	unsigned int penc;

	/* VPN_SHIFT can be at most 12 */
	va = vpn << VPN_SHIFT;
	/*
	 * clear top 16 bits of 64 bit va, non SLS segment
	 * Older versions of the architecture (2.02 and earlier) require the
	 * masking of the top 16 bits.
	 */
	va &= ~(0xffffULL << 48);

	switch (psize) {
	case MMU_PAGE_4K:
		va |= ssize << 8;
		asm volatile(".long 0x7c000224 | (%0 << 11) | (0 << 21)"
			     : : "r"(va) : "memory");
		break;
	default:
		/* We need 14 to 14 + i bits of va */
		penc = mmu_psize_defs[psize].penc[apsize];
		va &= ~((1ul << mmu_psize_defs[psize].shift) - 1);
		va |= penc << 12;
		va |= ssize << 8;
		va |= 1; /* L */
		asm volatile(".long 0x7c000224 | (%0 << 11) | (1 << 21)"
			     : : "r"(va) : "memory");
		break;
	}
}

static inline void tlbie(unsigned long vpn, int psize, int apsize,
			 int ssize, int local)
{
	unsigned int use_local = local && mmu_has_feature(MMU_FTR_TLBIEL);
	int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);

	if (use_local)
		use_local = mmu_psize_defs[psize].tlbiel;
	if (lock_tlbie && !use_local)
		raw_spin_lock(&native_tlbie_lock);
	asm volatile("ptesync": : :"memory");
	if (use_local) {
		__tlbiel(vpn, psize, apsize, ssize);
		asm volatile("ptesync": : :"memory");
	} else {
		__tlbie(vpn, psize, apsize, ssize);
		asm volatile("eieio; tlbsync; ptesync": : :"memory");
	}
	if (lock_tlbie && !use_local)
		raw_spin_unlock(&native_tlbie_lock);
}

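/*
 * Per-slot HPTE locking: HPTE_LOCK_BIT is acquired with
 * test_and_set_bit_lock() and, on contention, spun on with plain reads
 * until it clears. Any store to hptep->v that leaves the lock bit clear
 * also releases the lock, which is why writing the new valid word (or
 * zeroing it on invalidate) doubles as the unlock in the functions below.
 */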
static inline void native_lock_hpte(struct hash_pte *hptep)
{
	unsigned long *word = &hptep->v;

	while (1) {
		if (!test_and_set_bit_lock(HPTE_LOCK_BIT, word))
			break;
		while (test_bit(HPTE_LOCK_BIT, word))
			cpu_relax();
	}
}

static inline void native_unlock_hpte(struct hash_pte *hptep)
{
	unsigned long *word = &hptep->v;

	clear_bit_unlock(HPTE_LOCK_BIT, word);
}

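/*
 * Insert an HPTE into the given group: scan the slots for an invalid one,
 * write the second doubleword first, then the valid word. The return value
 * encodes the slot index in the low three bits, with bit 3 set when the
 * entry was inserted via the secondary hash.
 */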
static long native_hpte_insert(unsigned long hpte_group, unsigned long vpn,
			       unsigned long pa, unsigned long rflags,
			       unsigned long vflags, int psize, int apsize, int ssize)
{
	struct hash_pte *hptep = htab_address + hpte_group;
	unsigned long hpte_v, hpte_r;
	int i;

	if (!(vflags & HPTE_V_BOLTED)) {
		DBG_LOW(" insert(group=%lx, vpn=%016lx, pa=%016lx,"
			" rflags=%lx, vflags=%lx, psize=%d)\n",
			hpte_group, vpn, pa, rflags, vflags, psize);
	}

	for (i = 0; i < HPTES_PER_GROUP; i++) {
		if (!(hptep->v & HPTE_V_VALID)) {
			/* retry with lock held */
			native_lock_hpte(hptep);
			if (!(hptep->v & HPTE_V_VALID))
				break;
			native_unlock_hpte(hptep);
		}
		hptep++;
	}

	if (i == HPTES_PER_GROUP)
		return -1;

	hpte_v = hpte_encode_v(vpn, psize, apsize, ssize) | vflags | HPTE_V_VALID;
	hpte_r = hpte_encode_r(pa, psize, apsize) | rflags;

	if (!(vflags & HPTE_V_BOLTED)) {
		DBG_LOW(" i=%x hpte_v=%016lx, hpte_r=%016lx\n",
			i, hpte_v, hpte_r);
	}

	hptep->r = hpte_r;
	/* Guarantee the second dword is visible before the valid bit */
	eieio();
	/*
	 * Now set the first dword including the valid bit
	 * NOTE: this also unlocks the hpte
	 */
	hptep->v = hpte_v;

	__asm__ __volatile__ ("ptesync" : : : "memory");

	return i | (!!(vflags & HPTE_V_SECONDARY) << 3);
}

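/*
 * Evict one non-bolted entry from a full group so the caller can insert a
 * new one. The search starts at a pseudo-random slot (low timebase bits) so
 * that evictions spread across the group.
 */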
static long native_hpte_remove(unsigned long hpte_group)
{
	struct hash_pte *hptep;
	int i;
	int slot_offset;
	unsigned long hpte_v;

	DBG_LOW(" remove(group=%lx)\n", hpte_group);

	/* pick a random entry to start at */
	slot_offset = mftb() & 0x7;

	for (i = 0; i < HPTES_PER_GROUP; i++) {
		hptep = htab_address + hpte_group + slot_offset;
		hpte_v = hptep->v;

		if ((hpte_v & HPTE_V_VALID) && !(hpte_v & HPTE_V_BOLTED)) {
			/* retry with lock held */
			native_lock_hpte(hptep);
			hpte_v = hptep->v;
			if ((hpte_v & HPTE_V_VALID)
			    && !(hpte_v & HPTE_V_BOLTED))
				break;
			native_unlock_hpte(hptep);
		}

		slot_offset++;
		slot_offset &= 0x7;
	}

	if (i == HPTES_PER_GROUP)
		return -1;

	/* Invalidate the hpte. NOTE: this also unlocks it */
	hptep->v = 0;

	return i;
}

static inline int __hpte_actual_psize(unsigned int lp, int psize)
{
	int i, shift;
	unsigned int mask;

	/* start from 1 ignoring MMU_PAGE_4K */
	for (i = 1; i < MMU_PAGE_COUNT; i++) {

		/* invalid penc */
		if (mmu_psize_defs[psize].penc[i] == -1)
			continue;
		/*
		 * encoding bits per actual page size
		 *        PTE LP     actual page size
		 *    rrrr rrrz		>=8KB
		 *    rrrr rrzz		>=16KB
		 *    rrrr rzzz		>=32KB
		 *    rrrr zzzz		>=64KB
		 * .......
		 */
		shift = mmu_psize_defs[i].shift - LP_SHIFT;
		if (shift > LP_BITS)
			shift = LP_BITS;
		mask = (1 << shift) - 1;
		if ((lp & mask) == mmu_psize_defs[psize].penc[i])
			return i;
	}
	return -1;
}

static inline int hpte_actual_psize(struct hash_pte *hptep, int psize)
{
	/* Look at the 8 bit LP value */
	unsigned int lp = (hptep->r >> LP_SHIFT) & ((1 << LP_BITS) - 1);

	if (!(hptep->v & HPTE_V_VALID))
		return -1;

	/* First check if it is large page */
	if (!(hptep->v & HPTE_V_LARGE))
		return MMU_PAGE_4K;

	return __hpte_actual_psize(lp, psize);
}

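/*
 * Update the protection bits of an existing HPTE. Returns 0 on a hit and
 * -1 on a miss; the TLB entry for vpn is flushed in both cases.
 */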
static long native_hpte_updatepp(unsigned long slot, unsigned long newpp,
				 unsigned long vpn, int psize, int ssize,
				 int local)
{
	struct hash_pte *hptep = htab_address + slot;
	unsigned long hpte_v, want_v;
	int ret = 0;
	int actual_psize;

	want_v = hpte_encode_avpn(vpn, psize, ssize);

	DBG_LOW(" update(vpn=%016lx, avpnv=%016lx, group=%lx, newpp=%lx)",
		vpn, want_v & HPTE_V_AVPN, slot, newpp);

	native_lock_hpte(hptep);

	hpte_v = hptep->v;
	actual_psize = hpte_actual_psize(hptep, psize);
	if (actual_psize < 0) {
		native_unlock_hpte(hptep);
		return -1;
	}
	/* Even if we miss, we need to invalidate the TLB */
	if (!HPTE_V_COMPARE(hpte_v, want_v)) {
		DBG_LOW(" -> miss\n");
		ret = -1;
	} else {
		DBG_LOW(" -> hit\n");
		/* Update the HPTE */
		hptep->r = (hptep->r & ~(HPTE_R_PP | HPTE_R_N)) |
			(newpp & (HPTE_R_PP | HPTE_R_N | HPTE_R_C));
	}
	native_unlock_hpte(hptep);

	/* Ensure it is out of the tlb too. */
	tlbie(vpn, psize, actual_psize, ssize, local);

	return ret;
}

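/*
 * Find the slot of a bolted mapping for vpn. Only the primary hash group
 * needs searching, since bolted entries are never moved to the secondary
 * group.
 */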
static long native_hpte_find(unsigned long vpn, int psize, int ssize)
{
	struct hash_pte *hptep;
	unsigned long hash;
	unsigned long i;
	long slot;
	unsigned long want_v, hpte_v;

	hash = hpt_hash(vpn, mmu_psize_defs[psize].shift, ssize);
	want_v = hpte_encode_avpn(vpn, psize, ssize);

	/* Bolted mappings are only ever in the primary group */
	slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
	for (i = 0; i < HPTES_PER_GROUP; i++) {
		hptep = htab_address + slot;
		hpte_v = hptep->v;

		if (HPTE_V_COMPARE(hpte_v, want_v) && (hpte_v & HPTE_V_VALID))
			/* HPTE matches */
			return slot;
		++slot;
	}

	return -1;
}

/*
 * Update the page protection bits. Intended to be used to create
 * guard pages for kernel data structures on pages which are bolted
 * in the HPT. Assumes pages being operated on will not be stolen.
 *
 * No need to lock here because we should be the only user.
 */
static void native_hpte_updateboltedpp(unsigned long newpp, unsigned long ea,
				       int psize, int ssize)
{
	int actual_psize;
	unsigned long vpn;
	unsigned long vsid;
	long slot;
	struct hash_pte *hptep;

	vsid = get_kernel_vsid(ea, ssize);
	vpn = hpt_vpn(ea, vsid, ssize);

	slot = native_hpte_find(vpn, psize, ssize);
	if (slot == -1)
		panic("could not find page to bolt\n");
	hptep = htab_address + slot;
	actual_psize = hpte_actual_psize(hptep, psize);
	if (actual_psize < 0)
		return;

	/* Update the HPTE */
	hptep->r = (hptep->r & ~(HPTE_R_PP | HPTE_R_N)) |
		(newpp & (HPTE_R_PP | HPTE_R_N));

	/* Ensure it is out of the tlb too. */
	tlbie(vpn, psize, actual_psize, ssize, 0);
}

static void native_hpte_invalidate(unsigned long slot, unsigned long vpn,
				   int psize, int ssize, int local)
{
	struct hash_pte *hptep = htab_address + slot;
	unsigned long hpte_v;
	unsigned long want_v;
	unsigned long flags;
	int actual_psize;

	local_irq_save(flags);

	DBG_LOW(" invalidate(vpn=%016lx, hash: %lx)\n", vpn, slot);

	want_v = hpte_encode_avpn(vpn, psize, ssize);
	native_lock_hpte(hptep);
	hpte_v = hptep->v;
	actual_psize = hpte_actual_psize(hptep, psize);
	if (actual_psize < 0) {
		native_unlock_hpte(hptep);
		local_irq_restore(flags);
		return;
	}
	/* Even if we miss, we need to invalidate the TLB */
	if (!HPTE_V_COMPARE(hpte_v, want_v))
		native_unlock_hpte(hptep);
	else
		/* Invalidate the hpte. NOTE: this also unlocks it */
		hptep->v = 0;

	/* Invalidate the TLB */
	tlbie(vpn, psize, actual_psize, ssize, local);

	local_irq_restore(flags);
}

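/*
 * Reconstruct the vpn, base and actual page size, and segment size from a
 * raw HPTE plus its slot number. Used by native_hpte_clear(), which has no
 * Linux PTE to consult, to work out what to pass to __tlbie().
 */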
static void hpte_decode(struct hash_pte *hpte, unsigned long slot,
			int *psize, int *apsize, int *ssize, unsigned long *vpn)
{
	unsigned long avpn, pteg, vpi;
	unsigned long hpte_v = hpte->v;
	unsigned long vsid, seg_off;
	int size, a_size, shift;
	/* Look at the 8 bit LP value */
	unsigned int lp = (hpte->r >> LP_SHIFT) & ((1 << LP_BITS) - 1);

	if (!(hpte_v & HPTE_V_LARGE)) {
		size   = MMU_PAGE_4K;
		a_size = MMU_PAGE_4K;
	} else {
		for (size = 0; size < MMU_PAGE_COUNT; size++) {

			/* valid entries have a shift value */
			if (!mmu_psize_defs[size].shift)
				continue;

			a_size = __hpte_actual_psize(lp, size);
			if (a_size != -1)
				break;
		}
	}
	/* This works for all page sizes, and for 256M and 1T segments */
	*ssize = hpte_v >> HPTE_V_SSIZE_SHIFT;
	shift = mmu_psize_defs[size].shift;

	avpn = (HPTE_V_AVPN_VAL(hpte_v) & ~mmu_psize_defs[size].avpnm);
	pteg = slot / HPTES_PER_GROUP;
	if (hpte_v & HPTE_V_SECONDARY)
		pteg = ~pteg;

	switch (*ssize) {
	case MMU_SEGSIZE_256M:
		/* We only have 28 - 23 bits of seg_off in avpn */
		seg_off = (avpn & 0x1f) << 23;
		vsid    = avpn >> 5;
		/* We can find more bits from the pteg value */
		if (shift < 23) {
			vpi = (vsid ^ pteg) & htab_hash_mask;
			seg_off |= vpi << shift;
		}
		*vpn = vsid << (SID_SHIFT - VPN_SHIFT) | seg_off >> VPN_SHIFT;
		break;
	case MMU_SEGSIZE_1T:
		/* We only have 40 - 23 bits of seg_off in avpn */
		seg_off = (avpn & 0x1ffff) << 23;
		vsid    = avpn >> 17;
		if (shift < 23) {
			vpi = (vsid ^ (vsid << 25) ^ pteg) & htab_hash_mask;
			seg_off |= vpi << shift;
		}
		*vpn = vsid << (SID_SHIFT_1T - VPN_SHIFT) | seg_off >> VPN_SHIFT;
		break;
	default:
		*vpn = size = 0;
	}
	*psize  = size;
	*apsize = a_size;
}

/*
 * clear all mappings on kexec.  All cpus are in real mode (or they will
 * be when they isi), and we are the only one left.  We rely on our kernel
 * mapping being 0xC0's and the hardware ignoring those two real bits.
 *
 * TODO: add batching support when enabled.  remember, no dynamic memory here,
 * although there is the control page available...
 */
static void native_hpte_clear(void)
{
	unsigned long vpn = 0;
	unsigned long slot, slots, flags;
	struct hash_pte *hptep = htab_address;
	unsigned long hpte_v;
	unsigned long pteg_count;
	int psize, apsize, ssize;

	pteg_count = htab_hash_mask + 1;

	local_irq_save(flags);

	/* we take the tlbie lock and hold it.  Some hardware will
	 * deadlock if we try to tlbie from two processors at once.
	 */
	raw_spin_lock(&native_tlbie_lock);

	slots = pteg_count * HPTES_PER_GROUP;

	for (slot = 0; slot < slots; slot++, hptep++) {
		/*
		 * we could lock the pte here, but we are the only cpu
		 * running, right? and for crash dump, we probably
		 * don't want to wait for a maybe bad cpu.
		 */
		hpte_v = hptep->v;

		/*
		 * Call __tlbie() here rather than tlbie() since we
		 * already hold the native_tlbie_lock.
		 */
		if (hpte_v & HPTE_V_VALID) {
			hpte_decode(hptep, slot, &psize, &apsize, &ssize, &vpn);
			hptep->v = 0;
			__tlbie(vpn, psize, apsize, ssize);
		}
	}

	asm volatile("eieio; tlbsync; ptesync":::"memory");
	raw_spin_unlock(&native_tlbie_lock);
	local_irq_restore(flags);
}

/*
 * Batched hash table flush: we batch the tlbies to avoid taking/releasing
 * the lock all the time.
 */
static void native_flush_hash_range(unsigned long number, int local)
{
	unsigned long vpn;
	unsigned long hash, index, hidx, shift, slot;
	struct hash_pte *hptep;
	unsigned long hpte_v;
	unsigned long want_v;
	unsigned long flags;
	real_pte_t pte;
	struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch);
	unsigned long psize = batch->psize;
	int ssize = batch->ssize;
	int i;

	local_irq_save(flags);

	for (i = 0; i < number; i++) {
		vpn = batch->vpn[i];
		pte = batch->pte[i];

		pte_iterate_hashed_subpages(pte, psize, vpn, index, shift) {
			hash = hpt_hash(vpn, shift, ssize);
			hidx = __rpte_to_hidx(pte, index);
			if (hidx & _PTEIDX_SECONDARY)
				hash = ~hash;
			slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
			slot += hidx & _PTEIDX_GROUP_IX;
			hptep = htab_address + slot;
			want_v = hpte_encode_avpn(vpn, psize, ssize);
			native_lock_hpte(hptep);
			hpte_v = hptep->v;
			if (!HPTE_V_COMPARE(hpte_v, want_v) ||
			    !(hpte_v & HPTE_V_VALID))
				native_unlock_hpte(hptep);
			else
				hptep->v = 0;
		} pte_iterate_hashed_end();
	}

	if (mmu_has_feature(MMU_FTR_TLBIEL) &&
	    mmu_psize_defs[psize].tlbiel && local) {
		asm volatile("ptesync":::"memory");
		for (i = 0; i < number; i++) {
			vpn = batch->vpn[i];
			pte = batch->pte[i];

			pte_iterate_hashed_subpages(pte, psize,
						    vpn, index, shift) {
				__tlbiel(vpn, psize, psize, ssize);
			} pte_iterate_hashed_end();
		}
		asm volatile("ptesync":::"memory");
	} else {
		int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);

		if (lock_tlbie)
			raw_spin_lock(&native_tlbie_lock);

		asm volatile("ptesync":::"memory");
		for (i = 0; i < number; i++) {
			vpn = batch->vpn[i];
			pte = batch->pte[i];

			pte_iterate_hashed_subpages(pte, psize,
						    vpn, index, shift) {
				__tlbie(vpn, psize, psize, ssize);
			} pte_iterate_hashed_end();
		}
		asm volatile("eieio; tlbsync; ptesync":::"memory");

		if (lock_tlbie)
			raw_spin_unlock(&native_tlbie_lock);
	}

	local_irq_restore(flags);
}

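/*
 * Wire the native (bare-metal) hash-MMU implementations into the machine
 * descriptor; being __init, this runs once during early boot.
 */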
void __init hpte_init_native(void)
{
	ppc_md.hpte_invalidate	= native_hpte_invalidate;
	ppc_md.hpte_updatepp	= native_hpte_updatepp;
	ppc_md.hpte_updateboltedpp = native_hpte_updateboltedpp;
	ppc_md.hpte_insert	= native_hpte_insert;
	ppc_md.hpte_remove	= native_hpte_remove;
	ppc_md.hpte_clear_all	= native_hpte_clear;
	ppc_md.flush_hash_range = native_flush_hash_range;
}