/*
 * PowerPC64 port by Mike Corrigan and Dave Engebretsen
 *   {mikejc|engebret}@us.ibm.com
 *
 *    Copyright (c) 2000 Mike Corrigan <mikejc@us.ibm.com>
 *
 * SMP scalability work:
 *    Copyright (C) 2001 Anton Blanchard <anton@au.ibm.com>, IBM
 *
 *    Module name: htab.c
 *
 *    Description:
 *      PowerPC Hashed Page Table functions
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#undef DEBUG
#undef DEBUG_LOW

#include <linux/spinlock.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/proc_fs.h>
#include <linux/stat.h>
#include <linux/sysctl.h>
#include <linux/ctype.h>
#include <linux/cache.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/lmb.h>

#include <asm/processor.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>
#include <asm/page.h>
#include <asm/types.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/machdep.h>
#include <asm/prom.h>
#include <asm/abs_addr.h>
#include <asm/tlbflush.h>
#include <asm/io.h>
#include <asm/eeh.h>
#include <asm/tlb.h>
#include <asm/cacheflush.h>
#include <asm/cputable.h>
#include <asm/sections.h>
#include <asm/spu.h>
#include <asm/udbg.h>

#ifdef DEBUG
#define DBG(fmt...) udbg_printf(fmt)
#else
#define DBG(fmt...)
#endif

#ifdef DEBUG_LOW
#define DBG_LOW(fmt...) udbg_printf(fmt)
#else
#define DBG_LOW(fmt...)
#endif

#define KB (1024)
#define MB (1024*KB)
#define GB (1024L*MB)
/*
 * Note:  pte   --> Linux PTE
 *        HPTE  --> PowerPC Hashed Page Table Entry
 *
 * Execution context:
 *   htab_initialize is called with the MMU off (of course), but
 *   the kernel has been copied down to zero so it can directly
 *   reference global data.  At this point it is very difficult
 *   to print debug info.
 */

#ifdef CONFIG_U3_DART
extern unsigned long dart_tablebase;
#endif /* CONFIG_U3_DART */

static unsigned long _SDR1;
struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT];

struct hash_pte *htab_address;
unsigned long htab_size_bytes;
unsigned long htab_hash_mask;
int mmu_linear_psize = MMU_PAGE_4K;
int mmu_virtual_psize = MMU_PAGE_4K;
int mmu_vmalloc_psize = MMU_PAGE_4K;
#ifdef CONFIG_SPARSEMEM_VMEMMAP
int mmu_vmemmap_psize = MMU_PAGE_4K;
#endif
int mmu_io_psize = MMU_PAGE_4K;
int mmu_kernel_ssize = MMU_SEGSIZE_256M;
int mmu_highuser_ssize = MMU_SEGSIZE_256M;
u16 mmu_slb_size = 64;
#ifdef CONFIG_HUGETLB_PAGE
unsigned int HPAGE_SHIFT;
#endif
#ifdef CONFIG_PPC_64K_PAGES
int mmu_ci_restrictions;
#endif
#ifdef CONFIG_DEBUG_PAGEALLOC
static u8 *linear_map_hash_slots;
static unsigned long linear_map_hash_count;
static DEFINE_SPINLOCK(linear_map_hash_lock);
#endif /* CONFIG_DEBUG_PAGEALLOC */
/* Default page-size arrays, used when the firmware does not provide
 * one.
 */

/* Pre-POWER4 CPUs (4k pages only)
 */
static struct mmu_psize_def mmu_psize_defaults_old[] = {
        [MMU_PAGE_4K] = {
                .shift  = 12,
                .sllp   = 0,
                .penc   = 0,
                .avpnm  = 0,
                .tlbiel = 0,
        },
};

/* POWER4, GPUL, POWER5
 *
 * Support for 16MB large pages
 */
static struct mmu_psize_def mmu_psize_defaults_gp[] = {
        [MMU_PAGE_4K] = {
                .shift  = 12,
                .sllp   = 0,
                .penc   = 0,
                .avpnm  = 0,
                .tlbiel = 1,
        },
        [MMU_PAGE_16M] = {
                .shift  = 24,
                .sllp   = SLB_VSID_L,
                .penc   = 0,
                .avpnm  = 0x1UL,
                .tlbiel = 0,
        },
};
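/* Convert Linux PTE protection flags into the HPTE second-doubleword
 * (R) flags.  The low Linux PTE bits are laid out so that most of
 * them can be copied through directly; on top of that we set
 * HPTE_R_N (no-execute) when _PAGE_EXEC is clear, derive the PP
 * protection bits from _PAGE_USER/_PAGE_RW/_PAGE_DIRTY, and always
 * set HPTE_R_C (changed) so the hardware never has to update it.
 */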
static unsigned long htab_convert_pte_flags(unsigned long pteflags)
{
        unsigned long rflags = pteflags & 0x1fa;

        /* _PAGE_EXEC -> NOEXEC */
        if ((pteflags & _PAGE_EXEC) == 0)
                rflags |= HPTE_R_N;

        /* PP bits. PAGE_USER is already PP bit 0x2, so we only
         * need to add in 0x1 if it's a read-only user page
         */
        if ((pteflags & _PAGE_USER) && !((pteflags & _PAGE_RW) &&
                                         (pteflags & _PAGE_DIRTY)))
                rflags |= 1;

        /* Always add C */
        return rflags | HPTE_R_C;
}
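/* Create bolted (non-evictable) hash table entries covering the
 * virtual range vstart..vend, mapped to physical addresses starting
 * at pstart, in steps of one page of the given size.  Each hash
 * bucket (PTEG) holds HPTES_PER_GROUP entries; the bucket index is
 * the hash value masked by htab_hash_mask.
 */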
int htab_bolt_mapping(unsigned long vstart, unsigned long vend,
                      unsigned long pstart, unsigned long prot,
                      int psize, int ssize)
{
        unsigned long vaddr, paddr;
        unsigned int step, shift;
        int ret = 0;

        shift = mmu_psize_defs[psize].shift;
        step = 1 << shift;

        prot = htab_convert_pte_flags(prot);

        DBG("htab_bolt_mapping(%lx..%lx -> %lx (%lx,%d,%d)\n",
            vstart, vend, pstart, prot, psize, ssize);

        for (vaddr = vstart, paddr = pstart; vaddr < vend;
             vaddr += step, paddr += step) {
                unsigned long hash, hpteg;
                unsigned long vsid = get_kernel_vsid(vaddr, ssize);
                unsigned long va = hpt_va(vaddr, vsid, ssize);
                unsigned long tprot = prot;

                /* Make kernel text executable */
                if (overlaps_kernel_text(vaddr, vaddr + step))
                        tprot &= ~HPTE_R_N;

                hash = hpt_hash(va, shift, ssize);
                hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP);

                BUG_ON(!ppc_md.hpte_insert);
                ret = ppc_md.hpte_insert(hpteg, va, paddr, tprot,
                                         HPTE_V_BOLTED, psize, ssize);

                if (ret < 0)
                        break;
#ifdef CONFIG_DEBUG_PAGEALLOC
                if ((paddr >> PAGE_SHIFT) < linear_map_hash_count)
                        linear_map_hash_slots[paddr >> PAGE_SHIFT] = ret | 0x80;
#endif /* CONFIG_DEBUG_PAGEALLOC */
        }
        return ret < 0 ? ret : 0;
}
#ifdef CONFIG_MEMORY_HOTPLUG
static int htab_remove_mapping(unsigned long vstart, unsigned long vend,
                               int psize, int ssize)
{
        unsigned long vaddr;
        unsigned int step, shift;

        shift = mmu_psize_defs[psize].shift;
        step = 1 << shift;

        if (!ppc_md.hpte_removebolted) {
                printk(KERN_WARNING "Platform doesn't implement "
                       "hpte_removebolted\n");
                return -EINVAL;
        }

        for (vaddr = vstart; vaddr < vend; vaddr += step)
                ppc_md.hpte_removebolted(vaddr, psize, ssize);

        return 0;
}
#endif /* CONFIG_MEMORY_HOTPLUG */
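/* The "ibm,processor-segment-sizes" property is a list of supported
 * segment-size shifts; an entry of 0x28 (40) denotes 1TB segments
 * (2^40 bytes).  We only look for the 1TB entry here, since 256MB
 * segment support is taken for granted.
 */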
static int __init htab_dt_scan_seg_sizes(unsigned long node,
                                         const char *uname, int depth,
                                         void *data)
{
        char *type = of_get_flat_dt_prop(node, "device_type", NULL);
        u32 *prop;
        unsigned long size = 0;

        /* We are scanning "cpu" nodes only */
        if (type == NULL || strcmp(type, "cpu") != 0)
                return 0;

        prop = (u32 *)of_get_flat_dt_prop(node, "ibm,processor-segment-sizes",
                                          &size);
        if (prop == NULL)
                return 0;
        for (; size >= 4; size -= 4, ++prop) {
                if (prop[0] == 40) {
                        DBG("1T segment support detected\n");
                        cur_cpu_spec->cpu_features |= CPU_FTR_1T_SEGMENT;
                        return 1;
                }
        }
        cur_cpu_spec->cpu_features &= ~CPU_FTR_NO_SLBIE_B;
        return 0;
}

static void __init htab_init_seg_sizes(void)
{
        of_scan_flat_dt(htab_dt_scan_seg_sizes, NULL);
}
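/* The "ibm,segment-page-sizes" property, as parsed below, is a
 * flattened list of records of the form:
 *
 *      <base page shift> <SLB encoding> <count>
 *              { <actual page shift> <HPTE encoding> } * count
 *
 * i.e. for each supported base (segment) page size we get the SLB
 * encoding and a sub-list of the actual page sizes usable within
 * such a segment, each with its hash PTE encoding.
 */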
static int __init htab_dt_scan_page_sizes(unsigned long node,
                                          const char *uname, int depth,
                                          void *data)
{
        char *type = of_get_flat_dt_prop(node, "device_type", NULL);
        u32 *prop;
        unsigned long size = 0;

        /* We are scanning "cpu" nodes only */
        if (type == NULL || strcmp(type, "cpu") != 0)
                return 0;

        prop = (u32 *)of_get_flat_dt_prop(node,
                                          "ibm,segment-page-sizes", &size);
        if (prop != NULL) {
                DBG("Page sizes from device-tree:\n");
                size /= 4;
                cur_cpu_spec->cpu_features &= ~(CPU_FTR_16M_PAGE);
                while (size > 0) {
                        unsigned int shift = prop[0];
                        unsigned int slbenc = prop[1];
                        unsigned int lpnum = prop[2];
                        unsigned int lpenc = 0;
                        struct mmu_psize_def *def;
                        int idx = -1;

                        size -= 3; prop += 3;
                        while (size > 0 && lpnum) {
                                if (prop[0] == shift)
                                        lpenc = prop[1];
                                prop += 2; size -= 2;
                                lpnum--;
                        }
                        switch (shift) {
                        case 0xc:
                                idx = MMU_PAGE_4K;
                                break;
                        case 0x10:
                                idx = MMU_PAGE_64K;
                                break;
                        case 0x14:
                                idx = MMU_PAGE_1M;
                                break;
                        case 0x18:
                                idx = MMU_PAGE_16M;
                                cur_cpu_spec->cpu_features |= CPU_FTR_16M_PAGE;
                                break;
                        case 0x22:
                                idx = MMU_PAGE_16G;
                                break;
                        }
                        if (idx < 0)
                                continue;
                        def = &mmu_psize_defs[idx];
                        def->shift = shift;
                        if (shift <= 23)
                                def->avpnm = 0;
                        else
                                def->avpnm = (1 << (shift - 23)) - 1;
                        def->sllp = slbenc;
                        def->penc = lpenc;
                        /* We don't know for sure what's up with tlbiel, so
                         * for now we only set it for 4K and 64K pages
                         */
                        if (idx == MMU_PAGE_4K || idx == MMU_PAGE_64K)
                                def->tlbiel = 1;
                        else
                                def->tlbiel = 0;

                        DBG(" %d: shift=%02x, sllp=%04x, avpnm=%08x, "
                            "tlbiel=%d, penc=%d\n",
                            idx, shift, def->sllp, def->avpnm, def->tlbiel,
                            def->penc);
                }
                return 1;
        }
        return 0;
}
#ifdef CONFIG_HUGETLB_PAGE
/* Scan for 16G memory blocks that have been set aside for huge pages
 * and reserve those blocks for 16G huge pages.
 */
static int __init htab_dt_scan_hugepage_blocks(unsigned long node,
                                               const char *uname, int depth,
                                               void *data)
{
        char *type = of_get_flat_dt_prop(node, "device_type", NULL);
        unsigned long *addr_prop;
        u32 *page_count_prop;
        unsigned int expected_pages;
        long unsigned int phys_addr;
        long unsigned int block_size;

        /* We are scanning "memory" nodes only */
        if (type == NULL || strcmp(type, "memory") != 0)
                return 0;

        /* This property is the log base 2 of the number of virtual pages that
         * will represent this memory block. */
        page_count_prop = of_get_flat_dt_prop(node, "ibm,expected#pages", NULL);
        if (page_count_prop == NULL)
                return 0;
        expected_pages = (1 << page_count_prop[0]);
        addr_prop = of_get_flat_dt_prop(node, "reg", NULL);
        if (addr_prop == NULL)
                return 0;
        phys_addr = addr_prop[0];
        block_size = addr_prop[1];
        if (block_size != (16 * GB))
                return 0;
        printk(KERN_INFO "Huge page(16GB) memory: "
               "addr = 0x%lX size = 0x%lX pages = %d\n",
               phys_addr, block_size, expected_pages);
        if (phys_addr + (16 * GB) <= lmb_end_of_DRAM()) {
                lmb_reserve(phys_addr, block_size * expected_pages);
                add_gpage(phys_addr, block_size, expected_pages);
        }
        return 0;
}
#endif /* CONFIG_HUGETLB_PAGE */
static void __init htab_init_page_sizes(void)
{
        int rc;

        /* Default to 4K pages only */
        memcpy(mmu_psize_defs, mmu_psize_defaults_old,
               sizeof(mmu_psize_defaults_old));

        /*
         * Try to find the available page sizes in the device-tree
         */
        rc = of_scan_flat_dt(htab_dt_scan_page_sizes, NULL);
        if (rc != 0)  /* Found */
                goto found;

        /*
         * Not in the device-tree, let's fall back on the known size
         * list for 16M capable GP & GR
         */
        if (cpu_has_feature(CPU_FTR_16M_PAGE))
                memcpy(mmu_psize_defs, mmu_psize_defaults_gp,
                       sizeof(mmu_psize_defaults_gp));
found:
#ifndef CONFIG_DEBUG_PAGEALLOC
        /*
         * Pick a size for the linear mapping. Currently, we only
         * support 16M, 1M and 4K which is the default
         */
        if (mmu_psize_defs[MMU_PAGE_16M].shift)
                mmu_linear_psize = MMU_PAGE_16M;
        else if (mmu_psize_defs[MMU_PAGE_1M].shift)
                mmu_linear_psize = MMU_PAGE_1M;
#endif /* CONFIG_DEBUG_PAGEALLOC */

#ifdef CONFIG_PPC_64K_PAGES
        /*
         * Pick a size for the ordinary pages. Default is 4K, we support
         * 64K for user mappings and vmalloc if supported by the processor.
         * We only use 64k for ioremap if the processor
         * (and firmware) support cache-inhibited large pages.
         * If not, we use 4k and set mmu_ci_restrictions so that
         * hash_page knows to switch processes that use cache-inhibited
         * mappings to 4k pages.
         */
        if (mmu_psize_defs[MMU_PAGE_64K].shift) {
                mmu_virtual_psize = MMU_PAGE_64K;
                mmu_vmalloc_psize = MMU_PAGE_64K;
                if (mmu_linear_psize == MMU_PAGE_4K)
                        mmu_linear_psize = MMU_PAGE_64K;
                if (cpu_has_feature(CPU_FTR_CI_LARGE_PAGE)) {
                        /*
                         * Don't use 64k pages for ioremap on pSeries, since
                         * that would stop us accessing the HEA ethernet.
                         */
                        if (!machine_is(pseries))
                                mmu_io_psize = MMU_PAGE_64K;
                } else
                        mmu_ci_restrictions = 1;
        }
#endif /* CONFIG_PPC_64K_PAGES */

#ifdef CONFIG_SPARSEMEM_VMEMMAP
        /* We try to use 16M pages for vmemmap if that is supported
         * and we have at least 1G of RAM at boot
         */
        if (mmu_psize_defs[MMU_PAGE_16M].shift &&
            lmb_phys_mem_size() >= 0x40000000)
                mmu_vmemmap_psize = MMU_PAGE_16M;
        else if (mmu_psize_defs[MMU_PAGE_64K].shift)
                mmu_vmemmap_psize = MMU_PAGE_64K;
        else
                mmu_vmemmap_psize = MMU_PAGE_4K;
#endif /* CONFIG_SPARSEMEM_VMEMMAP */

        printk(KERN_DEBUG "Page orders: linear mapping = %d, "
               "virtual = %d, io = %d"
#ifdef CONFIG_SPARSEMEM_VMEMMAP
               ", vmemmap = %d"
#endif
               "\n",
               mmu_psize_defs[mmu_linear_psize].shift,
               mmu_psize_defs[mmu_virtual_psize].shift,
               mmu_psize_defs[mmu_io_psize].shift
#ifdef CONFIG_SPARSEMEM_VMEMMAP
               , mmu_psize_defs[mmu_vmemmap_psize].shift
#endif
               );

#ifdef CONFIG_HUGETLB_PAGE
        /* Reserve 16G huge page memory sections for huge pages */
        of_scan_flat_dt(htab_dt_scan_hugepage_blocks, NULL);

        /* Set default large page size. Currently, we pick 16M or 1M
         * depending on what is available
         */
        if (mmu_psize_defs[MMU_PAGE_16M].shift)
                HPAGE_SHIFT = mmu_psize_defs[MMU_PAGE_16M].shift;
        /* With 4k/4level pagetables, we can't (for now) cope with a
         * huge page size < PMD_SIZE */
        else if (mmu_psize_defs[MMU_PAGE_1M].shift)
                HPAGE_SHIFT = mmu_psize_defs[MMU_PAGE_1M].shift;
#endif /* CONFIG_HUGETLB_PAGE */
}
static int __init htab_dt_scan_pftsize(unsigned long node,
                                       const char *uname, int depth,
                                       void *data)
{
        char *type = of_get_flat_dt_prop(node, "device_type", NULL);
        u32 *prop;

        /* We are scanning "cpu" nodes only */
        if (type == NULL || strcmp(type, "cpu") != 0)
                return 0;

        prop = (u32 *)of_get_flat_dt_prop(node, "ibm,pft-size", NULL);
        if (prop != NULL) {
                /* pft_size[0] is the NUMA CEC cookie */
                ppc64_pft_size = prop[1];
                return 1;
        }
        return 0;
}
static unsigned long __init htab_get_table_size(void)
{
        unsigned long mem_size, rnd_mem_size, pteg_count;

        /* If hash size isn't already provided by the platform, we try to
         * retrieve it from the device-tree. If it's not there either, we
         * calculate it now based on the total RAM size
         */
        if (ppc64_pft_size == 0)
                of_scan_flat_dt(htab_dt_scan_pftsize, NULL);
        if (ppc64_pft_size)
                return 1UL << ppc64_pft_size;

        /* round mem_size up to next power of 2 */
        mem_size = lmb_phys_mem_size();
        rnd_mem_size = 1UL << __ilog2(mem_size);
        if (rnd_mem_size < mem_size)
                rnd_mem_size <<= 1;

        /* # pages / 2 */
        pteg_count = max(rnd_mem_size >> (12 + 1), 1UL << 11);

        return pteg_count << 7;
}
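/* Sizing example: with 1GB of RAM and no "ibm,pft-size" property,
 * pteg_count = max(2^30 >> 13, 2^11) = 2^17 PTEGs.  At 128 bytes per
 * PTEG (8 HPTEs of 16 bytes each) that is 2^17 << 7 = 16MB of hash
 * table, i.e. one PTEG for every two 4K real pages.
 */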
#ifdef CONFIG_MEMORY_HOTPLUG
void create_section_mapping(unsigned long start, unsigned long end)
{
        BUG_ON(htab_bolt_mapping(start, end, __pa(start),
                                 pgprot_val(PAGE_KERNEL), mmu_linear_psize,
                                 mmu_kernel_ssize));
}

int remove_section_mapping(unsigned long start, unsigned long end)
{
        return htab_remove_mapping(start, end, mmu_linear_psize,
                                   mmu_kernel_ssize);
}
#endif /* CONFIG_MEMORY_HOTPLUG */
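/* Patch the instruction at *insn_addr into a "bl" (branch and link)
 * to func.  0x48000001 is an I-form branch with AA=0 and LK=1; the
 * signed 26-bit displacement lives in the LI field (mask
 * 0x03fffffc).  func points at a ppc64 function descriptor, so the
 * actual entry point is read from its first doubleword.
 */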
static inline void make_bl(unsigned int *insn_addr, void *func)
{
        unsigned long funcp = *((unsigned long *)func);
        int offset = funcp - (unsigned long)insn_addr;

        *insn_addr = (unsigned int)(0x48000001 | (offset & 0x03fffffc));
        flush_icache_range((unsigned long)insn_addr, 4 +
                           (unsigned long)insn_addr);
}

static void __init htab_finish_init(void)
{
        extern unsigned int *htab_call_hpte_insert1;
        extern unsigned int *htab_call_hpte_insert2;
        extern unsigned int *htab_call_hpte_remove;
        extern unsigned int *htab_call_hpte_updatepp;

#ifdef CONFIG_PPC_HAS_HASH_64K
        extern unsigned int *ht64_call_hpte_insert1;
        extern unsigned int *ht64_call_hpte_insert2;
        extern unsigned int *ht64_call_hpte_remove;
        extern unsigned int *ht64_call_hpte_updatepp;

        make_bl(ht64_call_hpte_insert1, ppc_md.hpte_insert);
        make_bl(ht64_call_hpte_insert2, ppc_md.hpte_insert);
        make_bl(ht64_call_hpte_remove, ppc_md.hpte_remove);
        make_bl(ht64_call_hpte_updatepp, ppc_md.hpte_updatepp);
#endif /* CONFIG_PPC_HAS_HASH_64K */

        make_bl(htab_call_hpte_insert1, ppc_md.hpte_insert);
        make_bl(htab_call_hpte_insert2, ppc_md.hpte_insert);
        make_bl(htab_call_hpte_remove, ppc_md.hpte_remove);
        make_bl(htab_call_hpte_updatepp, ppc_md.hpte_updatepp);
}
void __init htab_initialize(void)
{
        unsigned long table;
        unsigned long pteg_count;
        unsigned long prot;
        unsigned long base = 0, size = 0, limit;
        int i;

        DBG(" -> htab_initialize()\n");

        /* Initialize segment sizes */
        htab_init_seg_sizes();

        /* Initialize page sizes */
        htab_init_page_sizes();

        if (cpu_has_feature(CPU_FTR_1T_SEGMENT)) {
                mmu_kernel_ssize = MMU_SEGSIZE_1T;
                mmu_highuser_ssize = MMU_SEGSIZE_1T;
                printk(KERN_INFO "Using 1TB segments\n");
        }

        /*
         * Calculate the required size of the htab.  We want the number of
         * PTEGs to equal one half the number of real pages.
         */
        htab_size_bytes = htab_get_table_size();
        pteg_count = htab_size_bytes >> 7;

        htab_hash_mask = pteg_count - 1;

        if (firmware_has_feature(FW_FEATURE_LPAR)) {
                /* Using a hypervisor which owns the htab */
                htab_address = NULL;
                _SDR1 = 0;
        } else {
                /* Find storage for the HPT.  Must be contiguous in
                 * the absolute address space.  On cell we want it to be
                 * in the first 2 Gig so we can use it for IOMMU hacks.
                 */
                if (machine_is(cell))
                        limit = 0x80000000;
                else
                        limit = 0;

                table = lmb_alloc_base(htab_size_bytes, htab_size_bytes, limit);

                DBG("Hash table allocated at %lx, size: %lx\n", table,
                    htab_size_bytes);

                htab_address = abs_to_virt(table);

                /* htab absolute addr + encoded htabsize */
                _SDR1 = table + __ilog2(pteg_count) - 11;

                /* Initialize the HPT with no entries */
                memset((void *)table, 0, htab_size_bytes);

                /* Set SDR1 */
                mtspr(SPRN_SDR1, _SDR1);
        }

        prot = pgprot_val(PAGE_KERNEL);

#ifdef CONFIG_DEBUG_PAGEALLOC
        linear_map_hash_count = lmb_end_of_DRAM() >> PAGE_SHIFT;
        linear_map_hash_slots = __va(lmb_alloc_base(linear_map_hash_count,
                                                    1, lmb.rmo_size));
        memset(linear_map_hash_slots, 0, linear_map_hash_count);
#endif /* CONFIG_DEBUG_PAGEALLOC */

        /* On U3 based machines, we need to reserve the DART area and
         * _NOT_ map it to avoid cache paradoxes as it's remapped non
         * cacheable later on
         */

        /* Create the bolted linear mapping in the hash table */
        for (i = 0; i < lmb.memory.cnt; i++) {
                base = (unsigned long)__va(lmb.memory.region[i].base);
                size = lmb.memory.region[i].size;

                DBG("creating mapping for region: %lx..%lx (prot: %x)\n",
                    base, size, prot);

#ifdef CONFIG_U3_DART
                /* Do not map the DART space. Fortunately, it will be aligned
                 * in such a way that it will not cross two lmb regions and
                 * will fit within a single 16MB page.
                 * The DART space is assumed to be a full 16MB region even if
                 * we only use 2MB of that space. We will use more of it later
                 * for AGP GART. We have to use a full 16MB large page.
                 */
                DBG("DART base: %lx\n", dart_tablebase);

                if (dart_tablebase != 0 && dart_tablebase >= base
                    && dart_tablebase < (base + size)) {
                        unsigned long dart_table_end = dart_tablebase + 16 * MB;
                        if (base != dart_tablebase)
                                BUG_ON(htab_bolt_mapping(base, dart_tablebase,
                                                         __pa(base), prot,
                                                         mmu_linear_psize,
                                                         mmu_kernel_ssize));
                        if ((base + size) > dart_table_end)
                                BUG_ON(htab_bolt_mapping(dart_tablebase + 16 * MB,
                                                         base + size,
                                                         __pa(dart_table_end),
                                                         prot,
                                                         mmu_linear_psize,
                                                         mmu_kernel_ssize));
                        continue;
                }
#endif /* CONFIG_U3_DART */
                BUG_ON(htab_bolt_mapping(base, base + size, __pa(base),
                                         prot, mmu_linear_psize,
                                         mmu_kernel_ssize));
        }

        /*
         * If we have a memory_limit and we've allocated TCEs then we need to
         * explicitly map the TCE area at the top of RAM. We also cope with the
         * case that the TCEs start below memory_limit.
         * tce_alloc_start/end are 16MB aligned so the mapping should work
         * for either 4K or 16MB pages.
         */
        if (tce_alloc_start) {
                tce_alloc_start = (unsigned long)__va(tce_alloc_start);
                tce_alloc_end = (unsigned long)__va(tce_alloc_end);

                if (base + size >= tce_alloc_start)
                        tce_alloc_start = base + size + 1;

                BUG_ON(htab_bolt_mapping(tce_alloc_start, tce_alloc_end,
                                         __pa(tce_alloc_start), prot,
                                         mmu_linear_psize, mmu_kernel_ssize));
        }

        htab_finish_init();

        DBG(" <- htab_initialize()\n");
}
#undef KB
#undef MB
void htab_initialize_secondary(void)
{
        if (!firmware_has_feature(FW_FEATURE_LPAR))
                mtspr(SPRN_SDR1, _SDR1);
}

/*
 * Called by asm hashtable.S for doing lazy icache flush
 */
unsigned int hash_page_do_lazy_icache(unsigned int pp, pte_t pte, int trap)
{
        struct page *page;

        if (!pfn_valid(pte_pfn(pte)))
                return pp;

        page = pte_page(pte);

        /* page is dirty */
        if (!test_bit(PG_arch_1, &page->flags) && !PageReserved(page)) {
                if (trap == 0x400) {
                        __flush_dcache_icache(page_address(page));
                        set_bit(PG_arch_1, &page->flags);
                } else
                        pp |= HPTE_R_N;
        }
        return pp;
}
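/* Each address-space slice records its page size as a 4-bit field in
 * the paca: low_slices_psize holds one nibble per slice below
 * SLICE_LOW_TOP, high_slices_psize one nibble per (much larger)
 * slice above it.
 */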
#ifdef CONFIG_PPC_MM_SLICES
unsigned int get_paca_psize(unsigned long addr)
{
        unsigned long index, slices;

        if (addr < SLICE_LOW_TOP) {
                slices = get_paca()->context.low_slices_psize;
                index = GET_LOW_SLICE_INDEX(addr);
        } else {
                slices = get_paca()->context.high_slices_psize;
                index = GET_HIGH_SLICE_INDEX(addr);
        }
        return (slices >> (index * 4)) & 0xF;
}

#else
unsigned int get_paca_psize(unsigned long addr)
{
        return get_paca()->context.user_psize;
}
#endif
/*
 * Demote a segment to using 4k pages.
 * For now this makes the whole process use 4k pages.
 */
#ifdef CONFIG_PPC_64K_PAGES
void demote_segment_4k(struct mm_struct *mm, unsigned long addr)
{
        if (get_slice_psize(mm, addr) == MMU_PAGE_4K)
                return;
        slice_set_range_psize(mm, addr, 1, MMU_PAGE_4K);
#ifdef CONFIG_SPU_BASE
        spu_flush_all_slbs(mm);
#endif
        if (get_paca_psize(addr) != MMU_PAGE_4K) {
                get_paca()->context = mm->context;
                slb_flush_and_rebolt();
        }
}
#endif /* CONFIG_PPC_64K_PAGES */
#ifdef CONFIG_PPC_SUBPAGE_PROT
/*
 * This looks up a 2-bit protection code for a 4k subpage of a 64k page.
 * Userspace sets the subpage permissions using the subpage_prot system call.
 *
 * Result is 0: full permissions, _PAGE_RW: read-only,
 * _PAGE_USER or _PAGE_USER|_PAGE_RW: no access.
 */
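/*
 * The protection words pack sixteen 2-bit fields, one per 4k subpage
 * of a 64k page, with subpage 0 in the most significant bits: for
 * subpage index i = (ea >> 12) & 0xf, the field sits at bit position
 * 30 - 2*i, which is the shift applied below.
 */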
static int subpage_protection(pgd_t *pgdir, unsigned long ea)
{
        struct subpage_prot_table *spt = pgd_subpage_prot(pgdir);
        u32 spp = 0;
        u32 **sbpm, *sbpp;

        if (ea >= spt->maxaddr)
                return 0;
        if (ea < 0x100000000) {
                /* addresses below 4GB use spt->low_prot */
                sbpm = spt->low_prot;
        } else {
                sbpm = spt->protptrs[ea >> SBP_L3_SHIFT];
                if (!sbpm)
                        return 0;
        }
        sbpp = sbpm[(ea >> SBP_L2_SHIFT) & (SBP_L2_COUNT - 1)];
        if (!sbpp)
                return 0;
        spp = sbpp[(ea >> PAGE_SHIFT) & (SBP_L1_COUNT - 1)];

        /* extract 2-bit bitfield for this 4k subpage */
        spp >>= 30 - 2 * ((ea >> 12) & 0xf);

        /* turn 0,1,2,3 into combination of _PAGE_USER and _PAGE_RW */
        spp = ((spp & 2) ? _PAGE_USER : 0) | ((spp & 1) ? _PAGE_RW : 0);
        return spp;
}

#else /* CONFIG_PPC_SUBPAGE_PROT */
static inline int subpage_protection(pgd_t *pgdir, unsigned long ea)
{
        return 0;
}
#endif
/* Result code is:
 *  0 - handled
 *  1 - normal page fault
 * -1 - critical hash insertion error
 * -2 - access not permitted by subpage protection mechanism
 */
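/* The overall flow is: decode the region to get the VSID and page
 * size, locate the Linux PTE, pre-check the access permissions,
 * handle any segment demotion required by _PAGE_4K_PFN or
 * cache-inhibited mappings, then call the low-level hasher for the
 * final page size.
 */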
int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
{
        void *pgdir;
        unsigned long vsid;
        struct mm_struct *mm;
        pte_t *ptep;
        cpumask_t tmp;
        int rc, user_region = 0, local = 0;
        int psize, ssize;

        DBG_LOW("hash_page(ea=%016lx, access=%lx, trap=%lx\n",
                ea, access, trap);

        if ((ea & ~REGION_MASK) >= PGTABLE_RANGE) {
                DBG_LOW(" out of pgtable range !\n");
                return 1;
        }

        /* Get region & vsid */
        switch (REGION_ID(ea)) {
        case USER_REGION_ID:
                user_region = 1;
                mm = current->mm;
                if (!mm) {
                        DBG_LOW(" user region with no mm !\n");
                        return 1;
                }
                psize = get_slice_psize(mm, ea);
                ssize = user_segment_size(ea);
                vsid = get_vsid(mm->context.id, ea, ssize);
                break;
        case VMALLOC_REGION_ID:
                mm = &init_mm;
                vsid = get_kernel_vsid(ea, mmu_kernel_ssize);
                if (ea < VMALLOC_END)
                        psize = mmu_vmalloc_psize;
                else
                        psize = mmu_io_psize;
                ssize = mmu_kernel_ssize;
                break;
        default:
                /* Not a valid range
                 * Send the problem up to do_page_fault
                 */
                return 1;
        }
        DBG_LOW(" mm=%p, mm->pgdir=%p, vsid=%016lx\n", mm, mm->pgd, vsid);

        /* Get pgdir */
        pgdir = mm->pgd;
        if (pgdir == NULL)
                return 1;

        /* Check CPU locality */
        tmp = cpumask_of_cpu(smp_processor_id());
        if (user_region && cpus_equal(mm->cpu_vm_mask, tmp))
                local = 1;

#ifdef CONFIG_HUGETLB_PAGE
        /* Handle hugepage regions */
        if (HPAGE_SHIFT && mmu_huge_psizes[psize]) {
                DBG_LOW(" -> huge page !\n");
                return hash_huge_page(mm, access, ea, vsid, local, trap);
        }
#endif /* CONFIG_HUGETLB_PAGE */

#ifndef CONFIG_PPC_64K_PAGES
        /* If we use 4K pages and our psize is not 4K, then we are hitting
         * a special driver mapping, we need to align the address before
         * we fetch the PTE
         */
        if (psize != MMU_PAGE_4K)
                ea &= ~((1ul << mmu_psize_defs[psize].shift) - 1);
#endif /* CONFIG_PPC_64K_PAGES */

        /* Get PTE and page size from page tables */
        ptep = find_linux_pte(pgdir, ea);
        if (ptep == NULL || !pte_present(*ptep)) {
                DBG_LOW(" no PTE !\n");
                return 1;
        }

#ifndef CONFIG_PPC_64K_PAGES
        DBG_LOW(" i-pte: %016lx\n", pte_val(*ptep));
#else
        DBG_LOW(" i-pte: %016lx %016lx\n", pte_val(*ptep),
                pte_val(*(ptep + PTRS_PER_PTE)));
#endif
        /* Pre-check access permissions (will be re-checked atomically
         * in __hash_page_XX but this pre-check is a fast path)
         */
        if (access & ~pte_val(*ptep)) {
                DBG_LOW(" no access !\n");
                return 1;
        }

        /* Do actual hashing */
#ifdef CONFIG_PPC_64K_PAGES
        /* If _PAGE_4K_PFN is set, make sure this is a 4k segment */
        if ((pte_val(*ptep) & _PAGE_4K_PFN) && psize == MMU_PAGE_64K) {
                demote_segment_4k(mm, ea);
                psize = MMU_PAGE_4K;
        }

        /* If this PTE is non-cacheable and we have restrictions on
         * using non cacheable large pages, then we switch to 4k
         */
        if (mmu_ci_restrictions && psize == MMU_PAGE_64K &&
            (pte_val(*ptep) & _PAGE_NO_CACHE)) {
                if (user_region) {
                        demote_segment_4k(mm, ea);
                        psize = MMU_PAGE_4K;
                } else if (ea < VMALLOC_END) {
                        /*
                         * some driver did a non-cacheable mapping
                         * in vmalloc space, so switch vmalloc
                         * to 4k pages
                         */
                        printk(KERN_ALERT "Reducing vmalloc segment "
                               "to 4kB pages because of "
                               "non-cacheable mapping\n");
                        psize = mmu_vmalloc_psize = MMU_PAGE_4K;
#ifdef CONFIG_SPU_BASE
                        spu_flush_all_slbs(mm);
#endif
                }
        }
        if (user_region) {
                if (psize != get_paca_psize(ea)) {
                        get_paca()->context = mm->context;
                        slb_flush_and_rebolt();
                }
        } else if (get_paca()->vmalloc_sllp !=
                   mmu_psize_defs[mmu_vmalloc_psize].sllp) {
                get_paca()->vmalloc_sllp =
                        mmu_psize_defs[mmu_vmalloc_psize].sllp;
                slb_vmalloc_update();
        }
#endif /* CONFIG_PPC_64K_PAGES */

#ifdef CONFIG_PPC_HAS_HASH_64K
        if (psize == MMU_PAGE_64K)
                rc = __hash_page_64K(ea, access, vsid, ptep, trap, local, ssize);
        else
#endif /* CONFIG_PPC_HAS_HASH_64K */
        {
                int spp = subpage_protection(pgdir, ea);
                if (access & spp)
                        rc = -2;
                else
                        rc = __hash_page_4K(ea, access, vsid, ptep, trap,
                                            local, ssize, spp);
        }

#ifndef CONFIG_PPC_64K_PAGES
        DBG_LOW(" o-pte: %016lx\n", pte_val(*ptep));
#else
        DBG_LOW(" o-pte: %016lx %016lx\n", pte_val(*ptep),
                pte_val(*(ptep + PTRS_PER_PTE)));
#endif
        DBG_LOW(" -> rc=%d\n", rc);
        return rc;
}
EXPORT_SYMBOL_GPL(hash_page);
void hash_preload(struct mm_struct *mm, unsigned long ea,
                  unsigned long access, unsigned long trap)
{
        unsigned long vsid;
        void *pgdir;
        pte_t *ptep;
        cpumask_t mask;
        unsigned long flags;
        int local = 0;
        int ssize;

        BUG_ON(REGION_ID(ea) != USER_REGION_ID);

#ifdef CONFIG_PPC_MM_SLICES
        /* We only prefault standard pages for now */
        if (unlikely(get_slice_psize(mm, ea) != mm->context.user_psize))
                return;
#endif

        DBG_LOW("hash_preload(mm=%p, mm->pgdir=%p, ea=%016lx, access=%lx,"
                " trap=%lx\n", mm, mm->pgd, ea, access, trap);

        /* Get Linux PTE if available */
        pgdir = mm->pgd;
        if (pgdir == NULL)
                return;
        ptep = find_linux_pte(pgdir, ea);
        if (!ptep)
                return;

#ifdef CONFIG_PPC_64K_PAGES
        /* If either _PAGE_4K_PFN or _PAGE_NO_CACHE is set (and we are on
         * a 64K kernel), then we don't preload, hash_page() will take
         * care of it once we actually try to access the page.
         * That way we don't have to duplicate all of the logic for segment
         * page size demotion here
         */
        if (pte_val(*ptep) & (_PAGE_4K_PFN | _PAGE_NO_CACHE))
                return;
#endif /* CONFIG_PPC_64K_PAGES */

        /* Get VSID */
        ssize = user_segment_size(ea);
        vsid = get_vsid(mm->context.id, ea, ssize);

        /* Hash doesn't like irqs */
        local_irq_save(flags);

        /* Is that local to this CPU ? */
        mask = cpumask_of_cpu(smp_processor_id());
        if (cpus_equal(mm->cpu_vm_mask, mask))
                local = 1;

        /* Hash it in */
#ifdef CONFIG_PPC_HAS_HASH_64K
        if (mm->context.user_psize == MMU_PAGE_64K)
                __hash_page_64K(ea, access, vsid, ptep, trap, local, ssize);
        else
#endif /* CONFIG_PPC_HAS_HASH_64K */
                __hash_page_4K(ea, access, vsid, ptep, trap, local, ssize,
                               subpage_protection(pgdir, ea));

        local_irq_restore(flags);
}
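/* The hidx value recorded for each hashed subpage tells us which
 * slot the low-level code used: _PTEIDX_GROUP_IX is the index within
 * the PTEG, and _PTEIDX_SECONDARY indicates the entry went into the
 * secondary bucket, whose hash is the one's complement of the
 * primary hash.
 */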
/* WARNING: This is called from hash_low_64.S, if you change this prototype,
 *          do not forget to update the assembly call site !
 */
void flush_hash_page(unsigned long va, real_pte_t pte, int psize, int ssize,
                     int local)
{
        unsigned long hash, index, shift, hidx, slot;

        DBG_LOW("flush_hash_page(va=%016x)\n", va);
        pte_iterate_hashed_subpages(pte, psize, va, index, shift) {
                hash = hpt_hash(va, shift, ssize);
                hidx = __rpte_to_hidx(pte, index);
                if (hidx & _PTEIDX_SECONDARY)
                        hash = ~hash;
                slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
                slot += hidx & _PTEIDX_GROUP_IX;
                DBG_LOW(" sub %d: hash=%x, hidx=%x\n", index, slot, hidx);
                ppc_md.hpte_invalidate(slot, va, psize, ssize, local);
        } pte_iterate_hashed_end();
}
void flush_hash_range(unsigned long number, int local)
{
        if (ppc_md.flush_hash_range)
                ppc_md.flush_hash_range(number, local);
        else {
                int i;
                struct ppc64_tlb_batch *batch =
                        &__get_cpu_var(ppc64_tlb_batch);

                for (i = 0; i < number; i++)
                        flush_hash_page(batch->vaddr[i], batch->pte[i],
                                        batch->psize, batch->ssize, local);
        }
}
/*
 * low_hash_fault is called when the low level hash code failed
 * to insert a PTE due to a hypervisor error
 */
void low_hash_fault(struct pt_regs *regs, unsigned long address, int rc)
{
        if (user_mode(regs)) {
#ifdef CONFIG_PPC_SUBPAGE_PROT
                if (rc == -2)
                        _exception(SIGSEGV, regs, SEGV_ACCERR, address);
                else
#endif
                        _exception(SIGBUS, regs, BUS_ADRERR, address);
        } else
                bad_page_fault(regs, address, SIGBUS);
}

#ifdef CONFIG_DEBUG_PAGEALLOC
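/* For CONFIG_DEBUG_PAGEALLOC we track, per page of the linear
 * mapping, whether it is currently hashed in and where: bit 0x80 of
 * linear_map_hash_slots[] means "mapped", and the low 7 bits store
 * the hidx (group index plus secondary-hash flag) returned by
 * hpte_insert.
 */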
static void kernel_map_linear_page(unsigned long vaddr, unsigned long lmi)
{
        unsigned long hash, hpteg;
        unsigned long vsid = get_kernel_vsid(vaddr, mmu_kernel_ssize);
        unsigned long va = hpt_va(vaddr, vsid, mmu_kernel_ssize);
        unsigned long mode = htab_convert_pte_flags(PAGE_KERNEL);
        int ret;

        hash = hpt_hash(va, PAGE_SHIFT, mmu_kernel_ssize);
        hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP);

        ret = ppc_md.hpte_insert(hpteg, va, __pa(vaddr),
                                 mode, HPTE_V_BOLTED,
                                 mmu_linear_psize, mmu_kernel_ssize);
        BUG_ON(ret < 0);
        spin_lock(&linear_map_hash_lock);
        BUG_ON(linear_map_hash_slots[lmi] & 0x80);
        linear_map_hash_slots[lmi] = ret | 0x80;
        spin_unlock(&linear_map_hash_lock);
}

static void kernel_unmap_linear_page(unsigned long vaddr, unsigned long lmi)
{
        unsigned long hash, hidx, slot;
        unsigned long vsid = get_kernel_vsid(vaddr, mmu_kernel_ssize);
        unsigned long va = hpt_va(vaddr, vsid, mmu_kernel_ssize);

        hash = hpt_hash(va, PAGE_SHIFT, mmu_kernel_ssize);
        spin_lock(&linear_map_hash_lock);
        BUG_ON(!(linear_map_hash_slots[lmi] & 0x80));
        hidx = linear_map_hash_slots[lmi] & 0x7f;
        linear_map_hash_slots[lmi] = 0;
        spin_unlock(&linear_map_hash_lock);
        if (hidx & _PTEIDX_SECONDARY)
                hash = ~hash;
        slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
        slot += hidx & _PTEIDX_GROUP_IX;
        ppc_md.hpte_invalidate(slot, va, mmu_linear_psize, mmu_kernel_ssize, 0);
}

void kernel_map_pages(struct page *page, int numpages, int enable)
{
        unsigned long flags, vaddr, lmi;
        int i;

        local_irq_save(flags);
        for (i = 0; i < numpages; i++, page++) {
                vaddr = (unsigned long)page_address(page);
                lmi = __pa(vaddr) >> PAGE_SHIFT;
                if (lmi >= linear_map_hash_count)
                        continue;
                if (enable)
                        kernel_map_linear_page(vaddr, lmi);
                else
                        kernel_unmap_linear_page(vaddr, lmi);
        }
        local_irq_restore(flags);
}
#endif /* CONFIG_DEBUG_PAGEALLOC */