hash_utils_64.c

/*
 * PowerPC64 port by Mike Corrigan and Dave Engebretsen
 *   {mikejc|engebret}@us.ibm.com
 *
 *    Copyright (c) 2000 Mike Corrigan <mikejc@us.ibm.com>
 *
 * SMP scalability work:
 *    Copyright (C) 2001 Anton Blanchard <anton@au.ibm.com>, IBM
 *
 *    Module name: htab.c
 *
 *    Description:
 *      PowerPC Hashed Page Table functions
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#undef DEBUG
#undef DEBUG_LOW

#include <linux/spinlock.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/proc_fs.h>
#include <linux/stat.h>
#include <linux/sysctl.h>
#include <linux/ctype.h>
#include <linux/cache.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/lmb.h>

#include <asm/processor.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>
#include <asm/page.h>
#include <asm/types.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/machdep.h>
#include <asm/prom.h>
#include <asm/abs_addr.h>
#include <asm/tlbflush.h>
#include <asm/io.h>
#include <asm/eeh.h>
#include <asm/tlb.h>
#include <asm/cacheflush.h>
#include <asm/cputable.h>
#include <asm/sections.h>
#include <asm/spu.h>
#include <asm/udbg.h>

#ifdef DEBUG
#define DBG(fmt...) udbg_printf(fmt)
#else
#define DBG(fmt...)
#endif

#ifdef DEBUG_LOW
#define DBG_LOW(fmt...) udbg_printf(fmt)
#else
#define DBG_LOW(fmt...)
#endif

#define KB (1024)
#define MB (1024*KB)
/*
 * Note:  pte   --> Linux PTE
 *        HPTE  --> PowerPC Hashed Page Table Entry
 *
 * Execution context:
 *   htab_initialize is called with the MMU off (of course), but
 *   the kernel has been copied down to zero so it can directly
 *   reference global data.  At this point it is very difficult
 *   to print debug info.
 */
#ifdef CONFIG_U3_DART
extern unsigned long dart_tablebase;
#endif /* CONFIG_U3_DART */

static unsigned long _SDR1;
struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT];

struct hash_pte *htab_address;
unsigned long htab_size_bytes;
unsigned long htab_hash_mask;
int mmu_linear_psize = MMU_PAGE_4K;
int mmu_virtual_psize = MMU_PAGE_4K;
int mmu_vmalloc_psize = MMU_PAGE_4K;
int mmu_io_psize = MMU_PAGE_4K;
int mmu_kernel_ssize = MMU_SEGSIZE_256M;
int mmu_highuser_ssize = MMU_SEGSIZE_256M;
u16 mmu_slb_size = 64;
#ifdef CONFIG_HUGETLB_PAGE
int mmu_huge_psize = MMU_PAGE_16M;
unsigned int HPAGE_SHIFT;
#endif
#ifdef CONFIG_PPC_64K_PAGES
int mmu_ci_restrictions;
#endif
#ifdef CONFIG_DEBUG_PAGEALLOC
static u8 *linear_map_hash_slots;
static unsigned long linear_map_hash_count;
static DEFINE_SPINLOCK(linear_map_hash_lock);
#endif /* CONFIG_DEBUG_PAGEALLOC */
/* These are definitions of page-size arrays to be used when none is
 * provided by the firmware.
 */

/* Pre-POWER4 CPUs (4k pages only)
 */
struct mmu_psize_def mmu_psize_defaults_old[] = {
        [MMU_PAGE_4K] = {
                .shift  = 12,
                .sllp   = 0,
                .penc   = 0,
                .avpnm  = 0,
                .tlbiel = 0,
        },
};

/* POWER4, GPUL, POWER5
 *
 * Support for 16MB large pages
 */
struct mmu_psize_def mmu_psize_defaults_gp[] = {
        [MMU_PAGE_4K] = {
                .shift  = 12,
                .sllp   = 0,
                .penc   = 0,
                .avpnm  = 0,
                .tlbiel = 1,
        },
        [MMU_PAGE_16M] = {
                .shift  = 24,
                .sllp   = SLB_VSID_L,
                .penc   = 0,
                .avpnm  = 0x1UL,
                .tlbiel = 0,
        },
};
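
/*
 * Create bolted (HPTE_V_BOLTED) hash table entries mapping the virtual
 * range [vstart, vend) to physical addresses starting at pstart, one
 * page of the given psize per iteration.  Pages outside kernel text are
 * marked no-execute (HPTE_R_N).  Insertion goes through the
 * platform-specific ppc_md.hpte_insert hook.
 */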
int htab_bolt_mapping(unsigned long vstart, unsigned long vend,
                      unsigned long pstart, unsigned long mode,
                      int psize, int ssize)
{
        unsigned long vaddr, paddr;
        unsigned int step, shift;
        unsigned long tmp_mode;
        int ret = 0;

        shift = mmu_psize_defs[psize].shift;
        step = 1 << shift;

        for (vaddr = vstart, paddr = pstart; vaddr < vend;
             vaddr += step, paddr += step) {
                unsigned long hash, hpteg;
                unsigned long vsid = get_kernel_vsid(vaddr, ssize);
                unsigned long va = hpt_va(vaddr, vsid, ssize);

                tmp_mode = mode;

                /* Make non-kernel text non-executable */
                if (!in_kernel_text(vaddr))
                        tmp_mode = mode | HPTE_R_N;

                hash = hpt_hash(va, shift, ssize);
                hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP);

                DBG("htab_bolt_mapping: calling %p\n", ppc_md.hpte_insert);

                BUG_ON(!ppc_md.hpte_insert);
                ret = ppc_md.hpte_insert(hpteg, va, paddr,
                                tmp_mode, HPTE_V_BOLTED, psize, ssize);

                if (ret < 0)
                        break;
#ifdef CONFIG_DEBUG_PAGEALLOC
                if ((paddr >> PAGE_SHIFT) < linear_map_hash_count)
                        linear_map_hash_slots[paddr >> PAGE_SHIFT] = ret | 0x80;
#endif /* CONFIG_DEBUG_PAGEALLOC */
        }
        return ret < 0 ? ret : 0;
}
#ifdef CONFIG_MEMORY_HOTPLUG
static int htab_remove_mapping(unsigned long vstart, unsigned long vend,
                      int psize, int ssize)
{
        unsigned long vaddr;
        unsigned int step, shift;

        shift = mmu_psize_defs[psize].shift;
        step = 1 << shift;

        if (!ppc_md.hpte_removebolted) {
                printk(KERN_WARNING "Platform doesn't implement "
                                "hpte_removebolted\n");
                return -EINVAL;
        }

        for (vaddr = vstart; vaddr < vend; vaddr += step)
                ppc_md.hpte_removebolted(vaddr, psize, ssize);

        return 0;
}
#endif /* CONFIG_MEMORY_HOTPLUG */
static int __init htab_dt_scan_seg_sizes(unsigned long node,
                                         const char *uname, int depth,
                                         void *data)
{
        char *type = of_get_flat_dt_prop(node, "device_type", NULL);
        u32 *prop;
        unsigned long size = 0;

        /* We are scanning "cpu" nodes only */
        if (type == NULL || strcmp(type, "cpu") != 0)
                return 0;

        prop = (u32 *)of_get_flat_dt_prop(node, "ibm,processor-segment-sizes",
                                          &size);
        if (prop == NULL)
                return 0;
        for (; size >= 4; size -= 4, ++prop) {
                if (prop[0] == 40) {
                        DBG("1T segment support detected\n");
                        cur_cpu_spec->cpu_features |= CPU_FTR_1T_SEGMENT;
                        return 1;
                }
        }
        cur_cpu_spec->cpu_features &= ~CPU_FTR_NO_SLBIE_B;
        return 0;
}

static void __init htab_init_seg_sizes(void)
{
        of_scan_flat_dt(htab_dt_scan_seg_sizes, NULL);
}
static int __init htab_dt_scan_page_sizes(unsigned long node,
                                          const char *uname, int depth,
                                          void *data)
{
        char *type = of_get_flat_dt_prop(node, "device_type", NULL);
        u32 *prop;
        unsigned long size = 0;

        /* We are scanning "cpu" nodes only */
        if (type == NULL || strcmp(type, "cpu") != 0)
                return 0;

        prop = (u32 *)of_get_flat_dt_prop(node,
                                          "ibm,segment-page-sizes", &size);
        if (prop != NULL) {
                DBG("Page sizes from device-tree:\n");
                size /= 4;
                cur_cpu_spec->cpu_features &= ~(CPU_FTR_16M_PAGE);
                while(size > 0) {
                        unsigned int shift = prop[0];
                        unsigned int slbenc = prop[1];
                        unsigned int lpnum = prop[2];
                        unsigned int lpenc = 0;
                        struct mmu_psize_def *def;
                        int idx = -1;

                        size -= 3; prop += 3;
                        while(size > 0 && lpnum) {
                                if (prop[0] == shift)
                                        lpenc = prop[1];
                                prop += 2; size -= 2;
                                lpnum--;
                        }
                        switch(shift) {
                        case 0xc:
                                idx = MMU_PAGE_4K;
                                break;
                        case 0x10:
                                idx = MMU_PAGE_64K;
                                break;
                        case 0x14:
                                idx = MMU_PAGE_1M;
                                break;
                        case 0x18:
                                idx = MMU_PAGE_16M;
                                cur_cpu_spec->cpu_features |= CPU_FTR_16M_PAGE;
                                break;
                        case 0x22:
                                idx = MMU_PAGE_16G;
                                break;
                        }
                        if (idx < 0)
                                continue;
                        def = &mmu_psize_defs[idx];
                        def->shift = shift;
                        if (shift <= 23)
                                def->avpnm = 0;
                        else
                                def->avpnm = (1 << (shift - 23)) - 1;
                        def->sllp = slbenc;
                        def->penc = lpenc;
                        /* We don't know for sure what's up with tlbiel, so
                         * for now we only set it for 4K and 64K pages
                         */
                        if (idx == MMU_PAGE_4K || idx == MMU_PAGE_64K)
                                def->tlbiel = 1;
                        else
                                def->tlbiel = 0;

                        DBG(" %d: shift=%02x, sllp=%04x, avpnm=%08x, "
                            "tlbiel=%d, penc=%d\n",
                            idx, shift, def->sllp, def->avpnm, def->tlbiel,
                            def->penc);
                }
                return 1;
        }
        return 0;
}
static void __init htab_init_page_sizes(void)
{
        int rc;

        /* Default to 4K pages only */
        memcpy(mmu_psize_defs, mmu_psize_defaults_old,
               sizeof(mmu_psize_defaults_old));

        /*
         * Try to find the available page sizes in the device-tree
         */
        rc = of_scan_flat_dt(htab_dt_scan_page_sizes, NULL);
        if (rc != 0)  /* Found */
                goto found;

        /*
         * Not in the device-tree, let's fall back on the known size
         * list for 16M capable GP & GR
         */
        if (cpu_has_feature(CPU_FTR_16M_PAGE))
                memcpy(mmu_psize_defs, mmu_psize_defaults_gp,
                       sizeof(mmu_psize_defaults_gp));
 found:
#ifndef CONFIG_DEBUG_PAGEALLOC
        /*
         * Pick a size for the linear mapping. Currently, we only support
         * 16M, 1M and 4K which is the default
         */
        if (mmu_psize_defs[MMU_PAGE_16M].shift)
                mmu_linear_psize = MMU_PAGE_16M;
        else if (mmu_psize_defs[MMU_PAGE_1M].shift)
                mmu_linear_psize = MMU_PAGE_1M;
#endif /* CONFIG_DEBUG_PAGEALLOC */

#ifdef CONFIG_PPC_64K_PAGES
        /*
         * Pick a size for the ordinary pages. Default is 4K, we support
         * 64K for user mappings and vmalloc if supported by the processor.
         * We only use 64k for ioremap if the processor
         * (and firmware) support cache-inhibited large pages.
         * If not, we use 4k and set mmu_ci_restrictions so that
         * hash_page knows to switch processes that use cache-inhibited
         * mappings to 4k pages.
         */
        if (mmu_psize_defs[MMU_PAGE_64K].shift) {
                mmu_virtual_psize = MMU_PAGE_64K;
                mmu_vmalloc_psize = MMU_PAGE_64K;
                if (mmu_linear_psize == MMU_PAGE_4K)
                        mmu_linear_psize = MMU_PAGE_64K;
                if (cpu_has_feature(CPU_FTR_CI_LARGE_PAGE)) {
                        /*
                         * Don't use 64k pages for ioremap on pSeries, since
                         * that would stop us accessing the HEA ethernet.
                         */
                        if (!machine_is(pseries))
                                mmu_io_psize = MMU_PAGE_64K;
                } else
                        mmu_ci_restrictions = 1;
        }
#endif /* CONFIG_PPC_64K_PAGES */

        printk(KERN_DEBUG "Page orders: linear mapping = %d, "
               "virtual = %d, io = %d\n",
               mmu_psize_defs[mmu_linear_psize].shift,
               mmu_psize_defs[mmu_virtual_psize].shift,
               mmu_psize_defs[mmu_io_psize].shift);

#ifdef CONFIG_HUGETLB_PAGE
        /* Init large page size. Currently, we pick 16M or 1M depending
         * on what is available
         */
        if (mmu_psize_defs[MMU_PAGE_16M].shift)
                set_huge_psize(MMU_PAGE_16M);
        /* With 4k/4-level pagetables, we can't (for now) cope with a
         * huge page size < PMD_SIZE */
        else if (mmu_psize_defs[MMU_PAGE_1M].shift)
                set_huge_psize(MMU_PAGE_1M);
#endif /* CONFIG_HUGETLB_PAGE */
}
static int __init htab_dt_scan_pftsize(unsigned long node,
                                       const char *uname, int depth,
                                       void *data)
{
        char *type = of_get_flat_dt_prop(node, "device_type", NULL);
        u32 *prop;

        /* We are scanning "cpu" nodes only */
        if (type == NULL || strcmp(type, "cpu") != 0)
                return 0;

        prop = (u32 *)of_get_flat_dt_prop(node, "ibm,pft-size", NULL);
        if (prop != NULL) {
                /* pft_size[0] is the NUMA CEC cookie */
                ppc64_pft_size = prop[1];
                return 1;
        }
        return 0;
}
static unsigned long __init htab_get_table_size(void)
{
        unsigned long mem_size, rnd_mem_size, pteg_count;

        /* If hash size isn't already provided by the platform, we try to
         * retrieve it from the device-tree. If it's not there either, we
         * calculate it now based on the total RAM size
         */
        if (ppc64_pft_size == 0)
                of_scan_flat_dt(htab_dt_scan_pftsize, NULL);
        if (ppc64_pft_size)
                return 1UL << ppc64_pft_size;

        /* round mem_size up to next power of 2 */
        mem_size = lmb_phys_mem_size();
        rnd_mem_size = 1UL << __ilog2(mem_size);
        if (rnd_mem_size < mem_size)
                rnd_mem_size <<= 1;

        /* # pages / 2 */
        pteg_count = max(rnd_mem_size >> (12 + 1), 1UL << 11);

        return pteg_count << 7;
}
#ifdef CONFIG_MEMORY_HOTPLUG
void create_section_mapping(unsigned long start, unsigned long end)
{
        BUG_ON(htab_bolt_mapping(start, end, __pa(start),
                _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_COHERENT | PP_RWXX,
                mmu_linear_psize, mmu_kernel_ssize));
}

int remove_section_mapping(unsigned long start, unsigned long end)
{
        return htab_remove_mapping(start, end, mmu_linear_psize,
                                   mmu_kernel_ssize);
}
#endif /* CONFIG_MEMORY_HOTPLUG */
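
/*
 * make_bl() patches the instruction at insn_addr into a "bl" (branch and
 * link) to func: 0x48000001 is the I-form branch opcode with LK=1, and
 * the 0x03fffffc mask keeps the word-aligned signed displacement field.
 * func is dereferenced because 64-bit function pointers refer to function
 * descriptors whose first word holds the actual entry address.
 */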
static inline void make_bl(unsigned int *insn_addr, void *func)
{
        unsigned long funcp = *((unsigned long *)func);
        int offset = funcp - (unsigned long)insn_addr;

        *insn_addr = (unsigned int)(0x48000001 | (offset & 0x03fffffc));
        flush_icache_range((unsigned long)insn_addr,
                           (unsigned long)insn_addr + 4);
}
static void __init htab_finish_init(void)
{
        extern unsigned int *htab_call_hpte_insert1;
        extern unsigned int *htab_call_hpte_insert2;
        extern unsigned int *htab_call_hpte_remove;
        extern unsigned int *htab_call_hpte_updatepp;

#ifdef CONFIG_PPC_HAS_HASH_64K
        extern unsigned int *ht64_call_hpte_insert1;
        extern unsigned int *ht64_call_hpte_insert2;
        extern unsigned int *ht64_call_hpte_remove;
        extern unsigned int *ht64_call_hpte_updatepp;

        make_bl(ht64_call_hpte_insert1, ppc_md.hpte_insert);
        make_bl(ht64_call_hpte_insert2, ppc_md.hpte_insert);
        make_bl(ht64_call_hpte_remove, ppc_md.hpte_remove);
        make_bl(ht64_call_hpte_updatepp, ppc_md.hpte_updatepp);
#endif /* CONFIG_PPC_HAS_HASH_64K */

        make_bl(htab_call_hpte_insert1, ppc_md.hpte_insert);
        make_bl(htab_call_hpte_insert2, ppc_md.hpte_insert);
        make_bl(htab_call_hpte_remove, ppc_md.hpte_remove);
        make_bl(htab_call_hpte_updatepp, ppc_md.hpte_updatepp);
}
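
/*
 * Boot-time hash MMU setup, in order: determine segment and page sizes,
 * size and (unless a hypervisor owns the table) allocate the hash table
 * and set SDR1, bolt the linear mapping for every lmb memory region
 * (skipping the DART on U3 machines), and map any TCE area that sits
 * above memory_limit.
 */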
void __init htab_initialize(void)
{
        unsigned long table;
        unsigned long pteg_count;
        unsigned long mode_rw;
        unsigned long base = 0, size = 0, limit;
        int i;

        extern unsigned long tce_alloc_start, tce_alloc_end;

        DBG(" -> htab_initialize()\n");

        /* Initialize segment sizes */
        htab_init_seg_sizes();

        /* Initialize page sizes */
        htab_init_page_sizes();

        if (cpu_has_feature(CPU_FTR_1T_SEGMENT)) {
                mmu_kernel_ssize = MMU_SEGSIZE_1T;
                mmu_highuser_ssize = MMU_SEGSIZE_1T;
                printk(KERN_INFO "Using 1TB segments\n");
        }

        /*
         * Calculate the required size of the htab.  We want the number of
         * PTEGs to equal one half the number of real pages.
         */
        htab_size_bytes = htab_get_table_size();
        pteg_count = htab_size_bytes >> 7;

        htab_hash_mask = pteg_count - 1;

        if (firmware_has_feature(FW_FEATURE_LPAR)) {
                /* Using a hypervisor which owns the htab */
                htab_address = NULL;
                _SDR1 = 0;
        } else {
                /* Find storage for the HPT.  Must be contiguous in
                 * the absolute address space.  On cell we want it to be
                 * in the first 2 Gig so we can use it for IOMMU hacks.
                 */
                if (machine_is(cell))
                        limit = 0x80000000;
                else
                        limit = 0;

                table = lmb_alloc_base(htab_size_bytes, htab_size_bytes, limit);

                DBG("Hash table allocated at %lx, size: %lx\n", table,
                    htab_size_bytes);

                htab_address = abs_to_virt(table);

                /* htab absolute addr + encoded htabsize */
                _SDR1 = table + __ilog2(pteg_count) - 11;

                /* Initialize the HPT with no entries */
                memset((void *)table, 0, htab_size_bytes);

                /* Set SDR1 */
                mtspr(SPRN_SDR1, _SDR1);
        }

        mode_rw = _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_COHERENT | PP_RWXX;

#ifdef CONFIG_DEBUG_PAGEALLOC
        linear_map_hash_count = lmb_end_of_DRAM() >> PAGE_SHIFT;
        linear_map_hash_slots = __va(lmb_alloc_base(linear_map_hash_count,
                                                    1, lmb.rmo_size));
        memset(linear_map_hash_slots, 0, linear_map_hash_count);
#endif /* CONFIG_DEBUG_PAGEALLOC */

        /* On U3 based machines, we need to reserve the DART area and
         * _NOT_ map it to avoid cache paradoxes as it's remapped
         * non-cacheable later on
         */

        /* create the bolted linear mapping in the hash table */
        for (i=0; i < lmb.memory.cnt; i++) {
                base = (unsigned long)__va(lmb.memory.region[i].base);
                size = lmb.memory.region[i].size;

                DBG("creating mapping for region: %lx : %lx\n", base, size);

#ifdef CONFIG_U3_DART
                /* Do not map the DART space.  Fortunately, it will be aligned
                 * in such a way that it will not cross two lmb regions and
                 * will fit within a single 16MB page.
                 * The DART space is assumed to be a full 16MB region even if
                 * we only use 2MB of that space.  We will use more of it
                 * later for AGP GART.  We have to use a full 16MB large page.
                 */
                DBG("DART base: %lx\n", dart_tablebase);

                if (dart_tablebase != 0 && dart_tablebase >= base
                    && dart_tablebase < (base + size)) {
                        unsigned long dart_table_end = dart_tablebase + 16 * MB;
                        if (base != dart_tablebase)
                                BUG_ON(htab_bolt_mapping(base, dart_tablebase,
                                                         __pa(base), mode_rw,
                                                         mmu_linear_psize,
                                                         mmu_kernel_ssize));
                        if ((base + size) > dart_table_end)
                                BUG_ON(htab_bolt_mapping(dart_tablebase+16*MB,
                                                         base + size,
                                                         __pa(dart_table_end),
                                                         mode_rw,
                                                         mmu_linear_psize,
                                                         mmu_kernel_ssize));
                        continue;
                }
#endif /* CONFIG_U3_DART */
                BUG_ON(htab_bolt_mapping(base, base + size, __pa(base),
                                mode_rw, mmu_linear_psize, mmu_kernel_ssize));
        }

        /*
         * If we have a memory_limit and we've allocated TCEs then we need to
         * explicitly map the TCE area at the top of RAM.  We also cope with
         * the case that the TCEs start below memory_limit.
         * tce_alloc_start/end are 16MB aligned so the mapping should work
         * for either 4K or 16MB pages.
         */
        if (tce_alloc_start) {
                tce_alloc_start = (unsigned long)__va(tce_alloc_start);
                tce_alloc_end = (unsigned long)__va(tce_alloc_end);

                if (base + size >= tce_alloc_start)
                        tce_alloc_start = base + size + 1;

                BUG_ON(htab_bolt_mapping(tce_alloc_start, tce_alloc_end,
                                         __pa(tce_alloc_start), mode_rw,
                                         mmu_linear_psize, mmu_kernel_ssize));
        }

        htab_finish_init();

        DBG(" <- htab_initialize()\n");
}
#undef KB
#undef MB

void htab_initialize_secondary(void)
{
        if (!firmware_has_feature(FW_FEATURE_LPAR))
                mtspr(SPRN_SDR1, _SDR1);
}
/*
 * Called by asm hashtable.S for doing lazy icache flush
 */
unsigned int hash_page_do_lazy_icache(unsigned int pp, pte_t pte, int trap)
{
        struct page *page;

        if (!pfn_valid(pte_pfn(pte)))
                return pp;

        page = pte_page(pte);

        /* page is dirty */
        if (!test_bit(PG_arch_1, &page->flags) && !PageReserved(page)) {
                if (trap == 0x400) {
                        __flush_dcache_icache(page_address(page));
                        set_bit(PG_arch_1, &page->flags);
                } else
                        pp |= HPTE_R_N;
        }
        return pp;
}
/*
 * Demote a segment to using 4k pages.
 * For now this makes the whole process use 4k pages.
 */
#ifdef CONFIG_PPC_64K_PAGES
void demote_segment_4k(struct mm_struct *mm, unsigned long addr)
{
        if (mm->context.user_psize == MMU_PAGE_4K)
                return;
        slice_set_user_psize(mm, MMU_PAGE_4K);
#ifdef CONFIG_SPU_BASE
        spu_flush_all_slbs(mm);
#endif
        if (get_paca()->context.user_psize != MMU_PAGE_4K) {
                get_paca()->context = mm->context;
                slb_flush_and_rebolt();
        }
}
#endif /* CONFIG_PPC_64K_PAGES */
#ifdef CONFIG_PPC_SUBPAGE_PROT
/*
 * This looks up a 2-bit protection code for a 4k subpage of a 64k page.
 * Userspace sets the subpage permissions using the subpage_prot system call.
 *
 * Result is 0: full permissions, _PAGE_RW: read-only,
 * _PAGE_USER or _PAGE_USER|_PAGE_RW: no access.
 */
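/*
 * Each u32 leaf word packs the codes for the sixteen 4k subpages of one
 * 64k page, two bits per subpage: subpage n (n = (ea >> 12) & 0xf) sits
 * at bits 30-2n and 31-2n, which is what the shift below extracts.
 */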
static int subpage_protection(pgd_t *pgdir, unsigned long ea)
{
        struct subpage_prot_table *spt = pgd_subpage_prot(pgdir);
        u32 spp = 0;
        u32 **sbpm, *sbpp;

        if (ea >= spt->maxaddr)
                return 0;
        if (ea < 0x100000000) {
                /* addresses below 4GB use spt->low_prot */
                sbpm = spt->low_prot;
        } else {
                sbpm = spt->protptrs[ea >> SBP_L3_SHIFT];
                if (!sbpm)
                        return 0;
        }
        sbpp = sbpm[(ea >> SBP_L2_SHIFT) & (SBP_L2_COUNT - 1)];
        if (!sbpp)
                return 0;
        spp = sbpp[(ea >> PAGE_SHIFT) & (SBP_L1_COUNT - 1)];

        /* extract 2-bit bitfield for this 4k subpage */
        spp >>= 30 - 2 * ((ea >> 12) & 0xf);

        /* turn 0,1,2,3 into combination of _PAGE_USER and _PAGE_RW */
        spp = ((spp & 2) ? _PAGE_USER : 0) | ((spp & 1) ? _PAGE_RW : 0);
        return spp;
}

#else /* CONFIG_PPC_SUBPAGE_PROT */
static inline int subpage_protection(pgd_t *pgdir, unsigned long ea)
{
        return 0;
}
#endif
/* Result code is:
 *  0 - handled
 *  1 - normal page fault
 * -1 - critical hash insertion error
 * -2 - access not permitted by subpage protection mechanism
 */
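/*
 * hash_page() is the slow path of the hash fault handler: it classifies
 * the faulting address by region (user, vmalloc/io, or invalid), finds
 * the Linux PTE, handles segment demotion to 4k where required, and then
 * hands off to __hash_page_4K/__hash_page_64K to insert the HPTE.
 */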
int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
{
        void *pgdir;
        unsigned long vsid;
        struct mm_struct *mm;
        pte_t *ptep;
        cpumask_t tmp;
        int rc, user_region = 0, local = 0;
        int psize, ssize;

        DBG_LOW("hash_page(ea=%016lx, access=%lx, trap=%lx)\n",
                ea, access, trap);

        if ((ea & ~REGION_MASK) >= PGTABLE_RANGE) {
                DBG_LOW(" out of pgtable range !\n");
                return 1;
        }

        /* Get region & vsid */
        switch (REGION_ID(ea)) {
        case USER_REGION_ID:
                user_region = 1;
                mm = current->mm;
                if (! mm) {
                        DBG_LOW(" user region with no mm !\n");
                        return 1;
                }
#ifdef CONFIG_PPC_MM_SLICES
                psize = get_slice_psize(mm, ea);
#else
                psize = mm->context.user_psize;
#endif
                ssize = user_segment_size(ea);
                vsid = get_vsid(mm->context.id, ea, ssize);
                break;
        case VMALLOC_REGION_ID:
                mm = &init_mm;
                vsid = get_kernel_vsid(ea, mmu_kernel_ssize);
                if (ea < VMALLOC_END)
                        psize = mmu_vmalloc_psize;
                else
                        psize = mmu_io_psize;
                ssize = mmu_kernel_ssize;
                break;
        default:
                /* Not a valid range
                 * Send the problem up to do_page_fault
                 */
                return 1;
        }
        DBG_LOW(" mm=%p, mm->pgdir=%p, vsid=%016lx\n", mm, mm->pgd, vsid);

        /* Get pgdir */
        pgdir = mm->pgd;
        if (pgdir == NULL)
                return 1;

        /* Check CPU locality */
        tmp = cpumask_of_cpu(smp_processor_id());
        if (user_region && cpus_equal(mm->cpu_vm_mask, tmp))
                local = 1;

#ifdef CONFIG_HUGETLB_PAGE
        /* Handle hugepage regions */
        if (HPAGE_SHIFT && psize == mmu_huge_psize) {
                DBG_LOW(" -> huge page !\n");
                return hash_huge_page(mm, access, ea, vsid, local, trap);
        }
#endif /* CONFIG_HUGETLB_PAGE */

#ifndef CONFIG_PPC_64K_PAGES
        /* If we use 4K pages and our psize is not 4K, then we are hitting
         * a special driver mapping, so we need to align the address before
         * we fetch the PTE
         */
        if (psize != MMU_PAGE_4K)
                ea &= ~((1ul << mmu_psize_defs[psize].shift) - 1);
#endif /* CONFIG_PPC_64K_PAGES */

        /* Get PTE and page size from page tables */
        ptep = find_linux_pte(pgdir, ea);
        if (ptep == NULL || !pte_present(*ptep)) {
                DBG_LOW(" no PTE !\n");
                return 1;
        }

#ifndef CONFIG_PPC_64K_PAGES
        DBG_LOW(" i-pte: %016lx\n", pte_val(*ptep));
#else
        DBG_LOW(" i-pte: %016lx %016lx\n", pte_val(*ptep),
                pte_val(*(ptep + PTRS_PER_PTE)));
#endif
        /* Pre-check access permissions (will be re-checked atomically
         * in __hash_page_XX, but this pre-check is a fast path)
         */
        if (access & ~pte_val(*ptep)) {
                DBG_LOW(" no access !\n");
                return 1;
        }

        /* Do actual hashing */
#ifdef CONFIG_PPC_64K_PAGES
        /* If _PAGE_4K_PFN is set, make sure this is a 4k segment */
        if (pte_val(*ptep) & _PAGE_4K_PFN) {
                demote_segment_4k(mm, ea);
                psize = MMU_PAGE_4K;
        }

        /* If this PTE is non-cacheable and we have restrictions on
         * using non-cacheable large pages, then we switch to 4k
         */
        if (mmu_ci_restrictions && psize == MMU_PAGE_64K &&
            (pte_val(*ptep) & _PAGE_NO_CACHE)) {
                if (user_region) {
                        demote_segment_4k(mm, ea);
                        psize = MMU_PAGE_4K;
                } else if (ea < VMALLOC_END) {
                        /*
                         * some driver did a non-cacheable mapping
                         * in vmalloc space, so switch vmalloc
                         * to 4k pages
                         */
                        printk(KERN_ALERT "Reducing vmalloc segment "
                               "to 4kB pages because of "
                               "non-cacheable mapping\n");
                        psize = mmu_vmalloc_psize = MMU_PAGE_4K;
#ifdef CONFIG_SPU_BASE
                        spu_flush_all_slbs(mm);
#endif
                }
        }
        if (user_region) {
                if (psize != get_paca()->context.user_psize) {
                        get_paca()->context = mm->context;
                        slb_flush_and_rebolt();
                }
        } else if (get_paca()->vmalloc_sllp !=
                   mmu_psize_defs[mmu_vmalloc_psize].sllp) {
                get_paca()->vmalloc_sllp =
                        mmu_psize_defs[mmu_vmalloc_psize].sllp;
                slb_vmalloc_update();
        }
#endif /* CONFIG_PPC_64K_PAGES */

#ifdef CONFIG_PPC_HAS_HASH_64K
        if (psize == MMU_PAGE_64K)
                rc = __hash_page_64K(ea, access, vsid, ptep, trap, local, ssize);
        else
#endif /* CONFIG_PPC_HAS_HASH_64K */
        {
                int spp = subpage_protection(pgdir, ea);
                if (access & spp)
                        rc = -2;
                else
                        rc = __hash_page_4K(ea, access, vsid, ptep, trap,
                                            local, ssize, spp);
        }

#ifndef CONFIG_PPC_64K_PAGES
        DBG_LOW(" o-pte: %016lx\n", pte_val(*ptep));
#else
        DBG_LOW(" o-pte: %016lx %016lx\n", pte_val(*ptep),
                pte_val(*(ptep + PTRS_PER_PTE)));
#endif
        DBG_LOW(" -> rc=%d\n", rc);
        return rc;
}
EXPORT_SYMBOL_GPL(hash_page);
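
/*
 * Opportunistically pre-insert a hash entry for a user page we expect to
 * be touched soon (typically when its Linux PTE has just been set up),
 * so the eventual access does not have to take the hash fault path.
 * Bails out whenever the fast path cannot safely handle the page
 * (non-standard slice psize, or _PAGE_4K_PFN / _PAGE_NO_CACHE on 64k
 * kernels).
 */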
void hash_preload(struct mm_struct *mm, unsigned long ea,
                  unsigned long access, unsigned long trap)
{
        unsigned long vsid;
        void *pgdir;
        pte_t *ptep;
        cpumask_t mask;
        unsigned long flags;
        int local = 0;
        int ssize;

        BUG_ON(REGION_ID(ea) != USER_REGION_ID);

#ifdef CONFIG_PPC_MM_SLICES
        /* We only prefault standard pages for now */
        if (unlikely(get_slice_psize(mm, ea) != mm->context.user_psize))
                return;
#endif

        DBG_LOW("hash_preload(mm=%p, mm->pgdir=%p, ea=%016lx, access=%lx,"
                " trap=%lx)\n", mm, mm->pgd, ea, access, trap);

        /* Get Linux PTE if available */
        pgdir = mm->pgd;
        if (pgdir == NULL)
                return;
        ptep = find_linux_pte(pgdir, ea);
        if (!ptep)
                return;

#ifdef CONFIG_PPC_64K_PAGES
        /* If either _PAGE_4K_PFN or _PAGE_NO_CACHE is set (and we are on
         * a 64K kernel), then we don't preload, hash_page() will take
         * care of it once we actually try to access the page.
         * That way we don't have to duplicate all of the logic for segment
         * page size demotion here
         */
        if (pte_val(*ptep) & (_PAGE_4K_PFN | _PAGE_NO_CACHE))
                return;
#endif /* CONFIG_PPC_64K_PAGES */

        /* Get VSID */
        ssize = user_segment_size(ea);
        vsid = get_vsid(mm->context.id, ea, ssize);

        /* Hash doesn't like irqs */
        local_irq_save(flags);

        /* Is that local to this CPU ? */
        mask = cpumask_of_cpu(smp_processor_id());
        if (cpus_equal(mm->cpu_vm_mask, mask))
                local = 1;

        /* Hash it in */
#ifdef CONFIG_PPC_HAS_HASH_64K
        if (mm->context.user_psize == MMU_PAGE_64K)
                __hash_page_64K(ea, access, vsid, ptep, trap, local, ssize);
        else
#endif /* CONFIG_PPC_HAS_HASH_64K */
                __hash_page_4K(ea, access, vsid, ptep, trap, local, ssize,
                               subpage_protection(pgdir, ea));

        local_irq_restore(flags);
}
/* WARNING: This is called from hash_low_64.S, if you change this prototype,
 * do not forget to update the assembly call site!
 */
void flush_hash_page(unsigned long va, real_pte_t pte, int psize, int ssize,
                     int local)
{
        unsigned long hash, index, shift, hidx, slot;

        DBG_LOW("flush_hash_page(va=%016lx)\n", va);
        pte_iterate_hashed_subpages(pte, psize, va, index, shift) {
                hash = hpt_hash(va, shift, ssize);
                hidx = __rpte_to_hidx(pte, index);
                if (hidx & _PTEIDX_SECONDARY)
                        hash = ~hash;
                slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
                slot += hidx & _PTEIDX_GROUP_IX;
                DBG_LOW(" sub %ld: slot=%lx, hidx=%lx\n", index, slot, hidx);
                ppc_md.hpte_invalidate(slot, va, psize, ssize, local);
        } pte_iterate_hashed_end();
}
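
/*
 * Flush the entries collected in this CPU's TLB batch.  Platforms that
 * provide a batched flush_hash_range hook get to use it; otherwise we
 * fall back to invalidating the batched pages one at a time.
 */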
void flush_hash_range(unsigned long number, int local)
{
        if (ppc_md.flush_hash_range)
                ppc_md.flush_hash_range(number, local);
        else {
                int i;
                struct ppc64_tlb_batch *batch =
                        &__get_cpu_var(ppc64_tlb_batch);

                for (i = 0; i < number; i++)
                        flush_hash_page(batch->vaddr[i], batch->pte[i],
                                        batch->psize, batch->ssize, local);
        }
}
/*
 * low_hash_fault is called when the low-level hash code fails
 * to insert a PTE due to a hypervisor error
 */
void low_hash_fault(struct pt_regs *regs, unsigned long address, int rc)
{
        if (user_mode(regs)) {
#ifdef CONFIG_PPC_SUBPAGE_PROT
                if (rc == -2)
                        _exception(SIGSEGV, regs, SEGV_ACCERR, address);
                else
#endif
                        _exception(SIGBUS, regs, BUS_ADRERR, address);
        } else
                bad_page_fault(regs, address, SIGBUS);
}
#ifdef CONFIG_DEBUG_PAGEALLOC
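/*
 * For CONFIG_DEBUG_PAGEALLOC, linear_map_hash_slots[] tracks each page of
 * the linear mapping: the low 7 bits cache the HPTE slot returned by
 * hpte_insert and the 0x80 bit marks the page as currently mapped, so
 * kernel_unmap_linear_page() can find and invalidate the exact entry.
 */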
static void kernel_map_linear_page(unsigned long vaddr, unsigned long lmi)
{
        unsigned long hash, hpteg;
        unsigned long vsid = get_kernel_vsid(vaddr, mmu_kernel_ssize);
        unsigned long va = hpt_va(vaddr, vsid, mmu_kernel_ssize);
        unsigned long mode = _PAGE_ACCESSED | _PAGE_DIRTY |
                _PAGE_COHERENT | PP_RWXX | HPTE_R_N;
        int ret;

        hash = hpt_hash(va, PAGE_SHIFT, mmu_kernel_ssize);
        hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP);

        ret = ppc_md.hpte_insert(hpteg, va, __pa(vaddr),
                                 mode, HPTE_V_BOLTED,
                                 mmu_linear_psize, mmu_kernel_ssize);
        BUG_ON(ret < 0);
        spin_lock(&linear_map_hash_lock);
        BUG_ON(linear_map_hash_slots[lmi] & 0x80);
        linear_map_hash_slots[lmi] = ret | 0x80;
        spin_unlock(&linear_map_hash_lock);
}

static void kernel_unmap_linear_page(unsigned long vaddr, unsigned long lmi)
{
        unsigned long hash, hidx, slot;
        unsigned long vsid = get_kernel_vsid(vaddr, mmu_kernel_ssize);
        unsigned long va = hpt_va(vaddr, vsid, mmu_kernel_ssize);

        hash = hpt_hash(va, PAGE_SHIFT, mmu_kernel_ssize);
        spin_lock(&linear_map_hash_lock);
        BUG_ON(!(linear_map_hash_slots[lmi] & 0x80));
        hidx = linear_map_hash_slots[lmi] & 0x7f;
        linear_map_hash_slots[lmi] = 0;
        spin_unlock(&linear_map_hash_lock);
        if (hidx & _PTEIDX_SECONDARY)
                hash = ~hash;
        slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
        slot += hidx & _PTEIDX_GROUP_IX;
        ppc_md.hpte_invalidate(slot, va, mmu_linear_psize, mmu_kernel_ssize, 0);
}

void kernel_map_pages(struct page *page, int numpages, int enable)
{
        unsigned long flags, vaddr, lmi;
        int i;

        local_irq_save(flags);
        for (i = 0; i < numpages; i++, page++) {
                vaddr = (unsigned long)page_address(page);
                lmi = __pa(vaddr) >> PAGE_SHIFT;
                if (lmi >= linear_map_hash_count)
                        continue;
                if (enable)
                        kernel_map_linear_page(vaddr, lmi);
                else
                        kernel_unmap_linear_page(vaddr, lmi);
        }
        local_irq_restore(flags);
}
#endif /* CONFIG_DEBUG_PAGEALLOC */