/*
 * PowerPC64 port by Mike Corrigan and Dave Engebretsen
 *   {mikejc|engebret}@us.ibm.com
 *
 *    Copyright (c) 2000 Mike Corrigan <mikejc@us.ibm.com>
 *
 * SMP scalability work:
 *    Copyright (C) 2001 Anton Blanchard <anton@au.ibm.com>, IBM
 *
 *    Module name: htab.c
 *
 *    Description:
 *      PowerPC Hashed Page Table functions
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#undef DEBUG
#undef DEBUG_LOW

#include <linux/spinlock.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/proc_fs.h>
#include <linux/stat.h>
#include <linux/sysctl.h>
#include <linux/ctype.h>
#include <linux/cache.h>
#include <linux/init.h>
#include <linux/signal.h>

#include <asm/processor.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>
#include <asm/page.h>
#include <asm/types.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/machdep.h>
#include <asm/lmb.h>
#include <asm/abs_addr.h>
#include <asm/tlbflush.h>
#include <asm/io.h>
#include <asm/eeh.h>
#include <asm/tlb.h>
#include <asm/cacheflush.h>
#include <asm/cputable.h>
#include <asm/udbg.h>	/* udbg_printf(), used by the DBG macros below */
#include <asm/sections.h>
#include <asm/spu.h>
#ifdef DEBUG
#define DBG(fmt...) udbg_printf(fmt)
#else
#define DBG(fmt...)
#endif

#ifdef DEBUG_LOW
#define DBG_LOW(fmt...) udbg_printf(fmt)
#else
#define DBG_LOW(fmt...)
#endif

#define KB (1024)
#define MB (1024*KB)
/*
 * Note:  pte   --> Linux PTE
 *        HPTE  --> PowerPC Hashed Page Table Entry
 *
 * Execution context:
 *   htab_initialize is called with the MMU off (of course), but
 *   the kernel has been copied down to zero so it can directly
 *   reference global data.  At this point it is very difficult
 *   to print debug info.
 */
#ifdef CONFIG_U3_DART
extern unsigned long dart_tablebase;
#endif /* CONFIG_U3_DART */

static unsigned long _SDR1;
struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT];

hpte_t *htab_address;
unsigned long htab_size_bytes;
unsigned long htab_hash_mask;
int mmu_linear_psize = MMU_PAGE_4K;
int mmu_virtual_psize = MMU_PAGE_4K;
int mmu_vmalloc_psize = MMU_PAGE_4K;
int mmu_io_psize = MMU_PAGE_4K;
#ifdef CONFIG_HUGETLB_PAGE
int mmu_huge_psize = MMU_PAGE_16M;
unsigned int HPAGE_SHIFT;
#endif
#ifdef CONFIG_PPC_64K_PAGES
int mmu_ci_restrictions;
#endif
#ifdef CONFIG_DEBUG_PAGEALLOC
static u8 *linear_map_hash_slots;
static unsigned long linear_map_hash_count;
static DEFINE_SPINLOCK(linear_map_hash_lock);
#endif /* CONFIG_DEBUG_PAGEALLOC */
/* These are definitions of page-size arrays to be used when no page
 * size information is provided by the firmware.
 */

/* Pre-POWER4 CPUs (4k pages only)
 */
struct mmu_psize_def mmu_psize_defaults_old[] = {
	[MMU_PAGE_4K] = {
		.shift	= 12,
		.sllp	= 0,
		.penc	= 0,
		.avpnm	= 0,
		.tlbiel = 0,
	},
};

/* POWER4, GPUL, POWER5
 *
 * Support for 16Mb large pages
 */
struct mmu_psize_def mmu_psize_defaults_gp[] = {
	[MMU_PAGE_4K] = {
		.shift	= 12,
		.sllp	= 0,
		.penc	= 0,
		.avpnm	= 0,
		.tlbiel = 1,
	},
	[MMU_PAGE_16M] = {
		.shift	= 24,
		.sllp	= SLB_VSID_L,
		.penc	= 0,
		.avpnm	= 0x1UL,
		.tlbiel = 0,
	},
};
int htab_bolt_mapping(unsigned long vstart, unsigned long vend,
		      unsigned long pstart, unsigned long mode, int psize)
{
	unsigned long vaddr, paddr;
	unsigned int step, shift;
	unsigned long tmp_mode;
	int ret = 0;

	shift = mmu_psize_defs[psize].shift;
	step = 1 << shift;

	for (vaddr = vstart, paddr = pstart; vaddr < vend;
	     vaddr += step, paddr += step) {
		unsigned long hash, hpteg;
		unsigned long vsid = get_kernel_vsid(vaddr);
		unsigned long va = (vsid << 28) | (vaddr & 0x0fffffff);

		tmp_mode = mode;

		/* Make non-kernel text non-executable */
		if (!in_kernel_text(vaddr))
			tmp_mode = mode | HPTE_R_N;

		hash = hpt_hash(va, shift);
		hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP);

		DBG("htab_bolt_mapping: calling %p\n", ppc_md.hpte_insert);

		BUG_ON(!ppc_md.hpte_insert);
		ret = ppc_md.hpte_insert(hpteg, va, paddr,
					 tmp_mode, HPTE_V_BOLTED, psize);

		if (ret < 0)
			break;
#ifdef CONFIG_DEBUG_PAGEALLOC
		if ((paddr >> PAGE_SHIFT) < linear_map_hash_count)
			linear_map_hash_slots[paddr >> PAGE_SHIFT] = ret | 0x80;
#endif /* CONFIG_DEBUG_PAGEALLOC */
	}
	return ret < 0 ? ret : 0;
}
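
/*
 * Illustrative only (not compiled): a minimal sketch of how a caller
 * might bolt a single 16MB-aligned region with the linear-mapping page
 * size.  The base address below is hypothetical; the real callers are
 * in htab_initialize() further down.
 */
#if 0
static void __init example_bolt_region(void)
{
	unsigned long base = (unsigned long)__va(0x10000000ul); /* hypothetical */
	unsigned long size = 16 * MB;	/* one large page */
	unsigned long mode = _PAGE_ACCESSED | _PAGE_DIRTY |
			     _PAGE_COHERENT | PP_RWXX;

	/* The real callers BUG() on insertion failure, so this sketch does too */
	BUG_ON(htab_bolt_mapping(base, base + size, __pa(base),
				 mode, mmu_linear_psize));
}
#endif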
static int __init htab_dt_scan_page_sizes(unsigned long node,
					  const char *uname, int depth,
					  void *data)
{
	char *type = of_get_flat_dt_prop(node, "device_type", NULL);
	u32 *prop;
	unsigned long size = 0;

	/* We are scanning "cpu" nodes only */
	if (type == NULL || strcmp(type, "cpu") != 0)
		return 0;

	prop = (u32 *)of_get_flat_dt_prop(node,
					  "ibm,segment-page-sizes", &size);
	if (prop != NULL) {
		DBG("Page sizes from device-tree:\n");
		size /= 4;
		cur_cpu_spec->cpu_features &= ~(CPU_FTR_16M_PAGE);
		while(size > 0) {
			unsigned int shift = prop[0];
			unsigned int slbenc = prop[1];
			unsigned int lpnum = prop[2];
			unsigned int lpenc = 0;
			struct mmu_psize_def *def;
			int idx = -1;

			size -= 3; prop += 3;
			while(size > 0 && lpnum) {
				if (prop[0] == shift)
					lpenc = prop[1];
				prop += 2; size -= 2;
				lpnum--;
			}
			switch(shift) {
			case 0xc:
				idx = MMU_PAGE_4K;
				break;
			case 0x10:
				idx = MMU_PAGE_64K;
				break;
			case 0x14:
				idx = MMU_PAGE_1M;
				break;
			case 0x18:
				idx = MMU_PAGE_16M;
				cur_cpu_spec->cpu_features |= CPU_FTR_16M_PAGE;
				break;
			case 0x22:
				idx = MMU_PAGE_16G;
				break;
			}
			if (idx < 0)
				continue;
			def = &mmu_psize_defs[idx];
			def->shift = shift;
			if (shift <= 23)
				def->avpnm = 0;
			else
				def->avpnm = (1 << (shift - 23)) - 1;
			def->sllp = slbenc;
			def->penc = lpenc;
			/* We don't know for sure what's up with tlbiel, so
			 * for now we only set it for 4K and 64K pages
			 */
			if (idx == MMU_PAGE_4K || idx == MMU_PAGE_64K)
				def->tlbiel = 1;
			else
				def->tlbiel = 0;

			DBG(" %d: shift=%02x, sllp=%04x, avpnm=%08x, "
			    "tlbiel=%d, penc=%d\n",
			    idx, shift, def->sllp, def->avpnm, def->tlbiel,
			    def->penc);
		}
		return 1;
	}
	return 0;
}
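
/*
 * Worked example (hypothetical values): an "ibm,segment-page-sizes"
 * property for a CPU supporting 4K and 16M pages, laid out the way the
 * scanner above walks it -- <shift slbenc lpnum> followed by lpnum
 * <shift lpenc> pairs per entry.
 */
#if 0
static const u32 example_segment_page_sizes[] = {
	0x0c, 0x000, 1,	0x0c, 0x0,	/* 4K:  sllp=0, penc(4K)=0 */
	0x18, 0x100, 1,	0x18, 0x0,	/* 16M: sllp=SLB_VSID_L, penc(16M)=0 */
};
#endif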
static void __init htab_init_page_sizes(void)
{
	int rc;

	/* Default to 4K pages only */
	memcpy(mmu_psize_defs, mmu_psize_defaults_old,
	       sizeof(mmu_psize_defaults_old));

	/*
	 * Try to find the available page sizes in the device-tree
	 */
	rc = of_scan_flat_dt(htab_dt_scan_page_sizes, NULL);
	if (rc != 0)  /* Found */
		goto found;

	/*
	 * Not in the device-tree, so fall back on the known size
	 * list for 16M-capable GP & GR
	 */
	if (cpu_has_feature(CPU_FTR_16M_PAGE))
		memcpy(mmu_psize_defs, mmu_psize_defaults_gp,
		       sizeof(mmu_psize_defaults_gp));
 found:
#ifndef CONFIG_DEBUG_PAGEALLOC
	/*
	 * Pick a size for the linear mapping. Currently, we only support
	 * 16M, 1M and 4K which is the default
	 */
	if (mmu_psize_defs[MMU_PAGE_16M].shift)
		mmu_linear_psize = MMU_PAGE_16M;
	else if (mmu_psize_defs[MMU_PAGE_1M].shift)
		mmu_linear_psize = MMU_PAGE_1M;
#endif /* CONFIG_DEBUG_PAGEALLOC */

#ifdef CONFIG_PPC_64K_PAGES
	/*
	 * Pick a size for the ordinary pages. Default is 4K, we support
	 * 64K for user mappings and vmalloc if supported by the processor.
	 * We only use 64k for ioremap if the processor
	 * (and firmware) support cache-inhibited large pages.
	 * If not, we use 4k and set mmu_ci_restrictions so that
	 * hash_page knows to switch processes that use cache-inhibited
	 * mappings to 4k pages.
	 */
	if (mmu_psize_defs[MMU_PAGE_64K].shift) {
		mmu_virtual_psize = MMU_PAGE_64K;
		mmu_vmalloc_psize = MMU_PAGE_64K;
		if (mmu_linear_psize == MMU_PAGE_4K)
			mmu_linear_psize = MMU_PAGE_64K;
		if (cpu_has_feature(CPU_FTR_CI_LARGE_PAGE))
			mmu_io_psize = MMU_PAGE_64K;
		else
			mmu_ci_restrictions = 1;
	}
#endif /* CONFIG_PPC_64K_PAGES */

	printk(KERN_DEBUG "Page orders: linear mapping = %d, "
	       "virtual = %d, io = %d\n",
	       mmu_psize_defs[mmu_linear_psize].shift,
	       mmu_psize_defs[mmu_virtual_psize].shift,
	       mmu_psize_defs[mmu_io_psize].shift);

#ifdef CONFIG_HUGETLB_PAGE
	/* Init large page size. Currently, we pick 16M or 1M depending
	 * on what is available
	 */
	if (mmu_psize_defs[MMU_PAGE_16M].shift)
		mmu_huge_psize = MMU_PAGE_16M;
	/* With 4k/4level pagetables, we can't (for now) cope with a
	 * huge page size < PMD_SIZE */
	else if (mmu_psize_defs[MMU_PAGE_1M].shift)
		mmu_huge_psize = MMU_PAGE_1M;

	/* Calculate HPAGE_SHIFT and sanity check it */
	if (mmu_psize_defs[mmu_huge_psize].shift > MIN_HUGEPTE_SHIFT &&
	    mmu_psize_defs[mmu_huge_psize].shift < SID_SHIFT)
		HPAGE_SHIFT = mmu_psize_defs[mmu_huge_psize].shift;
	else
		HPAGE_SHIFT = 0; /* No huge pages dude ! */
#endif /* CONFIG_HUGETLB_PAGE */
}
static int __init htab_dt_scan_pftsize(unsigned long node,
				       const char *uname, int depth,
				       void *data)
{
	char *type = of_get_flat_dt_prop(node, "device_type", NULL);
	u32 *prop;

	/* We are scanning "cpu" nodes only */
	if (type == NULL || strcmp(type, "cpu") != 0)
		return 0;

	prop = (u32 *)of_get_flat_dt_prop(node, "ibm,pft-size", NULL);
	if (prop != NULL) {
		/* pft_size[0] is the NUMA CEC cookie */
		ppc64_pft_size = prop[1];
		return 1;
	}
	return 0;
}
static unsigned long __init htab_get_table_size(void)
{
	unsigned long mem_size, rnd_mem_size, pteg_count;

	/* If hash size isn't already provided by the platform, we try to
	 * retrieve it from the device-tree.  If it's not there either, we
	 * calculate it now based on the total RAM size
	 */
	if (ppc64_pft_size == 0)
		of_scan_flat_dt(htab_dt_scan_pftsize, NULL);
	if (ppc64_pft_size)
		return 1UL << ppc64_pft_size;

	/* round mem_size up to next power of 2 */
	mem_size = lmb_phys_mem_size();
	rnd_mem_size = 1UL << __ilog2(mem_size);
	if (rnd_mem_size < mem_size)
		rnd_mem_size <<= 1;

	/* # pages / 2 */
	pteg_count = max(rnd_mem_size >> (12 + 1), 1UL << 11);

	return pteg_count << 7;
}
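
/*
 * Worked example of the sizing above (assuming no firmware-provided
 * pft-size): with 1GB of RAM, rnd_mem_size = 2^30, so
 * pteg_count = max(2^30 >> 13, 2^11) = 2^17, and the table size is
 * 2^17 << 7 = 16MB -- one 128-byte PTEG for every two 4K pages.
 */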
#ifdef CONFIG_MEMORY_HOTPLUG

void create_section_mapping(unsigned long start, unsigned long end)
{
	BUG_ON(htab_bolt_mapping(start, end, __pa(start),
		_PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_COHERENT | PP_RWXX,
		mmu_linear_psize));
}
#endif /* CONFIG_MEMORY_HOTPLUG */
static inline void make_bl(unsigned int *insn_addr, void *func)
{
	unsigned long funcp = *((unsigned long *)func);
	int offset = funcp - (unsigned long)insn_addr;

	*insn_addr = (unsigned int)(0x48000001 | (offset & 0x03fffffc));
	flush_icache_range((unsigned long)insn_addr, 4 +
			   (unsigned long)insn_addr);
}
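
/*
 * Sketch of the encoding above: 0x48000001 is a PowerPC "bl"
 * (I-form branch, primary opcode 18, LK=1), and the signed 26-bit
 * displacement occupies bits 6-29, hence the 0x03fffffc mask.
 * For example, a target 0x100 bytes past the branch encodes as
 * 0x48000001 | 0x100 = 0x48000101.
 */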
static void __init htab_finish_init(void)
{
	extern unsigned int *htab_call_hpte_insert1;
	extern unsigned int *htab_call_hpte_insert2;
	extern unsigned int *htab_call_hpte_remove;
	extern unsigned int *htab_call_hpte_updatepp;
#ifdef CONFIG_PPC_HAS_HASH_64K
	extern unsigned int *ht64_call_hpte_insert1;
	extern unsigned int *ht64_call_hpte_insert2;
	extern unsigned int *ht64_call_hpte_remove;
	extern unsigned int *ht64_call_hpte_updatepp;

	make_bl(ht64_call_hpte_insert1, ppc_md.hpte_insert);
	make_bl(ht64_call_hpte_insert2, ppc_md.hpte_insert);
	make_bl(ht64_call_hpte_remove, ppc_md.hpte_remove);
	make_bl(ht64_call_hpte_updatepp, ppc_md.hpte_updatepp);
#endif /* CONFIG_PPC_HAS_HASH_64K */

	make_bl(htab_call_hpte_insert1, ppc_md.hpte_insert);
	make_bl(htab_call_hpte_insert2, ppc_md.hpte_insert);
	make_bl(htab_call_hpte_remove, ppc_md.hpte_remove);
	make_bl(htab_call_hpte_updatepp, ppc_md.hpte_updatepp);
}
void __init htab_initialize(void)
{
	unsigned long table;
	unsigned long pteg_count;
	unsigned long mode_rw;
	unsigned long base = 0, size = 0;
	int i;

	extern unsigned long tce_alloc_start, tce_alloc_end;

	DBG(" -> htab_initialize()\n");

	/* Initialize page sizes */
	htab_init_page_sizes();

	/*
	 * Calculate the required size of the htab.  We want the number of
	 * PTEGs to equal one half the number of real pages.
	 */
	htab_size_bytes = htab_get_table_size();
	pteg_count = htab_size_bytes >> 7;

	htab_hash_mask = pteg_count - 1;

	if (firmware_has_feature(FW_FEATURE_LPAR)) {
		/* Using a hypervisor which owns the htab */
		htab_address = NULL;
		_SDR1 = 0;
	} else {
		/* Find storage for the HPT.  Must be contiguous in
		 * the absolute address space.
		 */
		table = lmb_alloc(htab_size_bytes, htab_size_bytes);

		DBG("Hash table allocated at %lx, size: %lx\n", table,
		    htab_size_bytes);

		htab_address = abs_to_virt(table);

		/* htab absolute addr + encoded htabsize */
		_SDR1 = table + __ilog2(pteg_count) - 11;
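		/* Worked example of the encoding above: a 16MB hash
		 * table has 2^17 PTEGs, so the HTABSIZE field is
		 * __ilog2(2^17) - 11 = 6, i.e. SDR1 = table | 6
		 * (the table is size-aligned, so the low bits are free).
		 */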
		/* Initialize the HPT with no entries */
		memset((void *)table, 0, htab_size_bytes);

		/* Set SDR1 */
		mtspr(SPRN_SDR1, _SDR1);
	}

	mode_rw = _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_COHERENT | PP_RWXX;

#ifdef CONFIG_DEBUG_PAGEALLOC
	linear_map_hash_count = lmb_end_of_DRAM() >> PAGE_SHIFT;
	linear_map_hash_slots = __va(lmb_alloc_base(linear_map_hash_count,
						    1, lmb.rmo_size));
	memset(linear_map_hash_slots, 0, linear_map_hash_count);
#endif /* CONFIG_DEBUG_PAGEALLOC */

	/* On U3 based machines, we need to reserve the DART area and
	 * _NOT_ map it to avoid cache paradoxes as it's remapped
	 * non-cacheable later on
	 */

	/* create the bolted linear mapping in the hash table */
	for (i=0; i < lmb.memory.cnt; i++) {
		base = (unsigned long)__va(lmb.memory.region[i].base);
		size = lmb.memory.region[i].size;

		DBG("creating mapping for region: %lx : %lx\n", base, size);

#ifdef CONFIG_U3_DART
		/* Do not map the DART space. Fortunately, it will be aligned
		 * in such a way that it will not cross two lmb regions and
		 * will fit within a single 16Mb page.
		 * The DART space is assumed to be a full 16Mb region even if
		 * we only use 2Mb of that space. We will use more of it later
		 * for AGP GART. We have to use a full 16Mb large page.
		 */
		DBG("DART base: %lx\n", dart_tablebase);

		if (dart_tablebase != 0 && dart_tablebase >= base
		    && dart_tablebase < (base + size)) {
			unsigned long dart_table_end = dart_tablebase + 16 * MB;
			if (base != dart_tablebase)
				BUG_ON(htab_bolt_mapping(base, dart_tablebase,
							 __pa(base), mode_rw,
							 mmu_linear_psize));
			if ((base + size) > dart_table_end)
				BUG_ON(htab_bolt_mapping(dart_tablebase+16*MB,
							 base + size,
							 __pa(dart_table_end),
							 mode_rw,
							 mmu_linear_psize));
			continue;
		}
#endif /* CONFIG_U3_DART */
		BUG_ON(htab_bolt_mapping(base, base + size, __pa(base),
					 mode_rw, mmu_linear_psize));
	}
	/*
	 * If we have a memory_limit and we've allocated TCEs then we need to
	 * explicitly map the TCE area at the top of RAM. We also cope with the
	 * case that the TCEs start below memory_limit.
	 * tce_alloc_start/end are 16MB aligned so the mapping should work
	 * for either 4K or 16MB pages.
	 */
	if (tce_alloc_start) {
		tce_alloc_start = (unsigned long)__va(tce_alloc_start);
		tce_alloc_end = (unsigned long)__va(tce_alloc_end);

		if (base + size >= tce_alloc_start)
			tce_alloc_start = base + size + 1;

		BUG_ON(htab_bolt_mapping(tce_alloc_start, tce_alloc_end,
					 __pa(tce_alloc_start), mode_rw,
					 mmu_linear_psize));
	}

	htab_finish_init();

	DBG(" <- htab_initialize()\n");
}
#undef KB
#undef MB
void htab_initialize_secondary(void)
{
	if (!firmware_has_feature(FW_FEATURE_LPAR))
		mtspr(SPRN_SDR1, _SDR1);
}
/*
 * Called by asm hashtable.S for doing lazy icache flush
 */
unsigned int hash_page_do_lazy_icache(unsigned int pp, pte_t pte, int trap)
{
	struct page *page;

	if (!pfn_valid(pte_pfn(pte)))
		return pp;

	page = pte_page(pte);

	/* page is dirty */
	if (!test_bit(PG_arch_1, &page->flags) && !PageReserved(page)) {
		if (trap == 0x400) {
			__flush_dcache_icache(page_address(page));
			set_bit(PG_arch_1, &page->flags);
		} else
			pp |= HPTE_R_N;
	}
	return pp;
}
/*
 * Demote a segment to using 4k pages.
 * For now this makes the whole process use 4k pages.
 */
#ifdef CONFIG_PPC_64K_PAGES
static void demote_segment_4k(struct mm_struct *mm, unsigned long addr)
{
	if (mm->context.user_psize == MMU_PAGE_4K)
		return;
#ifdef CONFIG_PPC_MM_SLICES
	slice_set_user_psize(mm, MMU_PAGE_4K);
#else /* CONFIG_PPC_MM_SLICES */
	mm->context.user_psize = MMU_PAGE_4K;
	mm->context.sllp = SLB_VSID_USER | mmu_psize_defs[MMU_PAGE_4K].sllp;
#endif /* CONFIG_PPC_MM_SLICES */

#ifdef CONFIG_SPU_BASE
	spu_flush_all_slbs(mm);
#endif
}
#endif /* CONFIG_PPC_64K_PAGES */
/* Result code is:
 *  0 - handled
 *  1 - normal page fault
 * -1 - critical hash insertion error
 */
int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
{
	void *pgdir;
	unsigned long vsid;
	struct mm_struct *mm;
	pte_t *ptep;
	cpumask_t tmp;
	int rc, user_region = 0, local = 0;
	int psize;

	DBG_LOW("hash_page(ea=%016lx, access=%lx, trap=%lx)\n",
		ea, access, trap);

	if ((ea & ~REGION_MASK) >= PGTABLE_RANGE) {
		DBG_LOW(" out of pgtable range !\n");
		return 1;
	}
	/* Get region & vsid */
	switch (REGION_ID(ea)) {
	case USER_REGION_ID:
		user_region = 1;
		mm = current->mm;
		if (! mm) {
			DBG_LOW(" user region with no mm !\n");
			return 1;
		}
		vsid = get_vsid(mm->context.id, ea);
#ifdef CONFIG_PPC_MM_SLICES
		psize = get_slice_psize(mm, ea);
#else
		psize = mm->context.user_psize;
#endif
		break;
	case VMALLOC_REGION_ID:
		mm = &init_mm;
		vsid = get_kernel_vsid(ea);
		if (ea < VMALLOC_END)
			psize = mmu_vmalloc_psize;
		else
			psize = mmu_io_psize;
		break;
	default:
		/* Not a valid range
		 * Send the problem up to do_page_fault
		 */
		return 1;
	}
	DBG_LOW(" mm=%p, mm->pgdir=%p, vsid=%016lx\n", mm, mm->pgd, vsid);

	/* Get pgdir */
	pgdir = mm->pgd;
	if (pgdir == NULL)
		return 1;

	/* Check CPU locality */
	tmp = cpumask_of_cpu(smp_processor_id());
	if (user_region && cpus_equal(mm->cpu_vm_mask, tmp))
		local = 1;

#ifdef CONFIG_HUGETLB_PAGE
	/* Handle hugepage regions */
	if (HPAGE_SHIFT && psize == mmu_huge_psize) {
		DBG_LOW(" -> huge page !\n");
		return hash_huge_page(mm, access, ea, vsid, local, trap);
	}
#endif /* CONFIG_HUGETLB_PAGE */

#ifndef CONFIG_PPC_64K_PAGES
	/* If we use 4K pages and our psize is not 4K, then we are hitting
	 * a special driver mapping, we need to align the address before
	 * we fetch the PTE
	 */
	if (psize != MMU_PAGE_4K)
		ea &= ~((1ul << mmu_psize_defs[psize].shift) - 1);
#endif /* CONFIG_PPC_64K_PAGES */

	/* Get PTE and page size from page tables */
	ptep = find_linux_pte(pgdir, ea);
	if (ptep == NULL || !pte_present(*ptep)) {
		DBG_LOW(" no PTE !\n");
		return 1;
	}

#ifndef CONFIG_PPC_64K_PAGES
	DBG_LOW(" i-pte: %016lx\n", pte_val(*ptep));
#else
	DBG_LOW(" i-pte: %016lx %016lx\n", pte_val(*ptep),
		pte_val(*(ptep + PTRS_PER_PTE)));
#endif
	/* Pre-check access permissions (will be re-checked atomically
	 * in __hash_page_XX but this pre-check is a fast path)
	 */
	if (access & ~pte_val(*ptep)) {
		DBG_LOW(" no access !\n");
		return 1;
	}
	/* Do actual hashing */
#ifdef CONFIG_PPC_64K_PAGES
	/* If _PAGE_4K_PFN is set, make sure this is a 4k segment */
	if (pte_val(*ptep) & _PAGE_4K_PFN) {
		demote_segment_4k(mm, ea);
		psize = MMU_PAGE_4K;
	}

	/* If this PTE is non-cacheable and we have restrictions on
	 * using non cacheable large pages, then we switch to 4k
	 */
	if (mmu_ci_restrictions && psize == MMU_PAGE_64K &&
	    (pte_val(*ptep) & _PAGE_NO_CACHE)) {
		if (user_region) {
			demote_segment_4k(mm, ea);
			psize = MMU_PAGE_4K;
		} else if (ea < VMALLOC_END) {
			/*
			 * some driver did a non-cacheable mapping
			 * in vmalloc space, so switch vmalloc
			 * to 4k pages
			 */
			printk(KERN_ALERT "Reducing vmalloc segment "
			       "to 4kB pages because of "
			       "non-cacheable mapping\n");
			psize = mmu_vmalloc_psize = MMU_PAGE_4K;
#ifdef CONFIG_SPU_BASE
			spu_flush_all_slbs(mm);
#endif
		}
	}
	if (user_region) {
		if (psize != get_paca()->context.user_psize) {
			get_paca()->context.user_psize =
				mm->context.user_psize;
			slb_flush_and_rebolt();
		}
	} else if (get_paca()->vmalloc_sllp !=
		   mmu_psize_defs[mmu_vmalloc_psize].sllp) {
		get_paca()->vmalloc_sllp =
			mmu_psize_defs[mmu_vmalloc_psize].sllp;
		slb_flush_and_rebolt();
	}
#endif /* CONFIG_PPC_64K_PAGES */

#ifdef CONFIG_PPC_HAS_HASH_64K
	if (psize == MMU_PAGE_64K)
		rc = __hash_page_64K(ea, access, vsid, ptep, trap, local);
	else
#endif /* CONFIG_PPC_HAS_HASH_64K */
		rc = __hash_page_4K(ea, access, vsid, ptep, trap, local);

#ifndef CONFIG_PPC_64K_PAGES
	DBG_LOW(" o-pte: %016lx\n", pte_val(*ptep));
#else
	DBG_LOW(" o-pte: %016lx %016lx\n", pte_val(*ptep),
		pte_val(*(ptep + PTRS_PER_PTE)));
#endif
	DBG_LOW(" -> rc=%d\n", rc);
	return rc;
}
EXPORT_SYMBOL_GPL(hash_page);
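
/*
 * Illustrative only (not compiled): how a caller might dispatch on
 * hash_page()'s result codes listed above.  The real callers live in
 * low-level assembly and the fault path; this function and its
 * do_fault() wiring are hypothetical.
 */
#if 0
static void example_hash_dispatch(struct pt_regs *regs, unsigned long ea,
				  unsigned long access, unsigned long trap)
{
	int rc = hash_page(ea, access, trap);

	if (rc == 0)
		return;				/* HPTE inserted, retry the access */
	else if (rc > 0)
		do_fault(regs, ea, trap);	/* hand to the normal fault path */
	else
		low_hash_fault(regs, ea);	/* critical insertion failure */
}
#endif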
void hash_preload(struct mm_struct *mm, unsigned long ea,
		  unsigned long access, unsigned long trap)
{
	unsigned long vsid;
	void *pgdir;
	pte_t *ptep;
	cpumask_t mask;
	unsigned long flags;
	int local = 0;

	BUG_ON(REGION_ID(ea) != USER_REGION_ID);

#ifdef CONFIG_PPC_MM_SLICES
	/* We only prefault standard pages for now */
	if (unlikely(get_slice_psize(mm, ea) != mm->context.user_psize))
		return;
#endif
	DBG_LOW("hash_preload(mm=%p, mm->pgdir=%p, ea=%016lx, access=%lx,"
		" trap=%lx)\n", mm, mm->pgd, ea, access, trap);

	/* Get Linux PTE if available */
	pgdir = mm->pgd;
	if (pgdir == NULL)
		return;
	ptep = find_linux_pte(pgdir, ea);
	if (!ptep)
		return;

#ifdef CONFIG_PPC_64K_PAGES
	/* If either _PAGE_4K_PFN or _PAGE_NO_CACHE is set (and we are on
	 * a 64K kernel), then we don't preload, hash_page() will take
	 * care of it once we actually try to access the page.
	 * That way we don't have to duplicate all of the logic for segment
	 * page size demotion here
	 */
	if (pte_val(*ptep) & (_PAGE_4K_PFN | _PAGE_NO_CACHE))
		return;
#endif /* CONFIG_PPC_64K_PAGES */

	/* Get VSID */
	vsid = get_vsid(mm->context.id, ea);

	/* Hash doesn't like irqs */
	local_irq_save(flags);

	/* Is that local to this CPU ? */
	mask = cpumask_of_cpu(smp_processor_id());
	if (cpus_equal(mm->cpu_vm_mask, mask))
		local = 1;

	/* Hash it in */
#ifdef CONFIG_PPC_HAS_HASH_64K
	if (mm->context.user_psize == MMU_PAGE_64K)
		__hash_page_64K(ea, access, vsid, ptep, trap, local);
	else
#endif /* CONFIG_PPC_HAS_HASH_64K */
		__hash_page_4K(ea, access, vsid, ptep, trap, local);

	local_irq_restore(flags);
}
void flush_hash_page(unsigned long va, real_pte_t pte, int psize, int local)
{
	unsigned long hash, index, shift, hidx, slot;

	DBG_LOW("flush_hash_page(va=%016lx)\n", va);
	pte_iterate_hashed_subpages(pte, psize, va, index, shift) {
		hash = hpt_hash(va, shift);
		hidx = __rpte_to_hidx(pte, index);
		if (hidx & _PTEIDX_SECONDARY)
			hash = ~hash;
		slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
		slot += hidx & _PTEIDX_GROUP_IX;
		DBG_LOW(" sub %ld: slot=%lx, hidx=%lx\n", index, slot, hidx);
		ppc_md.hpte_invalidate(slot, va, psize, local);
	} pte_iterate_hashed_end();
}
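
/*
 * Note on the ~hash above: an HPTE may have been inserted in either its
 * primary group or the secondary group, whose index is the bitwise
 * complement of the primary hash.  _PTEIDX_SECONDARY records which one
 * was used at insertion time, and _PTEIDX_GROUP_IX the slot within the
 * 8-entry group, so the invalidate can address the exact slot.
 */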
void flush_hash_range(unsigned long number, int local)
{
	if (ppc_md.flush_hash_range)
		ppc_md.flush_hash_range(number, local);
	else {
		int i;
		struct ppc64_tlb_batch *batch =
			&__get_cpu_var(ppc64_tlb_batch);

		for (i = 0; i < number; i++)
			flush_hash_page(batch->vaddr[i], batch->pte[i],
					batch->psize, local);
	}
}
/*
 * low_hash_fault is called when the low level hash code failed
 * to insert a PTE due to a hypervisor error
 */
void low_hash_fault(struct pt_regs *regs, unsigned long address)
{
	if (user_mode(regs)) {
		siginfo_t info;

		info.si_signo = SIGBUS;
		info.si_errno = 0;
		info.si_code = BUS_ADRERR;
		info.si_addr = (void __user *)address;
		force_sig_info(SIGBUS, &info, current);
		return;
	}
	bad_page_fault(regs, address, SIGBUS);
}
#ifdef CONFIG_DEBUG_PAGEALLOC
static void kernel_map_linear_page(unsigned long vaddr, unsigned long lmi)
{
	unsigned long hash, hpteg, vsid = get_kernel_vsid(vaddr);
	unsigned long va = (vsid << 28) | (vaddr & 0x0fffffff);
	unsigned long mode = _PAGE_ACCESSED | _PAGE_DIRTY |
		_PAGE_COHERENT | PP_RWXX | HPTE_R_N;
	int ret;

	hash = hpt_hash(va, PAGE_SHIFT);
	hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP);

	ret = ppc_md.hpte_insert(hpteg, va, __pa(vaddr),
				 mode, HPTE_V_BOLTED, mmu_linear_psize);
	BUG_ON(ret < 0);
	spin_lock(&linear_map_hash_lock);
	BUG_ON(linear_map_hash_slots[lmi] & 0x80);
	linear_map_hash_slots[lmi] = ret | 0x80;
	spin_unlock(&linear_map_hash_lock);
}
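
/*
 * Note: bit 0x80 in linear_map_hash_slots[] marks the page as currently
 * mapped; the low seven bits cache the hpte_insert() return value (the
 * slot within the group plus the secondary-hash bit) so the unmap path
 * below can locate the exact HPTE again.
 */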
static void kernel_unmap_linear_page(unsigned long vaddr, unsigned long lmi)
{
	unsigned long hash, hidx, slot, vsid = get_kernel_vsid(vaddr);
	unsigned long va = (vsid << 28) | (vaddr & 0x0fffffff);

	hash = hpt_hash(va, PAGE_SHIFT);
	spin_lock(&linear_map_hash_lock);
	BUG_ON(!(linear_map_hash_slots[lmi] & 0x80));
	hidx = linear_map_hash_slots[lmi] & 0x7f;
	linear_map_hash_slots[lmi] = 0;
	spin_unlock(&linear_map_hash_lock);
	if (hidx & _PTEIDX_SECONDARY)
		hash = ~hash;
	slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
	slot += hidx & _PTEIDX_GROUP_IX;
	ppc_md.hpte_invalidate(slot, va, mmu_linear_psize, 0);
}
void kernel_map_pages(struct page *page, int numpages, int enable)
{
	unsigned long flags, vaddr, lmi;
	int i;

	local_irq_save(flags);
	for (i = 0; i < numpages; i++, page++) {
		vaddr = (unsigned long)page_address(page);
		lmi = __pa(vaddr) >> PAGE_SHIFT;
		if (lmi >= linear_map_hash_count)
			continue;
		if (enable)
			kernel_map_linear_page(vaddr, lmi);
		else
			kernel_unmap_linear_page(vaddr, lmi);
	}
	local_irq_restore(flags);
}
#endif /* CONFIG_DEBUG_PAGEALLOC */