/*
 * PowerPC64 port by Mike Corrigan and Dave Engebretsen
 *   {mikejc|engebret}@us.ibm.com
 *
 *    Copyright (c) 2000 Mike Corrigan <mikejc@us.ibm.com>
 *
 * SMP scalability work:
 *    Copyright (C) 2001 Anton Blanchard <anton@au.ibm.com>, IBM
 *
 *    Module name: htab.c
 *
 *    Description:
 *      PowerPC Hashed Page Table functions
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#undef DEBUG

#include <linux/config.h>
#include <linux/spinlock.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/proc_fs.h>
#include <linux/stat.h>
#include <linux/sysctl.h>
#include <linux/ctype.h>
#include <linux/cache.h>
#include <linux/init.h>
#include <linux/signal.h>

#include <asm/ppcdebug.h>
#include <asm/processor.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>
#include <asm/page.h>
#include <asm/types.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/machdep.h>
#include <asm/lmb.h>
#include <asm/abs_addr.h>
#include <asm/tlbflush.h>
#include <asm/io.h>
#include <asm/eeh.h>
#include <asm/tlb.h>
#include <asm/cacheflush.h>
#include <asm/cputable.h>
#include <asm/sections.h>
#ifdef DEBUG
#define DBG(fmt...) udbg_printf(fmt)
#else
#define DBG(fmt...)
#endif

/*
 * Note:  pte   --> Linux PTE
 *        HPTE  --> PowerPC Hashed Page Table Entry
 *
 * Execution context:
 *   htab_initialize is called with the MMU off (of course), but
 *   the kernel has been copied down to zero so it can directly
 *   reference global data.  At this point it is very difficult
 *   to print debug info.
 */
#ifdef CONFIG_U3_DART
extern unsigned long dart_tablebase;
#endif /* CONFIG_U3_DART */

HPTE *htab_address;
unsigned long htab_hash_mask;

extern unsigned long _SDR1;

#define KB (1024)
#define MB (1024*KB)

static inline void loop_forever(void)
{
        volatile unsigned long x = 1;
        for (; x; x |= 1)
                ;
}
#ifdef CONFIG_PPC_MULTIPLATFORM
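/*
 * Create bolted hash table entries for the range [start, end).  The
 * walk steps 16MB at a time when large pages are in use, 4KB
 * otherwise.  On LPAR the insert goes through the hypervisor;
 * otherwise the hash table is written directly.
 */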
static inline void create_pte_mapping(unsigned long start, unsigned long end,
                                      unsigned long mode, int large)
{
        unsigned long addr;
        unsigned int step;
        unsigned long tmp_mode;

        if (large)
                step = 16*MB;
        else
                step = 4*KB;

        for (addr = start; addr < end; addr += step) {
                unsigned long vpn, hash, hpteg;
                unsigned long vsid = get_kernel_vsid(addr);
                unsigned long va = (vsid << 28) | (addr & 0xfffffff);
                int ret;

                if (large)
                        vpn = va >> HPAGE_SHIFT;
                else
                        vpn = va >> PAGE_SHIFT;

                tmp_mode = mode;

                /* Make non-kernel text non-executable */
                if (!in_kernel_text(addr))
                        tmp_mode = mode | HW_NO_EXEC;

                hash = hpt_hash(vpn, large);

                hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP);

#ifdef CONFIG_PPC_PSERIES
                if (systemcfg->platform & PLATFORM_LPAR)
                        ret = pSeries_lpar_hpte_insert(hpteg, va,
                                virt_to_abs(addr) >> PAGE_SHIFT,
                                0, tmp_mode, 1, large);
                else
#endif /* CONFIG_PPC_PSERIES */
                        ret = native_hpte_insert(hpteg, va,
                                virt_to_abs(addr) >> PAGE_SHIFT,
                                0, tmp_mode, 1, large);

                if (ret == -1) {
                        ppc64_terminate_msg(0x20, "create_pte_mapping");
                        loop_forever();
                }
        }
}
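/*
 * Sizing, as derived from the code below: ppc64_pft_size is the log2
 * of the hash table size in bytes.  A PTEG holds HPTES_PER_GROUP (8)
 * HPTEs of 16 bytes each, i.e. 128 bytes, hence the PTEG count is
 * htab_size_bytes >> 7.  SDR1's HTABSIZE field encodes
 * log2(size) - 18 = __ilog2(pteg_count) - 11; since the table is
 * aligned to its size, adding that value to the table's real address
 * is equivalent to OR-ing it in.
 */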
void __init htab_initialize(void)
{
        unsigned long table, htab_size_bytes;
        unsigned long pteg_count;
        unsigned long mode_rw;
        int i, use_largepages = 0;
        unsigned long base = 0, size = 0;
        extern unsigned long tce_alloc_start, tce_alloc_end;

        DBG(" -> htab_initialize()\n");

        /*
         * Calculate the required size of the htab.  We want the number of
         * PTEGs to equal one half the number of real pages.
         */
        htab_size_bytes = 1UL << ppc64_pft_size;
        pteg_count = htab_size_bytes >> 7;

        /* For debug, make the HTAB 1/8 as big as it normally would be. */
        ifppcdebug(PPCDBG_HTABSIZE) {
                pteg_count >>= 3;
                htab_size_bytes = pteg_count << 7;
        }

        htab_hash_mask = pteg_count - 1;

        if (systemcfg->platform & PLATFORM_LPAR) {
                /* Using a hypervisor which owns the htab */
                htab_address = NULL;
                _SDR1 = 0;
        } else {
                /* Find storage for the HPT.  Must be contiguous in
                 * the absolute address space.
                 */
                table = lmb_alloc(htab_size_bytes, htab_size_bytes);

                DBG("Hash table allocated at %lx, size: %lx\n", table,
                    htab_size_bytes);

                if (!table) {
                        ppc64_terminate_msg(0x20, "hpt space");
                        loop_forever();
                }
                htab_address = abs_to_virt(table);

                /* htab absolute addr + encoded htabsize */
                _SDR1 = table + __ilog2(pteg_count) - 11;

                /* Initialize the HPT with no entries */
                memset((void *)table, 0, htab_size_bytes);
        }

        mode_rw = _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_COHERENT | PP_RWXX;

        /* On U3 based machines, we need to reserve the DART area and
         * _NOT_ map it to avoid cache paradoxes as it's remapped non
         * cacheable later on
         */
        if (cpu_has_feature(CPU_FTR_16M_PAGE))
                use_largepages = 1;

        /* Create the bolted linear mapping in the hash table */
        for (i = 0; i < lmb.memory.cnt; i++) {
                base = lmb.memory.region[i].physbase + KERNELBASE;
                size = lmb.memory.region[i].size;

                DBG("creating mapping for region: %lx : %lx\n", base, size);

#ifdef CONFIG_U3_DART
                /* Do not map the DART space.  Fortunately, it will be
                 * aligned in such a way that it will not cross two lmb
                 * regions and will fit within a single 16MB page.
                 * The DART space is assumed to be a full 16MB region even
                 * if we only use 2MB of that space.  We will use more of
                 * it later for AGP GART.  We have to use a full 16MB
                 * large page.
                 */
                DBG("DART base: %lx\n", dart_tablebase);

                if (dart_tablebase != 0 && dart_tablebase >= base
                    && dart_tablebase < (base + size)) {
                        if (base != dart_tablebase)
                                create_pte_mapping(base, dart_tablebase,
                                                   mode_rw, use_largepages);
                        if ((base + size) > (dart_tablebase + 16*MB))
                                create_pte_mapping(dart_tablebase + 16*MB,
                                                   base + size,
                                                   mode_rw, use_largepages);
                        continue;
                }
#endif /* CONFIG_U3_DART */
                create_pte_mapping(base, base + size, mode_rw,
                                   use_largepages);
        }

        /*
         * If we have a memory_limit and we've allocated TCEs then we need
         * to explicitly map the TCE area at the top of RAM.  We also cope
         * with the case that the TCEs start below memory_limit.
         * tce_alloc_start/end are 16MB aligned so the mapping should work
         * for either 4K or 16MB pages.
         */
        if (tce_alloc_start) {
                tce_alloc_start += KERNELBASE;
                tce_alloc_end += KERNELBASE;

                if (base + size >= tce_alloc_start)
                        tce_alloc_start = base + size + 1;

                create_pte_mapping(tce_alloc_start, tce_alloc_end,
                                   mode_rw, use_largepages);
        }

        DBG(" <- htab_initialize()\n");
}
#undef KB
#undef MB
#endif /* CONFIG_PPC_MULTIPLATFORM */
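/*
 * Lazy icache flushing: PG_arch_1 acts as an "icache coherent" flag.
 * On an instruction access fault (trap 0x400) the page is flushed and
 * the flag set; on a data access the page is instead mapped no-exec,
 * so a later instruction fetch from it will fault and trigger the
 * flush then.
 */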
/*
 * Called by asm hashtable.S for doing lazy icache flush
 */
unsigned int hash_page_do_lazy_icache(unsigned int pp, pte_t pte, int trap)
{
        struct page *page;

        if (!pfn_valid(pte_pfn(pte)))
                return pp;

        page = pte_page(pte);

        /* page is dirty */
        if (!test_bit(PG_arch_1, &page->flags) && !PageReserved(page)) {
                if (trap == 0x400) {
                        __flush_dcache_icache(page_address(page));
                        set_bit(PG_arch_1, &page->flags);
                } else
                        pp |= HW_NO_EXEC;
        }
        return pp;
}
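/*
 * Overview of hash_page below: classify the faulting address by
 * region, pick the VSID, then turn the Linux PTE into a hash table
 * entry.  If this mm has only ever run on the current CPU
 * (cpu_vm_mask equals just this CPU), "local" is set so invalidations
 * can use a cheaper CPU-local TLB flush.
 */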
/* Result code is:
 *  0 - handled
 *  1 - normal page fault
 * -1 - critical hash insertion error
 */
int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
{
        void *pgdir;
        unsigned long vsid;
        struct mm_struct *mm;
        pte_t *ptep;
        int ret;
        int user_region = 0;
        int local = 0;
        cpumask_t tmp;

        if ((ea & ~REGION_MASK) > EADDR_MASK)
                return 1;

        switch (REGION_ID(ea)) {
        case USER_REGION_ID:
                user_region = 1;
                mm = current->mm;
                if (!mm)
                        return 1;

                vsid = get_vsid(mm->context.id, ea);
                break;
        case VMALLOC_REGION_ID:
                mm = &init_mm;
                vsid = get_kernel_vsid(ea);
                break;
#if 0
        case KERNEL_REGION_ID:
                /*
                 * Should never get here - entire 0xC0... region is bolted.
                 * Send the problem up to do_page_fault
                 */
#endif
        default:
                /* Not a valid range
                 * Send the problem up to do_page_fault
                 */
                return 1;
        }

        pgdir = mm->pgd;
        if (pgdir == NULL)
                return 1;

        tmp = cpumask_of_cpu(smp_processor_id());
        if (user_region && cpus_equal(mm->cpu_vm_mask, tmp))
                local = 1;

        /* Is this a huge page ? */
        if (unlikely(in_hugepage_area(mm->context, ea)))
                ret = hash_huge_page(mm, access, ea, vsid, local);
        else {
                ptep = find_linux_pte(pgdir, ea);
                if (ptep == NULL)
                        return 1;
                ret = __hash_page(ea, access, vsid, ptep, trap, local);
        }

        return ret;
}
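/*
 * The PTE remembers where its HPTE went: _PAGE_SECONDARY records that
 * the secondary hash was used (hash = ~hash), and _PAGE_GROUP_IX is
 * the entry's index within its group of HPTES_PER_GROUP slots, so the
 * exact slot can be recomputed here without searching the group.
 */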
void flush_hash_page(unsigned long context, unsigned long ea, pte_t pte,
                     int local)
{
        unsigned long vsid, vpn, va, hash, secondary, slot;
        unsigned long huge = pte_huge(pte);

        if (ea < KERNELBASE)
                vsid = get_vsid(context, ea);
        else
                vsid = get_kernel_vsid(ea);

        va = (vsid << 28) | (ea & 0x0fffffff);
        if (huge)
                vpn = va >> HPAGE_SHIFT;
        else
                vpn = va >> PAGE_SHIFT;

        hash = hpt_hash(vpn, huge);
        secondary = (pte_val(pte) & _PAGE_SECONDARY) >> 15;
        if (secondary)
                hash = ~hash;
        slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
        slot += (pte_val(pte) & _PAGE_GROUP_IX) >> 12;

        ppc_md.hpte_invalidate(slot, va, huge, local);
}
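/*
 * Batched flush: platforms that can invalidate a whole batch in one
 * call provide ppc_md.flush_hash_range; otherwise fall back to
 * flushing the per-cpu TLB batch one page at a time.
 */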
void flush_hash_range(unsigned long context, unsigned long number, int local)
{
        if (ppc_md.flush_hash_range) {
                ppc_md.flush_hash_range(context, number, local);
        } else {
                int i;
                struct ppc64_tlb_batch *batch =
                        &__get_cpu_var(ppc64_tlb_batch);

                for (i = 0; i < number; i++)
                        flush_hash_page(context, batch->addr[i],
                                        batch->pte[i], local);
        }
}
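/*
 * Patch the instruction at insn_addr into a "bl" to func.  0x48000001
 * is the I-form branch opcode with AA=0 and LK=1; the 26-bit,
 * word-aligned offset is masked into the LI field.  func points to an
 * ELF function descriptor, so dereferencing its first doubleword
 * yields the actual entry point.
 */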
static inline void make_bl(unsigned int *insn_addr, void *func)
{
        unsigned long funcp = *((unsigned long *)func);
        int offset = funcp - (unsigned long)insn_addr;

        *insn_addr = (unsigned int)(0x48000001 | (offset & 0x03fffffc));
        flush_icache_range((unsigned long)insn_addr,
                           (unsigned long)insn_addr + 4);
}
/*
 * low_hash_fault is called when the low level hash code failed
 * to insert a PTE due to a hypervisor error
 */
void low_hash_fault(struct pt_regs *regs, unsigned long address)
{
        if (user_mode(regs)) {
                siginfo_t info;

                info.si_signo = SIGBUS;
                info.si_errno = 0;
                info.si_code = BUS_ADRERR;
                info.si_addr = (void __user *)address;
                force_sig_info(SIGBUS, &info, current);
                return;
        }
        bad_page_fault(regs, address, SIGBUS);
}
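/*
 * Once the platform's hpte_* operations are known, patch the call
 * sites in the assembly hash path (hashtable.S) into direct branches,
 * avoiding an indirect call through ppc_md on the fault path.
 */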
void __init htab_finish_init(void)
{
        extern unsigned int *htab_call_hpte_insert1;
        extern unsigned int *htab_call_hpte_insert2;
        extern unsigned int *htab_call_hpte_remove;
        extern unsigned int *htab_call_hpte_updatepp;

        make_bl(htab_call_hpte_insert1, ppc_md.hpte_insert);
        make_bl(htab_call_hpte_insert2, ppc_md.hpte_insert);
        make_bl(htab_call_hpte_remove, ppc_md.hpte_remove);
        make_bl(htab_call_hpte_updatepp, ppc_md.hpte_updatepp);
}