tlbflush_64.c

/*
 * arch/sh/mm/tlbflush_64.c
 *
 * Copyright (C) 2000, 2001  Paolo Alberelli
 * Copyright (C) 2003  Richard Curnow (/proc/tlb, bug fixes)
 * Copyright (C) 2003 - 2009  Paul Mundt
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */

#include <linux/signal.h>
#include <linux/rwsem.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/perf_counter.h>
#include <linux/interrupt.h>

#include <asm/system.h>
#include <asm/io.h>
#include <asm/tlb.h>
#include <asm/uaccess.h>
#include <asm/pgalloc.h>
#include <asm/mmu_context.h>

extern void die(const char *, struct pt_regs *, long);

#define PFLAG(val, flag)        (((val) & (flag)) ? #flag : "")
#define PPROT(flag)             PFLAG(pgprot_val(prot), flag)

static inline void print_prots(pgprot_t prot)
{
        printk("prot is 0x%08lx\n", pgprot_val(prot));
        printk("%s %s %s %s %s\n", PPROT(_PAGE_SHARED), PPROT(_PAGE_READ),
               PPROT(_PAGE_EXECUTE), PPROT(_PAGE_WRITE), PPROT(_PAGE_USER));
}

static inline void print_vma(struct vm_area_struct *vma)
{
        printk("vma start 0x%08lx\n", vma->vm_start);
        printk("vma end 0x%08lx\n", vma->vm_end);
        print_prots(vma->vm_page_prot);
        printk("vm_flags 0x%08lx\n", vma->vm_flags);
}

static inline void print_task(struct task_struct *tsk)
{
        printk("Task pid %d\n", task_pid_nr(tsk));
}

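/*
 * Walk the page tables for @address in @mm and return a pointer to the
 * pte, or NULL if any level of the walk is missing or the pte is not
 * present.  The only caller below, do_page_fault(), holds mmap_sem for
 * read across the walk.
 */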
static pte_t *lookup_pte(struct mm_struct *mm, unsigned long address)
{
        pgd_t *dir;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;
        pte_t entry;

        dir = pgd_offset(mm, address);
        if (pgd_none(*dir))
                return NULL;

        pud = pud_offset(dir, address);
        if (pud_none(*pud))
                return NULL;

        pmd = pmd_offset(pud, address);
        if (pmd_none(*pmd))
                return NULL;

        pte = pte_offset_kernel(pmd, address);
        entry = *pte;
        if (pte_none(entry) || !pte_present(entry))
                return NULL;

        return pte;
}

/*
 * This routine handles page faults.  It determines the address,
 * and the problem, and then passes it off to one of the appropriate
 * routines.
 */

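/*
 * writeaccess flags a data write and textaccess an instruction fetch;
 * address is the faulting effective address (presumably handed over by
 * the low-level TLB miss/protection fault paths).
 */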
asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long writeaccess,
                              unsigned long textaccess, unsigned long address)
{
        struct task_struct *tsk;
        struct mm_struct *mm;
        struct vm_area_struct *vma;
        const struct exception_table_entry *fixup;
        pte_t *pte;
        int fault;

        /* SIM
         * Note this is now called with interrupts still disabled
         * This is to cope with being called for a missing IO port
         * address with interrupts disabled. This should be fixed as
         * soon as we have a better 'fast path' miss handler.
         *
         * Plus take care how you try and debug this stuff.
         * For example, writing debug data to a port which you
         * have just faulted on is not going to work.
         */
        tsk = current;
        mm = tsk->mm;

        /* Not an IO address, so reenable interrupts */
        local_irq_enable();

        perf_swcounter_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address);

        /*
         * If we're in an interrupt or have no user
         * context, we must not take the fault..
         */
        if (in_atomic() || !mm)
                goto no_context;

        /* TLB misses upon some cache flushes get done under cli() */
        down_read(&mm->mmap_sem);

        vma = find_vma(mm, address);
        if (!vma) {
#ifdef DEBUG_FAULT
                print_task(tsk);
                printk("%s:%d fault, address is 0x%08lx PC %016Lx textaccess %ld writeaccess %ld\n",
                       __func__, __LINE__,
                       address, regs->pc, textaccess, writeaccess);
                show_regs(regs);
#endif
                goto bad_area;
        }

        if (vma->vm_start <= address)
                goto good_area;

        if (!(vma->vm_flags & VM_GROWSDOWN)) {
#ifdef DEBUG_FAULT
                print_task(tsk);
                printk("%s:%d fault, address is 0x%08lx PC %016Lx textaccess %ld writeaccess %ld\n",
                       __func__, __LINE__,
                       address, regs->pc, textaccess, writeaccess);
                show_regs(regs);
                print_vma(vma);
#endif
                goto bad_area;
        }

        if (expand_stack(vma, address)) {
#ifdef DEBUG_FAULT
                print_task(tsk);
                printk("%s:%d fault, address is 0x%08lx PC %016Lx textaccess %ld writeaccess %ld\n",
                       __func__, __LINE__,
                       address, regs->pc, textaccess, writeaccess);
                show_regs(regs);
#endif
                goto bad_area;
        }

        /*
         * Ok, we have a good vm_area for this memory access, so
         * we can handle it..
         */
good_area:
        if (textaccess) {
                if (!(vma->vm_flags & VM_EXEC))
                        goto bad_area;
        } else {
                if (writeaccess) {
                        if (!(vma->vm_flags & VM_WRITE))
                                goto bad_area;
                } else {
                        if (!(vma->vm_flags & VM_READ))
                                goto bad_area;
                }
        }

        /*
         * If for any reason at all we couldn't handle the fault,
         * make sure we exit gracefully rather than endlessly redo
         * the fault.
         */
survive:
        fault = handle_mm_fault(mm, vma, address, writeaccess ? FAULT_FLAG_WRITE : 0);
        if (unlikely(fault & VM_FAULT_ERROR)) {
                if (fault & VM_FAULT_OOM)
                        goto out_of_memory;
                else if (fault & VM_FAULT_SIGBUS)
                        goto do_sigbus;
                BUG();
        }

        if (fault & VM_FAULT_MAJOR) {
                tsk->maj_flt++;
                perf_swcounter_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0,
                                     regs, address);
        } else {
                tsk->min_flt++;
                perf_swcounter_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0,
                                     regs, address);
        }

        /* If we get here, the page fault has been handled.  Do the TLB refill
           now from the newly-setup PTE, to avoid having to fault again right
           away on the same instruction. */
        pte = lookup_pte(mm, address);
        if (!pte) {
                /* From empirical evidence, we can get here, due to
                   !pte_present(pte).  (e.g. if a swap-in occurs, and the page
                   is swapped back out again before the process that wanted it
                   gets rescheduled?) */
                goto no_pte;
        }

        __do_tlb_refill(address, textaccess, pte);

no_pte:
        up_read(&mm->mmap_sem);
        return;

        /*
         * Something tried to access memory that isn't in our memory map..
         * Fix it, but check if it's kernel or user first..
         */
bad_area:
#ifdef DEBUG_FAULT
        printk("fault:bad area\n");
#endif
        up_read(&mm->mmap_sem);

        if (user_mode(regs)) {
                static int count = 0;
                siginfo_t info;
                if (count < 4) {
                        /* This is really to help debug faults when starting
                         * usermode, so only need a few */
                        count++;
                        printk("user mode bad_area address=%08lx pid=%d (%s) pc=%08lx\n",
                               address, task_pid_nr(current), current->comm,
                               (unsigned long) regs->pc);
#if 0
                        show_regs(regs);
#endif
                }
                if (is_global_init(tsk)) {
                        panic("INIT had user mode bad_area\n");
                }
                tsk->thread.address = address;
                tsk->thread.error_code = writeaccess;
                info.si_signo = SIGSEGV;
                info.si_errno = 0;
                info.si_addr = (void *) address;
                force_sig_info(SIGSEGV, &info, tsk);
                return;
        }

no_context:
#ifdef DEBUG_FAULT
        printk("fault:No context\n");
#endif
        /* Are we prepared to handle this kernel fault? */
        fixup = search_exception_tables(regs->pc);
        if (fixup) {
                regs->pc = fixup->fixup;
                return;
        }

        /*
         * Oops. The kernel tried to access some bad page. We'll have to
         * terminate things with extreme prejudice.
         */
        if (address < PAGE_SIZE)
                printk(KERN_ALERT "Unable to handle kernel NULL pointer dereference");
        else
                printk(KERN_ALERT "Unable to handle kernel paging request");
        printk(" at virtual address %08lx\n", address);
        printk(KERN_ALERT "pc = %08Lx%08Lx\n", regs->pc >> 32, regs->pc & 0xffffffff);

        die("Oops", regs, writeaccess);
        do_exit(SIGKILL);

        /*
         * We ran out of memory, or some other thing happened to us that made
         * us unable to handle the page fault gracefully.
         */
out_of_memory:
        if (is_global_init(current)) {
                panic("INIT out of memory\n");
                yield();
                goto survive;
        }
        printk("fault:Out of memory\n");
        up_read(&mm->mmap_sem);
        if (is_global_init(current)) {
                yield();
                down_read(&mm->mmap_sem);
                goto survive;
        }
        printk("VM: killing process %s\n", tsk->comm);
        if (user_mode(regs))
                do_group_exit(SIGKILL);
        goto no_context;

do_sigbus:
        printk("fault:Do sigbus\n");
        up_read(&mm->mmap_sem);

        /*
         * Send a sigbus, regardless of whether we were in kernel
         * or user mode.
         */
        tsk->thread.address = address;
        tsk->thread.error_code = writeaccess;
        tsk->thread.trap_no = 14;
        force_sig(SIGBUS, tsk);

        /* Kernel mode? Handle exceptions or die */
        if (!user_mode(regs))
                goto no_context;
}

void update_mmu_cache(struct vm_area_struct *vma,
                      unsigned long address, pte_t pte)
{
        /*
         * This appears to get called once for every pte entry that gets
         * established => I don't think it's efficient to try refilling the
         * TLBs with the pages - some may not get accessed even. Also, for
         * executable pages, it is impossible to determine reliably here which
         * TLB they should be mapped into (or both even).
         *
         * So, just do nothing here and handle faults on demand. In the
         * TLBMISS handling case, the refill is now done anyway after the pte
         * has been fixed up, so that deals with most useful cases.
         */
}

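/*
 * Flush a single page translation for the given ASID from both the ITLB
 * and the DTLB.  Each slot's PTEH value is read back with getcfg and
 * compared against the expected (sign-extended EPN | ASID | valid)
 * pattern; only an exact match gets its slot invalidated.
 */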
void local_flush_tlb_one(unsigned long asid, unsigned long page)
{
        unsigned long long match, pteh = 0, lpage;
        unsigned long tlb;

        /*
         * Sign-extend based on neff.
         */
        lpage = (page & NEFF_SIGN) ? (page | NEFF_MASK) : page;
        match = (asid << PTEH_ASID_SHIFT) | PTEH_VALID;
        match |= lpage;

        for_each_itlb_entry(tlb) {
                asm volatile ("getcfg   %1, 0, %0"
                              : "=r" (pteh)
                              : "r" (tlb) );

                if (pteh == match) {
                        __flush_tlb_slot(tlb);
                        break;
                }
        }

        for_each_dtlb_entry(tlb) {
                asm volatile ("getcfg   %1, 0, %0"
                              : "=r" (pteh)
                              : "r" (tlb) );

                if (pteh == match) {
                        __flush_tlb_slot(tlb);
                        break;
                }
        }
}

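/*
 * Flush the translation for a single user page, using the current ASID
 * (get_asid()).  Interrupts are disabled around the call since
 * local_flush_tlb_one() walks the TLB slots one at a time.
 */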
void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
        unsigned long flags;

        if (vma->vm_mm) {
                page &= PAGE_MASK;

                local_irq_save(flags);
                local_flush_tlb_one(get_asid(), page);
                local_irq_restore(flags);
        }
}

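/*
 * Flush every ITLB/DTLB entry whose ASID matches @vma's mm and whose
 * effective page number falls within [start, end].  If the mm has no
 * context on this CPU, nothing can be in the TLB for it and we bail out
 * early.
 */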
void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
                           unsigned long end)
{
        unsigned long flags;
        unsigned long long match, pteh = 0, pteh_epn, pteh_low;
        unsigned long tlb;
        unsigned int cpu = smp_processor_id();
        struct mm_struct *mm;

        mm = vma->vm_mm;
        if (cpu_context(cpu, mm) == NO_CONTEXT)
                return;

        local_irq_save(flags);

        start &= PAGE_MASK;
        end &= PAGE_MASK;

        match = (cpu_asid(cpu, mm) << PTEH_ASID_SHIFT) | PTEH_VALID;

        /* Flush ITLB */
        for_each_itlb_entry(tlb) {
                asm volatile ("getcfg   %1, 0, %0"
                              : "=r" (pteh)
                              : "r" (tlb) );

                pteh_epn = pteh & PAGE_MASK;
                pteh_low = pteh & ~PAGE_MASK;

                if (pteh_low == match && pteh_epn >= start && pteh_epn <= end)
                        __flush_tlb_slot(tlb);
        }

        /* Flush DTLB */
        for_each_dtlb_entry(tlb) {
                asm volatile ("getcfg   %1, 0, %0"
                              : "=r" (pteh)
                              : "r" (tlb) );

                pteh_epn = pteh & PAGE_MASK;
                pteh_low = pteh & ~PAGE_MASK;

                if (pteh_low == match && pteh_epn >= start && pteh_epn <= end)
                        __flush_tlb_slot(tlb);
        }

        local_irq_restore(flags);
}

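/*
 * Flush all of @mm's translations.  Rather than scanning the TLB, the
 * mm's context is simply retired (set to NO_CONTEXT); a fresh ASID is
 * then assigned when the context is next activated, so entries tagged
 * with the stale ASID can no longer match.  If @mm is the current mm,
 * re-activate it immediately.
 */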
void local_flush_tlb_mm(struct mm_struct *mm)
{
        unsigned long flags;
        unsigned int cpu = smp_processor_id();

        if (cpu_context(cpu, mm) == NO_CONTEXT)
                return;

        local_irq_save(flags);

        cpu_context(cpu, mm) = NO_CONTEXT;
        if (mm == current->mm)
                activate_context(mm, cpu);

        local_irq_restore(flags);
}

void local_flush_tlb_all(void)
{
        /* Invalidate all, including shared pages, excluding fixed TLBs */
        unsigned long flags, tlb;

        local_irq_save(flags);

        /* Flush each ITLB entry */
        for_each_itlb_entry(tlb)
                __flush_tlb_slot(tlb);

        /* Flush each DTLB entry */
        for_each_dtlb_entry(tlb)
                __flush_tlb_slot(tlb);

        local_irq_restore(flags);
}

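/*
 * Flush kernel mappings in the range [start, end].  No range-based
 * kernel flush is implemented yet (see the FIXME below), so this simply
 * falls back to a full TLB flush.
 */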
void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
        /* FIXME: Optimize this later.. */
        flush_tlb_all();
}