
/*
 *  linux/arch/m32r/mm/fault.c
 *
 *  Copyright (c) 2001, 2002  Hitoshi Yamamoto, and H. Kondo
 *  Copyright (c) 2004  Naoto Sugai, NIIBE Yutaka
 *
 *  Some code taken from i386 version.
 *    Copyright (C) 1995  Linus Torvalds
 */

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/tty.h>
#include <linux/vt_kern.h>	/* For unblank_screen() */
#include <linux/highmem.h>
#include <linux/module.h>

#include <asm/m32r.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/hardirq.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>

extern void die(const char *, struct pt_regs *, long);

#ifndef CONFIG_SMP
asmlinkage unsigned int tlb_entry_i_dat;
asmlinkage unsigned int tlb_entry_d_dat;
#define tlb_entry_i tlb_entry_i_dat
#define tlb_entry_d tlb_entry_d_dat
#else
unsigned int tlb_entry_i_dat[NR_CPUS];
unsigned int tlb_entry_d_dat[NR_CPUS];
#define tlb_entry_i tlb_entry_i_dat[smp_processor_id()]
#define tlb_entry_d tlb_entry_d_dat[smp_processor_id()]
#endif
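/*
 * Note: tlb_entry_i/tlb_entry_d are the round-robin replacement
 * indices for the ITLB and DTLB (one copy per CPU on SMP); they are
 * consumed by update_mmu_cache() below.
 */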

extern void init_tlb(void);

/*
 * Unlock any spinlocks which will prevent us from getting the
 * message out
 */
void bust_spinlocks(int yes)
{
	int loglevel_save = console_loglevel;

	if (yes) {
		oops_in_progress = 1;
		return;
	}
#ifdef CONFIG_VT
	unblank_screen();
#endif
	oops_in_progress = 0;
	/*
	 * OK, the message is on the console.  Now we call printk()
	 * without oops_in_progress set so that printk will give klogd
	 * a poke.  Hold onto your hats...
	 */
	console_loglevel = 15;	/* NMI oopser may have shut the console up */
	printk(" ");
	console_loglevel = loglevel_save;
}

/*======================================================================*
 * do_page_fault()
 *======================================================================*
 * This routine handles page faults.  It determines the address,
 * and the problem, and then passes it off to one of the appropriate
 * routines.
 *
 * ARGUMENT:
 *  regs       : M32R SP reg.
 *  error_code : See below
 *  address    : M32R MMU MDEVA reg. (Operand ACE)
 *             : M32R BPC reg. (Instruction ACE)
 *
 * error_code :
 *  bit 0 == 0 means no page found, 1 means protection fault
 *  bit 1 == 0 means read, 1 means write
 *  bit 2 == 0 means kernel, 1 means user-mode
 *  bit 3 == 0 means data, 1 means instruction
 *======================================================================*/
#define ACE_PROTECTION		1
#define ACE_WRITE		2
#define ACE_USERMODE		4
#define ACE_INSTRUCTION		8
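
/*
 * For illustration, from the bit assignments above: a user-mode write
 * to a not-present page arrives as error_code == 6
 * (ACE_WRITE | ACE_USERMODE), and a user-mode instruction fetch from a
 * present but non-executable page as error_code == 13
 * (ACE_PROTECTION | ACE_USERMODE | ACE_INSTRUCTION).
 */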

asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long error_code,
			      unsigned long address)
{
	struct task_struct *tsk;
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	unsigned long page, addr;
	int write;
	siginfo_t info;

	/*
	 * If the BPSW IE bit is set --> set the PSW IE bit
	 */
	if (regs->psw & M32R_PSW_BIE)
		local_irq_enable();

	tsk = current;

	info.si_code = SEGV_MAPERR;

	/*
	 * We fault-in kernel-space virtual memory on-demand. The
	 * 'reference' page table is init_mm.pgd.
	 *
	 * NOTE! We MUST NOT take any locks for this case. We may
	 * be in an interrupt or a critical region, and should
	 * only copy the information from the master page table,
	 * nothing more.
	 *
	 * This verifies that the fault happens in kernel space
	 * (error_code & ACE_USERMODE) == 0, and that the fault was not a
	 * protection error (error_code & ACE_PROTECTION) == 0.
	 */
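	/*
	 * A typical instance of the case described above: the first touch
	 * of a fresh vmalloc() or ioremap() mapping, whose page-table
	 * entries exist only in init_mm.pgd until the vmalloc_fault code
	 * below copies them into the active page table.
	 */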
	if (address >= TASK_SIZE && !(error_code & ACE_USERMODE))
		goto vmalloc_fault;

	mm = tsk->mm;

	/*
	 * If we're in an interrupt or have no user context or are running in an
	 * atomic region then we must not take the fault..
	 */
	if (in_atomic() || !mm)
		goto bad_area_nosemaphore;

	/* When running in the kernel we expect faults to occur only to
	 * addresses in user space.  All other faults represent errors in the
	 * kernel and should generate an OOPS.  Unfortunately, in the case of an
	 * erroneous fault occurring in a code path which already holds mmap_sem
	 * we will deadlock attempting to validate the fault against the
	 * address space.  Luckily the kernel only validly references user
	 * space from well defined areas of code, which are listed in the
	 * exceptions table.
	 *
	 * As the vast majority of faults will be valid we will only perform
	 * the source reference check when there is a possibility of a deadlock.
	 * Attempt to lock the address space, if we cannot we then validate the
	 * source.  If this is invalid we can skip the address space check,
	 * thus avoiding the deadlock.
	 */
	if (!down_read_trylock(&mm->mmap_sem)) {
		if ((error_code & ACE_USERMODE) == 0 &&
		    !search_exception_tables(regs->psw))
			goto bad_area_nosemaphore;
		down_read(&mm->mmap_sem);
	}

	vma = find_vma(mm, address);
	if (!vma)
		goto bad_area;
	if (vma->vm_start <= address)
		goto good_area;
	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;

	if (error_code & ACE_USERMODE) {
		/*
		 * accessing the stack below "spu" is always a bug.
		 * The "+ 4" is there due to the push instruction
		 * doing pre-decrement on the stack and that
		 * doesn't show up until later..
		 */
		if (address + 4 < regs->spu)
			goto bad_area;
	}
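
	/*
	 * Worked example with illustrative values: if spu == 0x00800000, a
	 * push pre-decrements the stack pointer and faults at 0x007ffffc;
	 * 0x007ffffc + 4 is not below spu, so the check above accepts it
	 * and expand_stack() below may grow the VMA to cover the access.
	 */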
	if (expand_stack(vma, address))
		goto bad_area;
/*
 * Ok, we have a good vm_area for this memory access, so
 * we can handle it..
 */
good_area:
	info.si_code = SEGV_ACCERR;
	write = 0;
	switch (error_code & (ACE_WRITE|ACE_PROTECTION)) {
		default:	/* 3: write, present */
			/* fall through */
		case ACE_WRITE:	/* write, not present */
			if (!(vma->vm_flags & VM_WRITE))
				goto bad_area;
			write++;
			break;
		case ACE_PROTECTION:	/* read, present */
		case 0:		/* read, not present */
			if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
				goto bad_area;
	}

	/*
	 * For instruction access exception, check if the area is executable
	 */
	if ((error_code & ACE_INSTRUCTION) && !(vma->vm_flags & VM_EXEC))
		goto bad_area;

survive:
	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
	addr = (address & PAGE_MASK);
	set_thread_fault_code(error_code);
	switch (handle_mm_fault(mm, vma, addr, write)) {
		case VM_FAULT_MINOR:
			tsk->min_flt++;
			break;
		case VM_FAULT_MAJOR:
			tsk->maj_flt++;
			break;
		case VM_FAULT_SIGBUS:
			goto do_sigbus;
		case VM_FAULT_OOM:
			goto out_of_memory;
		default:
			BUG();
	}
	set_thread_fault_code(0);
	up_read(&mm->mmap_sem);
	return;

/*
 * Something tried to access memory that isn't in our memory map..
 * Fix it, but check if it's kernel or user first..
 */
bad_area:
	up_read(&mm->mmap_sem);

bad_area_nosemaphore:
	/* User mode accesses just cause a SIGSEGV */
	if (error_code & ACE_USERMODE) {
		tsk->thread.address = address;
		/* bit 0 records whether the address was in kernel space */
		tsk->thread.error_code = error_code | (address >= TASK_SIZE);
		tsk->thread.trap_no = 14;
		info.si_signo = SIGSEGV;
		info.si_errno = 0;
		/* info.si_code has been set above */
		info.si_addr = (void __user *)address;
		force_sig_info(SIGSEGV, &info, tsk);
		return;
	}

no_context:
	/* Are we prepared to handle this kernel fault? */
	if (fixup_exception(regs))
		return;

	/*
	 * Oops. The kernel tried to access some bad page. We'll have to
	 * terminate things with extreme prejudice.
	 */
	bust_spinlocks(1);

	if (address < PAGE_SIZE)
		printk(KERN_ALERT "Unable to handle kernel NULL pointer dereference");
	else
		printk(KERN_ALERT "Unable to handle kernel paging request");
	printk(" at virtual address %08lx\n", address);
	printk(KERN_ALERT " printing bpc:\n");
	printk("%08lx\n", regs->bpc);
	page = *(unsigned long *)MPTB;
	page = ((unsigned long *) page)[address >> PGDIR_SHIFT];
	printk(KERN_ALERT "*pde = %08lx\n", page);
	if (page & _PAGE_PRESENT) {
		page &= PAGE_MASK;
		address &= 0x003ff000;	/* bits 21..12: index into the page table */
		page = ((unsigned long *) __va(page))[address >> PAGE_SHIFT];
		printk(KERN_ALERT "*pte = %08lx\n", page);
	}
	die("Oops", regs, error_code);
	bust_spinlocks(0);
	do_exit(SIGKILL);

/*
 * We ran out of memory, or some other thing happened to us that made
 * us unable to handle the page fault gracefully.
 */
out_of_memory:
	up_read(&mm->mmap_sem);
	if (is_init(tsk)) {
		yield();
		down_read(&mm->mmap_sem);
		goto survive;
	}
	printk("VM: killing process %s\n", tsk->comm);
	if (error_code & ACE_USERMODE)
		do_exit(SIGKILL);
	goto no_context;

do_sigbus:
	up_read(&mm->mmap_sem);

	/* Kernel mode? Handle exception or die */
	if (!(error_code & ACE_USERMODE))
		goto no_context;

	tsk->thread.address = address;
	tsk->thread.error_code = error_code;
	tsk->thread.trap_no = 14;
	info.si_signo = SIGBUS;
	info.si_errno = 0;
	info.si_code = BUS_ADRERR;
	info.si_addr = (void __user *)address;
	force_sig_info(SIGBUS, &info, tsk);
	return;

vmalloc_fault:
	{
		/*
		 * Synchronize this task's top level page-table
		 * with the 'reference' page table.
		 *
		 * Do _not_ use "tsk" here. We might be inside
		 * an interrupt in the middle of a task switch..
		 */
		int offset = pgd_index(address);
		pgd_t *pgd, *pgd_k;
		pmd_t *pmd, *pmd_k;
		pte_t *pte_k;

		pgd = (pgd_t *)*(unsigned long *)MPTB;
		pgd = offset + (pgd_t *)pgd;
		pgd_k = init_mm.pgd + offset;

		if (!pgd_present(*pgd_k))
			goto no_context;

		/*
		 * set_pgd(pgd, *pgd_k); here would be useless on PAE
		 * and redundant with the set_pmd() on non-PAE.
		 */
		pmd = pmd_offset(pgd, address);
		pmd_k = pmd_offset(pgd_k, address);
		if (!pmd_present(*pmd_k))
			goto no_context;
		set_pmd(pmd, *pmd_k);

		pte_k = pte_offset_kernel(pmd_k, address);
		if (!pte_present(*pte_k))
			goto no_context;

		addr = (address & PAGE_MASK);
		set_thread_fault_code(error_code);
		update_mmu_cache(NULL, addr, *pte_k);
		set_thread_fault_code(0);
		return;
	}
}

/*======================================================================*
 * update_mmu_cache()
 *======================================================================*/
#define TLB_MASK	(NR_TLB_ENTRIES - 1)
#define ITLB_END	(unsigned long *)(ITLB_BASE + (NR_TLB_ENTRIES * 8))
#define DTLB_END	(unsigned long *)(DTLB_BASE + (NR_TLB_ENTRIES * 8))
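
/*
 * Layout note, inferred from the code below: each TLB entry is two
 * 32-bit words, a tag word (virtual page address | ASID) followed by a
 * data word holding the PTE value, hence "NR_TLB_ENTRIES * 8" above.
 * Bit 1 of the data word is the valid bit tested by the replacement
 * loop in update_mmu_cache().
 */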

void update_mmu_cache(struct vm_area_struct *vma, unsigned long vaddr,
		      pte_t pte)
{
	volatile unsigned long *entry1, *entry2;
	unsigned long pte_data, flags;
	unsigned int *entry_dat;
	int inst = get_thread_fault_code() & ACE_INSTRUCTION;
	int i;

	/* Ptrace may call this routine. */
	if (vma && current->active_mm != vma->vm_mm)
		return;

	local_irq_save(flags);

	vaddr = (vaddr & PAGE_MASK) | get_asid();

	pte_data = pte_val(pte);

#ifdef CONFIG_CHIP_OPSP
	entry1 = (unsigned long *)ITLB_BASE;
	for (i = 0; i < NR_TLB_ENTRIES; i++) {
		if (*entry1++ == vaddr) {
			set_tlb_data(entry1, pte_data);
			break;
		}
		entry1++;
	}
	entry2 = (unsigned long *)DTLB_BASE;
	for (i = 0; i < NR_TLB_ENTRIES; i++) {
		if (*entry2++ == vaddr) {
			set_tlb_data(entry2, pte_data);
			break;
		}
		entry2++;
	}
#else
	/*
	 * Update TLB entries
	 *  entry1: ITLB entry address
	 *  entry2: DTLB entry address
	 * (The per-instruction comments below are an interpretation of
	 *  the MMU register protocol.)
	 */
	__asm__ __volatile__ (
		"seth	%0, #high(%4)	\n\t"	/* %0 = MMU register base */
		"st	%2, @(%5, %0)	\n\t"	/* set search virtual address */
		"ldi	%1, #1		\n\t"
		"st	%1, @(%6, %0)	\n\t"	/* kick off the TLB lookup */
		"add3	r4, %0, %7	\n\t"
		".fillinsn		\n"
		"1:			\n\t"
		"ld	%1, @(%6, %0)	\n\t"	/* busy-wait until the */
		"bnez	%1, 1b		\n\t"	/* lookup completes */
		"ld	%0, @r4+	\n\t"	/* matched ITLB entry address */
		"ld	%1, @r4		\n\t"	/* matched DTLB entry address */
		"st	%3, @+%0	\n\t"	/* write pte_data into each */
		"st	%3, @+%1	\n\t"	/* entry's data word */
		: "=&r" (entry1), "=&r" (entry2)
		: "r" (vaddr), "r" (pte_data), "i" (MMU_REG_BASE),
		  "i" (MSVA_offset), "i" (MTOP_offset), "i" (MIDXI_offset)
		: "r4", "memory"
	);
#endif

	if ((!inst && entry2 >= DTLB_END) || (inst && entry1 >= ITLB_END))
		goto notfound;

found:
	local_irq_restore(flags);
	return;

	/* Valid entry not found */
notfound:
	/*
	 * Update ITLB or DTLB entry
	 *  entry1: TLB entry address
	 *  entry2: TLB base address
	 */
	if (!inst) {
		entry2 = (unsigned long *)DTLB_BASE;
		entry_dat = &tlb_entry_d;
	} else {
		entry2 = (unsigned long *)ITLB_BASE;
		entry_dat = &tlb_entry_i;
	}
	/* Search backwards from the most recently allocated slot for a
	   free (invalid) entry */
	entry1 = entry2 + (((*entry_dat - 1) & TLB_MASK) << 1);

	for (i = 0 ; i < NR_TLB_ENTRIES ; i++) {
		if (!(entry1[1] & 2))	/* Valid bit check */
			break;

		if (entry1 != entry2)
			entry1 -= 2;
		else
			entry1 += TLB_MASK << 1;	/* wrap to the last entry */
	}

	if (i >= NR_TLB_ENTRIES) {	/* Empty entry not found */
		/* Evict the round-robin victim and advance the pointer */
		entry1 = entry2 + (*entry_dat << 1);
		*entry_dat = (*entry_dat + 1) & TLB_MASK;
	}
	*entry1++ = vaddr;	/* Set TLB tag */
	set_tlb_data(entry1, pte_data);

	goto found;
}

/*======================================================================*
 * flush_tlb_page() : flushes one page
 *======================================================================*/
void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	if (vma->vm_mm && mm_context(vma->vm_mm) != NO_CONTEXT) {
		unsigned long flags;

		local_irq_save(flags);
		page &= PAGE_MASK;
		page |= (mm_context(vma->vm_mm) & MMU_CONTEXT_ASID_MASK);
		__flush_tlb_page(page);
		local_irq_restore(flags);
	}
}

/*======================================================================*
 * flush_tlb_range() : flushes a range of pages
 *======================================================================*/
void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
			   unsigned long end)
{
	struct mm_struct *mm;

	mm = vma->vm_mm;
	if (mm_context(mm) != NO_CONTEXT) {
		unsigned long flags;
		int size;

		local_irq_save(flags);
		size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
		if (size > (NR_TLB_ENTRIES / 4)) { /* Too many TLB entries to flush */
			mm_context(mm) = NO_CONTEXT;
			if (mm == current->mm)
				activate_context(mm);
		} else {
			unsigned long asid;

			asid = mm_context(mm) & MMU_CONTEXT_ASID_MASK;
			start &= PAGE_MASK;
			end += (PAGE_SIZE - 1);
			end &= PAGE_MASK;

			start |= asid;
			end |= asid;
			while (start < end) {
				__flush_tlb_page(start);
				start += PAGE_SIZE;
			}
		}
		local_irq_restore(flags);
	}
}

/*======================================================================*
 * flush_tlb_mm() : flushes the specified mm context TLB's
 *======================================================================*/
void local_flush_tlb_mm(struct mm_struct *mm)
{
	/*
	 * Invalidate all TLB entries of this process: instead of
	 * invalidating each entry, we get a new MMU context.
	 */
	if (mm_context(mm) != NO_CONTEXT) {
		unsigned long flags;

		local_irq_save(flags);
		mm_context(mm) = NO_CONTEXT;
		if (mm == current->mm)
			activate_context(mm);
		local_irq_restore(flags);
	}
}

/*======================================================================*
 * flush_tlb_all() : flushes all processes' TLBs
 *======================================================================*/
void local_flush_tlb_all(void)
{
	unsigned long flags;

	local_irq_save(flags);
	__flush_tlb_all();
	local_irq_restore(flags);
}

/*======================================================================*
 * init_mmu()
 *======================================================================*/
void __init init_mmu(void)
{
	tlb_entry_i = 0;
	tlb_entry_d = 0;
	mmu_context_cache = MMU_CONTEXT_FIRST_VERSION;
	set_asid(mmu_context_cache & MMU_CONTEXT_ASID_MASK);
	*(volatile unsigned long *)MPTB = (unsigned long)swapper_pg_dir;
}