fault.c

/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for
 * more details.
 *
 * From i386 code copyright (C) 1995  Linus Torvalds
 */

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/tty.h>
#include <linux/vt_kern.h>		/* For unblank_screen() */
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/kprobes.h>
#include <linux/hugetlb.h>
#include <linux/syscalls.h>
#include <linux/uaccess.h>

#include <asm/pgalloc.h>
#include <asm/sections.h>
#include <asm/traps.h>
#include <asm/syscalls.h>

#include <arch/interrupts.h>
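
/*
 * Deliver the requested signal for a fault.  Signalling init or the
 * idle task (pid < 2) would wedge the system, so panic in that case
 * instead.
 */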
static noinline void force_sig_info_fault(const char *type, int si_signo,
					  int si_code, unsigned long address,
					  int fault_num,
					  struct task_struct *tsk,
					  struct pt_regs *regs)
{
	siginfo_t info;

	if (unlikely(tsk->pid < 2)) {
		panic("Signal %d (code %d) at %#lx sent to %s!",
		      si_signo, si_code & 0xffff, address,
		      is_idle_task(tsk) ? "the idle task" : "init");
	}

	info.si_signo = si_signo;
	info.si_errno = 0;
	info.si_code = si_code;
	info.si_addr = (void __user *)address;
	info.si_trapno = fault_num;
	trace_unhandled_signal(type, regs, address, si_signo);
	force_sig_info(si_signo, &info, tsk);
}
#ifndef __tilegx__
/*
 * Synthesize the fault a PL0 process would get by doing a word-load of
 * an unaligned address or a high kernel address.
 */
SYSCALL_DEFINE2(cmpxchg_badaddr, unsigned long, address,
		struct pt_regs *, regs)
{
	if (address >= PAGE_OFFSET)
		force_sig_info_fault("atomic segfault", SIGSEGV, SEGV_MAPERR,
				     address, INT_DTLB_MISS, current, regs);
	else
		force_sig_info_fault("atomic alignment fault", SIGBUS,
				     BUS_ADRALN, address,
				     INT_UNALIGN_DATA, current, regs);

	/*
	 * Adjust pc to point at the actual instruction, which is unusual
	 * for syscalls normally, but is appropriate when we are claiming
	 * that a syscall swint1 caused a page fault or bus error.
	 */
	regs->pc -= 8;

	/*
	 * Mark this as a caller-save interrupt, like a normal page fault,
	 * so that when we go through the signal handler path we will
	 * properly restore r0, r1, and r2 for the signal handler arguments.
	 */
	regs->flags |= PT_FLAGS_CALLER_SAVES;

	return 0;
}
#endif
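
/*
 * Copy the kernel pmd entry covering @address from the reference page
 * table (init_mm.pgd) into @pgd, and return the kernel pmd.  Returns
 * NULL if the reference page table has no mapping at that address.
 */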
static inline pmd_t *vmalloc_sync_one(pgd_t *pgd, unsigned long address)
{
	unsigned index = pgd_index(address);
	pgd_t *pgd_k;
	pud_t *pud, *pud_k;
	pmd_t *pmd, *pmd_k;

	pgd += index;
	pgd_k = init_mm.pgd + index;

	if (!pgd_present(*pgd_k))
		return NULL;

	pud = pud_offset(pgd, address);
	pud_k = pud_offset(pgd_k, address);
	if (!pud_present(*pud_k))
		return NULL;

	pmd = pmd_offset(pud, address);
	pmd_k = pmd_offset(pud_k, address);
	if (!pmd_present(*pmd_k))
		return NULL;
	if (!pmd_present(*pmd)) {
		set_pmd(pmd, *pmd_k);
		arch_flush_lazy_mmu_mode();
	} else
		BUG_ON(pmd_ptfn(*pmd) != pmd_ptfn(*pmd_k));
	return pmd_k;
}
/*
 * Handle a fault on the vmalloc area.
 */
static inline int vmalloc_fault(pgd_t *pgd, unsigned long address)
{
	pmd_t *pmd_k;
	pte_t *pte_k;

	/* Make sure we are in vmalloc area */
	if (!(address >= VMALLOC_START && address < VMALLOC_END))
		return -1;

	/*
	 * Synchronize this task's top level page-table
	 * with the 'reference' page table.
	 */
	pmd_k = vmalloc_sync_one(pgd, address);
	if (!pmd_k)
		return -1;
	if (pmd_huge(*pmd_k))
		return 0;	/* support TILE huge_vmap() API */
	pte_k = pte_offset_kernel(pmd_k, address);
	if (!pte_present(*pte_k))
		return -1;
	return 0;
}
/* Wait until this PTE has completed migration. */
static void wait_for_migration(pte_t *pte)
{
	if (pte_migrating(*pte)) {
		/*
		 * Wait until the migrator fixes up this pte.
		 * We scale the loop count by the clock rate so we'll wait for
		 * a few seconds here.
		 */
		int retries = 0;
		int bound = get_clock_rate();
		while (pte_migrating(*pte)) {
			barrier();
			if (++retries > bound)
				panic("Hit migrating PTE (%#llx) and"
				      " page PFN %#lx still migrating",
				      pte->val, pte_pfn(*pte));
		}
	}
}
/*
 * It's not generally safe to use "current" to get the page table pointer,
 * since we might be running an oprofile interrupt in the middle of a
 * task switch.
 */
static pgd_t *get_current_pgd(void)
{
	HV_Context ctx = hv_inquire_context();
	unsigned long pgd_pfn = ctx.page_table >> PAGE_SHIFT;
	struct page *pgd_page = pfn_to_page(pgd_pfn);
	BUG_ON(PageHighMem(pgd_page));
	return (pgd_t *) __va(ctx.page_table);
}
/*
 * We can receive a page fault from a migrating PTE at any time.
 * Handle it by just waiting until the fault resolves.
 *
 * It's also possible to get a migrating kernel PTE that resolves
 * itself during the downcall from hypervisor to Linux.  We just check
 * here to see if the PTE seems valid, and if so we retry it.
 *
 * NOTE! We MUST NOT take any locks for this case.  We may be in an
 * interrupt or a critical region, and must do as little as possible.
 * Similarly, we can't use atomic ops here, since we may be handling a
 * fault caused by an atomic op access.
 *
 * If we find a migrating PTE while we're in an NMI context, and we're
 * at a PC that has a registered exception handler, we don't wait,
 * since this thread may (e.g.) have been interrupted while migrating
 * its own stack, which would then cause us to self-deadlock.
 */
static int handle_migrating_pte(pgd_t *pgd, int fault_num,
				unsigned long address, unsigned long pc,
				int is_kernel_mode, int write)
{
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	pte_t pteval;

	if (pgd_addr_invalid(address))
		return 0;

	pgd += pgd_index(address);
	pud = pud_offset(pgd, address);
	if (!pud || !pud_present(*pud))
		return 0;
	pmd = pmd_offset(pud, address);
	if (!pmd || !pmd_present(*pmd))
		return 0;
	pte = pmd_huge_page(*pmd) ? ((pte_t *)pmd) :
		pte_offset_kernel(pmd, address);
	pteval = *pte;
	if (pte_migrating(pteval)) {
		if (in_nmi() && search_exception_tables(pc))
			return 0;
		wait_for_migration(pte);
		return 1;
	}
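
	/*
	 * The PTE is not migrating.  If it's a kernel PTE that now looks
	 * valid for this access, it resolved during the downcall from the
	 * hypervisor, so tell the caller to simply retry the access.
	 */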
	if (!is_kernel_mode || !pte_present(pteval))
		return 0;
	if (fault_num == INT_ITLB_MISS) {
		if (pte_exec(pteval))
			return 1;
	} else if (write) {
		if (pte_write(pteval))
			return 1;
	} else {
		if (pte_read(pteval))
			return 1;
	}

	return 0;
}
/*
 * This routine is responsible for faulting in user pages.
 * It passes the work off to one of the appropriate routines.
 * It returns true if the fault was successfully handled.
 */
static int handle_page_fault(struct pt_regs *regs,
			     int fault_num,
			     int is_page_fault,
			     unsigned long address,
			     int write)
{
	struct task_struct *tsk;
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	unsigned long stack_offset;
	int fault;
	int si_code;
	int is_kernel_mode;
	pgd_t *pgd;
	unsigned int flags;

	/* on TILE, protection faults are always writes */
	if (!is_page_fault)
		write = 1;

	flags = (FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
		 (write ? FAULT_FLAG_WRITE : 0));

	is_kernel_mode = (EX1_PL(regs->ex1) != USER_PL);

	tsk = validate_current();

	/*
	 * Check to see if we might be overwriting the stack, and bail
	 * out if so.  The page fault code is a relatively likely
	 * place to get trapped in an infinite regress, and once we
	 * overwrite the whole stack, it becomes very hard to recover.
	 */
	stack_offset = stack_pointer & (THREAD_SIZE-1);
	if (stack_offset < THREAD_SIZE / 8) {
		pr_alert("Potential stack overrun: sp %#lx\n",
			 stack_pointer);
		show_regs(regs);
		pr_alert("Killing current process %d/%s\n",
			 tsk->pid, tsk->comm);
		do_group_exit(SIGKILL);
	}

	/*
	 * Early on, we need to check for migrating PTE entries;
	 * see homecache.c.  If we find a migrating PTE, we wait until
	 * the backing page claims to be done migrating, then we proceed.
	 * For kernel PTEs, we rewrite the PTE and return and retry.
	 * Otherwise, we treat the fault like a normal "no PTE" fault,
	 * rather than trying to patch up the existing PTE.
	 */
	pgd = get_current_pgd();
	if (handle_migrating_pte(pgd, fault_num, address, regs->pc,
				 is_kernel_mode, write))
		return 1;

	si_code = SEGV_MAPERR;

	/*
	 * We fault-in kernel-space virtual memory on-demand. The
	 * 'reference' page table is init_mm.pgd.
	 *
	 * NOTE! We MUST NOT take any locks for this case. We may
	 * be in an interrupt or a critical region, and should
	 * only copy the information from the master page table,
	 * nothing more.
	 *
	 * This verifies that the fault happens in kernel space
	 * and that the fault was not a protection fault.
	 */
	if (unlikely(address >= TASK_SIZE &&
		     !is_arch_mappable_range(address, 0))) {
		if (is_kernel_mode && is_page_fault &&
		    vmalloc_fault(pgd, address) >= 0)
			return 1;
		/*
		 * Don't take the mm semaphore here. If we fixup a prefetch
		 * fault we could otherwise deadlock.
		 */
		mm = NULL;	/* happy compiler */
		vma = NULL;
		goto bad_area_nosemaphore;
	}

	/*
	 * If we're trying to touch user-space addresses, we must
	 * be either at PL0, or else with interrupts enabled in the
	 * kernel, so either way we can re-enable interrupts here
	 * unless we are doing atomic access to user space with
	 * interrupts disabled.
	 */
	if (!(regs->flags & PT_FLAGS_DISABLE_IRQ))
		local_irq_enable();

	mm = tsk->mm;

	/*
	 * If we're in an interrupt, have no user context or are running in an
	 * atomic region then we must not take the fault.
	 */
	if (in_atomic() || !mm) {
		vma = NULL;	/* happy compiler */
		goto bad_area_nosemaphore;
	}

	/*
	 * When running in the kernel we expect faults to occur only to
	 * addresses in user space.  All other faults represent errors in the
	 * kernel and should generate an OOPS.  Unfortunately, in the case of an
	 * erroneous fault occurring in a code path which already holds mmap_sem
	 * we will deadlock attempting to validate the fault against the
	 * address space.  Luckily the kernel only validly references user
	 * space from well defined areas of code, which are listed in the
	 * exceptions table.
	 *
	 * As the vast majority of faults will be valid we will only perform
	 * the source reference check when there is a possibility of a deadlock.
	 * Attempt to lock the address space, if we cannot we then validate the
	 * source.  If this is invalid we can skip the address space check,
	 * thus avoiding the deadlock.
	 */
	if (!down_read_trylock(&mm->mmap_sem)) {
		if (is_kernel_mode &&
		    !search_exception_tables(regs->pc)) {
			vma = NULL;	/* happy compiler */
			goto bad_area_nosemaphore;
		}

retry:
		down_read(&mm->mmap_sem);
	}
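
	/*
	 * Find the VMA covering the faulting address; if the address is
	 * just below a VM_GROWSDOWN stack VMA, try to grow the stack.
	 */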
	vma = find_vma(mm, address);
	if (!vma)
		goto bad_area;
	if (vma->vm_start <= address)
		goto good_area;
	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;
	if (regs->sp < PAGE_OFFSET) {
		/*
		 * accessing the stack below sp is always a bug.
		 */
		if (address < regs->sp)
			goto bad_area;
	}
	if (expand_stack(vma, address))
		goto bad_area;

/*
 * Ok, we have a good vm_area for this memory access, so
 * we can handle it..
 */
good_area:
	si_code = SEGV_ACCERR;
	if (fault_num == INT_ITLB_MISS) {
		if (!(vma->vm_flags & VM_EXEC))
			goto bad_area;
	} else if (write) {
#ifdef TEST_VERIFY_AREA
		if (!is_page_fault && regs->cs == KERNEL_CS)
			pr_err("WP fault at "REGFMT"\n", regs->eip);
#endif
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;
	} else {
		if (!is_page_fault || !(vma->vm_flags & VM_READ))
			goto bad_area;
	}

survive:
	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
	fault = handle_mm_fault(mm, vma, address, flags);

	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
		return 0;

	if (unlikely(fault & VM_FAULT_ERROR)) {
		if (fault & VM_FAULT_OOM)
			goto out_of_memory;
		else if (fault & VM_FAULT_SIGBUS)
			goto do_sigbus;
		BUG();
	}
	if (flags & FAULT_FLAG_ALLOW_RETRY) {
		if (fault & VM_FAULT_MAJOR)
			tsk->maj_flt++;
		else
			tsk->min_flt++;
		if (fault & VM_FAULT_RETRY) {
			flags &= ~FAULT_FLAG_ALLOW_RETRY;

			/*
			 * No need to up_read(&mm->mmap_sem) as we would
			 * have already released it in __lock_page_or_retry
			 * in mm/filemap.c.
			 */
			goto retry;
		}
	}

#if CHIP_HAS_TILE_DMA() || CHIP_HAS_SN_PROC()
	/*
	 * If this was an asynchronous fault,
	 * restart the appropriate engine.
	 */
	switch (fault_num) {
#if CHIP_HAS_TILE_DMA()
	case INT_DMATLB_MISS:
	case INT_DMATLB_MISS_DWNCL:
	case INT_DMATLB_ACCESS:
	case INT_DMATLB_ACCESS_DWNCL:
		__insn_mtspr(SPR_DMA_CTR, SPR_DMA_CTR__REQUEST_MASK);
		break;
#endif
#if CHIP_HAS_SN_PROC()
	case INT_SNITLB_MISS:
	case INT_SNITLB_MISS_DWNCL:
		__insn_mtspr(SPR_SNCTL,
			     __insn_mfspr(SPR_SNCTL) &
			     ~SPR_SNCTL__FRZPROC_MASK);
		break;
#endif
	}
#endif

	up_read(&mm->mmap_sem);
	return 1;

/*
 * Something tried to access memory that isn't in our memory map..
 * Fix it, but check if it's kernel or user first..
 */
bad_area:
	up_read(&mm->mmap_sem);

bad_area_nosemaphore:
	/* User mode accesses just cause a SIGSEGV */
	if (!is_kernel_mode) {
		/*
		 * It's possible to have interrupts off here.
		 */
		local_irq_enable();

		force_sig_info_fault("segfault", SIGSEGV, si_code, address,
				     fault_num, tsk, regs);
		return 0;
	}

no_context:
	/* Are we prepared to handle this kernel fault?  */
	if (fixup_exception(regs))
		return 0;

	/*
	 * Oops. The kernel tried to access some bad page. We'll have to
	 * terminate things with extreme prejudice.
	 */
	bust_spinlocks(1);

	/* FIXME: no lookup_address() yet */
#ifdef SUPPORT_LOOKUP_ADDRESS
	if (fault_num == INT_ITLB_MISS) {
		pte_t *pte = lookup_address(address);

		if (pte && pte_present(*pte) && !pte_exec_kernel(*pte))
			pr_crit("kernel tried to execute"
				" non-executable page - exploit attempt?"
				" (uid: %d)\n", current->uid);
	}
#endif
	if (address < PAGE_SIZE)
		pr_alert("Unable to handle kernel NULL pointer dereference\n");
	else
		pr_alert("Unable to handle kernel paging request\n");
	pr_alert(" at virtual address "REGFMT", pc "REGFMT"\n",
		 address, regs->pc);

	show_regs(regs);

	if (unlikely(tsk->pid < 2)) {
		panic("Kernel page fault running %s!",
		      is_idle_task(tsk) ? "the idle task" : "init");
	}

	/*
	 * More FIXME: we should probably copy the i386 here and
	 * implement a generic die() routine.  Not today.
	 */
#ifdef SUPPORT_DIE
	die("Oops", regs);
#endif
	bust_spinlocks(0);	/* undo the earlier bust_spinlocks(1) */
	do_group_exit(SIGKILL);

/*
 * We ran out of memory, or some other thing happened to us that made
 * us unable to handle the page fault gracefully.
 */
out_of_memory:
	up_read(&mm->mmap_sem);
	if (is_global_init(tsk)) {
		yield();
		down_read(&mm->mmap_sem);
		goto survive;
	}
	pr_alert("VM: killing process %s\n", tsk->comm);
	if (!is_kernel_mode)
		do_group_exit(SIGKILL);
	goto no_context;

do_sigbus:
	up_read(&mm->mmap_sem);

	/* Kernel mode? Handle exceptions or die */
	if (is_kernel_mode)
		goto no_context;

	force_sig_info_fault("bus error", SIGBUS, BUS_ADRERR, address,
			     fault_num, tsk, regs);
	return 0;
}

#ifndef __tilegx__

/* We must release ICS before panicking or we won't get anywhere. */
#define ics_panic(fmt, ...) do { \
	__insn_mtspr(SPR_INTERRUPT_CRITICAL_SECTION, 0); \
	panic(fmt, __VA_ARGS__); \
} while (0)

/*
 * When we take an ITLB or DTLB fault or access violation in the
 * supervisor while the critical section bit is set, the hypervisor is
 * reluctant to write new values into the EX_CONTEXT_K_x registers,
 * since that might indicate we have not yet squirreled the SPR
 * contents away and can thus safely take a recursive interrupt.
 * Accordingly, the hypervisor passes us the PC via SYSTEM_SAVE_K_2.
 *
 * Note that this routine is called before homecache_tlb_defer_enter(),
 * which means that we can properly unlock any atomics that might
 * be used there (good), but also means we must be very sensitive
 * to not touch any data structures that might be located in memory
 * that could migrate, as we could be entering the kernel on a dataplane
 * cpu that has been deferring kernel TLB updates.  This means, for
 * example, that we can't migrate init_mm or its pgd.
 */
struct intvec_state do_page_fault_ics(struct pt_regs *regs, int fault_num,
				      unsigned long address,
				      unsigned long info)
{
	unsigned long pc = info & ~1;
	int write = info & 1;
	pgd_t *pgd = get_current_pgd();

	/* Retval is 1 at first since we will handle the fault fully. */
	struct intvec_state state = {
		do_page_fault, fault_num, address, write, 1
	};

	/* Validate that we are plausibly in the right routine. */
	if ((pc & 0x7) != 0 || pc < PAGE_OFFSET ||
	    (fault_num != INT_DTLB_MISS &&
	     fault_num != INT_DTLB_ACCESS)) {
		unsigned long old_pc = regs->pc;
		regs->pc = pc;
		ics_panic("Bad ICS page fault args:"
			  " old PC %#lx, fault %d/%d at %#lx\n",
			  old_pc, fault_num, write, address);
	}

	/* We might be faulting on a vmalloc page, so check that first. */
	if (fault_num != INT_DTLB_ACCESS && vmalloc_fault(pgd, address) >= 0)
		return state;

	/*
	 * If we faulted with ICS set in sys_cmpxchg, we are providing
	 * a user syscall service that should generate a signal on
	 * fault.  We didn't set up a kernel stack on initial entry to
	 * sys_cmpxchg, but instead had one set up by the fault, which
	 * (because sys_cmpxchg never releases ICS) came to us via the
	 * SYSTEM_SAVE_K_2 mechanism, and thus EX_CONTEXT_K_[01] are
	 * still referencing the original user code.  We release the
	 * atomic lock and rewrite pt_regs so that it appears that we
	 * came from user-space directly, and after we finish the
	 * fault we'll go back to user space and re-issue the swint.
	 * This way the backtrace information is correct if we need to
	 * emit a stack dump at any point while handling this.
	 *
	 * Must match register use in sys_cmpxchg().
	 */
	if (pc >= (unsigned long) sys_cmpxchg &&
	    pc < (unsigned long) __sys_cmpxchg_end) {
#ifdef CONFIG_SMP
		/* Don't unlock before we could have locked. */
		if (pc >= (unsigned long)__sys_cmpxchg_grab_lock) {
			int *lock_ptr = (int *)(regs->regs[ATOMIC_LOCK_REG]);
			__atomic_fault_unlock(lock_ptr);
		}
#endif
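		/* sys_cmpxchg stashes the user stack pointer in r27. */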
		regs->sp = regs->regs[27];
	}
	/*
	 * We can also fault in the atomic assembly, in which
	 * case we use the exception table to do the first-level fixup.
	 * We may re-fixup again in the real fault handler if it
	 * turns out the faulting address is just bad, and not,
	 * for example, migrating.
	 */
	else if (pc >= (unsigned long) __start_atomic_asm_code &&
		 pc < (unsigned long) __end_atomic_asm_code) {
		const struct exception_table_entry *fixup;
#ifdef CONFIG_SMP
		/* Unlock the atomic lock. */
		int *lock_ptr = (int *)(regs->regs[ATOMIC_LOCK_REG]);
		__atomic_fault_unlock(lock_ptr);
#endif
		fixup = search_exception_tables(pc);
		if (!fixup)
			ics_panic("ICS atomic fault not in table:"
				  " PC %#lx, fault %d", pc, fault_num);
		regs->pc = fixup->fixup;
		regs->ex1 = PL_ICS_EX1(KERNEL_PL, 0);
	}

	/*
	 * Now that we have released the atomic lock (if necessary),
	 * it's safe to spin if the PTE that caused the fault was migrating.
	 */
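	/* On TILE, a DTLB access violation always implies a write. */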
	if (fault_num == INT_DTLB_ACCESS)
		write = 1;
	if (handle_migrating_pte(pgd, fault_num, address, pc, 1, write))
		return state;

	/* Return zero so that we continue on with normal fault handling. */
	state.retval = 0;
	return state;
}

#endif /* !__tilegx__ */

/*
 * This routine handles page faults.  It determines the address, and the
 * problem, and then passes it off to handle_page_fault() for normal DTLB
 * and ITLB issues, and for DMA or SN processor faults when we are in user
 * space.  For the latter, if we're in kernel mode, we just save the
 * interrupt away appropriately and return immediately.  We can't do
 * page faults for user code while in kernel mode.
 */
void do_page_fault(struct pt_regs *regs, int fault_num,
		   unsigned long address, unsigned long write)
{
	int is_page_fault;

	/* This case should have been handled by do_page_fault_ics(). */
	BUG_ON(write & ~1);

#if CHIP_HAS_TILE_DMA()
	/*
	 * If it's a DMA fault, suspend the transfer while we're
	 * handling the miss; we'll restart after it's handled.  If we
	 * don't suspend, it's possible that this process could swap
	 * out and back in, and restart the engine since the DMA is
	 * still 'running'.
	 */
	if (fault_num == INT_DMATLB_MISS ||
	    fault_num == INT_DMATLB_ACCESS ||
	    fault_num == INT_DMATLB_MISS_DWNCL ||
	    fault_num == INT_DMATLB_ACCESS_DWNCL) {
		__insn_mtspr(SPR_DMA_CTR, SPR_DMA_CTR__SUSPEND_MASK);
		while (__insn_mfspr(SPR_DMA_USER_STATUS) &
		       SPR_DMA_STATUS__BUSY_MASK)
			;
	}
#endif

	/* Validate fault num and decide if this is a first-time page fault. */
	switch (fault_num) {
	case INT_ITLB_MISS:
	case INT_DTLB_MISS:
#if CHIP_HAS_TILE_DMA()
	case INT_DMATLB_MISS:
	case INT_DMATLB_MISS_DWNCL:
#endif
#if CHIP_HAS_SN_PROC()
	case INT_SNITLB_MISS:
	case INT_SNITLB_MISS_DWNCL:
#endif
		is_page_fault = 1;
		break;

	case INT_DTLB_ACCESS:
#if CHIP_HAS_TILE_DMA()
	case INT_DMATLB_ACCESS:
	case INT_DMATLB_ACCESS_DWNCL:
#endif
		is_page_fault = 0;
		break;

	default:
		panic("Bad fault number %d in do_page_fault", fault_num);
	}

#if CHIP_HAS_TILE_DMA() || CHIP_HAS_SN_PROC()
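	/*
	 * In kernel mode we can't fault in the page now; stash the fault
	 * in the thread's async_tlb state and replay it on the way back
	 * to user space (see do_async_page_fault() below).
	 */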
	if (EX1_PL(regs->ex1) != USER_PL) {
		struct async_tlb *async;
		switch (fault_num) {
#if CHIP_HAS_TILE_DMA()
		case INT_DMATLB_MISS:
		case INT_DMATLB_ACCESS:
		case INT_DMATLB_MISS_DWNCL:
		case INT_DMATLB_ACCESS_DWNCL:
			async = &current->thread.dma_async_tlb;
			break;
#endif
#if CHIP_HAS_SN_PROC()
		case INT_SNITLB_MISS:
		case INT_SNITLB_MISS_DWNCL:
			async = &current->thread.sn_async_tlb;
			break;
#endif
		default:
			async = NULL;
		}
		if (async) {
			/*
			 * No vmalloc check required, so we can allow
			 * interrupts immediately at this point.
			 */
			local_irq_enable();

			set_thread_flag(TIF_ASYNC_TLB);
			if (async->fault_num != 0) {
				panic("Second async fault %d;"
				      " old fault was %d (%#lx/%ld)",
				      fault_num, async->fault_num,
				      address, write);
			}
			BUG_ON(fault_num == 0);
			async->fault_num = fault_num;
			async->is_fault = is_page_fault;
			async->is_write = write;
			async->address = address;
			return;
		}
	}
#endif

	handle_page_fault(regs, fault_num, is_page_fault, address, write);
}

#if CHIP_HAS_TILE_DMA() || CHIP_HAS_SN_PROC()
/*
 * Check an async_tlb structure to see if a deferred fault is waiting,
 * and if so pass it to the page-fault code.
 */
static void handle_async_page_fault(struct pt_regs *regs,
				    struct async_tlb *async)
{
	if (async->fault_num) {
		/*
		 * Clear async->fault_num before calling the page-fault
		 * handler so that if we re-interrupt before returning
		 * from the function we have somewhere to put the
		 * information from the new interrupt.
		 */
		int fault_num = async->fault_num;
		async->fault_num = 0;
		handle_page_fault(regs, fault_num, async->is_fault,
				  async->address, async->is_write);
	}
}

/*
 * This routine effectively re-issues asynchronous page faults
 * when we are returning to user space.
 */
void do_async_page_fault(struct pt_regs *regs)
{
	/*
	 * Clear thread flag early.  If we re-interrupt while processing
	 * code here, we will reset it and recall this routine before
	 * returning to user space.
	 */
	clear_thread_flag(TIF_ASYNC_TLB);

#if CHIP_HAS_TILE_DMA()
	handle_async_page_fault(regs, &current->thread.dma_async_tlb);
#endif
#if CHIP_HAS_SN_PROC()
	handle_async_page_fault(regs, &current->thread.sn_async_tlb);
#endif
}
#endif /* CHIP_HAS_TILE_DMA() || CHIP_HAS_SN_PROC() */

void vmalloc_sync_all(void)
{
#ifdef __tilegx__
	/* Currently all L1 kernel pmd's are static and shared. */
	BUG_ON(pgd_index(VMALLOC_END) != pgd_index(VMALLOC_START));
#else
	/*
	 * Note that races in the updates of insync and start aren't
	 * problematic: insync can only get set bits added, and updates to
	 * start are only improving performance (without affecting correctness
	 * if undone).
	 */
	static DECLARE_BITMAP(insync, PTRS_PER_PGD);
	static unsigned long start = PAGE_OFFSET;
	unsigned long address;

	BUILD_BUG_ON(PAGE_OFFSET & ~PGDIR_MASK);
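	/*
	 * Walk one pgd entry at a time from PAGE_OFFSET upward (the
	 * "address >= PAGE_OFFSET" test stops the loop on wraparound),
	 * syncing each not-yet-synced entry into every pgd on pgd_list.
	 */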
	for (address = start; address >= PAGE_OFFSET; address += PGDIR_SIZE) {
		if (!test_bit(pgd_index(address), insync)) {
			unsigned long flags;
			struct list_head *pos;

			spin_lock_irqsave(&pgd_lock, flags);
			list_for_each(pos, &pgd_list)
				if (!vmalloc_sync_one(list_to_pgd(pos),
						      address)) {
					/* Must be at first entry in list. */
					BUG_ON(pos != pgd_list.next);
					break;
				}
			spin_unlock_irqrestore(&pgd_lock, flags);
			if (pos != pgd_list.next)
				set_bit(pgd_index(address), insync);
		}
		if (address == start && test_bit(pgd_index(address), insync))
			start = address + PGDIR_SIZE;
	}
#endif
}