/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for
 * more details.
 */

#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/kprobes.h>
#include <linux/module.h>
#include <linux/pfn.h>
#include <linux/kallsyms.h>
#include <linux/stacktrace.h>
#include <linux/uaccess.h>
#include <linux/mmzone.h>
#include <asm/backtrace.h>
#include <asm/page.h>
#include <asm/tlbflush.h>
#include <asm/ucontext.h>
#include <asm/sigframe.h>
#include <asm/stack.h>
#include <arch/abi.h>
#include <arch/interrupts.h>

#define KBT_ONGOING	0	/* Backtrace still ongoing */
#define KBT_DONE	1	/* Backtrace cleanly completed */
#define KBT_RUNNING	2	/* Can't run backtrace on a running task */
#define KBT_LOOP	3	/* Backtrace entered a loop */

/* Is address on the specified kernel stack? */
static int in_kernel_stack(struct KBacktraceIterator *kbt, VirtualAddress sp)
{
	ulong kstack_base = (ulong) kbt->task->stack;
	if (kstack_base == 0)  /* corrupt task pointer; just follow stack... */
		return sp >= PAGE_OFFSET && sp < (unsigned long)high_memory;
	return sp >= kstack_base && sp < kstack_base + THREAD_SIZE;
}

/* Is address in the specified kernel code? */
static int in_kernel_text(VirtualAddress address)
{
	return (address >= MEM_SV_INTRPT &&
		address < MEM_SV_INTRPT + HPAGE_SIZE);
}

/* Is address valid for reading? */
static int valid_address(struct KBacktraceIterator *kbt, VirtualAddress address)
{
	HV_PTE *l1_pgtable = kbt->pgtable;
	HV_PTE *l2_pgtable;
	unsigned long pfn;
	HV_PTE pte;
	struct page *page;

	if (l1_pgtable == NULL)
		return 0;	/* can't read user space in other tasks */

	pte = l1_pgtable[HV_L1_INDEX(address)];
	if (!hv_pte_get_present(pte))
		return 0;
	pfn = hv_pte_get_pfn(pte);
	if (pte_huge(pte)) {
		if (!pfn_valid(pfn)) {
			pr_err("huge page has bad pfn %#lx\n", pfn);
			return 0;
		}
		return hv_pte_get_present(pte) && hv_pte_get_readable(pte);
	}

	page = pfn_to_page(pfn);
	if (PageHighMem(page)) {
		pr_err("L2 page table not in LOWMEM (%#llx)\n",
		       HV_PFN_TO_CPA(pfn));
		return 0;
	}
	l2_pgtable = (HV_PTE *)pfn_to_kaddr(pfn);
	pte = l2_pgtable[HV_L2_INDEX(address)];
	return hv_pte_get_present(pte) && hv_pte_get_readable(pte);
}

/* Callback for backtracer; basically a glorified memcpy */
static bool read_memory_func(void *result, VirtualAddress address,
			     unsigned int size, void *vkbt)
{
	int retval;
	struct KBacktraceIterator *kbt = (struct KBacktraceIterator *)vkbt;

	if (in_kernel_text(address)) {
		/* OK to read kernel code. */
	} else if (address >= PAGE_OFFSET) {
		/* We only tolerate kernel-space reads of this task's stack */
		if (!in_kernel_stack(kbt, address))
			return false;
	} else if (!valid_address(kbt, address)) {
		return false;	/* invalid user-space address */
	}
	pagefault_disable();
	retval = __copy_from_user_inatomic(result,
					   (void __user __force *)address,
					   size);
	pagefault_enable();
	return (retval == 0);
}
/* Return a pt_regs pointer for a valid fault handler frame */
static struct pt_regs *valid_fault_handler(struct KBacktraceIterator *kbt)
{
	const char *fault = NULL;  /* happy compiler */
	char fault_buf[64];
	VirtualAddress sp = kbt->it.sp;
	struct pt_regs *p;

	if (!in_kernel_stack(kbt, sp))
		return NULL;
	if (!in_kernel_stack(kbt, sp + C_ABI_SAVE_AREA_SIZE + PTREGS_SIZE - 1))
		return NULL;
	p = (struct pt_regs *)(sp + C_ABI_SAVE_AREA_SIZE);
	if (p->faultnum == INT_SWINT_1 || p->faultnum == INT_SWINT_1_SIGRETURN)
		fault = "syscall";
	else if (kbt->verbose) {  /* else we aren't going to use it */
		snprintf(fault_buf, sizeof(fault_buf),
			 "interrupt %ld", p->faultnum);
		fault = fault_buf;
	}
	if (EX1_PL(p->ex1) == KERNEL_PL &&
	    in_kernel_text(p->pc) &&
	    in_kernel_stack(kbt, p->sp) &&
	    p->sp >= sp) {
		if (kbt->verbose)
			pr_err(" <%s while in kernel mode>\n", fault);
	} else if (EX1_PL(p->ex1) == USER_PL &&
		   p->pc < PAGE_OFFSET &&
		   p->sp < PAGE_OFFSET) {
		if (kbt->verbose)
			pr_err(" <%s while in user mode>\n", fault);
	} else if (kbt->verbose) {
		pr_err(" (odd fault: pc %#lx, sp %#lx, ex1 %#lx?)\n",
		       p->pc, p->sp, p->ex1);
		p = NULL;
	}
	/* Guard against the odd-fault case above having cleared p. */
	if (p == NULL || !kbt->profile ||
	    (INT_MASK(p->faultnum) & QUEUED_INTERRUPTS) == 0)
		return p;
	return NULL;
}
/* Is the pc pointing to a sigreturn trampoline? */
static int is_sigreturn(VirtualAddress pc)
{
	return (pc == VDSO_BASE);
}

/* Return a pt_regs pointer for a valid signal handler frame */
static struct pt_regs *valid_sigframe(struct KBacktraceIterator *kbt)
{
	BacktraceIterator *b = &kbt->it;

	if (b->pc == VDSO_BASE) {
		struct rt_sigframe *frame;
		unsigned long sigframe_top =
			b->sp + sizeof(struct rt_sigframe) - 1;
		if (!valid_address(kbt, b->sp) ||
		    !valid_address(kbt, sigframe_top)) {
			if (kbt->verbose)
				pr_err(" (odd signal: sp %#lx?)\n",
				       (unsigned long)(b->sp));
			return NULL;
		}
		frame = (struct rt_sigframe *)b->sp;
		if (kbt->verbose) {
			pr_err(" <received signal %d>\n",
			       frame->info.si_signo);
		}
		return (struct pt_regs *)&frame->uc.uc_mcontext;
	}
	return NULL;
}
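/* Does the iterator's current pc look like a sigreturn trampoline? */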
static int KBacktraceIterator_is_sigreturn(struct KBacktraceIterator *kbt)
{
	return is_sigreturn(kbt->it.pc);
}
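/*
 * If the current frame is a fault handler or signal frame, restart the
 * backtrace from the registers of the interrupted context; returns
 * nonzero if the backtrace was restarted.
 */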
static int KBacktraceIterator_restart(struct KBacktraceIterator *kbt)
{
	struct pt_regs *p;

	p = valid_fault_handler(kbt);
	if (p == NULL)
		p = valid_sigframe(kbt);
	if (p == NULL)
		return 0;
	backtrace_init(&kbt->it, read_memory_func, kbt,
		       p->pc, p->lr, p->sp, p->regs[52]);
	kbt->new_context = 1;
	return 1;
}

/* Find a frame that isn't a sigreturn, if there is one. */
static int KBacktraceIterator_next_item_inclusive(
	struct KBacktraceIterator *kbt)
{
	for (;;) {
		do {
			if (!KBacktraceIterator_is_sigreturn(kbt))
				return KBT_ONGOING;
		} while (backtrace_next(&kbt->it));

		if (!KBacktraceIterator_restart(kbt))
			return KBT_DONE;
	}
}
/*
 * If the current sp is on a page different than what we recorded
 * as the top-of-kernel-stack last time we context switched, we have
 * probably blown the stack, and nothing is going to work out well.
 * If we can at least get out a warning, that may help the debug,
 * though we probably won't be able to backtrace into the code that
 * actually did the recursive damage.
 */
static void validate_stack(struct pt_regs *regs)
{
	int cpu = smp_processor_id();
	unsigned long ksp0 = get_current_ksp0();
	unsigned long ksp0_base = ksp0 - THREAD_SIZE;
	unsigned long sp = stack_pointer;

	if (EX1_PL(regs->ex1) == KERNEL_PL && regs->sp >= ksp0) {
		pr_err("WARNING: cpu %d: kernel stack page %#lx underrun!\n"
		       " sp %#lx (%#lx in caller), caller pc %#lx, lr %#lx\n",
		       cpu, ksp0_base, sp, regs->sp, regs->pc, regs->lr);
	} else if (sp < ksp0_base + sizeof(struct thread_info)) {
		pr_err("WARNING: cpu %d: kernel stack page %#lx overrun!\n"
		       " sp %#lx (%#lx in caller), caller pc %#lx, lr %#lx\n",
		       cpu, ksp0_base, sp, regs->sp, regs->pc, regs->lr);
	}
}
void KBacktraceIterator_init(struct KBacktraceIterator *kbt,
			     struct task_struct *t, struct pt_regs *regs)
{
	VirtualAddress pc, lr, sp, r52;
	int is_current;

	/*
	 * Set up callback information. We grab the kernel stack base
	 * so we will allow reads of that address range, and if we're
	 * asking about the current process we grab the page table
	 * so we can check user accesses before trying to read them.
	 * We flush the TLB to avoid any weird skew issues.
	 */
	is_current = (t == NULL);
	kbt->is_current = is_current;
	if (is_current)
		t = validate_current();
	kbt->task = t;
	kbt->pgtable = NULL;
	kbt->verbose = 0;	/* override in caller if desired */
	kbt->profile = 0;	/* override in caller if desired */
	kbt->end = KBT_ONGOING;
	kbt->new_context = 0;
	if (is_current) {
		HV_PhysAddr pgdir_pa = hv_inquire_context().page_table;
		if (pgdir_pa == (unsigned long)swapper_pg_dir - PAGE_OFFSET) {
			/*
			 * Not just an optimization: this also allows
			 * this to work at all before va/pa mappings
			 * are set up.
			 */
			kbt->pgtable = swapper_pg_dir;
		} else {
			struct page *page = pfn_to_page(PFN_DOWN(pgdir_pa));
			if (!PageHighMem(page))
				kbt->pgtable = __va(pgdir_pa);
			else
				pr_err("page table not in LOWMEM (%#llx)\n",
				       pgdir_pa);
		}
		local_flush_tlb_all();
		validate_stack(regs);
	}

	if (regs == NULL) {
		if (is_current || t->state == TASK_RUNNING) {
			/* Can't do this; we need registers */
			kbt->end = KBT_RUNNING;
			return;
		}
		pc = get_switch_to_pc();
		lr = t->thread.pc;
		sp = t->thread.ksp;
		r52 = 0;
	} else {
		pc = regs->pc;
		lr = regs->lr;
		sp = regs->sp;
		r52 = regs->regs[52];
	}

	backtrace_init(&kbt->it, read_memory_func, kbt, pc, lr, sp, r52);
	kbt->end = KBacktraceIterator_next_item_inclusive(kbt);
}
EXPORT_SYMBOL(KBacktraceIterator_init);
int KBacktraceIterator_end(struct KBacktraceIterator *kbt)
{
	return kbt->end != KBT_ONGOING;
}
EXPORT_SYMBOL(KBacktraceIterator_end);

void KBacktraceIterator_next(struct KBacktraceIterator *kbt)
{
	VirtualAddress old_pc = kbt->it.pc, old_sp = kbt->it.sp;

	kbt->new_context = 0;
	if (!backtrace_next(&kbt->it) && !KBacktraceIterator_restart(kbt)) {
		kbt->end = KBT_DONE;
		return;
	}
	kbt->end = KBacktraceIterator_next_item_inclusive(kbt);
	if (old_pc == kbt->it.pc && old_sp == kbt->it.sp) {
		/* Trapped in a loop; give up. */
		kbt->end = KBT_LOOP;
	}
}
EXPORT_SYMBOL(KBacktraceIterator_next);
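/*
 * A minimal sketch of how a caller typically drives the iterator
 * (tile_show_stack() below is the canonical in-tree user); here
 * process_frame() is a hypothetical callback, not part of this API:
 *
 *	struct KBacktraceIterator kbt;
 *
 *	KBacktraceIterator_init(&kbt, task, regs);
 *	for (; !KBacktraceIterator_end(&kbt); KBacktraceIterator_next(&kbt))
 *		process_frame(kbt.it.pc, kbt.it.sp);
 */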
/*
 * This method wraps the backtracer's more generic support.
 * It is only invoked from the architecture-specific code; show_stack()
 * and dump_stack() (in entry.S) are architecture-independent entry points.
 */
void tile_show_stack(struct KBacktraceIterator *kbt, int headers)
{
	int i;

	if (headers) {
		/*
		 * Add a blank line since if we are called from panic(),
		 * then bust_spinlocks() spit out a space in front of us
		 * and it will mess up our KERN_ERR.
		 */
		pr_err("\n");
		pr_err("Starting stack dump of tid %d, pid %d (%s)"
		       " on cpu %d at cycle %lld\n",
		       kbt->task->pid, kbt->task->tgid, kbt->task->comm,
		       smp_processor_id(), get_cycles());
	}
	kbt->verbose = 1;
	i = 0;
	for (; !KBacktraceIterator_end(kbt); KBacktraceIterator_next(kbt)) {
		char *modname;
		const char *name;
		unsigned long address = kbt->it.pc;
		unsigned long offset, size;
		char namebuf[KSYM_NAME_LEN+100];

		if (address >= PAGE_OFFSET)
			name = kallsyms_lookup(address, &size, &offset,
					       &modname, namebuf);
		else
			name = NULL;

		if (!name) {
			namebuf[0] = '\0';
		} else {
			size_t namelen = strlen(namebuf);
			size_t remaining = (sizeof(namebuf) - 1) - namelen;
			char *p = namebuf + namelen;
			int rc = snprintf(p, remaining, "+%#lx/%#lx ",
					  offset, size);
			if (modname && rc < remaining)
				snprintf(p + rc, remaining - rc,
					 "[%s] ", modname);
			namebuf[sizeof(namebuf)-1] = '\0';
		}

		pr_err(" frame %d: 0x%lx %s(sp 0x%lx)\n",
		       i++, address, namebuf, (unsigned long)(kbt->it.sp));

		if (i >= 100) {
			pr_err("Stack dump truncated (%d frames)\n", i);
			break;
		}
	}
	if (kbt->end == KBT_LOOP)
		pr_err("Stack dump stopped; next frame identical to this one\n");
	if (headers)
		pr_err("Stack dump complete\n");
}
EXPORT_SYMBOL(tile_show_stack);
/* This is called from show_regs() and _dump_stack() */
void dump_stack_regs(struct pt_regs *regs)
{
	struct KBacktraceIterator kbt;

	KBacktraceIterator_init(&kbt, NULL, regs);
	tile_show_stack(&kbt, 1);
}
EXPORT_SYMBOL(dump_stack_regs);
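/* Synthesize a pt_regs holding just the four values the backtracer needs. */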
static struct pt_regs *regs_to_pt_regs(struct pt_regs *regs,
				       ulong pc, ulong lr, ulong sp, ulong r52)
{
	memset(regs, 0, sizeof(struct pt_regs));
	regs->pc = pc;
	regs->lr = lr;
	regs->sp = sp;
	regs->regs[52] = r52;
	return regs;
}

/* This is called from dump_stack() and just converts to pt_regs */
void _dump_stack(int dummy, ulong pc, ulong lr, ulong sp, ulong r52)
{
	struct pt_regs regs;
	dump_stack_regs(regs_to_pt_regs(&regs, pc, lr, sp, r52));
}

/* This is called from KBacktraceIterator_init_current() */
void _KBacktraceIterator_init_current(struct KBacktraceIterator *kbt, ulong pc,
				      ulong lr, ulong sp, ulong r52)
{
	struct pt_regs regs;
	KBacktraceIterator_init(kbt, NULL,
				regs_to_pt_regs(&regs, pc, lr, sp, r52));
}

/* This is called only from kernel/sched.c, with esp == NULL */
void show_stack(struct task_struct *task, unsigned long *esp)
{
	struct KBacktraceIterator kbt;

	if (task == NULL || task == current)
		KBacktraceIterator_init_current(&kbt);
	else
		KBacktraceIterator_init(&kbt, task, NULL);
	tile_show_stack(&kbt, 0);
}
#ifdef CONFIG_STACKTRACE

/* Support generic Linux stack API too */

void save_stack_trace_tsk(struct task_struct *task, struct stack_trace *trace)
{
	struct KBacktraceIterator kbt;
	int skip = trace->skip;
	int i = 0;

	if (task == NULL || task == current)
		KBacktraceIterator_init_current(&kbt);
	else
		KBacktraceIterator_init(&kbt, task, NULL);
	for (; !KBacktraceIterator_end(&kbt); KBacktraceIterator_next(&kbt)) {
		if (skip) {
			--skip;
			continue;
		}
		if (i >= trace->max_entries || kbt.it.pc < PAGE_OFFSET)
			break;
		trace->entries[i++] = kbt.it.pc;
	}
	trace->nr_entries = i;
}
EXPORT_SYMBOL(save_stack_trace_tsk);

void save_stack_trace(struct stack_trace *trace)
{
	save_stack_trace_tsk(NULL, trace);
}
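
/*
 * A minimal sketch of the generic stacktrace API these two functions
 * back, assuming a caller that supplies its own entries[] buffer:
 *
 *	unsigned long entries[16];
 *	struct stack_trace trace = {
 *		.entries = entries,
 *		.max_entries = ARRAY_SIZE(entries),
 *		.skip = 0,
 *	};
 *
 *	save_stack_trace(&trace);
 *	(trace.nr_entries kernel PCs are now in entries[].)
 */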
#endif
/* In entry.S */
EXPORT_SYMBOL(KBacktraceIterator_init_current);