/*
 * arch/ppc/kernel/process.c
 *
 * Derived from "arch/i386/kernel/process.c"
 * Copyright (C) 1995 Linus Torvalds
 *
 * Updated and modified by Cort Dougan (cort@cs.nmt.edu) and
 * Paul Mackerras (paulus@cs.anu.edu.au)
 *
 * PowerPC version
 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/config.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/elf.h>
#include <linux/init.h>
#include <linux/prctl.h>
#include <linux/init_task.h>
#include <linux/module.h>
#include <linux/kallsyms.h>
#include <linux/mqueue.h>
#include <linux/hardirq.h>

#include <asm/pgtable.h>
#include <asm/uaccess.h>
#include <asm/system.h>
#include <asm/io.h>
#include <asm/processor.h>
#include <asm/mmu.h>
#include <asm/prom.h>

extern unsigned long _get_SP(void);

#ifndef CONFIG_SMP
struct task_struct *last_task_used_math = NULL;
struct task_struct *last_task_used_altivec = NULL;
struct task_struct *last_task_used_spe = NULL;
#endif

static struct fs_struct init_fs = INIT_FS;
static struct files_struct init_files = INIT_FILES;
static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
struct mm_struct init_mm = INIT_MM(init_mm);
EXPORT_SYMBOL(init_mm);

/* this is 8kB-aligned so we can get to the thread_info struct
   at the base of it from the stack pointer with 1 integer instruction. */
union thread_union init_thread_union
	__attribute__((__section__(".data.init_task"))) =
{ INIT_THREAD_INFO(init_task) };

/* initial task structure */
struct task_struct init_task = INIT_TASK(init_task);
EXPORT_SYMBOL(init_task);

/* only used to get secondary processor up */
struct task_struct *current_set[NR_CPUS] = {&init_task, };

/*
 * Make sure the floating-point register state in the
 * thread_struct is up to date for task tsk.
 */
void flush_fp_to_thread(struct task_struct *tsk)
{
	if (tsk->thread.regs) {
		/*
		 * We need to disable preemption here because if we didn't,
		 * another process could get scheduled after the regs->msr
		 * test but before we have finished saving the FP registers
		 * to the thread_struct.  That process could take over the
		 * FPU, and then when we get scheduled again we would store
		 * bogus values for the remaining FP registers.
		 */
		preempt_disable();
		if (tsk->thread.regs->msr & MSR_FP) {
#ifdef CONFIG_SMP
			/*
			 * This should only ever be called for current or
			 * for a stopped child process.  Since we save away
			 * the FP register state on context switch on SMP,
			 * there is something wrong if a stopped child appears
			 * to still have its FP state in the CPU registers.
			 */
			BUG_ON(tsk != current);
#endif
			giveup_fpu(current);
		}
		preempt_enable();
	}
}

void enable_kernel_fp(void)
{
	WARN_ON(preemptible());

#ifdef CONFIG_SMP
	if (current->thread.regs && (current->thread.regs->msr & MSR_FP))
		giveup_fpu(current);
	else
		giveup_fpu(NULL);	/* just enables FP for kernel */
#else
	giveup_fpu(last_task_used_math);
#endif /* CONFIG_SMP */
}
EXPORT_SYMBOL(enable_kernel_fp);
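
/*
 * Illustrative usage note (added for clarity, not part of the original file):
 * the WARN_ON(preemptible()) above means a caller that wants to use the FPU
 * from kernel code must hold off preemption around the FP section.  A
 * hypothetical user would look roughly like:
 *
 *	preempt_disable();
 *	enable_kernel_fp();
 *	... FP instructions ...
 *	preempt_enable();
 */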
int dump_task_fpu(struct task_struct *tsk, elf_fpregset_t *fpregs)
{
	if (!tsk->thread.regs)
		return 0;
	flush_fp_to_thread(current);

	memcpy(fpregs, &tsk->thread.fpr[0], sizeof(*fpregs));

	return 1;
}

#ifdef CONFIG_ALTIVEC
void enable_kernel_altivec(void)
{
	WARN_ON(preemptible());

#ifdef CONFIG_SMP
	if (current->thread.regs && (current->thread.regs->msr & MSR_VEC))
		giveup_altivec(current);
	else
		giveup_altivec(NULL);	/* just enable AltiVec for kernel - force */
#else
	giveup_altivec(last_task_used_altivec);
#endif /* CONFIG_SMP */
}
EXPORT_SYMBOL(enable_kernel_altivec);

/*
 * Make sure the VMX/Altivec register state in the
 * thread_struct is up to date for task tsk.
 */
void flush_altivec_to_thread(struct task_struct *tsk)
{
	if (tsk->thread.regs) {
		preempt_disable();
		if (tsk->thread.regs->msr & MSR_VEC) {
#ifdef CONFIG_SMP
			BUG_ON(tsk != current);
#endif
			giveup_altivec(current);
		}
		preempt_enable();
	}
}

int dump_task_altivec(struct pt_regs *regs, elf_vrregset_t *vrregs)
{
	flush_altivec_to_thread(current);
	memcpy(vrregs, &current->thread.vr[0], sizeof(*vrregs));
	return 1;
}
#endif /* CONFIG_ALTIVEC */

#ifdef CONFIG_SPE
void enable_kernel_spe(void)
{
	WARN_ON(preemptible());

#ifdef CONFIG_SMP
	if (current->thread.regs && (current->thread.regs->msr & MSR_SPE))
		giveup_spe(current);
	else
		giveup_spe(NULL);	/* just enable SPE for kernel - force */
#else
	giveup_spe(last_task_used_spe);
#endif /* CONFIG_SMP */
}
EXPORT_SYMBOL(enable_kernel_spe);

void flush_spe_to_thread(struct task_struct *tsk)
{
	if (tsk->thread.regs) {
		preempt_disable();
		if (tsk->thread.regs->msr & MSR_SPE) {
#ifdef CONFIG_SMP
			BUG_ON(tsk != current);
#endif
			giveup_spe(current);
		}
		preempt_enable();
	}
}

int dump_spe(struct pt_regs *regs, elf_vrregset_t *evrregs)
{
	flush_spe_to_thread(current);
	/* We copy u32 evr[32] + u64 acc + u32 spefscr -> 35 */
	memcpy(evrregs, &current->thread.evr[0], sizeof(u32) * 35);
	return 1;
}
#endif /* CONFIG_SPE */

static void set_dabr_spr(unsigned long val)
{
	mtspr(SPRN_DABR, val);
}

int set_dabr(unsigned long dabr)
{
	int ret = 0;

#ifdef CONFIG_PPC64
	if (firmware_has_feature(FW_FEATURE_XDABR)) {
		/* We want to catch accesses from kernel and userspace */
		unsigned long flags = H_DABRX_KERNEL|H_DABRX_USER;
		ret = plpar_set_xdabr(dabr, flags);
	} else if (firmware_has_feature(FW_FEATURE_DABR)) {
		ret = plpar_set_dabr(dabr);
	} else
#endif
		set_dabr_spr(dabr);

	return ret;
}
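
/*
 * Note (added for clarity, not in the original source): thread.dabr holds the
 * per-task data-address-breakpoint value.  __switch_to() below compares it
 * against the per-CPU current_dabr and calls set_dabr() only when the value
 * actually changes, so the firmware call or SPR write is skipped on switches
 * between tasks that use the same breakpoint setting.
 */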
static DEFINE_PER_CPU(unsigned long, current_dabr);

struct task_struct *__switch_to(struct task_struct *prev,
	struct task_struct *new)
{
	struct thread_struct *new_thread, *old_thread;
	unsigned long flags;
	struct task_struct *last;

#ifdef CONFIG_SMP
	/* avoid complexity of lazy save/restore of fpu
	 * by just saving it every time we switch out if
	 * this task used the fpu during the last quantum.
	 *
	 * If it tries to use the fpu again, it'll trap and
	 * reload its fp regs.  So we don't have to do a restore
	 * every switch, just a save.
	 *  -- Cort
	 */
	if (prev->thread.regs && (prev->thread.regs->msr & MSR_FP))
		giveup_fpu(prev);
#ifdef CONFIG_ALTIVEC
	/*
	 * If the previous thread used altivec in the last quantum
	 * (thus changing altivec regs) then save them.
	 * We used to check the VRSAVE register but not all apps
	 * set it, so we don't rely on it now (and in fact we need
	 * to save & restore VSCR even if VRSAVE == 0).  -- paulus
	 *
	 * On SMP we always save/restore altivec regs just to avoid the
	 * complexity of changing processors.
	 *  -- Cort
	 */
	if (prev->thread.regs && (prev->thread.regs->msr & MSR_VEC))
		giveup_altivec(prev);
	/* Avoid the trap.  On smp this never happens since
	 * we don't set last_task_used_altivec -- Cort
	 */
	if (new->thread.regs && last_task_used_altivec == new)
		new->thread.regs->msr |= MSR_VEC;
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_SPE
	/*
	 * If the previous thread used spe in the last quantum
	 * (thus changing spe regs) then save them.
	 *
	 * On SMP we always save/restore spe regs just to avoid the
	 * complexity of changing processors.
	 */
	if ((prev->thread.regs && (prev->thread.regs->msr & MSR_SPE)))
		giveup_spe(prev);
	/* Avoid the trap.  On smp this never happens since
	 * we don't set last_task_used_spe
	 */
	if (new->thread.regs && last_task_used_spe == new)
		new->thread.regs->msr |= MSR_SPE;
#endif /* CONFIG_SPE */
#endif /* CONFIG_SMP */

#ifdef CONFIG_PPC64	/* for now */
	if (unlikely(__get_cpu_var(current_dabr) != new->thread.dabr)) {
		set_dabr(new->thread.dabr);
		__get_cpu_var(current_dabr) = new->thread.dabr;
	}
#endif

	new_thread = &new->thread;
	old_thread = &current->thread;
	local_irq_save(flags);
	last = _switch(old_thread, new_thread);
	local_irq_restore(flags);

	return last;
}
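
/*
 * Clarifying note (added, not part of the original file): on UP kernels the
 * FP/AltiVec/SPE state is handed over lazily -- the registers stay live in
 * the CPU and last_task_used_* remembers their owner, so a task that touches
 * the unit again simply takes the "unavailable" trap and has its state
 * reloaded, as the comment by Cort above describes.  On SMP the
 * CONFIG_SMP block saves the state eagerly at every switch instead, trading
 * a little switch-time work for not having to track which CPU holds whose
 * registers.
 */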
void show_regs(struct pt_regs * regs)
{
	int i, trap;

	printk("NIP: %08lX LR: %08lX SP: %08lX REGS: %p TRAP: %04lx %s\n",
	       regs->nip, regs->link, regs->gpr[1], regs, regs->trap,
	       print_tainted());
	printk("MSR: %08lx EE: %01x PR: %01x FP: %01x ME: %01x IR/DR: %01x%01x\n",
	       regs->msr, regs->msr&MSR_EE ? 1 : 0, regs->msr&MSR_PR ? 1 : 0,
	       regs->msr & MSR_FP ? 1 : 0,regs->msr&MSR_ME ? 1 : 0,
	       regs->msr&MSR_IR ? 1 : 0,
	       regs->msr&MSR_DR ? 1 : 0);
	trap = TRAP(regs);
	if (trap == 0x300 || trap == 0x600)
		printk("DAR: %08lX, DSISR: %08lX\n", regs->dar, regs->dsisr);
	printk("TASK = %p[%d] '%s' THREAD: %p\n",
	       current, current->pid, current->comm, current->thread_info);
	printk("Last syscall: %ld ", current->thread.last_syscall);

#ifdef CONFIG_SMP
	printk(" CPU: %d", smp_processor_id());
#endif /* CONFIG_SMP */

	for (i = 0; i < 32; i++) {
		long r;
		if ((i % 8) == 0)
			printk("\n" KERN_INFO "GPR%02d: ", i);
		if (__get_user(r, &regs->gpr[i]))
			break;
		printk("%08lX ", r);
		if (i == 12 && !FULL_REGS(regs))
			break;
	}
	printk("\n");
#ifdef CONFIG_KALLSYMS
	/*
	 * Lookup NIP late so we have the best chance of getting the
	 * above info out without failing
	 */
	printk("NIP [%08lx] ", regs->nip);
	print_symbol("%s\n", regs->nip);
	printk("LR [%08lx] ", regs->link);
	print_symbol("%s\n", regs->link);
#endif
	show_stack(current, (unsigned long *) regs->gpr[1]);
}

void exit_thread(void)
{
#ifndef CONFIG_SMP
	if (last_task_used_math == current)
		last_task_used_math = NULL;
#ifdef CONFIG_ALTIVEC
	if (last_task_used_altivec == current)
		last_task_used_altivec = NULL;
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_SPE
	if (last_task_used_spe == current)
		last_task_used_spe = NULL;
#endif
#endif /* CONFIG_SMP */
}

void flush_thread(void)
{
#ifndef CONFIG_SMP
	if (last_task_used_math == current)
		last_task_used_math = NULL;
#ifdef CONFIG_ALTIVEC
	if (last_task_used_altivec == current)
		last_task_used_altivec = NULL;
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_SPE
	if (last_task_used_spe == current)
		last_task_used_spe = NULL;
#endif
#endif /* CONFIG_SMP */

#ifdef CONFIG_PPC64	/* for now */
	if (current->thread.dabr) {
		current->thread.dabr = 0;
		set_dabr(0);
	}
#endif
}

void
release_thread(struct task_struct *t)
{
}

/*
 * This gets called before we allocate a new thread and copy
 * the current task into it.
 */
void prepare_to_copy(struct task_struct *tsk)
{
	flush_fp_to_thread(current);
	flush_altivec_to_thread(current);
	flush_spe_to_thread(current);
}

/*
 * Copy a thread..
 */
int
copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
	    unsigned long unused,
	    struct task_struct *p, struct pt_regs *regs)
{
	struct pt_regs *childregs, *kregs;
	extern void ret_from_fork(void);
	unsigned long sp = (unsigned long)p->thread_info + THREAD_SIZE;
	unsigned long childframe;

	CHECK_FULL_REGS(regs);
	/* Copy registers */
	sp -= sizeof(struct pt_regs);
	childregs = (struct pt_regs *) sp;
	*childregs = *regs;
	if ((childregs->msr & MSR_PR) == 0) {
		/* for kernel thread, set `current' and stackptr in new task */
		childregs->gpr[1] = sp + sizeof(struct pt_regs);
		childregs->gpr[2] = (unsigned long) p;
		p->thread.regs = NULL;	/* no user register state */
	} else {
		childregs->gpr[1] = usp;
		p->thread.regs = childregs;
		if (clone_flags & CLONE_SETTLS)
			childregs->gpr[2] = childregs->gpr[6];
	}
	childregs->gpr[3] = 0;	/* Result from fork() */
	sp -= STACK_FRAME_OVERHEAD;
	childframe = sp;

	/*
	 * The way this works is that at some point in the future
	 * some task will call _switch to switch to the new task.
	 * That will pop off the stack frame created below and start
	 * the new task running at ret_from_fork.  The new task will
	 * do some house keeping and then return from the fork or clone
	 * system call, using the stack frame created above.
	 */
	sp -= sizeof(struct pt_regs);
	kregs = (struct pt_regs *) sp;
	sp -= STACK_FRAME_OVERHEAD;
	p->thread.ksp = sp;
	kregs->nip = (unsigned long)ret_from_fork;

	p->thread.last_syscall = -1;

	return 0;
}
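
/*
 * Layout sketch (added for clarity, not part of the original source) of the
 * child's kernel stack as set up by copy_thread() above, from high to low
 * addresses:
 *
 *	p->thread_info + THREAD_SIZE	top of stack
 *	  struct pt_regs (childregs)	user registers, copied from *regs
 *	  STACK_FRAME_OVERHEAD		childframe
 *	  struct pt_regs (kregs)	switch frame; kregs->nip = ret_from_fork
 *	  STACK_FRAME_OVERHEAD
 *	p->thread.ksp			saved kernel SP picked up by _switch()
 */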
/*
 * Set up a thread for executing a new program
 */
void start_thread(struct pt_regs *regs, unsigned long nip, unsigned long sp)
{
	set_fs(USER_DS);
	memset(regs->gpr, 0, sizeof(regs->gpr));
	regs->ctr = 0;
	regs->link = 0;
	regs->xer = 0;
	regs->ccr = 0;
	regs->mq = 0;
	regs->nip = nip;
	regs->gpr[1] = sp;
	regs->msr = MSR_USER;
#ifndef CONFIG_SMP
	if (last_task_used_math == current)
		last_task_used_math = NULL;
#ifdef CONFIG_ALTIVEC
	if (last_task_used_altivec == current)
		last_task_used_altivec = NULL;
#endif
#ifdef CONFIG_SPE
	if (last_task_used_spe == current)
		last_task_used_spe = NULL;
#endif
#endif /* CONFIG_SMP */
	memset(current->thread.fpr, 0, sizeof(current->thread.fpr));
	current->thread.fpscr = 0;
#ifdef CONFIG_ALTIVEC
	memset(current->thread.vr, 0, sizeof(current->thread.vr));
	memset(&current->thread.vscr, 0, sizeof(current->thread.vscr));
	current->thread.vrsave = 0;
	current->thread.used_vr = 0;
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_SPE
	memset(current->thread.evr, 0, sizeof(current->thread.evr));
	current->thread.acc = 0;
	current->thread.spefscr = 0;
	current->thread.used_spe = 0;
#endif /* CONFIG_SPE */
}
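
/*
 * Usage note (added, not part of the original file): start_thread() is
 * reached from the exec path -- typically the binary-format loader, e.g. the
 * ELF loader -- with the new program's entry point in nip and its initial
 * user stack pointer in sp.  Clearing the FP/AltiVec/SPE state here keeps the
 * freshly exec'd image from inheriting register contents from the old one,
 * and regs->msr = MSR_USER makes the task resume in user mode.
 */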
#define PR_FP_ALL_EXCEPT (PR_FP_EXC_DIV | PR_FP_EXC_OVF | PR_FP_EXC_UND \
		| PR_FP_EXC_RES | PR_FP_EXC_INV)

int set_fpexc_mode(struct task_struct *tsk, unsigned int val)
{
	struct pt_regs *regs = tsk->thread.regs;

	/* This is a bit hairy.  If we are an SPE enabled processor
	 * (have embedded fp) we store the IEEE exception enable flags in
	 * fpexc_mode.  fpexc_mode is also used for setting FP exception
	 * mode (async, precise, disabled) for 'Classic' FP. */
	if (val & PR_FP_EXC_SW_ENABLE) {
#ifdef CONFIG_SPE
		tsk->thread.fpexc_mode = val &
			(PR_FP_EXC_SW_ENABLE | PR_FP_ALL_EXCEPT);
#else
		return -EINVAL;
#endif
	} else {
		/* on a CONFIG_SPE this does not hurt us.  The bits that
		 * __pack_fe01 use do not overlap with bits used for
		 * PR_FP_EXC_SW_ENABLE.  Additionally, the MSR[FE0,FE1] bits
		 * on CONFIG_SPE implementations are reserved so writing to
		 * them does not change anything */
		if (val > PR_FP_EXC_PRECISE)
			return -EINVAL;
		tsk->thread.fpexc_mode = __pack_fe01(val);
		if (regs != NULL && (regs->msr & MSR_FP) != 0)
			regs->msr = (regs->msr & ~(MSR_FE0|MSR_FE1))
				| tsk->thread.fpexc_mode;
	}
	return 0;
}
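
/*
 * Illustrative userspace view (added, not part of the original source):
 * set_fpexc_mode()/get_fpexc_mode() are reached via the prctl() syscall, so a
 * program that wants precise floating-point exceptions would do roughly:
 *
 *	#include <sys/prctl.h>
 *	prctl(PR_SET_FPEXC, PR_FP_EXC_PRECISE);
 *
 * while PR_GET_FPEXC writes the current mode back through the user pointer
 * handled by get_fpexc_mode() below.
 */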
int get_fpexc_mode(struct task_struct *tsk, unsigned long adr)
{
	unsigned int val;

	if (tsk->thread.fpexc_mode & PR_FP_EXC_SW_ENABLE)
#ifdef CONFIG_SPE
		val = tsk->thread.fpexc_mode;
#else
		return -EINVAL;
#endif
	else
		val = __unpack_fe01(tsk->thread.fpexc_mode);
	return put_user(val, (unsigned int __user *) adr);
}

int sys_clone(unsigned long clone_flags, unsigned long usp,
	      int __user *parent_tidp, void __user *child_threadptr,
	      int __user *child_tidp, int p6,
	      struct pt_regs *regs)
{
	CHECK_FULL_REGS(regs);
	if (usp == 0)
		usp = regs->gpr[1];	/* stack pointer for child */
	return do_fork(clone_flags, usp, regs, 0, parent_tidp, child_tidp);
}

int sys_fork(unsigned long p1, unsigned long p2, unsigned long p3,
	     unsigned long p4, unsigned long p5, unsigned long p6,
	     struct pt_regs *regs)
{
	CHECK_FULL_REGS(regs);
	return do_fork(SIGCHLD, regs->gpr[1], regs, 0, NULL, NULL);
}

int sys_vfork(unsigned long p1, unsigned long p2, unsigned long p3,
	      unsigned long p4, unsigned long p5, unsigned long p6,
	      struct pt_regs *regs)
{
	CHECK_FULL_REGS(regs);
	return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs->gpr[1],
			regs, 0, NULL, NULL);
}
int sys_execve(unsigned long a0, unsigned long a1, unsigned long a2,
	       unsigned long a3, unsigned long a4, unsigned long a5,
	       struct pt_regs *regs)
{
	int error;
	char * filename;

	filename = getname((char __user *) a0);
	error = PTR_ERR(filename);
	if (IS_ERR(filename))
		goto out;
	flush_fp_to_thread(current);
	flush_altivec_to_thread(current);
	flush_spe_to_thread(current);
	error = do_execve(filename, (char __user * __user *) a1,
			  (char __user * __user *) a2, regs);
	if (error == 0) {
		task_lock(current);
		current->ptrace &= ~PT_DTRACE;
		task_unlock(current);
	}
	putname(filename);
out:
	return error;
}
static int validate_sp(unsigned long sp, struct task_struct *p,
		       unsigned long nbytes)
{
	unsigned long stack_page = (unsigned long)p->thread_info;

	if (sp >= stack_page + sizeof(struct thread_struct)
	    && sp <= stack_page + THREAD_SIZE - nbytes)
		return 1;

#ifdef CONFIG_IRQSTACKS
	stack_page = (unsigned long) hardirq_ctx[task_cpu(p)];
	if (sp >= stack_page + sizeof(struct thread_struct)
	    && sp <= stack_page + THREAD_SIZE - nbytes)
		return 1;

	stack_page = (unsigned long) softirq_ctx[task_cpu(p)];
	if (sp >= stack_page + sizeof(struct thread_struct)
	    && sp <= stack_page + THREAD_SIZE - nbytes)
		return 1;
#endif

	return 0;
}

void dump_stack(void)
{
	show_stack(current, NULL);
}
EXPORT_SYMBOL(dump_stack);
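
/*
 * Note on the stack walk below (added for clarity, not in the original
 * source): on 32-bit PowerPC each stack frame begins with a back-chain word
 * at offset 0 pointing to the caller's frame, and offset 4 is the LR save
 * word where a called function stores its return address.  That is why
 * show_stack() follows the chain with "sp = *(unsigned long *)sp" and picks
 * up return addresses from "sp + 4".
 */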
void show_stack(struct task_struct *tsk, unsigned long *stack)
{
	unsigned long sp, stack_top, prev_sp, ret;
	int count = 0;
	unsigned long next_exc = 0;
	struct pt_regs *regs;
	extern char ret_from_except, ret_from_except_full, ret_from_syscall;

	sp = (unsigned long) stack;
	if (tsk == NULL)
		tsk = current;
	if (sp == 0) {
		if (tsk == current)
			asm("mr %0,1" : "=r" (sp));
		else
			sp = tsk->thread.ksp;
	}

	prev_sp = (unsigned long) (tsk->thread_info + 1);
	stack_top = (unsigned long) tsk->thread_info + THREAD_SIZE;
	while (count < 16 && sp > prev_sp && sp < stack_top && (sp & 3) == 0) {
		if (count == 0) {
			printk("Call trace:");
#ifdef CONFIG_KALLSYMS
			printk("\n");
#endif
		} else {
			if (next_exc) {
				ret = next_exc;
				next_exc = 0;
			} else
				ret = *(unsigned long *)(sp + 4);
			printk(" [%08lx] ", ret);
#ifdef CONFIG_KALLSYMS
			print_symbol("%s", ret);
			printk("\n");
#endif
			if (ret == (unsigned long) &ret_from_except
			    || ret == (unsigned long) &ret_from_except_full
			    || ret == (unsigned long) &ret_from_syscall) {
				/* sp + 16 points to an exception frame */
				regs = (struct pt_regs *) (sp + 16);
				if (sp + 16 + sizeof(*regs) <= stack_top)
					next_exc = regs->nip;
			}
		}
		++count;
		sp = *(unsigned long *)sp;
	}
#ifndef CONFIG_KALLSYMS
	if (count > 0)
		printk("\n");
#endif
}

unsigned long get_wchan(struct task_struct *p)
{
	unsigned long ip, sp;
	int count = 0;

	if (!p || p == current || p->state == TASK_RUNNING)
		return 0;

	sp = p->thread.ksp;
	if (!validate_sp(sp, p, 16))
		return 0;

	do {
		sp = *(unsigned long *)sp;
		if (!validate_sp(sp, p, 16))
			return 0;
		if (count > 0) {
			ip = *(unsigned long *)(sp + 4);
			if (!in_sched_functions(ip))
				return ip;
		}
	} while (count++ < 16);

	return 0;
}
EXPORT_SYMBOL(get_wchan);
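
/*
 * Note (added, not part of the original source): get_wchan() walks a sleeping
 * task's kernel stack using the same back-chain/LR-save layout as
 * show_stack() and returns the first return address that is not inside the
 * scheduler (per in_sched_functions()).  This is the value reported as the
 * task's "wait channel", e.g. via /proc/<pid>/wchan.
 */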