process.c

/*
 * Derived from "arch/i386/kernel/process.c"
 * Copyright (C) 1995 Linus Torvalds
 *
 * Updated and modified by Cort Dougan (cort@cs.nmt.edu) and
 * Paul Mackerras (paulus@cs.anu.edu.au)
 *
 * PowerPC version
 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/elf.h>
#include <linux/init.h>
#include <linux/prctl.h>
#include <linux/init_task.h>
#include <linux/export.h>
#include <linux/kallsyms.h>
#include <linux/mqueue.h>
#include <linux/hardirq.h>
#include <linux/utsname.h>
#include <linux/ftrace.h>
#include <linux/kernel_stat.h>
#include <linux/personality.h>
#include <linux/random.h>
#include <linux/hw_breakpoint.h>
#include <asm/pgtable.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/processor.h>
#include <asm/mmu.h>
#include <asm/prom.h>
#include <asm/machdep.h>
#include <asm/time.h>
#include <asm/runlatch.h>
#include <asm/syscalls.h>
#include <asm/switch_to.h>
#include <asm/debug.h>
#ifdef CONFIG_PPC64
#include <asm/firmware.h>
#endif
#include <linux/kprobes.h>
#include <linux/kdebug.h>

extern unsigned long _get_SP(void);

#ifndef CONFIG_SMP
struct task_struct *last_task_used_math = NULL;
struct task_struct *last_task_used_altivec = NULL;
struct task_struct *last_task_used_vsx = NULL;
struct task_struct *last_task_used_spe = NULL;
#endif

/*
 * Make sure the floating-point register state in the
 * thread_struct is up to date for task tsk.
 */
void flush_fp_to_thread(struct task_struct *tsk)
{
        if (tsk->thread.regs) {
                /*
                 * We need to disable preemption here because if we didn't,
                 * another process could get scheduled after the regs->msr
                 * test but before we have finished saving the FP registers
                 * to the thread_struct. That process could take over the
                 * FPU, and then when we get scheduled again we would store
                 * bogus values for the remaining FP registers.
                 */
                preempt_disable();
                if (tsk->thread.regs->msr & MSR_FP) {
#ifdef CONFIG_SMP
                        /*
                         * This should only ever be called for current or
                         * for a stopped child process. Since we save away
                         * the FP register state on context switch on SMP,
                         * there is something wrong if a stopped child appears
                         * to still have its FP state in the CPU registers.
                         */
                        BUG_ON(tsk != current);
#endif
                        giveup_fpu(tsk);
                }
                preempt_enable();
        }
}
EXPORT_SYMBOL_GPL(flush_fp_to_thread);
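
/*
 * Make the FPU available for use by kernel code: save any live user
 * FP state back to the owning thread_struct first.  Callers must have
 * preemption disabled.
 */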
void enable_kernel_fp(void)
{
        WARN_ON(preemptible());

#ifdef CONFIG_SMP
        if (current->thread.regs && (current->thread.regs->msr & MSR_FP))
                giveup_fpu(current);
        else
                giveup_fpu(NULL);       /* just enables FP for kernel */
#else
        giveup_fpu(last_task_used_math);
#endif /* CONFIG_SMP */
}
EXPORT_SYMBOL(enable_kernel_fp);

#ifdef CONFIG_ALTIVEC
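/*
 * As enable_kernel_fp() above, but for the VMX/Altivec unit.
 */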
void enable_kernel_altivec(void)
{
        WARN_ON(preemptible());

#ifdef CONFIG_SMP
        if (current->thread.regs && (current->thread.regs->msr & MSR_VEC))
                giveup_altivec(current);
        else
                giveup_altivec_notask();
#else
        giveup_altivec(last_task_used_altivec);
#endif /* CONFIG_SMP */
}
EXPORT_SYMBOL(enable_kernel_altivec);

/*
 * Make sure the VMX/Altivec register state in the
 * thread_struct is up to date for task tsk.
 */
void flush_altivec_to_thread(struct task_struct *tsk)
{
        if (tsk->thread.regs) {
                preempt_disable();
                if (tsk->thread.regs->msr & MSR_VEC) {
#ifdef CONFIG_SMP
                        BUG_ON(tsk != current);
#endif
                        giveup_altivec(tsk);
                }
                preempt_enable();
        }
}
EXPORT_SYMBOL_GPL(flush_altivec_to_thread);
#endif /* CONFIG_ALTIVEC */

#ifdef CONFIG_VSX
#if 0
/* not currently used, but some crazy RAID module might want to later */
void enable_kernel_vsx(void)
{
        WARN_ON(preemptible());

#ifdef CONFIG_SMP
        if (current->thread.regs && (current->thread.regs->msr & MSR_VSX))
                giveup_vsx(current);
        else
                giveup_vsx(NULL);       /* just enable vsx for kernel - force */
#else
        giveup_vsx(last_task_used_vsx);
#endif /* CONFIG_SMP */
}
EXPORT_SYMBOL(enable_kernel_vsx);
#endif
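
/*
 * VSX state is spread across the FP and VMX register sets, so giving
 * it up means flushing all three out of the CPU registers for tsk.
 */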
void giveup_vsx(struct task_struct *tsk)
{
        giveup_fpu(tsk);
        giveup_altivec(tsk);
        __giveup_vsx(tsk);
}
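
/*
 * Make sure the VSX register state in the thread_struct is up to date
 * for task tsk.
 */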
void flush_vsx_to_thread(struct task_struct *tsk)
{
        if (tsk->thread.regs) {
                preempt_disable();
                if (tsk->thread.regs->msr & MSR_VSX) {
#ifdef CONFIG_SMP
                        BUG_ON(tsk != current);
#endif
                        giveup_vsx(tsk);
                }
                preempt_enable();
        }
}
EXPORT_SYMBOL_GPL(flush_vsx_to_thread);
#endif /* CONFIG_VSX */

#ifdef CONFIG_SPE
void enable_kernel_spe(void)
{
        WARN_ON(preemptible());

#ifdef CONFIG_SMP
        if (current->thread.regs && (current->thread.regs->msr & MSR_SPE))
                giveup_spe(current);
        else
                giveup_spe(NULL);       /* just enable SPE for kernel - force */
#else
        giveup_spe(last_task_used_spe);
#endif /* CONFIG_SMP */
}
EXPORT_SYMBOL(enable_kernel_spe);
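
/*
 * Make sure the SPE register state in the thread_struct is up to date
 * for task tsk.
 */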
void flush_spe_to_thread(struct task_struct *tsk)
{
        if (tsk->thread.regs) {
                preempt_disable();
                if (tsk->thread.regs->msr & MSR_SPE) {
#ifdef CONFIG_SMP
                        BUG_ON(tsk != current);
#endif
                        tsk->thread.spefscr = mfspr(SPRN_SPEFSCR);
                        giveup_spe(tsk);
                }
                preempt_enable();
        }
}
#endif /* CONFIG_SPE */

#ifndef CONFIG_SMP
/*
 * If we are doing lazy switching of CPU state (FP, altivec or SPE),
 * and the current task has some state, discard it.
 */
void discard_lazy_cpu_state(void)
{
        preempt_disable();
        if (last_task_used_math == current)
                last_task_used_math = NULL;
#ifdef CONFIG_ALTIVEC
        if (last_task_used_altivec == current)
                last_task_used_altivec = NULL;
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_VSX
        if (last_task_used_vsx == current)
                last_task_used_vsx = NULL;
#endif /* CONFIG_VSX */
#ifdef CONFIG_SPE
        if (last_task_used_spe == current)
                last_task_used_spe = NULL;
#endif
        preempt_enable();
}
#endif /* CONFIG_SMP */

#ifdef CONFIG_PPC_ADV_DEBUG_REGS
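/*
 * Deliver a SIGTRAP to the current task for a debug event, recording
 * which breakpoint or watchpoint fired in si_errno.
 */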
void do_send_trap(struct pt_regs *regs, unsigned long address,
                  unsigned long error_code, int signal_code, int breakpt)
{
        siginfo_t info;

        current->thread.trap_nr = signal_code;
        if (notify_die(DIE_DABR_MATCH, "dabr_match", regs, error_code,
                        11, SIGSEGV) == NOTIFY_STOP)
                return;

        /* Deliver the signal to userspace */
        info.si_signo = SIGTRAP;
        info.si_errno = breakpt;        /* breakpoint or watchpoint id */
        info.si_code = signal_code;
        info.si_addr = (void __user *)address;
        force_sig_info(SIGTRAP, &info, current);
}
#else   /* !CONFIG_PPC_ADV_DEBUG_REGS */
void do_dabr(struct pt_regs *regs, unsigned long address,
             unsigned long error_code)
{
        siginfo_t info;

        current->thread.trap_nr = TRAP_HWBKPT;
        if (notify_die(DIE_DABR_MATCH, "dabr_match", regs, error_code,
                        11, SIGSEGV) == NOTIFY_STOP)
                return;

        if (debugger_dabr_match(regs))
                return;

        /* Clear the DABR */
        set_dabr(0, 0);

        /* Deliver the signal to userspace */
        info.si_signo = SIGTRAP;
        info.si_errno = 0;
        info.si_code = TRAP_HWBKPT;
        info.si_addr = (void __user *)address;
        force_sig_info(SIGTRAP, &info, current);
}
#endif  /* CONFIG_PPC_ADV_DEBUG_REGS */

static DEFINE_PER_CPU(unsigned long, current_dabr);

#ifdef CONFIG_PPC_ADV_DEBUG_REGS
/*
 * Set the debug registers back to their default "safe" values.
 */
static void set_debug_reg_defaults(struct thread_struct *thread)
{
        thread->iac1 = thread->iac2 = 0;
#if CONFIG_PPC_ADV_DEBUG_IACS > 2
        thread->iac3 = thread->iac4 = 0;
#endif
        thread->dac1 = thread->dac2 = 0;
#if CONFIG_PPC_ADV_DEBUG_DVCS > 0
        thread->dvc1 = thread->dvc2 = 0;
#endif
        thread->dbcr0 = 0;
#ifdef CONFIG_BOOKE
        /*
         * Force User/Supervisor bits to 0b11 (user-only MSR[PR]=1)
         */
        thread->dbcr1 = DBCR1_IAC1US | DBCR1_IAC2US |
                        DBCR1_IAC3US | DBCR1_IAC4US;
        /*
         * Force Data Address Compare User/Supervisor bits to be User-only
         * (0b11 MSR[PR]=1) and set all other bits in DBCR2 register to be 0.
         */
        thread->dbcr2 = DBCR2_DAC1US | DBCR2_DAC2US;
#else
        thread->dbcr1 = 0;
#endif
}
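
/*
 * Load the advanced debug SPRs from the values saved in the given
 * thread_struct.
 */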
static void prime_debug_regs(struct thread_struct *thread)
{
        mtspr(SPRN_IAC1, thread->iac1);
        mtspr(SPRN_IAC2, thread->iac2);
#if CONFIG_PPC_ADV_DEBUG_IACS > 2
        mtspr(SPRN_IAC3, thread->iac3);
        mtspr(SPRN_IAC4, thread->iac4);
#endif
        mtspr(SPRN_DAC1, thread->dac1);
        mtspr(SPRN_DAC2, thread->dac2);
#if CONFIG_PPC_ADV_DEBUG_DVCS > 0
        mtspr(SPRN_DVC1, thread->dvc1);
        mtspr(SPRN_DVC2, thread->dvc2);
#endif
        mtspr(SPRN_DBCR0, thread->dbcr0);
        mtspr(SPRN_DBCR1, thread->dbcr1);
#ifdef CONFIG_BOOKE
        mtspr(SPRN_DBCR2, thread->dbcr2);
#endif
}

/*
 * If either the old or the new thread is making use of the debug
 * registers, load the debug registers with the values stored in the
 * new thread.
 */
static void switch_booke_debug_regs(struct thread_struct *new_thread)
{
        if ((current->thread.dbcr0 & DBCR0_IDM)
                || (new_thread->dbcr0 & DBCR0_IDM))
                        prime_debug_regs(new_thread);
}
#else   /* !CONFIG_PPC_ADV_DEBUG_REGS */
#ifndef CONFIG_HAVE_HW_BREAKPOINT
static void set_debug_reg_defaults(struct thread_struct *thread)
{
        if (thread->dabr) {
                thread->dabr = 0;
                thread->dabrx = 0;
                set_dabr(0, 0);
        }
}
#endif /* !CONFIG_HAVE_HW_BREAKPOINT */
#endif  /* CONFIG_PPC_ADV_DEBUG_REGS */
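
/*
 * Program the Data Address Breakpoint Register: use the platform's
 * ppc_md.set_dabr() hook if one is provided, otherwise write the SPRs
 * directly.
 */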
int set_dabr(unsigned long dabr, unsigned long dabrx)
{
        __get_cpu_var(current_dabr) = dabr;

        if (ppc_md.set_dabr)
                return ppc_md.set_dabr(dabr, dabrx);

        /* XXX should we have a CPU_FTR_HAS_DABR ? */
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
        mtspr(SPRN_DAC1, dabr);
#ifdef CONFIG_PPC_47x
        isync();
#endif
#elif defined(CONFIG_PPC_BOOK3S)
        mtspr(SPRN_DABR, dabr);
        mtspr(SPRN_DABRX, dabrx);
#endif
        return 0;
}

#ifdef CONFIG_PPC64
DEFINE_PER_CPU(struct cpu_usage, cpu_usage_array);
#endif
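
/*
 * The machine-dependent part of the context switch: flush any lazily
 * managed register state (FP, VMX, VSX, SPE) out of the CPU, swap the
 * debug registers, then call _switch() to change stacks and return the
 * previously running task.
 */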
struct task_struct *__switch_to(struct task_struct *prev,
        struct task_struct *new)
{
        struct thread_struct *new_thread, *old_thread;
        unsigned long flags;
        struct task_struct *last;
#ifdef CONFIG_PPC_BOOK3S_64
        struct ppc64_tlb_batch *batch;
#endif

#ifdef CONFIG_SMP
        /* avoid complexity of lazy save/restore of fpu
         * by just saving it every time we switch out if
         * this task used the fpu during the last quantum.
         *
         * If it tries to use the fpu again, it'll trap and
         * reload its fp regs.  So we don't have to do a restore
         * every switch, just a save.
         *  -- Cort
         */
        if (prev->thread.regs && (prev->thread.regs->msr & MSR_FP))
                giveup_fpu(prev);
#ifdef CONFIG_ALTIVEC
        /*
         * If the previous thread used altivec in the last quantum
         * (thus changing altivec regs) then save them.
         * We used to check the VRSAVE register but not all apps
         * set it, so we don't rely on it now (and in fact we need
         * to save & restore VSCR even if VRSAVE == 0).  -- paulus
         *
         * On SMP we always save/restore altivec regs just to avoid the
         * complexity of changing processors.
         *  -- Cort
         */
        if (prev->thread.regs && (prev->thread.regs->msr & MSR_VEC))
                giveup_altivec(prev);
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_VSX
        if (prev->thread.regs && (prev->thread.regs->msr & MSR_VSX))
                /* VMX and FPU registers are already saved here */
                __giveup_vsx(prev);
#endif /* CONFIG_VSX */
#ifdef CONFIG_SPE
        /*
         * If the previous thread used spe in the last quantum
         * (thus changing spe regs) then save them.
         *
         * On SMP we always save/restore spe regs just to avoid the
         * complexity of changing processors.
         */
        if ((prev->thread.regs && (prev->thread.regs->msr & MSR_SPE)))
                giveup_spe(prev);
#endif /* CONFIG_SPE */

#else  /* CONFIG_SMP */
#ifdef CONFIG_ALTIVEC
        /* Avoid the trap.  On SMP this never happens since
         * we don't set last_task_used_altivec -- Cort
         */
        if (new->thread.regs && last_task_used_altivec == new)
                new->thread.regs->msr |= MSR_VEC;
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_VSX
        if (new->thread.regs && last_task_used_vsx == new)
                new->thread.regs->msr |= MSR_VSX;
#endif /* CONFIG_VSX */
#ifdef CONFIG_SPE
        /* Avoid the trap.  On SMP this never happens since
         * we don't set last_task_used_spe
         */
        if (new->thread.regs && last_task_used_spe == new)
                new->thread.regs->msr |= MSR_SPE;
#endif /* CONFIG_SPE */

#endif /* CONFIG_SMP */

#ifdef CONFIG_PPC_ADV_DEBUG_REGS
        switch_booke_debug_regs(&new->thread);
#else
        /*
         * For PPC_BOOK3S_64, the hw-breakpoint interfaces would normally
         * take care of scheduling the DABR.
         */
#ifndef CONFIG_HAVE_HW_BREAKPOINT
        if (unlikely(__get_cpu_var(current_dabr) != new->thread.dabr))
                set_dabr(new->thread.dabr, new->thread.dabrx);
#endif /* CONFIG_HAVE_HW_BREAKPOINT */
#endif

        new_thread = &new->thread;
        old_thread = &current->thread;

#ifdef CONFIG_PPC64
        /*
         * Collect processor utilization data per process
         */
        if (firmware_has_feature(FW_FEATURE_SPLPAR)) {
                struct cpu_usage *cu = &__get_cpu_var(cpu_usage_array);
                long unsigned start_tb, current_tb;
                start_tb = old_thread->start_tb;
                cu->current_tb = current_tb = mfspr(SPRN_PURR);
                old_thread->accum_tb += (current_tb - start_tb);
                new_thread->start_tb = current_tb;
        }
#endif /* CONFIG_PPC64 */

#ifdef CONFIG_PPC_BOOK3S_64
        batch = &__get_cpu_var(ppc64_tlb_batch);
        if (batch->active) {
                current_thread_info()->local_flags |= _TLF_LAZY_MMU;
                if (batch->index)
                        __flush_tlb_pending(batch);
                batch->active = 0;
        }
#endif /* CONFIG_PPC_BOOK3S_64 */

        local_irq_save(flags);

        /*
         * We can't take a PMU exception inside _switch() since there is a
         * window where the kernel stack SLB and the kernel stack are out
         * of sync. Hard disable here.
         */
        hard_irq_disable();
        last = _switch(old_thread, new_thread);

#ifdef CONFIG_PPC_BOOK3S_64
        if (current_thread_info()->local_flags & _TLF_LAZY_MMU) {
                current_thread_info()->local_flags &= ~_TLF_LAZY_MMU;
                batch = &__get_cpu_var(ppc64_tlb_batch);
                batch->active = 1;
        }
#endif /* CONFIG_PPC_BOOK3S_64 */

        local_irq_restore(flags);

        return last;
}

static int instructions_to_print = 16;
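
/*
 * Dump the instructions surrounding the faulting address (regs->nip)
 * for an oops report; words that can't be read print as XXXXXXXX.
 */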
static void show_instructions(struct pt_regs *regs)
{
        int i;
        unsigned long pc = regs->nip - (instructions_to_print * 3 / 4 *
                        sizeof(int));

        printk("Instruction dump:");

        for (i = 0; i < instructions_to_print; i++) {
                int instr;

                if (!(i % 8))
                        printk("\n");

#if !defined(CONFIG_BOOKE)
                /* If executing with the IMMU off, adjust pc rather
                 * than print XXXXXXXX.
                 */
                if (!(regs->msr & MSR_IR))
                        pc = (unsigned long)phys_to_virt(pc);
#endif

                /* We use __get_user here *only* to avoid an OOPS on a
                 * bad address because the pc *should* only be a
                 * kernel address.
                 */
                if (!__kernel_text_address(pc) ||
                    __get_user(instr, (unsigned int __user *)pc)) {
                        printk(KERN_CONT "XXXXXXXX ");
                } else {
                        if (regs->nip == pc)
                                printk(KERN_CONT "<%08x> ", instr);
                        else
                                printk(KERN_CONT "%08x ", instr);
                }

                pc += sizeof(int);
        }

        printk("\n");
}

static struct regbit {
        unsigned long bit;
        const char *name;
} msr_bits[] = {
#if defined(CONFIG_PPC64) && !defined(CONFIG_BOOKE)
        {MSR_SF,        "SF"},
        {MSR_HV,        "HV"},
#endif
        {MSR_VEC,       "VEC"},
        {MSR_VSX,       "VSX"},
#ifdef CONFIG_BOOKE
        {MSR_CE,        "CE"},
#endif
        {MSR_EE,        "EE"},
        {MSR_PR,        "PR"},
        {MSR_FP,        "FP"},
        {MSR_ME,        "ME"},
#ifdef CONFIG_BOOKE
        {MSR_DE,        "DE"},
#else
        {MSR_SE,        "SE"},
        {MSR_BE,        "BE"},
#endif
        {MSR_IR,        "IR"},
        {MSR_DR,        "DR"},
        {MSR_PMM,       "PMM"},
#ifndef CONFIG_BOOKE
        {MSR_RI,        "RI"},
        {MSR_LE,        "LE"},
#endif
        {0,             NULL}
};
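
/*
 * Print the names of the bits that are set in val, using the given
 * bit table, in the form <NAME1,NAME2,...>.
 */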
static void printbits(unsigned long val, struct regbit *bits)
{
        const char *sep = "";

        printk("<");
        for (; bits->bit; ++bits)
                if (val & bits->bit) {
                        printk("%s%s", sep, bits->name);
                        sep = ",";
                }
        printk(">");
}

#ifdef CONFIG_PPC64
#define REG             "%016lx"
#define REGS_PER_LINE   4
#define LAST_VOLATILE   13
#else
#define REG             "%08lx"
#define REGS_PER_LINE   8
#define LAST_VOLATILE   12
#endif
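
/*
 * Print the full register state of a task for an oops or debugger
 * dump: NIP/LR/CTR, the decoded MSR bits, the GPRs and a backtrace.
 */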
void show_regs(struct pt_regs * regs)
{
        int i, trap;

        printk("NIP: "REG" LR: "REG" CTR: "REG"\n",
               regs->nip, regs->link, regs->ctr);
        printk("REGS: %p TRAP: %04lx %s (%s)\n",
               regs, regs->trap, print_tainted(), init_utsname()->release);
        printk("MSR: "REG" ", regs->msr);
        printbits(regs->msr, msr_bits);
        printk(" CR: %08lx XER: %08lx\n", regs->ccr, regs->xer);
#ifdef CONFIG_PPC64
        printk("SOFTE: %ld\n", regs->softe);
#endif
        trap = TRAP(regs);
        if ((regs->trap != 0xc00) && cpu_has_feature(CPU_FTR_CFAR))
                printk("CFAR: "REG"\n", regs->orig_gpr3);
        if (trap == 0x300 || trap == 0x600)
#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
                printk("DEAR: "REG", ESR: "REG"\n", regs->dar, regs->dsisr);
#else
                printk("DAR: "REG", DSISR: %08lx\n", regs->dar, regs->dsisr);
#endif
        printk("TASK = %p[%d] '%s' THREAD: %p",
               current, task_pid_nr(current), current->comm,
               task_thread_info(current));

#ifdef CONFIG_SMP
        printk(" CPU: %d", raw_smp_processor_id());
#endif /* CONFIG_SMP */

        for (i = 0; i < 32; i++) {
                if ((i % REGS_PER_LINE) == 0)
                        printk("\nGPR%02d: ", i);
                printk(REG " ", regs->gpr[i]);
                if (i == LAST_VOLATILE && !FULL_REGS(regs))
                        break;
        }
        printk("\n");
#ifdef CONFIG_KALLSYMS
        /*
         * Lookup NIP late so we have the best chance of getting the
         * above info out without failing
         */
        printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip);
        printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link);
#endif
        show_stack(current, (unsigned long *) regs->gpr[1]);
        if (!user_mode(regs))
                show_instructions(regs);
}
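
/*
 * Called when a task exits: drop any register state still lazily held
 * for it on this CPU.
 */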
void exit_thread(void)
{
        discard_lazy_cpu_state();
}

void flush_thread(void)
{
        discard_lazy_cpu_state();

#ifdef CONFIG_HAVE_HW_BREAKPOINT
        flush_ptrace_hw_breakpoint(current);
#else /* CONFIG_HAVE_HW_BREAKPOINT */
        set_debug_reg_defaults(&current->thread);
#endif /* CONFIG_HAVE_HW_BREAKPOINT */
}

void
release_thread(struct task_struct *t)
{
}

/*
 * This gets called so that we can store coprocessor state into memory and
 * copy the current task into the new thread.
 */
int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{
        flush_fp_to_thread(src);
        flush_altivec_to_thread(src);
        flush_vsx_to_thread(src);
        flush_spe_to_thread(src);
#ifdef CONFIG_HAVE_HW_BREAKPOINT
        flush_ptrace_hw_breakpoint(src);
#endif /* CONFIG_HAVE_HW_BREAKPOINT */

        *dst = *src;
        return 0;
}

/*
 * Copy a thread.
 */
extern unsigned long dscr_default; /* defined in arch/powerpc/kernel/sysfs.c */

int copy_thread(unsigned long clone_flags, unsigned long usp,
                unsigned long arg, struct task_struct *p,
                struct pt_regs *regs)
{
        struct pt_regs *childregs, *kregs;
        extern void ret_from_fork(void);
        extern void ret_from_kernel_thread(void);
        void (*f)(void);
        unsigned long sp = (unsigned long)task_stack_page(p) + THREAD_SIZE;

        /* Copy registers */
        sp -= sizeof(struct pt_regs);
        childregs = (struct pt_regs *) sp;
        if (!regs) {
                /* for kernel thread, set `current' and stackptr in new task */
                memset(childregs, 0, sizeof(struct pt_regs));
                childregs->gpr[1] = sp + sizeof(struct pt_regs);
#ifdef CONFIG_PPC64
                childregs->gpr[14] = *(unsigned long *)usp;
                childregs->gpr[2] = ((unsigned long *)usp)[1];
                clear_tsk_thread_flag(p, TIF_32BIT);
#else
                childregs->gpr[14] = usp;       /* function */
                childregs->gpr[2] = (unsigned long) p;
#endif
                childregs->gpr[15] = arg;
                p->thread.regs = NULL;  /* no user register state */
                f = ret_from_kernel_thread;
        } else {
                CHECK_FULL_REGS(regs);
                *childregs = *regs;
                childregs->gpr[1] = usp;
                p->thread.regs = childregs;
                childregs->gpr[3] = 0;  /* Result from fork() */
                if (clone_flags & CLONE_SETTLS) {
#ifdef CONFIG_PPC64
                        if (!is_32bit_task())
                                childregs->gpr[13] = childregs->gpr[6];
                        else
#endif
                                childregs->gpr[2] = childregs->gpr[6];
                }
                f = ret_from_fork;
        }
        sp -= STACK_FRAME_OVERHEAD;

        /*
         * The way this works is that at some point in the future
         * some task will call _switch to switch to the new task.
         * That will pop off the stack frame created below and start
         * the new task running at ret_from_fork.  The new task will
         * do some house keeping and then return from the fork or clone
         * system call, using the stack frame created above.
         */
        sp -= sizeof(struct pt_regs);
        kregs = (struct pt_regs *) sp;
        sp -= STACK_FRAME_OVERHEAD;
        p->thread.ksp = sp;
        p->thread.ksp_limit = (unsigned long)task_stack_page(p) +
                                _ALIGN_UP(sizeof(struct thread_info), 16);

#ifdef CONFIG_PPC_STD_MMU_64
        if (mmu_has_feature(MMU_FTR_SLB)) {
                unsigned long sp_vsid;
                unsigned long llp = mmu_psize_defs[mmu_linear_psize].sllp;

                if (mmu_has_feature(MMU_FTR_1T_SEGMENT))
                        sp_vsid = get_kernel_vsid(sp, MMU_SEGSIZE_1T)
                                << SLB_VSID_SHIFT_1T;
                else
                        sp_vsid = get_kernel_vsid(sp, MMU_SEGSIZE_256M)
                                << SLB_VSID_SHIFT;
                sp_vsid |= SLB_VSID_KERNEL | llp;
                p->thread.ksp_vsid = sp_vsid;
        }
#endif /* CONFIG_PPC_STD_MMU_64 */
#ifdef CONFIG_PPC64
        if (cpu_has_feature(CPU_FTR_DSCR)) {
                p->thread.dscr_inherit = current->thread.dscr_inherit;
                p->thread.dscr = current->thread.dscr;
        }
#endif
        /*
         * The PPC64 ABI makes use of a TOC to contain function
         * pointers.  The function pointer f above actually points at the
         * function descriptor; the first word of that descriptor is the
         * address of the actual function.
         */
#ifdef CONFIG_PPC64
        kregs->nip = *((unsigned long *)f);
#else
        kregs->nip = (unsigned long)f;
#endif
        return 0;
}

/*
 * Set up a thread for executing a new program
 */
void start_thread(struct pt_regs *regs, unsigned long start, unsigned long sp)
{
#ifdef CONFIG_PPC64
        unsigned long load_addr = regs->gpr[2]; /* saved by ELF_PLAT_INIT */
#endif

        /*
         * If we exec out of a kernel thread then thread.regs will not be
         * set.  Do it now.
         */
        if (!current->thread.regs) {
                struct pt_regs *regs = task_stack_page(current) + THREAD_SIZE;
                current->thread.regs = regs - 1;
        }

        memset(regs->gpr, 0, sizeof(regs->gpr));
        regs->ctr = 0;
        regs->link = 0;
        regs->xer = 0;
        regs->ccr = 0;
        regs->gpr[1] = sp;

        /*
         * We have just cleared all the nonvolatile GPRs, so make
         * FULL_REGS(regs) return true.  This is necessary to allow
         * ptrace to examine the thread immediately after exec.
         */
        regs->trap &= ~1UL;

#ifdef CONFIG_PPC32
        regs->mq = 0;
        regs->nip = start;
        regs->msr = MSR_USER;
#else
        if (!is_32bit_task()) {
                unsigned long entry, toc;

                /* start is a relocated pointer to the function descriptor for
                 * the ELF _start routine.  The first entry in the function
                 * descriptor is the entry address of _start and the second
                 * entry is the TOC value we need to use.
                 */
                __get_user(entry, (unsigned long __user *)start);
                __get_user(toc, (unsigned long __user *)start+1);

                /* Check whether the e_entry function descriptor entries
                 * need to be relocated before we can use them.
                 */
                if (load_addr != 0) {
                        entry += load_addr;
                        toc += load_addr;
                }
                regs->nip = entry;
                regs->gpr[2] = toc;
                regs->msr = MSR_USER64;
        } else {
                regs->nip = start;
                regs->gpr[2] = 0;
                regs->msr = MSR_USER32;
        }
#endif

        discard_lazy_cpu_state();
#ifdef CONFIG_VSX
        current->thread.used_vsr = 0;
#endif
        memset(current->thread.fpr, 0, sizeof(current->thread.fpr));
        current->thread.fpscr.val = 0;
#ifdef CONFIG_ALTIVEC
        memset(current->thread.vr, 0, sizeof(current->thread.vr));
        memset(&current->thread.vscr, 0, sizeof(current->thread.vscr));
        current->thread.vscr.u[3] = 0x00010000; /* Java mode disabled */
        current->thread.vrsave = 0;
        current->thread.used_vr = 0;
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_SPE
        memset(current->thread.evr, 0, sizeof(current->thread.evr));
        current->thread.acc = 0;
        current->thread.spefscr = 0;
        current->thread.used_spe = 0;
#endif /* CONFIG_SPE */
}

#define PR_FP_ALL_EXCEPT (PR_FP_EXC_DIV | PR_FP_EXC_OVF | PR_FP_EXC_UND \
                | PR_FP_EXC_RES | PR_FP_EXC_INV)

int set_fpexc_mode(struct task_struct *tsk, unsigned int val)
{
        struct pt_regs *regs = tsk->thread.regs;

        /* This is a bit hairy.  If we are an SPE enabled processor
         * (have embedded fp) we store the IEEE exception enable flags in
         * fpexc_mode.  fpexc_mode is also used for setting FP exception
         * mode (async, precise, disabled) for 'Classic' FP. */
        if (val & PR_FP_EXC_SW_ENABLE) {
#ifdef CONFIG_SPE
                if (cpu_has_feature(CPU_FTR_SPE)) {
                        tsk->thread.fpexc_mode = val &
                                (PR_FP_EXC_SW_ENABLE | PR_FP_ALL_EXCEPT);
                        return 0;
                } else {
                        return -EINVAL;
                }
#else
                return -EINVAL;
#endif
        }

        /* On a CONFIG_SPE kernel this does not hurt us.  The bits that
         * __pack_fe01 uses do not overlap with bits used for
         * PR_FP_EXC_SW_ENABLE.  Additionally, the MSR[FE0,FE1] bits
         * on CONFIG_SPE implementations are reserved so writing to
         * them does not change anything */
        if (val > PR_FP_EXC_PRECISE)
                return -EINVAL;
        tsk->thread.fpexc_mode = __pack_fe01(val);
        if (regs != NULL && (regs->msr & MSR_FP) != 0)
                regs->msr = (regs->msr & ~(MSR_FE0|MSR_FE1))
                        | tsk->thread.fpexc_mode;
        return 0;
}
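
/*
 * Read back the FP exception mode for prctl(PR_GET_FPEXC) and copy it
 * to the user buffer at adr.
 */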
int get_fpexc_mode(struct task_struct *tsk, unsigned long adr)
{
        unsigned int val;

        if (tsk->thread.fpexc_mode & PR_FP_EXC_SW_ENABLE)
#ifdef CONFIG_SPE
                if (cpu_has_feature(CPU_FTR_SPE))
                        val = tsk->thread.fpexc_mode;
                else
                        return -EINVAL;
#else
                return -EINVAL;
#endif
        else
                val = __unpack_fe01(tsk->thread.fpexc_mode);
        return put_user(val, (unsigned int __user *) adr);
}
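
/*
 * prctl(PR_SET_ENDIAN) handler: set the user MSR[LE] bit according to
 * the requested endian mode, if the CPU supports it.
 */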
int set_endian(struct task_struct *tsk, unsigned int val)
{
        struct pt_regs *regs = tsk->thread.regs;

        if ((val == PR_ENDIAN_LITTLE && !cpu_has_feature(CPU_FTR_REAL_LE)) ||
            (val == PR_ENDIAN_PPC_LITTLE && !cpu_has_feature(CPU_FTR_PPC_LE)))
                return -EINVAL;

        if (regs == NULL)
                return -EINVAL;

        if (val == PR_ENDIAN_BIG)
                regs->msr &= ~MSR_LE;
        else if (val == PR_ENDIAN_LITTLE || val == PR_ENDIAN_PPC_LITTLE)
                regs->msr |= MSR_LE;
        else
                return -EINVAL;

        return 0;
}

int get_endian(struct task_struct *tsk, unsigned long adr)
{
        struct pt_regs *regs = tsk->thread.regs;
        unsigned int val;

        if (!cpu_has_feature(CPU_FTR_PPC_LE) &&
            !cpu_has_feature(CPU_FTR_REAL_LE))
                return -EINVAL;

        if (regs == NULL)
                return -EINVAL;

        if (regs->msr & MSR_LE) {
                if (cpu_has_feature(CPU_FTR_REAL_LE))
                        val = PR_ENDIAN_LITTLE;
                else
                        val = PR_ENDIAN_PPC_LITTLE;
        } else
                val = PR_ENDIAN_BIG;

        return put_user(val, (unsigned int __user *)adr);
}
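
/*
 * prctl(PR_SET_UNALIGN) and prctl(PR_GET_UNALIGN) handlers for the
 * unaligned-access control word.
 */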
int set_unalign_ctl(struct task_struct *tsk, unsigned int val)
{
        tsk->thread.align_ctl = val;
        return 0;
}

int get_unalign_ctl(struct task_struct *tsk, unsigned long adr)
{
        return put_user(tsk->thread.align_ctl, (unsigned int __user *)adr);
}

#define TRUNC_PTR(x)    ((typeof(x))(((unsigned long)(x)) & 0xffffffff))
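
/*
 * System call entry points for clone, fork and vfork; all of them
 * funnel into do_fork() with the caller's register frame.
 */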
int sys_clone(unsigned long clone_flags, unsigned long usp,
              int __user *parent_tidp, void __user *child_threadptr,
              int __user *child_tidp, int p6,
              struct pt_regs *regs)
{
        CHECK_FULL_REGS(regs);
        if (usp == 0)
                usp = regs->gpr[1];     /* stack pointer for child */
#ifdef CONFIG_PPC64
        if (is_32bit_task()) {
                parent_tidp = TRUNC_PTR(parent_tidp);
                child_tidp = TRUNC_PTR(child_tidp);
        }
#endif
        return do_fork(clone_flags, usp, regs, 0, parent_tidp, child_tidp);
}

int sys_fork(unsigned long p1, unsigned long p2, unsigned long p3,
             unsigned long p4, unsigned long p5, unsigned long p6,
             struct pt_regs *regs)
{
        CHECK_FULL_REGS(regs);
        return do_fork(SIGCHLD, regs->gpr[1], regs, 0, NULL, NULL);
}

int sys_vfork(unsigned long p1, unsigned long p2, unsigned long p3,
              unsigned long p4, unsigned long p5, unsigned long p6,
              struct pt_regs *regs)
{
        CHECK_FULL_REGS(regs);
        return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs->gpr[1],
                        regs, 0, NULL, NULL);
}

void __ret_from_kernel_execve(struct pt_regs *normal) __noreturn;

void ret_from_kernel_execve(struct pt_regs *normal)
{
        set_thread_flag(TIF_RESTOREALL);
        __ret_from_kernel_execve(normal);
}
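
/*
 * Check whether sp, with nbytes of space below the top, points inside
 * one of this CPU's hard or soft IRQ stacks.
 */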
static inline int valid_irq_stack(unsigned long sp, struct task_struct *p,
                                  unsigned long nbytes)
{
        unsigned long stack_page;
        unsigned long cpu = task_cpu(p);

        /*
         * Avoid crashing if the stack has overflowed and corrupted
         * task_cpu(p), which is in the thread_info struct.
         */
        if (cpu < NR_CPUS && cpu_possible(cpu)) {
                stack_page = (unsigned long) hardirq_ctx[cpu];
                if (sp >= stack_page + sizeof(struct thread_struct)
                    && sp <= stack_page + THREAD_SIZE - nbytes)
                        return 1;

                stack_page = (unsigned long) softirq_ctx[cpu];
                if (sp >= stack_page + sizeof(struct thread_struct)
                    && sp <= stack_page + THREAD_SIZE - nbytes)
                        return 1;
        }
        return 0;
}
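
/*
 * Check that sp is a plausible kernel stack pointer for task p, with
 * room for a frame of nbytes below the top of the stack.
 */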
int validate_sp(unsigned long sp, struct task_struct *p,
                unsigned long nbytes)
{
        unsigned long stack_page = (unsigned long)task_stack_page(p);

        if (sp >= stack_page + sizeof(struct thread_struct)
            && sp <= stack_page + THREAD_SIZE - nbytes)
                return 1;

        return valid_irq_stack(sp, p, nbytes);
}
EXPORT_SYMBOL(validate_sp);
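
/*
 * Walk p's kernel stack to find the first return address outside the
 * scheduler, i.e. the place where the task went to sleep.
 */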
unsigned long get_wchan(struct task_struct *p)
{
        unsigned long ip, sp;
        int count = 0;

        if (!p || p == current || p->state == TASK_RUNNING)
                return 0;

        sp = p->thread.ksp;
        if (!validate_sp(sp, p, STACK_FRAME_OVERHEAD))
                return 0;

        do {
                sp = *(unsigned long *)sp;
                if (!validate_sp(sp, p, STACK_FRAME_OVERHEAD))
                        return 0;
                if (count > 0) {
                        ip = ((unsigned long *)sp)[STACK_FRAME_LR_SAVE];
                        if (!in_sched_functions(ip))
                                return ip;
                }
        } while (count++ < 16);
        return 0;
}

static int kstack_depth_to_print = CONFIG_PRINT_STACK_DEPTH;
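
/*
 * Print a backtrace for tsk, following the stack frame back chain and
 * annotating any exception frames found along the way.
 */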
void show_stack(struct task_struct *tsk, unsigned long *stack)
{
        unsigned long sp, ip, lr, newsp;
        int count = 0;
        int firstframe = 1;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
        int curr_frame = current->curr_ret_stack;
        extern void return_to_handler(void);
        unsigned long rth = (unsigned long)return_to_handler;
        unsigned long mrth = -1;
#ifdef CONFIG_PPC64
        extern void mod_return_to_handler(void);
        rth = *(unsigned long *)rth;
        mrth = (unsigned long)mod_return_to_handler;
        mrth = *(unsigned long *)mrth;
#endif
#endif

        sp = (unsigned long) stack;
        if (tsk == NULL)
                tsk = current;
        if (sp == 0) {
                if (tsk == current)
                        asm("mr %0,1" : "=r" (sp));
                else
                        sp = tsk->thread.ksp;
        }

        lr = 0;
        printk("Call Trace:\n");
        do {
                if (!validate_sp(sp, tsk, STACK_FRAME_OVERHEAD))
                        return;

                stack = (unsigned long *) sp;
                newsp = stack[0];
                ip = stack[STACK_FRAME_LR_SAVE];
                if (!firstframe || ip != lr) {
                        printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip);
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
                        if ((ip == rth || ip == mrth) && curr_frame >= 0) {
                                printk(" (%pS)",
                                       (void *)current->ret_stack[curr_frame].ret);
                                curr_frame--;
                        }
#endif
                        if (firstframe)
                                printk(" (unreliable)");
                        printk("\n");
                }
                firstframe = 0;

                /*
                 * See if this is an exception frame.
                 * We look for the "regshere" marker in the current frame.
                 */
                if (validate_sp(sp, tsk, STACK_INT_FRAME_SIZE)
                    && stack[STACK_FRAME_MARKER] == STACK_FRAME_REGS_MARKER) {
                        struct pt_regs *regs = (struct pt_regs *)
                                (sp + STACK_FRAME_OVERHEAD);
                        lr = regs->link;
                        printk("--- Exception: %lx at %pS\n LR = %pS\n",
                               regs->trap, (void *)regs->nip, (void *)lr);
                        firstframe = 1;
                }

                sp = newsp;
        } while (count++ < kstack_depth_to_print);
}

void dump_stack(void)
{
        show_stack(current, NULL);
}
EXPORT_SYMBOL(dump_stack);

#ifdef CONFIG_PPC64
/* Called with hard IRQs off */
void __ppc64_runlatch_on(void)
{
        struct thread_info *ti = current_thread_info();
        unsigned long ctrl;

        ctrl = mfspr(SPRN_CTRLF);
        ctrl |= CTRL_RUNLATCH;
        mtspr(SPRN_CTRLT, ctrl);

        ti->local_flags |= _TLF_RUNLATCH;
}

/* Called with hard IRQs off */
void __ppc64_runlatch_off(void)
{
        struct thread_info *ti = current_thread_info();
        unsigned long ctrl;

        ti->local_flags &= ~_TLF_RUNLATCH;

        ctrl = mfspr(SPRN_CTRLF);
        ctrl &= ~CTRL_RUNLATCH;
        mtspr(SPRN_CTRLT, ctrl);
}
#endif /* CONFIG_PPC64 */
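
/*
 * Randomize the initial stack pointer within a page (unless address
 * space randomization is disabled) and align it to 16 bytes.
 */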
unsigned long arch_align_stack(unsigned long sp)
{
        if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
                sp -= get_random_int() & ~PAGE_MASK;
        return sp & ~0xf;
}

static inline unsigned long brk_rnd(void)
{
        unsigned long rnd = 0;

        /* 8MB for 32bit, 1GB for 64bit */
        if (is_32bit_task())
                rnd = (long)(get_random_int() % (1<<(23-PAGE_SHIFT)));
        else
                rnd = (long)(get_random_int() % (1<<(30-PAGE_SHIFT)));

        return rnd << PAGE_SHIFT;
}

unsigned long arch_randomize_brk(struct mm_struct *mm)
{
        unsigned long base = mm->brk;
        unsigned long ret;

#ifdef CONFIG_PPC_STD_MMU_64
        /*
         * If we are using 1TB segments and we are allowed to randomise
         * the heap, we can put it above 1TB so it is backed by a 1TB
         * segment. Otherwise the heap will be in the bottom 1TB
         * which always uses 256MB segments and this may result in a
         * performance penalty.
         */
        if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T))
                base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T);
#endif

        ret = PAGE_ALIGN(base + brk_rnd());

        if (ret < mm->brk)
                return mm->brk;

        return ret;
}
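
/*
 * Randomize the load address for ET_DYN (position-independent)
 * executables, never returning an address below base.
 */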
unsigned long randomize_et_dyn(unsigned long base)
{
        unsigned long ret = PAGE_ALIGN(base + brk_rnd());

        if (ret < base)
                return base;

        return ret;
}