process.c

/*
 * Derived from "arch/i386/kernel/process.c"
 * Copyright (C) 1995 Linus Torvalds
 *
 * Updated and modified by Cort Dougan (cort@cs.nmt.edu) and
 * Paul Mackerras (paulus@cs.anu.edu.au)
 *
 * PowerPC version
 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/elf.h>
#include <linux/init.h>
#include <linux/prctl.h>
#include <linux/init_task.h>
#include <linux/module.h>
#include <linux/kallsyms.h>
#include <linux/mqueue.h>
#include <linux/hardirq.h>
#include <linux/utsname.h>
#include <linux/ftrace.h>
#include <linux/kernel_stat.h>
#include <linux/personality.h>
#include <linux/random.h>
#include <linux/hw_breakpoint.h>

#include <asm/pgtable.h>
#include <asm/uaccess.h>
#include <asm/system.h>
#include <asm/io.h>
#include <asm/processor.h>
#include <asm/mmu.h>
#include <asm/prom.h>
#include <asm/machdep.h>
#include <asm/time.h>
#include <asm/syscalls.h>
#ifdef CONFIG_PPC64
#include <asm/firmware.h>
#endif
#include <linux/kprobes.h>
#include <linux/kdebug.h>

extern unsigned long _get_SP(void);

#ifndef CONFIG_SMP
struct task_struct *last_task_used_math = NULL;
struct task_struct *last_task_used_altivec = NULL;
struct task_struct *last_task_used_vsx = NULL;
struct task_struct *last_task_used_spe = NULL;
#endif

/*
 * Make sure the floating-point register state in the
 * thread_struct is up to date for task tsk.
 */
void flush_fp_to_thread(struct task_struct *tsk)
{
	if (tsk->thread.regs) {
		/*
		 * We need to disable preemption here because if we didn't,
		 * another process could get scheduled after the regs->msr
		 * test but before we have finished saving the FP registers
		 * to the thread_struct.  That process could take over the
		 * FPU, and then when we get scheduled again we would store
		 * bogus values for the remaining FP registers.
		 */
		preempt_disable();
		if (tsk->thread.regs->msr & MSR_FP) {
#ifdef CONFIG_SMP
			/*
			 * This should only ever be called for current or
			 * for a stopped child process.  Since we save away
			 * the FP register state on context switch on SMP,
			 * there is something wrong if a stopped child appears
			 * to still have its FP state in the CPU registers.
			 */
			BUG_ON(tsk != current);
#endif
			giveup_fpu(tsk);
		}
		preempt_enable();
	}
}
EXPORT_SYMBOL_GPL(flush_fp_to_thread);
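
/*
 * Let kernel code use the FPU.  Callers must have preemption disabled
 * (note the WARN_ON below); a typical call sequence would be:
 *
 *	preempt_disable();
 *	enable_kernel_fp();
 *	... use floating-point registers ...
 *	preempt_enable();
 */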
void enable_kernel_fp(void)
{
	WARN_ON(preemptible());

#ifdef CONFIG_SMP
	if (current->thread.regs && (current->thread.regs->msr & MSR_FP))
		giveup_fpu(current);
	else
		giveup_fpu(NULL);	/* just enables FP for kernel */
#else
	giveup_fpu(last_task_used_math);
#endif /* CONFIG_SMP */
}
EXPORT_SYMBOL(enable_kernel_fp);

#ifdef CONFIG_ALTIVEC
void enable_kernel_altivec(void)
{
	WARN_ON(preemptible());

#ifdef CONFIG_SMP
	if (current->thread.regs && (current->thread.regs->msr & MSR_VEC))
		giveup_altivec(current);
	else
		giveup_altivec(NULL);	/* just enable AltiVec for kernel - force */
#else
	giveup_altivec(last_task_used_altivec);
#endif /* CONFIG_SMP */
}
EXPORT_SYMBOL(enable_kernel_altivec);

/*
 * Make sure the VMX/Altivec register state in the
 * thread_struct is up to date for task tsk.
 */
void flush_altivec_to_thread(struct task_struct *tsk)
{
	if (tsk->thread.regs) {
		preempt_disable();
		if (tsk->thread.regs->msr & MSR_VEC) {
#ifdef CONFIG_SMP
			BUG_ON(tsk != current);
#endif
			giveup_altivec(tsk);
		}
		preempt_enable();
	}
}
EXPORT_SYMBOL_GPL(flush_altivec_to_thread);
#endif /* CONFIG_ALTIVEC */

#ifdef CONFIG_VSX
#if 0
/* not currently used, but some crazy RAID module might want to later */
void enable_kernel_vsx(void)
{
	WARN_ON(preemptible());

#ifdef CONFIG_SMP
	if (current->thread.regs && (current->thread.regs->msr & MSR_VSX))
		giveup_vsx(current);
	else
		giveup_vsx(NULL);	/* just enable vsx for kernel - force */
#else
	giveup_vsx(last_task_used_vsx);
#endif /* CONFIG_SMP */
}
EXPORT_SYMBOL(enable_kernel_vsx);
#endif

void giveup_vsx(struct task_struct *tsk)
{
	giveup_fpu(tsk);
	giveup_altivec(tsk);
	__giveup_vsx(tsk);
}

void flush_vsx_to_thread(struct task_struct *tsk)
{
	if (tsk->thread.regs) {
		preempt_disable();
		if (tsk->thread.regs->msr & MSR_VSX) {
#ifdef CONFIG_SMP
			BUG_ON(tsk != current);
#endif
			giveup_vsx(tsk);
		}
		preempt_enable();
	}
}
EXPORT_SYMBOL_GPL(flush_vsx_to_thread);
#endif /* CONFIG_VSX */

#ifdef CONFIG_SPE
void enable_kernel_spe(void)
{
	WARN_ON(preemptible());

#ifdef CONFIG_SMP
	if (current->thread.regs && (current->thread.regs->msr & MSR_SPE))
		giveup_spe(current);
	else
		giveup_spe(NULL);	/* just enable SPE for kernel - force */
#else
	giveup_spe(last_task_used_spe);
#endif /* CONFIG_SMP */
}
EXPORT_SYMBOL(enable_kernel_spe);
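
/*
 * Make sure the SPE register state in the thread_struct is up to
 * date for task tsk, mirroring flush_fp_to_thread() above.
 */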
void flush_spe_to_thread(struct task_struct *tsk)
{
	if (tsk->thread.regs) {
		preempt_disable();
		if (tsk->thread.regs->msr & MSR_SPE) {
#ifdef CONFIG_SMP
			BUG_ON(tsk != current);
#endif
			tsk->thread.spefscr = mfspr(SPRN_SPEFSCR);
			giveup_spe(tsk);
		}
		preempt_enable();
	}
}
#endif /* CONFIG_SPE */

#ifndef CONFIG_SMP
/*
 * If we are doing lazy switching of CPU state (FP, altivec or SPE),
 * and the current task has some state, discard it.
 */
void discard_lazy_cpu_state(void)
{
	preempt_disable();
	if (last_task_used_math == current)
		last_task_used_math = NULL;
#ifdef CONFIG_ALTIVEC
	if (last_task_used_altivec == current)
		last_task_used_altivec = NULL;
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_VSX
	if (last_task_used_vsx == current)
		last_task_used_vsx = NULL;
#endif /* CONFIG_VSX */
#ifdef CONFIG_SPE
	if (last_task_used_spe == current)
		last_task_used_spe = NULL;
#endif
	preempt_enable();
}
#endif /* CONFIG_SMP */

#ifdef CONFIG_PPC_ADV_DEBUG_REGS
void do_send_trap(struct pt_regs *regs, unsigned long address,
		  unsigned long error_code, int signal_code, int breakpt)
{
	siginfo_t info;

	if (notify_die(DIE_DABR_MATCH, "dabr_match", regs, error_code,
			11, SIGSEGV) == NOTIFY_STOP)
		return;

	/* Deliver the signal to userspace */
	info.si_signo = SIGTRAP;
	info.si_errno = breakpt;	/* breakpoint or watchpoint id */
	info.si_code = signal_code;
	info.si_addr = (void __user *)address;
	force_sig_info(SIGTRAP, &info, current);
}
#else	/* !CONFIG_PPC_ADV_DEBUG_REGS */
void do_dabr(struct pt_regs *regs, unsigned long address,
	     unsigned long error_code)
{
	siginfo_t info;

	if (notify_die(DIE_DABR_MATCH, "dabr_match", regs, error_code,
			11, SIGSEGV) == NOTIFY_STOP)
		return;

	if (debugger_dabr_match(regs))
		return;

	/* Clear the DABR */
	set_dabr(0);

	/* Deliver the signal to userspace */
	info.si_signo = SIGTRAP;
	info.si_errno = 0;
	info.si_code = TRAP_HWBKPT;
	info.si_addr = (void __user *)address;
	force_sig_info(SIGTRAP, &info, current);
}
#endif	/* CONFIG_PPC_ADV_DEBUG_REGS */

static DEFINE_PER_CPU(unsigned long, current_dabr);

#ifdef CONFIG_PPC_ADV_DEBUG_REGS
/*
 * Set the debug registers back to their default "safe" values.
 */
static void set_debug_reg_defaults(struct thread_struct *thread)
{
	thread->iac1 = thread->iac2 = 0;
#if CONFIG_PPC_ADV_DEBUG_IACS > 2
	thread->iac3 = thread->iac4 = 0;
#endif
	thread->dac1 = thread->dac2 = 0;
#if CONFIG_PPC_ADV_DEBUG_DVCS > 0
	thread->dvc1 = thread->dvc2 = 0;
#endif
	thread->dbcr0 = 0;
#ifdef CONFIG_BOOKE
	/*
	 * Force User/Supervisor bits to b11 (user-only MSR[PR]=1)
	 */
	thread->dbcr1 = DBCR1_IAC1US | DBCR1_IAC2US |
			DBCR1_IAC3US | DBCR1_IAC4US;
	/*
	 * Force Data Address Compare User/Supervisor bits to be User-only
	 * (0b11 MSR[PR]=1) and set all other bits in DBCR2 register to be 0.
	 */
	thread->dbcr2 = DBCR2_DAC1US | DBCR2_DAC2US;
#else
	thread->dbcr1 = 0;
#endif
}

static void prime_debug_regs(struct thread_struct *thread)
{
	mtspr(SPRN_IAC1, thread->iac1);
	mtspr(SPRN_IAC2, thread->iac2);
#if CONFIG_PPC_ADV_DEBUG_IACS > 2
	mtspr(SPRN_IAC3, thread->iac3);
	mtspr(SPRN_IAC4, thread->iac4);
#endif
	mtspr(SPRN_DAC1, thread->dac1);
	mtspr(SPRN_DAC2, thread->dac2);
#if CONFIG_PPC_ADV_DEBUG_DVCS > 0
	mtspr(SPRN_DVC1, thread->dvc1);
	mtspr(SPRN_DVC2, thread->dvc2);
#endif
	mtspr(SPRN_DBCR0, thread->dbcr0);
	mtspr(SPRN_DBCR1, thread->dbcr1);
#ifdef CONFIG_BOOKE
	mtspr(SPRN_DBCR2, thread->dbcr2);
#endif
}

/*
 * If either the old or the new thread is making use of the
 * debug registers, set the debug registers from the values
 * stored in the new thread.
 */
static void switch_booke_debug_regs(struct thread_struct *new_thread)
{
	if ((current->thread.dbcr0 & DBCR0_IDM)
		|| (new_thread->dbcr0 & DBCR0_IDM))
		prime_debug_regs(new_thread);
}
#else	/* !CONFIG_PPC_ADV_DEBUG_REGS */
#ifndef CONFIG_HAVE_HW_BREAKPOINT
static void set_debug_reg_defaults(struct thread_struct *thread)
{
	if (thread->dabr) {
		thread->dabr = 0;
		set_dabr(0);
	}
}
#endif /* !CONFIG_HAVE_HW_BREAKPOINT */
#endif	/* CONFIG_PPC_ADV_DEBUG_REGS */
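
/*
 * Install a new value in the data address breakpoint register, via
 * the platform hook when one exists, otherwise by writing the
 * appropriate SPR directly.
 */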
int set_dabr(unsigned long dabr)
{
	__get_cpu_var(current_dabr) = dabr;

	if (ppc_md.set_dabr)
		return ppc_md.set_dabr(dabr);

	/* XXX should we have a CPU_FTR_HAS_DABR ? */
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
	mtspr(SPRN_DAC1, dabr);
#ifdef CONFIG_PPC_47x
	isync();
#endif
#elif defined(CONFIG_PPC_BOOK3S)
	mtspr(SPRN_DABR, dabr);
#endif
	return 0;
}

#ifdef CONFIG_PPC64
DEFINE_PER_CPU(struct cpu_usage, cpu_usage_array);
#endif
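
/*
 * Switch register state (FP/VMX/VSX/SPE, debug registers, PURR
 * accounting, lazy-MMU batching) from prev to new, then call the
 * low-level _switch() to swap stacks.  Returns the task we switched
 * away from.
 */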
struct task_struct *__switch_to(struct task_struct *prev,
	struct task_struct *new)
{
	struct thread_struct *new_thread, *old_thread;
	unsigned long flags;
	struct task_struct *last;
#ifdef CONFIG_PPC_BOOK3S_64
	struct ppc64_tlb_batch *batch;
#endif

#ifdef CONFIG_SMP
	/* avoid complexity of lazy save/restore of fpu
	 * by just saving it every time we switch out if
	 * this task used the fpu during the last quantum.
	 *
	 * If it tries to use the fpu again, it'll trap and
	 * reload its fp regs.  So we don't have to do a restore
	 * every switch, just a save.
	 *  -- Cort
	 */
	if (prev->thread.regs && (prev->thread.regs->msr & MSR_FP))
		giveup_fpu(prev);
#ifdef CONFIG_ALTIVEC
	/*
	 * If the previous thread used altivec in the last quantum
	 * (thus changing altivec regs) then save them.
	 * We used to check the VRSAVE register but not all apps
	 * set it, so we don't rely on it now (and in fact we need
	 * to save & restore VSCR even if VRSAVE == 0).  -- paulus
	 *
	 * On SMP we always save/restore altivec regs just to avoid the
	 * complexity of changing processors.
	 *  -- Cort
	 */
	if (prev->thread.regs && (prev->thread.regs->msr & MSR_VEC))
		giveup_altivec(prev);
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_VSX
	if (prev->thread.regs && (prev->thread.regs->msr & MSR_VSX))
		/* VMX and FPU registers are already saved here */
		__giveup_vsx(prev);
#endif /* CONFIG_VSX */
#ifdef CONFIG_SPE
	/*
	 * If the previous thread used spe in the last quantum
	 * (thus changing spe regs) then save them.
	 *
	 * On SMP we always save/restore spe regs just to avoid the
	 * complexity of changing processors.
	 */
	if ((prev->thread.regs && (prev->thread.regs->msr & MSR_SPE)))
		giveup_spe(prev);
#endif /* CONFIG_SPE */

#else  /* CONFIG_SMP */
#ifdef CONFIG_ALTIVEC
	/* Avoid the trap.  On smp this never happens since
	 * we don't set last_task_used_altivec -- Cort
	 */
	if (new->thread.regs && last_task_used_altivec == new)
		new->thread.regs->msr |= MSR_VEC;
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_VSX
	if (new->thread.regs && last_task_used_vsx == new)
		new->thread.regs->msr |= MSR_VSX;
#endif /* CONFIG_VSX */
#ifdef CONFIG_SPE
	/* Avoid the trap.  On smp this never happens since
	 * we don't set last_task_used_spe
	 */
	if (new->thread.regs && last_task_used_spe == new)
		new->thread.regs->msr |= MSR_SPE;
#endif /* CONFIG_SPE */

#endif /* CONFIG_SMP */

#ifdef CONFIG_PPC_ADV_DEBUG_REGS
	switch_booke_debug_regs(&new->thread);
#else
/*
 * For PPC_BOOK3S_64, we use the hw-breakpoint interfaces that would
 * schedule DABR
 */
#ifndef CONFIG_HAVE_HW_BREAKPOINT
	if (unlikely(__get_cpu_var(current_dabr) != new->thread.dabr))
		set_dabr(new->thread.dabr);
#endif /* CONFIG_HAVE_HW_BREAKPOINT */
#endif

	new_thread = &new->thread;
	old_thread = &current->thread;

#if defined(CONFIG_PPC_BOOK3E_64)
	/* XXX Current Book3E code doesn't deal with kernel side DBCR0,
	 * we always hold the user values, so we set it now.
	 *
	 * However, we ensure the kernel MSR:DE is appropriately cleared too
	 * to avoid spurious single step exceptions in the kernel.
	 *
	 * This will have to change to merge with the ppc32 code at some point,
	 * but I don't like much what ppc32 is doing today so there's some
	 * thinking needed there
	 */
	if ((new_thread->dbcr0 | old_thread->dbcr0) & DBCR0_IDM) {
		u32 dbcr0;

		mtmsr(mfmsr() & ~MSR_DE);
		isync();
		dbcr0 = mfspr(SPRN_DBCR0);
		dbcr0 = (dbcr0 & DBCR0_EDM) | new_thread->dbcr0;
		mtspr(SPRN_DBCR0, dbcr0);
	}
#endif /* CONFIG_PPC_BOOK3E_64 */

#ifdef CONFIG_PPC64
	/*
	 * Collect processor utilization data per process
	 */
	if (firmware_has_feature(FW_FEATURE_SPLPAR)) {
		struct cpu_usage *cu = &__get_cpu_var(cpu_usage_array);
		long unsigned start_tb, current_tb;
		start_tb = old_thread->start_tb;
		cu->current_tb = current_tb = mfspr(SPRN_PURR);
		old_thread->accum_tb += (current_tb - start_tb);
		new_thread->start_tb = current_tb;
	}
#endif /* CONFIG_PPC64 */

#ifdef CONFIG_PPC_BOOK3S_64
	batch = &__get_cpu_var(ppc64_tlb_batch);
	if (batch->active) {
		current_thread_info()->local_flags |= _TLF_LAZY_MMU;
		if (batch->index)
			__flush_tlb_pending(batch);
		batch->active = 0;
	}
#endif /* CONFIG_PPC_BOOK3S_64 */

	local_irq_save(flags);

	account_system_vtime(current);
	account_process_vtime(current);

	/*
	 * We can't take a PMU exception inside _switch() since there is a
	 * window where the kernel stack SLB and the kernel stack are out
	 * of sync.  Hard disable here.
	 */
	hard_irq_disable();
	last = _switch(old_thread, new_thread);

#ifdef CONFIG_PPC_BOOK3S_64
	if (current_thread_info()->local_flags & _TLF_LAZY_MMU) {
		current_thread_info()->local_flags &= ~_TLF_LAZY_MMU;
		batch = &__get_cpu_var(ppc64_tlb_batch);
		batch->active = 1;
	}
#endif /* CONFIG_PPC_BOOK3S_64 */

	local_irq_restore(flags);

	return last;
}
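
/*
 * Dump instructions_to_print instruction words around the point at
 * which we took an exception, starting three quarters of the way back
 * so most of the dump precedes regs->nip.
 */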
static int instructions_to_print = 16;

static void show_instructions(struct pt_regs *regs)
{
	int i;
	unsigned long pc = regs->nip - (instructions_to_print * 3 / 4 *
			sizeof(int));

	printk("Instruction dump:");

	for (i = 0; i < instructions_to_print; i++) {
		int instr;

		if (!(i % 8))
			printk("\n");

#if !defined(CONFIG_BOOKE)
		/* If executing with the IMMU off, adjust pc rather
		 * than print XXXXXXXX.
		 */
		if (!(regs->msr & MSR_IR))
			pc = (unsigned long)phys_to_virt(pc);
#endif

		/* We use __get_user here *only* to avoid an OOPS on a
		 * bad address because the pc *should* only be a
		 * kernel address.
		 */
		if (!__kernel_text_address(pc) ||
		     __get_user(instr, (unsigned int __user *)pc)) {
			printk("XXXXXXXX ");
		} else {
			if (regs->nip == pc)
				printk("<%08x> ", instr);
			else
				printk("%08x ", instr);
		}

		pc += sizeof(int);
	}

	printk("\n");
}

static struct regbit {
	unsigned long bit;
	const char *name;
} msr_bits[] = {
	{MSR_EE,	"EE"},
	{MSR_PR,	"PR"},
	{MSR_FP,	"FP"},
	{MSR_VEC,	"VEC"},
	{MSR_VSX,	"VSX"},
	{MSR_ME,	"ME"},
	{MSR_CE,	"CE"},
	{MSR_DE,	"DE"},
	{MSR_IR,	"IR"},
	{MSR_DR,	"DR"},
	{0,		NULL}
};
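
/*
 * Print the names of the MSR bits that are set in val, as a
 * comma-separated list in angle brackets, e.g. <EE,ME,IR,DR>.
 */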
static void printbits(unsigned long val, struct regbit *bits)
{
	const char *sep = "";

	printk("<");
	for (; bits->bit; ++bits)
		if (val & bits->bit) {
			printk("%s%s", sep, bits->name);
			sep = ",";
		}
	printk(">");
}

#ifdef CONFIG_PPC64
#define REG		"%016lx"
#define REGS_PER_LINE	4
#define LAST_VOLATILE	13
#else
#define REG		"%08lx"
#define REGS_PER_LINE	8
#define LAST_VOLATILE	12
#endif

void show_regs(struct pt_regs * regs)
{
	int i, trap;

	printk("NIP: "REG" LR: "REG" CTR: "REG"\n",
	       regs->nip, regs->link, regs->ctr);
	printk("REGS: %p TRAP: %04lx %s (%s)\n",
	       regs, regs->trap, print_tainted(), init_utsname()->release);
	printk("MSR: "REG" ", regs->msr);
	printbits(regs->msr, msr_bits);
	printk(" CR: %08lx XER: %08lx\n", regs->ccr, regs->xer);
	trap = TRAP(regs);
	if ((regs->trap != 0xc00) && cpu_has_feature(CPU_FTR_CFAR))
		printk("CFAR: "REG"\n", regs->orig_gpr3);
	if (trap == 0x300 || trap == 0x600)
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
		printk("DEAR: "REG", ESR: "REG"\n", regs->dar, regs->dsisr);
#else
		printk("DAR: "REG", DSISR: %08lx\n", regs->dar, regs->dsisr);
#endif
	printk("TASK = %p[%d] '%s' THREAD: %p",
	       current, task_pid_nr(current), current->comm, task_thread_info(current));

#ifdef CONFIG_SMP
	printk(" CPU: %d", raw_smp_processor_id());
#endif /* CONFIG_SMP */

	for (i = 0; i < 32; i++) {
		if ((i % REGS_PER_LINE) == 0)
			printk("\nGPR%02d: ", i);
		printk(REG " ", regs->gpr[i]);
		if (i == LAST_VOLATILE && !FULL_REGS(regs))
			break;
	}
	printk("\n");
#ifdef CONFIG_KALLSYMS
	/*
	 * Lookup NIP late so we have the best chance of getting the
	 * above info out without failing
	 */
	printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip);
	printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link);
#endif
	show_stack(current, (unsigned long *) regs->gpr[1]);
	if (!user_mode(regs))
		show_instructions(regs);
}

void exit_thread(void)
{
	discard_lazy_cpu_state();
}

void flush_thread(void)
{
	discard_lazy_cpu_state();

#ifdef CONFIG_HAVE_HW_BREAKPOINT
	flush_ptrace_hw_breakpoint(current);
#else /* CONFIG_HAVE_HW_BREAKPOINT */
	set_debug_reg_defaults(&current->thread);
#endif /* CONFIG_HAVE_HW_BREAKPOINT */
}

void
release_thread(struct task_struct *t)
{
}

/*
 * This gets called before we allocate a new thread and copy
 * the current task into it.
 */
void prepare_to_copy(struct task_struct *tsk)
{
	flush_fp_to_thread(current);
	flush_altivec_to_thread(current);
	flush_vsx_to_thread(current);
	flush_spe_to_thread(current);
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	flush_ptrace_hw_breakpoint(tsk);
#endif /* CONFIG_HAVE_HW_BREAKPOINT */
}

/*
 * Copy a thread.
 */
extern unsigned long dscr_default; /* defined in arch/powerpc/kernel/sysfs.c */

int copy_thread(unsigned long clone_flags, unsigned long usp,
		unsigned long unused, struct task_struct *p,
		struct pt_regs *regs)
{
	struct pt_regs *childregs, *kregs;
	extern void ret_from_fork(void);
	unsigned long sp = (unsigned long)task_stack_page(p) + THREAD_SIZE;

	CHECK_FULL_REGS(regs);
	/* Copy registers */
	sp -= sizeof(struct pt_regs);
	childregs = (struct pt_regs *) sp;
	*childregs = *regs;
	if ((childregs->msr & MSR_PR) == 0) {
		/* for kernel thread, set `current' and stackptr in new task */
		childregs->gpr[1] = sp + sizeof(struct pt_regs);
#ifdef CONFIG_PPC32
		childregs->gpr[2] = (unsigned long) p;
#else
		clear_tsk_thread_flag(p, TIF_32BIT);
#endif
		p->thread.regs = NULL;	/* no user register state */
	} else {
		childregs->gpr[1] = usp;
		p->thread.regs = childregs;
		if (clone_flags & CLONE_SETTLS) {
#ifdef CONFIG_PPC64
			if (!is_32bit_task())
				childregs->gpr[13] = childregs->gpr[6];
			else
#endif
				childregs->gpr[2] = childregs->gpr[6];
		}
	}
	childregs->gpr[3] = 0;	/* Result from fork() */
	sp -= STACK_FRAME_OVERHEAD;

	/*
	 * The way this works is that at some point in the future
	 * some task will call _switch to switch to the new task.
	 * That will pop off the stack frame created below and start
	 * the new task running at ret_from_fork.  The new task will
	 * do some housekeeping and then return from the fork or clone
	 * system call, using the stack frame created above.
	 */
	sp -= sizeof(struct pt_regs);
	kregs = (struct pt_regs *) sp;
	sp -= STACK_FRAME_OVERHEAD;
	p->thread.ksp = sp;
	p->thread.ksp_limit = (unsigned long)task_stack_page(p) +
				_ALIGN_UP(sizeof(struct thread_info), 16);

#ifdef CONFIG_PPC_STD_MMU_64
	if (mmu_has_feature(MMU_FTR_SLB)) {
		unsigned long sp_vsid;
		unsigned long llp = mmu_psize_defs[mmu_linear_psize].sllp;

		if (mmu_has_feature(MMU_FTR_1T_SEGMENT))
			sp_vsid = get_kernel_vsid(sp, MMU_SEGSIZE_1T)
				<< SLB_VSID_SHIFT_1T;
		else
			sp_vsid = get_kernel_vsid(sp, MMU_SEGSIZE_256M)
				<< SLB_VSID_SHIFT;
		sp_vsid |= SLB_VSID_KERNEL | llp;
		p->thread.ksp_vsid = sp_vsid;
	}
#endif /* CONFIG_PPC_STD_MMU_64 */

#ifdef CONFIG_PPC64
	if (cpu_has_feature(CPU_FTR_DSCR)) {
		if (current->thread.dscr_inherit) {
			p->thread.dscr_inherit = 1;
			p->thread.dscr = current->thread.dscr;
		} else if (0 != dscr_default) {
			p->thread.dscr_inherit = 1;
			p->thread.dscr = dscr_default;
		} else {
			p->thread.dscr_inherit = 0;
			p->thread.dscr = 0;
		}
	}
#endif

	/*
	 * The PPC64 ABI makes use of a TOC to contain function
	 * pointers.  The function (ret_from_fork) is actually a pointer
	 * to the TOC entry.  The first entry is a pointer to the actual
	 * function.
	 */
#ifdef CONFIG_PPC64
	kregs->nip = *((unsigned long *)ret_from_fork);
#else
	kregs->nip = (unsigned long)ret_from_fork;
#endif

	return 0;
}

/*
 * Set up a thread for executing a new program
 */
void start_thread(struct pt_regs *regs, unsigned long start, unsigned long sp)
{
#ifdef CONFIG_PPC64
	unsigned long load_addr = regs->gpr[2];	/* saved by ELF_PLAT_INIT */
#endif

	/*
	 * If we exec out of a kernel thread then thread.regs will not be
	 * set.  Do it now.
	 */
	if (!current->thread.regs) {
		struct pt_regs *regs = task_stack_page(current) + THREAD_SIZE;
		current->thread.regs = regs - 1;
	}

	memset(regs->gpr, 0, sizeof(regs->gpr));
	regs->ctr = 0;
	regs->link = 0;
	regs->xer = 0;
	regs->ccr = 0;
	regs->gpr[1] = sp;

	/*
	 * We have just cleared all the nonvolatile GPRs, so make
	 * FULL_REGS(regs) return true.  This is necessary to allow
	 * ptrace to examine the thread immediately after exec.
	 */
	regs->trap &= ~1UL;

#ifdef CONFIG_PPC32
	regs->mq = 0;
	regs->nip = start;
	regs->msr = MSR_USER;
#else
	if (!is_32bit_task()) {
		unsigned long entry, toc;

		/* start is a relocated pointer to the function descriptor for
		 * the elf _start routine.  The first entry in the function
		 * descriptor is the entry address of _start and the second
		 * entry is the TOC value we need to use.
		 */
		__get_user(entry, (unsigned long __user *)start);
		__get_user(toc, (unsigned long __user *)start+1);

		/* Check whether the e_entry function descriptor entries
		 * need to be relocated before we can use them.
		 */
		if (load_addr != 0) {
			entry += load_addr;
			toc += load_addr;
		}
		regs->nip = entry;
		regs->gpr[2] = toc;
		regs->msr = MSR_USER64;
	} else {
		regs->nip = start;
		regs->gpr[2] = 0;
		regs->msr = MSR_USER32;
	}
#endif

	discard_lazy_cpu_state();
#ifdef CONFIG_VSX
	current->thread.used_vsr = 0;
#endif
	memset(current->thread.fpr, 0, sizeof(current->thread.fpr));
	current->thread.fpscr.val = 0;
#ifdef CONFIG_ALTIVEC
	memset(current->thread.vr, 0, sizeof(current->thread.vr));
	memset(&current->thread.vscr, 0, sizeof(current->thread.vscr));
	current->thread.vscr.u[3] = 0x00010000; /* Java mode disabled */
	current->thread.vrsave = 0;
	current->thread.used_vr = 0;
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_SPE
	memset(current->thread.evr, 0, sizeof(current->thread.evr));
	current->thread.acc = 0;
	current->thread.spefscr = 0;
	current->thread.used_spe = 0;
#endif /* CONFIG_SPE */
}

#define PR_FP_ALL_EXCEPT (PR_FP_EXC_DIV | PR_FP_EXC_OVF | PR_FP_EXC_UND \
		| PR_FP_EXC_RES | PR_FP_EXC_INV)

int set_fpexc_mode(struct task_struct *tsk, unsigned int val)
{
	struct pt_regs *regs = tsk->thread.regs;

	/* This is a bit hairy.  If we are an SPE enabled processor
	 * (have embedded fp) we store the IEEE exception enable flags in
	 * fpexc_mode.  fpexc_mode is also used for setting FP exception
	 * mode (async, precise, disabled) for 'Classic' FP. */
	if (val & PR_FP_EXC_SW_ENABLE) {
#ifdef CONFIG_SPE
		if (cpu_has_feature(CPU_FTR_SPE)) {
			tsk->thread.fpexc_mode = val &
				(PR_FP_EXC_SW_ENABLE | PR_FP_ALL_EXCEPT);
			return 0;
		} else {
			return -EINVAL;
		}
#else
		return -EINVAL;
#endif
	}

	/* On a CONFIG_SPE this does not hurt us.  The bits that
	 * __pack_fe01 use do not overlap with bits used for
	 * PR_FP_EXC_SW_ENABLE.  Additionally, the MSR[FE0,FE1] bits
	 * on CONFIG_SPE implementations are reserved so writing to
	 * them does not change anything */
	if (val > PR_FP_EXC_PRECISE)
		return -EINVAL;
	tsk->thread.fpexc_mode = __pack_fe01(val);
	if (regs != NULL && (regs->msr & MSR_FP) != 0)
		regs->msr = (regs->msr & ~(MSR_FE0|MSR_FE1))
			| tsk->thread.fpexc_mode;
	return 0;
}

int get_fpexc_mode(struct task_struct *tsk, unsigned long adr)
{
	unsigned int val;

	if (tsk->thread.fpexc_mode & PR_FP_EXC_SW_ENABLE)
#ifdef CONFIG_SPE
		if (cpu_has_feature(CPU_FTR_SPE))
			val = tsk->thread.fpexc_mode;
		else
			return -EINVAL;
#else
		return -EINVAL;
#endif
	else
		val = __unpack_fe01(tsk->thread.fpexc_mode);
	return put_user(val, (unsigned int __user *) adr);
}

int set_endian(struct task_struct *tsk, unsigned int val)
{
	struct pt_regs *regs = tsk->thread.regs;

	if ((val == PR_ENDIAN_LITTLE && !cpu_has_feature(CPU_FTR_REAL_LE)) ||
	    (val == PR_ENDIAN_PPC_LITTLE && !cpu_has_feature(CPU_FTR_PPC_LE)))
		return -EINVAL;

	if (regs == NULL)
		return -EINVAL;

	if (val == PR_ENDIAN_BIG)
		regs->msr &= ~MSR_LE;
	else if (val == PR_ENDIAN_LITTLE || val == PR_ENDIAN_PPC_LITTLE)
		regs->msr |= MSR_LE;
	else
		return -EINVAL;

	return 0;
}

int get_endian(struct task_struct *tsk, unsigned long adr)
{
	struct pt_regs *regs = tsk->thread.regs;
	unsigned int val;

	if (!cpu_has_feature(CPU_FTR_PPC_LE) &&
	    !cpu_has_feature(CPU_FTR_REAL_LE))
		return -EINVAL;

	if (regs == NULL)
		return -EINVAL;

	if (regs->msr & MSR_LE) {
		if (cpu_has_feature(CPU_FTR_REAL_LE))
			val = PR_ENDIAN_LITTLE;
		else
			val = PR_ENDIAN_PPC_LITTLE;
	} else
		val = PR_ENDIAN_BIG;

	return put_user(val, (unsigned int __user *)adr);
}

int set_unalign_ctl(struct task_struct *tsk, unsigned int val)
{
	tsk->thread.align_ctl = val;
	return 0;
}

int get_unalign_ctl(struct task_struct *tsk, unsigned long adr)
{
	return put_user(tsk->thread.align_ctl, (unsigned int __user *)adr);
}
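
/*
 * System-call entry points for clone/fork/vfork/execve.  On ppc64,
 * user pointers passed in by a 32-bit task must be truncated to
 * 32 bits (TRUNC_PTR below) before being handed to do_fork().
 */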
#define TRUNC_PTR(x)	((typeof(x))(((unsigned long)(x)) & 0xffffffff))

int sys_clone(unsigned long clone_flags, unsigned long usp,
	      int __user *parent_tidp, void __user *child_threadptr,
	      int __user *child_tidp, int p6,
	      struct pt_regs *regs)
{
	CHECK_FULL_REGS(regs);
	if (usp == 0)
		usp = regs->gpr[1];	/* stack pointer for child */
#ifdef CONFIG_PPC64
	if (is_32bit_task()) {
		parent_tidp = TRUNC_PTR(parent_tidp);
		child_tidp = TRUNC_PTR(child_tidp);
	}
#endif
	return do_fork(clone_flags, usp, regs, 0, parent_tidp, child_tidp);
}

int sys_fork(unsigned long p1, unsigned long p2, unsigned long p3,
	     unsigned long p4, unsigned long p5, unsigned long p6,
	     struct pt_regs *regs)
{
	CHECK_FULL_REGS(regs);
	return do_fork(SIGCHLD, regs->gpr[1], regs, 0, NULL, NULL);
}

int sys_vfork(unsigned long p1, unsigned long p2, unsigned long p3,
	      unsigned long p4, unsigned long p5, unsigned long p6,
	      struct pt_regs *regs)
{
	CHECK_FULL_REGS(regs);
	return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs->gpr[1],
			regs, 0, NULL, NULL);
}

int sys_execve(unsigned long a0, unsigned long a1, unsigned long a2,
	       unsigned long a3, unsigned long a4, unsigned long a5,
	       struct pt_regs *regs)
{
	int error;
	char *filename;

	filename = getname((const char __user *) a0);
	error = PTR_ERR(filename);
	if (IS_ERR(filename))
		goto out;
	flush_fp_to_thread(current);
	flush_altivec_to_thread(current);
	flush_spe_to_thread(current);
	error = do_execve(filename,
			  (const char __user *const __user *) a1,
			  (const char __user *const __user *) a2, regs);
	putname(filename);
out:
	return error;
}
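
/*
 * Check whether the stack pointer sp, with nbytes of data beneath it,
 * lies within one of this CPU's hardirq or softirq stacks.
 */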
static inline int valid_irq_stack(unsigned long sp, struct task_struct *p,
				  unsigned long nbytes)
{
	unsigned long stack_page;
	unsigned long cpu = task_cpu(p);

	/*
	 * Avoid crashing if the stack has overflowed and corrupted
	 * task_cpu(p), which is in the thread_info struct.
	 */
	if (cpu < NR_CPUS && cpu_possible(cpu)) {
		stack_page = (unsigned long) hardirq_ctx[cpu];
		if (sp >= stack_page + sizeof(struct thread_struct)
		    && sp <= stack_page + THREAD_SIZE - nbytes)
			return 1;

		stack_page = (unsigned long) softirq_ctx[cpu];
		if (sp >= stack_page + sizeof(struct thread_struct)
		    && sp <= stack_page + THREAD_SIZE - nbytes)
			return 1;
	}
	return 0;
}

int validate_sp(unsigned long sp, struct task_struct *p,
		unsigned long nbytes)
{
	unsigned long stack_page = (unsigned long)task_stack_page(p);

	if (sp >= stack_page + sizeof(struct thread_struct)
	    && sp <= stack_page + THREAD_SIZE - nbytes)
		return 1;

	return valid_irq_stack(sp, p, nbytes);
}
EXPORT_SYMBOL(validate_sp);
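
/*
 * Walk up p's kernel stack (at most 16 frames) and return the first
 * saved return address that is not in the scheduler, i.e. the place
 * where the task is sleeping.  Returns 0 if nothing can be determined.
 */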
unsigned long get_wchan(struct task_struct *p)
{
	unsigned long ip, sp;
	int count = 0;

	if (!p || p == current || p->state == TASK_RUNNING)
		return 0;

	sp = p->thread.ksp;
	if (!validate_sp(sp, p, STACK_FRAME_OVERHEAD))
		return 0;

	do {
		sp = *(unsigned long *)sp;
		if (!validate_sp(sp, p, STACK_FRAME_OVERHEAD))
			return 0;
		if (count > 0) {
			ip = ((unsigned long *)sp)[STACK_FRAME_LR_SAVE];
			if (!in_sched_functions(ip))
				return ip;
		}
	} while (count++ < 16);
	return 0;
}

static int kstack_depth_to_print = CONFIG_PRINT_STACK_DEPTH;

void show_stack(struct task_struct *tsk, unsigned long *stack)
{
	unsigned long sp, ip, lr, newsp;
	int count = 0;
	int firstframe = 1;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	int curr_frame = current->curr_ret_stack;
	extern void return_to_handler(void);
	unsigned long rth = (unsigned long)return_to_handler;
	unsigned long mrth = -1;
#ifdef CONFIG_PPC64
	extern void mod_return_to_handler(void);
	rth = *(unsigned long *)rth;
	mrth = (unsigned long)mod_return_to_handler;
	mrth = *(unsigned long *)mrth;
#endif
#endif

	sp = (unsigned long) stack;
	if (tsk == NULL)
		tsk = current;
	if (sp == 0) {
		if (tsk == current)
			asm("mr %0,1" : "=r" (sp));
		else
			sp = tsk->thread.ksp;
	}

	lr = 0;
	printk("Call Trace:\n");
	do {
		if (!validate_sp(sp, tsk, STACK_FRAME_OVERHEAD))
			return;

		stack = (unsigned long *) sp;
		newsp = stack[0];
		ip = stack[STACK_FRAME_LR_SAVE];
		if (!firstframe || ip != lr) {
			printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip);
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
			if ((ip == rth || ip == mrth) && curr_frame >= 0) {
				printk(" (%pS)",
				       (void *)current->ret_stack[curr_frame].ret);
				curr_frame--;
			}
#endif
			if (firstframe)
				printk(" (unreliable)");
			printk("\n");
		}
		firstframe = 0;

		/*
		 * See if this is an exception frame.
		 * We look for the "regshere" marker in the current frame.
		 */
		if (validate_sp(sp, tsk, STACK_INT_FRAME_SIZE)
		    && stack[STACK_FRAME_MARKER] == STACK_FRAME_REGS_MARKER) {
			struct pt_regs *regs = (struct pt_regs *)
				(sp + STACK_FRAME_OVERHEAD);
			lr = regs->link;
			printk("--- Exception: %lx at %pS\n LR = %pS\n",
			       regs->trap, (void *)regs->nip, (void *)lr);
			firstframe = 1;
		}

		sp = newsp;
	} while (count++ < kstack_depth_to_print);
}

void dump_stack(void)
{
	show_stack(current, NULL);
}
EXPORT_SYMBOL(dump_stack);

#ifdef CONFIG_PPC64
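/*
 * The run latch (a bit in the CTRL SPR) signals to the hardware and
 * hypervisor whether this thread is doing useful work: it is set while
 * busy and cleared when entering the idle loop.
 */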
void ppc64_runlatch_on(void)
{
	unsigned long ctrl;

	if (cpu_has_feature(CPU_FTR_CTRL) && !test_thread_flag(TIF_RUNLATCH)) {
		HMT_medium();

		ctrl = mfspr(SPRN_CTRLF);
		ctrl |= CTRL_RUNLATCH;
		mtspr(SPRN_CTRLT, ctrl);

		set_thread_flag(TIF_RUNLATCH);
	}
}

void __ppc64_runlatch_off(void)
{
	unsigned long ctrl;

	HMT_medium();

	clear_thread_flag(TIF_RUNLATCH);

	ctrl = mfspr(SPRN_CTRLF);
	ctrl &= ~CTRL_RUNLATCH;
	mtspr(SPRN_CTRLT, ctrl);
}
#endif /* CONFIG_PPC64 */
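
/*
 * When THREAD_SIZE is smaller than a page, allocating whole pages for
 * thread_info structures would be wasteful, so carve them out of a
 * dedicated slab cache instead.
 */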
#if THREAD_SHIFT < PAGE_SHIFT
static struct kmem_cache *thread_info_cache;

struct thread_info *alloc_thread_info_node(struct task_struct *tsk, int node)
{
	struct thread_info *ti;

	ti = kmem_cache_alloc_node(thread_info_cache, GFP_KERNEL, node);
	if (unlikely(ti == NULL))
		return NULL;
#ifdef CONFIG_DEBUG_STACK_USAGE
	memset(ti, 0, THREAD_SIZE);
#endif
	return ti;
}

void free_thread_info(struct thread_info *ti)
{
	kmem_cache_free(thread_info_cache, ti);
}

void thread_info_cache_init(void)
{
	thread_info_cache = kmem_cache_create("thread_info", THREAD_SIZE,
					      THREAD_SIZE, 0, NULL);
	BUG_ON(thread_info_cache == NULL);
}
#endif /* THREAD_SHIFT < PAGE_SHIFT */
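
/*
 * Randomize the top of the stack within one page (keeping it 16-byte
 * aligned), unless address-space randomization is disabled for this
 * process.
 */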
unsigned long arch_align_stack(unsigned long sp)
{
	if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
		sp -= get_random_int() & ~PAGE_MASK;
	return sp & ~0xf;
}

static inline unsigned long brk_rnd(void)
{
	unsigned long rnd = 0;

	/* 8MB for 32bit, 1GB for 64bit */
	if (is_32bit_task())
		rnd = (long)(get_random_int() % (1<<(23-PAGE_SHIFT)));
	else
		rnd = (long)(get_random_int() % (1<<(30-PAGE_SHIFT)));

	return rnd << PAGE_SHIFT;
}

unsigned long arch_randomize_brk(struct mm_struct *mm)
{
	unsigned long base = mm->brk;
	unsigned long ret;

#ifdef CONFIG_PPC_STD_MMU_64
	/*
	 * If we are using 1TB segments and we are allowed to randomise
	 * the heap, we can put it above 1TB so it is backed by a 1TB
	 * segment.  Otherwise the heap will be in the bottom 1TB
	 * which always uses 256MB segments and this may result in a
	 * performance penalty.
	 */
	if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T))
		base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T);
#endif

	ret = PAGE_ALIGN(base + brk_rnd());

	if (ret < mm->brk)
		return mm->brk;

	return ret;
}

unsigned long randomize_et_dyn(unsigned long base)
{
	unsigned long ret = PAGE_ALIGN(base + brk_rnd());

	if (ret < base)
		return base;

	return ret;
}