/*
 * Derived from "arch/i386/kernel/process.c"
 * Copyright (C) 1995 Linus Torvalds
 *
 * Updated and modified by Cort Dougan (cort@cs.nmt.edu) and
 * Paul Mackerras (paulus@cs.anu.edu.au)
 *
 * PowerPC version
 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/elf.h>
#include <linux/init.h>
#include <linux/prctl.h>
#include <linux/init_task.h>
#include <linux/export.h>
#include <linux/kallsyms.h>
#include <linux/mqueue.h>
#include <linux/hardirq.h>
#include <linux/utsname.h>
#include <linux/ftrace.h>
#include <linux/kernel_stat.h>
#include <linux/personality.h>
#include <linux/random.h>
#include <linux/hw_breakpoint.h>

#include <asm/pgtable.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/processor.h>
#include <asm/mmu.h>
#include <asm/prom.h>
#include <asm/machdep.h>
#include <asm/time.h>
#include <asm/runlatch.h>
#include <asm/syscalls.h>
#include <asm/switch_to.h>
#include <asm/tm.h>
#include <asm/debug.h>
#ifdef CONFIG_PPC64
#include <asm/firmware.h>
#endif
#include <linux/kprobes.h>
#include <linux/kdebug.h>

/* Transactional Memory debug */
#ifdef TM_DEBUG_SW
#define TM_DEBUG(x...) printk(KERN_INFO x)
#else
#define TM_DEBUG(x...) do { } while (0)
#endif

extern unsigned long _get_SP(void);

#ifndef CONFIG_SMP
struct task_struct *last_task_used_math = NULL;
struct task_struct *last_task_used_altivec = NULL;
struct task_struct *last_task_used_vsx = NULL;
struct task_struct *last_task_used_spe = NULL;
#endif

#ifdef CONFIG_PPC_FPU
/*
 * Make sure the floating-point register state in the
 * thread_struct is up to date for task tsk.
 */
void flush_fp_to_thread(struct task_struct *tsk)
{
	if (tsk->thread.regs) {
		/*
		 * We need to disable preemption here because if we didn't,
		 * another process could get scheduled after the regs->msr
		 * test but before we have finished saving the FP registers
		 * to the thread_struct. That process could take over the
		 * FPU, and then when we get scheduled again we would store
		 * bogus values for the remaining FP registers.
		 */
		preempt_disable();
		if (tsk->thread.regs->msr & MSR_FP) {
#ifdef CONFIG_SMP
			/*
			 * This should only ever be called for current or
			 * for a stopped child process. Since we save away
			 * the FP register state on context switch on SMP,
			 * there is something wrong if a stopped child appears
			 * to still have its FP state in the CPU registers.
			 */
			BUG_ON(tsk != current);
#endif
			giveup_fpu(tsk);
		}
		preempt_enable();
	}
}
EXPORT_SYMBOL_GPL(flush_fp_to_thread);
#endif /* CONFIG_PPC_FPU */
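
/*
 * Illustrative usage (a sketch, not code from this file): the ptrace
 * register-read path is a typical caller, e.g.
 *
 *	flush_fp_to_thread(child);
 *	memcpy(&fpregs, &child->thread.fp_state, sizeof(fpregs));
 *
 * so that the thread_struct copy is guaranteed current before it is read.
 */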

void enable_kernel_fp(void)
{
	WARN_ON(preemptible());

#ifdef CONFIG_SMP
	if (current->thread.regs && (current->thread.regs->msr & MSR_FP))
		giveup_fpu(current);
	else
		giveup_fpu(NULL);	/* just enables FP for kernel */
#else
	giveup_fpu(last_task_used_math);
#endif /* CONFIG_SMP */
}
EXPORT_SYMBOL(enable_kernel_fp);

#ifdef CONFIG_ALTIVEC
void enable_kernel_altivec(void)
{
	WARN_ON(preemptible());

#ifdef CONFIG_SMP
	if (current->thread.regs && (current->thread.regs->msr & MSR_VEC))
		giveup_altivec(current);
	else
		giveup_altivec_notask();
#else
	giveup_altivec(last_task_used_altivec);
#endif /* CONFIG_SMP */
}
EXPORT_SYMBOL(enable_kernel_altivec);

/*
 * Make sure the VMX/Altivec register state in the
 * thread_struct is up to date for task tsk.
 */
void flush_altivec_to_thread(struct task_struct *tsk)
{
	if (tsk->thread.regs) {
		preempt_disable();
		if (tsk->thread.regs->msr & MSR_VEC) {
#ifdef CONFIG_SMP
			BUG_ON(tsk != current);
#endif
			giveup_altivec(tsk);
		}
		preempt_enable();
	}
}
EXPORT_SYMBOL_GPL(flush_altivec_to_thread);
#endif /* CONFIG_ALTIVEC */

#ifdef CONFIG_VSX
#if 0
/* not currently used, but some crazy RAID module might want to later */
void enable_kernel_vsx(void)
{
	WARN_ON(preemptible());

#ifdef CONFIG_SMP
	if (current->thread.regs && (current->thread.regs->msr & MSR_VSX))
		giveup_vsx(current);
	else
		giveup_vsx(NULL);	/* just enable vsx for kernel - force */
#else
	giveup_vsx(last_task_used_vsx);
#endif /* CONFIG_SMP */
}
EXPORT_SYMBOL(enable_kernel_vsx);
#endif

void giveup_vsx(struct task_struct *tsk)
{
	giveup_fpu(tsk);
	giveup_altivec(tsk);
	__giveup_vsx(tsk);
}

void flush_vsx_to_thread(struct task_struct *tsk)
{
	if (tsk->thread.regs) {
		preempt_disable();
		if (tsk->thread.regs->msr & MSR_VSX) {
#ifdef CONFIG_SMP
			BUG_ON(tsk != current);
#endif
			giveup_vsx(tsk);
		}
		preempt_enable();
	}
}
EXPORT_SYMBOL_GPL(flush_vsx_to_thread);
#endif /* CONFIG_VSX */

#ifdef CONFIG_SPE
void enable_kernel_spe(void)
{
	WARN_ON(preemptible());

#ifdef CONFIG_SMP
	if (current->thread.regs && (current->thread.regs->msr & MSR_SPE))
		giveup_spe(current);
	else
		giveup_spe(NULL);	/* just enable SPE for kernel - force */
#else
	giveup_spe(last_task_used_spe);
#endif /* CONFIG_SMP */
}
EXPORT_SYMBOL(enable_kernel_spe);

void flush_spe_to_thread(struct task_struct *tsk)
{
	if (tsk->thread.regs) {
		preempt_disable();
		if (tsk->thread.regs->msr & MSR_SPE) {
#ifdef CONFIG_SMP
			BUG_ON(tsk != current);
#endif
			tsk->thread.spefscr = mfspr(SPRN_SPEFSCR);
			giveup_spe(tsk);
		}
		preempt_enable();
	}
}
#endif /* CONFIG_SPE */

#ifndef CONFIG_SMP
/*
 * If we are doing lazy switching of CPU state (FP, altivec or SPE),
 * and the current task has some state, discard it.
 */
void discard_lazy_cpu_state(void)
{
	preempt_disable();
	if (last_task_used_math == current)
		last_task_used_math = NULL;
#ifdef CONFIG_ALTIVEC
	if (last_task_used_altivec == current)
		last_task_used_altivec = NULL;
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_VSX
	if (last_task_used_vsx == current)
		last_task_used_vsx = NULL;
#endif /* CONFIG_VSX */
#ifdef CONFIG_SPE
	if (last_task_used_spe == current)
		last_task_used_spe = NULL;
#endif
	preempt_enable();
}
#endif /* CONFIG_SMP */

#ifdef CONFIG_PPC_ADV_DEBUG_REGS
void do_send_trap(struct pt_regs *regs, unsigned long address,
		  unsigned long error_code, int signal_code, int breakpt)
{
	siginfo_t info;

	current->thread.trap_nr = signal_code;
	if (notify_die(DIE_DABR_MATCH, "dabr_match", regs, error_code,
			11, SIGSEGV) == NOTIFY_STOP)
		return;

	/* Deliver the signal to userspace */
	info.si_signo = SIGTRAP;
	info.si_errno = breakpt;	/* breakpoint or watchpoint id */
	info.si_code = signal_code;
	info.si_addr = (void __user *)address;
	force_sig_info(SIGTRAP, &info, current);
}
#else	/* !CONFIG_PPC_ADV_DEBUG_REGS */
void do_break(struct pt_regs *regs, unsigned long address,
	      unsigned long error_code)
{
	siginfo_t info;

	current->thread.trap_nr = TRAP_HWBKPT;
	if (notify_die(DIE_DABR_MATCH, "dabr_match", regs, error_code,
			11, SIGSEGV) == NOTIFY_STOP)
		return;

	if (debugger_break_match(regs))
		return;

	/* Clear the breakpoint */
	hw_breakpoint_disable();

	/* Deliver the signal to userspace */
	info.si_signo = SIGTRAP;
	info.si_errno = 0;
	info.si_code = TRAP_HWBKPT;
	info.si_addr = (void __user *)address;
	force_sig_info(SIGTRAP, &info, current);
}
#endif	/* CONFIG_PPC_ADV_DEBUG_REGS */

static DEFINE_PER_CPU(struct arch_hw_breakpoint, current_brk);

#ifdef CONFIG_PPC_ADV_DEBUG_REGS
/*
 * Set the debug registers back to their default "safe" values.
 */
static void set_debug_reg_defaults(struct thread_struct *thread)
{
	thread->debug.iac1 = thread->debug.iac2 = 0;
#if CONFIG_PPC_ADV_DEBUG_IACS > 2
	thread->debug.iac3 = thread->debug.iac4 = 0;
#endif
	thread->debug.dac1 = thread->debug.dac2 = 0;
#if CONFIG_PPC_ADV_DEBUG_DVCS > 0
	thread->debug.dvc1 = thread->debug.dvc2 = 0;
#endif
	thread->debug.dbcr0 = 0;
#ifdef CONFIG_BOOKE
	/*
	 * Force User/Supervisor bits to 0b11 (user-only MSR[PR]=1)
	 */
	thread->debug.dbcr1 = DBCR1_IAC1US | DBCR1_IAC2US |
			      DBCR1_IAC3US | DBCR1_IAC4US;
	/*
	 * Force Data Address Compare User/Supervisor bits to be User-only
	 * (0b11 MSR[PR]=1) and set all other bits in DBCR2 register to be 0.
	 */
	thread->debug.dbcr2 = DBCR2_DAC1US | DBCR2_DAC2US;
#else
	thread->debug.dbcr1 = 0;
#endif
}

static void prime_debug_regs(struct thread_struct *thread)
{
	/*
	 * We could have inherited MSR_DE from userspace, since
	 * it doesn't get cleared on exception entry. Make sure
	 * MSR_DE is clear before we enable any debug events.
	 */
	mtmsr(mfmsr() & ~MSR_DE);

	mtspr(SPRN_IAC1, thread->debug.iac1);
	mtspr(SPRN_IAC2, thread->debug.iac2);
#if CONFIG_PPC_ADV_DEBUG_IACS > 2
	mtspr(SPRN_IAC3, thread->debug.iac3);
	mtspr(SPRN_IAC4, thread->debug.iac4);
#endif
	mtspr(SPRN_DAC1, thread->debug.dac1);
	mtspr(SPRN_DAC2, thread->debug.dac2);
#if CONFIG_PPC_ADV_DEBUG_DVCS > 0
	mtspr(SPRN_DVC1, thread->debug.dvc1);
	mtspr(SPRN_DVC2, thread->debug.dvc2);
#endif
	mtspr(SPRN_DBCR0, thread->debug.dbcr0);
	mtspr(SPRN_DBCR1, thread->debug.dbcr1);
#ifdef CONFIG_BOOKE
	mtspr(SPRN_DBCR2, thread->debug.dbcr2);
#endif
}

/*
 * If either the old or the new thread is making use of the debug
 * registers, load the debug registers with the values stored in the
 * new thread.
 */
void switch_booke_debug_regs(struct thread_struct *new_thread)
{
	if ((current->thread.debug.dbcr0 & DBCR0_IDM)
	    || (new_thread->debug.dbcr0 & DBCR0_IDM))
		prime_debug_regs(new_thread);
}
EXPORT_SYMBOL_GPL(switch_booke_debug_regs);
#else	/* !CONFIG_PPC_ADV_DEBUG_REGS */
#ifndef CONFIG_HAVE_HW_BREAKPOINT
static void set_debug_reg_defaults(struct thread_struct *thread)
{
	thread->hw_brk.address = 0;
	thread->hw_brk.type = 0;
	set_breakpoint(&thread->hw_brk);
}
#endif /* !CONFIG_HAVE_HW_BREAKPOINT */
#endif	/* CONFIG_PPC_ADV_DEBUG_REGS */

#ifdef CONFIG_PPC_ADV_DEBUG_REGS
static inline int __set_dabr(unsigned long dabr, unsigned long dabrx)
{
	mtspr(SPRN_DAC1, dabr);
#ifdef CONFIG_PPC_47x
	isync();
#endif
	return 0;
}
#elif defined(CONFIG_PPC_BOOK3S)
static inline int __set_dabr(unsigned long dabr, unsigned long dabrx)
{
	mtspr(SPRN_DABR, dabr);
	if (cpu_has_feature(CPU_FTR_DABRX))
		mtspr(SPRN_DABRX, dabrx);
	return 0;
}
#else
static inline int __set_dabr(unsigned long dabr, unsigned long dabrx)
{
	return -EINVAL;
}
#endif

static inline int set_dabr(struct arch_hw_breakpoint *brk)
{
	unsigned long dabr, dabrx;

	dabr = brk->address | (brk->type & HW_BRK_TYPE_DABR);
	dabrx = ((brk->type >> 3) & 0x7);

	if (ppc_md.set_dabr)
		return ppc_md.set_dabr(dabr, dabrx);

	return __set_dabr(dabr, dabrx);
}

static inline int set_dawr(struct arch_hw_breakpoint *brk)
{
	unsigned long dawr, dawrx, mrd;

	dawr = brk->address;

	dawrx  = (brk->type & (HW_BRK_TYPE_READ | HW_BRK_TYPE_WRITE))
		<< (63 - 58);	/* read/write bits */
	dawrx |= ((brk->type & (HW_BRK_TYPE_TRANSLATE)) >> 2)
		<< (63 - 59);	/* translate */
	dawrx |= (brk->type & (HW_BRK_TYPE_PRIV_ALL))
		>> 3;		/* PRIV bits */
	/*
	 * The DAWR length is stored in field MDR, bits 48:53, as a match
	 * range in doublewords (64 bits) biased by -1, e.g. 0b000000 = 1 DW
	 * and 0b111111 = 64 DW. brk->len is in bytes, so align it up to
	 * doubleword size, shift and apply the bias.
	 */
	mrd = ((brk->len + 7) >> 3) - 1;
	dawrx |= (mrd & 0x3f) << (63 - 53);

	if (ppc_md.set_dawr)
		return ppc_md.set_dawr(dawr, dawrx);
	mtspr(SPRN_DAWR, dawr);
	mtspr(SPRN_DAWRX, dawrx);
	return 0;
}
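
/*
 * Worked example (illustrative): a 9-byte watchpoint spans two
 * doublewords once aligned up, so mrd = ((9 + 7) >> 3) - 1 = 1, and the
 * encoded length field 0b000001 means "match a 2-doubleword range".
 */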

int set_breakpoint(struct arch_hw_breakpoint *brk)
{
	__get_cpu_var(current_brk) = *brk;

	if (cpu_has_feature(CPU_FTR_DAWR))
		return set_dawr(brk);

	return set_dabr(brk);
}

#ifdef CONFIG_PPC64
DEFINE_PER_CPU(struct cpu_usage, cpu_usage_array);
#endif

static inline bool hw_brk_match(struct arch_hw_breakpoint *a,
				struct arch_hw_breakpoint *b)
{
	if (a->address != b->address)
		return false;
	if (a->type != b->type)
		return false;
	if (a->len != b->len)
		return false;
	return true;
}

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
static inline void tm_reclaim_task(struct task_struct *tsk)
{
	/* We have to work out if we're switching from/to a task that's in the
	 * middle of a transaction.
	 *
	 * In switching we need to maintain a 2nd register state as
	 * oldtask->thread.ckpt_regs. We tm_reclaim(oldproc); this saves the
	 * checkpointed (tbegin) state in ckpt_regs and saves the transactional
	 * (current) FPRs into oldtask->thread.transact_fpr[].
	 *
	 * We also context switch (save) TFHAR/TEXASR/TFIAR in here.
	 */
	struct thread_struct *thr = &tsk->thread;

	if (!thr->regs)
		return;

	if (!MSR_TM_ACTIVE(thr->regs->msr))
		goto out_and_saveregs;

	/* Stash the original thread MSR, as giveup_fpu et al will
	 * modify it. We hold onto it to see whether the task used
	 * FP & vector regs.
	 */
	thr->tm_orig_msr = thr->regs->msr;

	TM_DEBUG("--- tm_reclaim on pid %d (NIP=%lx, "
		 "ccr=%lx, msr=%lx, trap=%lx)\n",
		 tsk->pid, thr->regs->nip,
		 thr->regs->ccr, thr->regs->msr,
		 thr->regs->trap);

	tm_reclaim(thr, thr->regs->msr, TM_CAUSE_RESCHED);

	TM_DEBUG("--- tm_reclaim on pid %d complete\n",
		 tsk->pid);

out_and_saveregs:
	/* Always save the regs here, even if a transaction's not active.
	 * This context-switches a thread's TM info SPRs. We do it here to
	 * be consistent with the restore path (in recheckpoint) which
	 * cannot happen later in _switch().
	 */
	tm_save_sprs(thr);
}

static inline void tm_recheckpoint_new_task(struct task_struct *new)
{
	unsigned long msr;

	if (!cpu_has_feature(CPU_FTR_TM))
		return;

	/* Recheckpoint the registers of the thread we're about to switch to.
	 *
	 * If the task was using FP, we non-lazily reload both the original and
	 * the speculative FP register states. This is because the kernel
	 * doesn't see if/when a TM rollback occurs, so if we take an FP
	 * unavailable exception later, we are unable to determine which set
	 * of FP regs needs to be restored.
	 */
	if (!new->thread.regs)
		return;

	/* The TM SPRs are restored here, so that TEXASR.FS can be set
	 * before the trecheckpoint and no explosion occurs.
	 */
	tm_restore_sprs(&new->thread);

	if (!MSR_TM_ACTIVE(new->thread.regs->msr))
		return;
	msr = new->thread.tm_orig_msr;

	/* Recheckpoint to restore original checkpointed register state. */
	TM_DEBUG("*** tm_recheckpoint of pid %d "
		 "(new->msr 0x%lx, new->origmsr 0x%lx)\n",
		 new->pid, new->thread.regs->msr, msr);

	/* This loads the checkpointed FP/VEC state, if used */
	tm_recheckpoint(&new->thread, msr);

	/* This loads the speculative FP/VEC state, if used */
	if (msr & MSR_FP) {
		do_load_up_transact_fpu(&new->thread);
		new->thread.regs->msr |=
			(MSR_FP | new->thread.fpexc_mode);
	}
#ifdef CONFIG_ALTIVEC
	if (msr & MSR_VEC) {
		do_load_up_transact_altivec(&new->thread);
		new->thread.regs->msr |= MSR_VEC;
	}
#endif
	/* We may as well turn on VSX too since all the state is restored now */
	if (msr & MSR_VSX)
		new->thread.regs->msr |= MSR_VSX;

	TM_DEBUG("*** tm_recheckpoint of pid %d complete "
		 "(kernel msr 0x%lx)\n",
		 new->pid, mfmsr());
}

static inline void __switch_to_tm(struct task_struct *prev)
{
	if (cpu_has_feature(CPU_FTR_TM)) {
		tm_enable();
		tm_reclaim_task(prev);
	}
}
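
/*
 * Illustrative pairing (a summary, not new behaviour): on switch-out,
 * __switch_to_tm() -> tm_reclaim_task() stashes the checkpointed and
 * transactional state; on switch-in, tm_recheckpoint_new_task() above
 * restores it, so a suspended transaction survives the reschedule.
 */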
#else
#define tm_recheckpoint_new_task(new)
#define __switch_to_tm(prev)
#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */

struct task_struct *__switch_to(struct task_struct *prev,
	struct task_struct *new)
{
	struct thread_struct *new_thread, *old_thread;
	struct task_struct *last;
#ifdef CONFIG_PPC_BOOK3S_64
	struct ppc64_tlb_batch *batch;
#endif

	WARN_ON(!irqs_disabled());

	/* Back up the TAR across context switches.
	 * Note that the TAR is not available for use in the kernel. (To
	 * provide this, the TAR should be backed up/restored on exception
	 * entry/exit instead, and be in pt_regs. FIXME, this should be in
	 * pt_regs anyway (for debug).)
	 * Save the TAR here before we do treclaim/trecheckpoint as these
	 * will change the TAR.
	 */
	save_tar(&prev->thread);

	__switch_to_tm(prev);

#ifdef CONFIG_SMP
	/* avoid complexity of lazy save/restore of fpu
	 * by just saving it every time we switch out if
	 * this task used the fpu during the last quantum.
	 *
	 * If it tries to use the fpu again, it'll trap and
	 * reload its fp regs. So we don't have to do a restore
	 * every switch, just a save.
	 *  -- Cort
	 */
	if (prev->thread.regs && (prev->thread.regs->msr & MSR_FP))
		giveup_fpu(prev);
#ifdef CONFIG_ALTIVEC
	/*
	 * If the previous thread used altivec in the last quantum
	 * (thus changing altivec regs) then save them.
	 * We used to check the VRSAVE register but not all apps
	 * set it, so we don't rely on it now (and in fact we need
	 * to save & restore VSCR even if VRSAVE == 0). -- paulus
	 *
	 * On SMP we always save/restore altivec regs just to avoid the
	 * complexity of changing processors.
	 *  -- Cort
	 */
	if (prev->thread.regs && (prev->thread.regs->msr & MSR_VEC))
		giveup_altivec(prev);
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_VSX
	if (prev->thread.regs && (prev->thread.regs->msr & MSR_VSX))
		/* VMX and FPU registers are already saved here */
		__giveup_vsx(prev);
#endif /* CONFIG_VSX */
#ifdef CONFIG_SPE
	/*
	 * If the previous thread used spe in the last quantum
	 * (thus changing spe regs) then save them.
	 *
	 * On SMP we always save/restore spe regs just to avoid the
	 * complexity of changing processors.
	 */
	if ((prev->thread.regs && (prev->thread.regs->msr & MSR_SPE)))
		giveup_spe(prev);
#endif /* CONFIG_SPE */

#else  /* CONFIG_SMP */
#ifdef CONFIG_ALTIVEC
	/* Avoid the trap. On smp this never happens since
	 * we don't set last_task_used_altivec -- Cort
	 */
	if (new->thread.regs && last_task_used_altivec == new)
		new->thread.regs->msr |= MSR_VEC;
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_VSX
	if (new->thread.regs && last_task_used_vsx == new)
		new->thread.regs->msr |= MSR_VSX;
#endif /* CONFIG_VSX */
#ifdef CONFIG_SPE
	/* Avoid the trap. On smp this never happens since
	 * we don't set last_task_used_spe
	 */
	if (new->thread.regs && last_task_used_spe == new)
		new->thread.regs->msr |= MSR_SPE;
#endif /* CONFIG_SPE */

#endif /* CONFIG_SMP */

#ifdef CONFIG_PPC_ADV_DEBUG_REGS
	switch_booke_debug_regs(&new->thread);
#else
/*
 * For PPC_BOOK3S_64, we use the hw-breakpoint interfaces that would
 * schedule DABR
 */
#ifndef CONFIG_HAVE_HW_BREAKPOINT
	if (unlikely(hw_brk_match(&__get_cpu_var(current_brk), &new->thread.hw_brk)))
		set_breakpoint(&new->thread.hw_brk);
#endif /* CONFIG_HAVE_HW_BREAKPOINT */
#endif

	new_thread = &new->thread;
	old_thread = &current->thread;

#ifdef CONFIG_PPC64
	/*
	 * Collect processor utilization data per process
	 */
	if (firmware_has_feature(FW_FEATURE_SPLPAR)) {
		struct cpu_usage *cu = &__get_cpu_var(cpu_usage_array);
		unsigned long start_tb, current_tb;
		start_tb = old_thread->start_tb;
		cu->current_tb = current_tb = mfspr(SPRN_PURR);
		old_thread->accum_tb += (current_tb - start_tb);
		new_thread->start_tb = current_tb;
	}
#endif /* CONFIG_PPC64 */

#ifdef CONFIG_PPC_BOOK3S_64
	batch = &__get_cpu_var(ppc64_tlb_batch);
	if (batch->active) {
		current_thread_info()->local_flags |= _TLF_LAZY_MMU;
		if (batch->index)
			__flush_tlb_pending(batch);
		batch->active = 0;
	}
#endif /* CONFIG_PPC_BOOK3S_64 */

	/*
	 * We can't take a PMU exception inside _switch() since there is a
	 * window where the kernel stack SLB and the kernel stack are out
	 * of sync. Hard disable here.
	 */
	hard_irq_disable();

	tm_recheckpoint_new_task(new);

	last = _switch(old_thread, new_thread);

#ifdef CONFIG_PPC_BOOK3S_64
	if (current_thread_info()->local_flags & _TLF_LAZY_MMU) {
		current_thread_info()->local_flags &= ~_TLF_LAZY_MMU;
		batch = &__get_cpu_var(ppc64_tlb_batch);
		batch->active = 1;
	}
#endif /* CONFIG_PPC_BOOK3S_64 */

	return last;
}
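
/*
 * Note (illustrative): _switch() only "returns" here once this task is
 * scheduled back in; the value it hands back is the task that was
 * running immediately before us, which the generic scheduler expects
 * as the return value of __switch_to().
 */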

static int instructions_to_print = 16;

static void show_instructions(struct pt_regs *regs)
{
	int i;
	unsigned long pc = regs->nip - (instructions_to_print * 3 / 4 *
			sizeof(int));

	printk("Instruction dump:");

	for (i = 0; i < instructions_to_print; i++) {
		int instr;

		if (!(i % 8))
			printk("\n");

#if !defined(CONFIG_BOOKE)
		/* If executing with the IMMU off, adjust pc rather
		 * than print XXXXXXXX.
		 */
		if (!(regs->msr & MSR_IR))
			pc = (unsigned long)phys_to_virt(pc);
#endif

		/* We use __get_user here *only* to avoid an OOPS on a
		 * bad address because the pc *should* only be a
		 * kernel address.
		 */
		if (!__kernel_text_address(pc) ||
		    __get_user(instr, (unsigned int __user *)pc)) {
			printk(KERN_CONT "XXXXXXXX ");
		} else {
			if (regs->nip == pc)
				printk(KERN_CONT "<%08x> ", instr);
			else
				printk(KERN_CONT "%08x ", instr);
		}

		pc += sizeof(int);
	}

	printk("\n");
}

static struct regbit {
	unsigned long bit;
	const char *name;
} msr_bits[] = {
#if defined(CONFIG_PPC64) && !defined(CONFIG_BOOKE)
	{MSR_SF,	"SF"},
	{MSR_HV,	"HV"},
#endif
	{MSR_VEC,	"VEC"},
	{MSR_VSX,	"VSX"},
#ifdef CONFIG_BOOKE
	{MSR_CE,	"CE"},
#endif
	{MSR_EE,	"EE"},
	{MSR_PR,	"PR"},
	{MSR_FP,	"FP"},
	{MSR_ME,	"ME"},
#ifdef CONFIG_BOOKE
	{MSR_DE,	"DE"},
#else
	{MSR_SE,	"SE"},
	{MSR_BE,	"BE"},
#endif
	{MSR_IR,	"IR"},
	{MSR_DR,	"DR"},
	{MSR_PMM,	"PMM"},
#ifndef CONFIG_BOOKE
	{MSR_RI,	"RI"},
	{MSR_LE,	"LE"},
#endif
	{0,		NULL}
};

static void printbits(unsigned long val, struct regbit *bits)
{
	const char *sep = "";

	printk("<");
	for (; bits->bit; ++bits)
		if (val & bits->bit) {
			printk("%s%s", sep, bits->name);
			sep = ",";
		}
	printk(">");
}
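
/*
 * Example (illustrative): for a user-mode MSR with EE, PR, FP, IR and
 * DR set, printbits(regs->msr, msr_bits) prints "<EE,PR,FP,IR,DR>",
 * following the order of the msr_bits[] table above.
 */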

#ifdef CONFIG_PPC64
#define REG		"%016lx"
#define REGS_PER_LINE	4
#define LAST_VOLATILE	13
#else
#define REG		"%08lx"
#define REGS_PER_LINE	8
#define LAST_VOLATILE	12
#endif

void show_regs(struct pt_regs *regs)
{
	int i, trap;

	show_regs_print_info(KERN_DEFAULT);

	printk("NIP: "REG" LR: "REG" CTR: "REG"\n",
	       regs->nip, regs->link, regs->ctr);
	printk("REGS: %p TRAP: %04lx %s (%s)\n",
	       regs, regs->trap, print_tainted(), init_utsname()->release);
	printk("MSR: "REG" ", regs->msr);
	printbits(regs->msr, msr_bits);
	printk(" CR: %08lx XER: %08lx\n", regs->ccr, regs->xer);
	trap = TRAP(regs);
	if ((regs->trap != 0xc00) && cpu_has_feature(CPU_FTR_CFAR))
		printk("CFAR: "REG" ", regs->orig_gpr3);
	if (trap == 0x200 || trap == 0x300 || trap == 0x600)
#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
		printk("DEAR: "REG" ESR: "REG" ", regs->dar, regs->dsisr);
#else
		printk("DAR: "REG" DSISR: %08lx ", regs->dar, regs->dsisr);
#endif
#ifdef CONFIG_PPC64
	printk("SOFTE: %ld ", regs->softe);
#endif
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	if (MSR_TM_ACTIVE(regs->msr))
		printk("\nPACATMSCRATCH: %016llx ", get_paca()->tm_scratch);
#endif

	for (i = 0; i < 32; i++) {
		if ((i % REGS_PER_LINE) == 0)
			printk("\nGPR%02d: ", i);
		printk(REG " ", regs->gpr[i]);
		if (i == LAST_VOLATILE && !FULL_REGS(regs))
			break;
	}
	printk("\n");
#ifdef CONFIG_KALLSYMS
	/*
	 * Lookup NIP late so we have the best chance of getting the
	 * above info out without failing
	 */
	printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip);
	printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link);
#endif
	show_stack(current, (unsigned long *) regs->gpr[1]);
	if (!user_mode(regs))
		show_instructions(regs);
}

void exit_thread(void)
{
	discard_lazy_cpu_state();
}

void flush_thread(void)
{
	discard_lazy_cpu_state();

#ifdef CONFIG_HAVE_HW_BREAKPOINT
	flush_ptrace_hw_breakpoint(current);
#else /* CONFIG_HAVE_HW_BREAKPOINT */
	set_debug_reg_defaults(&current->thread);
#endif /* CONFIG_HAVE_HW_BREAKPOINT */
}

void
release_thread(struct task_struct *t)
{
}

/*
 * This gets called so that we can store coprocessor state into memory and
 * copy the current task into the new thread.
 */
int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{
	flush_fp_to_thread(src);
	flush_altivec_to_thread(src);
	flush_vsx_to_thread(src);
	flush_spe_to_thread(src);

	*dst = *src;

	clear_task_ebb(dst);

	return 0;
}

/*
 * Copy a thread..
 */
extern unsigned long dscr_default; /* defined in arch/powerpc/kernel/sysfs.c */

int copy_thread(unsigned long clone_flags, unsigned long usp,
		unsigned long arg, struct task_struct *p)
{
	struct pt_regs *childregs, *kregs;
	extern void ret_from_fork(void);
	extern void ret_from_kernel_thread(void);
	void (*f)(void);
	unsigned long sp = (unsigned long)task_stack_page(p) + THREAD_SIZE;

	/* Copy registers */
	sp -= sizeof(struct pt_regs);
	childregs = (struct pt_regs *) sp;
	if (unlikely(p->flags & PF_KTHREAD)) {
		struct thread_info *ti = (void *)task_stack_page(p);
		memset(childregs, 0, sizeof(struct pt_regs));
		childregs->gpr[1] = sp + sizeof(struct pt_regs);
		childregs->gpr[14] = usp;	/* function */
#ifdef CONFIG_PPC64
		clear_tsk_thread_flag(p, TIF_32BIT);
		childregs->softe = 1;
#endif
		childregs->gpr[15] = arg;
		p->thread.regs = NULL;	/* no user register state */
		ti->flags |= _TIF_RESTOREALL;
		f = ret_from_kernel_thread;
	} else {
		struct pt_regs *regs = current_pt_regs();
		CHECK_FULL_REGS(regs);
		*childregs = *regs;
		if (usp)
			childregs->gpr[1] = usp;
		p->thread.regs = childregs;
		childregs->gpr[3] = 0;	/* Result from fork() */
		if (clone_flags & CLONE_SETTLS) {
#ifdef CONFIG_PPC64
			if (!is_32bit_task())
				childregs->gpr[13] = childregs->gpr[6];
			else
#endif
				childregs->gpr[2] = childregs->gpr[6];
		}

		f = ret_from_fork;
	}
	sp -= STACK_FRAME_OVERHEAD;

	/*
	 * The way this works is that at some point in the future
	 * some task will call _switch to switch to the new task.
	 * That will pop off the stack frame created below and start
	 * the new task running at ret_from_fork. The new task will
	 * do some house keeping and then return from the fork or clone
	 * system call, using the stack frame created above.
	 */
	((unsigned long *)sp)[0] = 0;
	sp -= sizeof(struct pt_regs);
	kregs = (struct pt_regs *) sp;
	sp -= STACK_FRAME_OVERHEAD;
	p->thread.ksp = sp;
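
	/*
	 * Sketch of the child's kernel stack as laid out above
	 * (illustrative, addresses decreasing downwards):
	 *
	 *	task_stack_page(p) + THREAD_SIZE
	 *	[ struct pt_regs ]	childregs (user regs / kthread args)
	 *	[ stack frame    ]	back chain word set to 0
	 *	[ struct pt_regs ]	kregs, nip = f (ret_from_fork / ...)
	 *	[ stack frame    ]	frame popped by _switch()
	 *	p->thread.ksp
	 */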
#ifdef CONFIG_PPC32
	p->thread.ksp_limit = (unsigned long)task_stack_page(p) +
				_ALIGN_UP(sizeof(struct thread_info), 16);
#endif
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	p->thread.ptrace_bps[0] = NULL;
#endif

	p->thread.fp_save_area = NULL;
#ifdef CONFIG_ALTIVEC
	p->thread.vr_save_area = NULL;
#endif

#ifdef CONFIG_PPC_STD_MMU_64
	if (mmu_has_feature(MMU_FTR_SLB)) {
		unsigned long sp_vsid;
		unsigned long llp = mmu_psize_defs[mmu_linear_psize].sllp;

		if (mmu_has_feature(MMU_FTR_1T_SEGMENT))
			sp_vsid = get_kernel_vsid(sp, MMU_SEGSIZE_1T)
				<< SLB_VSID_SHIFT_1T;
		else
			sp_vsid = get_kernel_vsid(sp, MMU_SEGSIZE_256M)
				<< SLB_VSID_SHIFT;
		sp_vsid |= SLB_VSID_KERNEL | llp;
		p->thread.ksp_vsid = sp_vsid;
	}
#endif /* CONFIG_PPC_STD_MMU_64 */
#ifdef CONFIG_PPC64
	if (cpu_has_feature(CPU_FTR_DSCR)) {
		p->thread.dscr_inherit = current->thread.dscr_inherit;
		p->thread.dscr = current->thread.dscr;
	}
	if (cpu_has_feature(CPU_FTR_HAS_PPR))
		p->thread.ppr = INIT_PPR;
#endif
	/*
	 * The PPC64 ABI makes use of a TOC to contain function
	 * pointers. The function (ret_from_except) is actually a pointer
	 * to the TOC entry. The first entry is a pointer to the actual
	 * function.
	 */
#ifdef CONFIG_PPC64
	kregs->nip = *((unsigned long *)f);
#else
	kregs->nip = (unsigned long)f;
#endif
	return 0;
}

/*
 * Set up a thread for executing a new program
 */
void start_thread(struct pt_regs *regs, unsigned long start, unsigned long sp)
{
#ifdef CONFIG_PPC64
	unsigned long load_addr = regs->gpr[2];	/* saved by ELF_PLAT_INIT */
#endif

	/*
	 * If we exec out of a kernel thread then thread.regs will not be
	 * set. Do it now.
	 */
	if (!current->thread.regs) {
		struct pt_regs *regs = task_stack_page(current) + THREAD_SIZE;
		current->thread.regs = regs - 1;
	}

	memset(regs->gpr, 0, sizeof(regs->gpr));
	regs->ctr = 0;
	regs->link = 0;
	regs->xer = 0;
	regs->ccr = 0;
	regs->gpr[1] = sp;

	/*
	 * We have just cleared all the nonvolatile GPRs, so make
	 * FULL_REGS(regs) return true. This is necessary to allow
	 * ptrace to examine the thread immediately after exec.
	 */
	regs->trap &= ~1UL;

#ifdef CONFIG_PPC32
	regs->mq = 0;
	regs->nip = start;
	regs->msr = MSR_USER;
#else
	if (!is_32bit_task()) {
		unsigned long entry;

		if (is_elf2_task()) {
			/* Look ma, no function descriptors! */
			entry = start;

			/*
			 * Ulrich says:
			 *   The latest iteration of the ABI requires that when
			 *   calling a function (at its global entry point),
			 *   the caller must ensure r12 holds the entry point
			 *   address (so that the function can quickly
			 *   establish addressability).
			 */
			regs->gpr[12] = start;
			/* Make sure that's restored on entry to userspace. */
			set_thread_flag(TIF_RESTOREALL);
		} else {
			unsigned long toc;

			/* start is a relocated pointer to the function
			 * descriptor for the elf _start routine. The first
			 * entry in the function descriptor is the entry
			 * address of _start and the second entry is the TOC
			 * value we need to use.
			 */
			__get_user(entry, (unsigned long __user *)start);
			__get_user(toc, (unsigned long __user *)start+1);

			/* Check whether the e_entry function descriptor entries
			 * need to be relocated before we can use them.
			 */
			if (load_addr != 0) {
				entry += load_addr;
				toc += load_addr;
			}
			regs->gpr[2] = toc;
		}
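
		/*
		 * Recap (illustrative): ELFv2 binaries jump straight to
		 * "start" with r12 holding the entry address, while ELFv1
		 * binaries read { entry, toc } out of the function
		 * descriptor that "start" points to, relocating both by
		 * load_addr for ET_DYN (PIE) binaries.
		 */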
		regs->nip = entry;
		regs->msr = MSR_USER64;
	} else {
		regs->nip = start;
		regs->gpr[2] = 0;
		regs->msr = MSR_USER32;
	}
#endif

	discard_lazy_cpu_state();
#ifdef CONFIG_VSX
	current->thread.used_vsr = 0;
#endif
	memset(&current->thread.fp_state, 0, sizeof(current->thread.fp_state));
	current->thread.fp_save_area = NULL;
#ifdef CONFIG_ALTIVEC
	memset(&current->thread.vr_state, 0, sizeof(current->thread.vr_state));
	current->thread.vr_state.vscr.u[3] = 0x00010000; /* Java mode disabled */
	current->thread.vr_save_area = NULL;
	current->thread.vrsave = 0;
	current->thread.used_vr = 0;
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_SPE
	memset(current->thread.evr, 0, sizeof(current->thread.evr));
	current->thread.acc = 0;
	current->thread.spefscr = 0;
	current->thread.used_spe = 0;
#endif /* CONFIG_SPE */
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	if (cpu_has_feature(CPU_FTR_TM))
		regs->msr |= MSR_TM;
	current->thread.tm_tfhar = 0;
	current->thread.tm_texasr = 0;
	current->thread.tm_tfiar = 0;
#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
}

#define PR_FP_ALL_EXCEPT (PR_FP_EXC_DIV | PR_FP_EXC_OVF | PR_FP_EXC_UND \
		| PR_FP_EXC_RES | PR_FP_EXC_INV)

int set_fpexc_mode(struct task_struct *tsk, unsigned int val)
{
	struct pt_regs *regs = tsk->thread.regs;

	/* This is a bit hairy. If we are an SPE enabled processor
	 * (have embedded fp) we store the IEEE exception enable flags in
	 * fpexc_mode. fpexc_mode is also used for setting FP exception
	 * mode (async, precise, disabled) for 'Classic' FP. */
	if (val & PR_FP_EXC_SW_ENABLE) {
#ifdef CONFIG_SPE
		if (cpu_has_feature(CPU_FTR_SPE)) {
			tsk->thread.fpexc_mode = val &
				(PR_FP_EXC_SW_ENABLE | PR_FP_ALL_EXCEPT);
			return 0;
		} else {
			return -EINVAL;
		}
#else
		return -EINVAL;
#endif
	}

	/* on a CONFIG_SPE this does not hurt us. The bits that
	 * __pack_fe01 use do not overlap with bits used for
	 * PR_FP_EXC_SW_ENABLE. Additionally, the MSR[FE0,FE1] bits
	 * on CONFIG_SPE implementations are reserved so writing to
	 * them does not change anything */
	if (val > PR_FP_EXC_PRECISE)
		return -EINVAL;
	tsk->thread.fpexc_mode = __pack_fe01(val);
	if (regs != NULL && (regs->msr & MSR_FP) != 0)
		regs->msr = (regs->msr & ~(MSR_FE0|MSR_FE1))
			| tsk->thread.fpexc_mode;
	return 0;
}
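
/*
 * Illustrative usage (via prctl(2), not code from this file):
 *
 *	prctl(PR_SET_FPEXC, PR_FP_EXC_PRECISE);
 *
 * lands here with val == PR_FP_EXC_PRECISE, packing FE0/FE1 into
 * fpexc_mode and, if FP is currently enabled, into regs->msr as well.
 */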

int get_fpexc_mode(struct task_struct *tsk, unsigned long adr)
{
	unsigned int val;

	if (tsk->thread.fpexc_mode & PR_FP_EXC_SW_ENABLE)
#ifdef CONFIG_SPE
		if (cpu_has_feature(CPU_FTR_SPE))
			val = tsk->thread.fpexc_mode;
		else
			return -EINVAL;
#else
		return -EINVAL;
#endif
	else
		val = __unpack_fe01(tsk->thread.fpexc_mode);
	return put_user(val, (unsigned int __user *) adr);
}

int set_endian(struct task_struct *tsk, unsigned int val)
{
	struct pt_regs *regs = tsk->thread.regs;

	if ((val == PR_ENDIAN_LITTLE && !cpu_has_feature(CPU_FTR_REAL_LE)) ||
	    (val == PR_ENDIAN_PPC_LITTLE && !cpu_has_feature(CPU_FTR_PPC_LE)))
		return -EINVAL;

	if (regs == NULL)
		return -EINVAL;

	if (val == PR_ENDIAN_BIG)
		regs->msr &= ~MSR_LE;
	else if (val == PR_ENDIAN_LITTLE || val == PR_ENDIAN_PPC_LITTLE)
		regs->msr |= MSR_LE;
	else
		return -EINVAL;

	return 0;
}

int get_endian(struct task_struct *tsk, unsigned long adr)
{
	struct pt_regs *regs = tsk->thread.regs;
	unsigned int val;

	if (!cpu_has_feature(CPU_FTR_PPC_LE) &&
	    !cpu_has_feature(CPU_FTR_REAL_LE))
		return -EINVAL;

	if (regs == NULL)
		return -EINVAL;

	if (regs->msr & MSR_LE) {
		if (cpu_has_feature(CPU_FTR_REAL_LE))
			val = PR_ENDIAN_LITTLE;
		else
			val = PR_ENDIAN_PPC_LITTLE;
	} else
		val = PR_ENDIAN_BIG;

	return put_user(val, (unsigned int __user *)adr);
}
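
/*
 * Illustrative: these two back prctl(2)'s PR_SET_ENDIAN/PR_GET_ENDIAN,
 * e.g.
 *
 *	prctl(PR_SET_ENDIAN, PR_ENDIAN_LITTLE);
 *
 * which flips MSR_LE for the calling thread on CPUs with true
 * little-endian support (CPU_FTR_REAL_LE).
 */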

int set_unalign_ctl(struct task_struct *tsk, unsigned int val)
{
	tsk->thread.align_ctl = val;
	return 0;
}

int get_unalign_ctl(struct task_struct *tsk, unsigned long adr)
{
	return put_user(tsk->thread.align_ctl, (unsigned int __user *)adr);
}

static inline int valid_irq_stack(unsigned long sp, struct task_struct *p,
				  unsigned long nbytes)
{
	unsigned long stack_page;
	unsigned long cpu = task_cpu(p);

	/*
	 * Avoid crashing if the stack has overflowed and corrupted
	 * task_cpu(p), which is in the thread_info struct.
	 */
	if (cpu < NR_CPUS && cpu_possible(cpu)) {
		stack_page = (unsigned long) hardirq_ctx[cpu];
		if (sp >= stack_page + sizeof(struct thread_struct)
		    && sp <= stack_page + THREAD_SIZE - nbytes)
			return 1;

		stack_page = (unsigned long) softirq_ctx[cpu];
		if (sp >= stack_page + sizeof(struct thread_struct)
		    && sp <= stack_page + THREAD_SIZE - nbytes)
			return 1;
	}
	return 0;
}

int validate_sp(unsigned long sp, struct task_struct *p,
		unsigned long nbytes)
{
	unsigned long stack_page = (unsigned long)task_stack_page(p);

	if (sp >= stack_page + sizeof(struct thread_struct)
	    && sp <= stack_page + THREAD_SIZE - nbytes)
		return 1;

	return valid_irq_stack(sp, p, nbytes);
}
EXPORT_SYMBOL(validate_sp);

unsigned long get_wchan(struct task_struct *p)
{
	unsigned long ip, sp;
	int count = 0;

	if (!p || p == current || p->state == TASK_RUNNING)
		return 0;

	sp = p->thread.ksp;
	if (!validate_sp(sp, p, STACK_FRAME_OVERHEAD))
		return 0;

	do {
		sp = *(unsigned long *)sp;
		if (!validate_sp(sp, p, STACK_FRAME_OVERHEAD))
			return 0;
		if (count > 0) {
			ip = ((unsigned long *)sp)[STACK_FRAME_LR_SAVE];
			if (!in_sched_functions(ip))
				return ip;
		}
	} while (count++ < 16);
	return 0;
}
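
/*
 * Note (illustrative): get_wchan() is what /proc/<pid>/wchan reports.
 * It walks the sleeping task's back chain, skipping the first frame and
 * any address inside the scheduler, and returns the first saved LR
 * outside it, i.e. the function the task is blocked in.
 */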

static int kstack_depth_to_print = CONFIG_PRINT_STACK_DEPTH;

void show_stack(struct task_struct *tsk, unsigned long *stack)
{
	unsigned long sp, ip, lr, newsp;
	int count = 0;
	int firstframe = 1;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	int curr_frame = current->curr_ret_stack;
	extern void return_to_handler(void);
	unsigned long rth = (unsigned long)return_to_handler;
	unsigned long mrth = -1;
#ifdef CONFIG_PPC64
	extern void mod_return_to_handler(void);
	rth = *(unsigned long *)rth;
	mrth = (unsigned long)mod_return_to_handler;
	mrth = *(unsigned long *)mrth;
#endif
#endif

	sp = (unsigned long) stack;
	if (tsk == NULL)
		tsk = current;
	if (sp == 0) {
		if (tsk == current)
			asm("mr %0,1" : "=r" (sp));
		else
			sp = tsk->thread.ksp;
	}

	lr = 0;
	printk("Call Trace:\n");
	do {
		if (!validate_sp(sp, tsk, STACK_FRAME_OVERHEAD))
			return;

		stack = (unsigned long *) sp;
		newsp = stack[0];
		ip = stack[STACK_FRAME_LR_SAVE];
		if (!firstframe || ip != lr) {
			printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip);
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
			if ((ip == rth || ip == mrth) && curr_frame >= 0) {
				printk(" (%pS)",
				       (void *)current->ret_stack[curr_frame].ret);
				curr_frame--;
			}
#endif
			if (firstframe)
				printk(" (unreliable)");
			printk("\n");
		}
		firstframe = 0;

		/*
		 * See if this is an exception frame.
		 * We look for the "regshere" marker in the current frame.
		 */
		if (validate_sp(sp, tsk, STACK_INT_FRAME_SIZE)
		    && stack[STACK_FRAME_MARKER] == STACK_FRAME_REGS_MARKER) {
			struct pt_regs *regs = (struct pt_regs *)
				(sp + STACK_FRAME_OVERHEAD);
			lr = regs->link;
			printk("--- Exception: %lx at %pS\n LR = %pS\n",
			       regs->trap, (void *)regs->nip, (void *)lr);
			firstframe = 1;
		}

		sp = newsp;
	} while (count++ < kstack_depth_to_print);
}

#ifdef CONFIG_PPC64
/* Called with hard IRQs off */
void notrace __ppc64_runlatch_on(void)
{
	struct thread_info *ti = current_thread_info();
	unsigned long ctrl;

	ctrl = mfspr(SPRN_CTRLF);
	ctrl |= CTRL_RUNLATCH;
	mtspr(SPRN_CTRLT, ctrl);

	ti->local_flags |= _TLF_RUNLATCH;
}

/* Called with hard IRQs off */
void notrace __ppc64_runlatch_off(void)
{
	struct thread_info *ti = current_thread_info();
	unsigned long ctrl;

	ti->local_flags &= ~_TLF_RUNLATCH;

	ctrl = mfspr(SPRN_CTRLF);
	ctrl &= ~CTRL_RUNLATCH;
	mtspr(SPRN_CTRLT, ctrl);
}
#endif /* CONFIG_PPC64 */

unsigned long arch_align_stack(unsigned long sp)
{
	if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
		sp -= get_random_int() & ~PAGE_MASK;
	return sp & ~0xf;
}

static inline unsigned long brk_rnd(void)
{
	unsigned long rnd = 0;

	/* 8MB for 32bit, 1GB for 64bit */
	if (is_32bit_task())
		rnd = (long)(get_random_int() % (1<<(23-PAGE_SHIFT)));
	else
		rnd = (long)(get_random_int() % (1<<(30-PAGE_SHIFT)));

	return rnd << PAGE_SHIFT;
}
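
/*
 * Worked example (illustrative, 64-bit with 4K pages): PAGE_SHIFT = 12,
 * so brk_rnd() draws rnd from [0, 2^18) pages; shifted left by
 * PAGE_SHIFT that is up to 1GB of offset, while 32-bit tasks get up to
 * 8MB (2^11 pages).
 */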

unsigned long arch_randomize_brk(struct mm_struct *mm)
{
	unsigned long base = mm->brk;
	unsigned long ret;

#ifdef CONFIG_PPC_STD_MMU_64
	/*
	 * If we are using 1TB segments and we are allowed to randomise
	 * the heap, we can put it above 1TB so it is backed by a 1TB
	 * segment. Otherwise the heap will be in the bottom 1TB
	 * which always uses 256MB segments and this may result in a
	 * performance penalty.
	 */
	if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T))
		base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T);
#endif

	ret = PAGE_ALIGN(base + brk_rnd());

	if (ret < mm->brk)
		return mm->brk;

	return ret;
}

unsigned long randomize_et_dyn(unsigned long base)
{
	unsigned long ret = PAGE_ALIGN(base + brk_rnd());

	if (ret < base)
		return base;

	return ret;
}