process.c

/*
 *  Derived from "arch/i386/kernel/process.c"
 *    Copyright (C) 1995  Linus Torvalds
 *
 *  Updated and modified by Cort Dougan (cort@cs.nmt.edu) and
 *  Paul Mackerras (paulus@cs.anu.edu.au)
 *
 *  PowerPC version
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 */

#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/elf.h>
#include <linux/init.h>
#include <linux/prctl.h>
#include <linux/init_task.h>
#include <linux/export.h>
#include <linux/kallsyms.h>
#include <linux/mqueue.h>
#include <linux/hardirq.h>
#include <linux/utsname.h>
#include <linux/ftrace.h>
#include <linux/kernel_stat.h>
#include <linux/personality.h>
#include <linux/random.h>
#include <linux/hw_breakpoint.h>

#include <asm/pgtable.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/processor.h>
#include <asm/mmu.h>
#include <asm/prom.h>
#include <asm/machdep.h>
#include <asm/time.h>
#include <asm/runlatch.h>
#include <asm/syscalls.h>
#include <asm/switch_to.h>
#include <asm/tm.h>
#include <asm/debug.h>
#ifdef CONFIG_PPC64
#include <asm/firmware.h>
#endif
#include <linux/kprobes.h>
#include <linux/kdebug.h>

/* Transactional Memory debug */
#ifdef TM_DEBUG_SW
#define TM_DEBUG(x...) printk(KERN_INFO x)
#else
#define TM_DEBUG(x...) do { } while (0)
#endif

extern unsigned long _get_SP(void);

#ifndef CONFIG_SMP
struct task_struct *last_task_used_math = NULL;
struct task_struct *last_task_used_altivec = NULL;
struct task_struct *last_task_used_vsx = NULL;
struct task_struct *last_task_used_spe = NULL;
#endif

/*
 * Make sure the floating-point register state in the
 * thread_struct is up to date for task tsk.
 */
void flush_fp_to_thread(struct task_struct *tsk)
{
        if (tsk->thread.regs) {
                /*
                 * We need to disable preemption here because if we didn't,
                 * another process could get scheduled after the regs->msr
                 * test but before we have finished saving the FP registers
                 * to the thread_struct.  That process could take over the
                 * FPU, and then when we get scheduled again we would store
                 * bogus values for the remaining FP registers.
                 */
                preempt_disable();
                if (tsk->thread.regs->msr & MSR_FP) {
#ifdef CONFIG_SMP
                        /*
                         * This should only ever be called for current or
                         * for a stopped child process.  Since we save away
                         * the FP register state on context switch on SMP,
                         * there is something wrong if a stopped child appears
                         * to still have its FP state in the CPU registers.
                         */
                        BUG_ON(tsk != current);
#endif
                        giveup_fpu(tsk);
                }
                preempt_enable();
        }
}
EXPORT_SYMBOL_GPL(flush_fp_to_thread);
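
/*
 * Claim the FPU for use by kernel code.  Any live user FP state is
 * first saved back to its owning thread; callers must have preemption
 * disabled, hence the WARN_ON() below.
 */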
void enable_kernel_fp(void)
{
        WARN_ON(preemptible());

#ifdef CONFIG_SMP
        if (current->thread.regs && (current->thread.regs->msr & MSR_FP))
                giveup_fpu(current);
        else
                giveup_fpu(NULL);       /* just enables FP for kernel */
#else
        giveup_fpu(last_task_used_math);
#endif /* CONFIG_SMP */
}
EXPORT_SYMBOL(enable_kernel_fp);

#ifdef CONFIG_ALTIVEC
void enable_kernel_altivec(void)
{
        WARN_ON(preemptible());

#ifdef CONFIG_SMP
        if (current->thread.regs && (current->thread.regs->msr & MSR_VEC))
                giveup_altivec(current);
        else
                giveup_altivec_notask();
#else
        giveup_altivec(last_task_used_altivec);
#endif /* CONFIG_SMP */
}
EXPORT_SYMBOL(enable_kernel_altivec);

/*
 * Make sure the VMX/Altivec register state in the
 * thread_struct is up to date for task tsk.
 */
void flush_altivec_to_thread(struct task_struct *tsk)
{
        if (tsk->thread.regs) {
                preempt_disable();
                if (tsk->thread.regs->msr & MSR_VEC) {
#ifdef CONFIG_SMP
                        BUG_ON(tsk != current);
#endif
                        giveup_altivec(tsk);
                }
                preempt_enable();
        }
}
EXPORT_SYMBOL_GPL(flush_altivec_to_thread);
#endif /* CONFIG_ALTIVEC */

#ifdef CONFIG_VSX
#if 0
/* not currently used, but some crazy RAID module might want to later */
void enable_kernel_vsx(void)
{
        WARN_ON(preemptible());

#ifdef CONFIG_SMP
        if (current->thread.regs && (current->thread.regs->msr & MSR_VSX))
                giveup_vsx(current);
        else
                giveup_vsx(NULL);       /* just enable vsx for kernel - force */
#else
        giveup_vsx(last_task_used_vsx);
#endif /* CONFIG_SMP */
}
EXPORT_SYMBOL(enable_kernel_vsx);
#endif
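
/*
 * The VSX register file overlaps the FP and VMX register files, so
 * giving up VSX means flushing all three sets of state back to the
 * thread_struct.
 */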
void giveup_vsx(struct task_struct *tsk)
{
        giveup_fpu(tsk);
        giveup_altivec(tsk);
        __giveup_vsx(tsk);
}

void flush_vsx_to_thread(struct task_struct *tsk)
{
        if (tsk->thread.regs) {
                preempt_disable();
                if (tsk->thread.regs->msr & MSR_VSX) {
#ifdef CONFIG_SMP
                        BUG_ON(tsk != current);
#endif
                        giveup_vsx(tsk);
                }
                preempt_enable();
        }
}
EXPORT_SYMBOL_GPL(flush_vsx_to_thread);
#endif /* CONFIG_VSX */

#ifdef CONFIG_SPE
void enable_kernel_spe(void)
{
        WARN_ON(preemptible());

#ifdef CONFIG_SMP
        if (current->thread.regs && (current->thread.regs->msr & MSR_SPE))
                giveup_spe(current);
        else
                giveup_spe(NULL);       /* just enable SPE for kernel - force */
#else
        giveup_spe(last_task_used_spe);
#endif /* CONFIG_SMP */
}
EXPORT_SYMBOL(enable_kernel_spe);

void flush_spe_to_thread(struct task_struct *tsk)
{
        if (tsk->thread.regs) {
                preempt_disable();
                if (tsk->thread.regs->msr & MSR_SPE) {
#ifdef CONFIG_SMP
                        BUG_ON(tsk != current);
#endif
                        tsk->thread.spefscr = mfspr(SPRN_SPEFSCR);
                        giveup_spe(tsk);
                }
                preempt_enable();
        }
}
#endif /* CONFIG_SPE */

#ifndef CONFIG_SMP
/*
 * If we are doing lazy switching of CPU state (FP, altivec or SPE),
 * and the current task has some state, discard it.
 */
void discard_lazy_cpu_state(void)
{
        preempt_disable();
        if (last_task_used_math == current)
                last_task_used_math = NULL;
#ifdef CONFIG_ALTIVEC
        if (last_task_used_altivec == current)
                last_task_used_altivec = NULL;
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_VSX
        if (last_task_used_vsx == current)
                last_task_used_vsx = NULL;
#endif /* CONFIG_VSX */
#ifdef CONFIG_SPE
        if (last_task_used_spe == current)
                last_task_used_spe = NULL;
#endif
        preempt_enable();
}
#endif /* CONFIG_SMP */

#ifdef CONFIG_PPC_ADV_DEBUG_REGS
void do_send_trap(struct pt_regs *regs, unsigned long address,
                  unsigned long error_code, int signal_code, int breakpt)
{
        siginfo_t info;

        current->thread.trap_nr = signal_code;
        if (notify_die(DIE_DABR_MATCH, "dabr_match", regs, error_code,
                        11, SIGSEGV) == NOTIFY_STOP)
                return;

        /* Deliver the signal to userspace */
        info.si_signo = SIGTRAP;
        info.si_errno = breakpt;        /* breakpoint or watchpoint id */
        info.si_code = signal_code;
        info.si_addr = (void __user *)address;
        force_sig_info(SIGTRAP, &info, current);
}
#else   /* !CONFIG_PPC_ADV_DEBUG_REGS */
void do_break(struct pt_regs *regs, unsigned long address,
              unsigned long error_code)
{
        siginfo_t info;

        current->thread.trap_nr = TRAP_HWBKPT;
        if (notify_die(DIE_DABR_MATCH, "dabr_match", regs, error_code,
                        11, SIGSEGV) == NOTIFY_STOP)
                return;

        if (debugger_break_match(regs))
                return;

        /* Clear the breakpoint */
        hw_breakpoint_disable();

        /* Deliver the signal to userspace */
        info.si_signo = SIGTRAP;
        info.si_errno = 0;
        info.si_code = TRAP_HWBKPT;
        info.si_addr = (void __user *)address;
        force_sig_info(SIGTRAP, &info, current);
}
#endif  /* CONFIG_PPC_ADV_DEBUG_REGS */
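
/*
 * Per-CPU shadow of the breakpoint currently programmed into this
 * CPU's debug registers, used to skip redundant reprogramming on
 * context switch.
 */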
static DEFINE_PER_CPU(struct arch_hw_breakpoint, current_brk);

#ifdef CONFIG_PPC_ADV_DEBUG_REGS
/*
 * Set the debug registers back to their default "safe" values.
 */
static void set_debug_reg_defaults(struct thread_struct *thread)
{
        thread->iac1 = thread->iac2 = 0;
#if CONFIG_PPC_ADV_DEBUG_IACS > 2
        thread->iac3 = thread->iac4 = 0;
#endif
        thread->dac1 = thread->dac2 = 0;
#if CONFIG_PPC_ADV_DEBUG_DVCS > 0
        thread->dvc1 = thread->dvc2 = 0;
#endif
        thread->dbcr0 = 0;
#ifdef CONFIG_BOOKE
        /*
         * Force User/Supervisor bits to 0b11 (user-only MSR[PR]=1)
         */
        thread->dbcr1 = DBCR1_IAC1US | DBCR1_IAC2US |
                        DBCR1_IAC3US | DBCR1_IAC4US;
        /*
         * Force Data Address Compare User/Supervisor bits to be
         * User-only (0b11 MSR[PR]=1) and set all other bits in
         * DBCR2 register to be 0.
         */
        thread->dbcr2 = DBCR2_DAC1US | DBCR2_DAC2US;
#else
        thread->dbcr1 = 0;
#endif
}
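
/*
 * Load the hardware debug registers with the values saved in the
 * given thread_struct.
 */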
static void prime_debug_regs(struct thread_struct *thread)
{
        /*
         * We could have inherited MSR_DE from userspace, since
         * it doesn't get cleared on exception entry.  Make sure
         * MSR_DE is clear before we enable any debug events.
         */
        mtmsr(mfmsr() & ~MSR_DE);

        mtspr(SPRN_IAC1, thread->iac1);
        mtspr(SPRN_IAC2, thread->iac2);
#if CONFIG_PPC_ADV_DEBUG_IACS > 2
        mtspr(SPRN_IAC3, thread->iac3);
        mtspr(SPRN_IAC4, thread->iac4);
#endif
        mtspr(SPRN_DAC1, thread->dac1);
        mtspr(SPRN_DAC2, thread->dac2);
#if CONFIG_PPC_ADV_DEBUG_DVCS > 0
        mtspr(SPRN_DVC1, thread->dvc1);
        mtspr(SPRN_DVC2, thread->dvc2);
#endif
        mtspr(SPRN_DBCR0, thread->dbcr0);
        mtspr(SPRN_DBCR1, thread->dbcr1);
#ifdef CONFIG_BOOKE
        mtspr(SPRN_DBCR2, thread->dbcr2);
#endif
}

/*
 * If either the old or the new thread is making use of the debug
 * registers, load the debug registers with the values stored in the
 * new thread.
 */
static void switch_booke_debug_regs(struct thread_struct *new_thread)
{
        if ((current->thread.dbcr0 & DBCR0_IDM)
            || (new_thread->dbcr0 & DBCR0_IDM))
                prime_debug_regs(new_thread);
}
#else   /* !CONFIG_PPC_ADV_DEBUG_REGS */
#ifndef CONFIG_HAVE_HW_BREAKPOINT
static void set_debug_reg_defaults(struct thread_struct *thread)
{
        thread->hw_brk.address = 0;
        thread->hw_brk.type = 0;
        set_breakpoint(&thread->hw_brk);
}
#endif /* !CONFIG_HAVE_HW_BREAKPOINT */
#endif  /* CONFIG_PPC_ADV_DEBUG_REGS */

#ifdef CONFIG_PPC_ADV_DEBUG_REGS
static inline int __set_dabr(unsigned long dabr, unsigned long dabrx)
{
        mtspr(SPRN_DAC1, dabr);
#ifdef CONFIG_PPC_47x
        isync();
#endif
        return 0;
}
#elif defined(CONFIG_PPC_BOOK3S)
static inline int __set_dabr(unsigned long dabr, unsigned long dabrx)
{
        mtspr(SPRN_DABR, dabr);
        if (cpu_has_feature(CPU_FTR_DABRX))
                mtspr(SPRN_DABRX, dabrx);
        return 0;
}
#else
static inline int __set_dabr(unsigned long dabr, unsigned long dabrx)
{
        return -EINVAL;
}
#endif
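
/*
 * Pack an arch_hw_breakpoint into DABR/DABRX form and program it,
 * going through the platform's set_dabr() hook if one is provided.
 */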
static inline int set_dabr(struct arch_hw_breakpoint *brk)
{
        unsigned long dabr, dabrx;

        dabr = brk->address | (brk->type & HW_BRK_TYPE_DABR);
        dabrx = ((brk->type >> 3) & 0x7);

        if (ppc_md.set_dabr)
                return ppc_md.set_dabr(dabr, dabrx);

        return __set_dabr(dabr, dabrx);
}
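
/*
 * Pack an arch_hw_breakpoint into DAWR/DAWRX form and program it.
 * DAWRX holds the read/write enables, the translate bit, the
 * privilege bits and the match range in doublewords, biased by -1,
 * in the MRD field (bits 48:53).  For example, brk->len = 16 bytes
 * covers two doublewords, so mrd = ((16 + 7) >> 3) - 1 = 1.
 */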
static inline int set_dawr(struct arch_hw_breakpoint *brk)
{
        unsigned long dawr, dawrx, mrd;

        dawr = brk->address;

        dawrx  = (brk->type & (HW_BRK_TYPE_READ | HW_BRK_TYPE_WRITE))
                 << (63 - 58); /* read/write bits */
        dawrx |= ((brk->type & (HW_BRK_TYPE_TRANSLATE)) >> 2)
                 << (63 - 59); /* translate */
        dawrx |= (brk->type & (HW_BRK_TYPE_PRIV_ALL))
                 >> 3; /* PRIV bits */
        /* dawr length is stored in field MRD bits 48:53.  Matches range in
           doublewords (64 bits), biased by -1, e.g. 0b000000=1DW and
           0b111111=64DW.
           brk->len is in bytes.
           This aligns up to double word size, shifts and does the bias.
        */
        mrd = ((brk->len + 7) >> 3) - 1;
        dawrx |= (mrd & 0x3f) << (63 - 53);

        if (ppc_md.set_dawr)
                return ppc_md.set_dawr(dawr, dawrx);
        mtspr(SPRN_DAWR, dawr);
        mtspr(SPRN_DAWRX, dawrx);
        return 0;
}
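
/*
 * Install a breakpoint on this CPU, preferring the DAWR where the CPU
 * supports it and falling back to the older DABR otherwise.  The
 * per-CPU shadow copy is updated first.
 */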
int set_breakpoint(struct arch_hw_breakpoint *brk)
{
        __get_cpu_var(current_brk) = *brk;

        if (cpu_has_feature(CPU_FTR_DAWR))
                return set_dawr(brk);

        return set_dabr(brk);
}

#ifdef CONFIG_PPC64
DEFINE_PER_CPU(struct cpu_usage, cpu_usage_array);
#endif
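
/*
 * Compare two breakpoints field by field; used on context switch to
 * avoid reprogramming the debug registers when nothing has changed.
 */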
static inline bool hw_brk_match(struct arch_hw_breakpoint *a,
                                struct arch_hw_breakpoint *b)
{
        if (a->address != b->address)
                return false;
        if (a->type != b->type)
                return false;
        if (a->len != b->len)
                return false;
        return true;
}

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
static inline void tm_reclaim_task(struct task_struct *tsk)
{
        /* We have to work out if we're switching from/to a task that's in the
         * middle of a transaction.
         *
         * In switching we need to maintain a 2nd register state as
         * oldtask->thread.ckpt_regs.  We tm_reclaim(oldproc); this saves the
         * checkpointed (tbegin) state in ckpt_regs and saves the transactional
         * (current) FPRs into oldtask->thread.transact_fpr[].
         *
         * We also context switch (save) TFHAR/TEXASR/TFIAR in here.
         */
        struct thread_struct *thr = &tsk->thread;

        if (!thr->regs)
                return;

        if (!MSR_TM_ACTIVE(thr->regs->msr))
                goto out_and_saveregs;

        /* Stash the original thread MSR, as giveup_fpu et al will
         * modify it.  We hold onto it to see whether the task used
         * FP & vector regs.
         */
        thr->tm_orig_msr = thr->regs->msr;

        TM_DEBUG("--- tm_reclaim on pid %d (NIP=%lx, "
                 "ccr=%lx, msr=%lx, trap=%lx)\n",
                 tsk->pid, thr->regs->nip,
                 thr->regs->ccr, thr->regs->msr,
                 thr->regs->trap);

        tm_reclaim(thr, thr->regs->msr, TM_CAUSE_RESCHED);

        TM_DEBUG("--- tm_reclaim on pid %d complete\n",
                 tsk->pid);

out_and_saveregs:
        /* Always save the regs here, even if a transaction's not active.
         * This context-switches a thread's TM info SPRs.  We do it here to
         * be consistent with the restore path (in recheckpoint) which
         * cannot happen later in _switch().
         */
        tm_save_sprs(thr);
}

static inline void tm_recheckpoint_new_task(struct task_struct *new)
{
        unsigned long msr;

        if (!cpu_has_feature(CPU_FTR_TM))
                return;

        /* Recheckpoint the registers of the thread we're about to switch to.
         *
         * If the task was using FP, we non-lazily reload both the original and
         * the speculative FP register states.  This is because the kernel
         * doesn't see if/when a TM rollback occurs, so if we take an FP
         * unavailable exception later, we are unable to determine which set
         * of FP regs needs to be restored.
         */
        if (!new->thread.regs)
                return;

        /* The TM SPRs are restored here, so that TEXASR.FS can be set
         * before the trecheckpoint and no explosion occurs.
         */
        tm_restore_sprs(&new->thread);

        if (!MSR_TM_ACTIVE(new->thread.regs->msr))
                return;
        msr = new->thread.tm_orig_msr;
        /* Recheckpoint to restore original checkpointed register state. */
        TM_DEBUG("*** tm_recheckpoint of pid %d "
                 "(new->msr 0x%lx, new->origmsr 0x%lx)\n",
                 new->pid, new->thread.regs->msr, msr);

        /* This loads the checkpointed FP/VEC state, if used */
        tm_recheckpoint(&new->thread, msr);

        /* This loads the speculative FP/VEC state, if used */
        if (msr & MSR_FP) {
                do_load_up_transact_fpu(&new->thread);
                new->thread.regs->msr |=
                        (MSR_FP | new->thread.fpexc_mode);
        }
#ifdef CONFIG_ALTIVEC
        if (msr & MSR_VEC) {
                do_load_up_transact_altivec(&new->thread);
                new->thread.regs->msr |= MSR_VEC;
        }
#endif
        /* We may as well turn on VSX too since all the state is restored now */
        if (msr & MSR_VSX)
                new->thread.regs->msr |= MSR_VSX;

        TM_DEBUG("*** tm_recheckpoint of pid %d complete "
                 "(kernel msr 0x%lx)\n",
                 new->pid, mfmsr());
}

static inline void __switch_to_tm(struct task_struct *prev)
{
        if (cpu_has_feature(CPU_FTR_TM)) {
                tm_enable();
                tm_reclaim_task(prev);
        }
}
#else
#define tm_recheckpoint_new_task(new)
#define __switch_to_tm(prev)
#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
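
/*
 * The bulk of the context switch: flush the outgoing task's lazily
 * held register state, swap the debug registers and TM state, then
 * call _switch() to change stacks.  Returns the task we switched
 * away from.
 */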
struct task_struct *__switch_to(struct task_struct *prev,
        struct task_struct *new)
{
        struct thread_struct *new_thread, *old_thread;
        unsigned long flags;
        struct task_struct *last;
#ifdef CONFIG_PPC_BOOK3S_64
        struct ppc64_tlb_batch *batch;
#endif

        /* Back up the TAR across context switches.
         * Note that the TAR is not available for use in the kernel.  (To
         * provide this, the TAR should be backed up/restored on exception
         * entry/exit instead, and be in pt_regs.  FIXME, this should be in
         * pt_regs anyway (for debug).)
         * Save the TAR here before we do treclaim/trecheckpoint as these
         * will change the TAR.
         */
        save_tar(&prev->thread);

        __switch_to_tm(prev);

#ifdef CONFIG_SMP
        /* avoid complexity of lazy save/restore of fpu
         * by just saving it every time we switch out if
         * this task used the fpu during the last quantum.
         *
         * If it tries to use the fpu again, it'll trap and
         * reload its fp regs.  So we don't have to do a restore
         * every switch, just a save.
         *  -- Cort
         */
        if (prev->thread.regs && (prev->thread.regs->msr & MSR_FP))
                giveup_fpu(prev);
#ifdef CONFIG_ALTIVEC
        /*
         * If the previous thread used altivec in the last quantum
         * (thus changing altivec regs) then save them.
         * We used to check the VRSAVE register but not all apps
         * set it, so we don't rely on it now (and in fact we need
         * to save & restore VSCR even if VRSAVE == 0).  -- paulus
         *
         * On SMP we always save/restore altivec regs just to avoid the
         * complexity of changing processors.
         *  -- Cort
         */
        if (prev->thread.regs && (prev->thread.regs->msr & MSR_VEC))
                giveup_altivec(prev);
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_VSX
        if (prev->thread.regs && (prev->thread.regs->msr & MSR_VSX))
                /* VMX and FPU registers are already saved here */
                __giveup_vsx(prev);
#endif /* CONFIG_VSX */
#ifdef CONFIG_SPE
        /*
         * If the previous thread used spe in the last quantum
         * (thus changing spe regs) then save them.
         *
         * On SMP we always save/restore spe regs just to avoid the
         * complexity of changing processors.
         */
        if ((prev->thread.regs && (prev->thread.regs->msr & MSR_SPE)))
                giveup_spe(prev);
#endif /* CONFIG_SPE */

#else  /* CONFIG_SMP */
#ifdef CONFIG_ALTIVEC
        /* Avoid the trap.  On smp this never happens since
         * we don't set last_task_used_altivec -- Cort
         */
        if (new->thread.regs && last_task_used_altivec == new)
                new->thread.regs->msr |= MSR_VEC;
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_VSX
        if (new->thread.regs && last_task_used_vsx == new)
                new->thread.regs->msr |= MSR_VSX;
#endif /* CONFIG_VSX */
#ifdef CONFIG_SPE
        /* Avoid the trap.  On smp this never happens since
         * we don't set last_task_used_spe
         */
        if (new->thread.regs && last_task_used_spe == new)
                new->thread.regs->msr |= MSR_SPE;
#endif /* CONFIG_SPE */

#endif /* CONFIG_SMP */

#ifdef CONFIG_PPC_ADV_DEBUG_REGS
        switch_booke_debug_regs(&new->thread);
#else
/*
 * For PPC_BOOK3S_64, we use the hw-breakpoint interfaces that would
 * schedule DABR
 */
#ifndef CONFIG_HAVE_HW_BREAKPOINT
        if (unlikely(!hw_brk_match(&__get_cpu_var(current_brk), &new->thread.hw_brk)))
                set_breakpoint(&new->thread.hw_brk);
#endif /* CONFIG_HAVE_HW_BREAKPOINT */
#endif

        new_thread = &new->thread;
        old_thread = &current->thread;

#ifdef CONFIG_PPC64
        /*
         * Collect processor utilization data per process
         */
        if (firmware_has_feature(FW_FEATURE_SPLPAR)) {
                struct cpu_usage *cu = &__get_cpu_var(cpu_usage_array);
                long unsigned start_tb, current_tb;
                start_tb = old_thread->start_tb;
                cu->current_tb = current_tb = mfspr(SPRN_PURR);
                old_thread->accum_tb += (current_tb - start_tb);
                new_thread->start_tb = current_tb;
        }
#endif /* CONFIG_PPC64 */

#ifdef CONFIG_PPC_BOOK3S_64
        batch = &__get_cpu_var(ppc64_tlb_batch);
        if (batch->active) {
                current_thread_info()->local_flags |= _TLF_LAZY_MMU;
                if (batch->index)
                        __flush_tlb_pending(batch);
                batch->active = 0;
        }
#endif /* CONFIG_PPC_BOOK3S_64 */

        local_irq_save(flags);

        /*
         * We can't take a PMU exception inside _switch() since there is a
         * window where the kernel stack SLB and the kernel stack are out
         * of sync.  Hard disable here.
         */
        hard_irq_disable();

        tm_recheckpoint_new_task(new);

        last = _switch(old_thread, new_thread);

#ifdef CONFIG_PPC_BOOK3S_64
        if (current_thread_info()->local_flags & _TLF_LAZY_MMU) {
                current_thread_info()->local_flags &= ~_TLF_LAZY_MMU;
                batch = &__get_cpu_var(ppc64_tlb_batch);
                batch->active = 1;
        }
#endif /* CONFIG_PPC_BOOK3S_64 */

        local_irq_restore(flags);

        return last;
}
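
/*
 * Dump the instruction words around regs->nip when printing a crash
 * report; instructions_to_print controls how many words are shown.
 */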
static int instructions_to_print = 16;

static void show_instructions(struct pt_regs *regs)
{
        int i;
        unsigned long pc = regs->nip - (instructions_to_print * 3 / 4 *
                        sizeof(int));

        printk("Instruction dump:");

        for (i = 0; i < instructions_to_print; i++) {
                int instr;

                if (!(i % 8))
                        printk("\n");

#if !defined(CONFIG_BOOKE)
                /* If executing with the IMMU off, adjust pc rather
                 * than print XXXXXXXX.
                 */
                if (!(regs->msr & MSR_IR))
                        pc = (unsigned long)phys_to_virt(pc);
#endif

                /* We use __get_user here *only* to avoid an OOPS on a
                 * bad address because the pc *should* only be a
                 * kernel address.
                 */
                if (!__kernel_text_address(pc) ||
                     __get_user(instr, (unsigned int __user *)pc)) {
                        printk(KERN_CONT "XXXXXXXX ");
                } else {
                        if (regs->nip == pc)
                                printk(KERN_CONT "<%08x> ", instr);
                        else
                                printk(KERN_CONT "%08x ", instr);
                }

                pc += sizeof(int);
        }

        printk("\n");
}
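
/*
 * Table mapping MSR bits to their names, used by show_regs() to print
 * a symbolic decode of the MSR value.
 */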
static struct regbit {
        unsigned long bit;
        const char *name;
} msr_bits[] = {
#if defined(CONFIG_PPC64) && !defined(CONFIG_BOOKE)
        {MSR_SF,        "SF"},
        {MSR_HV,        "HV"},
#endif
        {MSR_VEC,       "VEC"},
        {MSR_VSX,       "VSX"},
#ifdef CONFIG_BOOKE
        {MSR_CE,        "CE"},
#endif
        {MSR_EE,        "EE"},
        {MSR_PR,        "PR"},
        {MSR_FP,        "FP"},
        {MSR_ME,        "ME"},
#ifdef CONFIG_BOOKE
        {MSR_DE,        "DE"},
#else
        {MSR_SE,        "SE"},
        {MSR_BE,        "BE"},
#endif
        {MSR_IR,        "IR"},
        {MSR_DR,        "DR"},
        {MSR_PMM,       "PMM"},
#ifndef CONFIG_BOOKE
        {MSR_RI,        "RI"},
        {MSR_LE,        "LE"},
#endif
        {0,             NULL}
};

static void printbits(unsigned long val, struct regbit *bits)
{
        const char *sep = "";

        printk("<");
        for (; bits->bit; ++bits)
                if (val & bits->bit) {
                        printk("%s%s", sep, bits->name);
                        sep = ",";
                }
        printk(">");
}

#ifdef CONFIG_PPC64
#define REG             "%016lx"
#define REGS_PER_LINE   4
#define LAST_VOLATILE   13
#else
#define REG             "%08lx"
#define REGS_PER_LINE   8
#define LAST_VOLATILE   12
#endif

void show_regs(struct pt_regs *regs)
{
        int i, trap;

        show_regs_print_info(KERN_DEFAULT);

        printk("NIP: "REG" LR: "REG" CTR: "REG"\n",
               regs->nip, regs->link, regs->ctr);
        printk("REGS: %p TRAP: %04lx %s (%s)\n",
               regs, regs->trap, print_tainted(), init_utsname()->release);
        printk("MSR: "REG" ", regs->msr);
        printbits(regs->msr, msr_bits);
        printk(" CR: %08lx XER: %08lx\n", regs->ccr, regs->xer);
#ifdef CONFIG_PPC64
        printk("SOFTE: %ld\n", regs->softe);
#endif
        trap = TRAP(regs);
        if ((regs->trap != 0xc00) && cpu_has_feature(CPU_FTR_CFAR))
                printk("CFAR: "REG"\n", regs->orig_gpr3);
        if (trap == 0x300 || trap == 0x600)
#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
                printk("DEAR: "REG", ESR: "REG"\n", regs->dar, regs->dsisr);
#else
                printk("DAR: "REG", DSISR: %08lx\n", regs->dar, regs->dsisr);
#endif
        for (i = 0; i < 32; i++) {
                if ((i % REGS_PER_LINE) == 0)
                        printk("\nGPR%02d: ", i);
                printk(REG " ", regs->gpr[i]);
                if (i == LAST_VOLATILE && !FULL_REGS(regs))
                        break;
        }
        printk("\n");
#ifdef CONFIG_KALLSYMS
        /*
         * Lookup NIP late so we have the best chance of getting the
         * above info out without failing
         */
        printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip);
        printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link);
#endif
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
        printk("PACATMSCRATCH [%llx]\n", get_paca()->tm_scratch);
#endif
        show_stack(current, (unsigned long *) regs->gpr[1]);
        if (!user_mode(regs))
                show_instructions(regs);
}
void exit_thread(void)
{
        discard_lazy_cpu_state();
}

void flush_thread(void)
{
        discard_lazy_cpu_state();

#ifdef CONFIG_HAVE_HW_BREAKPOINT
        flush_ptrace_hw_breakpoint(current);
#else /* CONFIG_HAVE_HW_BREAKPOINT */
        set_debug_reg_defaults(&current->thread);
#endif /* CONFIG_HAVE_HW_BREAKPOINT */
}

void
release_thread(struct task_struct *t)
{
}

/*
 * this gets called so that we can store coprocessor state into memory and
 * copy the current task into the new thread.
 */
int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{
        flush_fp_to_thread(src);
        flush_altivec_to_thread(src);
        flush_vsx_to_thread(src);
        flush_spe_to_thread(src);

        *dst = *src;

        clear_task_ebb(dst);

        return 0;
}

/*
 * Copy a thread.
 */
extern unsigned long dscr_default; /* defined in arch/powerpc/kernel/sysfs.c */

int copy_thread(unsigned long clone_flags, unsigned long usp,
                unsigned long arg, struct task_struct *p)
{
        struct pt_regs *childregs, *kregs;
        extern void ret_from_fork(void);
        extern void ret_from_kernel_thread(void);
        void (*f)(void);
        unsigned long sp = (unsigned long)task_stack_page(p) + THREAD_SIZE;

        /* Copy registers */
        sp -= sizeof(struct pt_regs);
        childregs = (struct pt_regs *) sp;
        if (unlikely(p->flags & PF_KTHREAD)) {
                struct thread_info *ti = (void *)task_stack_page(p);
                memset(childregs, 0, sizeof(struct pt_regs));
                childregs->gpr[1] = sp + sizeof(struct pt_regs);
                childregs->gpr[14] = usp;       /* function */
#ifdef CONFIG_PPC64
                clear_tsk_thread_flag(p, TIF_32BIT);
                childregs->softe = 1;
#endif
                childregs->gpr[15] = arg;
                p->thread.regs = NULL;  /* no user register state */
                ti->flags |= _TIF_RESTOREALL;
                f = ret_from_kernel_thread;
        } else {
                struct pt_regs *regs = current_pt_regs();
                CHECK_FULL_REGS(regs);
                *childregs = *regs;
                if (usp)
                        childregs->gpr[1] = usp;
                p->thread.regs = childregs;
                childregs->gpr[3] = 0;  /* Result from fork() */
                if (clone_flags & CLONE_SETTLS) {
#ifdef CONFIG_PPC64
                        if (!is_32bit_task())
                                childregs->gpr[13] = childregs->gpr[6];
                        else
#endif
                                childregs->gpr[2] = childregs->gpr[6];
                }

                f = ret_from_fork;
        }
        sp -= STACK_FRAME_OVERHEAD;

        /*
         * The way this works is that at some point in the future
         * some task will call _switch to switch to the new task.
         * That will pop off the stack frame created below and start
         * the new task running at ret_from_fork.  The new task will
         * do some housekeeping and then return from the fork or clone
         * system call, using the stack frame created above.
         */
        ((unsigned long *)sp)[0] = 0;
        sp -= sizeof(struct pt_regs);
        kregs = (struct pt_regs *) sp;
        sp -= STACK_FRAME_OVERHEAD;
        p->thread.ksp = sp;
        p->thread.ksp_limit = (unsigned long)task_stack_page(p) +
                                _ALIGN_UP(sizeof(struct thread_info), 16);
#ifdef CONFIG_HAVE_HW_BREAKPOINT
        p->thread.ptrace_bps[0] = NULL;
#endif

#ifdef CONFIG_PPC_STD_MMU_64
        if (mmu_has_feature(MMU_FTR_SLB)) {
                unsigned long sp_vsid;
                unsigned long llp = mmu_psize_defs[mmu_linear_psize].sllp;

                if (mmu_has_feature(MMU_FTR_1T_SEGMENT))
                        sp_vsid = get_kernel_vsid(sp, MMU_SEGSIZE_1T)
                                << SLB_VSID_SHIFT_1T;
                else
                        sp_vsid = get_kernel_vsid(sp, MMU_SEGSIZE_256M)
                                << SLB_VSID_SHIFT;
                sp_vsid |= SLB_VSID_KERNEL | llp;
                p->thread.ksp_vsid = sp_vsid;
        }
#endif /* CONFIG_PPC_STD_MMU_64 */
#ifdef CONFIG_PPC64
        if (cpu_has_feature(CPU_FTR_DSCR)) {
                p->thread.dscr_inherit = current->thread.dscr_inherit;
                p->thread.dscr = current->thread.dscr;
        }
        if (cpu_has_feature(CPU_FTR_HAS_PPR))
                p->thread.ppr = INIT_PPR;
#endif
        /*
         * The PPC64 ABI makes use of a TOC to contain function
         * pointers.  The function pointer f above is actually a
         * pointer to a function descriptor; its first entry is the
         * address of the actual function.
         */
#ifdef CONFIG_PPC64
        kregs->nip = *((unsigned long *)f);
#else
        kregs->nip = (unsigned long)f;
#endif
        return 0;
}
/*
 * Set up a thread for executing a new program
 */
void start_thread(struct pt_regs *regs, unsigned long start, unsigned long sp)
{
#ifdef CONFIG_PPC64
        unsigned long load_addr = regs->gpr[2]; /* saved by ELF_PLAT_INIT */
#endif

        /*
         * If we exec out of a kernel thread then thread.regs will not be
         * set.  Do it now.
         */
        if (!current->thread.regs) {
                struct pt_regs *regs = task_stack_page(current) + THREAD_SIZE;
                current->thread.regs = regs - 1;
        }

        memset(regs->gpr, 0, sizeof(regs->gpr));
        regs->ctr = 0;
        regs->link = 0;
        regs->xer = 0;
        regs->ccr = 0;
        regs->gpr[1] = sp;

        /*
         * We have just cleared all the nonvolatile GPRs, so make
         * FULL_REGS(regs) return true.  This is necessary to allow
         * ptrace to examine the thread immediately after exec.
         */
        regs->trap &= ~1UL;

#ifdef CONFIG_PPC32
        regs->mq = 0;
        regs->nip = start;
        regs->msr = MSR_USER;
#else
        if (!is_32bit_task()) {
                unsigned long entry, toc;

                /* start is a relocated pointer to the function descriptor for
                 * the elf _start routine.  The first entry in the function
                 * descriptor is the entry address of _start and the second
                 * entry is the TOC value we need to use.
                 */
                __get_user(entry, (unsigned long __user *)start);
                __get_user(toc, (unsigned long __user *)start+1);

                /* Check whether the e_entry function descriptor entries
                 * need to be relocated before we can use them.
                 */
                if (load_addr != 0) {
                        entry += load_addr;
                        toc   += load_addr;
                }
                regs->nip = entry;
                regs->gpr[2] = toc;
                regs->msr = MSR_USER64;
        } else {
                regs->nip = start;
                regs->gpr[2] = 0;
                regs->msr = MSR_USER32;
        }
#endif

        discard_lazy_cpu_state();
#ifdef CONFIG_VSX
        current->thread.used_vsr = 0;
#endif
        memset(current->thread.fpr, 0, sizeof(current->thread.fpr));
        current->thread.fpscr.val = 0;
#ifdef CONFIG_ALTIVEC
        memset(current->thread.vr, 0, sizeof(current->thread.vr));
        memset(&current->thread.vscr, 0, sizeof(current->thread.vscr));
        current->thread.vscr.u[3] = 0x00010000; /* Java mode disabled */
        current->thread.vrsave = 0;
        current->thread.used_vr = 0;
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_SPE
        memset(current->thread.evr, 0, sizeof(current->thread.evr));
        current->thread.acc = 0;
        current->thread.spefscr = 0;
        current->thread.used_spe = 0;
#endif /* CONFIG_SPE */
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
        if (cpu_has_feature(CPU_FTR_TM))
                regs->msr |= MSR_TM;
        current->thread.tm_tfhar = 0;
        current->thread.tm_texasr = 0;
        current->thread.tm_tfiar = 0;
#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
}

#define PR_FP_ALL_EXCEPT (PR_FP_EXC_DIV | PR_FP_EXC_OVF | PR_FP_EXC_UND \
                | PR_FP_EXC_RES | PR_FP_EXC_INV)

int set_fpexc_mode(struct task_struct *tsk, unsigned int val)
{
        struct pt_regs *regs = tsk->thread.regs;

        /* This is a bit hairy.  If we are an SPE enabled processor
         * (have embedded fp) we store the IEEE exception enable flags in
         * fpexc_mode.  fpexc_mode is also used for setting FP exception
         * mode (async, precise, disabled) for 'Classic' FP. */
        if (val & PR_FP_EXC_SW_ENABLE) {
#ifdef CONFIG_SPE
                if (cpu_has_feature(CPU_FTR_SPE)) {
                        tsk->thread.fpexc_mode = val &
                                (PR_FP_EXC_SW_ENABLE | PR_FP_ALL_EXCEPT);
                        return 0;
                } else {
                        return -EINVAL;
                }
#else
                return -EINVAL;
#endif
        }

        /* on a CONFIG_SPE this does not hurt us.  The bits that
         * __pack_fe01 use do not overlap with bits used for
         * PR_FP_EXC_SW_ENABLE.  Additionally, the MSR[FE0,FE1] bits
         * on CONFIG_SPE implementations are reserved so writing to
         * them does not change anything */
        if (val > PR_FP_EXC_PRECISE)
                return -EINVAL;
        tsk->thread.fpexc_mode = __pack_fe01(val);
        if (regs != NULL && (regs->msr & MSR_FP) != 0)
                regs->msr = (regs->msr & ~(MSR_FE0|MSR_FE1))
                        | tsk->thread.fpexc_mode;
        return 0;
}

int get_fpexc_mode(struct task_struct *tsk, unsigned long adr)
{
        unsigned int val;

        if (tsk->thread.fpexc_mode & PR_FP_EXC_SW_ENABLE)
#ifdef CONFIG_SPE
                if (cpu_has_feature(CPU_FTR_SPE))
                        val = tsk->thread.fpexc_mode;
                else
                        return -EINVAL;
#else
                return -EINVAL;
#endif
        else
                val = __unpack_fe01(tsk->thread.fpexc_mode);
        return put_user(val, (unsigned int __user *) adr);
}

int set_endian(struct task_struct *tsk, unsigned int val)
{
        struct pt_regs *regs = tsk->thread.regs;

        if ((val == PR_ENDIAN_LITTLE && !cpu_has_feature(CPU_FTR_REAL_LE)) ||
            (val == PR_ENDIAN_PPC_LITTLE && !cpu_has_feature(CPU_FTR_PPC_LE)))
                return -EINVAL;

        if (regs == NULL)
                return -EINVAL;

        if (val == PR_ENDIAN_BIG)
                regs->msr &= ~MSR_LE;
        else if (val == PR_ENDIAN_LITTLE || val == PR_ENDIAN_PPC_LITTLE)
                regs->msr |= MSR_LE;
        else
                return -EINVAL;

        return 0;
}

int get_endian(struct task_struct *tsk, unsigned long adr)
{
        struct pt_regs *regs = tsk->thread.regs;
        unsigned int val;

        if (!cpu_has_feature(CPU_FTR_PPC_LE) &&
            !cpu_has_feature(CPU_FTR_REAL_LE))
                return -EINVAL;

        if (regs == NULL)
                return -EINVAL;

        if (regs->msr & MSR_LE) {
                if (cpu_has_feature(CPU_FTR_REAL_LE))
                        val = PR_ENDIAN_LITTLE;
                else
                        val = PR_ENDIAN_PPC_LITTLE;
        } else
                val = PR_ENDIAN_BIG;

        return put_user(val, (unsigned int __user *)adr);
}

int set_unalign_ctl(struct task_struct *tsk, unsigned int val)
{
        tsk->thread.align_ctl = val;
        return 0;
}

int get_unalign_ctl(struct task_struct *tsk, unsigned long adr)
{
        return put_user(tsk->thread.align_ctl, (unsigned int __user *)adr);
}
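
/*
 * Check whether the given stack pointer lies within one of this CPU's
 * interrupt stacks (hard or soft IRQ) with at least nbytes of room.
 */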
static inline int valid_irq_stack(unsigned long sp, struct task_struct *p,
                                  unsigned long nbytes)
{
        unsigned long stack_page;
        unsigned long cpu = task_cpu(p);

        /*
         * Avoid crashing if the stack has overflowed and corrupted
         * task_cpu(p), which is in the thread_info struct.
         */
        if (cpu < NR_CPUS && cpu_possible(cpu)) {
                stack_page = (unsigned long) hardirq_ctx[cpu];
                if (sp >= stack_page + sizeof(struct thread_struct)
                    && sp <= stack_page + THREAD_SIZE - nbytes)
                        return 1;

                stack_page = (unsigned long) softirq_ctx[cpu];
                if (sp >= stack_page + sizeof(struct thread_struct)
                    && sp <= stack_page + THREAD_SIZE - nbytes)
                        return 1;
        }
        return 0;
}

int validate_sp(unsigned long sp, struct task_struct *p,
                unsigned long nbytes)
{
        unsigned long stack_page = (unsigned long)task_stack_page(p);

        if (sp >= stack_page + sizeof(struct thread_struct)
            && sp <= stack_page + THREAD_SIZE - nbytes)
                return 1;

        return valid_irq_stack(sp, p, nbytes);
}

EXPORT_SYMBOL(validate_sp);
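
/*
 * Walk a sleeping task's stack and return the first return address
 * that is not inside the scheduler; this is the value reported via
 * /proc/<pid>/wchan.
 */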
unsigned long get_wchan(struct task_struct *p)
{
        unsigned long ip, sp;
        int count = 0;

        if (!p || p == current || p->state == TASK_RUNNING)
                return 0;

        sp = p->thread.ksp;
        if (!validate_sp(sp, p, STACK_FRAME_OVERHEAD))
                return 0;

        do {
                sp = *(unsigned long *)sp;
                if (!validate_sp(sp, p, STACK_FRAME_OVERHEAD))
                        return 0;
                if (count > 0) {
                        ip = ((unsigned long *)sp)[STACK_FRAME_LR_SAVE];
                        if (!in_sched_functions(ip))
                                return ip;
                }
        } while (count++ < 16);
        return 0;
}
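
/*
 * Print a backtrace by following the back-chain of stack frames,
 * recognising exception frames by the "regshere" marker so the
 * trapping context can be shown as well.
 */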
static int kstack_depth_to_print = CONFIG_PRINT_STACK_DEPTH;

void show_stack(struct task_struct *tsk, unsigned long *stack)
{
        unsigned long sp, ip, lr, newsp;
        int count = 0;
        int firstframe = 1;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
        int curr_frame = current->curr_ret_stack;
        extern void return_to_handler(void);
        unsigned long rth = (unsigned long)return_to_handler;
        unsigned long mrth = -1;
#ifdef CONFIG_PPC64
        extern void mod_return_to_handler(void);
        rth = *(unsigned long *)rth;
        mrth = (unsigned long)mod_return_to_handler;
        mrth = *(unsigned long *)mrth;
#endif
#endif

        sp = (unsigned long) stack;
        if (tsk == NULL)
                tsk = current;
        if (sp == 0) {
                if (tsk == current)
                        asm("mr %0,1" : "=r" (sp));
                else
                        sp = tsk->thread.ksp;
        }

        lr = 0;
        printk("Call Trace:\n");
        do {
                if (!validate_sp(sp, tsk, STACK_FRAME_OVERHEAD))
                        return;

                stack = (unsigned long *) sp;
                newsp = stack[0];
                ip = stack[STACK_FRAME_LR_SAVE];
                if (!firstframe || ip != lr) {
                        printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip);
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
                        if ((ip == rth || ip == mrth) && curr_frame >= 0) {
                                printk(" (%pS)",
                                       (void *)current->ret_stack[curr_frame].ret);
                                curr_frame--;
                        }
#endif
                        if (firstframe)
                                printk(" (unreliable)");
                        printk("\n");
                }
                firstframe = 0;

                /*
                 * See if this is an exception frame.
                 * We look for the "regshere" marker in the current frame.
                 */
                if (validate_sp(sp, tsk, STACK_INT_FRAME_SIZE)
                    && stack[STACK_FRAME_MARKER] == STACK_FRAME_REGS_MARKER) {
                        struct pt_regs *regs = (struct pt_regs *)
                                (sp + STACK_FRAME_OVERHEAD);
                        lr = regs->link;
                        printk("--- Exception: %lx at %pS\n    LR = %pS\n",
                               regs->trap, (void *)regs->nip, (void *)lr);
                        firstframe = 1;
                }

                sp = newsp;
        } while (count++ < kstack_depth_to_print);
}

#ifdef CONFIG_PPC64
/* Called with hard IRQs off */
void notrace __ppc64_runlatch_on(void)
{
        struct thread_info *ti = current_thread_info();
        unsigned long ctrl;

        ctrl = mfspr(SPRN_CTRLF);
        ctrl |= CTRL_RUNLATCH;
        mtspr(SPRN_CTRLT, ctrl);

        ti->local_flags |= _TLF_RUNLATCH;
}

/* Called with hard IRQs off */
void notrace __ppc64_runlatch_off(void)
{
        struct thread_info *ti = current_thread_info();
        unsigned long ctrl;

        ti->local_flags &= ~_TLF_RUNLATCH;

        ctrl = mfspr(SPRN_CTRLF);
        ctrl &= ~CTRL_RUNLATCH;
        mtspr(SPRN_CTRLT, ctrl);
}
#endif /* CONFIG_PPC64 */
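
/*
 * Randomise the initial stack pointer within one page, keeping 16-byte
 * alignment, unless address-space randomisation is disabled for this
 * task.
 */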
unsigned long arch_align_stack(unsigned long sp)
{
        if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
                sp -= get_random_int() & ~PAGE_MASK;
        return sp & ~0xf;
}

static inline unsigned long brk_rnd(void)
{
        unsigned long rnd = 0;

        /* 8MB for 32bit, 1GB for 64bit */
        if (is_32bit_task())
                rnd = (long)(get_random_int() % (1<<(23-PAGE_SHIFT)));
        else
                rnd = (long)(get_random_int() % (1<<(30-PAGE_SHIFT)));

        return rnd << PAGE_SHIFT;
}

unsigned long arch_randomize_brk(struct mm_struct *mm)
{
        unsigned long base = mm->brk;
        unsigned long ret;

#ifdef CONFIG_PPC_STD_MMU_64
        /*
         * If we are using 1TB segments and we are allowed to randomise
         * the heap, we can put it above 1TB so it is backed by a 1TB
         * segment.  Otherwise the heap will be in the bottom 1TB
         * which always uses 256MB segments and this may result in a
         * performance penalty.
         */
        if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T))
                base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T);
#endif

        ret = PAGE_ALIGN(base + brk_rnd());

        if (ret < mm->brk)
                return mm->brk;

        return ret;
}

unsigned long randomize_et_dyn(unsigned long base)
{
        unsigned long ret = PAGE_ALIGN(base + brk_rnd());

        if (ret < base)
                return base;

        return ret;
}