process.c

/*
 * Derived from "arch/i386/kernel/process.c"
 * Copyright (C) 1995 Linus Torvalds
 *
 * Updated and modified by Cort Dougan (cort@cs.nmt.edu) and
 * Paul Mackerras (paulus@cs.anu.edu.au)
 *
 * PowerPC version
 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/elf.h>
#include <linux/init.h>
#include <linux/prctl.h>
#include <linux/init_task.h>
#include <linux/export.h>
#include <linux/kallsyms.h>
#include <linux/mqueue.h>
#include <linux/hardirq.h>
#include <linux/utsname.h>
#include <linux/ftrace.h>
#include <linux/kernel_stat.h>
#include <linux/personality.h>
#include <linux/random.h>
#include <linux/hw_breakpoint.h>

#include <asm/pgtable.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/processor.h>
#include <asm/mmu.h>
#include <asm/prom.h>
#include <asm/machdep.h>
#include <asm/time.h>
#include <asm/runlatch.h>
#include <asm/syscalls.h>
#include <asm/switch_to.h>
#include <asm/tm.h>
#include <asm/debug.h>
#ifdef CONFIG_PPC64
#include <asm/firmware.h>
#endif
#include <linux/kprobes.h>
#include <linux/kdebug.h>

/* Transactional Memory debug */
#ifdef TM_DEBUG_SW
#define TM_DEBUG(x...) printk(KERN_INFO x)
#else
#define TM_DEBUG(x...) do { } while (0)
#endif

extern unsigned long _get_SP(void);

#ifndef CONFIG_SMP
struct task_struct *last_task_used_math = NULL;
struct task_struct *last_task_used_altivec = NULL;
struct task_struct *last_task_used_vsx = NULL;
struct task_struct *last_task_used_spe = NULL;
#endif

#ifdef CONFIG_PPC_FPU
/*
 * Make sure the floating-point register state in the
 * thread_struct is up to date for task tsk.
 */
void flush_fp_to_thread(struct task_struct *tsk)
{
	if (tsk->thread.regs) {
		/*
		 * We need to disable preemption here because if we didn't,
		 * another process could get scheduled after the regs->msr
		 * test but before we have finished saving the FP registers
		 * to the thread_struct.  That process could take over the
		 * FPU, and then when we get scheduled again we would store
		 * bogus values for the remaining FP registers.
		 */
		preempt_disable();
		if (tsk->thread.regs->msr & MSR_FP) {
#ifdef CONFIG_SMP
			/*
			 * This should only ever be called for current or
			 * for a stopped child process.  Since we save away
			 * the FP register state on context switch on SMP,
			 * there is something wrong if a stopped child appears
			 * to still have its FP state in the CPU registers.
			 */
			BUG_ON(tsk != current);
#endif
			giveup_fpu(tsk);
		}
		preempt_enable();
	}
}
EXPORT_SYMBOL_GPL(flush_fp_to_thread);
#endif
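
/*
 * Allow the kernel itself to use the FPU: flush out any lazily-held
 * user FP state via giveup_fpu(), which also enables FP for the
 * kernel (see the inline comment below).  Callers must have
 * preemption disabled, hence the WARN_ON.
 */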
void enable_kernel_fp(void)
{
	WARN_ON(preemptible());

#ifdef CONFIG_SMP
	if (current->thread.regs && (current->thread.regs->msr & MSR_FP))
		giveup_fpu(current);
	else
		giveup_fpu(NULL);	/* just enables FP for kernel */
#else
	giveup_fpu(last_task_used_math);
#endif /* CONFIG_SMP */
}
EXPORT_SYMBOL(enable_kernel_fp);

#ifdef CONFIG_ALTIVEC
void enable_kernel_altivec(void)
{
	WARN_ON(preemptible());

#ifdef CONFIG_SMP
	if (current->thread.regs && (current->thread.regs->msr & MSR_VEC))
		giveup_altivec(current);
	else
		giveup_altivec_notask();
#else
	giveup_altivec(last_task_used_altivec);
#endif /* CONFIG_SMP */
}
EXPORT_SYMBOL(enable_kernel_altivec);

/*
 * Make sure the VMX/Altivec register state in the
 * thread_struct is up to date for task tsk.
 */
void flush_altivec_to_thread(struct task_struct *tsk)
{
	if (tsk->thread.regs) {
		preempt_disable();
		if (tsk->thread.regs->msr & MSR_VEC) {
#ifdef CONFIG_SMP
			BUG_ON(tsk != current);
#endif
			giveup_altivec(tsk);
		}
		preempt_enable();
	}
}
EXPORT_SYMBOL_GPL(flush_altivec_to_thread);
#endif /* CONFIG_ALTIVEC */

#ifdef CONFIG_VSX
#if 0
/* not currently used, but some crazy RAID module might want to later */
void enable_kernel_vsx(void)
{
	WARN_ON(preemptible());

#ifdef CONFIG_SMP
	if (current->thread.regs && (current->thread.regs->msr & MSR_VSX))
		giveup_vsx(current);
	else
		giveup_vsx(NULL);	/* just enable vsx for kernel - force */
#else
	giveup_vsx(last_task_used_vsx);
#endif /* CONFIG_SMP */
}
EXPORT_SYMBOL(enable_kernel_vsx);
#endif
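
/*
 * VSX state overlaps the classic FP and VMX register files, so
 * giving up VSX means giving up the FP and Altivec state as well.
 */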
void giveup_vsx(struct task_struct *tsk)
{
	giveup_fpu(tsk);
	giveup_altivec(tsk);
	__giveup_vsx(tsk);
}

void flush_vsx_to_thread(struct task_struct *tsk)
{
	if (tsk->thread.regs) {
		preempt_disable();
		if (tsk->thread.regs->msr & MSR_VSX) {
#ifdef CONFIG_SMP
			BUG_ON(tsk != current);
#endif
			giveup_vsx(tsk);
		}
		preempt_enable();
	}
}
EXPORT_SYMBOL_GPL(flush_vsx_to_thread);
#endif /* CONFIG_VSX */

#ifdef CONFIG_SPE
void enable_kernel_spe(void)
{
	WARN_ON(preemptible());

#ifdef CONFIG_SMP
	if (current->thread.regs && (current->thread.regs->msr & MSR_SPE))
		giveup_spe(current);
	else
		giveup_spe(NULL);	/* just enable SPE for kernel - force */
#else
	giveup_spe(last_task_used_spe);
#endif /* CONFIG_SMP */
}
EXPORT_SYMBOL(enable_kernel_spe);

void flush_spe_to_thread(struct task_struct *tsk)
{
	if (tsk->thread.regs) {
		preempt_disable();
		if (tsk->thread.regs->msr & MSR_SPE) {
#ifdef CONFIG_SMP
			BUG_ON(tsk != current);
#endif
			tsk->thread.spefscr = mfspr(SPRN_SPEFSCR);
			giveup_spe(tsk);
		}
		preempt_enable();
	}
}
#endif /* CONFIG_SPE */

#ifndef CONFIG_SMP
/*
 * If we are doing lazy switching of CPU state (FP, altivec or SPE),
 * and the current task has some state, discard it.
 */
void discard_lazy_cpu_state(void)
{
	preempt_disable();
	if (last_task_used_math == current)
		last_task_used_math = NULL;
#ifdef CONFIG_ALTIVEC
	if (last_task_used_altivec == current)
		last_task_used_altivec = NULL;
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_VSX
	if (last_task_used_vsx == current)
		last_task_used_vsx = NULL;
#endif /* CONFIG_VSX */
#ifdef CONFIG_SPE
	if (last_task_used_spe == current)
		last_task_used_spe = NULL;
#endif
	preempt_enable();
}
#endif /* CONFIG_SMP */

#ifdef CONFIG_PPC_ADV_DEBUG_REGS
void do_send_trap(struct pt_regs *regs, unsigned long address,
		  unsigned long error_code, int signal_code, int breakpt)
{
	siginfo_t info;

	current->thread.trap_nr = signal_code;
	if (notify_die(DIE_DABR_MATCH, "dabr_match", regs, error_code,
			11, SIGSEGV) == NOTIFY_STOP)
		return;

	/* Deliver the signal to userspace */
	info.si_signo = SIGTRAP;
	info.si_errno = breakpt;	/* breakpoint or watchpoint id */
	info.si_code = signal_code;
	info.si_addr = (void __user *)address;
	force_sig_info(SIGTRAP, &info, current);
}
#else	/* !CONFIG_PPC_ADV_DEBUG_REGS */
void do_break(struct pt_regs *regs, unsigned long address,
	      unsigned long error_code)
{
	siginfo_t info;

	current->thread.trap_nr = TRAP_HWBKPT;
	if (notify_die(DIE_DABR_MATCH, "dabr_match", regs, error_code,
			11, SIGSEGV) == NOTIFY_STOP)
		return;

	if (debugger_break_match(regs))
		return;

	/* Clear the breakpoint */
	hw_breakpoint_disable();

	/* Deliver the signal to userspace */
	info.si_signo = SIGTRAP;
	info.si_errno = 0;
	info.si_code = TRAP_HWBKPT;
	info.si_addr = (void __user *)address;
	force_sig_info(SIGTRAP, &info, current);
}
#endif	/* CONFIG_PPC_ADV_DEBUG_REGS */
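
/*
 * Per-CPU copy of the breakpoint currently programmed into the
 * hardware; __switch_to() compares against it (via hw_brk_match())
 * so it can skip redundant SPR writes when switching threads.
 */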
static DEFINE_PER_CPU(struct arch_hw_breakpoint, current_brk);

#ifdef CONFIG_PPC_ADV_DEBUG_REGS
/*
 * Set the debug registers back to their default "safe" values.
 */
static void set_debug_reg_defaults(struct thread_struct *thread)
{
	thread->debug.iac1 = thread->debug.iac2 = 0;
#if CONFIG_PPC_ADV_DEBUG_IACS > 2
	thread->debug.iac3 = thread->debug.iac4 = 0;
#endif
	thread->debug.dac1 = thread->debug.dac2 = 0;
#if CONFIG_PPC_ADV_DEBUG_DVCS > 0
	thread->debug.dvc1 = thread->debug.dvc2 = 0;
#endif
	thread->debug.dbcr0 = 0;
#ifdef CONFIG_BOOKE
	/*
	 * Force User/Supervisor bits to 0b11 (user-only MSR[PR]=1)
	 */
	thread->debug.dbcr1 = DBCR1_IAC1US | DBCR1_IAC2US |
			      DBCR1_IAC3US | DBCR1_IAC4US;
	/*
	 * Force Data Address Compare User/Supervisor bits to be User-only
	 * (0b11 MSR[PR]=1) and set all other bits in DBCR2 register to be 0.
	 */
	thread->debug.dbcr2 = DBCR2_DAC1US | DBCR2_DAC2US;
#else
	thread->debug.dbcr1 = 0;
#endif
}

static void prime_debug_regs(struct thread_struct *thread)
{
	/*
	 * We could have inherited MSR_DE from userspace, since
	 * it doesn't get cleared on exception entry.  Make sure
	 * MSR_DE is clear before we enable any debug events.
	 */
	mtmsr(mfmsr() & ~MSR_DE);

	mtspr(SPRN_IAC1, thread->debug.iac1);
	mtspr(SPRN_IAC2, thread->debug.iac2);
#if CONFIG_PPC_ADV_DEBUG_IACS > 2
	mtspr(SPRN_IAC3, thread->debug.iac3);
	mtspr(SPRN_IAC4, thread->debug.iac4);
#endif
	mtspr(SPRN_DAC1, thread->debug.dac1);
	mtspr(SPRN_DAC2, thread->debug.dac2);
#if CONFIG_PPC_ADV_DEBUG_DVCS > 0
	mtspr(SPRN_DVC1, thread->debug.dvc1);
	mtspr(SPRN_DVC2, thread->debug.dvc2);
#endif
	mtspr(SPRN_DBCR0, thread->debug.dbcr0);
	mtspr(SPRN_DBCR1, thread->debug.dbcr1);
#ifdef CONFIG_BOOKE
	mtspr(SPRN_DBCR2, thread->debug.dbcr2);
#endif
}

/*
 * Unless neither the old nor the new thread is making use of the
 * debug registers, set the debug registers from the values
 * stored in the new thread.
 */
void switch_booke_debug_regs(struct thread_struct *new_thread)
{
	if ((current->thread.debug.dbcr0 & DBCR0_IDM)
		|| (new_thread->debug.dbcr0 & DBCR0_IDM))
			prime_debug_regs(new_thread);
}
EXPORT_SYMBOL_GPL(switch_booke_debug_regs);
#else	/* !CONFIG_PPC_ADV_DEBUG_REGS */
#ifndef CONFIG_HAVE_HW_BREAKPOINT
static void set_debug_reg_defaults(struct thread_struct *thread)
{
	thread->hw_brk.address = 0;
	thread->hw_brk.type = 0;
	set_breakpoint(&thread->hw_brk);
}
#endif /* !CONFIG_HAVE_HW_BREAKPOINT */
#endif	/* CONFIG_PPC_ADV_DEBUG_REGS */

#ifdef CONFIG_PPC_ADV_DEBUG_REGS
static inline int __set_dabr(unsigned long dabr, unsigned long dabrx)
{
	mtspr(SPRN_DAC1, dabr);
#ifdef CONFIG_PPC_47x
	isync();
#endif
	return 0;
}
#elif defined(CONFIG_PPC_BOOK3S)
static inline int __set_dabr(unsigned long dabr, unsigned long dabrx)
{
	mtspr(SPRN_DABR, dabr);
	if (cpu_has_feature(CPU_FTR_DABRX))
		mtspr(SPRN_DABRX, dabrx);
	return 0;
}
#else
static inline int __set_dabr(unsigned long dabr, unsigned long dabrx)
{
	return -EINVAL;
}
#endif

static inline int set_dabr(struct arch_hw_breakpoint *brk)
{
	unsigned long dabr, dabrx;

	dabr = brk->address | (brk->type & HW_BRK_TYPE_DABR);
	dabrx = ((brk->type >> 3) & 0x7);

	if (ppc_md.set_dabr)
		return ppc_md.set_dabr(dabr, dabrx);

	return __set_dabr(dabr, dabrx);
}

static inline int set_dawr(struct arch_hw_breakpoint *brk)
{
	unsigned long dawr, dawrx, mrd;

	dawr = brk->address;

	dawrx  = (brk->type & (HW_BRK_TYPE_READ | HW_BRK_TYPE_WRITE))
			<< (63 - 58);	/* read/write bits */
	dawrx |= ((brk->type & (HW_BRK_TYPE_TRANSLATE)) >> 2)
			<< (63 - 59);	/* translate */
	dawrx |= (brk->type & (HW_BRK_TYPE_PRIV_ALL))
			>> 3;		/* PRIM bits */
	/* dawr length is stored in field MDR bits 48:53.  Matches range in
	   doublewords (64 bits), biased by -1, e.g. 0b000000=1DW and
	   0b111111=64DW.
	   brk->len is in bytes.
	   This aligns up to double word size, shifts and does the bias.
	*/
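	/* Worked example: an 8-byte watchpoint has brk->len = 8, so
	 * mrd = ((8 + 7) >> 3) - 1 = 0, i.e. 0b000000 = 1 doubleword. */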
	mrd = ((brk->len + 7) >> 3) - 1;
	dawrx |= (mrd & 0x3f) << (63 - 53);

	if (ppc_md.set_dawr)
		return ppc_md.set_dawr(dawr, dawrx);
	mtspr(SPRN_DAWR, dawr);
	mtspr(SPRN_DAWRX, dawrx);
	return 0;
}

int set_breakpoint(struct arch_hw_breakpoint *brk)
{
	__get_cpu_var(current_brk) = *brk;

	if (cpu_has_feature(CPU_FTR_DAWR))
		return set_dawr(brk);

	return set_dabr(brk);
}

#ifdef CONFIG_PPC64
DEFINE_PER_CPU(struct cpu_usage, cpu_usage_array);
#endif

static inline bool hw_brk_match(struct arch_hw_breakpoint *a,
			      struct arch_hw_breakpoint *b)
{
	if (a->address != b->address)
		return false;
	if (a->type != b->type)
		return false;
	if (a->len != b->len)
		return false;
	return true;
}

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
static inline void tm_reclaim_task(struct task_struct *tsk)
{
	/* We have to work out if we're switching from/to a task that's in the
	 * middle of a transaction.
	 *
	 * In switching we need to maintain a 2nd register state as
	 * oldtask->thread.ckpt_regs.  We tm_reclaim(oldproc); this saves the
	 * checkpointed (tbegin) state in ckpt_regs and saves the transactional
	 * (current) FPRs into oldtask->thread.transact_fpr[].
	 *
	 * We also context switch (save) TFHAR/TEXASR/TFIAR in here.
	 */
	struct thread_struct *thr = &tsk->thread;

	if (!thr->regs)
		return;

	if (!MSR_TM_ACTIVE(thr->regs->msr))
		goto out_and_saveregs;

	/* Stash the original thread MSR, as giveup_fpu et al will
	 * modify it.  We hold onto it to see whether the task used
	 * FP & vector regs.
	 */
	thr->tm_orig_msr = thr->regs->msr;

	TM_DEBUG("--- tm_reclaim on pid %d (NIP=%lx, "
		 "ccr=%lx, msr=%lx, trap=%lx)\n",
		 tsk->pid, thr->regs->nip,
		 thr->regs->ccr, thr->regs->msr,
		 thr->regs->trap);

	tm_reclaim(thr, thr->regs->msr, TM_CAUSE_RESCHED);

	TM_DEBUG("--- tm_reclaim on pid %d complete\n",
		 tsk->pid);

out_and_saveregs:
	/* Always save the regs here, even if a transaction's not active.
	 * This context-switches a thread's TM info SPRs.  We do it here to
	 * be consistent with the restore path (in recheckpoint) which
	 * cannot happen later in _switch().
	 */
	tm_save_sprs(thr);
}

static inline void tm_recheckpoint_new_task(struct task_struct *new)
{
	unsigned long msr;

	if (!cpu_has_feature(CPU_FTR_TM))
		return;

	/* Recheckpoint the registers of the thread we're about to switch to.
	 *
	 * If the task was using FP, we non-lazily reload both the original and
	 * the speculative FP register states.  This is because the kernel
	 * doesn't see if/when a TM rollback occurs, so if we take an FP
	 * unavailable later, we are unable to determine which set of FP regs
	 * need to be restored.
	 */
	if (!new->thread.regs)
		return;

	/* The TM SPRs are restored here, so that TEXASR.FS can be set
	 * before the trecheckpoint and no explosion occurs.
	 */
	tm_restore_sprs(&new->thread);

	if (!MSR_TM_ACTIVE(new->thread.regs->msr))
		return;
	msr = new->thread.tm_orig_msr;
	/* Recheckpoint to restore original checkpointed register state. */
	TM_DEBUG("*** tm_recheckpoint of pid %d "
		 "(new->msr 0x%lx, new->origmsr 0x%lx)\n",
		 new->pid, new->thread.regs->msr, msr);

	/* This loads the checkpointed FP/VEC state, if used */
	tm_recheckpoint(&new->thread, msr);

	/* This loads the speculative FP/VEC state, if used */
	if (msr & MSR_FP) {
		do_load_up_transact_fpu(&new->thread);
		new->thread.regs->msr |=
			(MSR_FP | new->thread.fpexc_mode);
	}
#ifdef CONFIG_ALTIVEC
	if (msr & MSR_VEC) {
		do_load_up_transact_altivec(&new->thread);
		new->thread.regs->msr |= MSR_VEC;
	}
#endif
	/* We may as well turn on VSX too since all the state is restored now */
	if (msr & MSR_VSX)
		new->thread.regs->msr |= MSR_VSX;

	TM_DEBUG("*** tm_recheckpoint of pid %d complete "
		 "(kernel msr 0x%lx)\n",
		 new->pid, mfmsr());
}

static inline void __switch_to_tm(struct task_struct *prev)
{
	if (cpu_has_feature(CPU_FTR_TM)) {
		tm_enable();
		tm_reclaim_task(prev);
	}
}
#else
#define tm_recheckpoint_new_task(new)
#define __switch_to_tm(prev)
#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */

struct task_struct *__switch_to(struct task_struct *prev,
	struct task_struct *new)
{
	struct thread_struct *new_thread, *old_thread;
	struct task_struct *last;
#ifdef CONFIG_PPC_BOOK3S_64
	struct ppc64_tlb_batch *batch;
#endif

	WARN_ON(!irqs_disabled());

	/* Back up the TAR across context switches.
	 * Note that the TAR is not available for use in the kernel.  (To
	 * provide this, the TAR should be backed up/restored on exception
	 * entry/exit instead, and be in pt_regs.  FIXME, this should be in
	 * pt_regs anyway (for debug).)
	 * Save the TAR here before we do treclaim/trecheckpoint as these
	 * will change the TAR.
	 */
	save_tar(&prev->thread);

	__switch_to_tm(prev);

#ifdef CONFIG_SMP
	/* avoid complexity of lazy save/restore of fpu
	 * by just saving it every time we switch out if
	 * this task used the fpu during the last quantum.
	 *
	 * If it tries to use the fpu again, it'll trap and
	 * reload its fp regs.  So we don't have to do a restore
	 * every switch, just a save.
	 *  -- Cort
	 */
	if (prev->thread.regs && (prev->thread.regs->msr & MSR_FP))
		giveup_fpu(prev);
#ifdef CONFIG_ALTIVEC
	/*
	 * If the previous thread used altivec in the last quantum
	 * (thus changing altivec regs) then save them.
	 * We used to check the VRSAVE register but not all apps
	 * set it, so we don't rely on it now (and in fact we need
	 * to save & restore VSCR even if VRSAVE == 0).  -- paulus
	 *
	 * On SMP we always save/restore altivec regs just to avoid the
	 * complexity of changing processors.
	 *  -- Cort
	 */
	if (prev->thread.regs && (prev->thread.regs->msr & MSR_VEC))
		giveup_altivec(prev);
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_VSX
	if (prev->thread.regs && (prev->thread.regs->msr & MSR_VSX))
		/* VMX and FPU registers are already saved here */
		__giveup_vsx(prev);
#endif /* CONFIG_VSX */
#ifdef CONFIG_SPE
	/*
	 * If the previous thread used spe in the last quantum
	 * (thus changing spe regs) then save them.
	 *
	 * On SMP we always save/restore spe regs just to avoid the
	 * complexity of changing processors.
	 */
	if ((prev->thread.regs && (prev->thread.regs->msr & MSR_SPE)))
		giveup_spe(prev);
#endif /* CONFIG_SPE */

#else  /* CONFIG_SMP */
#ifdef CONFIG_ALTIVEC
	/* Avoid the trap.  On smp this never happens since
	 * we don't set last_task_used_altivec -- Cort
	 */
	if (new->thread.regs && last_task_used_altivec == new)
		new->thread.regs->msr |= MSR_VEC;
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_VSX
	if (new->thread.regs && last_task_used_vsx == new)
		new->thread.regs->msr |= MSR_VSX;
#endif /* CONFIG_VSX */
#ifdef CONFIG_SPE
	/* Avoid the trap.  On smp this never happens since
	 * we don't set last_task_used_spe
	 */
	if (new->thread.regs && last_task_used_spe == new)
		new->thread.regs->msr |= MSR_SPE;
#endif /* CONFIG_SPE */

#endif /* CONFIG_SMP */

#ifdef CONFIG_PPC_ADV_DEBUG_REGS
	switch_booke_debug_regs(&new->thread);
#else
/*
 * For PPC_BOOK3S_64, we use the hw-breakpoint interfaces that would
 * schedule DABR
 */
#ifndef CONFIG_HAVE_HW_BREAKPOINT
	if (unlikely(!hw_brk_match(&__get_cpu_var(current_brk), &new->thread.hw_brk)))
		set_breakpoint(&new->thread.hw_brk);
#endif /* CONFIG_HAVE_HW_BREAKPOINT */
#endif

	new_thread = &new->thread;
	old_thread = &current->thread;

#ifdef CONFIG_PPC64
	/*
	 * Collect processor utilization data per process
	 */
	if (firmware_has_feature(FW_FEATURE_SPLPAR)) {
		struct cpu_usage *cu = &__get_cpu_var(cpu_usage_array);
		long unsigned start_tb, current_tb;
		start_tb = old_thread->start_tb;
		cu->current_tb = current_tb = mfspr(SPRN_PURR);
		old_thread->accum_tb += (current_tb - start_tb);
		new_thread->start_tb = current_tb;
	}
#endif /* CONFIG_PPC64 */

#ifdef CONFIG_PPC_BOOK3S_64
	batch = &__get_cpu_var(ppc64_tlb_batch);
	if (batch->active) {
		current_thread_info()->local_flags |= _TLF_LAZY_MMU;
		if (batch->index)
			__flush_tlb_pending(batch);
		batch->active = 0;
	}
#endif /* CONFIG_PPC_BOOK3S_64 */

	/*
	 * We can't take a PMU exception inside _switch() since there is a
	 * window where the kernel stack SLB and the kernel stack are out
	 * of sync.  Hard disable here.
	 */
	hard_irq_disable();

	tm_recheckpoint_new_task(new);

	last = _switch(old_thread, new_thread);

#ifdef CONFIG_PPC_BOOK3S_64
	if (current_thread_info()->local_flags & _TLF_LAZY_MMU) {
		current_thread_info()->local_flags &= ~_TLF_LAZY_MMU;
		batch = &__get_cpu_var(ppc64_tlb_batch);
		batch->active = 1;
	}
#endif /* CONFIG_PPC_BOOK3S_64 */

	return last;
}

static int instructions_to_print = 16;
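
/*
 * Dump instructions_to_print (16) words around the faulting address:
 * with the 3/4 split below that is 12 instructions before regs->nip
 * and 4 from regs->nip onwards, with the nip word printed in <>.
 */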
static void show_instructions(struct pt_regs *regs)
{
	int i;
	unsigned long pc = regs->nip - (instructions_to_print * 3 / 4 *
			sizeof(int));

	printk("Instruction dump:");

	for (i = 0; i < instructions_to_print; i++) {
		int instr;

		if (!(i % 8))
			printk("\n");

#if !defined(CONFIG_BOOKE)
		/* If executing with the IMMU off, adjust pc rather
		 * than print XXXXXXXX.
		 */
		if (!(regs->msr & MSR_IR))
			pc = (unsigned long)phys_to_virt(pc);
#endif

		/* We use __get_user here *only* to avoid an OOPS on a
		 * bad address because the pc *should* only be a
		 * kernel address.
		 */
		if (!__kernel_text_address(pc) ||
		     __get_user(instr, (unsigned int __user *)pc)) {
			printk(KERN_CONT "XXXXXXXX ");
		} else {
			if (regs->nip == pc)
				printk(KERN_CONT "<%08x> ", instr);
			else
				printk(KERN_CONT "%08x ", instr);
		}

		pc += sizeof(int);
	}

	printk("\n");
}

static struct regbit {
	unsigned long bit;
	const char *name;
} msr_bits[] = {
#if defined(CONFIG_PPC64) && !defined(CONFIG_BOOKE)
	{MSR_SF,	"SF"},
	{MSR_HV,	"HV"},
#endif
	{MSR_VEC,	"VEC"},
	{MSR_VSX,	"VSX"},
#ifdef CONFIG_BOOKE
	{MSR_CE,	"CE"},
#endif
	{MSR_EE,	"EE"},
	{MSR_PR,	"PR"},
	{MSR_FP,	"FP"},
	{MSR_ME,	"ME"},
#ifdef CONFIG_BOOKE
	{MSR_DE,	"DE"},
#else
	{MSR_SE,	"SE"},
	{MSR_BE,	"BE"},
#endif
	{MSR_IR,	"IR"},
	{MSR_DR,	"DR"},
	{MSR_PMM,	"PMM"},
#ifndef CONFIG_BOOKE
	{MSR_RI,	"RI"},
	{MSR_LE,	"LE"},
#endif
	{0,		NULL}
};
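
/*
 * Print the names of the bits set in val; e.g.
 * printbits(regs->msr, msr_bits) might emit "<SF,EE,PR,FP,IR,DR>".
 */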
static void printbits(unsigned long val, struct regbit *bits)
{
	const char *sep = "";

	printk("<");
	for (; bits->bit; ++bits)
		if (val & bits->bit) {
			printk("%s%s", sep, bits->name);
			sep = ",";
		}
	printk(">");
}

#ifdef CONFIG_PPC64
#define REG		"%016lx"
#define REGS_PER_LINE	4
#define LAST_VOLATILE	13
#else
#define REG		"%08lx"
#define REGS_PER_LINE	8
#define LAST_VOLATILE	12
#endif

void show_regs(struct pt_regs * regs)
{
	int i, trap;

	show_regs_print_info(KERN_DEFAULT);

	printk("NIP: "REG" LR: "REG" CTR: "REG"\n",
	       regs->nip, regs->link, regs->ctr);
	printk("REGS: %p TRAP: %04lx %s (%s)\n",
	       regs, regs->trap, print_tainted(), init_utsname()->release);
	printk("MSR: "REG" ", regs->msr);
	printbits(regs->msr, msr_bits);
	printk(" CR: %08lx XER: %08lx\n", regs->ccr, regs->xer);
#ifdef CONFIG_PPC64
	printk("SOFTE: %ld\n", regs->softe);
#endif
	trap = TRAP(regs);
	if ((regs->trap != 0xc00) && cpu_has_feature(CPU_FTR_CFAR))
		printk("CFAR: "REG"\n", regs->orig_gpr3);
	if (trap == 0x300 || trap == 0x600)
#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
		printk("DEAR: "REG", ESR: "REG"\n", regs->dar, regs->dsisr);
#else
		printk("DAR: "REG", DSISR: %08lx\n", regs->dar, regs->dsisr);
#endif

	for (i = 0; i < 32; i++) {
		if ((i % REGS_PER_LINE) == 0)
			printk("\nGPR%02d: ", i);
		printk(REG " ", regs->gpr[i]);
		if (i == LAST_VOLATILE && !FULL_REGS(regs))
			break;
	}
	printk("\n");
#ifdef CONFIG_KALLSYMS
	/*
	 * Lookup NIP late so we have the best chance of getting the
	 * above info out without failing
	 */
	printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip);
	printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link);
#endif
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	printk("PACATMSCRATCH [%llx]\n", get_paca()->tm_scratch);
#endif
	show_stack(current, (unsigned long *) regs->gpr[1]);
	if (!user_mode(regs))
		show_instructions(regs);
}

void exit_thread(void)
{
	discard_lazy_cpu_state();
}

void flush_thread(void)
{
	discard_lazy_cpu_state();

#ifdef CONFIG_HAVE_HW_BREAKPOINT
	flush_ptrace_hw_breakpoint(current);
#else /* CONFIG_HAVE_HW_BREAKPOINT */
	set_debug_reg_defaults(&current->thread);
#endif /* CONFIG_HAVE_HW_BREAKPOINT */
}

void
release_thread(struct task_struct *t)
{
}

/*
 * This gets called so that we can store coprocessor state into memory and
 * copy the current task into the new thread.
 */
int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{
	flush_fp_to_thread(src);
	flush_altivec_to_thread(src);
	flush_vsx_to_thread(src);
	flush_spe_to_thread(src);

	*dst = *src;

	clear_task_ebb(dst);

	return 0;
}

/*
 * Copy a thread..
 */
extern unsigned long dscr_default; /* defined in arch/powerpc/kernel/sysfs.c */

int copy_thread(unsigned long clone_flags, unsigned long usp,
		unsigned long arg, struct task_struct *p)
{
	struct pt_regs *childregs, *kregs;
	extern void ret_from_fork(void);
	extern void ret_from_kernel_thread(void);
	void (*f)(void);
	unsigned long sp = (unsigned long)task_stack_page(p) + THREAD_SIZE;

	/* Copy registers */
	sp -= sizeof(struct pt_regs);
	childregs = (struct pt_regs *) sp;
	if (unlikely(p->flags & PF_KTHREAD)) {
		struct thread_info *ti = (void *)task_stack_page(p);
		memset(childregs, 0, sizeof(struct pt_regs));
		childregs->gpr[1] = sp + sizeof(struct pt_regs);
		childregs->gpr[14] = usp;	/* function */
#ifdef CONFIG_PPC64
		clear_tsk_thread_flag(p, TIF_32BIT);
		childregs->softe = 1;
#endif
		childregs->gpr[15] = arg;
		p->thread.regs = NULL;	/* no user register state */
		ti->flags |= _TIF_RESTOREALL;
		f = ret_from_kernel_thread;
	} else {
		struct pt_regs *regs = current_pt_regs();
		CHECK_FULL_REGS(regs);
		*childregs = *regs;
		if (usp)
			childregs->gpr[1] = usp;
		p->thread.regs = childregs;
		childregs->gpr[3] = 0;	/* Result from fork() */
		if (clone_flags & CLONE_SETTLS) {
#ifdef CONFIG_PPC64
			if (!is_32bit_task())
				childregs->gpr[13] = childregs->gpr[6];
			else
#endif
				childregs->gpr[2] = childregs->gpr[6];
		}

		f = ret_from_fork;
	}
	sp -= STACK_FRAME_OVERHEAD;

	/*
	 * The way this works is that at some point in the future
	 * some task will call _switch to switch to the new task.
	 * That will pop off the stack frame created below and start
	 * the new task running at ret_from_fork.  The new task will
	 * do some housekeeping and then return from the fork or clone
	 * system call, using the stack frame created above.
	 */
	((unsigned long *)sp)[0] = 0;
	sp -= sizeof(struct pt_regs);
	kregs = (struct pt_regs *) sp;
	sp -= STACK_FRAME_OVERHEAD;
	p->thread.ksp = sp;
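	/*
	 * Rough sketch of the child's kernel stack as built above,
	 * from high to low addresses:
	 *   childregs            - user regs (or kthread start fn/arg)
	 *   STACK_FRAME_OVERHEAD - back chain zeroed to terminate unwinds
	 *   kregs                - frame consumed by _switch()
	 *   STACK_FRAME_OVERHEAD - p->thread.ksp points here
	 */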
#ifdef CONFIG_PPC32
	p->thread.ksp_limit = (unsigned long)task_stack_page(p) +
				_ALIGN_UP(sizeof(struct thread_info), 16);
#endif
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	p->thread.ptrace_bps[0] = NULL;
#endif

	p->thread.fp_save_area = NULL;
#ifdef CONFIG_ALTIVEC
	p->thread.vr_save_area = NULL;
#endif

#ifdef CONFIG_PPC_STD_MMU_64
	if (mmu_has_feature(MMU_FTR_SLB)) {
		unsigned long sp_vsid;
		unsigned long llp = mmu_psize_defs[mmu_linear_psize].sllp;

		if (mmu_has_feature(MMU_FTR_1T_SEGMENT))
			sp_vsid = get_kernel_vsid(sp, MMU_SEGSIZE_1T)
				<< SLB_VSID_SHIFT_1T;
		else
			sp_vsid = get_kernel_vsid(sp, MMU_SEGSIZE_256M)
				<< SLB_VSID_SHIFT;
		sp_vsid |= SLB_VSID_KERNEL | llp;
		p->thread.ksp_vsid = sp_vsid;
	}
#endif /* CONFIG_PPC_STD_MMU_64 */
#ifdef CONFIG_PPC64
	if (cpu_has_feature(CPU_FTR_DSCR)) {
		p->thread.dscr_inherit = current->thread.dscr_inherit;
		p->thread.dscr = current->thread.dscr;
	}
	if (cpu_has_feature(CPU_FTR_HAS_PPR))
		p->thread.ppr = INIT_PPR;
#endif
	/*
	 * The PPC64 ABI makes use of a TOC to contain function
	 * pointers.  The function (ret_from_except) is actually a pointer
	 * to the TOC entry.  The first entry is a pointer to the actual
	 * function.
	 */
#ifdef CONFIG_PPC64
	kregs->nip = *((unsigned long *)f);
#else
	kregs->nip = (unsigned long)f;
#endif
	return 0;
}

/*
 * Set up a thread for executing a new program
 */
void start_thread(struct pt_regs *regs, unsigned long start, unsigned long sp)
{
#ifdef CONFIG_PPC64
	unsigned long load_addr = regs->gpr[2];	/* saved by ELF_PLAT_INIT */
#endif

	/*
	 * If we exec out of a kernel thread then thread.regs will not be
	 * set.  Do it now.
	 */
	if (!current->thread.regs) {
		struct pt_regs *regs = task_stack_page(current) + THREAD_SIZE;
		current->thread.regs = regs - 1;
	}

	memset(regs->gpr, 0, sizeof(regs->gpr));
	regs->ctr = 0;
	regs->link = 0;
	regs->xer = 0;
	regs->ccr = 0;
	regs->gpr[1] = sp;

	/*
	 * We have just cleared all the nonvolatile GPRs, so make
	 * FULL_REGS(regs) return true.  This is necessary to allow
	 * ptrace to examine the thread immediately after exec.
	 */
	regs->trap &= ~1UL;

#ifdef CONFIG_PPC32
	regs->mq = 0;
	regs->nip = start;
	regs->msr = MSR_USER;
#else
	if (!is_32bit_task()) {
		unsigned long entry;

		if (is_elf2_task()) {
			/* Look ma, no function descriptors! */
			entry = start;

			/*
			 * Ulrich says:
			 *   The latest iteration of the ABI requires that when
			 *   calling a function (at its global entry point),
			 *   the caller must ensure r12 holds the entry point
			 *   address (so that the function can quickly
			 *   establish addressability).
			 */
			regs->gpr[12] = start;
			/* Make sure that's restored on entry to userspace. */
			set_thread_flag(TIF_RESTOREALL);
		} else {
			unsigned long toc;

			/* start is a relocated pointer to the function
			 * descriptor for the elf _start routine.  The first
			 * entry in the function descriptor is the entry
			 * address of _start and the second entry is the TOC
			 * value we need to use.
			 */
			__get_user(entry, (unsigned long __user *)start);
			__get_user(toc, (unsigned long __user *)start+1);

			/* Check whether the e_entry function descriptor entries
			 * need to be relocated before we can use them.
			 */
			if (load_addr != 0) {
				entry += load_addr;
				toc   += load_addr;
			}
			regs->gpr[2] = toc;
		}
		regs->nip = entry;
		regs->msr = MSR_USER64;
	} else {
		regs->nip = start;
		regs->gpr[2] = 0;
		regs->msr = MSR_USER32;
	}
#endif
	discard_lazy_cpu_state();
#ifdef CONFIG_VSX
	current->thread.used_vsr = 0;
#endif
	memset(&current->thread.fp_state, 0, sizeof(current->thread.fp_state));
	current->thread.fp_save_area = NULL;
#ifdef CONFIG_ALTIVEC
	memset(&current->thread.vr_state, 0, sizeof(current->thread.vr_state));
	current->thread.vr_state.vscr.u[3] = 0x00010000; /* Java mode disabled */
	current->thread.vr_save_area = NULL;
	current->thread.vrsave = 0;
	current->thread.used_vr = 0;
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_SPE
	memset(current->thread.evr, 0, sizeof(current->thread.evr));
	current->thread.acc = 0;
	current->thread.spefscr = 0;
	current->thread.used_spe = 0;
#endif /* CONFIG_SPE */
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	if (cpu_has_feature(CPU_FTR_TM))
		regs->msr |= MSR_TM;
	current->thread.tm_tfhar = 0;
	current->thread.tm_texasr = 0;
	current->thread.tm_tfiar = 0;
#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
}

#define PR_FP_ALL_EXCEPT (PR_FP_EXC_DIV | PR_FP_EXC_OVF | PR_FP_EXC_UND \
		| PR_FP_EXC_RES | PR_FP_EXC_INV)
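
/*
 * set_fpexc_mode()/get_fpexc_mode() back the PR_SET_FPEXC and
 * PR_GET_FPEXC prctl(2) operations.
 */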
int set_fpexc_mode(struct task_struct *tsk, unsigned int val)
{
	struct pt_regs *regs = tsk->thread.regs;

	/* This is a bit hairy.  If we are an SPE enabled processor
	 * (have embedded fp) we store the IEEE exception enable flags in
	 * fpexc_mode.  fpexc_mode is also used for setting FP exception
	 * mode (async, precise, disabled) for 'Classic' FP. */
	if (val & PR_FP_EXC_SW_ENABLE) {
#ifdef CONFIG_SPE
		if (cpu_has_feature(CPU_FTR_SPE)) {
			tsk->thread.fpexc_mode = val &
				(PR_FP_EXC_SW_ENABLE | PR_FP_ALL_EXCEPT);
			return 0;
		} else {
			return -EINVAL;
		}
#else
		return -EINVAL;
#endif
	}

	/* on a CONFIG_SPE this does not hurt us.  The bits that
	 * __pack_fe01 use do not overlap with bits used for
	 * PR_FP_EXC_SW_ENABLE.  Additionally, the MSR[FE0,FE1] bits
	 * on CONFIG_SPE implementations are reserved so writing to
	 * them does not change anything */
	if (val > PR_FP_EXC_PRECISE)
		return -EINVAL;
	tsk->thread.fpexc_mode = __pack_fe01(val);
	if (regs != NULL && (regs->msr & MSR_FP) != 0)
		regs->msr = (regs->msr & ~(MSR_FE0|MSR_FE1))
			| tsk->thread.fpexc_mode;
	return 0;
}

int get_fpexc_mode(struct task_struct *tsk, unsigned long adr)
{
	unsigned int val;

	if (tsk->thread.fpexc_mode & PR_FP_EXC_SW_ENABLE)
#ifdef CONFIG_SPE
		if (cpu_has_feature(CPU_FTR_SPE))
			val = tsk->thread.fpexc_mode;
		else
			return -EINVAL;
#else
		return -EINVAL;
#endif
	else
		val = __unpack_fe01(tsk->thread.fpexc_mode);
	return put_user(val, (unsigned int __user *) adr);
}
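
/*
 * set_endian()/get_endian() back the PR_SET_ENDIAN and PR_GET_ENDIAN
 * prctl(2) operations; they simply flip MSR_LE in the task's user
 * register state, subject to the CPU feature checks below.
 */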
int set_endian(struct task_struct *tsk, unsigned int val)
{
	struct pt_regs *regs = tsk->thread.regs;

	if ((val == PR_ENDIAN_LITTLE && !cpu_has_feature(CPU_FTR_REAL_LE)) ||
	    (val == PR_ENDIAN_PPC_LITTLE && !cpu_has_feature(CPU_FTR_PPC_LE)))
		return -EINVAL;

	if (regs == NULL)
		return -EINVAL;

	if (val == PR_ENDIAN_BIG)
		regs->msr &= ~MSR_LE;
	else if (val == PR_ENDIAN_LITTLE || val == PR_ENDIAN_PPC_LITTLE)
		regs->msr |= MSR_LE;
	else
		return -EINVAL;

	return 0;
}

int get_endian(struct task_struct *tsk, unsigned long adr)
{
	struct pt_regs *regs = tsk->thread.regs;
	unsigned int val;

	if (!cpu_has_feature(CPU_FTR_PPC_LE) &&
	    !cpu_has_feature(CPU_FTR_REAL_LE))
		return -EINVAL;

	if (regs == NULL)
		return -EINVAL;

	if (regs->msr & MSR_LE) {
		if (cpu_has_feature(CPU_FTR_REAL_LE))
			val = PR_ENDIAN_LITTLE;
		else
			val = PR_ENDIAN_PPC_LITTLE;
	} else
		val = PR_ENDIAN_BIG;

	return put_user(val, (unsigned int __user *)adr);
}

int set_unalign_ctl(struct task_struct *tsk, unsigned int val)
{
	tsk->thread.align_ctl = val;
	return 0;
}

int get_unalign_ctl(struct task_struct *tsk, unsigned long adr)
{
	return put_user(tsk->thread.align_ctl, (unsigned int __user *)adr);
}

static inline int valid_irq_stack(unsigned long sp, struct task_struct *p,
				  unsigned long nbytes)
{
	unsigned long stack_page;
	unsigned long cpu = task_cpu(p);

	/*
	 * Avoid crashing if the stack has overflowed and corrupted
	 * task_cpu(p), which is in the thread_info struct.
	 */
	if (cpu < NR_CPUS && cpu_possible(cpu)) {
		stack_page = (unsigned long) hardirq_ctx[cpu];
		if (sp >= stack_page + sizeof(struct thread_struct)
		    && sp <= stack_page + THREAD_SIZE - nbytes)
			return 1;

		stack_page = (unsigned long) softirq_ctx[cpu];
		if (sp >= stack_page + sizeof(struct thread_struct)
		    && sp <= stack_page + THREAD_SIZE - nbytes)
			return 1;
	}
	return 0;
}

int validate_sp(unsigned long sp, struct task_struct *p,
		unsigned long nbytes)
{
	unsigned long stack_page = (unsigned long)task_stack_page(p);

	if (sp >= stack_page + sizeof(struct thread_struct)
	    && sp <= stack_page + THREAD_SIZE - nbytes)
		return 1;

	return valid_irq_stack(sp, p, nbytes);
}
EXPORT_SYMBOL(validate_sp);
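
/*
 * Walk a sleeping task's kernel stack (at most 16 frames) and return
 * the first saved return address outside the scheduler, i.e. the
 * channel the task is waiting in.
 */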
unsigned long get_wchan(struct task_struct *p)
{
	unsigned long ip, sp;
	int count = 0;

	if (!p || p == current || p->state == TASK_RUNNING)
		return 0;

	sp = p->thread.ksp;
	if (!validate_sp(sp, p, STACK_FRAME_OVERHEAD))
		return 0;

	do {
		sp = *(unsigned long *)sp;
		if (!validate_sp(sp, p, STACK_FRAME_OVERHEAD))
			return 0;
		if (count > 0) {
			ip = ((unsigned long *)sp)[STACK_FRAME_LR_SAVE];
			if (!in_sched_functions(ip))
				return ip;
		}
	} while (count++ < 16);
	return 0;
}

static int kstack_depth_to_print = CONFIG_PRINT_STACK_DEPTH;

void show_stack(struct task_struct *tsk, unsigned long *stack)
{
	unsigned long sp, ip, lr, newsp;
	int count = 0;
	int firstframe = 1;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	int curr_frame = current->curr_ret_stack;
	extern void return_to_handler(void);
	unsigned long rth = (unsigned long)return_to_handler;
	unsigned long mrth = -1;
#ifdef CONFIG_PPC64
	extern void mod_return_to_handler(void);
	rth = *(unsigned long *)rth;
	mrth = (unsigned long)mod_return_to_handler;
	mrth = *(unsigned long *)mrth;
#endif
#endif

	sp = (unsigned long) stack;
	if (tsk == NULL)
		tsk = current;
	if (sp == 0) {
		if (tsk == current)
			asm("mr %0,1" : "=r" (sp));
		else
			sp = tsk->thread.ksp;
	}

	lr = 0;
	printk("Call Trace:\n");
	do {
		if (!validate_sp(sp, tsk, STACK_FRAME_OVERHEAD))
			return;

		stack = (unsigned long *) sp;
		newsp = stack[0];
		ip = stack[STACK_FRAME_LR_SAVE];
		if (!firstframe || ip != lr) {
			printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip);
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
			if ((ip == rth || ip == mrth) && curr_frame >= 0) {
				printk(" (%pS)",
				       (void *)current->ret_stack[curr_frame].ret);
				curr_frame--;
			}
#endif
			if (firstframe)
				printk(" (unreliable)");
			printk("\n");
		}
		firstframe = 0;

		/*
		 * See if this is an exception frame.
		 * We look for the "regshere" marker in the current frame.
		 */
		if (validate_sp(sp, tsk, STACK_INT_FRAME_SIZE)
		    && stack[STACK_FRAME_MARKER] == STACK_FRAME_REGS_MARKER) {
			struct pt_regs *regs = (struct pt_regs *)
				(sp + STACK_FRAME_OVERHEAD);
			lr = regs->link;
			printk("--- Exception: %lx at %pS\n    LR = %pS\n",
			       regs->trap, (void *)regs->nip, (void *)lr);
			firstframe = 1;
		}

		sp = newsp;
	} while (count++ < kstack_depth_to_print);
}

#ifdef CONFIG_PPC64
/* Called with hard IRQs off */
void notrace __ppc64_runlatch_on(void)
{
	struct thread_info *ti = current_thread_info();
	unsigned long ctrl;

	ctrl = mfspr(SPRN_CTRLF);
	ctrl |= CTRL_RUNLATCH;
	mtspr(SPRN_CTRLT, ctrl);

	ti->local_flags |= _TLF_RUNLATCH;
}

/* Called with hard IRQs off */
void notrace __ppc64_runlatch_off(void)
{
	struct thread_info *ti = current_thread_info();
	unsigned long ctrl;

	ti->local_flags &= ~_TLF_RUNLATCH;

	ctrl = mfspr(SPRN_CTRLF);
	ctrl &= ~CTRL_RUNLATCH;
	mtspr(SPRN_CTRLT, ctrl);
}
#endif /* CONFIG_PPC64 */

unsigned long arch_align_stack(unsigned long sp)
{
	if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
		sp -= get_random_int() & ~PAGE_MASK;
	return sp & ~0xf;
}

static inline unsigned long brk_rnd(void)
{
	unsigned long rnd = 0;

	/* 8MB for 32bit, 1GB for 64bit */
	if (is_32bit_task())
		rnd = (long)(get_random_int() % (1<<(23-PAGE_SHIFT)));
	else
		rnd = (long)(get_random_int() % (1<<(30-PAGE_SHIFT)));

	return rnd << PAGE_SHIFT;
}
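
/* For example, with 4KB pages (PAGE_SHIFT = 12) a 32-bit task picks
 * from 1 << (23 - 12) = 2048 page offsets, i.e. 8MB of randomisation. */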
unsigned long arch_randomize_brk(struct mm_struct *mm)
{
	unsigned long base = mm->brk;
	unsigned long ret;

#ifdef CONFIG_PPC_STD_MMU_64
	/*
	 * If we are using 1TB segments and we are allowed to randomise
	 * the heap, we can put it above 1TB so it is backed by a 1TB
	 * segment.  Otherwise the heap will be in the bottom 1TB
	 * which always uses 256MB segments and this may result in a
	 * performance penalty.
	 */
	if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T))
		base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T);
#endif

	ret = PAGE_ALIGN(base + brk_rnd());

	if (ret < mm->brk)
		return mm->brk;

	return ret;
}

unsigned long randomize_et_dyn(unsigned long base)
{
	unsigned long ret = PAGE_ALIGN(base + brk_rnd());

	if (ret < base)
		return base;

	return ret;
}