/*
 * Derived from "arch/i386/kernel/process.c"
 * Copyright (C) 1995 Linus Torvalds
 *
 * Updated and modified by Cort Dougan (cort@cs.nmt.edu) and
 * Paul Mackerras (paulus@cs.anu.edu.au)
 *
 * PowerPC version
 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/elf.h>
#include <linux/init.h>
#include <linux/prctl.h>
#include <linux/init_task.h>
#include <linux/export.h>
#include <linux/kallsyms.h>
#include <linux/mqueue.h>
#include <linux/hardirq.h>
#include <linux/utsname.h>
#include <linux/ftrace.h>
#include <linux/kernel_stat.h>
#include <linux/personality.h>
#include <linux/random.h>
#include <linux/hw_breakpoint.h>

#include <asm/pgtable.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/processor.h>
#include <asm/mmu.h>
#include <asm/prom.h>
#include <asm/machdep.h>
#include <asm/time.h>
#include <asm/runlatch.h>
#include <asm/syscalls.h>
#include <asm/switch_to.h>
#include <asm/debug.h>
#ifdef CONFIG_PPC64
#include <asm/firmware.h>
#endif
#include <linux/kprobes.h>
#include <linux/kdebug.h>

extern unsigned long _get_SP(void);

#ifndef CONFIG_SMP
struct task_struct *last_task_used_math = NULL;
struct task_struct *last_task_used_altivec = NULL;
struct task_struct *last_task_used_vsx = NULL;
struct task_struct *last_task_used_spe = NULL;
#endif

/*
 * Make sure the floating-point register state in the
 * thread_struct is up to date for task tsk.
 */
void flush_fp_to_thread(struct task_struct *tsk)
{
        if (tsk->thread.regs) {
                /*
                 * We need to disable preemption here because if we didn't,
                 * another process could get scheduled after the regs->msr
                 * test but before we have finished saving the FP registers
                 * to the thread_struct.  That process could take over the
                 * FPU, and then when we get scheduled again we would store
                 * bogus values for the remaining FP registers.
                 */
                preempt_disable();
                if (tsk->thread.regs->msr & MSR_FP) {
#ifdef CONFIG_SMP
                        /*
                         * This should only ever be called for current or
                         * for a stopped child process.  Since we save away
                         * the FP register state on context switch on SMP,
                         * there is something wrong if a stopped child appears
                         * to still have its FP state in the CPU registers.
                         */
                        BUG_ON(tsk != current);
#endif
                        giveup_fpu(tsk);
                }
                preempt_enable();
        }
}
EXPORT_SYMBOL_GPL(flush_fp_to_thread);

void enable_kernel_fp(void)
{
        WARN_ON(preemptible());

#ifdef CONFIG_SMP
        if (current->thread.regs && (current->thread.regs->msr & MSR_FP))
                giveup_fpu(current);
        else
                giveup_fpu(NULL);       /* just enables FP for kernel */
#else
        giveup_fpu(last_task_used_math);
#endif /* CONFIG_SMP */
}
EXPORT_SYMBOL(enable_kernel_fp);

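/*
 * Illustrative sketch (not part of the original file): kernel code that
 * wants to issue FP instructions is expected to pin itself to the CPU
 * first, since the WARN_ON(preemptible()) above requires a
 * non-preemptible context:
 *
 *      preempt_disable();
 *      enable_kernel_fp();
 *      ... issue FP instructions ...
 *      preempt_enable();
 *
 * The same pattern applies to enable_kernel_altivec() and
 * enable_kernel_spe() below.
 */
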
#ifdef CONFIG_ALTIVEC
void enable_kernel_altivec(void)
{
        WARN_ON(preemptible());

#ifdef CONFIG_SMP
        if (current->thread.regs && (current->thread.regs->msr & MSR_VEC))
                giveup_altivec(current);
        else
                giveup_altivec_notask();
#else
        giveup_altivec(last_task_used_altivec);
#endif /* CONFIG_SMP */
}
EXPORT_SYMBOL(enable_kernel_altivec);

/*
 * Make sure the VMX/Altivec register state in the
 * thread_struct is up to date for task tsk.
 */
void flush_altivec_to_thread(struct task_struct *tsk)
{
        if (tsk->thread.regs) {
                preempt_disable();
                if (tsk->thread.regs->msr & MSR_VEC) {
#ifdef CONFIG_SMP
                        BUG_ON(tsk != current);
#endif
                        giveup_altivec(tsk);
                }
                preempt_enable();
        }
}
EXPORT_SYMBOL_GPL(flush_altivec_to_thread);
#endif /* CONFIG_ALTIVEC */

#ifdef CONFIG_VSX
#if 0
/* not currently used, but some crazy RAID module might want to later */
void enable_kernel_vsx(void)
{
        WARN_ON(preemptible());

#ifdef CONFIG_SMP
        if (current->thread.regs && (current->thread.regs->msr & MSR_VSX))
                giveup_vsx(current);
        else
                giveup_vsx(NULL);       /* just enable vsx for kernel - force */
#else
        giveup_vsx(last_task_used_vsx);
#endif /* CONFIG_SMP */
}
EXPORT_SYMBOL(enable_kernel_vsx);
#endif

void giveup_vsx(struct task_struct *tsk)
{
        giveup_fpu(tsk);
        giveup_altivec(tsk);
        __giveup_vsx(tsk);
}

void flush_vsx_to_thread(struct task_struct *tsk)
{
        if (tsk->thread.regs) {
                preempt_disable();
                if (tsk->thread.regs->msr & MSR_VSX) {
#ifdef CONFIG_SMP
                        BUG_ON(tsk != current);
#endif
                        giveup_vsx(tsk);
                }
                preempt_enable();
        }
}
EXPORT_SYMBOL_GPL(flush_vsx_to_thread);
#endif /* CONFIG_VSX */

#ifdef CONFIG_SPE
void enable_kernel_spe(void)
{
        WARN_ON(preemptible());

#ifdef CONFIG_SMP
        if (current->thread.regs && (current->thread.regs->msr & MSR_SPE))
                giveup_spe(current);
        else
                giveup_spe(NULL);       /* just enable SPE for kernel - force */
#else
        giveup_spe(last_task_used_spe);
#endif /* CONFIG_SMP */
}
EXPORT_SYMBOL(enable_kernel_spe);

void flush_spe_to_thread(struct task_struct *tsk)
{
        if (tsk->thread.regs) {
                preempt_disable();
                if (tsk->thread.regs->msr & MSR_SPE) {
#ifdef CONFIG_SMP
                        BUG_ON(tsk != current);
#endif
                        tsk->thread.spefscr = mfspr(SPRN_SPEFSCR);
                        giveup_spe(tsk);
                }
                preempt_enable();
        }
}
#endif /* CONFIG_SPE */

#ifndef CONFIG_SMP
/*
 * If we are doing lazy switching of CPU state (FP, altivec or SPE),
 * and the current task has some state, discard it.
 */
void discard_lazy_cpu_state(void)
{
        preempt_disable();
        if (last_task_used_math == current)
                last_task_used_math = NULL;
#ifdef CONFIG_ALTIVEC
        if (last_task_used_altivec == current)
                last_task_used_altivec = NULL;
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_VSX
        if (last_task_used_vsx == current)
                last_task_used_vsx = NULL;
#endif /* CONFIG_VSX */
#ifdef CONFIG_SPE
        if (last_task_used_spe == current)
                last_task_used_spe = NULL;
#endif
        preempt_enable();
}
#endif /* CONFIG_SMP */

#ifdef CONFIG_PPC_ADV_DEBUG_REGS
void do_send_trap(struct pt_regs *regs, unsigned long address,
                  unsigned long error_code, int signal_code, int breakpt)
{
        siginfo_t info;

        current->thread.trap_nr = signal_code;
        if (notify_die(DIE_DABR_MATCH, "dabr_match", regs, error_code,
                        11, SIGSEGV) == NOTIFY_STOP)
                return;

        /* Deliver the signal to userspace */
        info.si_signo = SIGTRAP;
        info.si_errno = breakpt;        /* breakpoint or watchpoint id */
        info.si_code = signal_code;
        info.si_addr = (void __user *)address;
        force_sig_info(SIGTRAP, &info, current);
}
#else /* !CONFIG_PPC_ADV_DEBUG_REGS */
void do_dabr(struct pt_regs *regs, unsigned long address,
             unsigned long error_code)
{
        siginfo_t info;

        current->thread.trap_nr = TRAP_HWBKPT;
        if (notify_die(DIE_DABR_MATCH, "dabr_match", regs, error_code,
                        11, SIGSEGV) == NOTIFY_STOP)
                return;

        if (debugger_dabr_match(regs))
                return;

        /* Clear the DABR */
        set_dabr(0, 0);

        /* Deliver the signal to userspace */
        info.si_signo = SIGTRAP;
        info.si_errno = 0;
        info.si_code = TRAP_HWBKPT;
        info.si_addr = (void __user *)address;
        force_sig_info(SIGTRAP, &info, current);
}
#endif /* CONFIG_PPC_ADV_DEBUG_REGS */

static DEFINE_PER_CPU(unsigned long, current_dabr);

#ifdef CONFIG_PPC_ADV_DEBUG_REGS
/*
 * Set the debug registers back to their default "safe" values.
 */
static void set_debug_reg_defaults(struct thread_struct *thread)
{
        thread->iac1 = thread->iac2 = 0;
#if CONFIG_PPC_ADV_DEBUG_IACS > 2
        thread->iac3 = thread->iac4 = 0;
#endif
        thread->dac1 = thread->dac2 = 0;
#if CONFIG_PPC_ADV_DEBUG_DVCS > 0
        thread->dvc1 = thread->dvc2 = 0;
#endif
        thread->dbcr0 = 0;
#ifdef CONFIG_BOOKE
        /*
         * Force User/Supervisor bits to b11 (user-only MSR[PR]=1)
         */
        thread->dbcr1 = DBCR1_IAC1US | DBCR1_IAC2US |
                        DBCR1_IAC3US | DBCR1_IAC4US;
        /*
         * Force Data Address Compare User/Supervisor bits to be User-only
         * (0b11 MSR[PR]=1) and set all other bits in DBCR2 register to be 0.
         */
        thread->dbcr2 = DBCR2_DAC1US | DBCR2_DAC2US;
#else
        thread->dbcr1 = 0;
#endif
}

static void prime_debug_regs(struct thread_struct *thread)
{
        mtspr(SPRN_IAC1, thread->iac1);
        mtspr(SPRN_IAC2, thread->iac2);
#if CONFIG_PPC_ADV_DEBUG_IACS > 2
        mtspr(SPRN_IAC3, thread->iac3);
        mtspr(SPRN_IAC4, thread->iac4);
#endif
        mtspr(SPRN_DAC1, thread->dac1);
        mtspr(SPRN_DAC2, thread->dac2);
#if CONFIG_PPC_ADV_DEBUG_DVCS > 0
        mtspr(SPRN_DVC1, thread->dvc1);
        mtspr(SPRN_DVC2, thread->dvc2);
#endif
        mtspr(SPRN_DBCR0, thread->dbcr0);
        mtspr(SPRN_DBCR1, thread->dbcr1);
#ifdef CONFIG_BOOKE
        mtspr(SPRN_DBCR2, thread->dbcr2);
#endif
}

/*
 * If either the old or the new thread is making use of the debug
 * registers, set the debug registers from the values stored in the
 * new thread.
 */
static void switch_booke_debug_regs(struct thread_struct *new_thread)
{
        if ((current->thread.dbcr0 & DBCR0_IDM)
                || (new_thread->dbcr0 & DBCR0_IDM))
                prime_debug_regs(new_thread);
}
#else /* !CONFIG_PPC_ADV_DEBUG_REGS */
#ifndef CONFIG_HAVE_HW_BREAKPOINT
static void set_debug_reg_defaults(struct thread_struct *thread)
{
        if (thread->dabr) {
                thread->dabr = 0;
                thread->dabrx = 0;
                set_dabr(0, 0);
        }
}
#endif /* !CONFIG_HAVE_HW_BREAKPOINT */
#endif /* CONFIG_PPC_ADV_DEBUG_REGS */

int set_dabr(unsigned long dabr, unsigned long dabrx)
{
        __get_cpu_var(current_dabr) = dabr;

        if (ppc_md.set_dabr)
                return ppc_md.set_dabr(dabr, dabrx);

        /* XXX should we have a CPU_FTR_HAS_DABR ? */
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
        mtspr(SPRN_DAC1, dabr);
#ifdef CONFIG_PPC_47x
        isync();
#endif
#elif defined(CONFIG_PPC_BOOK3S)
        mtspr(SPRN_DABR, dabr);
        mtspr(SPRN_DABRX, dabrx);
#endif
        return 0;
}

#ifdef CONFIG_PPC64
DEFINE_PER_CPU(struct cpu_usage, cpu_usage_array);
#endif

struct task_struct *__switch_to(struct task_struct *prev,
        struct task_struct *new)
{
        struct thread_struct *new_thread, *old_thread;
        unsigned long flags;
        struct task_struct *last;
#ifdef CONFIG_PPC_BOOK3S_64
        struct ppc64_tlb_batch *batch;
#endif

#ifdef CONFIG_SMP
        /* avoid complexity of lazy save/restore of fpu
         * by just saving it every time we switch out if
         * this task used the fpu during the last quantum.
         *
         * If it tries to use the fpu again, it'll trap and
         * reload its fp regs.  So we don't have to do a restore
         * every switch, just a save.
         *  -- Cort
         */
        if (prev->thread.regs && (prev->thread.regs->msr & MSR_FP))
                giveup_fpu(prev);
#ifdef CONFIG_ALTIVEC
        /*
         * If the previous thread used altivec in the last quantum
         * (thus changing altivec regs) then save them.
         * We used to check the VRSAVE register but not all apps
         * set it, so we don't rely on it now (and in fact we need
         * to save & restore VSCR even if VRSAVE == 0).  -- paulus
         *
         * On SMP we always save/restore altivec regs just to avoid the
         * complexity of changing processors.
         *  -- Cort
         */
        if (prev->thread.regs && (prev->thread.regs->msr & MSR_VEC))
                giveup_altivec(prev);
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_VSX
        if (prev->thread.regs && (prev->thread.regs->msr & MSR_VSX))
                /* VMX and FPU registers are already saved here */
                __giveup_vsx(prev);
#endif /* CONFIG_VSX */
#ifdef CONFIG_SPE
        /*
         * If the previous thread used spe in the last quantum
         * (thus changing spe regs) then save them.
         *
         * On SMP we always save/restore spe regs just to avoid the
         * complexity of changing processors.
         */
        if ((prev->thread.regs && (prev->thread.regs->msr & MSR_SPE)))
                giveup_spe(prev);
#endif /* CONFIG_SPE */
#else  /* CONFIG_SMP */
#ifdef CONFIG_ALTIVEC
        /* Avoid the trap.  On smp this never happens since
         * we don't set last_task_used_altivec -- Cort
         */
        if (new->thread.regs && last_task_used_altivec == new)
                new->thread.regs->msr |= MSR_VEC;
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_VSX
        if (new->thread.regs && last_task_used_vsx == new)
                new->thread.regs->msr |= MSR_VSX;
#endif /* CONFIG_VSX */
#ifdef CONFIG_SPE
        /* Avoid the trap.  On smp this never happens since
         * we don't set last_task_used_spe
         */
        if (new->thread.regs && last_task_used_spe == new)
                new->thread.regs->msr |= MSR_SPE;
#endif /* CONFIG_SPE */
#endif /* CONFIG_SMP */
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
        switch_booke_debug_regs(&new->thread);
#else
/*
 * For PPC_BOOK3S_64, we use the hw-breakpoint interfaces that would
 * schedule DABR
 */
#ifndef CONFIG_HAVE_HW_BREAKPOINT
        if (unlikely(__get_cpu_var(current_dabr) != new->thread.dabr))
                set_dabr(new->thread.dabr, new->thread.dabrx);
#endif /* CONFIG_HAVE_HW_BREAKPOINT */
#endif

        new_thread = &new->thread;
        old_thread = &current->thread;

#ifdef CONFIG_PPC64
        /*
         * Collect processor utilization data per process
         */
        if (firmware_has_feature(FW_FEATURE_SPLPAR)) {
                struct cpu_usage *cu = &__get_cpu_var(cpu_usage_array);
                unsigned long start_tb, current_tb;

                start_tb = old_thread->start_tb;
                cu->current_tb = current_tb = mfspr(SPRN_PURR);
                old_thread->accum_tb += (current_tb - start_tb);
                new_thread->start_tb = current_tb;
        }
#endif /* CONFIG_PPC64 */

#ifdef CONFIG_PPC_BOOK3S_64
        batch = &__get_cpu_var(ppc64_tlb_batch);
        if (batch->active) {
                current_thread_info()->local_flags |= _TLF_LAZY_MMU;
                if (batch->index)
                        __flush_tlb_pending(batch);
                batch->active = 0;
        }
#endif /* CONFIG_PPC_BOOK3S_64 */

        local_irq_save(flags);

        account_system_vtime(current);
        account_process_vtime(current);

        /*
         * We can't take a PMU exception inside _switch() since there is a
         * window where the kernel stack SLB and the kernel stack are out
         * of sync.  Hard disable here.
         */
        hard_irq_disable();
        last = _switch(old_thread, new_thread);

#ifdef CONFIG_PPC_BOOK3S_64
        if (current_thread_info()->local_flags & _TLF_LAZY_MMU) {
                current_thread_info()->local_flags &= ~_TLF_LAZY_MMU;
                batch = &__get_cpu_var(ppc64_tlb_batch);
                batch->active = 1;
        }
#endif /* CONFIG_PPC_BOOK3S_64 */

        local_irq_restore(flags);

        return last;
}

static int instructions_to_print = 16;

static void show_instructions(struct pt_regs *regs)
{
        int i;
        unsigned long pc = regs->nip - (instructions_to_print * 3 / 4 *
                        sizeof(int));

        printk("Instruction dump:");

        for (i = 0; i < instructions_to_print; i++) {
                int instr;

                if (!(i % 8))
                        printk("\n");

#if !defined(CONFIG_BOOKE)
                /* If executing with the IMMU off, adjust pc rather
                 * than print XXXXXXXX.
                 */
                if (!(regs->msr & MSR_IR))
                        pc = (unsigned long)phys_to_virt(pc);
#endif

                /* We use __get_user here *only* to avoid an OOPS on a
                 * bad address because the pc *should* only be a
                 * kernel address.
                 */
                if (!__kernel_text_address(pc) ||
                    __get_user(instr, (unsigned int __user *)pc)) {
                        printk(KERN_CONT "XXXXXXXX ");
                } else {
                        if (regs->nip == pc)
                                printk(KERN_CONT "<%08x> ", instr);
                        else
                                printk(KERN_CONT "%08x ", instr);
                }

                pc += sizeof(int);
        }

        printk("\n");
}

static struct regbit {
        unsigned long bit;
        const char *name;
} msr_bits[] = {
#if defined(CONFIG_PPC64) && !defined(CONFIG_BOOKE)
        {MSR_SF,        "SF"},
        {MSR_HV,        "HV"},
#endif
        {MSR_VEC,       "VEC"},
        {MSR_VSX,       "VSX"},
#ifdef CONFIG_BOOKE
        {MSR_CE,        "CE"},
#endif
        {MSR_EE,        "EE"},
        {MSR_PR,        "PR"},
        {MSR_FP,        "FP"},
        {MSR_ME,        "ME"},
#ifdef CONFIG_BOOKE
        {MSR_DE,        "DE"},
#else
        {MSR_SE,        "SE"},
        {MSR_BE,        "BE"},
#endif
        {MSR_IR,        "IR"},
        {MSR_DR,        "DR"},
        {MSR_PMM,       "PMM"},
#ifndef CONFIG_BOOKE
        {MSR_RI,        "RI"},
        {MSR_LE,        "LE"},
#endif
        {0,             NULL}
};

static void printbits(unsigned long val, struct regbit *bits)
{
        const char *sep = "";

        printk("<");
        for (; bits->bit; ++bits)
                if (val & bits->bit) {
                        printk("%s%s", sep, bits->name);
                        sep = ",";
                }
        printk(">");
}

#ifdef CONFIG_PPC64
#define REG             "%016lx"
#define REGS_PER_LINE   4
#define LAST_VOLATILE   13
#else
#define REG             "%08lx"
#define REGS_PER_LINE   8
#define LAST_VOLATILE   12
#endif

void show_regs(struct pt_regs *regs)
{
        int i, trap;

        printk("NIP: "REG" LR: "REG" CTR: "REG"\n",
               regs->nip, regs->link, regs->ctr);
        printk("REGS: %p TRAP: %04lx   %s  (%s)\n",
               regs, regs->trap, print_tainted(), init_utsname()->release);
        printk("MSR: "REG" ", regs->msr);
        printbits(regs->msr, msr_bits);
        printk("  CR: %08lx  XER: %08lx\n", regs->ccr, regs->xer);
#ifdef CONFIG_PPC64
        printk("SOFTE: %ld\n", regs->softe);
#endif
        trap = TRAP(regs);
        if ((regs->trap != 0xc00) && cpu_has_feature(CPU_FTR_CFAR))
                printk("CFAR: "REG"\n", regs->orig_gpr3);
        if (trap == 0x300 || trap == 0x600)
#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
                printk("DEAR: "REG", ESR: "REG"\n", regs->dar, regs->dsisr);
#else
                printk("DAR: "REG", DSISR: %08lx\n", regs->dar, regs->dsisr);
#endif
        printk("TASK = %p[%d] '%s' THREAD: %p",
               current, task_pid_nr(current), current->comm,
               task_thread_info(current));

#ifdef CONFIG_SMP
        printk(" CPU: %d", raw_smp_processor_id());
#endif /* CONFIG_SMP */

        for (i = 0; i < 32; i++) {
                if ((i % REGS_PER_LINE) == 0)
                        printk("\nGPR%02d: ", i);
                printk(REG " ", regs->gpr[i]);
                if (i == LAST_VOLATILE && !FULL_REGS(regs))
                        break;
        }
        printk("\n");
#ifdef CONFIG_KALLSYMS
        /*
         * Lookup NIP late so we have the best chance of getting the
         * above info out without failing
         */
        printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip);
        printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link);
#endif
        show_stack(current, (unsigned long *) regs->gpr[1]);
        if (!user_mode(regs))
                show_instructions(regs);
}

void exit_thread(void)
{
        discard_lazy_cpu_state();
}

void flush_thread(void)
{
        discard_lazy_cpu_state();

#ifdef CONFIG_HAVE_HW_BREAKPOINT
        flush_ptrace_hw_breakpoint(current);
#else /* CONFIG_HAVE_HW_BREAKPOINT */
        set_debug_reg_defaults(&current->thread);
#endif /* CONFIG_HAVE_HW_BREAKPOINT */
}

void
release_thread(struct task_struct *t)
{
}

/*
 * this gets called so that we can store coprocessor state into memory and
 * copy the current task into the new thread.
 */
int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{
        flush_fp_to_thread(src);
        flush_altivec_to_thread(src);
        flush_vsx_to_thread(src);
        flush_spe_to_thread(src);
#ifdef CONFIG_HAVE_HW_BREAKPOINT
        flush_ptrace_hw_breakpoint(src);
#endif /* CONFIG_HAVE_HW_BREAKPOINT */

        *dst = *src;
        return 0;
}

/*
 * Copy a thread.
 */
extern unsigned long dscr_default; /* defined in arch/powerpc/kernel/sysfs.c */

int copy_thread(unsigned long clone_flags, unsigned long usp,
                unsigned long unused, struct task_struct *p,
                struct pt_regs *regs)
{
        struct pt_regs *childregs, *kregs;
        extern void ret_from_fork(void);
        unsigned long sp = (unsigned long)task_stack_page(p) + THREAD_SIZE;

        CHECK_FULL_REGS(regs);
        /* Copy registers */
        sp -= sizeof(struct pt_regs);
        childregs = (struct pt_regs *) sp;
        *childregs = *regs;
        if ((childregs->msr & MSR_PR) == 0) {
                /* for kernel thread, set `current' and stackptr in new task */
                childregs->gpr[1] = sp + sizeof(struct pt_regs);
#ifdef CONFIG_PPC32
                childregs->gpr[2] = (unsigned long) p;
#else
                clear_tsk_thread_flag(p, TIF_32BIT);
#endif
                p->thread.regs = NULL;  /* no user register state */
        } else {
                childregs->gpr[1] = usp;
                p->thread.regs = childregs;
                if (clone_flags & CLONE_SETTLS) {
#ifdef CONFIG_PPC64
                        if (!is_32bit_task())
                                childregs->gpr[13] = childregs->gpr[6];
                        else
#endif
                                childregs->gpr[2] = childregs->gpr[6];
                }
        }
        childregs->gpr[3] = 0;  /* Result from fork() */
        sp -= STACK_FRAME_OVERHEAD;

        /*
         * The way this works is that at some point in the future
         * some task will call _switch to switch to the new task.
         * That will pop off the stack frame created below and start
         * the new task running at ret_from_fork.  The new task will
         * do some house keeping and then return from the fork or clone
         * system call, using the stack frame created above.
         */
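        /*
         * Sketch (added for illustration) of the child's kernel stack as
         * assembled above and below, with addresses growing downward from
         * the top of the stack page:
         *
         *      +------------------------------+ <- task_stack_page(p)
         *      | struct pt_regs (childregs)   |      + THREAD_SIZE
         *      +------------------------------+
         *      | STACK_FRAME_OVERHEAD         |
         *      +------------------------------+
         *      | struct pt_regs (kregs)       |    frame popped by _switch()
         *      +------------------------------+
         *      | STACK_FRAME_OVERHEAD         | <- p->thread.ksp
         *      +------------------------------+
         */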
        sp -= sizeof(struct pt_regs);
        kregs = (struct pt_regs *) sp;
        sp -= STACK_FRAME_OVERHEAD;
        p->thread.ksp = sp;
        p->thread.ksp_limit = (unsigned long)task_stack_page(p) +
                                _ALIGN_UP(sizeof(struct thread_info), 16);

#ifdef CONFIG_PPC_STD_MMU_64
        if (mmu_has_feature(MMU_FTR_SLB)) {
                unsigned long sp_vsid;
                unsigned long llp = mmu_psize_defs[mmu_linear_psize].sllp;

                if (mmu_has_feature(MMU_FTR_1T_SEGMENT))
                        sp_vsid = get_kernel_vsid(sp, MMU_SEGSIZE_1T)
                                << SLB_VSID_SHIFT_1T;
                else
                        sp_vsid = get_kernel_vsid(sp, MMU_SEGSIZE_256M)
                                << SLB_VSID_SHIFT;
                sp_vsid |= SLB_VSID_KERNEL | llp;
                p->thread.ksp_vsid = sp_vsid;
        }
#endif /* CONFIG_PPC_STD_MMU_64 */
#ifdef CONFIG_PPC64
        if (cpu_has_feature(CPU_FTR_DSCR)) {
                p->thread.dscr_inherit = current->thread.dscr_inherit;
                p->thread.dscr = current->thread.dscr;
        }
#endif

        /*
         * The PPC64 ABI makes use of a TOC to contain function
         * pointers.  The function (ret_from_fork) is actually a pointer
         * to the TOC entry.  The first entry is a pointer to the actual
         * function.
         */
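        /*
         * For illustration only: under the 64-bit ELFv1 ABI a function
         * symbol names an "opd" descriptor rather than code, roughly
         *
         *      struct func_desc {
         *              unsigned long entry;    -- address of the code
         *              unsigned long toc;      -- TOC base for the function
         *              unsigned long env;      -- environment pointer
         *      };
         *
         * which is why the PPC64 case below dereferences ret_from_fork to
         * obtain the real entry address for kregs->nip.
         */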
#ifdef CONFIG_PPC64
        kregs->nip = *((unsigned long *)ret_from_fork);
#else
        kregs->nip = (unsigned long)ret_from_fork;
#endif

        return 0;
}

/*
 * Set up a thread for executing a new program
 */
void start_thread(struct pt_regs *regs, unsigned long start, unsigned long sp)
{
#ifdef CONFIG_PPC64
        unsigned long load_addr = regs->gpr[2]; /* saved by ELF_PLAT_INIT */
#endif

        /*
         * If we exec out of a kernel thread then thread.regs will not be
         * set.  Do it now.
         */
        if (!current->thread.regs) {
                struct pt_regs *regs = task_stack_page(current) + THREAD_SIZE;
                current->thread.regs = regs - 1;
        }

        memset(regs->gpr, 0, sizeof(regs->gpr));
        regs->ctr = 0;
        regs->link = 0;
        regs->xer = 0;
        regs->ccr = 0;
        regs->gpr[1] = sp;

        /*
         * We have just cleared all the nonvolatile GPRs, so make
         * FULL_REGS(regs) return true.  This is necessary to allow
         * ptrace to examine the thread immediately after exec.
         */
        regs->trap &= ~1UL;

#ifdef CONFIG_PPC32
        regs->mq = 0;
        regs->nip = start;
        regs->msr = MSR_USER;
#else
        if (!is_32bit_task()) {
                unsigned long entry, toc;

                /* start is a relocated pointer to the function descriptor for
                 * the elf _start routine.  The first entry in the function
                 * descriptor is the entry address of _start and the second
                 * entry is the TOC value we need to use.
                 */
                __get_user(entry, (unsigned long __user *)start);
                __get_user(toc, (unsigned long __user *)start+1);

                /* Check whether the e_entry function descriptor entries
                 * need to be relocated before we can use them.
                 */
                if (load_addr != 0) {
                        entry += load_addr;
                        toc   += load_addr;
                }
                regs->nip = entry;
                regs->gpr[2] = toc;
                regs->msr = MSR_USER64;
        } else {
                regs->nip = start;
                regs->gpr[2] = 0;
                regs->msr = MSR_USER32;
        }
#endif

        discard_lazy_cpu_state();
#ifdef CONFIG_VSX
        current->thread.used_vsr = 0;
#endif
        memset(current->thread.fpr, 0, sizeof(current->thread.fpr));
        current->thread.fpscr.val = 0;
#ifdef CONFIG_ALTIVEC
        memset(current->thread.vr, 0, sizeof(current->thread.vr));
        memset(&current->thread.vscr, 0, sizeof(current->thread.vscr));
        current->thread.vscr.u[3] = 0x00010000; /* Java mode disabled */
        current->thread.vrsave = 0;
        current->thread.used_vr = 0;
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_SPE
        memset(current->thread.evr, 0, sizeof(current->thread.evr));
        current->thread.acc = 0;
        current->thread.spefscr = 0;
        current->thread.used_spe = 0;
#endif /* CONFIG_SPE */
}

#define PR_FP_ALL_EXCEPT (PR_FP_EXC_DIV | PR_FP_EXC_OVF | PR_FP_EXC_UND \
                | PR_FP_EXC_RES | PR_FP_EXC_INV)

int set_fpexc_mode(struct task_struct *tsk, unsigned int val)
{
        struct pt_regs *regs = tsk->thread.regs;

        /* This is a bit hairy.  If we are an SPE enabled processor
         * (have embedded fp) we store the IEEE exception enable flags in
         * fpexc_mode.  fpexc_mode is also used for setting FP exception
         * mode (async, precise, disabled) for 'Classic' FP. */
        if (val & PR_FP_EXC_SW_ENABLE) {
#ifdef CONFIG_SPE
                if (cpu_has_feature(CPU_FTR_SPE)) {
                        tsk->thread.fpexc_mode = val &
                                (PR_FP_EXC_SW_ENABLE | PR_FP_ALL_EXCEPT);
                        return 0;
                } else {
                        return -EINVAL;
                }
#else
                return -EINVAL;
#endif
        }

        /* on a CONFIG_SPE this does not hurt us.  The bits that
         * __pack_fe01 use do not overlap with bits used for
         * PR_FP_EXC_SW_ENABLE.  Additionally, the MSR[FE0,FE1] bits
         * on CONFIG_SPE implementations are reserved so writing to
         * them does not change anything */
        if (val > PR_FP_EXC_PRECISE)
                return -EINVAL;
        tsk->thread.fpexc_mode = __pack_fe01(val);
        if (regs != NULL && (regs->msr & MSR_FP) != 0)
                regs->msr = (regs->msr & ~(MSR_FE0|MSR_FE1))
                        | tsk->thread.fpexc_mode;
        return 0;
}

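/*
 * Reference note (inferred from the use above, not stated in this file):
 * the prctl mode values are PR_FP_EXC_DISABLED (0), PR_FP_EXC_NONRECOV (1),
 * PR_FP_EXC_ASYNC (2) and PR_FP_EXC_PRECISE (3), and __pack_fe01() /
 * __unpack_fe01() shuttle that two-bit mode between the prctl encoding and
 * the MSR[FE0,FE1] bit positions, which is why the packed value can be
 * ORed straight into regs->msr above.
 */
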
int get_fpexc_mode(struct task_struct *tsk, unsigned long adr)
{
        unsigned int val;

        if (tsk->thread.fpexc_mode & PR_FP_EXC_SW_ENABLE)
#ifdef CONFIG_SPE
                if (cpu_has_feature(CPU_FTR_SPE))
                        val = tsk->thread.fpexc_mode;
                else
                        return -EINVAL;
#else
                return -EINVAL;
#endif
        else
                val = __unpack_fe01(tsk->thread.fpexc_mode);
        return put_user(val, (unsigned int __user *) adr);
}

int set_endian(struct task_struct *tsk, unsigned int val)
{
        struct pt_regs *regs = tsk->thread.regs;

        if ((val == PR_ENDIAN_LITTLE && !cpu_has_feature(CPU_FTR_REAL_LE)) ||
            (val == PR_ENDIAN_PPC_LITTLE && !cpu_has_feature(CPU_FTR_PPC_LE)))
                return -EINVAL;

        if (regs == NULL)
                return -EINVAL;

        if (val == PR_ENDIAN_BIG)
                regs->msr &= ~MSR_LE;
        else if (val == PR_ENDIAN_LITTLE || val == PR_ENDIAN_PPC_LITTLE)
                regs->msr |= MSR_LE;
        else
                return -EINVAL;

        return 0;
}

int get_endian(struct task_struct *tsk, unsigned long adr)
{
        struct pt_regs *regs = tsk->thread.regs;
        unsigned int val;

        if (!cpu_has_feature(CPU_FTR_PPC_LE) &&
            !cpu_has_feature(CPU_FTR_REAL_LE))
                return -EINVAL;

        if (regs == NULL)
                return -EINVAL;

        if (regs->msr & MSR_LE) {
                if (cpu_has_feature(CPU_FTR_REAL_LE))
                        val = PR_ENDIAN_LITTLE;
                else
                        val = PR_ENDIAN_PPC_LITTLE;
        } else
                val = PR_ENDIAN_BIG;

        return put_user(val, (unsigned int __user *)adr);
}

int set_unalign_ctl(struct task_struct *tsk, unsigned int val)
{
        tsk->thread.align_ctl = val;
        return 0;
}

int get_unalign_ctl(struct task_struct *tsk, unsigned long adr)
{
        return put_user(tsk->thread.align_ctl, (unsigned int __user *)adr);
}

#define TRUNC_PTR(x)    ((typeof(x))(((unsigned long)(x)) & 0xffffffff))

int sys_clone(unsigned long clone_flags, unsigned long usp,
              int __user *parent_tidp, void __user *child_threadptr,
              int __user *child_tidp, int p6,
              struct pt_regs *regs)
{
        CHECK_FULL_REGS(regs);
        if (usp == 0)
                usp = regs->gpr[1];     /* stack pointer for child */
#ifdef CONFIG_PPC64
        if (is_32bit_task()) {
                parent_tidp = TRUNC_PTR(parent_tidp);
                child_tidp = TRUNC_PTR(child_tidp);
        }
#endif
        return do_fork(clone_flags, usp, regs, 0, parent_tidp, child_tidp);
}

int sys_fork(unsigned long p1, unsigned long p2, unsigned long p3,
             unsigned long p4, unsigned long p5, unsigned long p6,
             struct pt_regs *regs)
{
        CHECK_FULL_REGS(regs);
        return do_fork(SIGCHLD, regs->gpr[1], regs, 0, NULL, NULL);
}

int sys_vfork(unsigned long p1, unsigned long p2, unsigned long p3,
              unsigned long p4, unsigned long p5, unsigned long p6,
              struct pt_regs *regs)
{
        CHECK_FULL_REGS(regs);
        return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs->gpr[1],
                        regs, 0, NULL, NULL);
}

int sys_execve(unsigned long a0, unsigned long a1, unsigned long a2,
               unsigned long a3, unsigned long a4, unsigned long a5,
               struct pt_regs *regs)
{
        int error;
        char *filename;

        filename = getname((const char __user *) a0);
        error = PTR_ERR(filename);
        if (IS_ERR(filename))
                goto out;
        flush_fp_to_thread(current);
        flush_altivec_to_thread(current);
        flush_spe_to_thread(current);
        error = do_execve(filename,
                          (const char __user *const __user *) a1,
                          (const char __user *const __user *) a2, regs);
        putname(filename);
out:
        return error;
}

static inline int valid_irq_stack(unsigned long sp, struct task_struct *p,
                                  unsigned long nbytes)
{
        unsigned long stack_page;
        unsigned long cpu = task_cpu(p);

        /*
         * Avoid crashing if the stack has overflowed and corrupted
         * task_cpu(p), which is in the thread_info struct.
         */
        if (cpu < NR_CPUS && cpu_possible(cpu)) {
                stack_page = (unsigned long) hardirq_ctx[cpu];
                if (sp >= stack_page + sizeof(struct thread_struct)
                    && sp <= stack_page + THREAD_SIZE - nbytes)
                        return 1;

                stack_page = (unsigned long) softirq_ctx[cpu];
                if (sp >= stack_page + sizeof(struct thread_struct)
                    && sp <= stack_page + THREAD_SIZE - nbytes)
                        return 1;
        }
        return 0;
}

int validate_sp(unsigned long sp, struct task_struct *p,
                unsigned long nbytes)
{
        unsigned long stack_page = (unsigned long)task_stack_page(p);

        if (sp >= stack_page + sizeof(struct thread_struct)
            && sp <= stack_page + THREAD_SIZE - nbytes)
                return 1;

        return valid_irq_stack(sp, p, nbytes);
}
EXPORT_SYMBOL(validate_sp);

unsigned long get_wchan(struct task_struct *p)
{
        unsigned long ip, sp;
        int count = 0;

        if (!p || p == current || p->state == TASK_RUNNING)
                return 0;

        sp = p->thread.ksp;
        if (!validate_sp(sp, p, STACK_FRAME_OVERHEAD))
                return 0;

        do {
                sp = *(unsigned long *)sp;
                if (!validate_sp(sp, p, STACK_FRAME_OVERHEAD))
                        return 0;
                if (count > 0) {
                        ip = ((unsigned long *)sp)[STACK_FRAME_LR_SAVE];
                        if (!in_sched_functions(ip))
                                return ip;
                }
        } while (count++ < 16);
        return 0;
}

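/*
 * Note on the walk above: it relies on the PowerPC stack back-chain
 * convention -- the word at *sp is the caller's stack pointer, and the
 * caller's saved LR lives at sp[STACK_FRAME_LR_SAVE].  The first frame
 * is skipped (count > 0) because its LR save slot may not have been
 * written yet.
 */
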
static int kstack_depth_to_print = CONFIG_PRINT_STACK_DEPTH;

void show_stack(struct task_struct *tsk, unsigned long *stack)
{
        unsigned long sp, ip, lr, newsp;
        int count = 0;
        int firstframe = 1;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
        int curr_frame = current->curr_ret_stack;
        extern void return_to_handler(void);
        unsigned long rth = (unsigned long)return_to_handler;
        unsigned long mrth = -1;
#ifdef CONFIG_PPC64
        extern void mod_return_to_handler(void);
        rth = *(unsigned long *)rth;
        mrth = (unsigned long)mod_return_to_handler;
        mrth = *(unsigned long *)mrth;
#endif
#endif

        sp = (unsigned long) stack;
        if (tsk == NULL)
                tsk = current;
        if (sp == 0) {
                if (tsk == current)
                        asm("mr %0,1" : "=r" (sp));
                else
                        sp = tsk->thread.ksp;
        }

        lr = 0;
        printk("Call Trace:\n");
        do {
                if (!validate_sp(sp, tsk, STACK_FRAME_OVERHEAD))
                        return;

                stack = (unsigned long *) sp;
                newsp = stack[0];
                ip = stack[STACK_FRAME_LR_SAVE];
                if (!firstframe || ip != lr) {
                        printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip);
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
                        if ((ip == rth || ip == mrth) && curr_frame >= 0) {
                                printk(" (%pS)",
                                       (void *)current->ret_stack[curr_frame].ret);
                                curr_frame--;
                        }
#endif
                        if (firstframe)
                                printk(" (unreliable)");
                        printk("\n");
                }
                firstframe = 0;

                /*
                 * See if this is an exception frame.
                 * We look for the "regshere" marker in the current frame.
                 */
                if (validate_sp(sp, tsk, STACK_INT_FRAME_SIZE)
                    && stack[STACK_FRAME_MARKER] == STACK_FRAME_REGS_MARKER) {
                        struct pt_regs *regs = (struct pt_regs *)
                                (sp + STACK_FRAME_OVERHEAD);
                        lr = regs->link;
                        printk("--- Exception: %lx at %pS\n    LR = %pS\n",
                               regs->trap, (void *)regs->nip, (void *)lr);
                        firstframe = 1;
                }

                sp = newsp;
        } while (count++ < kstack_depth_to_print);
}

void dump_stack(void)
{
        show_stack(current, NULL);
}
EXPORT_SYMBOL(dump_stack);

#ifdef CONFIG_PPC64
/* Called with hard IRQs off */
void __ppc64_runlatch_on(void)
{
        struct thread_info *ti = current_thread_info();
        unsigned long ctrl;

        ctrl = mfspr(SPRN_CTRLF);
        ctrl |= CTRL_RUNLATCH;
        mtspr(SPRN_CTRLT, ctrl);

        ti->local_flags |= _TLF_RUNLATCH;
}

/* Called with hard IRQs off */
void __ppc64_runlatch_off(void)
{
        struct thread_info *ti = current_thread_info();
        unsigned long ctrl;

        ti->local_flags &= ~_TLF_RUNLATCH;

        ctrl = mfspr(SPRN_CTRLF);
        ctrl &= ~CTRL_RUNLATCH;
        mtspr(SPRN_CTRLT, ctrl);
}
#endif /* CONFIG_PPC64 */

unsigned long arch_align_stack(unsigned long sp)
{
        if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
                sp -= get_random_int() & ~PAGE_MASK;
        return sp & ~0xf;
}

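/*
 * Worked example: get_random_int() & ~PAGE_MASK keeps only the
 * offset-within-page bits, so the stack top is lowered by a random
 * 0..PAGE_SIZE-1 bytes and then rounded down to 16-byte alignment.
 */
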
static inline unsigned long brk_rnd(void)
{
        unsigned long rnd = 0;

        /* 8MB for 32bit, 1GB for 64bit */
        if (is_32bit_task())
                rnd = (long)(get_random_int() % (1<<(23-PAGE_SHIFT)));
        else
                rnd = (long)(get_random_int() % (1<<(30-PAGE_SHIFT)));

        return rnd << PAGE_SHIFT;
}

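/*
 * Worked example (assuming 4K pages, i.e. PAGE_SHIFT == 12): a 32-bit
 * task draws rnd from 0..2^11-1 pages, giving up to 8MB of slide after
 * the shift; a 64-bit task draws from 0..2^18-1 pages, up to 1GB.
 */
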
unsigned long arch_randomize_brk(struct mm_struct *mm)
{
        unsigned long base = mm->brk;
        unsigned long ret;

#ifdef CONFIG_PPC_STD_MMU_64
        /*
         * If we are using 1TB segments and we are allowed to randomise
         * the heap, we can put it above 1TB so it is backed by a 1TB
         * segment.  Otherwise the heap will be in the bottom 1TB
         * which always uses 256MB segments and this may result in a
         * performance penalty.
         */
        if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T))
                base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T);
#endif

        ret = PAGE_ALIGN(base + brk_rnd());

        if (ret < mm->brk)
                return mm->brk;

        return ret;
}

unsigned long randomize_et_dyn(unsigned long base)
{
        unsigned long ret = PAGE_ALIGN(base + brk_rnd());

        if (ret < base)
                return base;

        return ret;
}