process.c

/*
 * Derived from "arch/i386/kernel/process.c"
 * Copyright (C) 1995 Linus Torvalds
 *
 * Updated and modified by Cort Dougan (cort@cs.nmt.edu) and
 * Paul Mackerras (paulus@cs.anu.edu.au)
 *
 * PowerPC version
 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/elf.h>
#include <linux/init.h>
#include <linux/prctl.h>
#include <linux/init_task.h>
#include <linux/export.h>
#include <linux/kallsyms.h>
#include <linux/mqueue.h>
#include <linux/hardirq.h>
#include <linux/utsname.h>
#include <linux/ftrace.h>
#include <linux/kernel_stat.h>
#include <linux/personality.h>
#include <linux/random.h>
#include <linux/hw_breakpoint.h>

#include <asm/pgtable.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/processor.h>
#include <asm/mmu.h>
#include <asm/prom.h>
#include <asm/machdep.h>
#include <asm/time.h>
#include <asm/runlatch.h>
#include <asm/syscalls.h>
#include <asm/switch_to.h>
#include <asm/debug.h>
#ifdef CONFIG_PPC64
#include <asm/firmware.h>
#endif
#include <linux/kprobes.h>
#include <linux/kdebug.h>

extern unsigned long _get_SP(void);
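
/*
 * With lazy FP/vector state switching on UP, these track which task
 * (if any) currently owns the live register contents for each unit.
 */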
#ifndef CONFIG_SMP
struct task_struct *last_task_used_math = NULL;
struct task_struct *last_task_used_altivec = NULL;
struct task_struct *last_task_used_vsx = NULL;
struct task_struct *last_task_used_spe = NULL;
#endif

/*
 * Make sure the floating-point register state in the
 * thread_struct is up to date for task tsk.
 */
void flush_fp_to_thread(struct task_struct *tsk)
{
	if (tsk->thread.regs) {
		/*
		 * We need to disable preemption here because if we didn't,
		 * another process could get scheduled after the regs->msr
		 * test but before we have finished saving the FP registers
		 * to the thread_struct.  That process could take over the
		 * FPU, and then when we get scheduled again we would store
		 * bogus values for the remaining FP registers.
		 */
		preempt_disable();
		if (tsk->thread.regs->msr & MSR_FP) {
#ifdef CONFIG_SMP
			/*
			 * This should only ever be called for current or
			 * for a stopped child process.  Since we save away
			 * the FP register state on context switch on SMP,
			 * there is something wrong if a stopped child appears
			 * to still have its FP state in the CPU registers.
			 */
			BUG_ON(tsk != current);
#endif
			giveup_fpu(tsk);
		}
		preempt_enable();
	}
}
EXPORT_SYMBOL_GPL(flush_fp_to_thread);
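
/*
 * Usage sketch (illustrative, not from this file): in-kernel users of
 * the FPU must pin themselves to the CPU first, e.g.
 *
 *	preempt_disable();
 *	enable_kernel_fp();
 *	... use the FP registers ...
 *	preempt_enable();
 */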
void enable_kernel_fp(void)
{
	WARN_ON(preemptible());

#ifdef CONFIG_SMP
	if (current->thread.regs && (current->thread.regs->msr & MSR_FP))
		giveup_fpu(current);
	else
		giveup_fpu(NULL);	/* just enables FP for kernel */
#else
	giveup_fpu(last_task_used_math);
#endif /* CONFIG_SMP */
}
EXPORT_SYMBOL(enable_kernel_fp);

#ifdef CONFIG_ALTIVEC
void enable_kernel_altivec(void)
{
	WARN_ON(preemptible());

#ifdef CONFIG_SMP
	if (current->thread.regs && (current->thread.regs->msr & MSR_VEC))
		giveup_altivec(current);
	else
		giveup_altivec_notask();
#else
	giveup_altivec(last_task_used_altivec);
#endif /* CONFIG_SMP */
}
EXPORT_SYMBOL(enable_kernel_altivec);

/*
 * Make sure the VMX/Altivec register state in the
 * thread_struct is up to date for task tsk.
 */
void flush_altivec_to_thread(struct task_struct *tsk)
{
	if (tsk->thread.regs) {
		preempt_disable();
		if (tsk->thread.regs->msr & MSR_VEC) {
#ifdef CONFIG_SMP
			BUG_ON(tsk != current);
#endif
			giveup_altivec(tsk);
		}
		preempt_enable();
	}
}
EXPORT_SYMBOL_GPL(flush_altivec_to_thread);
#endif /* CONFIG_ALTIVEC */

#ifdef CONFIG_VSX
#if 0
/* not currently used, but some crazy RAID module might want to later */
void enable_kernel_vsx(void)
{
	WARN_ON(preemptible());

#ifdef CONFIG_SMP
	if (current->thread.regs && (current->thread.regs->msr & MSR_VSX))
		giveup_vsx(current);
	else
		giveup_vsx(NULL);	/* just enable vsx for kernel - force */
#else
	giveup_vsx(last_task_used_vsx);
#endif /* CONFIG_SMP */
}
EXPORT_SYMBOL(enable_kernel_vsx);
#endif

void giveup_vsx(struct task_struct *tsk)
{
	giveup_fpu(tsk);
	giveup_altivec(tsk);
	__giveup_vsx(tsk);
}

void flush_vsx_to_thread(struct task_struct *tsk)
{
	if (tsk->thread.regs) {
		preempt_disable();
		if (tsk->thread.regs->msr & MSR_VSX) {
#ifdef CONFIG_SMP
			BUG_ON(tsk != current);
#endif
			giveup_vsx(tsk);
		}
		preempt_enable();
	}
}
EXPORT_SYMBOL_GPL(flush_vsx_to_thread);
#endif /* CONFIG_VSX */

#ifdef CONFIG_SPE
void enable_kernel_spe(void)
{
	WARN_ON(preemptible());

#ifdef CONFIG_SMP
	if (current->thread.regs && (current->thread.regs->msr & MSR_SPE))
		giveup_spe(current);
	else
		giveup_spe(NULL);	/* just enable SPE for kernel - force */
#else
	giveup_spe(last_task_used_spe);
#endif /* CONFIG_SMP */
}
EXPORT_SYMBOL(enable_kernel_spe);

void flush_spe_to_thread(struct task_struct *tsk)
{
	if (tsk->thread.regs) {
		preempt_disable();
		if (tsk->thread.regs->msr & MSR_SPE) {
#ifdef CONFIG_SMP
			BUG_ON(tsk != current);
#endif
			tsk->thread.spefscr = mfspr(SPRN_SPEFSCR);
			giveup_spe(tsk);
		}
		preempt_enable();
	}
}
#endif /* CONFIG_SPE */

#ifndef CONFIG_SMP
/*
 * If we are doing lazy switching of CPU state (FP, altivec or SPE),
 * and the current task has some state, discard it.
 */
void discard_lazy_cpu_state(void)
{
	preempt_disable();
	if (last_task_used_math == current)
		last_task_used_math = NULL;
#ifdef CONFIG_ALTIVEC
	if (last_task_used_altivec == current)
		last_task_used_altivec = NULL;
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_VSX
	if (last_task_used_vsx == current)
		last_task_used_vsx = NULL;
#endif /* CONFIG_VSX */
#ifdef CONFIG_SPE
	if (last_task_used_spe == current)
		last_task_used_spe = NULL;
#endif
	preempt_enable();
}
#endif /* CONFIG_SMP */

#ifdef CONFIG_PPC_ADV_DEBUG_REGS
void do_send_trap(struct pt_regs *regs, unsigned long address,
		  unsigned long error_code, int signal_code, int breakpt)
{
	siginfo_t info;

	current->thread.trap_nr = signal_code;
	if (notify_die(DIE_DABR_MATCH, "dabr_match", regs, error_code,
			11, SIGSEGV) == NOTIFY_STOP)
		return;

	/* Deliver the signal to userspace */
	info.si_signo = SIGTRAP;
	info.si_errno = breakpt;	/* breakpoint or watchpoint id */
	info.si_code = signal_code;
	info.si_addr = (void __user *)address;
	force_sig_info(SIGTRAP, &info, current);
}
#else	/* !CONFIG_PPC_ADV_DEBUG_REGS */
void do_break(struct pt_regs *regs, unsigned long address,
	      unsigned long error_code)
{
	siginfo_t info;

	current->thread.trap_nr = TRAP_HWBKPT;
	if (notify_die(DIE_DABR_MATCH, "dabr_match", regs, error_code,
			11, SIGSEGV) == NOTIFY_STOP)
		return;

	if (debugger_break_match(regs))
		return;

	/* Clear the breakpoint */
	hw_breakpoint_disable();

	/* Deliver the signal to userspace */
	info.si_signo = SIGTRAP;
	info.si_errno = 0;
	info.si_code = TRAP_HWBKPT;
	info.si_addr = (void __user *)address;
	force_sig_info(SIGTRAP, &info, current);
}
#endif	/* CONFIG_PPC_ADV_DEBUG_REGS */

static DEFINE_PER_CPU(struct arch_hw_breakpoint, current_brk);

#ifdef CONFIG_PPC_ADV_DEBUG_REGS
/*
 * Set the debug registers back to their default "safe" values.
 */
static void set_debug_reg_defaults(struct thread_struct *thread)
{
	thread->iac1 = thread->iac2 = 0;
#if CONFIG_PPC_ADV_DEBUG_IACS > 2
	thread->iac3 = thread->iac4 = 0;
#endif
	thread->dac1 = thread->dac2 = 0;
#if CONFIG_PPC_ADV_DEBUG_DVCS > 0
	thread->dvc1 = thread->dvc2 = 0;
#endif
	thread->dbcr0 = 0;
#ifdef CONFIG_BOOKE
	/*
	 * Force User/Supervisor bits to b11 (user-only MSR[PR]=1)
	 */
	thread->dbcr1 = DBCR1_IAC1US | DBCR1_IAC2US |
			DBCR1_IAC3US | DBCR1_IAC4US;
	/*
	 * Force Data Address Compare User/Supervisor bits to be User-only
	 * (0b11 MSR[PR]=1) and set all other bits in DBCR2 register to be 0.
	 */
	thread->dbcr2 = DBCR2_DAC1US | DBCR2_DAC2US;
#else
	thread->dbcr1 = 0;
#endif
}

static void prime_debug_regs(struct thread_struct *thread)
{
	mtspr(SPRN_IAC1, thread->iac1);
	mtspr(SPRN_IAC2, thread->iac2);
#if CONFIG_PPC_ADV_DEBUG_IACS > 2
	mtspr(SPRN_IAC3, thread->iac3);
	mtspr(SPRN_IAC4, thread->iac4);
#endif
	mtspr(SPRN_DAC1, thread->dac1);
	mtspr(SPRN_DAC2, thread->dac2);
#if CONFIG_PPC_ADV_DEBUG_DVCS > 0
	mtspr(SPRN_DVC1, thread->dvc1);
	mtspr(SPRN_DVC2, thread->dvc2);
#endif
	mtspr(SPRN_DBCR0, thread->dbcr0);
	mtspr(SPRN_DBCR1, thread->dbcr1);
#ifdef CONFIG_BOOKE
	mtspr(SPRN_DBCR2, thread->dbcr2);
#endif
}

/*
 * If either the old or the new thread is making use of the debug
 * registers, load the debug registers with the values stored in the
 * new thread.
 */
static void switch_booke_debug_regs(struct thread_struct *new_thread)
{
	if ((current->thread.dbcr0 & DBCR0_IDM)
		|| (new_thread->dbcr0 & DBCR0_IDM))
		prime_debug_regs(new_thread);
}
#else	/* !CONFIG_PPC_ADV_DEBUG_REGS */
#ifndef CONFIG_HAVE_HW_BREAKPOINT
static void set_debug_reg_defaults(struct thread_struct *thread)
{
	thread->hw_brk.address = 0;
	thread->hw_brk.type = 0;
	set_break(&thread->hw_brk);
}
#endif /* !CONFIG_HAVE_HW_BREAKPOINT */
#endif	/* CONFIG_PPC_ADV_DEBUG_REGS */

#ifdef CONFIG_PPC_ADV_DEBUG_REGS
static inline int __set_dabr(unsigned long dabr, unsigned long dabrx)
{
	mtspr(SPRN_DAC1, dabr);
#ifdef CONFIG_PPC_47x
	isync();
#endif
	return 0;
}
#elif defined(CONFIG_PPC_BOOK3S)
static inline int __set_dabr(unsigned long dabr, unsigned long dabrx)
{
	mtspr(SPRN_DABR, dabr);
	mtspr(SPRN_DABRX, dabrx);
	return 0;
}
#else
static inline int __set_dabr(unsigned long dabr, unsigned long dabrx)
{
	return -EINVAL;
}
#endif
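
/*
 * Fold the generic breakpoint description into the DABR/DABRX
 * encoding: the low bits of brk->type supply the DABR flag bits,
 * and bits 3-5 supply the DABRX bits.
 */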
static inline int set_dabr(struct arch_hw_breakpoint *brk)
{
	unsigned long dabr, dabrx;

	dabr = brk->address | (brk->type & HW_BRK_TYPE_DABR);
	dabrx = ((brk->type >> 3) & 0x7);

	if (ppc_md.set_dabr)
		return ppc_md.set_dabr(dabr, dabrx);

	return __set_dabr(dabr, dabrx);
}

int set_break(struct arch_hw_breakpoint *brk)
{
	__get_cpu_var(current_brk) = *brk;
	return set_dabr(brk);
}

#ifdef CONFIG_PPC64
DEFINE_PER_CPU(struct cpu_usage, cpu_usage_array);
#endif

static inline bool hw_brk_match(struct arch_hw_breakpoint *a,
				struct arch_hw_breakpoint *b)
{
	if (a->address != b->address)
		return false;
	if (a->type != b->type)
		return false;
	if (a->len != b->len)
		return false;
	return true;
}

struct task_struct *__switch_to(struct task_struct *prev,
	struct task_struct *new)
{
	struct thread_struct *new_thread, *old_thread;
	unsigned long flags;
	struct task_struct *last;
#ifdef CONFIG_PPC_BOOK3S_64
	struct ppc64_tlb_batch *batch;
#endif

#ifdef CONFIG_SMP
	/* avoid complexity of lazy save/restore of fpu
	 * by just saving it every time we switch out if
	 * this task used the fpu during the last quantum.
	 *
	 * If it tries to use the fpu again, it'll trap and
	 * reload its fp regs.  So we don't have to do a restore
	 * every switch, just a save.
	 *  -- Cort
	 */
	if (prev->thread.regs && (prev->thread.regs->msr & MSR_FP))
		giveup_fpu(prev);
#ifdef CONFIG_ALTIVEC
	/*
	 * If the previous thread used altivec in the last quantum
	 * (thus changing altivec regs) then save them.
	 * We used to check the VRSAVE register but not all apps
	 * set it, so we don't rely on it now (and in fact we need
	 * to save & restore VSCR even if VRSAVE == 0).  -- paulus
	 *
	 * On SMP we always save/restore altivec regs just to avoid the
	 * complexity of changing processors.
	 *  -- Cort
	 */
	if (prev->thread.regs && (prev->thread.regs->msr & MSR_VEC))
		giveup_altivec(prev);
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_VSX
	if (prev->thread.regs && (prev->thread.regs->msr & MSR_VSX))
		/* VMX and FPU registers are already saved here */
		__giveup_vsx(prev);
#endif /* CONFIG_VSX */
#ifdef CONFIG_SPE
	/*
	 * If the previous thread used spe in the last quantum
	 * (thus changing spe regs) then save them.
	 *
	 * On SMP we always save/restore spe regs just to avoid the
	 * complexity of changing processors.
	 */
	if ((prev->thread.regs && (prev->thread.regs->msr & MSR_SPE)))
		giveup_spe(prev);
#endif /* CONFIG_SPE */

#else  /* CONFIG_SMP */
#ifdef CONFIG_ALTIVEC
	/* Avoid the trap.  On smp this never happens since
	 * we don't set last_task_used_altivec -- Cort
	 */
	if (new->thread.regs && last_task_used_altivec == new)
		new->thread.regs->msr |= MSR_VEC;
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_VSX
	if (new->thread.regs && last_task_used_vsx == new)
		new->thread.regs->msr |= MSR_VSX;
#endif /* CONFIG_VSX */
#ifdef CONFIG_SPE
	/* Avoid the trap.  On smp this never happens since
	 * we don't set last_task_used_spe
	 */
	if (new->thread.regs && last_task_used_spe == new)
		new->thread.regs->msr |= MSR_SPE;
#endif /* CONFIG_SPE */

#endif /* CONFIG_SMP */

#ifdef CONFIG_PPC_ADV_DEBUG_REGS
	switch_booke_debug_regs(&new->thread);
#else
/*
 * For PPC_BOOK3S_64, we use the hw-breakpoint interfaces that would
 * schedule DABR
 */
#ifndef CONFIG_HAVE_HW_BREAKPOINT
	if (unlikely(!hw_brk_match(&__get_cpu_var(current_brk), &new->thread.hw_brk)))
		set_break(&new->thread.hw_brk);
#endif /* CONFIG_HAVE_HW_BREAKPOINT */
#endif

	new_thread = &new->thread;
	old_thread = &current->thread;

#ifdef CONFIG_PPC64
	/*
	 * Collect processor utilization data per process
	 */
	if (firmware_has_feature(FW_FEATURE_SPLPAR)) {
		struct cpu_usage *cu = &__get_cpu_var(cpu_usage_array);
		long unsigned start_tb, current_tb;

		start_tb = old_thread->start_tb;
		cu->current_tb = current_tb = mfspr(SPRN_PURR);
		old_thread->accum_tb += (current_tb - start_tb);
		new_thread->start_tb = current_tb;
	}
#endif /* CONFIG_PPC64 */

#ifdef CONFIG_PPC_BOOK3S_64
	batch = &__get_cpu_var(ppc64_tlb_batch);
	if (batch->active) {
		current_thread_info()->local_flags |= _TLF_LAZY_MMU;
		if (batch->index)
			__flush_tlb_pending(batch);
		batch->active = 0;
	}
#endif /* CONFIG_PPC_BOOK3S_64 */

	local_irq_save(flags);

	/*
	 * We can't take a PMU exception inside _switch() since there is a
	 * window where the kernel stack SLB and the kernel stack are out
	 * of sync. Hard disable here.
	 */
	hard_irq_disable();

	last = _switch(old_thread, new_thread);

#ifdef CONFIG_PPC_BOOK3S_64
	if (current_thread_info()->local_flags & _TLF_LAZY_MMU) {
		current_thread_info()->local_flags &= ~_TLF_LAZY_MMU;
		batch = &__get_cpu_var(ppc64_tlb_batch);
		batch->active = 1;
	}
#endif /* CONFIG_PPC_BOOK3S_64 */

	local_irq_restore(flags);

	return last;
}

static int instructions_to_print = 16;

static void show_instructions(struct pt_regs *regs)
{
	int i;
	unsigned long pc = regs->nip - (instructions_to_print * 3 / 4 *
			sizeof(int));

	printk("Instruction dump:");

	for (i = 0; i < instructions_to_print; i++) {
		int instr;

		if (!(i % 8))
			printk("\n");

#if !defined(CONFIG_BOOKE)
		/* If executing with the IMMU off, adjust pc rather
		 * than print XXXXXXXX.
		 */
		if (!(regs->msr & MSR_IR))
			pc = (unsigned long)phys_to_virt(pc);
#endif

		/* We use __get_user here *only* to avoid an OOPS on a
		 * bad address because the pc *should* only be a
		 * kernel address.
		 */
		if (!__kernel_text_address(pc) ||
		     __get_user(instr, (unsigned int __user *)pc)) {
			printk(KERN_CONT "XXXXXXXX ");
		} else {
			if (regs->nip == pc)
				printk(KERN_CONT "<%08x> ", instr);
			else
				printk(KERN_CONT "%08x ", instr);
		}

		pc += sizeof(int);
	}

	printk("\n");
}

static struct regbit {
	unsigned long bit;
	const char *name;
} msr_bits[] = {
#if defined(CONFIG_PPC64) && !defined(CONFIG_BOOKE)
	{MSR_SF,	"SF"},
	{MSR_HV,	"HV"},
#endif
	{MSR_VEC,	"VEC"},
	{MSR_VSX,	"VSX"},
#ifdef CONFIG_BOOKE
	{MSR_CE,	"CE"},
#endif
	{MSR_EE,	"EE"},
	{MSR_PR,	"PR"},
	{MSR_FP,	"FP"},
	{MSR_ME,	"ME"},
#ifdef CONFIG_BOOKE
	{MSR_DE,	"DE"},
#else
	{MSR_SE,	"SE"},
	{MSR_BE,	"BE"},
#endif
	{MSR_IR,	"IR"},
	{MSR_DR,	"DR"},
	{MSR_PMM,	"PMM"},
#ifndef CONFIG_BOOKE
	{MSR_RI,	"RI"},
	{MSR_LE,	"LE"},
#endif
	{0,		NULL}
};
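
/*
 * Print the names of the MSR bits that are set, comma-separated and
 * wrapped in angle brackets, e.g. "<EE,PR,FP,ME,IR,DR>" for a typical
 * user-mode MSR value.
 */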
static void printbits(unsigned long val, struct regbit *bits)
{
	const char *sep = "";

	printk("<");
	for (; bits->bit; ++bits)
		if (val & bits->bit) {
			printk("%s%s", sep, bits->name);
			sep = ",";
		}
	printk(">");
}
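
/*
 * Register dump layout: 64-bit prints 16 hex digits, four GPRs per
 * line; 32-bit prints 8 hex digits, eight per line.  show_regs() stops
 * after GPR LAST_VOLATILE when only the volatile registers were saved
 * (!FULL_REGS).
 */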
#ifdef CONFIG_PPC64
#define REG		"%016lx"
#define REGS_PER_LINE	4
#define LAST_VOLATILE	13
#else
#define REG		"%08lx"
#define REGS_PER_LINE	8
#define LAST_VOLATILE	12
#endif

void show_regs(struct pt_regs * regs)
{
	int i, trap;

	printk("NIP: "REG" LR: "REG" CTR: "REG"\n",
	       regs->nip, regs->link, regs->ctr);
	printk("REGS: %p TRAP: %04lx %s (%s)\n",
	       regs, regs->trap, print_tainted(), init_utsname()->release);
	printk("MSR: "REG" ", regs->msr);
	printbits(regs->msr, msr_bits);
	printk(" CR: %08lx XER: %08lx\n", regs->ccr, regs->xer);
#ifdef CONFIG_PPC64
	printk("SOFTE: %ld\n", regs->softe);
#endif
	trap = TRAP(regs);
	if ((regs->trap != 0xc00) && cpu_has_feature(CPU_FTR_CFAR))
		printk("CFAR: "REG"\n", regs->orig_gpr3);
	if (trap == 0x300 || trap == 0x600)
#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
		printk("DEAR: "REG", ESR: "REG"\n", regs->dar, regs->dsisr);
#else
		printk("DAR: "REG", DSISR: %08lx\n", regs->dar, regs->dsisr);
#endif
	printk("TASK = %p[%d] '%s' THREAD: %p",
	       current, task_pid_nr(current), current->comm, task_thread_info(current));

#ifdef CONFIG_SMP
	printk(" CPU: %d", raw_smp_processor_id());
#endif /* CONFIG_SMP */

	for (i = 0; i < 32; i++) {
		if ((i % REGS_PER_LINE) == 0)
			printk("\nGPR%02d: ", i);
		printk(REG " ", regs->gpr[i]);
		if (i == LAST_VOLATILE && !FULL_REGS(regs))
			break;
	}
	printk("\n");
#ifdef CONFIG_KALLSYMS
	/*
	 * Lookup NIP late so we have the best chance of getting the
	 * above info out without failing
	 */
	printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip);
	printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link);
#endif
	show_stack(current, (unsigned long *) regs->gpr[1]);
	if (!user_mode(regs))
		show_instructions(regs);
}

void exit_thread(void)
{
	discard_lazy_cpu_state();
}

void flush_thread(void)
{
	discard_lazy_cpu_state();

#ifdef CONFIG_HAVE_HW_BREAKPOINT
	flush_ptrace_hw_breakpoint(current);
#else /* CONFIG_HAVE_HW_BREAKPOINT */
	set_debug_reg_defaults(&current->thread);
#endif /* CONFIG_HAVE_HW_BREAKPOINT */
}

void
release_thread(struct task_struct *t)
{
}

/*
 * this gets called so that we can store coprocessor state into memory and
 * copy the current task into the new thread.
 */
int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{
	flush_fp_to_thread(src);
	flush_altivec_to_thread(src);
	flush_vsx_to_thread(src);
	flush_spe_to_thread(src);
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	flush_ptrace_hw_breakpoint(src);
#endif /* CONFIG_HAVE_HW_BREAKPOINT */

	*dst = *src;
	return 0;
}

/*
 * Copy a thread.
 */
extern unsigned long dscr_default; /* defined in arch/powerpc/kernel/sysfs.c */

int copy_thread(unsigned long clone_flags, unsigned long usp,
		unsigned long arg, struct task_struct *p)
{
	struct pt_regs *childregs, *kregs;
	extern void ret_from_fork(void);
	extern void ret_from_kernel_thread(void);
	void (*f)(void);
	unsigned long sp = (unsigned long)task_stack_page(p) + THREAD_SIZE;

	/* Copy registers */
	sp -= sizeof(struct pt_regs);
	childregs = (struct pt_regs *) sp;
	if (unlikely(p->flags & PF_KTHREAD)) {
		struct thread_info *ti = (void *)task_stack_page(p);
		memset(childregs, 0, sizeof(struct pt_regs));
		childregs->gpr[1] = sp + sizeof(struct pt_regs);
		childregs->gpr[14] = usp;	/* function */
#ifdef CONFIG_PPC64
		clear_tsk_thread_flag(p, TIF_32BIT);
		childregs->softe = 1;
#endif
		childregs->gpr[15] = arg;
		p->thread.regs = NULL;	/* no user register state */
		ti->flags |= _TIF_RESTOREALL;
		f = ret_from_kernel_thread;
	} else {
		struct pt_regs *regs = current_pt_regs();
		CHECK_FULL_REGS(regs);
		*childregs = *regs;
		if (usp)
			childregs->gpr[1] = usp;
		p->thread.regs = childregs;
		childregs->gpr[3] = 0;	/* Result from fork() */
		if (clone_flags & CLONE_SETTLS) {
#ifdef CONFIG_PPC64
			if (!is_32bit_task())
				childregs->gpr[13] = childregs->gpr[6];
			else
#endif
				childregs->gpr[2] = childregs->gpr[6];
		}
		f = ret_from_fork;
	}
	sp -= STACK_FRAME_OVERHEAD;

	/*
	 * The way this works is that at some point in the future
	 * some task will call _switch to switch to the new task.
	 * That will pop off the stack frame created below and start
	 * the new task running at ret_from_fork.  The new task will
	 * do some house keeping and then return from the fork or clone
	 * system call, using the stack frame created above.
	 */
	sp -= sizeof(struct pt_regs);
	kregs = (struct pt_regs *) sp;
	sp -= STACK_FRAME_OVERHEAD;
	p->thread.ksp = sp;
	p->thread.ksp_limit = (unsigned long)task_stack_page(p) +
				_ALIGN_UP(sizeof(struct thread_info), 16);
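
	/*
	 * Sketch of the new task's kernel stack at this point (top down):
	 *
	 *	childregs            (user register frame for ret_from_fork)
	 *	STACK_FRAME_OVERHEAD
	 *	kregs                (kernel frame; nip is set to f below)
	 *	STACK_FRAME_OVERHEAD <- p->thread.ksp
	 */
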
#ifdef CONFIG_PPC_STD_MMU_64
	if (mmu_has_feature(MMU_FTR_SLB)) {
		unsigned long sp_vsid;
		unsigned long llp = mmu_psize_defs[mmu_linear_psize].sllp;

		if (mmu_has_feature(MMU_FTR_1T_SEGMENT))
			sp_vsid = get_kernel_vsid(sp, MMU_SEGSIZE_1T)
				<< SLB_VSID_SHIFT_1T;
		else
			sp_vsid = get_kernel_vsid(sp, MMU_SEGSIZE_256M)
				<< SLB_VSID_SHIFT;
		sp_vsid |= SLB_VSID_KERNEL | llp;
		p->thread.ksp_vsid = sp_vsid;
	}
#endif /* CONFIG_PPC_STD_MMU_64 */
#ifdef CONFIG_PPC64
	if (cpu_has_feature(CPU_FTR_DSCR)) {
		p->thread.dscr_inherit = current->thread.dscr_inherit;
		p->thread.dscr = current->thread.dscr;
	}
	if (cpu_has_feature(CPU_FTR_HAS_PPR))
		p->thread.ppr = INIT_PPR;
#endif
	/*
	 * The PPC64 ABI makes use of a TOC to contain function
	 * pointers.  The function f (ret_from_fork or
	 * ret_from_kernel_thread) is actually a pointer to a function
	 * descriptor whose first entry is a pointer to the actual
	 * function.
	 */
#ifdef CONFIG_PPC64
	kregs->nip = *((unsigned long *)f);
#else
	kregs->nip = (unsigned long)f;
#endif
	return 0;
}

/*
 * Set up a thread for executing a new program
 */
void start_thread(struct pt_regs *regs, unsigned long start, unsigned long sp)
{
#ifdef CONFIG_PPC64
	unsigned long load_addr = regs->gpr[2];	/* saved by ELF_PLAT_INIT */
#endif

	/*
	 * If we exec out of a kernel thread then thread.regs will not be
	 * set.  Do it now.
	 */
	if (!current->thread.regs) {
		struct pt_regs *regs = task_stack_page(current) + THREAD_SIZE;
		current->thread.regs = regs - 1;
	}

	memset(regs->gpr, 0, sizeof(regs->gpr));
	regs->ctr = 0;
	regs->link = 0;
	regs->xer = 0;
	regs->ccr = 0;
	regs->gpr[1] = sp;

	/*
	 * We have just cleared all the nonvolatile GPRs, so make
	 * FULL_REGS(regs) return true.  This is necessary to allow
	 * ptrace to examine the thread immediately after exec.
	 */
	regs->trap &= ~1UL;

#ifdef CONFIG_PPC32
	regs->mq = 0;
	regs->nip = start;
	regs->msr = MSR_USER;
#else
	if (!is_32bit_task()) {
		unsigned long entry, toc;

		/* start is a relocated pointer to the function descriptor for
		 * the elf _start routine.  The first entry in the function
		 * descriptor is the entry address of _start and the second
		 * entry is the TOC value we need to use.
		 */
		__get_user(entry, (unsigned long __user *)start);
		__get_user(toc, (unsigned long __user *)start+1);

		/* Check whether the e_entry function descriptor entries
		 * need to be relocated before we can use them.
		 */
		if (load_addr != 0) {
			entry += load_addr;
			toc += load_addr;
		}
		regs->nip = entry;
		regs->gpr[2] = toc;
		regs->msr = MSR_USER64;
	} else {
		regs->nip = start;
		regs->gpr[2] = 0;
		regs->msr = MSR_USER32;
	}
#endif

	discard_lazy_cpu_state();
#ifdef CONFIG_VSX
	current->thread.used_vsr = 0;
#endif
	memset(current->thread.fpr, 0, sizeof(current->thread.fpr));
	current->thread.fpscr.val = 0;
#ifdef CONFIG_ALTIVEC
	memset(current->thread.vr, 0, sizeof(current->thread.vr));
	memset(&current->thread.vscr, 0, sizeof(current->thread.vscr));
	current->thread.vscr.u[3] = 0x00010000; /* Java mode disabled */
	current->thread.vrsave = 0;
	current->thread.used_vr = 0;
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_SPE
	memset(current->thread.evr, 0, sizeof(current->thread.evr));
	current->thread.acc = 0;
	current->thread.spefscr = 0;
	current->thread.used_spe = 0;
#endif /* CONFIG_SPE */
}

#define PR_FP_ALL_EXCEPT (PR_FP_EXC_DIV | PR_FP_EXC_OVF | PR_FP_EXC_UND \
		| PR_FP_EXC_RES | PR_FP_EXC_INV)
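
/*
 * set_fpexc_mode() and get_fpexc_mode() back the PR_SET_FPEXC and
 * PR_GET_FPEXC prctl() operations; e.g. (illustrative)
 * prctl(PR_SET_FPEXC, PR_FP_EXC_PRECISE) selects precise FP exception
 * mode for the calling task.
 */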
int set_fpexc_mode(struct task_struct *tsk, unsigned int val)
{
	struct pt_regs *regs = tsk->thread.regs;

	/* This is a bit hairy.  If we are an SPE enabled processor
	 * (have embedded fp) we store the IEEE exception enable flags in
	 * fpexc_mode.  fpexc_mode is also used for setting FP exception
	 * mode (async, precise, disabled) for 'Classic' FP. */
	if (val & PR_FP_EXC_SW_ENABLE) {
#ifdef CONFIG_SPE
		if (cpu_has_feature(CPU_FTR_SPE)) {
			tsk->thread.fpexc_mode = val &
				(PR_FP_EXC_SW_ENABLE | PR_FP_ALL_EXCEPT);
			return 0;
		} else {
			return -EINVAL;
		}
#else
		return -EINVAL;
#endif
	}

	/* on a CONFIG_SPE this does not hurt us.  The bits that
	 * __pack_fe01 use do not overlap with bits used for
	 * PR_FP_EXC_SW_ENABLE.  Additionally, the MSR[FE0,FE1] bits
	 * on CONFIG_SPE implementations are reserved so writing to
	 * them does not change anything */
	if (val > PR_FP_EXC_PRECISE)
		return -EINVAL;
	tsk->thread.fpexc_mode = __pack_fe01(val);
	if (regs != NULL && (regs->msr & MSR_FP) != 0)
		regs->msr = (regs->msr & ~(MSR_FE0|MSR_FE1))
			| tsk->thread.fpexc_mode;
	return 0;
}

int get_fpexc_mode(struct task_struct *tsk, unsigned long adr)
{
	unsigned int val;

	if (tsk->thread.fpexc_mode & PR_FP_EXC_SW_ENABLE)
#ifdef CONFIG_SPE
		if (cpu_has_feature(CPU_FTR_SPE))
			val = tsk->thread.fpexc_mode;
		else
			return -EINVAL;
#else
		return -EINVAL;
#endif
	else
		val = __unpack_fe01(tsk->thread.fpexc_mode);
	return put_user(val, (unsigned int __user *) adr);
}
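
/*
 * set_endian() and get_endian() likewise back the PR_SET_ENDIAN and
 * PR_GET_ENDIAN prctl() operations, e.g. (illustrative)
 * prctl(PR_SET_ENDIAN, PR_ENDIAN_BIG).
 */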
int set_endian(struct task_struct *tsk, unsigned int val)
{
	struct pt_regs *regs = tsk->thread.regs;

	if ((val == PR_ENDIAN_LITTLE && !cpu_has_feature(CPU_FTR_REAL_LE)) ||
	    (val == PR_ENDIAN_PPC_LITTLE && !cpu_has_feature(CPU_FTR_PPC_LE)))
		return -EINVAL;

	if (regs == NULL)
		return -EINVAL;

	if (val == PR_ENDIAN_BIG)
		regs->msr &= ~MSR_LE;
	else if (val == PR_ENDIAN_LITTLE || val == PR_ENDIAN_PPC_LITTLE)
		regs->msr |= MSR_LE;
	else
		return -EINVAL;

	return 0;
}

int get_endian(struct task_struct *tsk, unsigned long adr)
{
	struct pt_regs *regs = tsk->thread.regs;
	unsigned int val;

	if (!cpu_has_feature(CPU_FTR_PPC_LE) &&
	    !cpu_has_feature(CPU_FTR_REAL_LE))
		return -EINVAL;

	if (regs == NULL)
		return -EINVAL;

	if (regs->msr & MSR_LE) {
		if (cpu_has_feature(CPU_FTR_REAL_LE))
			val = PR_ENDIAN_LITTLE;
		else
			val = PR_ENDIAN_PPC_LITTLE;
	} else
		val = PR_ENDIAN_BIG;

	return put_user(val, (unsigned int __user *)adr);
}

int set_unalign_ctl(struct task_struct *tsk, unsigned int val)
{
	tsk->thread.align_ctl = val;
	return 0;
}

int get_unalign_ctl(struct task_struct *tsk, unsigned long adr)
{
	return put_user(tsk->thread.align_ctl, (unsigned int __user *)adr);
}

static inline int valid_irq_stack(unsigned long sp, struct task_struct *p,
				  unsigned long nbytes)
{
	unsigned long stack_page;
	unsigned long cpu = task_cpu(p);

	/*
	 * Avoid crashing if the stack has overflowed and corrupted
	 * task_cpu(p), which is in the thread_info struct.
	 */
	if (cpu < NR_CPUS && cpu_possible(cpu)) {
		stack_page = (unsigned long) hardirq_ctx[cpu];
		if (sp >= stack_page + sizeof(struct thread_struct)
		    && sp <= stack_page + THREAD_SIZE - nbytes)
			return 1;

		stack_page = (unsigned long) softirq_ctx[cpu];
		if (sp >= stack_page + sizeof(struct thread_struct)
		    && sp <= stack_page + THREAD_SIZE - nbytes)
			return 1;
	}
	return 0;
}

int validate_sp(unsigned long sp, struct task_struct *p,
		unsigned long nbytes)
{
	unsigned long stack_page = (unsigned long)task_stack_page(p);

	if (sp >= stack_page + sizeof(struct thread_struct)
	    && sp <= stack_page + THREAD_SIZE - nbytes)
		return 1;

	return valid_irq_stack(sp, p, nbytes);
}

EXPORT_SYMBOL(validate_sp);
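
/*
 * Walk the kernel stack of a sleeping task and return the first
 * return address outside the scheduler; this is what is reported
 * as /proc/<pid>/wchan.
 */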
unsigned long get_wchan(struct task_struct *p)
{
	unsigned long ip, sp;
	int count = 0;

	if (!p || p == current || p->state == TASK_RUNNING)
		return 0;

	sp = p->thread.ksp;
	if (!validate_sp(sp, p, STACK_FRAME_OVERHEAD))
		return 0;

	do {
		sp = *(unsigned long *)sp;
		if (!validate_sp(sp, p, STACK_FRAME_OVERHEAD))
			return 0;
		if (count > 0) {
			ip = ((unsigned long *)sp)[STACK_FRAME_LR_SAVE];
			if (!in_sched_functions(ip))
				return ip;
		}
	} while (count++ < 16);
	return 0;
}

static int kstack_depth_to_print = CONFIG_PRINT_STACK_DEPTH;

void show_stack(struct task_struct *tsk, unsigned long *stack)
{
	unsigned long sp, ip, lr, newsp;
	int count = 0;
	int firstframe = 1;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	int curr_frame = current->curr_ret_stack;
	extern void return_to_handler(void);
	unsigned long rth = (unsigned long)return_to_handler;
	unsigned long mrth = -1;
#ifdef CONFIG_PPC64
	extern void mod_return_to_handler(void);
	rth = *(unsigned long *)rth;
	mrth = (unsigned long)mod_return_to_handler;
	mrth = *(unsigned long *)mrth;
#endif
#endif

	sp = (unsigned long) stack;
	if (tsk == NULL)
		tsk = current;
	if (sp == 0) {
		if (tsk == current)
			asm("mr %0,1" : "=r" (sp));
		else
			sp = tsk->thread.ksp;
	}

	lr = 0;
	printk("Call Trace:\n");
	do {
		if (!validate_sp(sp, tsk, STACK_FRAME_OVERHEAD))
			return;

		stack = (unsigned long *) sp;
		newsp = stack[0];
		ip = stack[STACK_FRAME_LR_SAVE];
		if (!firstframe || ip != lr) {
			printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip);
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
			if ((ip == rth || ip == mrth) && curr_frame >= 0) {
				printk(" (%pS)",
				       (void *)current->ret_stack[curr_frame].ret);
				curr_frame--;
			}
#endif
			if (firstframe)
				printk(" (unreliable)");
			printk("\n");
		}
		firstframe = 0;

		/*
		 * See if this is an exception frame.
		 * We look for the "regshere" marker in the current frame.
		 */
		if (validate_sp(sp, tsk, STACK_INT_FRAME_SIZE)
		    && stack[STACK_FRAME_MARKER] == STACK_FRAME_REGS_MARKER) {
			struct pt_regs *regs = (struct pt_regs *)
				(sp + STACK_FRAME_OVERHEAD);
			lr = regs->link;
			printk("--- Exception: %lx at %pS\n    LR = %pS\n",
			       regs->trap, (void *)regs->nip, (void *)lr);
			firstframe = 1;
		}

		sp = newsp;
	} while (count++ < kstack_depth_to_print);
}

void dump_stack(void)
{
	show_stack(current, NULL);
}
EXPORT_SYMBOL(dump_stack);

#ifdef CONFIG_PPC64
/* Called with hard IRQs off */
void __ppc64_runlatch_on(void)
{
	struct thread_info *ti = current_thread_info();
	unsigned long ctrl;

	ctrl = mfspr(SPRN_CTRLF);
	ctrl |= CTRL_RUNLATCH;
	mtspr(SPRN_CTRLT, ctrl);

	ti->local_flags |= _TLF_RUNLATCH;
}

/* Called with hard IRQs off */
void __ppc64_runlatch_off(void)
{
	struct thread_info *ti = current_thread_info();
	unsigned long ctrl;

	ti->local_flags &= ~_TLF_RUNLATCH;

	ctrl = mfspr(SPRN_CTRLF);
	ctrl &= ~CTRL_RUNLATCH;
	mtspr(SPRN_CTRLT, ctrl);
}
#endif /* CONFIG_PPC64 */
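
/*
 * Randomize the initial stack pointer downwards by a sub-page offset,
 * then round down to a 16-byte boundary.
 */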
unsigned long arch_align_stack(unsigned long sp)
{
	if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
		sp -= get_random_int() & ~PAGE_MASK;
	return sp & ~0xf;
}

static inline unsigned long brk_rnd(void)
{
	unsigned long rnd = 0;

	/* 8MB for 32bit, 1GB for 64bit */
	if (is_32bit_task())
		rnd = (long)(get_random_int() % (1<<(23-PAGE_SHIFT)));
	else
		rnd = (long)(get_random_int() % (1<<(30-PAGE_SHIFT)));

	return rnd << PAGE_SHIFT;
}

unsigned long arch_randomize_brk(struct mm_struct *mm)
{
	unsigned long base = mm->brk;
	unsigned long ret;

#ifdef CONFIG_PPC_STD_MMU_64
	/*
	 * If we are using 1TB segments and we are allowed to randomise
	 * the heap, we can put it above 1TB so it is backed by a 1TB
	 * segment. Otherwise the heap will be in the bottom 1TB
	 * which always uses 256MB segments and this may result in a
	 * performance penalty.
	 */
	if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T))
		base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T);
#endif

	ret = PAGE_ALIGN(base + brk_rnd());

	if (ret < mm->brk)
		return mm->brk;

	return ret;
}

unsigned long randomize_et_dyn(unsigned long base)
{
	unsigned long ret = PAGE_ALIGN(base + brk_rnd());

	if (ret < base)
		return base;

	return ret;
}