/*
 * Derived from "arch/i386/kernel/process.c"
 * Copyright (C) 1995 Linus Torvalds
 *
 * Updated and modified by Cort Dougan (cort@cs.nmt.edu) and
 * Paul Mackerras (paulus@cs.anu.edu.au)
 *
 * PowerPC version
 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/elf.h>
#include <linux/init.h>
#include <linux/prctl.h>
#include <linux/init_task.h>
#include <linux/module.h>
#include <linux/kallsyms.h>
#include <linux/mqueue.h>
#include <linux/hardirq.h>
#include <linux/utsname.h>
#include <linux/ftrace.h>
#include <linux/kernel_stat.h>
#include <linux/personality.h>
#include <linux/random.h>

#include <asm/pgtable.h>
#include <asm/uaccess.h>
#include <asm/system.h>
#include <asm/io.h>
#include <asm/processor.h>
#include <asm/mmu.h>
#include <asm/prom.h>
#include <asm/machdep.h>
#include <asm/time.h>
#include <asm/syscalls.h>
#ifdef CONFIG_PPC64
#include <asm/firmware.h>
#endif
#include <linux/kprobes.h>
#include <linux/kdebug.h>

extern unsigned long _get_SP(void);

#ifndef CONFIG_SMP
struct task_struct *last_task_used_math = NULL;
struct task_struct *last_task_used_altivec = NULL;
struct task_struct *last_task_used_vsx = NULL;
struct task_struct *last_task_used_spe = NULL;
#endif

/*
 * Make sure the floating-point register state in the
 * thread_struct is up to date for task tsk.
 */
void flush_fp_to_thread(struct task_struct *tsk)
{
	if (tsk->thread.regs) {
		/*
		 * We need to disable preemption here because if we didn't,
		 * another process could get scheduled after the regs->msr
		 * test but before we have finished saving the FP registers
		 * to the thread_struct.  That process could take over the
		 * FPU, and then when we get scheduled again we would store
		 * bogus values for the remaining FP registers.
		 */
		preempt_disable();
		if (tsk->thread.regs->msr & MSR_FP) {
#ifdef CONFIG_SMP
			/*
			 * This should only ever be called for current or
			 * for a stopped child process.  Since we save away
			 * the FP register state on context switch on SMP,
			 * there is something wrong if a stopped child appears
			 * to still have its FP state in the CPU registers.
			 */
			BUG_ON(tsk != current);
#endif
			giveup_fpu(tsk);
		}
		preempt_enable();
	}
}
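
/*
 * Grab the FPU for use by kernel code: flush any live user FP state
 * into its thread_struct first, then leave the FPU enabled for the
 * kernel.  Callers must have preemption disabled (see the WARN_ON).
 */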
void enable_kernel_fp(void)
{
	WARN_ON(preemptible());

#ifdef CONFIG_SMP
	if (current->thread.regs && (current->thread.regs->msr & MSR_FP))
		giveup_fpu(current);
	else
		giveup_fpu(NULL);	/* just enables FP for kernel */
#else
	giveup_fpu(last_task_used_math);
#endif /* CONFIG_SMP */
}
EXPORT_SYMBOL(enable_kernel_fp);

#ifdef CONFIG_ALTIVEC
void enable_kernel_altivec(void)
{
	WARN_ON(preemptible());

#ifdef CONFIG_SMP
	if (current->thread.regs && (current->thread.regs->msr & MSR_VEC))
		giveup_altivec(current);
	else
		giveup_altivec(NULL);	/* just enable AltiVec for kernel - force */
#else
	giveup_altivec(last_task_used_altivec);
#endif /* CONFIG_SMP */
}
EXPORT_SYMBOL(enable_kernel_altivec);

/*
 * Make sure the VMX/Altivec register state in the
 * thread_struct is up to date for task tsk.
 */
void flush_altivec_to_thread(struct task_struct *tsk)
{
	if (tsk->thread.regs) {
		preempt_disable();
		if (tsk->thread.regs->msr & MSR_VEC) {
#ifdef CONFIG_SMP
			BUG_ON(tsk != current);
#endif
			giveup_altivec(tsk);
		}
		preempt_enable();
	}
}
#endif /* CONFIG_ALTIVEC */

#ifdef CONFIG_VSX
#if 0
/* not currently used, but some crazy RAID module might want to later */
void enable_kernel_vsx(void)
{
	WARN_ON(preemptible());

#ifdef CONFIG_SMP
	if (current->thread.regs && (current->thread.regs->msr & MSR_VSX))
		giveup_vsx(current);
	else
		giveup_vsx(NULL);	/* just enable vsx for kernel - force */
#else
	giveup_vsx(last_task_used_vsx);
#endif /* CONFIG_SMP */
}
EXPORT_SYMBOL(enable_kernel_vsx);
#endif
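
/*
 * The VSX register file overlays the FP and VMX registers, so giving
 * up VSX state means giving up the FP and AltiVec state as well.
 */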
void giveup_vsx(struct task_struct *tsk)
{
	giveup_fpu(tsk);
	giveup_altivec(tsk);
	__giveup_vsx(tsk);
}

void flush_vsx_to_thread(struct task_struct *tsk)
{
	if (tsk->thread.regs) {
		preempt_disable();
		if (tsk->thread.regs->msr & MSR_VSX) {
#ifdef CONFIG_SMP
			BUG_ON(tsk != current);
#endif
			giveup_vsx(tsk);
		}
		preempt_enable();
	}
}
#endif /* CONFIG_VSX */

#ifdef CONFIG_SPE
void enable_kernel_spe(void)
{
	WARN_ON(preemptible());

#ifdef CONFIG_SMP
	if (current->thread.regs && (current->thread.regs->msr & MSR_SPE))
		giveup_spe(current);
	else
		giveup_spe(NULL);	/* just enable SPE for kernel - force */
#else
	giveup_spe(last_task_used_spe);
#endif /* CONFIG_SMP */
}
EXPORT_SYMBOL(enable_kernel_spe);

void flush_spe_to_thread(struct task_struct *tsk)
{
	if (tsk->thread.regs) {
		preempt_disable();
		if (tsk->thread.regs->msr & MSR_SPE) {
#ifdef CONFIG_SMP
			BUG_ON(tsk != current);
#endif
			giveup_spe(tsk);
		}
		preempt_enable();
	}
}
#endif /* CONFIG_SPE */

#ifndef CONFIG_SMP
/*
 * If we are doing lazy switching of CPU state (FP, altivec or SPE),
 * and the current task has some state, discard it.
 */
void discard_lazy_cpu_state(void)
{
	preempt_disable();
	if (last_task_used_math == current)
		last_task_used_math = NULL;
#ifdef CONFIG_ALTIVEC
	if (last_task_used_altivec == current)
		last_task_used_altivec = NULL;
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_VSX
	if (last_task_used_vsx == current)
		last_task_used_vsx = NULL;
#endif /* CONFIG_VSX */
#ifdef CONFIG_SPE
	if (last_task_used_spe == current)
		last_task_used_spe = NULL;
#endif
	preempt_enable();
}
#endif /* CONFIG_SMP */

#ifdef CONFIG_PPC_ADV_DEBUG_REGS
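/*
 * Report a hardware breakpoint/watchpoint hit to userspace as a SIGTRAP,
 * unless a kernel debugger hooked via notify_die() handles it first.
 */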
void do_send_trap(struct pt_regs *regs, unsigned long address,
		  unsigned long error_code, int signal_code, int breakpt)
{
	siginfo_t info;

	if (notify_die(DIE_DABR_MATCH, "dabr_match", regs, error_code,
			11, SIGSEGV) == NOTIFY_STOP)
		return;

	/* Deliver the signal to userspace */
	info.si_signo = SIGTRAP;
	info.si_errno = breakpt;	/* breakpoint or watchpoint id */
	info.si_code = signal_code;
	info.si_addr = (void __user *)address;
	force_sig_info(SIGTRAP, &info, current);
}
#else	/* !CONFIG_PPC_ADV_DEBUG_REGS */
void do_dabr(struct pt_regs *regs, unsigned long address,
	     unsigned long error_code)
{
	siginfo_t info;

	if (notify_die(DIE_DABR_MATCH, "dabr_match", regs, error_code,
			11, SIGSEGV) == NOTIFY_STOP)
		return;

	if (debugger_dabr_match(regs))
		return;

	/* Clear the DABR */
	set_dabr(0);

	/* Deliver the signal to userspace */
	info.si_signo = SIGTRAP;
	info.si_errno = 0;
	info.si_code = TRAP_HWBKPT;
	info.si_addr = (void __user *)address;
	force_sig_info(SIGTRAP, &info, current);
}
#endif	/* CONFIG_PPC_ADV_DEBUG_REGS */

static DEFINE_PER_CPU(unsigned long, current_dabr);

#ifdef CONFIG_PPC_ADV_DEBUG_REGS
/*
 * Set the debug registers back to their default "safe" values.
 */
static void set_debug_reg_defaults(struct thread_struct *thread)
{
	thread->iac1 = thread->iac2 = 0;
#if CONFIG_PPC_ADV_DEBUG_IACS > 2
	thread->iac3 = thread->iac4 = 0;
#endif
	thread->dac1 = thread->dac2 = 0;
#if CONFIG_PPC_ADV_DEBUG_DVCS > 0
	thread->dvc1 = thread->dvc2 = 0;
#endif
	thread->dbcr0 = 0;
#ifdef CONFIG_BOOKE
	/*
	 * Force User/Supervisor bits to b11 (user-only MSR[PR]=1)
	 */
	thread->dbcr1 = DBCR1_IAC1US | DBCR1_IAC2US |
			DBCR1_IAC3US | DBCR1_IAC4US;
	/*
	 * Force Data Address Compare User/Supervisor bits to be User-only
	 * (0b11 MSR[PR]=1) and set all other bits in DBCR2 register to be 0.
	 */
	thread->dbcr2 = DBCR2_DAC1US | DBCR2_DAC2US;
#else
	thread->dbcr1 = 0;
#endif
}

static void prime_debug_regs(struct thread_struct *thread)
{
	mtspr(SPRN_IAC1, thread->iac1);
	mtspr(SPRN_IAC2, thread->iac2);
#if CONFIG_PPC_ADV_DEBUG_IACS > 2
	mtspr(SPRN_IAC3, thread->iac3);
	mtspr(SPRN_IAC4, thread->iac4);
#endif
	mtspr(SPRN_DAC1, thread->dac1);
	mtspr(SPRN_DAC2, thread->dac2);
#if CONFIG_PPC_ADV_DEBUG_DVCS > 0
	mtspr(SPRN_DVC1, thread->dvc1);
	mtspr(SPRN_DVC2, thread->dvc2);
#endif
	mtspr(SPRN_DBCR0, thread->dbcr0);
	mtspr(SPRN_DBCR1, thread->dbcr1);
#ifdef CONFIG_BOOKE
	mtspr(SPRN_DBCR2, thread->dbcr2);
#endif
}

/*
 * If either the old or the new thread is using the debug registers,
 * load the debug registers with the values stored in the new thread.
 */
static void switch_booke_debug_regs(struct thread_struct *new_thread)
{
	if ((current->thread.dbcr0 & DBCR0_IDM)
		|| (new_thread->dbcr0 & DBCR0_IDM))
			prime_debug_regs(new_thread);
}
#else	/* !CONFIG_PPC_ADV_DEBUG_REGS */
static void set_debug_reg_defaults(struct thread_struct *thread)
{
	if (thread->dabr) {
		thread->dabr = 0;
		set_dabr(0);
	}
}
#endif	/* CONFIG_PPC_ADV_DEBUG_REGS */
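
/*
 * Program the data address breakpoint: cache the value per-CPU, defer to
 * the platform's set_dabr hook if one is registered, otherwise write the
 * appropriate SPR directly.
 */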
int set_dabr(unsigned long dabr)
{
	__get_cpu_var(current_dabr) = dabr;

	if (ppc_md.set_dabr)
		return ppc_md.set_dabr(dabr);

	/* XXX should we have a CPU_FTR_HAS_DABR ? */
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
	mtspr(SPRN_DAC1, dabr);
#ifdef CONFIG_PPC_47x
	isync();
#endif
#elif defined(CONFIG_PPC_BOOK3S)
	mtspr(SPRN_DABR, dabr);
#endif
	return 0;
}

#ifdef CONFIG_PPC64
DEFINE_PER_CPU(struct cpu_usage, cpu_usage_array);
#endif
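
/*
 * The arch-specific half of a context switch: flush the outgoing task's
 * lazily-held FP/VMX/VSX/SPE state, switch the debug registers, account
 * virtualized CPU time, and finally call the assembly _switch() to swap
 * stacks and non-volatile registers.
 */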
struct task_struct *__switch_to(struct task_struct *prev,
	struct task_struct *new)
{
	struct thread_struct *new_thread, *old_thread;
	unsigned long flags;
	struct task_struct *last;

#ifdef CONFIG_SMP
	/* avoid complexity of lazy save/restore of fpu
	 * by just saving it every time we switch out if
	 * this task used the fpu during the last quantum.
	 *
	 * If it tries to use the fpu again, it'll trap and
	 * reload its fp regs.  So we don't have to do a restore
	 * every switch, just a save.
	 *  -- Cort
	 */
	if (prev->thread.regs && (prev->thread.regs->msr & MSR_FP))
		giveup_fpu(prev);
#ifdef CONFIG_ALTIVEC
	/*
	 * If the previous thread used altivec in the last quantum
	 * (thus changing altivec regs) then save them.
	 * We used to check the VRSAVE register but not all apps
	 * set it, so we don't rely on it now (and in fact we need
	 * to save & restore VSCR even if VRSAVE == 0).  -- paulus
	 *
	 * On SMP we always save/restore altivec regs just to avoid the
	 * complexity of changing processors.
	 *  -- Cort
	 */
	if (prev->thread.regs && (prev->thread.regs->msr & MSR_VEC))
		giveup_altivec(prev);
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_VSX
	if (prev->thread.regs && (prev->thread.regs->msr & MSR_VSX))
		/* VMX and FPU registers are already saved here */
		__giveup_vsx(prev);
#endif /* CONFIG_VSX */
#ifdef CONFIG_SPE
	/*
	 * If the previous thread used spe in the last quantum
	 * (thus changing spe regs) then save them.
	 *
	 * On SMP we always save/restore spe regs just to avoid the
	 * complexity of changing processors.
	 */
	if ((prev->thread.regs && (prev->thread.regs->msr & MSR_SPE)))
		giveup_spe(prev);
#endif /* CONFIG_SPE */

#else  /* CONFIG_SMP */
#ifdef CONFIG_ALTIVEC
	/* Avoid the trap.  On smp this never happens since
	 * we don't set last_task_used_altivec -- Cort
	 */
	if (new->thread.regs && last_task_used_altivec == new)
		new->thread.regs->msr |= MSR_VEC;
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_VSX
	if (new->thread.regs && last_task_used_vsx == new)
		new->thread.regs->msr |= MSR_VSX;
#endif /* CONFIG_VSX */
#ifdef CONFIG_SPE
	/* Avoid the trap.  On smp this never happens since
	 * we don't set last_task_used_spe
	 */
	if (new->thread.regs && last_task_used_spe == new)
		new->thread.regs->msr |= MSR_SPE;
#endif /* CONFIG_SPE */

#endif /* CONFIG_SMP */

#ifdef CONFIG_PPC_ADV_DEBUG_REGS
	switch_booke_debug_regs(&new->thread);
#else
	if (unlikely(__get_cpu_var(current_dabr) != new->thread.dabr))
		set_dabr(new->thread.dabr);
#endif

	new_thread = &new->thread;
	old_thread = &current->thread;

#ifdef CONFIG_PPC64
	/*
	 * Collect processor utilization data per process
	 */
	if (firmware_has_feature(FW_FEATURE_SPLPAR)) {
		struct cpu_usage *cu = &__get_cpu_var(cpu_usage_array);
		unsigned long start_tb, current_tb;
		start_tb = old_thread->start_tb;
		cu->current_tb = current_tb = mfspr(SPRN_PURR);
		old_thread->accum_tb += (current_tb - start_tb);
		new_thread->start_tb = current_tb;
	}
#endif

	local_irq_save(flags);

	account_system_vtime(current);
	account_process_vtime(current);
	calculate_steal_time();

	/*
	 * We can't take a PMU exception inside _switch() since there is a
	 * window where the kernel stack SLB and the kernel stack are out
	 * of sync.  Hard disable here.
	 */
	hard_irq_disable();
	last = _switch(old_thread, new_thread);

	local_irq_restore(flags);

	return last;
}

static int instructions_to_print = 16;

static void show_instructions(struct pt_regs *regs)
{
	int i;
	unsigned long pc = regs->nip - (instructions_to_print * 3 / 4 *
			sizeof(int));

	printk("Instruction dump:");

	for (i = 0; i < instructions_to_print; i++) {
		int instr;

		if (!(i % 8))
			printk("\n");

#if !defined(CONFIG_BOOKE)
		/* If executing with the IMMU off, adjust pc rather
		 * than print XXXXXXXX.
		 */
		if (!(regs->msr & MSR_IR))
			pc = (unsigned long)phys_to_virt(pc);
#endif

		/* We use __get_user here *only* to avoid an OOPS on a
		 * bad address because the pc *should* only be a
		 * kernel address.
		 */
		if (!__kernel_text_address(pc) ||
		     __get_user(instr, (unsigned int __user *)pc)) {
			printk("XXXXXXXX ");
		} else {
			if (regs->nip == pc)
				printk("<%08x> ", instr);
			else
				printk("%08x ", instr);
		}

		pc += sizeof(int);
	}

	printk("\n");
}
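
/*
 * Names for the MSR bits that show_regs() decodes when it prints the
 * machine state register.
 */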
static struct regbit {
	unsigned long bit;
	const char *name;
} msr_bits[] = {
	{MSR_EE,	"EE"},
	{MSR_PR,	"PR"},
	{MSR_FP,	"FP"},
	{MSR_VEC,	"VEC"},
	{MSR_VSX,	"VSX"},
	{MSR_ME,	"ME"},
	{MSR_CE,	"CE"},
	{MSR_DE,	"DE"},
	{MSR_IR,	"IR"},
	{MSR_DR,	"DR"},
	{0,		NULL}
};

static void printbits(unsigned long val, struct regbit *bits)
{
	const char *sep = "";

	printk("<");
	for (; bits->bit; ++bits)
		if (val & bits->bit) {
			printk("%s%s", sep, bits->name);
			sep = ",";
		}
	printk(">");
}

#ifdef CONFIG_PPC64
#define REG		"%016lx"
#define REGS_PER_LINE	4
#define LAST_VOLATILE	13
#else
#define REG		"%08lx"
#define REGS_PER_LINE	8
#define LAST_VOLATILE	12
#endif

void show_regs(struct pt_regs * regs)
{
	int i, trap;

	printk("NIP: "REG" LR: "REG" CTR: "REG"\n",
	       regs->nip, regs->link, regs->ctr);
	printk("REGS: %p TRAP: %04lx %s (%s)\n",
	       regs, regs->trap, print_tainted(), init_utsname()->release);
	printk("MSR: "REG" ", regs->msr);
	printbits(regs->msr, msr_bits);
	printk(" CR: %08lx XER: %08lx\n", regs->ccr, regs->xer);
	trap = TRAP(regs);
	if (trap == 0x300 || trap == 0x600)
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
		printk("DEAR: "REG", ESR: "REG"\n", regs->dar, regs->dsisr);
#else
		printk("DAR: "REG", DSISR: "REG"\n", regs->dar, regs->dsisr);
#endif
	printk("TASK = %p[%d] '%s' THREAD: %p",
	       current, task_pid_nr(current), current->comm, task_thread_info(current));

#ifdef CONFIG_SMP
	printk(" CPU: %d", raw_smp_processor_id());
#endif /* CONFIG_SMP */

	for (i = 0; i < 32; i++) {
		if ((i % REGS_PER_LINE) == 0)
			printk("\nGPR%02d: ", i);
		printk(REG " ", regs->gpr[i]);
		if (i == LAST_VOLATILE && !FULL_REGS(regs))
			break;
	}
	printk("\n");
#ifdef CONFIG_KALLSYMS
	/*
	 * Look up the NIP late so we have the best chance of getting the
	 * above info out without failing.
	 */
	printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip);
	printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link);
#endif
	show_stack(current, (unsigned long *) regs->gpr[1]);
	if (!user_mode(regs))
		show_instructions(regs);
}

void exit_thread(void)
{
	discard_lazy_cpu_state();
}

void flush_thread(void)
{
	discard_lazy_cpu_state();

	set_debug_reg_defaults(&current->thread);
}

void
release_thread(struct task_struct *t)
{
}

/*
 * This gets called before we allocate a new thread and copy
 * the current task into it.
 */
void prepare_to_copy(struct task_struct *tsk)
{
	flush_fp_to_thread(current);
	flush_altivec_to_thread(current);
	flush_vsx_to_thread(current);
	flush_spe_to_thread(current);
}

/*
 * Copy a thread.
 */
int copy_thread(unsigned long clone_flags, unsigned long usp,
		unsigned long unused, struct task_struct *p,
		struct pt_regs *regs)
{
	struct pt_regs *childregs, *kregs;
	extern void ret_from_fork(void);
	unsigned long sp = (unsigned long)task_stack_page(p) + THREAD_SIZE;

	CHECK_FULL_REGS(regs);
	/* Copy registers */
	sp -= sizeof(struct pt_regs);
	childregs = (struct pt_regs *) sp;
	*childregs = *regs;
	if ((childregs->msr & MSR_PR) == 0) {
		/* for kernel thread, set `current' and stackptr in new task */
		childregs->gpr[1] = sp + sizeof(struct pt_regs);
#ifdef CONFIG_PPC32
		childregs->gpr[2] = (unsigned long) p;
#else
		clear_tsk_thread_flag(p, TIF_32BIT);
#endif
		p->thread.regs = NULL;	/* no user register state */
	} else {
		childregs->gpr[1] = usp;
		p->thread.regs = childregs;
		if (clone_flags & CLONE_SETTLS) {
#ifdef CONFIG_PPC64
			if (!test_thread_flag(TIF_32BIT))
				childregs->gpr[13] = childregs->gpr[6];
			else
#endif
				childregs->gpr[2] = childregs->gpr[6];
		}
	}
	childregs->gpr[3] = 0;	/* Result from fork() */
	sp -= STACK_FRAME_OVERHEAD;

	/*
	 * The way this works is that at some point in the future
	 * some task will call _switch to switch to the new task.
	 * That will pop off the stack frame created below and start
	 * the new task running at ret_from_fork.  The new task will
	 * do some house keeping and then return from the fork or clone
	 * system call, using the stack frame created above.
	 */
	sp -= sizeof(struct pt_regs);
	kregs = (struct pt_regs *) sp;
	sp -= STACK_FRAME_OVERHEAD;
	p->thread.ksp = sp;
	p->thread.ksp_limit = (unsigned long)task_stack_page(p) +
				_ALIGN_UP(sizeof(struct thread_info), 16);

#ifdef CONFIG_PPC_STD_MMU_64
	if (cpu_has_feature(CPU_FTR_SLB)) {
		unsigned long sp_vsid;
		unsigned long llp = mmu_psize_defs[mmu_linear_psize].sllp;

		if (cpu_has_feature(CPU_FTR_1T_SEGMENT))
			sp_vsid = get_kernel_vsid(sp, MMU_SEGSIZE_1T)
				<< SLB_VSID_SHIFT_1T;
		else
			sp_vsid = get_kernel_vsid(sp, MMU_SEGSIZE_256M)
				<< SLB_VSID_SHIFT;
		sp_vsid |= SLB_VSID_KERNEL | llp;
		p->thread.ksp_vsid = sp_vsid;
	}
#endif /* CONFIG_PPC_STD_MMU_64 */

	/*
	 * The PPC64 ABI makes use of a TOC to contain function
	 * pointers.  The function symbol (ret_from_fork) is actually a
	 * pointer to the TOC entry.  The first entry is a pointer to the
	 * actual function.
	 */
#ifdef CONFIG_PPC64
	kregs->nip = *((unsigned long *)ret_from_fork);
#else
	kregs->nip = (unsigned long)ret_from_fork;
#endif

	return 0;
}

/*
 * Set up a thread for executing a new program
 */
void start_thread(struct pt_regs *regs, unsigned long start, unsigned long sp)
{
#ifdef CONFIG_PPC64
	unsigned long load_addr = regs->gpr[2];	/* saved by ELF_PLAT_INIT */
#endif

	set_fs(USER_DS);

	/*
	 * If we exec out of a kernel thread then thread.regs will not be
	 * set.  Do it now.
	 */
	if (!current->thread.regs) {
		struct pt_regs *regs = task_stack_page(current) + THREAD_SIZE;
		current->thread.regs = regs - 1;
	}

	memset(regs->gpr, 0, sizeof(regs->gpr));
	regs->ctr = 0;
	regs->link = 0;
	regs->xer = 0;
	regs->ccr = 0;
	regs->gpr[1] = sp;

	/*
	 * We have just cleared all the nonvolatile GPRs, so make
	 * FULL_REGS(regs) return true.  This is necessary to allow
	 * ptrace to examine the thread immediately after exec.
	 */
	regs->trap &= ~1UL;

#ifdef CONFIG_PPC32
	regs->mq = 0;
	regs->nip = start;
	regs->msr = MSR_USER;
#else
	if (!test_thread_flag(TIF_32BIT)) {
		unsigned long entry, toc;

		/* start is a relocated pointer to the function descriptor for
		 * the elf _start routine.  The first entry in the function
		 * descriptor is the entry address of _start and the second
		 * entry is the TOC value we need to use.
		 */
		__get_user(entry, (unsigned long __user *)start);
		__get_user(toc, (unsigned long __user *)start+1);

		/* Check whether the e_entry function descriptor entries
		 * need to be relocated before we can use them.
		 */
		if (load_addr != 0) {
			entry += load_addr;
			toc   += load_addr;
		}
		regs->nip = entry;
		regs->gpr[2] = toc;
		regs->msr = MSR_USER64;
	} else {
		regs->nip = start;
		regs->gpr[2] = 0;
		regs->msr = MSR_USER32;
	}
#endif

	discard_lazy_cpu_state();
#ifdef CONFIG_VSX
	current->thread.used_vsr = 0;
#endif
	memset(current->thread.fpr, 0, sizeof(current->thread.fpr));
	current->thread.fpscr.val = 0;
#ifdef CONFIG_ALTIVEC
	memset(current->thread.vr, 0, sizeof(current->thread.vr));
	memset(&current->thread.vscr, 0, sizeof(current->thread.vscr));
	current->thread.vscr.u[3] = 0x00010000;	/* Java mode disabled */
	current->thread.vrsave = 0;
	current->thread.used_vr = 0;
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_SPE
	memset(current->thread.evr, 0, sizeof(current->thread.evr));
	current->thread.acc = 0;
	current->thread.spefscr = 0;
	current->thread.used_spe = 0;
#endif /* CONFIG_SPE */
}

#define PR_FP_ALL_EXCEPT (PR_FP_EXC_DIV | PR_FP_EXC_OVF | PR_FP_EXC_UND \
		| PR_FP_EXC_RES | PR_FP_EXC_INV)

int set_fpexc_mode(struct task_struct *tsk, unsigned int val)
{
	struct pt_regs *regs = tsk->thread.regs;

	/* This is a bit hairy.  If we are an SPE enabled processor
	 * (have embedded fp) we store the IEEE exception enable flags in
	 * fpexc_mode.  fpexc_mode is also used for setting FP exception
	 * mode (async, precise, disabled) for 'Classic' FP. */
	if (val & PR_FP_EXC_SW_ENABLE) {
#ifdef CONFIG_SPE
		if (cpu_has_feature(CPU_FTR_SPE)) {
			tsk->thread.fpexc_mode = val &
				(PR_FP_EXC_SW_ENABLE | PR_FP_ALL_EXCEPT);
			return 0;
		} else {
			return -EINVAL;
		}
#else
		return -EINVAL;
#endif
	}

	/* On a CONFIG_SPE kernel this does not hurt us: the bits that
	 * __pack_fe01 uses do not overlap with the bits used for
	 * PR_FP_EXC_SW_ENABLE.  Additionally, the MSR[FE0,FE1] bits
	 * on CONFIG_SPE implementations are reserved, so writing to
	 * them does not change anything. */
	if (val > PR_FP_EXC_PRECISE)
		return -EINVAL;
	tsk->thread.fpexc_mode = __pack_fe01(val);
	if (regs != NULL && (regs->msr & MSR_FP) != 0)
		regs->msr = (regs->msr & ~(MSR_FE0|MSR_FE1))
			| tsk->thread.fpexc_mode;
	return 0;
}

int get_fpexc_mode(struct task_struct *tsk, unsigned long adr)
{
	unsigned int val;

	if (tsk->thread.fpexc_mode & PR_FP_EXC_SW_ENABLE)
#ifdef CONFIG_SPE
		if (cpu_has_feature(CPU_FTR_SPE))
			val = tsk->thread.fpexc_mode;
		else
			return -EINVAL;
#else
		return -EINVAL;
#endif
	else
		val = __unpack_fe01(tsk->thread.fpexc_mode);
	return put_user(val, (unsigned int __user *) adr);
}

int set_endian(struct task_struct *tsk, unsigned int val)
{
	struct pt_regs *regs = tsk->thread.regs;

	if ((val == PR_ENDIAN_LITTLE && !cpu_has_feature(CPU_FTR_REAL_LE)) ||
	    (val == PR_ENDIAN_PPC_LITTLE && !cpu_has_feature(CPU_FTR_PPC_LE)))
		return -EINVAL;

	if (regs == NULL)
		return -EINVAL;

	if (val == PR_ENDIAN_BIG)
		regs->msr &= ~MSR_LE;
	else if (val == PR_ENDIAN_LITTLE || val == PR_ENDIAN_PPC_LITTLE)
		regs->msr |= MSR_LE;
	else
		return -EINVAL;

	return 0;
}

int get_endian(struct task_struct *tsk, unsigned long adr)
{
	struct pt_regs *regs = tsk->thread.regs;
	unsigned int val;

	if (!cpu_has_feature(CPU_FTR_PPC_LE) &&
	    !cpu_has_feature(CPU_FTR_REAL_LE))
		return -EINVAL;

	if (regs == NULL)
		return -EINVAL;

	if (regs->msr & MSR_LE) {
		if (cpu_has_feature(CPU_FTR_REAL_LE))
			val = PR_ENDIAN_LITTLE;
		else
			val = PR_ENDIAN_PPC_LITTLE;
	} else
		val = PR_ENDIAN_BIG;

	return put_user(val, (unsigned int __user *)adr);
}

int set_unalign_ctl(struct task_struct *tsk, unsigned int val)
{
	tsk->thread.align_ctl = val;
	return 0;
}

int get_unalign_ctl(struct task_struct *tsk, unsigned long adr)
{
	return put_user(tsk->thread.align_ctl, (unsigned int __user *)adr);
}

#define TRUNC_PTR(x)	((typeof(x))(((unsigned long)(x)) & 0xffffffff))
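
/*
 * PowerPC wrapper for the clone syscall: a zero user stack pointer means
 * "use the parent's stack pointer", and for 32-bit tasks on a 64-bit
 * kernel the TID pointers are truncated to 32 bits before use.
 */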
int sys_clone(unsigned long clone_flags, unsigned long usp,
	      int __user *parent_tidp, void __user *child_threadptr,
	      int __user *child_tidp, int p6,
	      struct pt_regs *regs)
{
	CHECK_FULL_REGS(regs);
	if (usp == 0)
		usp = regs->gpr[1];	/* stack pointer for child */
#ifdef CONFIG_PPC64
	if (test_thread_flag(TIF_32BIT)) {
		parent_tidp = TRUNC_PTR(parent_tidp);
		child_tidp = TRUNC_PTR(child_tidp);
	}
#endif
	return do_fork(clone_flags, usp, regs, 0, parent_tidp, child_tidp);
}

int sys_fork(unsigned long p1, unsigned long p2, unsigned long p3,
	     unsigned long p4, unsigned long p5, unsigned long p6,
	     struct pt_regs *regs)
{
	CHECK_FULL_REGS(regs);
	return do_fork(SIGCHLD, regs->gpr[1], regs, 0, NULL, NULL);
}

int sys_vfork(unsigned long p1, unsigned long p2, unsigned long p3,
	      unsigned long p4, unsigned long p5, unsigned long p6,
	      struct pt_regs *regs)
{
	CHECK_FULL_REGS(regs);
	return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs->gpr[1],
			regs, 0, NULL, NULL);
}

int sys_execve(unsigned long a0, unsigned long a1, unsigned long a2,
	       unsigned long a3, unsigned long a4, unsigned long a5,
	       struct pt_regs *regs)
{
	int error;
	char *filename;

	filename = getname((char __user *) a0);
	error = PTR_ERR(filename);
	if (IS_ERR(filename))
		goto out;
	flush_fp_to_thread(current);
	flush_altivec_to_thread(current);
	flush_spe_to_thread(current);
	error = do_execve(filename, (char __user * __user *) a1,
			  (char __user * __user *) a2, regs);
	putname(filename);
out:
	return error;
}

#ifdef CONFIG_IRQSTACKS
static inline int valid_irq_stack(unsigned long sp, struct task_struct *p,
				  unsigned long nbytes)
{
	unsigned long stack_page;
	unsigned long cpu = task_cpu(p);

	/*
	 * Avoid crashing if the stack has overflowed and corrupted
	 * task_cpu(p), which is in the thread_info struct.
	 */
	if (cpu < NR_CPUS && cpu_possible(cpu)) {
		stack_page = (unsigned long) hardirq_ctx[cpu];
		if (sp >= stack_page + sizeof(struct thread_struct)
		    && sp <= stack_page + THREAD_SIZE - nbytes)
			return 1;

		stack_page = (unsigned long) softirq_ctx[cpu];
		if (sp >= stack_page + sizeof(struct thread_struct)
		    && sp <= stack_page + THREAD_SIZE - nbytes)
			return 1;
	}
	return 0;
}
#else
#define valid_irq_stack(sp, p, nb)	0
#endif /* CONFIG_IRQSTACKS */
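
/*
 * Return 1 if sp lies within task p's stack (or one of this CPU's IRQ
 * stacks) with at least nbytes of room below the top, i.e. it is safe
 * to read a frame of nbytes at sp.
 */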
int validate_sp(unsigned long sp, struct task_struct *p,
		unsigned long nbytes)
{
	unsigned long stack_page = (unsigned long)task_stack_page(p);

	if (sp >= stack_page + sizeof(struct thread_struct)
	    && sp <= stack_page + THREAD_SIZE - nbytes)
		return 1;

	return valid_irq_stack(sp, p, nbytes);
}
EXPORT_SYMBOL(validate_sp);
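
/*
 * Walk the saved stack frames of a sleeping task and return the first
 * return address outside the scheduler, i.e. the place where the task
 * is blocked (the "wait channel" reported in /proc/<pid>/wchan).
 */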
unsigned long get_wchan(struct task_struct *p)
{
	unsigned long ip, sp;
	int count = 0;

	if (!p || p == current || p->state == TASK_RUNNING)
		return 0;

	sp = p->thread.ksp;
	if (!validate_sp(sp, p, STACK_FRAME_OVERHEAD))
		return 0;

	do {
		sp = *(unsigned long *)sp;
		if (!validate_sp(sp, p, STACK_FRAME_OVERHEAD))
			return 0;
		if (count > 0) {
			ip = ((unsigned long *)sp)[STACK_FRAME_LR_SAVE];
			if (!in_sched_functions(ip))
				return ip;
		}
	} while (count++ < 16);
	return 0;
}

static int kstack_depth_to_print = CONFIG_PRINT_STACK_DEPTH;
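
/*
 * Print a backtrace for tsk starting at stack (or at the current/saved
 * stack pointer if stack is NULL).  Frames carrying the "regshere"
 * marker are decoded as exception frames, printing the trap number,
 * NIP and LR from the saved pt_regs.
 */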
void show_stack(struct task_struct *tsk, unsigned long *stack)
{
	unsigned long sp, ip, lr, newsp;
	int count = 0;
	int firstframe = 1;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	int curr_frame = current->curr_ret_stack;
	extern void return_to_handler(void);
	unsigned long rth = (unsigned long)return_to_handler;
	unsigned long mrth = -1;
#ifdef CONFIG_PPC64
	extern void mod_return_to_handler(void);
	rth = *(unsigned long *)rth;
	mrth = (unsigned long)mod_return_to_handler;
	mrth = *(unsigned long *)mrth;
#endif
#endif

	sp = (unsigned long) stack;
	if (tsk == NULL)
		tsk = current;
	if (sp == 0) {
		if (tsk == current)
			asm("mr %0,1" : "=r" (sp));
		else
			sp = tsk->thread.ksp;
	}

	lr = 0;
	printk("Call Trace:\n");
	do {
		if (!validate_sp(sp, tsk, STACK_FRAME_OVERHEAD))
			return;

		stack = (unsigned long *) sp;
		newsp = stack[0];
		ip = stack[STACK_FRAME_LR_SAVE];
		if (!firstframe || ip != lr) {
			printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip);
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
			if ((ip == rth || ip == mrth) && curr_frame >= 0) {
				printk(" (%pS)",
				       (void *)current->ret_stack[curr_frame].ret);
				curr_frame--;
			}
#endif
			if (firstframe)
				printk(" (unreliable)");
			printk("\n");
		}
		firstframe = 0;

		/*
		 * See if this is an exception frame.
		 * We look for the "regshere" marker in the current frame.
		 */
		if (validate_sp(sp, tsk, STACK_INT_FRAME_SIZE)
		    && stack[STACK_FRAME_MARKER] == STACK_FRAME_REGS_MARKER) {
			struct pt_regs *regs = (struct pt_regs *)
				(sp + STACK_FRAME_OVERHEAD);
			lr = regs->link;
			printk("--- Exception: %lx at %pS\n    LR = %pS\n",
			       regs->trap, (void *)regs->nip, (void *)lr);
			firstframe = 1;
		}

		sp = newsp;
	} while (count++ < kstack_depth_to_print);
}

void dump_stack(void)
{
	show_stack(current, NULL);
}
EXPORT_SYMBOL(dump_stack);

#ifdef CONFIG_PPC64
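/*
 * The run latch in the CTRL SPR tells the platform whether this CPU is
 * doing useful work: it is set while running tasks and cleared when the
 * CPU goes idle, so the hardware/hypervisor can optimize accordingly.
 */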
void ppc64_runlatch_on(void)
{
	unsigned long ctrl;

	if (cpu_has_feature(CPU_FTR_CTRL) && !test_thread_flag(TIF_RUNLATCH)) {
		HMT_medium();

		ctrl = mfspr(SPRN_CTRLF);
		ctrl |= CTRL_RUNLATCH;
		mtspr(SPRN_CTRLT, ctrl);

		set_thread_flag(TIF_RUNLATCH);
	}
}

void ppc64_runlatch_off(void)
{
	unsigned long ctrl;

	if (cpu_has_feature(CPU_FTR_CTRL) && test_thread_flag(TIF_RUNLATCH)) {
		HMT_medium();

		clear_thread_flag(TIF_RUNLATCH);

		ctrl = mfspr(SPRN_CTRLF);
		ctrl &= ~CTRL_RUNLATCH;
		mtspr(SPRN_CTRLT, ctrl);
	}
}
#endif

#if THREAD_SHIFT < PAGE_SHIFT
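/*
 * When the thread stack is smaller than a page, allocating whole pages
 * would waste memory, so thread_info structures come from a dedicated
 * slab cache instead.
 */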
static struct kmem_cache *thread_info_cache;

struct thread_info *alloc_thread_info(struct task_struct *tsk)
{
	struct thread_info *ti;

	ti = kmem_cache_alloc(thread_info_cache, GFP_KERNEL);
	if (unlikely(ti == NULL))
		return NULL;
#ifdef CONFIG_DEBUG_STACK_USAGE
	memset(ti, 0, THREAD_SIZE);
#endif
	return ti;
}

void free_thread_info(struct thread_info *ti)
{
	kmem_cache_free(thread_info_cache, ti);
}

void thread_info_cache_init(void)
{
	thread_info_cache = kmem_cache_create("thread_info", THREAD_SIZE,
					      THREAD_SIZE, 0, NULL);
	BUG_ON(thread_info_cache == NULL);
}
#endif /* THREAD_SHIFT < PAGE_SHIFT */
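
/*
 * Randomize the initial user stack pointer within one page (unless the
 * task has opted out of randomization), then 16-byte align it as the
 * PowerPC ABIs require.
 */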
unsigned long arch_align_stack(unsigned long sp)
{
	if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
		sp -= get_random_int() & ~PAGE_MASK;
	return sp & ~0xf;
}

static inline unsigned long brk_rnd(void)
{
	unsigned long rnd = 0;

	/* 8MB for 32bit, 1GB for 64bit */
	if (is_32bit_task())
		rnd = (long)(get_random_int() % (1<<(23-PAGE_SHIFT)));
	else
		rnd = (long)(get_random_int() % (1<<(30-PAGE_SHIFT)));

	return rnd << PAGE_SHIFT;
}
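
/*
 * Pick a randomized start address for the heap, at or above mm->brk.
 */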
unsigned long arch_randomize_brk(struct mm_struct *mm)
{
	unsigned long base = mm->brk;
	unsigned long ret;

#ifdef CONFIG_PPC_STD_MMU_64
	/*
	 * If we are using 1TB segments and we are allowed to randomise
	 * the heap, we can put it above 1TB so it is backed by a 1TB
	 * segment.  Otherwise the heap will be in the bottom 1TB
	 * which always uses 256MB segments and this may result in a
	 * performance penalty.
	 */
	if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T))
		base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T);
#endif

	ret = PAGE_ALIGN(base + brk_rnd());

	if (ret < mm->brk)
		return mm->brk;

	return ret;
}

unsigned long randomize_et_dyn(unsigned long base)
{
	unsigned long ret = PAGE_ALIGN(base + brk_rnd());

	if (ret < base)
		return base;

	return ret;
}

#ifdef CONFIG_SMP
int arch_sd_sibling_asym_packing(void)
{
	if (cpu_has_feature(CPU_FTR_ASYM_SMT)) {
		printk_once(KERN_INFO "Enabling Asymmetric SMT scheduling\n");
		return SD_ASYM_PACKING;
	}

	return 0;
}
#endif