process_32.c

/*
 * Copyright (C) 1995 Linus Torvalds
 *
 * Pentium III FXSR, SSE support
 * Gareth Hughes <gareth@valinux.com>, May 2000
 */

/*
 * This file handles the architecture-dependent parts of process handling.
 */

#include <stdarg.h>

#include <linux/cpu.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/elfcore.h>
#include <linux/smp.h>
#include <linux/stddef.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/user.h>
#include <linux/a.out.h>
#include <linux/interrupt.h>
#include <linux/utsname.h>
#include <linux/delay.h>
#include <linux/reboot.h>
#include <linux/init.h>
#include <linux/mc146818rtc.h>
#include <linux/module.h>
#include <linux/kallsyms.h>
#include <linux/ptrace.h>
#include <linux/random.h>
#include <linux/personality.h>
#include <linux/tick.h>
#include <linux/percpu.h>

#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/system.h>
#include <asm/io.h>
#include <asm/ldt.h>
#include <asm/processor.h>
#include <asm/i387.h>
#include <asm/desc.h>
#include <asm/vm86.h>
#ifdef CONFIG_MATH_EMULATION
#include <asm/math_emu.h>
#endif

#include <linux/err.h>

#include <asm/tlbflush.h>
#include <asm/cpu.h>
#include <asm/kdebug.h>

asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");

static int hlt_counter;

unsigned long boot_option_idle_override = 0;
EXPORT_SYMBOL(boot_option_idle_override);

DEFINE_PER_CPU(struct task_struct *, current_task) = &init_task;
EXPORT_PER_CPU_SYMBOL(current_task);

DEFINE_PER_CPU(int, cpu_number);
EXPORT_PER_CPU_SYMBOL(cpu_number);

/*
 * Return saved PC of a blocked thread.
 */
unsigned long thread_saved_pc(struct task_struct *tsk)
{
        return ((unsigned long *)tsk->thread.sp)[3];
}

/*
 * Power management idle function, if any.
 */
void (*pm_idle)(void);
EXPORT_SYMBOL(pm_idle);

static DEFINE_PER_CPU(unsigned int, cpu_idle_state);

void disable_hlt(void)
{
        hlt_counter++;
}
EXPORT_SYMBOL(disable_hlt);

void enable_hlt(void)
{
        hlt_counter--;
}
EXPORT_SYMBOL(enable_hlt);

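/*
 * Hedged usage sketch (not from this file): a driver that cannot
 * tolerate HLT wakeup latency, historically e.g. around floppy DMA
 * transfers, brackets the critical section like this:
 *
 *      disable_hlt();
 *      ...timing-sensitive work...
 *      enable_hlt();
 *
 * The calls nest through hlt_counter, so paired use from several
 * drivers at once is safe.
 */
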
/*
 * We use this if we don't have any better idle routine.
 */
void default_idle(void)
{
        if (!hlt_counter && boot_cpu_data.hlt_works_ok) {
                current_thread_info()->status &= ~TS_POLLING;
                /*
                 * TS_POLLING-cleared state must be visible before we
                 * test NEED_RESCHED:
                 */
                smp_mb();

                local_irq_disable();
                if (!need_resched()) {
                        ktime_t t0, t1;
                        u64 t0n, t1n;

                        t0 = ktime_get();
                        t0n = ktime_to_ns(t0);
                        safe_halt();    /* enables interrupts racelessly */
                        local_irq_disable();
                        t1 = ktime_get();
                        t1n = ktime_to_ns(t1);
                        sched_clock_idle_wakeup_event(t1n - t0n);
                }
                local_irq_enable();
                current_thread_info()->status |= TS_POLLING;
        } else {
                /* loop is done by the caller */
                cpu_relax();
        }
}
#ifdef CONFIG_APM_MODULE
EXPORT_SYMBOL(default_idle);
#endif

/*
 * On SMP it's slightly faster (but much more power-consuming!)
 * to poll the ->work.need_resched flag instead of waiting for the
 * cross-CPU IPI to arrive. Use this option with caution.
 */
static void poll_idle(void)
{
        cpu_relax();
}

#ifdef CONFIG_HOTPLUG_CPU
#include <asm/nmi.h>
/* We don't actually take the CPU down, just spin without interrupts. */
static inline void play_dead(void)
{
        /* This must be done before dead CPU ack */
        cpu_exit_clear();
        wbinvd();
        mb();
        /* Ack it */
        __get_cpu_var(cpu_state) = CPU_DEAD;

        /*
         * With physical CPU hotplug, we should halt the CPU.
         */
        local_irq_disable();
        while (1)
                halt();
}
#else
static inline void play_dead(void)
{
        BUG();
}
#endif /* CONFIG_HOTPLUG_CPU */

/*
 * The idle thread. There's no useful work to be
 * done, so just try to conserve power and have a
 * low exit latency (i.e. sit in a loop waiting for
 * somebody to say that they'd like to reschedule).
 */
void cpu_idle(void)
{
        int cpu = smp_processor_id();

        current_thread_info()->status |= TS_POLLING;

        /* endless idle loop with no priority at all */
        while (1) {
                tick_nohz_stop_sched_tick();
                while (!need_resched()) {
                        void (*idle)(void);

                        if (__get_cpu_var(cpu_idle_state))
                                __get_cpu_var(cpu_idle_state) = 0;

                        check_pgt_cache();
                        rmb();
                        idle = pm_idle;

                        if (rcu_pending(cpu))
                                rcu_check_callbacks(cpu, 0);

                        if (!idle)
                                idle = default_idle;

                        if (cpu_is_offline(cpu))
                                play_dead();

                        __get_cpu_var(irq_stat).idle_timestamp = jiffies;
                        idle();
                }
                tick_nohz_restart_sched_tick();
                preempt_enable_no_resched();
                schedule();
                preempt_disable();
        }
}

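/*
 * Why TS_POLLING matters (context from the scheduler, not this file):
 * while TS_POLLING is set, resched_task() sees that this CPU is
 * already watching need_resched and skips the cross-CPU reschedule
 * IPI. default_idle() clears the flag around HLT precisely because a
 * halted CPU really does need that interrupt to wake up.
 */
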
static void do_nothing(void *unused)
{
}

void cpu_idle_wait(void)
{
        unsigned int cpu, this_cpu = get_cpu();
        cpumask_t map, tmp = current->cpus_allowed;

        set_cpus_allowed(current, cpumask_of_cpu(this_cpu));
        put_cpu();

        cpus_clear(map);
        for_each_online_cpu(cpu) {
                per_cpu(cpu_idle_state, cpu) = 1;
                cpu_set(cpu, map);
        }

        __get_cpu_var(cpu_idle_state) = 0;

        wmb();
        do {
                ssleep(1);
                for_each_online_cpu(cpu) {
                        if (cpu_isset(cpu, map) && !per_cpu(cpu_idle_state, cpu))
                                cpu_clear(cpu, map);
                }
                cpus_and(map, map, cpu_online_map);
                /*
                 * We waited 1 sec; if a CPU has still not cleared its
                 * flag, it may be sitting in idle with nothing left to
                 * wake it up. Give all the remaining CPUs a kick.
                 */
                smp_call_function_mask(map, do_nothing, 0, 0);
        } while (!cpus_empty(map));

        set_cpus_allowed(current, tmp);
}
EXPORT_SYMBOL_GPL(cpu_idle_wait);

/*
 * This uses the new MONITOR/MWAIT instructions on P4 processors with
 * PNI, which can obviate the IPI to trigger checking of need_resched.
 * We execute MONITOR against need_resched and enter an optimized wait
 * state through MWAIT. Whenever someone changes need_resched, we will
 * be woken up from MWAIT (without an IPI).
 *
 * New with Core Duo processors, MWAIT can take some hints based on CPU
 * capability.
 */
void mwait_idle_with_hints(unsigned long ax, unsigned long cx)
{
        if (!need_resched()) {
                __monitor((void *)&current_thread_info()->flags, 0, 0);
                smp_mb();
                if (!need_resched())
                        __mwait(ax, cx);
        }
}

/* Default MONITOR/MWAIT with no hints, used for default C1 state */
static void mwait_idle(void)
{
        local_irq_enable();
        mwait_idle_with_hints(0, 0);
}

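/*
 * Hint encoding, for reference (per the Intel SDM; not defined in this
 * file): the EAX hint to MWAIT carries the target C-state in bits 7:4
 * and a sub-state in bits 3:0, so the (0, 0) used above requests C1.
 * Bit 0 of ECX, when set, makes masked interrupts break events too.
 */
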
static int mwait_usable(const struct cpuinfo_x86 *c)
{
        if (force_mwait)
                return 1;
        /* Any C1 states supported? */
        return c->cpuid_level >= 5 && ((cpuid_edx(5) >> 4) & 0xf) > 0;
}

void __cpuinit select_idle_routine(const struct cpuinfo_x86 *c)
{
        if (cpu_has(c, X86_FEATURE_MWAIT) && mwait_usable(c)) {
                printk(KERN_INFO "monitor/mwait feature present.\n");
                /*
                 * Skip, if setup has overridden idle.
                 * One CPU supports mwait => all CPUs support mwait.
                 */
                if (!pm_idle) {
                        printk(KERN_INFO "using mwait in idle threads.\n");
                        pm_idle = mwait_idle;
                }
        }
}

static int __init idle_setup(char *str)
{
        if (!strcmp(str, "poll")) {
                printk(KERN_INFO "using polling idle threads.\n");
                pm_idle = poll_idle;
#ifdef CONFIG_X86_SMP
                if (smp_num_siblings > 1)
                        printk(KERN_WARNING "WARNING: polling idle and HT enabled, performance may degrade.\n");
#endif
        } else if (!strcmp(str, "mwait"))
                force_mwait = 1;
        else
                return -1;

        boot_option_idle_override = 1;
        return 0;
}
early_param("idle", idle_setup);

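/*
 * Usage, as implemented by the parser above: booting with "idle=poll"
 * selects the polling idle loop, while "idle=mwait" forces MWAIT-based
 * idle even when CPUID does not advertise a C1 MWAIT state; any other
 * value is rejected.
 */
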
void __show_registers(struct pt_regs *regs, int all)
{
        unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L;
        unsigned long d0, d1, d2, d3, d6, d7;
        unsigned long sp;
        unsigned short ss, gs;

        if (user_mode_vm(regs)) {
                sp = regs->sp;
                ss = regs->ss & 0xffff;
                savesegment(gs, gs);
        } else {
                sp = (unsigned long) (&regs->sp);
                savesegment(ss, ss);
                savesegment(gs, gs);
        }

        printk("\n");
        printk("Pid: %d, comm: %s %s (%s %.*s)\n",
                        task_pid_nr(current), current->comm,
                        print_tainted(), init_utsname()->release,
                        (int)strcspn(init_utsname()->version, " "),
                        init_utsname()->version);

        printk("EIP: %04x:[<%08lx>] EFLAGS: %08lx CPU: %d\n",
                        0xffff & regs->cs, regs->ip, regs->flags,
                        smp_processor_id());
        print_symbol("EIP is at %s\n", regs->ip);

        printk("EAX: %08lx EBX: %08lx ECX: %08lx EDX: %08lx\n",
                regs->ax, regs->bx, regs->cx, regs->dx);
        printk("ESI: %08lx EDI: %08lx EBP: %08lx ESP: %08lx\n",
                regs->si, regs->di, regs->bp, sp);
        printk(" DS: %04x ES: %04x FS: %04x GS: %04x SS: %04x\n",
               regs->ds & 0xffff, regs->es & 0xffff,
               regs->fs & 0xffff, gs, ss);

        if (!all)
                return;

        cr0 = read_cr0();
        cr2 = read_cr2();
        cr3 = read_cr3();
        cr4 = read_cr4_safe();
        printk("CR0: %08lx CR2: %08lx CR3: %08lx CR4: %08lx\n",
                        cr0, cr2, cr3, cr4);

        get_debugreg(d0, 0);
        get_debugreg(d1, 1);
        get_debugreg(d2, 2);
        get_debugreg(d3, 3);
        printk("DR0: %08lx DR1: %08lx DR2: %08lx DR3: %08lx\n",
                        d0, d1, d2, d3);

        get_debugreg(d6, 6);
        get_debugreg(d7, 7);
        printk("DR6: %08lx DR7: %08lx\n",
                        d6, d7);
}

void show_regs(struct pt_regs *regs)
{
        __show_registers(regs, 1);
        show_trace(NULL, regs, &regs->sp, regs->bp);
}

/*
 * This gets run with %bx containing the
 * function to call, and %dx containing
 * the "args".
 */
extern void kernel_thread_helper(void);

/*
 * Create a kernel thread.
 */
int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
{
        struct pt_regs regs;

        memset(&regs, 0, sizeof(regs));

        regs.bx = (unsigned long) fn;
        regs.dx = (unsigned long) arg;

        regs.ds = __USER_DS;
        regs.es = __USER_DS;
        regs.fs = __KERNEL_PERCPU;
        regs.orig_ax = -1;
        regs.ip = (unsigned long) kernel_thread_helper;
        regs.cs = __KERNEL_CS | get_kernel_rpl();
        regs.flags = X86_EFLAGS_IF | X86_EFLAGS_SF | X86_EFLAGS_PF | 0x2;

        /* OK, create the new process. */
        return do_fork(flags | CLONE_VM | CLONE_UNTRACED, 0, &regs, 0, NULL, NULL);
}
EXPORT_SYMBOL(kernel_thread);

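/*
 * Hedged usage sketch (disabled; not part of the original file --
 * example_worker/example_spawn and the flag choice are illustrative).
 * kernel_thread() always ORs in CLONE_VM, so the new thread shares the
 * kernel address space; on success the child's PID is returned.
 */
#if 0
static int example_worker(void *arg)
{
        /* ... do some work with arg, then terminate by returning ... */
        return 0;
}

static void example_spawn(void *arg)
{
        int pid = kernel_thread(example_worker, arg, CLONE_FS | CLONE_FILES);

        if (pid < 0)
                printk(KERN_ERR "example_spawn: kernel_thread failed (%d)\n", pid);
}
#endif
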
/*
 * Free current thread data structures etc.
 */
void exit_thread(void)
{
        /* The process may have allocated an io port bitmap... nuke it. */
        if (unlikely(test_thread_flag(TIF_IO_BITMAP))) {
                struct task_struct *tsk = current;
                struct thread_struct *t = &tsk->thread;
                int cpu = get_cpu();
                struct tss_struct *tss = &per_cpu(init_tss, cpu);

                kfree(t->io_bitmap_ptr);
                t->io_bitmap_ptr = NULL;
                clear_thread_flag(TIF_IO_BITMAP);
                /*
                 * Careful, clear this in the TSS too:
                 */
                memset(tss->io_bitmap, 0xff, tss->io_bitmap_max);
                t->io_bitmap_max = 0;
                tss->io_bitmap_owner = NULL;
                tss->io_bitmap_max = 0;
                tss->x86_tss.io_bitmap_base = INVALID_IO_BITMAP_OFFSET;
                put_cpu();
        }
}

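/*
 * Note on the 0xff memset above: in the TSS I/O bitmap a set bit
 * denies access to the corresponding port, so filling the used part of
 * the bitmap with 0xff revokes every port this task had been granted.
 */
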
void flush_thread(void)
{
        struct task_struct *tsk = current;

        tsk->thread.debugreg0 = 0;
        tsk->thread.debugreg1 = 0;
        tsk->thread.debugreg2 = 0;
        tsk->thread.debugreg3 = 0;
        tsk->thread.debugreg6 = 0;
        tsk->thread.debugreg7 = 0;
        memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
        clear_tsk_thread_flag(tsk, TIF_DEBUG);
        /*
         * Forget coprocessor state.
         */
        clear_fpu(tsk);
        clear_used_math();
}

void release_thread(struct task_struct *dead_task)
{
        BUG_ON(dead_task->mm);
        release_vm86_irqs(dead_task);
}

/*
 * This gets called before we allocate a new thread and copy
 * the current task into it.
 */
void prepare_to_copy(struct task_struct *tsk)
{
        unlazy_fpu(tsk);
}

int copy_thread(int nr, unsigned long clone_flags, unsigned long sp,
        unsigned long unused,
        struct task_struct *p, struct pt_regs *regs)
{
        struct pt_regs *childregs;
        struct task_struct *tsk;
        int err;

        childregs = task_pt_regs(p);
        *childregs = *regs;
        childregs->ax = 0;
        childregs->sp = sp;

        p->thread.sp = (unsigned long) childregs;
        p->thread.sp0 = (unsigned long) (childregs+1);

        p->thread.ip = (unsigned long) ret_from_fork;

        savesegment(gs, p->thread.gs);

        tsk = current;
        if (unlikely(test_tsk_thread_flag(tsk, TIF_IO_BITMAP))) {
                p->thread.io_bitmap_ptr = kmemdup(tsk->thread.io_bitmap_ptr,
                                                IO_BITMAP_BYTES, GFP_KERNEL);
                if (!p->thread.io_bitmap_ptr) {
                        p->thread.io_bitmap_max = 0;
                        return -ENOMEM;
                }
                set_tsk_thread_flag(p, TIF_IO_BITMAP);
        }

        err = 0;

        /*
         * Set a new TLS for the child thread?
         */
        if (clone_flags & CLONE_SETTLS)
                err = do_set_thread_area(p, -1,
                        (struct user_desc __user *)childregs->si, 0);

        if (err && p->thread.io_bitmap_ptr) {
                kfree(p->thread.io_bitmap_ptr);
                p->thread.io_bitmap_max = 0;
        }
        return err;
}

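/*
 * Note for copy_thread() above: childregs->ax = 0 is what makes
 * fork() return 0 in the child, while the parent receives the child's
 * PID from do_fork(). The child starts executing at ret_from_fork with
 * exactly this register image.
 */
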
/*
 * Fill in the user structure for a core dump.
 */
void dump_thread(struct pt_regs *regs, struct user *dump)
{
        u16 gs;

        /* changed the size calculations - should hopefully work better. lbt */
        dump->magic = CMAGIC;
        dump->start_code = 0;
        dump->start_stack = regs->sp & ~(PAGE_SIZE - 1);
        dump->u_tsize = ((unsigned long) current->mm->end_code) >> PAGE_SHIFT;
        dump->u_dsize = ((unsigned long) (current->mm->brk + (PAGE_SIZE-1))) >> PAGE_SHIFT;
        dump->u_dsize -= dump->u_tsize;
        dump->u_ssize = 0;
        dump->u_debugreg[0] = current->thread.debugreg0;
        dump->u_debugreg[1] = current->thread.debugreg1;
        dump->u_debugreg[2] = current->thread.debugreg2;
        dump->u_debugreg[3] = current->thread.debugreg3;
        dump->u_debugreg[4] = 0;
        dump->u_debugreg[5] = 0;
        dump->u_debugreg[6] = current->thread.debugreg6;
        dump->u_debugreg[7] = current->thread.debugreg7;

        if (dump->start_stack < TASK_SIZE)
                dump->u_ssize = ((unsigned long) (TASK_SIZE - dump->start_stack)) >> PAGE_SHIFT;

        dump->regs.bx = regs->bx;
        dump->regs.cx = regs->cx;
        dump->regs.dx = regs->dx;
        dump->regs.si = regs->si;
        dump->regs.di = regs->di;
        dump->regs.bp = regs->bp;
        dump->regs.ax = regs->ax;
        dump->regs.ds = (u16)regs->ds;
        dump->regs.es = (u16)regs->es;
        dump->regs.fs = (u16)regs->fs;
        savesegment(gs, gs);
        dump->regs.gs = gs;
        dump->regs.orig_ax = regs->orig_ax;
        dump->regs.ip = regs->ip;
        dump->regs.cs = (u16)regs->cs;
        dump->regs.flags = regs->flags;
        dump->regs.sp = regs->sp;
        dump->regs.ss = (u16)regs->ss;

        dump->u_fpvalid = dump_fpu(regs, &dump->i387);
}
EXPORT_SYMBOL(dump_thread);

#ifdef CONFIG_SECCOMP
static void hard_disable_TSC(void)
{
        write_cr4(read_cr4() | X86_CR4_TSD);
}

void disable_TSC(void)
{
        preempt_disable();
        if (!test_and_set_thread_flag(TIF_NOTSC))
                /*
                 * Must flip the CPU state synchronously with
                 * TIF_NOTSC in the current running context.
                 */
                hard_disable_TSC();
        preempt_enable();
}

static void hard_enable_TSC(void)
{
        write_cr4(read_cr4() & ~X86_CR4_TSD);
}
#endif /* CONFIG_SECCOMP */

static noinline void
__switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
                 struct tss_struct *tss)
{
        struct thread_struct *prev, *next;
        unsigned long debugctl;

        prev = &prev_p->thread;
        next = &next_p->thread;

        debugctl = prev->debugctlmsr;
        if (next->ds_area_msr != prev->ds_area_msr) {
                /* we clear debugctl to make sure DS
                 * is not in use when we change it */
                debugctl = 0;
                wrmsrl(MSR_IA32_DEBUGCTLMSR, 0);
                wrmsr(MSR_IA32_DS_AREA, next->ds_area_msr, 0);
        }

        if (next->debugctlmsr != debugctl)
                wrmsr(MSR_IA32_DEBUGCTLMSR, next->debugctlmsr, 0);

        if (test_tsk_thread_flag(next_p, TIF_DEBUG)) {
                set_debugreg(next->debugreg0, 0);
                set_debugreg(next->debugreg1, 1);
                set_debugreg(next->debugreg2, 2);
                set_debugreg(next->debugreg3, 3);
                /* no 4 and 5 */
                set_debugreg(next->debugreg6, 6);
                set_debugreg(next->debugreg7, 7);
        }

#ifdef CONFIG_SECCOMP
        if (test_tsk_thread_flag(prev_p, TIF_NOTSC) ^
            test_tsk_thread_flag(next_p, TIF_NOTSC)) {
                /* prev and next are different */
                if (test_tsk_thread_flag(next_p, TIF_NOTSC))
                        hard_disable_TSC();
                else
                        hard_enable_TSC();
        }
#endif

        if (test_tsk_thread_flag(prev_p, TIF_BTS_TRACE_TS))
                ptrace_bts_take_timestamp(prev_p, BTS_TASK_DEPARTS);

        if (test_tsk_thread_flag(next_p, TIF_BTS_TRACE_TS))
                ptrace_bts_take_timestamp(next_p, BTS_TASK_ARRIVES);

        if (!test_tsk_thread_flag(next_p, TIF_IO_BITMAP)) {
                /*
                 * Disable the bitmap via an invalid offset. We still cache
                 * the previous bitmap owner and the IO bitmap contents:
                 */
                tss->x86_tss.io_bitmap_base = INVALID_IO_BITMAP_OFFSET;
                return;
        }

        if (likely(next == tss->io_bitmap_owner)) {
                /*
                 * The previous owner of the bitmap (and hence the bitmap
                 * content) matches the next task, so we don't have to do
                 * anything but set a valid offset in the TSS:
                 */
                tss->x86_tss.io_bitmap_base = IO_BITMAP_OFFSET;
                return;
        }
        /*
         * Lazy TSS I/O bitmap copy. We set an invalid offset here and
         * let the task get a GPF in case an I/O instruction is
         * performed. The GPF handler verifies that the faulting task
         * has a valid I/O bitmap and, if true, performs the real copy
         * and restarts the instruction. This saves us redundant copies
         * when the currently switched task does not perform any I/O
         * during its timeslice.
         */
        tss->x86_tss.io_bitmap_base = INVALID_IO_BITMAP_OFFSET_LAZY;
}

/*
 * switch_to(x,y) should switch tasks from x to y.
 *
 * We fsave/fwait so that an exception goes off at the right time
 * (as a call from the fsave or fwait in effect) rather than to
 * the wrong process. Lazy FP saving no longer makes any sense
 * with modern CPUs, and this simplifies a lot of things (SMP
 * and UP become the same).
 *
 * NOTE! We used to use the x86 hardware context switching. The
 * reason for not using it any more becomes apparent when you
 * try to recover gracefully from saved state that is no longer
 * valid (stale segment register values in particular). With the
 * hardware task-switch, there is no way to fix up bad state in
 * a reasonable manner.
 *
 * The fact that Intel documents the hardware task-switching to
 * be slow is something of a red herring - this code is not
 * noticeably faster. However, there _is_ some room for improvement
 * here, so the performance issues may eventually be a valid point.
 * More important, however, is the fact that this allows us much
 * more flexibility.
 *
 * The return value (in %ax) will be the "prev" task after
 * the task-switch, and shows up in ret_from_fork in entry.S,
 * for example.
 */

struct task_struct *__switch_to(struct task_struct *prev_p, struct task_struct *next_p)
{
        struct thread_struct *prev = &prev_p->thread,
                             *next = &next_p->thread;
        int cpu = smp_processor_id();
        struct tss_struct *tss = &per_cpu(init_tss, cpu);

        /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */

        __unlazy_fpu(prev_p);

        /* we're going to use this soon, after a few expensive things */
        if (next_p->fpu_counter > 5)
                prefetch(&next->i387.fxsave);

        /*
         * Reload esp0.
         */
        load_sp0(tss, next);

        /*
         * Save away %gs. No need to save %fs, as it was saved on the
         * stack on entry. No need to save %es and %ds, as those are
         * always kernel segments while inside the kernel. Doing this
         * before setting the new TLS descriptors avoids the situation
         * where we temporarily have non-reloadable segments in %fs
         * and %gs. This could be an issue if the NMI handler ever
         * used %fs or %gs (it does not today), or if the kernel is
         * running inside of a hypervisor layer.
         */
        savesegment(gs, prev->gs);

        /*
         * Load the per-thread Thread-Local Storage descriptor.
         */
        load_TLS(next, cpu);

        /*
         * Restore IOPL if needed. In normal use, the flags restore
         * in the switch assembly will handle this. But if the kernel
         * is running virtualized at a non-zero CPL, the popf will
         * not restore flags, so it must be done in a separate step.
         */
        if (get_kernel_rpl() && unlikely(prev->iopl != next->iopl))
                set_iopl_mask(next->iopl);

        /*
         * Now maybe handle debug registers and/or IO bitmaps.
         */
        if (unlikely(task_thread_info(prev_p)->flags & _TIF_WORK_CTXSW_PREV ||
                     task_thread_info(next_p)->flags & _TIF_WORK_CTXSW_NEXT))
                __switch_to_xtra(prev_p, next_p, tss);

        /*
         * Leave lazy mode, flushing any hypercalls made here.
         * This must be done before restoring TLS segments so
         * the GDT and LDT are properly updated, and must be
         * done before math_state_restore, so the TS bit is up
         * to date.
         */
        arch_leave_lazy_cpu_mode();

        /*
         * If the task has used the FPU for the last 5 timeslices, just
         * do a full restore of the math state immediately to avoid the
         * trap; the chances of needing the FPU soon are obviously high
         * now.
         */
        if (next_p->fpu_counter > 5)
                math_state_restore();

        /*
         * Restore %gs if needed (which is common).
         */
        if (prev->gs | next->gs)
                loadsegment(gs, next->gs);

        x86_write_percpu(current_task, next_p);

        return prev_p;
}

asmlinkage int sys_fork(struct pt_regs regs)
{
        return do_fork(SIGCHLD, regs.sp, &regs, 0, NULL, NULL);
}

asmlinkage int sys_clone(struct pt_regs regs)
{
        unsigned long clone_flags;
        unsigned long newsp;
        int __user *parent_tidptr, *child_tidptr;

        clone_flags = regs.bx;
        newsp = regs.cx;
        parent_tidptr = (int __user *)regs.dx;
        child_tidptr = (int __user *)regs.di;
        if (!newsp)
                newsp = regs.sp;
        return do_fork(clone_flags, newsp, &regs, 0, parent_tidptr, child_tidptr);
}

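/*
 * Register convention recap, taken from the assignments above plus the
 * CLONE_SETTLS handling in copy_thread(): a raw i386 clone syscall
 * passes clone_flags in %ebx, the new stack pointer in %ecx, the
 * parent TID pointer in %edx and the child TID pointer in %edi, while
 * the TLS user_desc pointer travels in %esi.
 */
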
/*
 * This is trivial, and on the face of it looks like it
 * could equally well be done in user mode.
 *
 * Not so, for quite unobvious reasons - register pressure.
 * In user mode vfork() cannot have a stack frame, and if
 * done by calling the "clone()" system call directly, you
 * do not have enough call-clobbered registers to hold all
 * the information you need.
 */
asmlinkage int sys_vfork(struct pt_regs regs)
{
        return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs.sp, &regs, 0, NULL, NULL);
}

/*
 * sys_execve() executes a new program.
 */
asmlinkage int sys_execve(struct pt_regs regs)
{
        int error;
        char *filename;

        filename = getname((char __user *) regs.bx);
        error = PTR_ERR(filename);
        if (IS_ERR(filename))
                goto out;
        error = do_execve(filename,
                        (char __user * __user *) regs.cx,
                        (char __user * __user *) regs.dx,
                        &regs);
        if (error == 0) {
                /* Make sure we don't return using sysenter. */
                set_thread_flag(TIF_IRET);
        }
        putname(filename);
out:
        return error;
}

#define top_esp (THREAD_SIZE - sizeof(unsigned long))
#define top_ebp (THREAD_SIZE - 2*sizeof(unsigned long))

unsigned long get_wchan(struct task_struct *p)
{
        unsigned long bp, sp, ip;
        unsigned long stack_page;
        int count = 0;

        if (!p || p == current || p->state == TASK_RUNNING)
                return 0;
        stack_page = (unsigned long)task_stack_page(p);
        sp = p->thread.sp;
        if (!stack_page || sp < stack_page || sp > top_esp+stack_page)
                return 0;
        /* include/asm-i386/system.h:switch_to() pushes bp last. */
        bp = *(unsigned long *) sp;
        do {
                if (bp < stack_page || bp > top_ebp+stack_page)
                        return 0;
                ip = *(unsigned long *) (bp+4);
                if (!in_sched_functions(ip))
                        return ip;
                bp = *(unsigned long *) bp;
        } while (count++ < 16);
        return 0;
}

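/*
 * Frame-walk sketch for get_wchan() above (standard i386 frame layout,
 * stated for illustration):
 *
 *      bp   -> [ caller's saved ebp ]  (next bp in the chain)
 *      bp+4 -> [ return address     ]  (the ip tested above)
 *
 * The walk returns the first return address outside the scheduler, and
 * gives up after 16 frames as a safety bound.
 */
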
unsigned long arch_align_stack(unsigned long sp)
{
        if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
                sp -= get_random_int() % 8192;
        return sp & ~0xf;
}

unsigned long arch_randomize_brk(struct mm_struct *mm)
{
        unsigned long range_end = mm->brk + 0x02000000;
        return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
}

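/*
 * Worked example for the two helpers above (addresses illustrative):
 * arch_align_stack() lowers sp by up to 8191 bytes and then masks to
 * 16-byte alignment, so sp = 0xbffff000 becomes some 16-byte-aligned
 * value in [0xbfffd000, 0xbffff000]. arch_randomize_brk() places the
 * heap start somewhere in the 32 MiB (0x02000000-byte) window starting
 * at mm->brk, falling back to mm->brk itself if randomization fails.
 */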