process_32.c

/*
 *  Copyright (C) 1995  Linus Torvalds
 *
 *  Pentium III FXSR, SSE support
 *	Gareth Hughes <gareth@valinux.com>, May 2000
 */

/*
 * This file handles the architecture-dependent parts of process handling..
 */

#include <stdarg.h>

#include <linux/cpu.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/elfcore.h>
#include <linux/smp.h>
#include <linux/stddef.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/user.h>
#include <linux/a.out.h>
#include <linux/interrupt.h>
#include <linux/utsname.h>
#include <linux/delay.h>
#include <linux/reboot.h>
#include <linux/init.h>
#include <linux/mc146818rtc.h>
#include <linux/module.h>
#include <linux/kallsyms.h>
#include <linux/ptrace.h>
#include <linux/random.h>
#include <linux/personality.h>
#include <linux/tick.h>
#include <linux/percpu.h>

#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/system.h>
#include <asm/io.h>
#include <asm/ldt.h>
#include <asm/processor.h>
#include <asm/i387.h>
#include <asm/desc.h>
#include <asm/vm86.h>
#ifdef CONFIG_MATH_EMULATION
#include <asm/math_emu.h>
#endif

#include <linux/err.h>

#include <asm/tlbflush.h>
#include <asm/cpu.h>
#include <asm/kdebug.h>

asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");

static int hlt_counter;

unsigned long boot_option_idle_override = 0;
EXPORT_SYMBOL(boot_option_idle_override);

DEFINE_PER_CPU(struct task_struct *, current_task) = &init_task;
EXPORT_PER_CPU_SYMBOL(current_task);

DEFINE_PER_CPU(int, cpu_number);
EXPORT_PER_CPU_SYMBOL(cpu_number);

/*
 * Return saved PC of a blocked thread.
 */
unsigned long thread_saved_pc(struct task_struct *tsk)
{
	return ((unsigned long *)tsk->thread.sp)[3];
}
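/*
 * Editor's note (hedged): the magic index 3 above is tied to the exact
 * push sequence of the switch_to() macro in <asm/system.h> for this
 * tree (pushfl/pushl %ebp before %esp is saved). If that assembly ever
 * changes, this offset must change with it; treat it as layout-dependent,
 * not as a stable ABI.
 */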
/*
 * Power management idle function, if any..
 */
void (*pm_idle)(void);
EXPORT_SYMBOL(pm_idle);
static DEFINE_PER_CPU(unsigned int, cpu_idle_state);

void disable_hlt(void)
{
	hlt_counter++;
}
EXPORT_SYMBOL(disable_hlt);

void enable_hlt(void)
{
	hlt_counter--;
}
EXPORT_SYMBOL(enable_hlt);

/*
 * We use this if we don't have any better
 * idle routine..
 */
void default_idle(void)
{
	if (!hlt_counter && boot_cpu_data.hlt_works_ok) {
		current_thread_info()->status &= ~TS_POLLING;
		/*
		 * TS_POLLING-cleared state must be visible before we
		 * test NEED_RESCHED:
		 */
		smp_mb();

		local_irq_disable();
		if (!need_resched()) {
			ktime_t t0, t1;
			u64 t0n, t1n;

			t0 = ktime_get();
			t0n = ktime_to_ns(t0);
			safe_halt();	/* enables interrupts racelessly */
			local_irq_disable();
			t1 = ktime_get();
			t1n = ktime_to_ns(t1);
			sched_clock_idle_wakeup_event(t1n - t0n);
		}
		local_irq_enable();
		current_thread_info()->status |= TS_POLLING;
	} else {
		/* loop is done by the caller */
		cpu_relax();
	}
}
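/*
 * Editor's note on why the halt is raceless: safe_halt() expands to
 * "sti; hlt" on x86. STI enables interrupt delivery only after the
 * *following* instruction, so a wakeup interrupt that became pending
 * while interrupts were off is recognized exactly as the CPU enters
 * HLT and wakes it, instead of being taken (and its wakeup lost)
 * between the need_resched() test and the halt.
 */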
#ifdef CONFIG_APM_MODULE
EXPORT_SYMBOL(default_idle);
#endif

/*
 * On SMP it's slightly faster (but much more power-consuming!)
 * to poll the ->work.need_resched flag instead of waiting for the
 * cross-CPU IPI to arrive. Use this option with caution.
 */
static void poll_idle(void)
{
	cpu_relax();
}

#ifdef CONFIG_HOTPLUG_CPU
#include <asm/nmi.h>
/* We don't actually take CPU down, just spin without interrupts. */
static inline void play_dead(void)
{
	/* This must be done before dead CPU ack */
	cpu_exit_clear();
	wbinvd();
	mb();
	/* Ack it */
	__get_cpu_var(cpu_state) = CPU_DEAD;

	/*
	 * With physical CPU hotplug, we should halt the cpu
	 */
	local_irq_disable();
	while (1)
		halt();
}
#else
static inline void play_dead(void)
{
	BUG();
}
#endif /* CONFIG_HOTPLUG_CPU */

/*
 * The idle thread. There's no useful work to be
 * done, so just try to conserve power and have a
 * low exit latency (ie sit in a loop waiting for
 * somebody to say that they'd like to reschedule)
 */
void cpu_idle(void)
{
	int cpu = smp_processor_id();

	current_thread_info()->status |= TS_POLLING;

	/* endless idle loop with no priority at all */
	while (1) {
		tick_nohz_stop_sched_tick();
		while (!need_resched()) {
			void (*idle)(void);

			if (__get_cpu_var(cpu_idle_state))
				__get_cpu_var(cpu_idle_state) = 0;

			check_pgt_cache();
			rmb();
			idle = pm_idle;

			if (rcu_pending(cpu))
				rcu_check_callbacks(cpu, 0);

			if (!idle)
				idle = default_idle;

			if (cpu_is_offline(cpu))
				play_dead();

			__get_cpu_var(irq_stat).idle_timestamp = jiffies;
			idle();
		}
		tick_nohz_restart_sched_tick();
		preempt_enable_no_resched();
		schedule();
		preempt_disable();
	}
}

static void do_nothing(void *unused)
{
}

void cpu_idle_wait(void)
{
	unsigned int cpu, this_cpu = get_cpu();
	cpumask_t map, tmp = current->cpus_allowed;

	set_cpus_allowed(current, cpumask_of_cpu(this_cpu));
	put_cpu();

	cpus_clear(map);
	for_each_online_cpu(cpu) {
		per_cpu(cpu_idle_state, cpu) = 1;
		cpu_set(cpu, map);
	}

	__get_cpu_var(cpu_idle_state) = 0;

	wmb();
	do {
		ssleep(1);
		for_each_online_cpu(cpu) {
			if (cpu_isset(cpu, map) && !per_cpu(cpu_idle_state, cpu))
				cpu_clear(cpu, map);
		}
		cpus_and(map, map, cpu_online_map);
		/*
		 * We waited 1 sec; if a CPU still did not call idle,
		 * it may be because it is already in idle and not
		 * waking up because it has nothing to do.
		 * Give all the remaining CPUs a kick.
		 */
		smp_call_function_mask(map, do_nothing, 0, 0);
	} while (!cpus_empty(map));

	set_cpus_allowed(current, tmp);
}
EXPORT_SYMBOL_GPL(cpu_idle_wait);

/*
 * This uses the new MONITOR/MWAIT instructions on P4 processors with PNI,
 * which can obviate the IPI to trigger checking of need_resched.
 * We execute MONITOR against need_resched and enter an optimized wait state
 * through MWAIT. Whenever someone changes need_resched, we are woken
 * up from MWAIT (without an IPI).
 *
 * New with Core Duo processors, MWAIT can take some hints based on CPU
 * capability.
 */
void mwait_idle_with_hints(unsigned long ax, unsigned long cx)
{
	if (!need_resched()) {
		__monitor((void *)&current_thread_info()->flags, 0, 0);
		smp_mb();
		if (!need_resched())
			__mwait(ax, cx);
	}
}
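/*
 * Editor's note on the hint encoding (added, per the Intel SDM; verify
 * against your documentation): the "ax" argument ends up in EAX, where
 * bits 7:4 select the target C-state and bits 3:0 a sub-state; "cx"
 * ends up in ECX, where bit 0 asks the CPU to treat masked interrupts
 * as break events. The (0, 0) call below is the plain C1 wait.
 */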
/* Default MONITOR/MWAIT with no hints, used for default C1 state */
static void mwait_idle(void)
{
	local_irq_enable();
	mwait_idle_with_hints(0, 0);
}

void __cpuinit select_idle_routine(const struct cpuinfo_x86 *c)
{
	if (cpu_has(c, X86_FEATURE_MWAIT)) {
		printk("monitor/mwait feature present.\n");
		/*
		 * Skip, if setup has overridden idle.
		 * One CPU supports mwait => All CPUs support mwait
		 */
		if (!pm_idle) {
			printk("using mwait in idle threads.\n");
			pm_idle = mwait_idle;
		}
	}
}

static int __init idle_setup(char *str)
{
	if (!strcmp(str, "poll")) {
		printk("using polling idle threads.\n");
		pm_idle = poll_idle;
#ifdef CONFIG_X86_SMP
		if (smp_num_siblings > 1)
			printk("WARNING: polling idle and HT enabled, performance may degrade.\n");
#endif
	} else if (!strcmp(str, "mwait"))
		force_mwait = 1;
	else
		return -1;

	boot_option_idle_override = 1;
	return 0;
}
early_param("idle", idle_setup);
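/*
 * Editor's usage note: because of the early_param() hook above, the
 * override is chosen from the kernel command line; booting with
 * "idle=poll" forces poll_idle, and "idle=mwait" forces MWAIT even
 * where it would not be picked by default.
 */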
void __show_registers(struct pt_regs *regs, int all)
{
	unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L;
	unsigned long d0, d1, d2, d3, d6, d7;
	unsigned long sp;
	unsigned short ss, gs;

	if (user_mode_vm(regs)) {
		sp = regs->sp;
		ss = regs->ss & 0xffff;
		savesegment(gs, gs);
	} else {
		sp = (unsigned long) (&regs->sp);
		savesegment(ss, ss);
		savesegment(gs, gs);
	}

	printk("\n");
	printk("Pid: %d, comm: %s %s (%s %.*s)\n",
			task_pid_nr(current), current->comm,
			print_tainted(), init_utsname()->release,
			(int)strcspn(init_utsname()->version, " "),
			init_utsname()->version);

	printk("EIP: %04x:[<%08lx>] EFLAGS: %08lx CPU: %d\n",
			0xffff & regs->cs, regs->ip, regs->flags,
			smp_processor_id());
	print_symbol("EIP is at %s\n", regs->ip);

	printk("EAX: %08lx EBX: %08lx ECX: %08lx EDX: %08lx\n",
		regs->ax, regs->bx, regs->cx, regs->dx);
	printk("ESI: %08lx EDI: %08lx EBP: %08lx ESP: %08lx\n",
		regs->si, regs->di, regs->bp, sp);
	printk(" DS: %04x ES: %04x FS: %04x GS: %04x SS: %04x\n",
	       regs->ds & 0xffff, regs->es & 0xffff,
	       regs->fs & 0xffff, gs, ss);

	if (!all)
		return;

	cr0 = read_cr0();
	cr2 = read_cr2();
	cr3 = read_cr3();
	cr4 = read_cr4_safe();
	printk("CR0: %08lx CR2: %08lx CR3: %08lx CR4: %08lx\n",
			cr0, cr2, cr3, cr4);

	get_debugreg(d0, 0);
	get_debugreg(d1, 1);
	get_debugreg(d2, 2);
	get_debugreg(d3, 3);
	printk("DR0: %08lx DR1: %08lx DR2: %08lx DR3: %08lx\n",
			d0, d1, d2, d3);

	get_debugreg(d6, 6);
	get_debugreg(d7, 7);
	printk("DR6: %08lx DR7: %08lx\n",
			d6, d7);
}

void show_regs(struct pt_regs *regs)
{
	__show_registers(regs, 1);
	show_trace(NULL, regs, &regs->sp, regs->bp);
}

/*
 * This gets run with %bx containing the
 * function to call, and %dx containing
 * the "args".
 */
extern void kernel_thread_helper(void);

/*
 * Create a kernel thread
 */
int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
{
	struct pt_regs regs;

	memset(&regs, 0, sizeof(regs));

	regs.bx = (unsigned long) fn;
	regs.dx = (unsigned long) arg;

	regs.ds = __USER_DS;
	regs.es = __USER_DS;
	regs.fs = __KERNEL_PERCPU;
	regs.orig_ax = -1;
	regs.ip = (unsigned long) kernel_thread_helper;
	regs.cs = __KERNEL_CS | get_kernel_rpl();
	regs.flags = X86_EFLAGS_IF | X86_EFLAGS_SF | X86_EFLAGS_PF | 0x2;

	/* Ok, create the new process.. */
	return do_fork(flags | CLONE_VM | CLONE_UNTRACED, 0, &regs, 0, NULL, NULL);
}
EXPORT_SYMBOL(kernel_thread);
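/*
 * Editor's usage sketch (added; the caller and function name are
 * hypothetical, not from this file): a driver would typically start a
 * kernel thread with something like
 *
 *	static int my_thread_fn(void *unused)
 *	{
 *		// ... do work, then exit
 *		return 0;
 *	}
 *
 *	kernel_thread(my_thread_fn, NULL, CLONE_FS | CLONE_FILES | SIGCHLD);
 *
 * Note that CLONE_VM | CLONE_UNTRACED are OR'd in above, so every such
 * thread shares the kernel address space and is hidden from ptrace.
 */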
/*
 * Free current thread data structures etc..
 */
void exit_thread(void)
{
	/* The process may have allocated an io port bitmap... nuke it. */
	if (unlikely(test_thread_flag(TIF_IO_BITMAP))) {
		struct task_struct *tsk = current;
		struct thread_struct *t = &tsk->thread;
		int cpu = get_cpu();
		struct tss_struct *tss = &per_cpu(init_tss, cpu);

		kfree(t->io_bitmap_ptr);
		t->io_bitmap_ptr = NULL;
		clear_thread_flag(TIF_IO_BITMAP);
		/*
		 * Careful, clear this in the TSS too:
		 */
		memset(tss->io_bitmap, 0xff, tss->io_bitmap_max);
		t->io_bitmap_max = 0;
		tss->io_bitmap_owner = NULL;
		tss->io_bitmap_max = 0;
		tss->x86_tss.io_bitmap_base = INVALID_IO_BITMAP_OFFSET;
		put_cpu();
	}
}

void flush_thread(void)
{
	struct task_struct *tsk = current;

	tsk->thread.debugreg0 = 0;
	tsk->thread.debugreg1 = 0;
	tsk->thread.debugreg2 = 0;
	tsk->thread.debugreg3 = 0;
	tsk->thread.debugreg6 = 0;
	tsk->thread.debugreg7 = 0;
	memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
	clear_tsk_thread_flag(tsk, TIF_DEBUG);
	/*
	 * Forget coprocessor state..
	 */
	clear_fpu(tsk);
	clear_used_math();
}

void release_thread(struct task_struct *dead_task)
{
	BUG_ON(dead_task->mm);
	release_vm86_irqs(dead_task);
}

/*
 * This gets called before we allocate a new thread and copy
 * the current task into it.
 */
void prepare_to_copy(struct task_struct *tsk)
{
	unlazy_fpu(tsk);
}

int copy_thread(int nr, unsigned long clone_flags, unsigned long sp,
	unsigned long unused,
	struct task_struct *p, struct pt_regs *regs)
{
	struct pt_regs *childregs;
	struct task_struct *tsk;
	int err;

	childregs = task_pt_regs(p);
	*childregs = *regs;
	childregs->ax = 0;
	childregs->sp = sp;

	p->thread.sp = (unsigned long) childregs;
	p->thread.sp0 = (unsigned long) (childregs+1);

	p->thread.ip = (unsigned long) ret_from_fork;

	savesegment(gs, p->thread.gs);

	tsk = current;
	if (unlikely(test_tsk_thread_flag(tsk, TIF_IO_BITMAP))) {
		p->thread.io_bitmap_ptr = kmemdup(tsk->thread.io_bitmap_ptr,
						IO_BITMAP_BYTES, GFP_KERNEL);
		if (!p->thread.io_bitmap_ptr) {
			p->thread.io_bitmap_max = 0;
			return -ENOMEM;
		}
		set_tsk_thread_flag(p, TIF_IO_BITMAP);
	}

	err = 0;

	/*
	 * Set a new TLS for the child thread?
	 */
	if (clone_flags & CLONE_SETTLS)
		err = do_set_thread_area(p, -1,
			(struct user_desc __user *)childregs->si, 0);

	if (err && p->thread.io_bitmap_ptr) {
		kfree(p->thread.io_bitmap_ptr);
		p->thread.io_bitmap_max = 0;
	}
	return err;
}
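/*
 * Editor's note on childregs->ax = 0 above: the child's pt_regs copy is
 * what it sees when it returns to user space through ret_from_fork, and
 * ax holds the syscall return value there. Zeroing it is what makes
 * fork()/clone() return 0 in the child, while the parent gets the new
 * pid as do_fork()'s return value.
 */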
/*
 * fill in the user structure for a core dump..
 */
void dump_thread(struct pt_regs *regs, struct user *dump)
{
	u16 gs;

/* changed the size calculations - should hopefully work better. lbt */
	dump->magic = CMAGIC;
	dump->start_code = 0;
	dump->start_stack = regs->sp & ~(PAGE_SIZE - 1);
	dump->u_tsize = ((unsigned long) current->mm->end_code) >> PAGE_SHIFT;
	dump->u_dsize = ((unsigned long) (current->mm->brk + (PAGE_SIZE-1))) >> PAGE_SHIFT;
	dump->u_dsize -= dump->u_tsize;
	dump->u_ssize = 0;
	dump->u_debugreg[0] = current->thread.debugreg0;
	dump->u_debugreg[1] = current->thread.debugreg1;
	dump->u_debugreg[2] = current->thread.debugreg2;
	dump->u_debugreg[3] = current->thread.debugreg3;
	dump->u_debugreg[4] = 0;
	dump->u_debugreg[5] = 0;
	dump->u_debugreg[6] = current->thread.debugreg6;
	dump->u_debugreg[7] = current->thread.debugreg7;

	if (dump->start_stack < TASK_SIZE)
		dump->u_ssize = ((unsigned long) (TASK_SIZE - dump->start_stack)) >> PAGE_SHIFT;

	dump->regs.bx = regs->bx;
	dump->regs.cx = regs->cx;
	dump->regs.dx = regs->dx;
	dump->regs.si = regs->si;
	dump->regs.di = regs->di;
	dump->regs.bp = regs->bp;
	dump->regs.ax = regs->ax;
	dump->regs.ds = (u16)regs->ds;
	dump->regs.es = (u16)regs->es;
	dump->regs.fs = (u16)regs->fs;
	savesegment(gs, gs);
	dump->regs.orig_ax = regs->orig_ax;
	dump->regs.ip = regs->ip;
	dump->regs.cs = (u16)regs->cs;
	dump->regs.flags = regs->flags;
	dump->regs.sp = regs->sp;
	dump->regs.ss = (u16)regs->ss;

	dump->u_fpvalid = dump_fpu(regs, &dump->i387);
}
EXPORT_SYMBOL(dump_thread);
#ifdef CONFIG_SECCOMP
static void hard_disable_TSC(void)
{
	write_cr4(read_cr4() | X86_CR4_TSD);
}
void disable_TSC(void)
{
	preempt_disable();
	if (!test_and_set_thread_flag(TIF_NOTSC))
		/*
		 * Must flip the CPU state synchronously with
		 * TIF_NOTSC in the current running context.
		 */
		hard_disable_TSC();
	preempt_enable();
}
static void hard_enable_TSC(void)
{
	write_cr4(read_cr4() & ~X86_CR4_TSD);
}
#endif /* CONFIG_SECCOMP */

static noinline void
__switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
		 struct tss_struct *tss)
{
	struct thread_struct *prev, *next;
	unsigned long debugctl;

	prev = &prev_p->thread;
	next = &next_p->thread;

	debugctl = prev->debugctlmsr;
	if (next->ds_area_msr != prev->ds_area_msr) {
		/* we clear debugctl to make sure DS
		 * is not in use when we change it */
		debugctl = 0;
		wrmsrl(MSR_IA32_DEBUGCTLMSR, 0);
		wrmsr(MSR_IA32_DS_AREA, next->ds_area_msr, 0);
	}

	if (next->debugctlmsr != debugctl)
		wrmsr(MSR_IA32_DEBUGCTLMSR, next->debugctlmsr, 0);

	if (test_tsk_thread_flag(next_p, TIF_DEBUG)) {
		set_debugreg(next->debugreg0, 0);
		set_debugreg(next->debugreg1, 1);
		set_debugreg(next->debugreg2, 2);
		set_debugreg(next->debugreg3, 3);
		/* no 4 and 5 */
		set_debugreg(next->debugreg6, 6);
		set_debugreg(next->debugreg7, 7);
	}

#ifdef CONFIG_SECCOMP
	if (test_tsk_thread_flag(prev_p, TIF_NOTSC) ^
	    test_tsk_thread_flag(next_p, TIF_NOTSC)) {
		/* prev and next are different */
		if (test_tsk_thread_flag(next_p, TIF_NOTSC))
			hard_disable_TSC();
		else
			hard_enable_TSC();
	}
#endif
	if (test_tsk_thread_flag(prev_p, TIF_BTS_TRACE_TS))
		ptrace_bts_take_timestamp(prev_p, BTS_TASK_DEPARTS);

	if (test_tsk_thread_flag(next_p, TIF_BTS_TRACE_TS))
		ptrace_bts_take_timestamp(next_p, BTS_TASK_ARRIVES);

	if (!test_tsk_thread_flag(next_p, TIF_IO_BITMAP)) {
		/*
		 * Disable the bitmap via an invalid offset. We still cache
		 * the previous bitmap owner and the IO bitmap contents:
		 */
		tss->x86_tss.io_bitmap_base = INVALID_IO_BITMAP_OFFSET;
		return;
	}

	if (likely(next == tss->io_bitmap_owner)) {
		/*
		 * The previous owner of the bitmap (hence the bitmap content)
		 * matches the next task; we don't have to do anything but
		 * set a valid offset in the TSS:
		 */
		tss->x86_tss.io_bitmap_base = IO_BITMAP_OFFSET;
		return;
	}
	/*
	 * Lazy TSS I/O bitmap copy. We set an invalid offset here and
	 * let the task get a GPF in case an I/O instruction is performed.
	 * The GPF handler verifies that the faulting task has a valid
	 * I/O bitmap and, if so, does the real copy and restarts the
	 * instruction. This saves us redundant copies when the currently
	 * switched task does not perform any I/O during its timeslice.
	 */
	tss->x86_tss.io_bitmap_base = INVALID_IO_BITMAP_OFFSET_LAZY;
}
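/*
 * Editor's note (hedged): in this kernel generation the lazy-copy
 * fix-up referred to above lives in the #GP handler; look for the
 * INVALID_IO_BITMAP_OFFSET_LAZY check in do_general_protection() in
 * traps_32.c to follow the other half of the protocol.
 */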
/*
 *	switch_to(x,y) should switch tasks from x to y.
 *
 * We fsave/fwait so that an exception goes off at the right time
 * (as a call from the fsave or fwait in effect) rather than to
 * the wrong process. Lazy FP saving no longer makes any sense
 * with modern CPUs, and this simplifies a lot of things (SMP
 * and UP become the same).
 *
 * NOTE! We used to use the x86 hardware context switching. The
 * reason for not using it any more becomes apparent when you
 * try to recover gracefully from saved state that is no longer
 * valid (stale segment register values in particular). With the
 * hardware task-switch, there is no way to fix up bad state in
 * a reasonable manner.
 *
 * The fact that Intel documents the hardware task-switching to
 * be slow is something of a red herring - this code is not
 * noticeably faster. However, there _is_ some room for improvement
 * here, so the performance issues may eventually become a valid
 * point. More important, however, is the fact that this allows
 * us much more flexibility.
 *
 * The return value (in %ax) will be the "prev" task after
 * the task-switch, and shows up in ret_from_fork in entry.S,
 * for example.
 */
struct task_struct *__switch_to(struct task_struct *prev_p, struct task_struct *next_p)
{
	struct thread_struct *prev = &prev_p->thread,
			     *next = &next_p->thread;
	int cpu = smp_processor_id();
	struct tss_struct *tss = &per_cpu(init_tss, cpu);

	/* never put a printk in __switch_to... printk() calls wake_up*() indirectly */

	__unlazy_fpu(prev_p);

	/* we're going to use this soon, after a few expensive things */
	if (next_p->fpu_counter > 5)
		prefetch(&next->i387.fxsave);

	/*
	 * Reload esp0.
	 */
	load_sp0(tss, next);

	/*
	 * Save away %gs. No need to save %fs, as it was saved on the
	 * stack on entry. No need to save %es and %ds, as those are
	 * always kernel segments while inside the kernel. Doing this
	 * before setting the new TLS descriptors avoids the situation
	 * where we temporarily have non-reloadable segments in %fs
	 * and %gs. This could be an issue if the NMI handler ever
	 * used %fs or %gs (it does not today), or if the kernel is
	 * running inside of a hypervisor layer.
	 */
	savesegment(gs, prev->gs);

	/*
	 * Load the per-thread Thread-Local Storage descriptor.
	 */
	load_TLS(next, cpu);

	/*
	 * Restore IOPL if needed. In normal use, the flags restore
	 * in the switch assembly will handle this. But if the kernel
	 * is running virtualized at a non-zero CPL, the popf will
	 * not restore flags, so it must be done in a separate step.
	 */
	if (get_kernel_rpl() && unlikely(prev->iopl != next->iopl))
		set_iopl_mask(next->iopl);

	/*
	 * Now maybe handle debug registers and/or IO bitmaps
	 */
	if (unlikely(task_thread_info(prev_p)->flags & _TIF_WORK_CTXSW_PREV ||
		     task_thread_info(next_p)->flags & _TIF_WORK_CTXSW_NEXT))
		__switch_to_xtra(prev_p, next_p, tss);

	/*
	 * Leave lazy mode, flushing any hypercalls made here.
	 * This must be done before restoring TLS segments so
	 * the GDT and LDT are properly updated, and must be
	 * done before math_state_restore, so the TS bit is up
	 * to date.
	 */
	arch_leave_lazy_cpu_mode();

	/* If the task has used fpu the last 5 timeslices, just do a full
	 * restore of the math state immediately to avoid the trap; the
	 * chances of needing FPU soon are obviously high now
	 */
	if (next_p->fpu_counter > 5)
		math_state_restore();

	/*
	 * Restore %gs if needed (which is common)
	 */
	if (prev->gs | next->gs)
		loadsegment(gs, next->gs);

	x86_write_percpu(current_task, next_p);

	return prev_p;
}
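/*
 * Editor's note on the "prev->gs | next->gs" test above: the bitwise OR
 * is deliberate - a single branch skips the comparatively expensive
 * segment register load in the case where both values are zero, i.e.
 * neither task uses %gs.
 */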
asmlinkage int sys_fork(struct pt_regs regs)
{
	return do_fork(SIGCHLD, regs.sp, &regs, 0, NULL, NULL);
}

asmlinkage int sys_clone(struct pt_regs regs)
{
	unsigned long clone_flags;
	unsigned long newsp;
	int __user *parent_tidptr, *child_tidptr;

	clone_flags = regs.bx;
	newsp = regs.cx;
	parent_tidptr = (int __user *)regs.dx;
	child_tidptr = (int __user *)regs.di;
	if (!newsp)
		newsp = regs.sp;
	return do_fork(clone_flags, newsp, &regs, 0, parent_tidptr, child_tidptr);
}

/*
 * This is trivial, and on the face of it looks like it
 * could equally well be done in user mode.
 *
 * Not so, for quite unobvious reasons - register pressure.
 * In user mode vfork() cannot have a stack frame, and if
 * done by calling the "clone()" system call directly, you
 * do not have enough call-clobbered registers to hold all
 * the information you need.
 */
asmlinkage int sys_vfork(struct pt_regs regs)
{
	return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs.sp, &regs, 0, NULL, NULL);
}

/*
 * sys_execve() executes a new program.
 */
asmlinkage int sys_execve(struct pt_regs regs)
{
	int error;
	char *filename;

	filename = getname((char __user *) regs.bx);
	error = PTR_ERR(filename);
	if (IS_ERR(filename))
		goto out;
	error = do_execve(filename,
			(char __user * __user *) regs.cx,
			(char __user * __user *) regs.dx,
			&regs);
	if (error == 0) {
		/* Make sure we don't return using sysenter.. */
		set_thread_flag(TIF_IRET);
	}
	putname(filename);
out:
	return error;
}
#define top_esp                (THREAD_SIZE - sizeof(unsigned long))
#define top_ebp                (THREAD_SIZE - 2*sizeof(unsigned long))

unsigned long get_wchan(struct task_struct *p)
{
	unsigned long bp, sp, ip;
	unsigned long stack_page;
	int count = 0;
	if (!p || p == current || p->state == TASK_RUNNING)
		return 0;
	stack_page = (unsigned long)task_stack_page(p);
	sp = p->thread.sp;
	if (!stack_page || sp < stack_page || sp > top_esp+stack_page)
		return 0;
	/* include/asm-i386/system.h:switch_to() pushes bp last. */
	bp = *(unsigned long *) sp;
	do {
		if (bp < stack_page || bp > top_ebp+stack_page)
			return 0;
		ip = *(unsigned long *) (bp+4);
		if (!in_sched_functions(ip))
			return ip;
		bp = *(unsigned long *) bp;
	} while (count++ < 16);
	return 0;
}
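/*
 * Editor's note: get_wchan() is a classic frame-pointer walk. On i386
 * each frame stores the caller's %ebp at [bp] and the return address at
 * [bp+4]; the walk stops at the first return address outside the
 * scheduler (that is the "wait channel"), and the count < 16 bound
 * keeps a corrupted frame chain from looping forever.
 */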
unsigned long arch_align_stack(unsigned long sp)
{
	if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
		sp -= get_random_int() % 8192;
	return sp & ~0xf;
}
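/*
 * Editor's note: the stack start is lowered by up to 8191 bytes of
 * random offset and then masked down to 16-byte alignment, which the
 * i386 ABI expects. The randomization is skipped for personalities that
 * request a fixed layout (ADDR_NO_RANDOMIZE) or when address-space
 * randomization is disabled system-wide.
 */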
unsigned long arch_randomize_brk(struct mm_struct *mm)
{
	unsigned long range_end = mm->brk + 0x02000000;
	return randomize_range(mm->brk, range_end, 0) ?: mm->brk;
}