
/*
 * linux/arch/i386/kernel/process.c
 *
 * Copyright (C) 1995 Linus Torvalds
 *
 * Pentium III FXSR, SSE support
 * Gareth Hughes <gareth@valinux.com>, May 2000
 */

/*
 * This file handles the architecture-dependent parts of process handling..
 */
#include <stdarg.h>

#include <linux/cpu.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/elfcore.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/stddef.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/user.h>
#include <linux/a.out.h>
#include <linux/interrupt.h>
#include <linux/config.h>
#include <linux/utsname.h>
#include <linux/delay.h>
#include <linux/reboot.h>
#include <linux/init.h>
#include <linux/mc146818rtc.h>
#include <linux/module.h>
#include <linux/kallsyms.h>
#include <linux/ptrace.h>
#include <linux/random.h>
#include <linux/kprobes.h>

#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/system.h>
#include <asm/io.h>
#include <asm/ldt.h>
#include <asm/processor.h>
#include <asm/i387.h>
#include <asm/desc.h>
#ifdef CONFIG_MATH_EMULATION
#include <asm/math_emu.h>
#endif

#include <linux/err.h>

#include <asm/tlbflush.h>
#include <asm/cpu.h>

asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");
static int hlt_counter;

unsigned long boot_option_idle_override = 0;
EXPORT_SYMBOL(boot_option_idle_override);

/*
 * Return saved PC of a blocked thread.
 */
unsigned long thread_saved_pc(struct task_struct *tsk)
{
        return ((unsigned long *)tsk->thread.esp)[3];
}

/*
 * Power management idle function, if any..
 */
void (*pm_idle)(void);
EXPORT_SYMBOL(pm_idle);

static DEFINE_PER_CPU(unsigned int, cpu_idle_state);
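/*
 * disable_hlt()/enable_hlt() let callers veto use of the HLT
 * instruction in the idle loop: while hlt_counter is non-zero,
 * default_idle() below busy-polls instead of halting the CPU.
 */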
void disable_hlt(void)
{
        hlt_counter++;
}
EXPORT_SYMBOL(disable_hlt);

void enable_hlt(void)
{
        hlt_counter--;
}
EXPORT_SYMBOL(enable_hlt);

/*
 * We use this if we don't have any better
 * idle routine..
 */
void default_idle(void)
{
        local_irq_enable();

        if (!hlt_counter && boot_cpu_data.hlt_works_ok) {
                clear_thread_flag(TIF_POLLING_NRFLAG);
                smp_mb__after_clear_bit();
                while (!need_resched()) {
                        local_irq_disable();
                        if (!need_resched())
                                safe_halt();
                        else
                                local_irq_enable();
                }
                set_thread_flag(TIF_POLLING_NRFLAG);
        } else {
                while (!need_resched())
                        cpu_relax();
        }
}
#ifdef CONFIG_APM_MODULE
EXPORT_SYMBOL(default_idle);
#endif

/*
 * On SMP it's slightly faster (but much more power-consuming!)
 * to poll the ->work.need_resched flag instead of waiting for the
 * cross-CPU IPI to arrive. Use this option with caution.
 */
static void poll_idle(void)
{
        local_irq_enable();

        asm volatile(
                "2:"
                "testl %0, %1;"
                "rep; nop;"
                "je 2b;"
                : : "i"(_TIF_NEED_RESCHED), "m" (current_thread_info()->flags));
}
#ifdef CONFIG_HOTPLUG_CPU
#include <asm/nmi.h>
/* We don't actually take CPU down, just spin without interrupts. */
static inline void play_dead(void)
{
        /* This must be done before dead CPU ack */
        cpu_exit_clear();
        wbinvd();
        mb();
        /* Ack it */
        __get_cpu_var(cpu_state) = CPU_DEAD;

        /*
         * With physical CPU hotplug, we should halt the cpu
         */
        local_irq_disable();
        while (1)
                halt();
}
#else
static inline void play_dead(void)
{
        BUG();
}
#endif /* CONFIG_HOTPLUG_CPU */
/*
 * The idle thread. There's no useful work to be
 * done, so just try to conserve power and have a
 * low exit latency (ie sit in a loop waiting for
 * somebody to say that they'd like to reschedule)
 */
void cpu_idle(void)
{
        int cpu = smp_processor_id();

        set_thread_flag(TIF_POLLING_NRFLAG);

        /* endless idle loop with no priority at all */
        while (1) {
                while (!need_resched()) {
                        void (*idle)(void);

                        if (__get_cpu_var(cpu_idle_state))
                                __get_cpu_var(cpu_idle_state) = 0;

                        rmb();
                        idle = pm_idle;

                        if (!idle)
                                idle = default_idle;

                        if (cpu_is_offline(cpu))
                                play_dead();

                        __get_cpu_var(irq_stat).idle_timestamp = jiffies;
                        idle();
                }
                preempt_enable_no_resched();
                schedule();
                preempt_disable();
        }
}
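/*
 * cpu_idle_wait() returns once every online CPU has gone through its
 * idle loop at least once since the call: each CPU's cpu_idle_state
 * flag is set here and cleared by cpu_idle() above, and we poll once
 * a second until every flag has been observed clear.
 */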
void cpu_idle_wait(void)
{
        unsigned int cpu, this_cpu = get_cpu();
        cpumask_t map;

        set_cpus_allowed(current, cpumask_of_cpu(this_cpu));
        put_cpu();

        cpus_clear(map);
        for_each_online_cpu(cpu) {
                per_cpu(cpu_idle_state, cpu) = 1;
                cpu_set(cpu, map);
        }

        __get_cpu_var(cpu_idle_state) = 0;

        wmb();
        do {
                ssleep(1);
                for_each_online_cpu(cpu) {
                        if (cpu_isset(cpu, map) && !per_cpu(cpu_idle_state, cpu))
                                cpu_clear(cpu, map);
                }
                cpus_and(map, map, cpu_online_map);
        } while (!cpus_empty(map));
}
EXPORT_SYMBOL_GPL(cpu_idle_wait);
/*
 * This uses new MONITOR/MWAIT instructions on P4 processors with PNI,
 * which can obviate IPI to trigger checking of need_resched.
 * We execute MONITOR against need_resched and enter optimized wait state
 * through MWAIT. Whenever someone changes need_resched, we would be woken
 * up from MWAIT (without an IPI).
 */
static void mwait_idle(void)
{
        local_irq_enable();

        while (!need_resched()) {
                __monitor((void *)&current_thread_info()->flags, 0, 0);
                smp_mb();
                if (need_resched())
                        break;
                __mwait(0, 0);
        }
}

void __devinit select_idle_routine(const struct cpuinfo_x86 *c)
{
        if (cpu_has(c, X86_FEATURE_MWAIT)) {
                printk("monitor/mwait feature present.\n");
                /*
                 * Skip, if setup has overridden idle.
                 * One CPU supports mwait => all CPUs support mwait
                 */
                if (!pm_idle) {
                        printk("using mwait in idle threads.\n");
                        pm_idle = mwait_idle;
                }
        }
}
static int __init idle_setup(char *str)
{
        if (!strncmp(str, "poll", 4)) {
                printk("using polling idle threads.\n");
                pm_idle = poll_idle;
#ifdef CONFIG_X86_SMP
                if (smp_num_siblings > 1)
                        printk("WARNING: polling idle and HT enabled, performance may degrade.\n");
#endif
        } else if (!strncmp(str, "halt", 4)) {
                printk("using halt in idle threads.\n");
                pm_idle = default_idle;
        }

        boot_option_idle_override = 1;
        return 1;
}
__setup("idle=", idle_setup);
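/*
 * Dump the register state of the given trap frame to the kernel log,
 * including the control registers (CR4 only on CPUs newer than the 486,
 * which lack it).
 */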
void show_regs(struct pt_regs * regs)
{
        unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L;

        printk("\n");
        printk("Pid: %d, comm: %20s\n", current->pid, current->comm);
        printk("EIP: %04x:[<%08lx>] CPU: %d\n", 0xffff & regs->xcs, regs->eip, smp_processor_id());
        print_symbol("EIP is at %s\n", regs->eip);

        if (user_mode(regs))
                printk(" ESP: %04x:%08lx", 0xffff & regs->xss, regs->esp);
        printk(" EFLAGS: %08lx %s (%s)\n",
                regs->eflags, print_tainted(), system_utsname.release);
        printk("EAX: %08lx EBX: %08lx ECX: %08lx EDX: %08lx\n",
                regs->eax, regs->ebx, regs->ecx, regs->edx);
        printk("ESI: %08lx EDI: %08lx EBP: %08lx",
                regs->esi, regs->edi, regs->ebp);
        printk(" DS: %04x ES: %04x\n",
                0xffff & regs->xds, 0xffff & regs->xes);

        cr0 = read_cr0();
        cr2 = read_cr2();
        cr3 = read_cr3();
        if (current_cpu_data.x86 > 4)
                cr4 = read_cr4();
        printk("CR0: %08lx CR2: %08lx CR3: %08lx CR4: %08lx\n", cr0, cr2, cr3, cr4);
        show_trace(NULL, &regs->esp);
}
/*
 * This gets run with %ebx containing the
 * function to call, and %edx containing
 * the "args".
 */
extern void kernel_thread_helper(void);
__asm__(".section .text\n"
        ".align 4\n"
        "kernel_thread_helper:\n\t"
        "movl %edx,%eax\n\t"
        "pushl %edx\n\t"
        "call *%ebx\n\t"
        "pushl %eax\n\t"
        "call do_exit\n"
        ".previous");

/*
 * Create a kernel thread
 */
int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
{
        struct pt_regs regs;

        memset(&regs, 0, sizeof(regs));

        regs.ebx = (unsigned long) fn;
        regs.edx = (unsigned long) arg;

        regs.xds = __USER_DS;
        regs.xes = __USER_DS;
        regs.orig_eax = -1;
        regs.eip = (unsigned long) kernel_thread_helper;
        regs.xcs = __KERNEL_CS;
        regs.eflags = X86_EFLAGS_IF | X86_EFLAGS_SF | X86_EFLAGS_PF | 0x2;

        /* Ok, create the new process.. */
        return do_fork(flags | CLONE_VM | CLONE_UNTRACED, 0, &regs, 0, NULL, NULL);
}
EXPORT_SYMBOL(kernel_thread);
/*
 * Free current thread data structures etc..
 */
void exit_thread(void)
{
        struct task_struct *tsk = current;
        struct thread_struct *t = &tsk->thread;

        /*
         * Remove function-return probe instances associated with this task
         * and put them back on the free list. Do not insert an exit probe for
         * this function, it will be disabled by kprobe_flush_task if you do.
         */
        kprobe_flush_task(tsk);

        /* The process may have allocated an io port bitmap... nuke it. */
        if (unlikely(NULL != t->io_bitmap_ptr)) {
                int cpu = get_cpu();
                struct tss_struct *tss = &per_cpu(init_tss, cpu);

                kfree(t->io_bitmap_ptr);
                t->io_bitmap_ptr = NULL;
                /*
                 * Careful, clear this in the TSS too:
                 */
                memset(tss->io_bitmap, 0xff, tss->io_bitmap_max);
                t->io_bitmap_max = 0;
                tss->io_bitmap_owner = NULL;
                tss->io_bitmap_max = 0;
                tss->io_bitmap_base = INVALID_IO_BITMAP_OFFSET;
                put_cpu();
        }
}
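/*
 * Reset the per-thread state that must not leak across an exec:
 * debug registers, TLS entries and FPU state.
 */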
void flush_thread(void)
{
        struct task_struct *tsk = current;

        memset(tsk->thread.debugreg, 0, sizeof(unsigned long)*8);
        memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
        /*
         * Forget coprocessor state..
         */
        clear_fpu(tsk);
        clear_used_math();
}
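/*
 * Release per-thread resources when the task is reaped: sanity-check
 * that the dead process no longer owns an LDT, and free any vm86
 * interrupts it may still hold.
 */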
void release_thread(struct task_struct *dead_task)
{
        if (dead_task->mm) {
                /* temporary debugging check */
                if (dead_task->mm->context.size) {
                        printk("WARNING: dead process %8s still has LDT? <%p/%d>\n",
                                dead_task->comm,
                                dead_task->mm->context.ldt,
                                dead_task->mm->context.size);
                        BUG();
                }
        }

        release_vm86_irqs(dead_task);
}
/*
 * This gets called before we allocate a new thread and copy
 * the current task into it.
 */
void prepare_to_copy(struct task_struct *tsk)
{
        unlazy_fpu(tsk);
}
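/*
 * Set up the kernel stack and thread state of a newly forked child:
 * build its pt_regs (with %eax forced to 0, so the child sees a zero
 * return from fork), duplicate the parent's I/O bitmap if it has one,
 * and install a new TLS entry when CLONE_SETTLS was requested.
 */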
int copy_thread(int nr, unsigned long clone_flags, unsigned long esp,
        unsigned long unused,
        struct task_struct * p, struct pt_regs * regs)
{
        struct pt_regs * childregs;
        struct task_struct *tsk;
        int err;

        childregs = ((struct pt_regs *) (THREAD_SIZE + (unsigned long) p->thread_info)) - 1;
        /*
         * The below -8 is to reserve 8 bytes on top of the ring0 stack.
         * This is necessary to guarantee that the entire "struct pt_regs"
         * is accessible even if the CPU hasn't stored the SS/ESP registers
         * on the stack (interrupt gate does not save these registers
         * when switching to the same priv ring).
         * Therefore beware: accessing the xss/esp fields of the
         * "struct pt_regs" is possible, but they may contain the
         * completely wrong values.
         */
        childregs = (struct pt_regs *) ((unsigned long) childregs - 8);
        *childregs = *regs;
        childregs->eax = 0;
        childregs->esp = esp;

        p->thread.esp = (unsigned long) childregs;
        p->thread.esp0 = (unsigned long) (childregs+1);

        p->thread.eip = (unsigned long) ret_from_fork;

        savesegment(fs, p->thread.fs);
        savesegment(gs, p->thread.gs);

        tsk = current;
        if (unlikely(NULL != tsk->thread.io_bitmap_ptr)) {
                p->thread.io_bitmap_ptr = kmalloc(IO_BITMAP_BYTES, GFP_KERNEL);
                if (!p->thread.io_bitmap_ptr) {
                        p->thread.io_bitmap_max = 0;
                        return -ENOMEM;
                }
                memcpy(p->thread.io_bitmap_ptr, tsk->thread.io_bitmap_ptr,
                        IO_BITMAP_BYTES);
        }

        /*
         * Set a new TLS for the child thread?
         */
        if (clone_flags & CLONE_SETTLS) {
                struct desc_struct *desc;
                struct user_desc info;
                int idx;

                err = -EFAULT;
                if (copy_from_user(&info, (void __user *)childregs->esi, sizeof(info)))
                        goto out;
                err = -EINVAL;
                if (LDT_empty(&info))
                        goto out;

                idx = info.entry_number;
                if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
                        goto out;

                desc = p->thread.tls_array + idx - GDT_ENTRY_TLS_MIN;
                desc->a = LDT_entry_a(&info);
                desc->b = LDT_entry_b(&info);
        }

        err = 0;
out:
        if (err && p->thread.io_bitmap_ptr) {
                kfree(p->thread.io_bitmap_ptr);
                p->thread.io_bitmap_max = 0;
        }
        return err;
}
/*
 * fill in the user structure for a core dump..
 */
void dump_thread(struct pt_regs * regs, struct user * dump)
{
        int i;

        /* changed the size calculations - should hopefully work better. lbt */
        dump->magic = CMAGIC;
        dump->start_code = 0;
        dump->start_stack = regs->esp & ~(PAGE_SIZE - 1);
        dump->u_tsize = ((unsigned long) current->mm->end_code) >> PAGE_SHIFT;
        dump->u_dsize = ((unsigned long) (current->mm->brk + (PAGE_SIZE-1))) >> PAGE_SHIFT;
        dump->u_dsize -= dump->u_tsize;
        dump->u_ssize = 0;
        for (i = 0; i < 8; i++)
                dump->u_debugreg[i] = current->thread.debugreg[i];

        if (dump->start_stack < TASK_SIZE)
                dump->u_ssize = ((unsigned long) (TASK_SIZE - dump->start_stack)) >> PAGE_SHIFT;

        dump->regs.ebx = regs->ebx;
        dump->regs.ecx = regs->ecx;
        dump->regs.edx = regs->edx;
        dump->regs.esi = regs->esi;
        dump->regs.edi = regs->edi;
        dump->regs.ebp = regs->ebp;
        dump->regs.eax = regs->eax;
        dump->regs.ds = regs->xds;
        dump->regs.es = regs->xes;
        savesegment(fs, dump->regs.fs);
        savesegment(gs, dump->regs.gs);
        dump->regs.orig_eax = regs->orig_eax;
        dump->regs.eip = regs->eip;
        dump->regs.cs = regs->xcs;
        dump->regs.eflags = regs->eflags;
        dump->regs.esp = regs->esp;
        dump->regs.ss = regs->xss;

        dump->u_fpvalid = dump_fpu(regs, &dump->i387);
}
EXPORT_SYMBOL(dump_thread);
/*
 * Capture the user space registers if the task is not running (in user space)
 */
int dump_task_regs(struct task_struct *tsk, elf_gregset_t *regs)
{
        struct pt_regs ptregs;

        ptregs = *(struct pt_regs *)
                ((unsigned long)tsk->thread_info+THREAD_SIZE - sizeof(ptregs));
        ptregs.xcs &= 0xffff;
        ptregs.xds &= 0xffff;
        ptregs.xes &= 0xffff;
        ptregs.xss &= 0xffff;

        elf_core_copy_regs(regs, &ptregs);

        return 1;
}
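/*
 * Decide what to do with the TSS I/O bitmap on a context switch:
 * disable it when the incoming task has no bitmap, re-enable it
 * cheaply when the incoming task already owns the cached copy, and
 * otherwise defer the copy until the task actually issues an I/O
 * instruction (see the lazy-copy comment below).
 */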
static inline void
handle_io_bitmap(struct thread_struct *next, struct tss_struct *tss)
{
        if (!next->io_bitmap_ptr) {
                /*
                 * Disable the bitmap via an invalid offset. We still cache
                 * the previous bitmap owner and the IO bitmap contents:
                 */
                tss->io_bitmap_base = INVALID_IO_BITMAP_OFFSET;
                return;
        }
        if (likely(next == tss->io_bitmap_owner)) {
                /*
                 * Previous owner of the bitmap (hence the bitmap content)
                 * matches the next task, we don't have to do anything but
                 * to set a valid offset in the TSS:
                 */
                tss->io_bitmap_base = IO_BITMAP_OFFSET;
                return;
        }
        /*
         * Lazy TSS's I/O bitmap copy. We set an invalid offset here
         * and we let the task get a GPF in case an I/O instruction
         * is performed. The handler of the GPF will verify that the
         * faulting task has a valid I/O bitmap and, if true, does the
         * real copy and restarts the instruction. This will save us
         * redundant copies when the currently switched task does not
         * perform any I/O during its timeslice.
         */
        tss->io_bitmap_base = INVALID_IO_BITMAP_OFFSET_LAZY;
}
/*
 * This function selects if the context switch from prev to next
 * has to tweak the TSC disable bit in the cr4.
 */
static inline void disable_tsc(struct task_struct *prev_p,
                               struct task_struct *next_p)
{
        struct thread_info *prev, *next;

        /*
         * gcc should eliminate the ->thread_info dereference if
         * has_secure_computing returns 0 at compile time (SECCOMP=n).
         */
        prev = prev_p->thread_info;
        next = next_p->thread_info;

        if (has_secure_computing(prev) || has_secure_computing(next)) {
                /* slow path here */
                if (has_secure_computing(prev) &&
                    !has_secure_computing(next)) {
                        write_cr4(read_cr4() & ~X86_CR4_TSD);
                } else if (!has_secure_computing(prev) &&
                           has_secure_computing(next))
                        write_cr4(read_cr4() | X86_CR4_TSD);
        }
}
/*
 * switch_to(x,y) should switch tasks from x to y.
 *
 * We fsave/fwait so that an exception goes off at the right time
 * (as a call from the fsave or fwait in effect) rather than to
 * the wrong process. Lazy FP saving no longer makes any sense
 * with modern CPUs, and this simplifies a lot of things (SMP
 * and UP become the same).
 *
 * NOTE! We used to use the x86 hardware context switching. The
 * reason for not using it any more becomes apparent when you
 * try to recover gracefully from saved state that is no longer
 * valid (stale segment register values in particular). With the
 * hardware task-switch, there is no way to fix up bad state in
 * a reasonable manner.
 *
 * The fact that Intel documents the hardware task-switching to
 * be slow is a fairly red herring - this code is not noticeably
 * faster. However, there _is_ some room for improvement here,
 * so the performance issues may eventually be a valid point.
 * More important, however, is the fact that this allows us much
 * more flexibility.
 *
 * The return value (in %eax) will be the "prev" task after
 * the task-switch, and shows up in ret_from_fork in entry.S,
 * for example.
 */
struct task_struct fastcall * __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
{
        struct thread_struct *prev = &prev_p->thread,
                             *next = &next_p->thread;
        int cpu = smp_processor_id();
        struct tss_struct *tss = &per_cpu(init_tss, cpu);

        /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */

        __unlazy_fpu(prev_p);

        /*
         * Reload esp0.
         */
        load_esp0(tss, next);

        /*
         * Save away %fs and %gs. No need to save %es and %ds, as
         * those are always kernel segments while inside the kernel.
         * Doing this before setting the new TLS descriptors avoids
         * the situation where we temporarily have non-reloadable
         * segments in %fs and %gs. This could be an issue if the
         * NMI handler ever used %fs or %gs (it does not today), or
         * if the kernel is running inside of a hypervisor layer.
         */
        savesegment(fs, prev->fs);
        savesegment(gs, prev->gs);

        /*
         * Load the per-thread Thread-Local Storage descriptor.
         */
        load_TLS(next, cpu);

        /*
         * Restore %fs and %gs if needed.
         *
         * Glibc normally makes %fs be zero, and %gs is one of
         * the TLS segments.
         */
        if (unlikely(prev->fs | next->fs))
                loadsegment(fs, next->fs);

        if (prev->gs | next->gs)
                loadsegment(gs, next->gs);

        /*
         * Restore IOPL if needed.
         */
        if (unlikely(prev->iopl != next->iopl))
                set_iopl_mask(next->iopl);

        /*
         * Now maybe reload the debug registers
         */
        if (unlikely(next->debugreg[7])) {
                set_debugreg(next->debugreg[0], 0);
                set_debugreg(next->debugreg[1], 1);
                set_debugreg(next->debugreg[2], 2);
                set_debugreg(next->debugreg[3], 3);
                /* no 4 and 5 */
                set_debugreg(next->debugreg[6], 6);
                set_debugreg(next->debugreg[7], 7);
        }

        if (unlikely(prev->io_bitmap_ptr || next->io_bitmap_ptr))
                handle_io_bitmap(next, tss);

        disable_tsc(prev_p, next_p);

        return prev_p;
}
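/*
 * The fork-family system calls take the whole pt_regs by value: the
 * i386 syscall entry path (entry.S) pushes the full register frame
 * onto the kernel stack, so "regs" here refers to that saved frame
 * and can be handed straight to do_fork().
 */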
asmlinkage int sys_fork(struct pt_regs regs)
{
        return do_fork(SIGCHLD, regs.esp, &regs, 0, NULL, NULL);
}

asmlinkage int sys_clone(struct pt_regs regs)
{
        unsigned long clone_flags;
        unsigned long newsp;
        int __user *parent_tidptr, *child_tidptr;

        clone_flags = regs.ebx;
        newsp = regs.ecx;
        parent_tidptr = (int __user *)regs.edx;
        child_tidptr = (int __user *)regs.edi;
        if (!newsp)
                newsp = regs.esp;
        return do_fork(clone_flags, newsp, &regs, 0, parent_tidptr, child_tidptr);
}
/*
 * This is trivial, and on the face of it looks like it
 * could equally well be done in user mode.
 *
 * Not so, for quite unobvious reasons - register pressure.
 * In user mode vfork() cannot have a stack frame, and if
 * done by calling the "clone()" system call directly, you
 * do not have enough call-clobbered registers to hold all
 * the information you need.
 */
asmlinkage int sys_vfork(struct pt_regs regs)
{
        return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs.esp, &regs, 0, NULL, NULL);
}

/*
 * sys_execve() executes a new program.
 */
asmlinkage int sys_execve(struct pt_regs regs)
{
        int error;
        char * filename;

        filename = getname((char __user *) regs.ebx);
        error = PTR_ERR(filename);
        if (IS_ERR(filename))
                goto out;
        error = do_execve(filename,
                        (char __user * __user *) regs.ecx,
                        (char __user * __user *) regs.edx,
                        &regs);
        if (error == 0) {
                task_lock(current);
                current->ptrace &= ~PT_DTRACE;
                task_unlock(current);
                /* Make sure we don't return using sysenter.. */
                set_thread_flag(TIF_IRET);
        }
        putname(filename);
out:
        return error;
}
#define top_esp (THREAD_SIZE - sizeof(unsigned long))
#define top_ebp (THREAD_SIZE - 2*sizeof(unsigned long))
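/*
 * Walk the saved frame pointers of a sleeping task to find the first
 * return address outside the scheduler, i.e. the function the task is
 * blocked in. The walk is bounded to 16 frames and gives up if a
 * frame pointer leaves the task's kernel stack.
 */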
unsigned long get_wchan(struct task_struct *p)
{
        unsigned long ebp, esp, eip;
        unsigned long stack_page;
        int count = 0;

        if (!p || p == current || p->state == TASK_RUNNING)
                return 0;
        stack_page = (unsigned long)p->thread_info;
        esp = p->thread.esp;
        if (!stack_page || esp < stack_page || esp > top_esp+stack_page)
                return 0;
        /* include/asm-i386/system.h:switch_to() pushes ebp last. */
        ebp = *(unsigned long *) esp;
        do {
                if (ebp < stack_page || ebp > top_ebp+stack_page)
                        return 0;
                eip = *(unsigned long *) (ebp+4);
                if (!in_sched_functions(eip))
                        return eip;
                ebp = *(unsigned long *) ebp;
        } while (count++ < 16);
        return 0;
}
EXPORT_SYMBOL(get_wchan);
/*
 * sys_alloc_thread_area: get a yet unused TLS descriptor index.
 */
static int get_free_idx(void)
{
        struct thread_struct *t = &current->thread;
        int idx;

        for (idx = 0; idx < GDT_ENTRY_TLS_ENTRIES; idx++)
                if (desc_empty(t->tls_array + idx))
                        return idx + GDT_ENTRY_TLS_MIN;
        return -ESRCH;
}

/*
 * Set a given TLS descriptor:
 */
asmlinkage int sys_set_thread_area(struct user_desc __user *u_info)
{
        struct thread_struct *t = &current->thread;
        struct user_desc info;
        struct desc_struct *desc;
        int cpu, idx;

        if (copy_from_user(&info, u_info, sizeof(info)))
                return -EFAULT;
        idx = info.entry_number;

        /*
         * index -1 means the kernel should try to find and
         * allocate an empty descriptor:
         */
        if (idx == -1) {
                idx = get_free_idx();
                if (idx < 0)
                        return idx;
                if (put_user(idx, &u_info->entry_number))
                        return -EFAULT;
        }

        if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
                return -EINVAL;

        desc = t->tls_array + idx - GDT_ENTRY_TLS_MIN;

        /*
         * We must not get preempted while modifying the TLS.
         */
        cpu = get_cpu();

        if (LDT_empty(&info)) {
                desc->a = 0;
                desc->b = 0;
        } else {
                desc->a = LDT_entry_a(&info);
                desc->b = LDT_entry_b(&info);
        }
        load_TLS(t, cpu);

        put_cpu();

        return 0;
}
/*
 * Get the current Thread-Local Storage area:
 */
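/*
 * The GET_* macros below pick the individual descriptor fields back
 * out of the two 32-bit halves (a = low word, b = high word) of a
 * segment descriptor, undoing the packing done by LDT_entry_a()/
 * LDT_entry_b().
 */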
#define GET_BASE(desc) ( \
        (((desc)->a >> 16) & 0x0000ffff) | \
        (((desc)->b << 16) & 0x00ff0000) | \
        ( (desc)->b        & 0xff000000) )

#define GET_LIMIT(desc) ( \
        ((desc)->a & 0x0ffff) | \
        ((desc)->b & 0xf0000) )

#define GET_32BIT(desc)         (((desc)->b >> 22) & 1)
#define GET_CONTENTS(desc)      (((desc)->b >> 10) & 3)
#define GET_WRITABLE(desc)      (((desc)->b >>  9) & 1)
#define GET_LIMIT_PAGES(desc)   (((desc)->b >> 23) & 1)
#define GET_PRESENT(desc)       (((desc)->b >> 15) & 1)
#define GET_USEABLE(desc)       (((desc)->b >> 20) & 1)

asmlinkage int sys_get_thread_area(struct user_desc __user *u_info)
{
        struct user_desc info;
        struct desc_struct *desc;
        int idx;

        if (get_user(idx, &u_info->entry_number))
                return -EFAULT;
        if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
                return -EINVAL;

        memset(&info, 0, sizeof(info));

        desc = current->thread.tls_array + idx - GDT_ENTRY_TLS_MIN;

        info.entry_number = idx;
        info.base_addr = GET_BASE(desc);
        info.limit = GET_LIMIT(desc);
        info.seg_32bit = GET_32BIT(desc);
        info.contents = GET_CONTENTS(desc);
        info.read_exec_only = !GET_WRITABLE(desc);
        info.limit_in_pages = GET_LIMIT_PAGES(desc);
        info.seg_not_present = !GET_PRESENT(desc);
        info.useable = GET_USEABLE(desc);

        if (copy_to_user(u_info, &info, sizeof(info)))
                return -EFAULT;
        return 0;
}
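/*
 * Randomize the start of the user stack slightly (by up to 8 KB, kept
 * 16-byte aligned) when va-space randomization is enabled, to make
 * stack addresses harder to predict.
 */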
unsigned long arch_align_stack(unsigned long sp)
{
        if (randomize_va_space)
                sp -= get_random_int() % 8192;
        return sp & ~0xf;
}