/* MN10300 Process handling code
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public Licence
 * as published by the Free Software Foundation; either version
 * 2 of the Licence, or (at your option) any later version.
 */
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <linux/user.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/reboot.h>
#include <linux/percpu.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/rcupdate.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/io.h>
#include <asm/processor.h>
#include <asm/mmu_context.h>
#include <asm/fpu.h>
#include <asm/reset-regs.h>
#include <asm/gdb-stub.h>
#include "internal.h"

/*
 * return saved PC of a blocked thread.
 */
unsigned long thread_saved_pc(struct task_struct *tsk)
{
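	/* thread.sp points at the register frame saved by switch_to();
	 * slot 3 of that frame holds the PC the thread will resume at */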
	return ((unsigned long *) tsk->thread.sp)[3];
}

/*
 * power off function, if any
 */
void (*pm_power_off)(void);
EXPORT_SYMBOL(pm_power_off);

#if !defined(CONFIG_SMP) || defined(CONFIG_HOTPLUG_CPU)
/*
 * we use this if we don't have any better idle routine
 */
static void default_idle(void)
{
	local_irq_disable();
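	/* recheck need_resched() with interrupts disabled so that a wakeup
	 * cannot slip in between the test and the halt; safe_halt()
	 * re-enables interrupts as the CPU goes to sleep */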
	if (!need_resched())
		safe_halt();
	else
		local_irq_enable();
}

#else /* !CONFIG_SMP || CONFIG_HOTPLUG_CPU */
/*
 * On SMP it's slightly faster (but much more power-consuming!)
 * to poll the ->work.need_resched flag instead of waiting for the
 * cross-CPU IPI to arrive.  Use this option with caution.
 */
static inline void poll_idle(void)
{
	int oldval;

	local_irq_enable();

	/*
	 * Deal with another CPU just having chosen a thread to
	 * run here:
	 */
	oldval = test_and_clear_thread_flag(TIF_NEED_RESCHED);

	if (!oldval) {
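		/* advertise that this CPU is polling need_resched, so that
		 * remote CPUs can skip sending the reschedule IPI */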
		set_thread_flag(TIF_POLLING_NRFLAG);
		while (!need_resched())
			cpu_relax();
		clear_thread_flag(TIF_POLLING_NRFLAG);
	} else {
		set_need_resched();
	}
}
#endif /* !CONFIG_SMP || CONFIG_HOTPLUG_CPU */

/*
 * the idle thread
 * - there's no useful work to be done, so just try to conserve power and have
 *   a low exit latency (ie sit in a loop waiting for somebody to say that
 *   they'd like to reschedule)
 */
void cpu_idle(void)
{
	/* endless idle loop with no priority at all */
	for (;;) {
		rcu_idle_enter();
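		/* rcu_idle_enter() tells RCU that this CPU is entering an
		 * extended quiescent state and needs no grace-period work */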
		while (!need_resched()) {
			/* initialised to NULL: without this the flag test
			 * below would read an uninitialised pointer */
			void (*idle)(void) = NULL;

			smp_rmb();
			if (!idle) {
#if defined(CONFIG_SMP) && !defined(CONFIG_HOTPLUG_CPU)
				idle = poll_idle;
#else /* CONFIG_SMP && !CONFIG_HOTPLUG_CPU */
				idle = default_idle;
#endif /* CONFIG_SMP && !CONFIG_HOTPLUG_CPU */
			}
			idle();
		}
		rcu_idle_exit();
		schedule_preempt_disabled();
	}
}

void release_segments(struct mm_struct *mm)
{
}

void machine_restart(char *cmd)
{
#ifdef CONFIG_KERNEL_DEBUGGER
	gdbstub_exit(0);
#endif

#ifdef mn10300_unit_hard_reset
	mn10300_unit_hard_reset();
#else
	mn10300_proc_hard_reset();
#endif
}

void machine_halt(void)
{
#ifdef CONFIG_KERNEL_DEBUGGER
	gdbstub_exit(0);
#endif
}

void machine_power_off(void)
{
#ifdef CONFIG_KERNEL_DEBUGGER
	gdbstub_exit(0);
#endif
}

void show_regs(struct pt_regs *regs)
{
}

/*
 * free current thread data structures etc.
 */
void exit_thread(void)
{
	exit_fpu();
}

void flush_thread(void)
{
	flush_fpu();
}

void release_thread(struct task_struct *dead_task)
{
}

/*
 * we do not have to muck with descriptors here, that is
 * done in switch_mm() as needed.
 */
void copy_segments(struct task_struct *p, struct mm_struct *new_mm)
{
}

/*
 * this gets called so that we can store lazy state into memory and copy the
 * current task into the new thread.
 */
int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{
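	/* flush any lazily-held FPU state out to src's thread struct so
	 * that the structure copy below captures it */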
	unlazy_fpu(src);
	*dst = *src;
	return 0;
}

/*
 * set up the kernel stack for a new thread and copy arch-specific thread
 * control information
 */
int copy_thread(unsigned long clone_flags,
		unsigned long c_usp, unsigned long ustk_size,
		struct task_struct *p)
{
	struct thread_info *ti = task_thread_info(p);
	struct pt_regs *c_regs;
	unsigned long c_ksp;

	c_ksp = (unsigned long) task_stack_page(p) + THREAD_SIZE;

	/* allocate the userspace exception frame and set it up */
	c_ksp -= sizeof(struct pt_regs);
	c_regs = (struct pt_regs *) c_ksp;
	c_ksp -= 12; /* allocate function call ABI slack */
	/* set things up so the scheduler can start the new task */
	p->thread.uregs = c_regs;
	ti->frame = c_regs;
	p->thread.a3 = (unsigned long) c_regs;
	p->thread.sp = c_ksp;
	p->thread.wchan = p->thread.pc;
	p->thread.usp = c_usp;

	if (unlikely(p->flags & PF_KTHREAD)) {
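		/* for a kernel thread, c_usp carries the function to call
		 * and ustk_size its argument; ret_from_kernel_thread picks
		 * them out of the a0 and d0 slots set up below */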
		memset(c_regs, 0, sizeof(struct pt_regs));
		c_regs->a0 = c_usp; /* function */
		c_regs->d0 = ustk_size; /* argument */

		local_save_flags(c_regs->epsw);
		c_regs->epsw |= EPSW_IE | EPSW_IM_7;
		p->thread.pc = (unsigned long) ret_from_kernel_thread;
		return 0;
	}

	*c_regs = *current_pt_regs();
	if (c_usp)
		c_regs->sp = c_usp;
	/* the child starts with the FPU disabled; lazy FPU switching will
	 * re-enable it on first use */
	c_regs->epsw &= ~EPSW_FE;
	/* the new TLS pointer is passed in as arg #5 to sys_clone() */
	if (clone_flags & CLONE_SETTLS)
		c_regs->e2 = current_frame()->d3;

	p->thread.pc = (unsigned long) ret_from_fork;

	return 0;
}

unsigned long get_wchan(struct task_struct *p)
{
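	/* the wait channel is cached in the thread struct rather than
	 * recovered by unwinding the blocked task's kernel stack */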
	return p->thread.wchan;
}