process_kern.c 5.2 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237
  1. /*
  2. * Copyright (C) 2002 Jeff Dike (jdike@karaya.com)
  3. * Licensed under the GPL
  4. */
  5. #include "linux/sched.h"
  6. #include "linux/slab.h"
  7. #include "linux/ptrace.h"
  8. #include "linux/proc_fs.h"
  9. #include "linux/file.h"
  10. #include "linux/errno.h"
  11. #include "linux/init.h"
  12. #include "asm/uaccess.h"
  13. #include "asm/atomic.h"
  14. #include "kern_util.h"
  15. #include "time_user.h"
  16. #include "signal_user.h"
  17. #include "skas.h"
  18. #include "os.h"
  19. #include "user_util.h"
  20. #include "tlb.h"
  21. #include "kern.h"
  22. #include "mode.h"
  23. #include "proc_mm.h"
  24. #include "registers.h"
/* Mode-specific context switch.  'prev' and 'next' are struct
 * task_struct pointers passed as void *.  Returns the task that was
 * running before this one was scheduled back in
 * (current->thread.prev_sched), for use by the caller's switch_to()
 * machinery.
 */
void *switch_to_skas(void *prev, void *next)
{
	struct task_struct *from, *to;

	from = prev;
	to = next;

	/* XXX need to check runqueues[cpu].idle */
	/* pid 0 is the idle thread; timers are turned off around it —
	 * NOTE(review): confirm switch_timers(0/1) semantics. */
	if(current->pid == 0)
		switch_timers(0);

	to->thread.prev_sched = from;
	set_current(to);

	/* The actual stack/context switch: jump into 'to's saved
	 * switch_buf; execution resumes after this call only when some
	 * later switch jumps back into 'from's switch_buf. */
	switch_threads(&from->thread.mode.skas.switch_buf,
		       to->thread.mode.skas.switch_buf);

	if(current->pid == 0)
		switch_timers(1);

	return(current->thread.prev_sched);
}
  41. extern void schedule_tail(struct task_struct *prev);
/* Startup handler for a new kernel thread, entered via SIGUSR1 (see
 * new_thread_proc()).  Picks up the function and argument that
 * copy_thread_skas() stashed in current->thread.request, waits to be
 * scheduled in for the first time, and then runs the thread function.
 */
void new_thread_handler(int sig)
{
	int (*fn)(void *), n;
	void *arg;

	fn = current->thread.request.u.thread.proc;
	arg = current->thread.request.u.thread.arg;

	/* Re-enable SIGUSR1, which was used to get control here. */
	change_sig(SIGUSR1, 1);

	/* Park in the fork_buf until the scheduler switches to this
	 * thread's switch_buf for the first time — TODO confirm
	 * thread_wait() semantics. */
	thread_wait(&current->thread.mode.skas.switch_buf,
		    current->thread.mode.skas.fork_buf);

	if(current->thread.prev_sched != NULL)
		schedule_tail(current->thread.prev_sched);
	current->thread.prev_sched = NULL;

	/* The return value is 1 if the kernel thread execs a process,
	 * 0 if it just exits
	 */
	n = run_kernel_thread(fn, arg, &current->thread.exec_buf);
	if(n == 1){
		/* Handle any immediate reschedules or signals */
		interrupt_end();
		userspace(&current->thread.regs.regs);
	}
	else do_exit(0);
}
  65. void new_thread_proc(void *stack, void (*handler)(int sig))
  66. {
  67. init_new_thread_stack(stack, handler);
  68. os_usr1_process(os_getpid());
  69. }
/* Mode-specific part of releasing a thread; intentionally a no-op in
 * skas mode — nothing is done here. */
void release_thread_skas(struct task_struct *task)
{
}
/* Startup handler for the child side of a userspace fork (installed by
 * copy_thread_skas() when current->thread.forking).  Waits to be
 * scheduled in, then drops straight into userspace.
 */
void fork_handler(int sig)
{
	/* Re-enable SIGUSR1, which was used to get control here. */
	change_sig(SIGUSR1, 1);

	/* Park until this child is scheduled in for the first time. */
	thread_wait(&current->thread.mode.skas.switch_buf,
		    current->thread.mode.skas.fork_buf);

	/* Presumably flushes stale mappings for the new address space
	 * before entering userspace — TODO confirm (see tlb.h). */
	force_flush_all();

	/* A forked child was necessarily switched to, so prev_sched
	 * can never legitimately be NULL here. */
	if(current->thread.prev_sched == NULL)
		panic("blech");

	schedule_tail(current->thread.prev_sched);
	current->thread.prev_sched = NULL;

	/* Handle any immediate reschedules or signals */
	interrupt_end();

	userspace(&current->thread.regs.regs);
}
/* Mode-specific part of copy_thread(): set up the new task's register
 * state and its switch/fork buffers so it can be scheduled.
 *
 * Two cases:
 *  - userspace fork (current->thread.forking): copy the parent's
 *    registers, force the child's syscall return value to 0, install
 *    the new stack pointer if one was given, and start the child in
 *    fork_handler().
 *  - kernel thread: start from fresh registers, inherit the proc/arg
 *    request from the caller, and start in new_thread_handler().
 *
 * Always returns 0.
 */
int copy_thread_skas(int nr, unsigned long clone_flags, unsigned long sp,
		     unsigned long stack_top, struct task_struct * p,
		     struct pt_regs *regs)
{
	void (*handler)(int);

	if(current->thread.forking){
		memcpy(&p->thread.regs.regs.skas, &regs->regs.skas,
		       sizeof(p->thread.regs.regs.skas));
		/* The child sees 0 as the fork/clone return value. */
		REGS_SET_SYSCALL_RETURN(p->thread.regs.regs.skas.regs, 0);
		if(sp != 0) REGS_SP(p->thread.regs.regs.skas.regs) = sp;

		handler = fork_handler;
	}
	else {
		init_thread_registers(&p->thread.regs.regs);
		p->thread.request.u.thread = current->thread.request.u.thread;
		handler = new_thread_handler;
	}

	new_thread(p->thread_info, &p->thread.mode.skas.switch_buf,
		   &p->thread.mode.skas.fork_buf, handler);
	return(0);
}
  108. extern void map_stub_pages(int fd, unsigned long code,
  109. unsigned long data, unsigned long stack);
/* Create a new host address space via /proc/mm.
 *
 * 'from' is an existing /proc/mm fd whose segments are copied into the
 * new address space, or -1 to start empty.  'stack' is passed to
 * map_stub_pages() when ptrace_faultinfo is not available.
 *
 * Returns the new /proc/mm fd, or the negative value from
 * os_open_file() on failure.
 *
 * NOTE(review): a failed or short copy_segments write is only logged,
 * not treated as fatal, and "-n" is only a meaningful error code when
 * n is negative (a short write prints a bogus value).
 */
int new_mm(int from, unsigned long stack)
{
	struct proc_mm_op copy;
	int n, fd;

	fd = os_open_file("/proc/mm", of_cloexec(of_write(OPENFLAGS())), 0);
	if(fd < 0)
		return(fd);

	if(from != -1){
		copy = ((struct proc_mm_op) { .op = MM_COPY_SEGMENTS,
					      .u =
					      { .copy_segments = from } } );
		n = os_write_file(fd, &copy, sizeof(copy));
		if(n != sizeof(copy))
			printk("new_mm : /proc/mm copy_segments failed, "
			       "err = %d\n", -n);
	}

	if(!ptrace_faultinfo)
		map_stub_pages(fd, CONFIG_STUB_CODE, CONFIG_STUB_DATA, stack);

	return(fd);
}
  130. void init_idle_skas(void)
  131. {
  132. cpu_tasks[current_thread->cpu].pid = os_getpid();
  133. default_idle();
  134. }
  135. extern void start_kernel(void);
  136. static int start_kernel_proc(void *unused)
  137. {
  138. int pid;
  139. block_signals();
  140. pid = os_getpid();
  141. cpu_tasks[0].pid = pid;
  142. cpu_tasks[0].task = current;
  143. #ifdef CONFIG_SMP
  144. cpu_online_map = cpumask_of_cpu(0);
  145. #endif
  146. start_kernel();
  147. return(0);
  148. }
  149. extern int userspace_pid[];
/* Mode-specific kernel startup: create the first userspace process
 * when /proc/mm is available, point the init task's thread request at
 * start_kernel_proc(), and jump onto the idle thread's stack.
 * Returns start_idle_thread()'s result — presumably it does not return
 * in normal operation; TODO confirm.
 */
int start_uml_skas(void)
{
	if(proc_mm)
		userspace_pid[0] = start_userspace(0);

	init_new_thread_signals(1);

	init_task.thread.request.u.thread.proc = start_kernel_proc;
	init_task.thread.request.u.thread.arg = NULL;
	return(start_idle_thread(init_task.thread_info,
				 &init_task.thread.mode.skas.switch_buf,
				 &init_task.thread.mode.skas.fork_buf));
}
  161. int external_pid_skas(struct task_struct *task)
  162. {
  163. #warning Need to look up userspace_pid by cpu
  164. return(userspace_pid[0]);
  165. }
  166. int thread_pid_skas(struct task_struct *task)
  167. {
  168. #warning Need to look up userspace_pid by cpu
  169. return(userspace_pid[0]);
  170. }
  171. void kill_off_processes_skas(void)
  172. {
  173. if(proc_mm)
  174. #warning need to loop over userspace_pids in kill_off_processes_skas
  175. os_kill_ptraced_process(userspace_pid[0], 1);
  176. else {
  177. struct task_struct *p;
  178. int pid, me;
  179. me = os_getpid();
  180. for_each_process(p){
  181. if(p->mm == NULL)
  182. continue;
  183. pid = p->mm->context.skas.id.u.pid;
  184. os_kill_ptraced_process(pid, 1);
  185. }
  186. }
  187. }
  188. unsigned long current_stub_stack(void)
  189. {
  190. if(current->mm == NULL)
  191. return(0);
  192. return(current->mm->context.skas.id.stack);
  193. }