process_kern.c

/*
 * Copyright (C) 2002 Jeff Dike (jdike@karaya.com)
 * Licensed under the GPL
 */

#include "linux/sched.h"
#include "linux/slab.h"
#include "linux/ptrace.h"
#include "linux/proc_fs.h"
#include "linux/file.h"
#include "linux/errno.h"
#include "linux/init.h"
#include "asm/uaccess.h"
#include "asm/atomic.h"
#include "kern_util.h"
#include "skas.h"
#include "os.h"
#include "user_util.h"
#include "tlb.h"
#include "kern.h"
#include "mode.h"
#include "registers.h"
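
/*
 * Context switch between two UML tasks: jump from the outgoing task's
 * switch buffer into the incoming task's.  The interval timers are
 * switched around the jump when the idle thread (pid 0) is involved.
 */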
void switch_to_skas(void *prev, void *next)
{
	struct task_struct *from, *to;

	from = prev;
	to = next;

	/* XXX need to check runqueues[cpu].idle */
	if(current->pid == 0)
		switch_timers(0);

	switch_threads(&from->thread.mode.skas.switch_buf,
		       to->thread.mode.skas.switch_buf);

	if(current->pid == 0)
		switch_timers(1);
}

extern void schedule_tail(struct task_struct *prev);
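
/*
 * First code run by a newly created kernel thread, entered via SIGUSR1
 * once the thread is scheduled for the first time.  It finishes the
 * scheduler handoff with schedule_tail(), runs the requested thread
 * function, and then either continues into userspace (if that function
 * exec'd a process) or exits.
 */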
void new_thread_handler(int sig)
{
	int (*fn)(void *), n;
	void *arg;

	fn = current->thread.request.u.thread.proc;
	arg = current->thread.request.u.thread.arg;
	os_usr1_signal(1);
	thread_wait(&current->thread.mode.skas.switch_buf,
		    current->thread.mode.skas.fork_buf);

	if(current->thread.prev_sched != NULL)
		schedule_tail(current->thread.prev_sched);
	current->thread.prev_sched = NULL;

	/* The return value is 1 if the kernel thread execs a process,
	 * 0 if it just exits
	 */
	n = run_kernel_thread(fn, arg, &current->thread.exec_buf);
	if(n == 1){
		/* Handle any immediate reschedules or signals */
		interrupt_end();
		userspace(&current->thread.regs.regs);
	}
	else do_exit(0);
}
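
/*
 * Trampoline run on the new thread's stack: set the stack up, then
 * raise SIGUSR1 against ourselves so that the handler
 * (new_thread_handler or fork_handler) is entered on it.
 */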
void new_thread_proc(void *stack, void (*handler)(int sig))
{
	init_new_thread_stack(stack, handler);
	os_usr1_process(os_getpid());
}
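
/* Nothing to release in SKAS mode; this exists to satisfy the
 * mode-independent thread interface.
 */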
void release_thread_skas(struct task_struct *task)
{
}
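
/*
 * Entry point for a newly forked child, entered the same way as
 * new_thread_handler.  After the first schedule it flushes stale
 * mappings with force_flush_all(), completes the handoff with
 * schedule_tail(), and returns to userspace.
 */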
void fork_handler(int sig)
{
	os_usr1_signal(1);
	thread_wait(&current->thread.mode.skas.switch_buf,
		    current->thread.mode.skas.fork_buf);

	force_flush_all();
	if(current->thread.prev_sched == NULL)
		panic("fork_handler: no previous task to schedule_tail");

	schedule_tail(current->thread.prev_sched);
	current->thread.prev_sched = NULL;

	/* Handle any immediate reschedules or signals */
	interrupt_end();

	userspace(&current->thread.regs.regs);
}
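
/*
 * Arch-side half of copy_process().  For a fork, the child starts with
 * a copy of the parent's registers, a syscall return value of 0, and
 * optionally a new stack pointer, and resumes in fork_handler.  For a
 * kernel thread, it starts from fresh registers with the requested
 * function and argument, and resumes in new_thread_handler.
 */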
int copy_thread_skas(int nr, unsigned long clone_flags, unsigned long sp,
		     unsigned long stack_top, struct task_struct *p,
		     struct pt_regs *regs)
{
	void (*handler)(int);

	if(current->thread.forking){
		memcpy(&p->thread.regs.regs.skas, &regs->regs.skas,
		       sizeof(p->thread.regs.regs.skas));
		REGS_SET_SYSCALL_RETURN(p->thread.regs.regs.skas.regs, 0);
		if(sp != 0) REGS_SP(p->thread.regs.regs.skas.regs) = sp;

		handler = fork_handler;
	}
	else {
		init_thread_registers(&p->thread.regs.regs);
		p->thread.request.u.thread = current->thread.request.u.thread;
		handler = new_thread_handler;
	}

	new_thread(task_stack_page(p), &p->thread.mode.skas.switch_buf,
		   &p->thread.mode.skas.fork_buf, handler);
	return(0);
}
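
/*
 * Create a fresh host address space by opening /proc/mm (provided by
 * hosts carrying the skas3 patch), mapping in the stub pages if this
 * host needs them.  Returns the mm file descriptor or a negative error
 * code.
 */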
int new_mm(unsigned long stack)
{
	int fd;

	fd = os_open_file("/proc/mm", of_cloexec(of_write(OPENFLAGS())), 0);
	if(fd < 0)
		return(fd);

	if(skas_needs_stub)
		map_stub_pages(fd, CONFIG_STUB_CODE, CONFIG_STUB_DATA, stack);

	return(fd);
}
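
/* Record the host pid backing this CPU's idle thread, then idle. */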
void init_idle_skas(void)
{
	cpu_tasks[current_thread->cpu].pid = os_getpid();
	default_idle();
}

extern void start_kernel(void);
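
/*
 * Thread body for the UML kernel itself: note the boot CPU's host pid
 * and task, mark CPU 0 online on SMP builds, and enter the generic
 * start_kernel().
 */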
static int start_kernel_proc(void *unused)
{
	int pid;

	block_signals();
	pid = os_getpid();

	cpu_tasks[0].pid = pid;
	cpu_tasks[0].task = current;
#ifdef CONFIG_SMP
	cpu_online_map = cpumask_of_cpu(0);
#endif
	start_kernel();
	return(0);
}

extern int userspace_pid[];
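
/*
 * SKAS boot entry: start the userspace handler process first if the
 * host supports /proc/mm, then point the init task at
 * start_kernel_proc() and switch to it as the idle thread.
 */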
int start_uml_skas(void)
{
	if(proc_mm)
		userspace_pid[0] = start_userspace(0);

	init_new_thread_signals(1);

	init_task.thread.request.u.thread.proc = start_kernel_proc;
	init_task.thread.request.u.thread.arg = NULL;
	return(start_idle_thread(task_stack_page(&init_task),
				 &init_task.thread.mode.skas.switch_buf,
				 &init_task.thread.mode.skas.fork_buf));
}
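
/*
 * Both of these report the host pid that runs userspace; as the
 * #warnings note, this should eventually be looked up per-CPU rather
 * than hardwired to CPU 0.
 */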
int external_pid_skas(struct task_struct *task)
{
#warning Need to look up userspace_pid by cpu
	return(userspace_pid[0]);
}

int thread_pid_skas(struct task_struct *task)
{
#warning Need to look up userspace_pid by cpu
	return(userspace_pid[0]);
}
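
/*
 * Shutdown path: with /proc/mm there is a single ptraced userspace
 * process to kill; without it each mm is backed by its own host
 * process, so walk the task list and kill each one's host pid.
 */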
void kill_off_processes_skas(void)
{
	if(proc_mm)
#warning need to loop over userspace_pids in kill_off_processes_skas
		os_kill_ptraced_process(userspace_pid[0], 1);
	else {
		struct task_struct *p;
		int pid, me;

		me = os_getpid();
		for_each_process(p){
			if(p->mm == NULL)
				continue;

			pid = p->mm->context.skas.id.u.pid;
			os_kill_ptraced_process(pid, 1);
		}
	}
}
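
/*
 * Address of the current mm's stub stack page, or 0 when there is no
 * userspace mm (i.e. for kernel threads).
 */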
unsigned long current_stub_stack(void)
{
	if(current->mm == NULL)
		return(0);

	return(current->mm->context.skas.id.stack);
}