/* arch/um/kernel/skas/process_kern.c */
  1. /*
  2. * Copyright (C) 2002 Jeff Dike (jdike@karaya.com)
  3. * Licensed under the GPL
  4. */
  5. #include "linux/sched.h"
  6. #include "linux/slab.h"
  7. #include "linux/ptrace.h"
  8. #include "linux/proc_fs.h"
  9. #include "linux/file.h"
  10. #include "linux/errno.h"
  11. #include "linux/init.h"
  12. #include "asm/uaccess.h"
  13. #include "asm/atomic.h"
  14. #include "kern_util.h"
  15. #include "time_user.h"
  16. #include "signal_user.h"
  17. #include "skas.h"
  18. #include "os.h"
  19. #include "user_util.h"
  20. #include "tlb.h"
  21. #include "kern.h"
  22. #include "mode.h"
  23. #include "proc_mm.h"
  24. #include "registers.h"
/* Switch execution from task "prev" to task "next" (both really
 * struct task_struct *).  Returns the task that was most recently
 * switched away from, as recorded in the resumed task's
 * thread.prev_sched.
 */
void *switch_to_skas(void *prev, void *next)
{
	struct task_struct *from, *to;

	from = prev;
	to = next;

	/* XXX need to check runqueues[cpu].idle */
	/* pid 0 is treated as the idle thread here (see XXX above); the
	 * timer mode is toggled around it - NOTE(review): confirm exact
	 * switch_timers(0/1) semantics.
	 */
	if(current->pid == 0)
		switch_timers(0);

	to->thread.prev_sched = from;
	set_current(to);

	/* Jump to the incoming task's saved context; we return here only
	 * when some later switch resumes "from".  After this call,
	 * "current" is no longer "from".
	 */
	switch_threads(&from->thread.mode.skas.switch_buf,
		       to->thread.mode.skas.switch_buf);

	if(current->pid == 0)
		switch_timers(1);

	return(current->thread.prev_sched);
}
  41. extern void schedule_tail(struct task_struct *prev);
/* First code run by a newly created kernel thread, entered via SIGUSR1
 * on the thread's own stack (see new_thread_proc).  Runs the function
 * stashed in the task's thread request, then either enters userspace
 * or exits.
 */
void new_thread_handler(int sig)
{
	int (*fn)(void *), n;
	void *arg;

	/* Fetch the requested function/argument before parking; they
	 * were stored by copy_thread_skas.
	 */
	fn = current->thread.request.u.thread.proc;
	arg = current->thread.request.u.thread.arg;

	change_sig(SIGUSR1, 1);

	/* Park here until the scheduler actually switches to us. */
	thread_wait(&current->thread.mode.skas.switch_buf,
		    current->thread.mode.skas.fork_buf);

	if(current->thread.prev_sched != NULL)
		schedule_tail(current->thread.prev_sched);
	current->thread.prev_sched = NULL;

	/* The return value is 1 if the kernel thread execs a process,
	 * 0 if it just exits
	 */
	n = run_kernel_thread(fn, arg, &current->thread.exec_buf);
	if(n == 1){
		/* Handle any immediate reschedules or signals */
		interrupt_end();

		/* The thread exec'd - continue in userspace; this call
		 * does not return.
		 */
		userspace(&current->thread.regs.regs);
	}
	else do_exit(0);
}
  65. void new_thread_proc(void *stack, void (*handler)(int sig))
  66. {
  67. init_new_thread_stack(stack, handler);
  68. os_usr1_process(os_getpid());
  69. }
/* Task teardown hook for skas mode - intentionally empty; there is no
 * per-task state to release here.
 */
void release_thread_skas(struct task_struct *task)
{
}
/* First code run by the child side of a fork.  Like new_thread_handler
 * but resumes userspace directly instead of running a kernel function.
 * Never returns.
 */
void fork_handler(int sig)
{
	change_sig(SIGUSR1, 1);

	/* Park here until the scheduler first switches to this child. */
	thread_wait(&current->thread.mode.skas.switch_buf,
		    current->thread.mode.skas.fork_buf);

	/* NOTE(review): presumably rebuilds/flushes mapping state
	 * inherited from the parent - confirm force_flush_all().
	 */
	force_flush_all();

	/* A freshly forked child must have been switched to from
	 * somewhere; a NULL prev_sched means the switch bookkeeping is
	 * broken.
	 */
	if(current->thread.prev_sched == NULL)
		panic("blech");
	schedule_tail(current->thread.prev_sched);
	current->thread.prev_sched = NULL;

	/* Handle any immediate reschedules or signals */
	interrupt_end();

	/* Resume the child in userspace; does not return. */
	userspace(&current->thread.regs.regs);
}
/* Set up the thread state of a newly copied task "p".  The register
 * image and the handler that runs when "p" is first scheduled depend on
 * whether this is a userspace fork (current->thread.forking) or a
 * kernel thread.  Always returns 0.
 */
int copy_thread_skas(int nr, unsigned long clone_flags, unsigned long sp,
		     unsigned long stack_top, struct task_struct * p,
		     struct pt_regs *regs)
{
	void (*handler)(int);

	if(current->thread.forking){
		/* Child gets a copy of the parent's registers, with the
		 * syscall made to return 0 in the child and, if supplied,
		 * a new stack pointer.
		 */
		memcpy(&p->thread.regs.regs.skas, &regs->regs.skas,
		       sizeof(p->thread.regs.regs.skas));
		REGS_SET_SYSCALL_RETURN(p->thread.regs.regs.skas.regs, 0);
		if(sp != 0) REGS_SP(p->thread.regs.regs.skas.regs) = sp;

		handler = fork_handler;
	}
	else {
		/* Kernel thread: fresh registers, and inherit the
		 * proc/arg request so new_thread_handler knows what to
		 * run.
		 */
		init_thread_registers(&p->thread.regs.regs);
		p->thread.request.u.thread = current->thread.request.u.thread;
		handler = new_thread_handler;
	}

	/* Arrange for "handler" to run when "p" is first switched to. */
	new_thread(p->thread_info, &p->thread.mode.skas.switch_buf,
		   &p->thread.mode.skas.fork_buf, handler);
	return(0);
}
  108. int new_mm(int from)
  109. {
  110. struct proc_mm_op copy;
  111. int n, fd;
  112. fd = os_open_file("/proc/mm", of_cloexec(of_write(OPENFLAGS())), 0);
  113. if(fd < 0)
  114. return(fd);
  115. if(from != -1){
  116. copy = ((struct proc_mm_op) { .op = MM_COPY_SEGMENTS,
  117. .u =
  118. { .copy_segments = from } } );
  119. n = os_write_file(fd, &copy, sizeof(copy));
  120. if(n != sizeof(copy))
  121. printk("new_mm : /proc/mm copy_segments failed, "
  122. "err = %d\n", -n);
  123. }
  124. return(fd);
  125. }
  126. void init_idle_skas(void)
  127. {
  128. cpu_tasks[current_thread->cpu].pid = os_getpid();
  129. default_idle();
  130. }
  131. extern void start_kernel(void);
  132. static int start_kernel_proc(void *unused)
  133. {
  134. int pid;
  135. block_signals();
  136. pid = os_getpid();
  137. cpu_tasks[0].pid = pid;
  138. cpu_tasks[0].task = current;
  139. #ifdef CONFIG_SMP
  140. cpu_online_map = cpumask_of_cpu(0);
  141. #endif
  142. start_kernel();
  143. return(0);
  144. }
  145. int start_uml_skas(void)
  146. {
  147. start_userspace(0);
  148. init_new_thread_signals(1);
  149. init_task.thread.request.u.thread.proc = start_kernel_proc;
  150. init_task.thread.request.u.thread.arg = NULL;
  151. return(start_idle_thread(init_task.thread_info,
  152. &init_task.thread.mode.skas.switch_buf,
  153. &init_task.thread.mode.skas.fork_buf));
  154. }
  155. int external_pid_skas(struct task_struct *task)
  156. {
  157. #warning Need to look up userspace_pid by cpu
  158. return(userspace_pid[0]);
  159. }
  160. int thread_pid_skas(struct task_struct *task)
  161. {
  162. #warning Need to look up userspace_pid by cpu
  163. return(userspace_pid[0]);
  164. }