process_kern.c 11 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461
  1. /*
  2. * Copyright (C) 2002 Jeff Dike (jdike@karaya.com)
  3. * Licensed under the GPL
  4. */
  5. #include "linux/sched.h"
  6. #include "linux/signal.h"
  7. #include "linux/kernel.h"
  8. #include "linux/interrupt.h"
  9. #include "linux/ptrace.h"
  10. #include "asm/system.h"
  11. #include "asm/pgalloc.h"
  12. #include "asm/ptrace.h"
  13. #include "asm/tlbflush.h"
  14. #include "irq_user.h"
  15. #include "kern_util.h"
  16. #include "os.h"
  17. #include "kern.h"
  18. #include "sigcontext.h"
  19. #include "mem_user.h"
  20. #include "tlb.h"
  21. #include "mode.h"
  22. #include "mode_kern.h"
  23. #include "init.h"
  24. #include "tt.h"
/*
 * Perform a context switch in tt mode.  Each kernel task is backed by its
 * own host process; switching tasks means waking the incoming task's host
 * process (a one-byte write to its switch_pipe) and then putting this one
 * to sleep (a blocking read on its own switch_pipe).
 *
 * prev/next are struct task_struct pointers passed as void * because the
 * caller is in mode-independent code.
 */
void switch_to_tt(void *prev, void *next)
{
	struct task_struct *from, *to, *prev_sched;
	unsigned long flags;
	int err, vtalrm, alrm, prof, cpu;
	char c;

	from = prev;
	to = next;

	cpu = task_thread_info(from)->cpu;
	/* Only the task on CPU 0 has external interrupts forwarded to it. */
	if(cpu == 0)
		forward_interrupts(to->thread.mode.tt.extern_pid);
#ifdef CONFIG_SMP
	forward_ipi(cpu_data[cpu].ipi_pipe[0], to->thread.mode.tt.extern_pid);
#endif
	local_irq_save(flags);

	/* Block the timer/profiling signals across the switch, saving their
	 * previous state so it can be restored on the way out. */
	vtalrm = change_sig(SIGVTALRM, 0);
	alrm = change_sig(SIGALRM, 0);
	prof = change_sig(SIGPROF, 0);

	/* Pending SIGIO now belongs to the incoming task's host process. */
	forward_pending_sigio(to->thread.mode.tt.extern_pid);

	c = 0;

	/* Notice that here we "up" the semaphore on which "to" is waiting, and
	 * below (the read) we wait on this semaphore (which is implemented by
	 * switch_pipe) and go sleeping. Thus, after that, we have resumed in
	 * "to", and can't use any more the value of "from" (which is outdated),
	 * nor the value in "to" (since it was the task which stole us the CPU,
	 * which we don't care about). */
	err = os_write_file(to->thread.mode.tt.switch_pipe[1], &c, sizeof(c));
	if(err != sizeof(c))
		panic("write of switch_pipe failed, err = %d", -err);

	/* switch_pipe[0] == -1 marks a released (exiting) task - see
	 * release_thread_tt(). */
	if(from->thread.mode.tt.switch_pipe[0] == -1)
		os_kill_process(os_getpid(), 0);

	err = os_read_file(from->thread.mode.tt.switch_pipe[0], &c,
			   sizeof(c));
	if(err != sizeof(c))
		panic("read of switch_pipe failed, errno = %d", -err);

	/* If the process that we have just scheduled away from has exited,
	 * then it needs to be killed here.  The reason is that, even though
	 * it will kill itself when it next runs, that may be too late.  Its
	 * stack will be freed, possibly before then, and if that happens,
	 * we have a use-after-free situation.  So, it gets killed here
	 * in case it has not already killed itself.
	 */
	prev_sched = current->thread.prev_sched;
	if(prev_sched->thread.mode.tt.switch_pipe[0] == -1)
		os_kill_process(prev_sched->thread.mode.tt.extern_pid, 1);

	/* Restore the timer signals to their pre-switch state. */
	change_sig(SIGVTALRM, vtalrm);
	change_sig(SIGALRM, alrm);
	change_sig(SIGPROF, prof);

	arch_switch_to_tt(prev_sched, current);

	flush_tlb_all();
	local_irq_restore(flags);
}
  77. void release_thread_tt(struct task_struct *task)
  78. {
  79. int pid = task->thread.mode.tt.extern_pid;
  80. /*
  81. * We first have to kill the other process, before
  82. * closing its switch_pipe. Else it might wake up
  83. * and receive "EOF" before we could kill it.
  84. */
  85. if(os_getpid() != pid)
  86. os_kill_process(pid, 0);
  87. os_close_file(task->thread.mode.tt.switch_pipe[0]);
  88. os_close_file(task->thread.mode.tt.switch_pipe[1]);
  89. /* use switch_pipe as flag: thread is released */
  90. task->thread.mode.tt.switch_pipe[0] = -1;
  91. }
  92. void suspend_new_thread(int fd)
  93. {
  94. int err;
  95. char c;
  96. os_stop_process(os_getpid());
  97. err = os_read_file(fd, &c, sizeof(c));
  98. if(err != sizeof(c))
  99. panic("read failed in suspend_new_thread, err = %d", -err);
  100. }
/* No prototype for this generic scheduler hook is visible in the headers
 * included here, so declare it locally. */
void schedule_tail(struct task_struct *prev);

/*
 * SIGUSR1 handler in which a brand-new kernel thread begins execution.
 * It records the sigcontext location, unblocks the signals the thread
 * needs, waits until the scheduler first picks it, finishes the scheduling
 * handoff, and then runs the requested thread function.
 */
static void new_thread_handler(int sig)
{
	unsigned long disable;
	int (*fn)(void *);
	void *arg;

	fn = current->thread.request.u.thread.proc;
	arg = current->thread.request.u.thread.arg;

	/* The sigcontext sits on the stack just past the handler's argument;
	 * remember where it is for later register access. */
	UPT_SC(&current->thread.regs.regs) = (void *) (&sig + 1);

	/* Clear the timer and I/O signals from the saved signal mask so they
	 * become deliverable when this context is resumed - this compensates
	 * for the blocked mask copied into the sigcontext (see the comment
	 * in new_thread_proc()). */
	disable = (1 << (SIGVTALRM - 1)) | (1 << (SIGALRM - 1)) |
		(1 << (SIGIO - 1)) | (1 << (SIGPROF - 1));
	SC_SIGMASK(UPT_SC(&current->thread.regs.regs)) &= ~disable;

	/* Sleep until this thread is first scheduled in. */
	suspend_new_thread(current->thread.mode.tt.switch_pipe[0]);

	force_flush_all();
	if(current->thread.prev_sched != NULL)
		schedule_tail(current->thread.prev_sched);
	current->thread.prev_sched = NULL;

	init_new_thread_signals();
	enable_timer();
	/* The temporary stack used during clone is no longer needed. */
	free_page(current->thread.temp_stack);
	set_cmdline("(kernel thread)");

	change_sig(SIGUSR1, 1);
	change_sig(SIGPROF, 1);
	local_irq_enable();
	if(!run_kernel_thread(fn, arg, &current->thread.exec_buf))
		do_exit(0);

	/* XXX No set_user_mode here because a newly execed process will
	 * immediately segfault on its non-existent IP, coming straight back
	 * to the signal handler, which will call set_user_mode on its way
	 * out.  This should probably change since it's confusing.
	 */
}
/*
 * Entry point of the clone()d host process for a new kernel thread.
 * It disables signals, installs new_thread_handler on the thread's real
 * stack, and fires it by sending itself SIGUSR1.
 */
static int new_thread_proc(void *stack)
{
	/* local_irq_disable is needed to block out signals until this thread is
	 * properly scheduled. Otherwise, the tracing thread will get mighty
	 * upset about any signals that arrive before that.
	 * This has the complication that it sets the saved signal mask in
	 * the sigcontext to block signals. This gets restored when this
	 * thread (or a descendant, since they get a copy of this sigcontext)
	 * returns to userspace.
	 * So, this is compensated for elsewhere.
	 * XXX There is still a small window until local_irq_disable() actually
	 * finishes where signals are possible - shouldn't be a problem in
	 * practice since SIGIO hasn't been forwarded here yet, and the
	 * local_irq_disable should finish before a SIGVTALRM has time to be
	 * delivered.
	 */
	local_irq_disable();

	/* Arrange for new_thread_handler to run on 'stack'... */
	init_new_thread_stack(stack, new_thread_handler);
	/* ...and trigger it with a self-directed SIGUSR1. */
	os_usr1_process(os_getpid());

	change_sig(SIGUSR1, 1);
	return(0);
}
  155. /* Signal masking - signals are blocked at the start of fork_tramp. They
  156. * are re-enabled when finish_fork_handler is entered by fork_tramp hitting
  157. * itself with a SIGUSR1. set_user_mode has to be run with SIGUSR1 off,
  158. * so it is blocked before it's called. They are re-enabled on sigreturn
  159. * despite the fact that they were blocked when the SIGUSR1 was issued because
  160. * copy_thread copies the parent's sigcontext, including the signal mask
  161. * onto the signal frame.
  162. */
/*
 * SIGUSR1 handler in which a forked child's host process resumes.
 * Mirrors new_thread_handler(), but for a fork rather than a kernel
 * thread: it waits to be scheduled, finishes the handoff, sets up
 * memory protections, and drops back to user mode.
 */
void finish_fork_handler(int sig)
{
	/* The sigcontext sits on the stack just past the handler's argument. */
	UPT_SC(&current->thread.regs.regs) = (void *) (&sig + 1);

	/* Sleep until this task is first scheduled in. */
	suspend_new_thread(current->thread.mode.tt.switch_pipe[0]);

	force_flush_all();
	if(current->thread.prev_sched != NULL)
		schedule_tail(current->thread.prev_sched);
	current->thread.prev_sched = NULL;

	enable_timer();
	change_sig(SIGVTALRM, 1);
	local_irq_enable();

	/* A child with its own address space (no CLONE_VM) needs the kernel
	 * area write-protected in its copy. */
	if(current->mm != current->parent->mm)
		protect_memory(uml_reserved, high_physmem - uml_reserved, 1,
			       1, 0, 1);
	stack_protections((unsigned long) current_thread);

	/* The temporary stack used during clone is no longer needed. */
	free_page(current->thread.temp_stack);

	/* set_user_mode must run with SIGUSR1 blocked - see the comment
	 * above fork_tramp(). */
	local_irq_disable();
	change_sig(SIGUSR1, 0);
	set_user_mode(current);
}
/*
 * Entry point of the clone()d host process for a forked task.  Analogous
 * to new_thread_proc(): block signals, install finish_fork_handler on the
 * task's real stack, and fire it with a self-directed SIGUSR1.  See the
 * signal-masking comment above.
 */
int fork_tramp(void *stack)
{
	/* Block signals until this task is properly scheduled - see the
	 * comment in new_thread_proc(). */
	local_irq_disable();
	arch_init_thread();
	init_new_thread_stack(stack, finish_fork_handler);
	os_usr1_process(os_getpid());
	change_sig(SIGUSR1, 1);
	return(0);
}
  192. int copy_thread_tt(int nr, unsigned long clone_flags, unsigned long sp,
  193. unsigned long stack_top, struct task_struct * p,
  194. struct pt_regs *regs)
  195. {
  196. int (*tramp)(void *);
  197. int new_pid, err;
  198. unsigned long stack;
  199. if(current->thread.forking)
  200. tramp = fork_tramp;
  201. else {
  202. tramp = new_thread_proc;
  203. p->thread.request.u.thread = current->thread.request.u.thread;
  204. }
  205. err = os_pipe(p->thread.mode.tt.switch_pipe, 1, 1);
  206. if(err < 0){
  207. printk("copy_thread : pipe failed, err = %d\n", -err);
  208. return(err);
  209. }
  210. stack = alloc_stack(0, 0);
  211. if(stack == 0){
  212. printk(KERN_ERR "copy_thread : failed to allocate "
  213. "temporary stack\n");
  214. return(-ENOMEM);
  215. }
  216. clone_flags &= CLONE_VM;
  217. p->thread.temp_stack = stack;
  218. new_pid = start_fork_tramp(task_stack_page(p), stack, clone_flags, tramp);
  219. if(new_pid < 0){
  220. printk(KERN_ERR "copy_thread : clone failed - errno = %d\n",
  221. -new_pid);
  222. return(new_pid);
  223. }
  224. if(current->thread.forking){
  225. sc_to_sc(UPT_SC(&p->thread.regs.regs), UPT_SC(&regs->regs));
  226. SC_SET_SYSCALL_RETURN(UPT_SC(&p->thread.regs.regs), 0);
  227. if(sp != 0)
  228. SC_SP(UPT_SC(&p->thread.regs.regs)) = sp;
  229. }
  230. p->thread.mode.tt.extern_pid = new_pid;
  231. current->thread.request.op = OP_FORK;
  232. current->thread.request.u.fork.pid = new_pid;
  233. os_usr1_process(os_getpid());
  234. /* Enable the signal and then disable it to ensure that it is handled
  235. * here, and nowhere else.
  236. */
  237. change_sig(SIGUSR1, 1);
  238. change_sig(SIGUSR1, 0);
  239. err = 0;
  240. return(err);
  241. }
/*
 * Ask the tracing thread to reboot UML: set the request op and interrupt
 * ourselves with SIGUSR1 so the tracer sees it.
 */
void reboot_tt(void)
{
	current->thread.request.op = OP_REBOOT;
	os_usr1_process(os_getpid());
	change_sig(SIGUSR1, 1);
}
/*
 * Ask the tracing thread to halt UML: set the request op and interrupt
 * ourselves with SIGUSR1 so the tracer sees it.
 */
void halt_tt(void)
{
	current->thread.request.op = OP_HALT;
	os_usr1_process(os_getpid());
	change_sig(SIGUSR1, 1);
}
  254. void kill_off_processes_tt(void)
  255. {
  256. struct task_struct *p;
  257. int me;
  258. me = os_getpid();
  259. for_each_process(p){
  260. if(p->thread.mode.tt.extern_pid != me)
  261. os_kill_process(p->thread.mode.tt.extern_pid, 0);
  262. }
  263. if(init_task.thread.mode.tt.extern_pid != me)
  264. os_kill_process(init_task.thread.mode.tt.extern_pid, 0);
  265. }
/*
 * Run a callback in the tracing thread.  If we already are the tracing
 * thread, call it directly; otherwise hand it over via an OP_CB request
 * and a self-directed SIGUSR1.
 */
void initial_thread_cb_tt(void (*proc)(void *), void *arg)
{
	if(os_getpid() == tracing_pid){
		(*proc)(arg);
	}
	else {
		current->thread.request.op = OP_CB;
		current->thread.request.u.cb.proc = proc;
		current->thread.request.u.cb.arg = arg;
		os_usr1_process(os_getpid());
		/* Enable then disable SIGUSR1 so the request is handled here
		 * and nowhere else - same trick as in copy_thread_tt(). */
		change_sig(SIGUSR1, 1);
		change_sig(SIGUSR1, 0);
	}
}
  280. int do_proc_op(void *t, int proc_id)
  281. {
  282. struct task_struct *task;
  283. struct thread_struct *thread;
  284. int op, pid;
  285. task = t;
  286. thread = &task->thread;
  287. op = thread->request.op;
  288. switch(op){
  289. case OP_NONE:
  290. case OP_TRACE_ON:
  291. break;
  292. case OP_EXEC:
  293. pid = thread->request.u.exec.pid;
  294. do_exec(thread->mode.tt.extern_pid, pid);
  295. thread->mode.tt.extern_pid = pid;
  296. cpu_tasks[task_thread_info(task)->cpu].pid = pid;
  297. break;
  298. case OP_FORK:
  299. attach_process(thread->request.u.fork.pid);
  300. break;
  301. case OP_CB:
  302. (*thread->request.u.cb.proc)(thread->request.u.cb.arg);
  303. break;
  304. case OP_REBOOT:
  305. case OP_HALT:
  306. break;
  307. default:
  308. tracer_panic("Bad op in do_proc_op");
  309. break;
  310. }
  311. thread->request.op = OP_NONE;
  312. return(op);
  313. }
/* Enter the idle loop - tt mode needs no extra idle setup. */
void init_idle_tt(void)
{
	default_idle();
}
extern void start_kernel(void);

/*
 * Entry point of the host process that becomes the UML kernel proper:
 * record it as CPU 0's task, then run the generic start_kernel().
 */
static int start_kernel_proc(void *unused)
{
	int pid;

	block_signals();
	pid = os_getpid();

	cpu_tasks[0].pid = pid;
	cpu_tasks[0].task = current;
#ifdef CONFIG_SMP
	cpu_online_map = cpumask_of_cpu(0);
#endif
	/* With debugging enabled, stop here so a debugger can attach. */
	if(debug) os_stop_process(pid);
	start_kernel();
	return(0);
}
  333. void set_tracing(void *task, int tracing)
  334. {
  335. ((struct task_struct *) task)->thread.mode.tt.tracing = tracing;
  336. }
  337. int is_tracing(void *t)
  338. {
  339. return (((struct task_struct *) t)->thread.mode.tt.tracing);
  340. }
/*
 * Request that the given task (or current, if t is NULL) be switched to
 * user mode by the tracing thread.  Returns 1 if the task is already
 * being traced (nothing to do), 0 after posting the OP_TRACE_ON request.
 */
int set_user_mode(void *t)
{
	struct task_struct *task;

	task = t ? t : current;
	if(task->thread.mode.tt.tracing)
		return(1);

	/* Notify the tracing thread via a self-directed SIGUSR1. */
	task->thread.request.op = OP_TRACE_ON;
	os_usr1_process(os_getpid());
	return(0);
}
  351. void set_init_pid(int pid)
  352. {
  353. int err;
  354. init_task.thread.mode.tt.extern_pid = pid;
  355. err = os_pipe(init_task.thread.mode.tt.switch_pipe, 1, 1);
  356. if(err)
  357. panic("Can't create switch pipe for init_task, errno = %d",
  358. -err);
  359. }
  360. int start_uml_tt(void)
  361. {
  362. void *sp;
  363. int pages;
  364. pages = (1 << CONFIG_KERNEL_STACK_ORDER);
  365. sp = task_stack_page(&init_task) +
  366. pages * PAGE_SIZE - sizeof(unsigned long);
  367. return(tracer(start_kernel_proc, sp));
  368. }
  369. int external_pid_tt(struct task_struct *task)
  370. {
  371. return(task->thread.mode.tt.extern_pid);
  372. }
  373. int thread_pid_tt(struct task_struct *task)
  374. {
  375. return(task->thread.mode.tt.extern_pid);
  376. }
  377. int is_valid_pid(int pid)
  378. {
  379. struct task_struct *task;
  380. read_lock(&tasklist_lock);
  381. for_each_process(task){
  382. if(task->thread.mode.tt.extern_pid == pid){
  383. read_unlock(&tasklist_lock);
  384. return(1);
  385. }
  386. }
  387. read_unlock(&tasklist_lock);
  388. return(0);
  389. }