/*
 * Copyright (C) 2002 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 * Licensed under the GPL
 */

#include <stdlib.h>
#include <unistd.h>
#include <sched.h>
#include <errno.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/ptrace.h>
#include <sys/wait.h>
#include <asm/unistd.h>
#include "as-layout.h"
#include "chan_user.h"
#include "kern_constants.h"
#include "kern_util.h"
#include "mem.h"
#include "os.h"
#include "process.h"
#include "proc_mm.h"
#include "ptrace_user.h"
#include "registers.h"
#include "skas.h"
#include "skas_ptrace.h"
#include "user.h"
#include "sysdep/stub.h"
  28. int is_skas_winch(int pid, int fd, void *data)
  29. {
  30. if (pid != getpgrp())
  31. return 0;
  32. register_winch_irq(-1, fd, -1, data, 0);
  33. return 1;
  34. }
  35. static int ptrace_dump_regs(int pid)
  36. {
  37. unsigned long regs[MAX_REG_NR];
  38. int i;
  39. if (ptrace(PTRACE_GETREGS, pid, 0, regs) < 0)
  40. return -errno;
  41. printk(UM_KERN_ERR "Stub registers -\n");
  42. for (i = 0; i < ARRAY_SIZE(regs); i++)
  43. printk(UM_KERN_ERR "\t%d - %lx\n", i, regs[i]);
  44. return 0;
  45. }
/*
 * Signals that are OK to receive in the stub - we'll just continue it.
 * SIGWINCH will happen when UML is inside a detached screen.
 */
#define STUB_SIG_MASK (1 << SIGVTALRM)

/* Signals that the stub will finish with - anything else is an error */
#define STUB_DONE_MASK (1 << SIGTRAP)

/*
 * Wait for the stub process to stop with its "done" signal (SIGTRAP).
 *
 * Stops with a signal in STUB_SIG_MASK (SIGVTALRM) are expected noise
 * and the stub is simply continued.  A wait failure, a non-stop status,
 * or a stop with any signal outside STUB_DONE_MASK is fatal: the stub's
 * registers are dumped and fatal_sigsegv() is called (does not return).
 */
void wait_stub_done(int pid)
{
	int n, status, err;

	while (1) {
		CATCH_EINTR(n = waitpid(pid, &status, WUNTRACED | __WALL));
		if ((n < 0) || !WIFSTOPPED(status))
			goto bad_wait;

		/* Any signal outside STUB_SIG_MASK ends the wait loop */
		if (((1 << WSTOPSIG(status)) & STUB_SIG_MASK) == 0)
			break;

		/* Benign stop (SIGVTALRM) - just restart the stub */
		err = ptrace(PTRACE_CONT, pid, 0, 0);
		if (err) {
			printk(UM_KERN_ERR "wait_stub_done : continue failed, "
			       "errno = %d\n", errno);
			fatal_sigsegv();
		}
	}

	if (((1 << WSTOPSIG(status)) & STUB_DONE_MASK) != 0)
		return;

bad_wait:
	err = ptrace_dump_regs(pid);
	if (err)
		printk(UM_KERN_ERR "Failed to get registers from stub, "
		       "errno = %d\n", -err);
	printk(UM_KERN_ERR "wait_stub_done : failed to wait for SIGTRAP, "
	       "pid = %d, n = %d, errno = %d, status = 0x%x\n", pid, n, errno,
	       status);
	fatal_sigsegv();
}
extern unsigned long current_stub_stack(void);

/*
 * Fill *fi with fault information for the given child process.
 *
 * When the host supports ptrace_faultinfo, PTRACE_FAULTINFO is used
 * directly; if the host's struct ptrace_faultinfo is smaller than UML's
 * struct faultinfo (the i386 case), the remaining tail of *fi is
 * zeroed.  Otherwise the child is continued with SIGSEGV so the stub's
 * segv handler runs, and the faultinfo it deposits at the start of the
 * stub stack page is copied out.  Any ptrace failure is fatal
 * (fatal_sigsegv() does not return).
 */
void get_skas_faultinfo(int pid, struct faultinfo * fi)
{
	int err;

	if (ptrace_faultinfo) {
		err = ptrace(PTRACE_FAULTINFO, pid, 0, fi);
		if (err) {
			printk(UM_KERN_ERR "get_skas_faultinfo - "
			       "PTRACE_FAULTINFO failed, errno = %d\n", errno);
			fatal_sigsegv();
		}

		/* Special handling for i386, which has different structs */
		if (sizeof(struct ptrace_faultinfo) < sizeof(struct faultinfo))
			memset((char *)fi + sizeof(struct ptrace_faultinfo), 0,
			       sizeof(struct faultinfo) -
			       sizeof(struct ptrace_faultinfo));
	}
	else {
		err = ptrace(PTRACE_CONT, pid, 0, SIGSEGV);
		if (err) {
			printk(UM_KERN_ERR "Failed to continue stub, pid = %d, "
			       "errno = %d\n", pid, errno);
			fatal_sigsegv();
		}
		wait_stub_done(pid);

		/*
		 * faultinfo is prepared by the stub-segv-handler at start of
		 * the stub stack page. We just have to copy it.
		 */
		memcpy(fi, (void *)current_stub_stack(), sizeof(*fi));
	}
}
/* SIGSEGV in the child: fetch the fault info and hand it to segv(). */
static void handle_segv(int pid, struct uml_pt_regs * regs)
{
	get_skas_faultinfo(pid, &regs->faultinfo);
	segv(regs->faultinfo, 0, 1, NULL);
}
/*
 * Handle a syscall stop in the child.
 *
 * To use the same value of using_sysemu as the caller, ask it that
 * value (in local_using_sysemu).
 */
static void handle_trap(int pid, struct uml_pt_regs *regs,
			int local_using_sysemu)
{
	int err, status;

	/* Mark this as a syscall */
	UPT_SYSCALL_NR(regs) = PT_SYSCALL_NR(regs->gp);

	if (!local_using_sysemu)
	{
		/*
		 * Without sysemu the syscall would actually execute on the
		 * host, so neutralize it by rewriting the syscall number to
		 * getpid, then run the child to the end of the syscall.
		 */
		err = ptrace(PTRACE_POKEUSR, pid, PT_SYSCALL_NR_OFFSET,
			     __NR_getpid);
		if (err < 0) {
			printk(UM_KERN_ERR "handle_trap - nullifying syscall "
			       "failed, errno = %d\n", errno);
			fatal_sigsegv();
		}

		err = ptrace(PTRACE_SYSCALL, pid, 0, 0);
		if (err < 0) {
			printk(UM_KERN_ERR "handle_trap - continuing to end of "
			       "syscall failed, errno = %d\n", errno);
			fatal_sigsegv();
		}

		/*
		 * Expect the syscall-exit stop, reported as SIGTRAP | 0x80
		 * because of PTRACE_O_TRACESYSGOOD.
		 */
		CATCH_EINTR(err = waitpid(pid, &status, WUNTRACED | __WALL));
		if ((err < 0) || !WIFSTOPPED(status) ||
		    (WSTOPSIG(status) != SIGTRAP + 0x80)) {
			err = ptrace_dump_regs(pid);
			if (err)
				printk(UM_KERN_ERR "Failed to get registers "
				       "from process, errno = %d\n", -err);
			printk(UM_KERN_ERR "handle_trap - failed to wait at "
			       "end of syscall, errno = %d, status = %d\n",
			       errno, status);
			fatal_sigsegv();
		}
	}

	handle_syscall(regs);
}
extern int __syscall_stub_start;

/*
 * Entry point of the child cloned by start_userspace().
 *
 * Arranges to be ptraced, resets SIGTERM/SIGWINCH dispositions, and
 * starts the interval timer.  Without proc_mm, it maps the syscall stub
 * code page at STUB_CODE and (when a stack page was passed) the stub
 * data page at STUB_DATA by hand - these pages are part of the
 * tlb_flush mechanism and so can't be mapped through it.  When
 * faultinfo must be collected via the stub (!ptrace_faultinfo), a
 * SIGSEGV handler located inside the mapped stub code page is installed
 * on an alternate stack on the stub data page.  Finally the child stops
 * itself with SIGSTOP to tell the parent that setup is complete.
 */
static int userspace_tramp(void *stack)
{
	void *addr;
	int err;

	ptrace(PTRACE_TRACEME, 0, 0, 0);

	signal(SIGTERM, SIG_DFL);
	signal(SIGWINCH, SIG_IGN);
	err = set_interval();
	if (err) {
		printk(UM_KERN_ERR "userspace_tramp - setting timer failed, "
		       "errno = %d\n", err);
		exit(1);
	}

	if (!proc_mm) {
		/*
		 * This has a pte, but it can't be mapped in with the usual
		 * tlb_flush mechanism because this is part of that mechanism
		 */
		int fd;
		unsigned long long offset;

		fd = phys_mapping(to_phys(&__syscall_stub_start), &offset);
		addr = mmap64((void *) STUB_CODE, UM_KERN_PAGE_SIZE,
			      PROT_EXEC, MAP_FIXED | MAP_PRIVATE, fd, offset);
		if (addr == MAP_FAILED) {
			printk(UM_KERN_ERR "mapping mmap stub at 0x%lx failed, "
			       "errno = %d\n", STUB_CODE, errno);
			exit(1);
		}

		if (stack != NULL) {
			fd = phys_mapping(to_phys(stack), &offset);
			addr = mmap((void *) STUB_DATA,
				    UM_KERN_PAGE_SIZE, PROT_READ | PROT_WRITE,
				    MAP_FIXED | MAP_SHARED, fd, offset);
			if (addr == MAP_FAILED) {
				printk(UM_KERN_ERR "mapping segfault stack "
				       "at 0x%lx failed, errno = %d\n",
				       STUB_DATA, errno);
				exit(1);
			}
		}
	}
	if (!ptrace_faultinfo && (stack != NULL)) {
		struct sigaction sa;

		/*
		 * Address of stub_segv_handler relocated into the stub code
		 * page mapped at STUB_CODE above.
		 */
		unsigned long v = STUB_CODE +
				  (unsigned long) stub_segv_handler -
				  (unsigned long) &__syscall_stub_start;

		set_sigstack((void *) STUB_DATA, UM_KERN_PAGE_SIZE);
		sigemptyset(&sa.sa_mask);
		sa.sa_flags = SA_ONSTACK | SA_NODEFER;
		sa.sa_handler = (void *) v;
		sa.sa_restorer = NULL;
		if (sigaction(SIGSEGV, &sa, NULL) < 0) {
			printk(UM_KERN_ERR "userspace_tramp - setting SIGSEGV "
			       "handler failed - errno = %d\n", errno);
			exit(1);
		}
	}

	/* Setup done - stop and let the parent in start_userspace() resume us */
	kill(os_getpid(), SIGSTOP);
	return 0;
}
  219. /* Each element set once, and only accessed by a single processor anyway */
  220. #undef NR_CPUS
  221. #define NR_CPUS 1
  222. int userspace_pid[NR_CPUS];
  223. int start_userspace(unsigned long stub_stack)
  224. {
  225. void *stack;
  226. unsigned long sp;
  227. int pid, status, n, flags, err;
  228. stack = mmap(NULL, UM_KERN_PAGE_SIZE,
  229. PROT_READ | PROT_WRITE | PROT_EXEC,
  230. MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  231. if (stack == MAP_FAILED) {
  232. err = -errno;
  233. printk(UM_KERN_ERR "start_userspace : mmap failed, "
  234. "errno = %d", errno);
  235. return err;
  236. }
  237. sp = (unsigned long) stack + UM_KERN_PAGE_SIZE - sizeof(void *);
  238. flags = CLONE_FILES;
  239. if (proc_mm)
  240. flags |= CLONE_VM;
  241. else
  242. flags |= SIGCHLD;
  243. pid = clone(userspace_tramp, (void *) sp, flags, (void *) stub_stack);
  244. if (pid < 0) {
  245. err = -errno;
  246. printk(UM_KERN_ERR "start_userspace : clone failed, "
  247. "errno = %d", errno);
  248. return err;
  249. }
  250. do {
  251. CATCH_EINTR(n = waitpid(pid, &status, WUNTRACED | __WALL));
  252. if (n < 0) {
  253. err = -errno;
  254. printk(UM_KERN_ERR "start_userspace : wait failed, "
  255. "errno = %d", errno);
  256. goto out_kill;
  257. }
  258. } while (WIFSTOPPED(status) && (WSTOPSIG(status) == SIGVTALRM));
  259. if (!WIFSTOPPED(status) || (WSTOPSIG(status) != SIGSTOP)) {
  260. err = -EINVAL;
  261. printk(UM_KERN_ERR "start_userspace : expected SIGSTOP, got "
  262. "status = %d", status);
  263. goto out_kill;
  264. }
  265. if (ptrace(PTRACE_OLDSETOPTIONS, pid, NULL,
  266. (void *) PTRACE_O_TRACESYSGOOD) < 0) {
  267. err = -errno;
  268. printk(UM_KERN_ERR "start_userspace : PTRACE_OLDSETOPTIONS "
  269. "failed, errno = %d\n", errno);
  270. goto out_kill;
  271. }
  272. if (munmap(stack, UM_KERN_PAGE_SIZE) < 0) {
  273. err = -errno;
  274. printk(UM_KERN_ERR "start_userspace : munmap failed, "
  275. "errno = %d\n", errno);
  276. goto out_kill;
  277. }
  278. return pid;
  279. out_kill:
  280. os_kill_ptraced_process(pid, 1);
  281. return err;
  282. }
/*
 * Main tracing loop: run the traced child (userspace_pid[0]) and
 * service its stops until this UML thread is switched away from.
 *
 * Each iteration loads the child's registers from regs->gp, continues
 * it with the ptrace operation chosen from the sysemu mode and
 * single-stepping state, and on the next stop reads the registers back
 * and dispatches on the stop signal: SIGSEGV to the fault path,
 * SIGTRAP + 0x80 (TRACESYSGOOD) to the syscall path, SIGVTALRM to the
 * timer handler (rate-limited by a nanosecond deadline derived from
 * the virtual itimer), and SIGIO/SIGILL/SIGBUS/SIGFPE/SIGWINCH to
 * their sig_info handlers.  Any ptrace/wait failure or unexpected
 * signal is fatal.
 */
void userspace(struct uml_pt_regs *regs)
{
	struct itimerval timer;
	unsigned long long nsecs, now;
	int err, status, op, pid = userspace_pid[0];
	/* To prevent races if using_sysemu changes under us.*/
	int local_using_sysemu;

	if (getitimer(ITIMER_VIRTUAL, &timer))
		printk("Failed to get itimer, errno = %d\n", errno);

	/* Absolute deadline (ns) for delivering the next timer tick */
	nsecs = timer.it_value.tv_sec * UM_NSEC_PER_SEC +
		timer.it_value.tv_usec * UM_NSEC_PER_USEC;
	nsecs += os_nsecs();

	while (1) {
		/*
		 * This can legitimately fail if the process loads a
		 * bogus value into a segment register. It will
		 * segfault and PTRACE_GETREGS will read that value
		 * out of the process. However, PTRACE_SETREGS will
		 * fail. In this case, there is nothing to do but
		 * just kill the process.
		 */
		if (ptrace(PTRACE_SETREGS, pid, 0, regs->gp))
			fatal_sigsegv();

		/* Now we set local_using_sysemu to be used for one loop */
		local_using_sysemu = get_using_sysemu();

		op = SELECT_PTRACE_OPERATION(local_using_sysemu,
					     singlestepping(NULL));

		if (ptrace(op, pid, 0, 0)) {
			printk(UM_KERN_ERR "userspace - ptrace continue "
			       "failed, op = %d, errno = %d\n", op, errno);
			fatal_sigsegv();
		}

		CATCH_EINTR(err = waitpid(pid, &status, WUNTRACED | __WALL));
		if (err < 0) {
			printk(UM_KERN_ERR "userspace - wait failed, "
			       "errno = %d\n", errno);
			fatal_sigsegv();
		}

		regs->is_user = 1;
		if (ptrace(PTRACE_GETREGS, pid, 0, regs->gp)) {
			printk(UM_KERN_ERR "userspace - PTRACE_GETREGS failed, "
			       "errno = %d\n", errno);
			fatal_sigsegv();
		}

		UPT_SYSCALL_NR(regs) = -1; /* Assume: It's not a syscall */

		if (WIFSTOPPED(status)) {
			int sig = WSTOPSIG(status);
			switch(sig) {
			case SIGSEGV:
				if (PTRACE_FULL_FAULTINFO ||
				    !ptrace_faultinfo) {
					get_skas_faultinfo(pid,
							   &regs->faultinfo);
					(*sig_info[SIGSEGV])(SIGSEGV, regs);
				}
				else handle_segv(pid, regs);
				break;
			case SIGTRAP + 0x80:
				/* Syscall stop (PTRACE_O_TRACESYSGOOD) */
				handle_trap(pid, regs, local_using_sysemu);
				break;
			case SIGTRAP:
				relay_signal(SIGTRAP, regs);
				break;
			case SIGVTALRM:
				/* Swallow ticks arriving before the deadline */
				now = os_nsecs();
				if (now < nsecs)
					break;
				block_signals();
				(*sig_info[sig])(sig, regs);
				unblock_signals();
				/* Rearm: next deadline one itimer period out */
				nsecs = timer.it_value.tv_sec *
					UM_NSEC_PER_SEC +
					timer.it_value.tv_usec *
					UM_NSEC_PER_USEC;
				nsecs += os_nsecs();
				break;
			case SIGIO:
			case SIGILL:
			case SIGBUS:
			case SIGFPE:
			case SIGWINCH:
				block_signals();
				(*sig_info[sig])(sig, regs);
				unblock_signals();
				break;
			default:
				printk(UM_KERN_ERR "userspace - child stopped "
				       "with signal %d\n", sig);
				fatal_sigsegv();
			}
			/* A handler may have switched processes under us */
			pid = userspace_pid[0];
			interrupt_end();

			/* Avoid -ERESTARTSYS handling in host */
			if (PT_SYSCALL_NR_OFFSET != PT_SYSCALL_RET_OFFSET)
				PT_SYSCALL_NR(regs->gp) = -1;
		}
	}
}
/* Register template used to start clone stubs; filled in once at boot */
static unsigned long thread_regs[MAX_REG_NR];

/*
 * Build the register set for starting a clone stub: safe register
 * values with IP at stub_clone_handler (relocated into the stub code
 * page at STUB_CODE) and SP at the top of the stub data page, minus
 * the architecture's signal frame redzone where one is defined.
 */
static int __init init_thread_regs(void)
{
	get_safe_registers(thread_regs);
	/* Set parent's instruction pointer to start of clone-stub */
	thread_regs[REGS_IP_INDEX] = STUB_CODE +
		(unsigned long) stub_clone_handler -
		(unsigned long) &__syscall_stub_start;
	thread_regs[REGS_SP_INDEX] = STUB_DATA + UM_KERN_PAGE_SIZE -
		sizeof(void *);
#ifdef __SIGNAL_FRAMESIZE
	thread_regs[REGS_SP_INDEX] -= __SIGNAL_FRAMESIZE;
#endif
	return 0;
}

__initcall(init_thread_regs);
/*
 * Fork a new process via the clone stub: the existing stub process
 * @pid executes stub_clone_handler, whose clone() creates the child
 * inside the same address-space setup.
 *
 * @new_stack: page that becomes the child's stub stack; the current
 *             stub's data page is loaded with the fd and page offset
 *             of @new_stack plus the itimer values, which the stubs
 *             use as mmap2/setitimer arguments.
 * @pid:       stub process that runs the clone.
 *
 * The parent stub reports the child's pid through data->err; the child
 * stub reports success by writing STUB_DATA into child_data->err.
 * Returns the new pid, or a negative error (killing the child if one
 * was created).
 */
int copy_context_skas0(unsigned long new_stack, int pid)
{
	struct timeval tv = { .tv_sec = 0, .tv_usec = UM_USEC_PER_SEC / UM_HZ };
	int err;
	unsigned long current_stack = current_stub_stack();
	struct stub_data *data = (struct stub_data *) current_stack;
	struct stub_data *child_data = (struct stub_data *) new_stack;
	unsigned long long new_offset;
	int new_fd = phys_mapping(to_phys((void *)new_stack), &new_offset);

	/*
	 * prepare offset and fd of child's stack as argument for parent's
	 * and child's mmap2 calls
	 */
	*data = ((struct stub_data) { .offset = MMAP_OFFSET(new_offset),
				      .fd = new_fd,
				      .timer = ((struct itimerval)
						{ .it_value = tv,
						  .it_interval = tv }) });

	err = ptrace_setregs(pid, thread_regs);
	if (err < 0) {
		err = -errno;
		printk(UM_KERN_ERR "copy_context_skas0 : PTRACE_SETREGS "
		       "failed, pid = %d, errno = %d\n", pid, -err);
		return err;
	}

	/* set a well known return code for detection of child write failure */
	child_data->err = 12345678;

	/*
	 * Wait, until parent has finished its work: read child's pid from
	 * parent's stack, and check, if bad result.
	 */
	err = ptrace(PTRACE_CONT, pid, 0, 0);
	if (err) {
		err = -errno;
		printk(UM_KERN_ERR "Failed to continue new process, pid = %d, "
		       "errno = %d\n", pid, errno);
		return err;
	}
	wait_stub_done(pid);

	pid = data->err;
	if (pid < 0) {
		printk(UM_KERN_ERR "copy_context_skas0 - stub-parent reports "
		       "error %d\n", -pid);
		return pid;
	}

	/*
	 * Wait, until child has finished too: read child's result from
	 * child's stack and check it.
	 */
	wait_stub_done(pid);
	if (child_data->err != STUB_DATA) {
		printk(UM_KERN_ERR "copy_context_skas0 - stub-child reports "
		       "error %ld\n", child_data->err);
		err = child_data->err;
		goto out_kill;
	}

	if (ptrace(PTRACE_OLDSETOPTIONS, pid, NULL,
		   (void *)PTRACE_O_TRACESYSGOOD) < 0) {
		err = -errno;
		printk(UM_KERN_ERR "copy_context_skas0 : PTRACE_OLDSETOPTIONS "
		       "failed, errno = %d\n", errno);
		goto out_kill;
	}

	return pid;

out_kill:
	os_kill_ptraced_process(pid, 1);
	return err;
}
/*
 * This is used only, if stub pages are needed, while proc_mm is
 * available. Opening /proc/mm creates a new mm_context, which lacks
 * the stub-pages. Thus, we map them using /proc/mm-fd.
 *
 * @fd:    /proc/mm descriptor of the new mm_context.
 * @code:  address at which to map the stub code page (exec-only).
 * @data:  address at which to map the stub data page (read-write).
 * @stack: physical-backed stub stack page backing the data mapping;
 *         if 0, the data page is not mapped.
 *
 * Returns 0 on success or a negative errno from the failed write.
 */
int map_stub_pages(int fd, unsigned long code, unsigned long data,
		   unsigned long stack)
{
	struct proc_mm_op mmop;
	int n;
	unsigned long long code_offset;
	int code_fd = phys_mapping(to_phys((void *) &__syscall_stub_start),
				   &code_offset);

	/* MM_MMAP request for the stub code page */
	mmop = ((struct proc_mm_op) { .op = MM_MMAP,
				      .u =
				      { .mmap =
					{ .addr = code,
					  .len = UM_KERN_PAGE_SIZE,
					  .prot = PROT_EXEC,
					  .flags = MAP_FIXED | MAP_PRIVATE,
					  .fd = code_fd,
					  .offset = code_offset
					} } });
	CATCH_EINTR(n = write(fd, &mmop, sizeof(mmop)));
	if (n != sizeof(mmop)) {
		n = errno;
		printk(UM_KERN_ERR "mmap args - addr = 0x%lx, fd = %d, "
		       "offset = %llx\n", code, code_fd,
		       (unsigned long long) code_offset);
		printk(UM_KERN_ERR "map_stub_pages : /proc/mm map for code "
		       "failed, err = %d\n", n);
		return -n;
	}

	if (stack) {
		/* MM_MMAP request for the stub data page */
		unsigned long long map_offset;
		int map_fd = phys_mapping(to_phys((void *)stack), &map_offset);
		mmop = ((struct proc_mm_op)
				{ .op = MM_MMAP,
				  .u =
				  { .mmap =
				    { .addr = data,
				      .len = UM_KERN_PAGE_SIZE,
				      .prot = PROT_READ | PROT_WRITE,
				      .flags = MAP_FIXED | MAP_SHARED,
				      .fd = map_fd,
				      .offset = map_offset
				    } } });
		CATCH_EINTR(n = write(fd, &mmop, sizeof(mmop)));
		if (n != sizeof(mmop)) {
			n = errno;
			printk(UM_KERN_ERR "map_stub_pages : /proc/mm map for "
			       "data failed, err = %d\n", n);
			return -n;
		}
	}

	return 0;
}
  522. void new_thread(void *stack, jmp_buf *buf, void (*handler)(void))
  523. {
  524. (*buf)[0].JB_IP = (unsigned long) handler;
  525. (*buf)[0].JB_SP = (unsigned long) stack + UM_THREAD_SIZE -
  526. sizeof(void *);
  527. }
/* Return codes of the initial-thread setjmp in start_idle_thread() */
#define INIT_JMP_NEW_THREAD 0
#define INIT_JMP_CALLBACK 1
#define INIT_JMP_HALT 2
#define INIT_JMP_REBOOT 3

/*
 * Save the current context in *me and jump to the context saved in
 * *you; control returns here when another thread longjmps back to *me.
 */
void switch_threads(jmp_buf *me, jmp_buf *you)
{
	if (UML_SETJMP(me) == 0)
		UML_LONGJMP(you, 1);
}
/* Jump target anchored on the boot stack, set up by start_idle_thread() */
static jmp_buf initial_jmpbuf;

/* XXX Make these percpu */
/* Callback slots used by initial_thread_cb_skas() / INIT_JMP_CALLBACK */
static void (*cb_proc)(void *arg);
static void *cb_arg;
static jmp_buf *cb_back;

/*
 * Anchor the boot stack as the jump-back target for thread management,
 * then transfer control to the idle thread.
 *
 * The setjmp on initial_jmpbuf is re-entered with one of the INIT_JMP_*
 * codes: NEW_THREAD re-targets *switch_buf at new_thread_handler on the
 * given stack, CALLBACK runs the registered cb_proc(cb_arg) on this
 * (boot) stack and jumps back to the caller, HALT/REBOOT clear
 * kmalloc_ok and return 0/1 to the original caller.  All other paths
 * end in longjmp(*switch_buf), so the function only returns for
 * HALT/REBOOT.
 */
int start_idle_thread(void *stack, jmp_buf *switch_buf)
{
	int n;

	set_handler(SIGWINCH, (__sighandler_t) sig_handler,
		    SA_ONSTACK | SA_RESTART, SIGUSR1, SIGIO, SIGVTALRM, -1);

	/*
	 * Can't use UML_SETJMP or UML_LONGJMP here because they save
	 * and restore signals, with the possible side-effect of
	 * trying to handle any signals which came when they were
	 * blocked, which can't be done on this stack.
	 * Signals must be blocked when jumping back here and restored
	 * after returning to the jumper.
	 */
	n = setjmp(initial_jmpbuf);
	switch(n) {
	case INIT_JMP_NEW_THREAD:
		/* Prepare *switch_buf to enter new_thread_handler on stack */
		(*switch_buf)[0].JB_IP = (unsigned long) new_thread_handler;
		(*switch_buf)[0].JB_SP = (unsigned long) stack +
			UM_THREAD_SIZE - sizeof(void *);
		break;
	case INIT_JMP_CALLBACK:
		/* Run the callback registered by initial_thread_cb_skas() */
		(*cb_proc)(cb_arg);
		longjmp(*cb_back, 1);
		break;
	case INIT_JMP_HALT:
		kmalloc_ok = 0;
		return 0;
	case INIT_JMP_REBOOT:
		kmalloc_ok = 0;
		return 1;
	default:
		printk(UM_KERN_ERR "Bad sigsetjmp return in "
		       "start_idle_thread - %d\n", n);
		fatal_sigsegv();
	}
	longjmp(*switch_buf, 1);
}
/*
 * Run proc(arg) on the initial (boot) thread's stack: publish the
 * callback through cb_proc/cb_arg/cb_back, jump to initial_jmpbuf with
 * INIT_JMP_CALLBACK (start_idle_thread() invokes the callback and
 * longjmps back here via cb_back), then clear the callback slots.
 * Signals are blocked across the stack switch.
 */
void initial_thread_cb_skas(void (*proc)(void *), void *arg)
{
	jmp_buf here;

	cb_proc = proc;
	cb_arg = arg;
	cb_back = &here;

	block_signals();
	if (UML_SETJMP(&here) == 0)
		UML_LONGJMP(&initial_jmpbuf, INIT_JMP_CALLBACK);
	unblock_signals();

	cb_proc = NULL;
	cb_arg = NULL;
	cb_back = NULL;
}
/*
 * Jump back to the boot stack with signals blocked, making
 * start_idle_thread() return 0 (halt).  Does not return.
 */
void halt_skas(void)
{
	block_signals();
	UML_LONGJMP(&initial_jmpbuf, INIT_JMP_HALT);
}
/*
 * Jump back to the boot stack with signals blocked, making
 * start_idle_thread() return 1 (reboot).  Does not return.
 */
void reboot_skas(void)
{
	block_signals();
	UML_LONGJMP(&initial_jmpbuf, INIT_JMP_REBOOT);
}
  603. void __switch_mm(struct mm_id *mm_idp)
  604. {
  605. int err;
  606. /* FIXME: need cpu pid in __switch_mm */
  607. if (proc_mm) {
  608. err = ptrace(PTRACE_SWITCH_MM, userspace_pid[0], 0,
  609. mm_idp->u.mm_fd);
  610. if (err) {
  611. printk(UM_KERN_ERR "__switch_mm - PTRACE_SWITCH_MM "
  612. "failed, errno = %d\n", errno);
  613. fatal_sigsegv();
  614. }
  615. }
  616. else userspace_pid[0] = mm_idp->u.pid;
  617. }