/* arch/um/os-Linux/skas/process.c */
  1. /*
  2. * Copyright (C) 2002- 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
  3. * Licensed under the GPL
  4. */
  5. #include <stdlib.h>
  6. #include <unistd.h>
  7. #include <sched.h>
  8. #include <errno.h>
  9. #include <string.h>
  10. #include <sys/mman.h>
  11. #include <sys/ptrace.h>
  12. #include <sys/wait.h>
  13. #include <asm/unistd.h>
  14. #include "as-layout.h"
  15. #include "chan_user.h"
  16. #include "kern_util.h"
  17. #include "mem.h"
  18. #include "os.h"
  19. #include "proc_mm.h"
  20. #include "ptrace_user.h"
  21. #include "registers.h"
  22. #include "skas.h"
  23. #include "skas_ptrace.h"
  24. #include "sysdep/stub.h"
  25. int is_skas_winch(int pid, int fd, void *data)
  26. {
  27. return pid == getpgrp();
  28. }
  29. static int ptrace_dump_regs(int pid)
  30. {
  31. unsigned long regs[MAX_REG_NR];
  32. int i;
  33. if (ptrace(PTRACE_GETREGS, pid, 0, regs) < 0)
  34. return -errno;
  35. printk(UM_KERN_ERR "Stub registers -\n");
  36. for (i = 0; i < ARRAY_SIZE(regs); i++)
  37. printk(UM_KERN_ERR "\t%d - %lx\n", i, regs[i]);
  38. return 0;
  39. }
/*
 * Signals that are OK to receive in the stub - we'll just continue it.
 * SIGWINCH will happen when UML is inside a detached screen.
 */
#define STUB_SIG_MASK ((1 << SIGVTALRM) | (1 << SIGWINCH))

/* Signals that the stub will finish with - anything else is an error */
#define STUB_DONE_MASK (1 << SIGTRAP)
/*
 * Wait for the stub to finish.  Stops caused by benign signals
 * (STUB_SIG_MASK) are swallowed by continuing the child; the loop exits
 * on the first stop outside that set.  A SIGTRAP stop is success;
 * anything else (or a wait failure) dumps the stub's registers and
 * panics via fatal_sigsegv(), since the stub's state is then unknown.
 */
void wait_stub_done(int pid)
{
	int n, status, err;

	while (1) {
		CATCH_EINTR(n = waitpid(pid, &status, WUNTRACED | __WALL));
		if ((n < 0) || !WIFSTOPPED(status))
			goto bad_wait;

		/* Any signal outside STUB_SIG_MASK ends the loop */
		if (((1 << WSTOPSIG(status)) & STUB_SIG_MASK) == 0)
			break;

		err = ptrace(PTRACE_CONT, pid, 0, 0);
		if (err) {
			printk(UM_KERN_ERR "wait_stub_done : continue failed, "
			       "errno = %d\n", errno);
			fatal_sigsegv();
		}
	}

	if (((1 << WSTOPSIG(status)) & STUB_DONE_MASK) != 0)
		return;

bad_wait:
	err = ptrace_dump_regs(pid);
	if (err)
		printk(UM_KERN_ERR "Failed to get registers from stub, "
		       "errno = %d\n", -err);
	printk(UM_KERN_ERR "wait_stub_done : failed to wait for SIGTRAP, "
	       "pid = %d, n = %d, errno = %d, status = 0x%x\n", pid, n, errno,
	       status);
	fatal_sigsegv();
}
extern unsigned long current_stub_stack(void);

/*
 * Read the fault information for the SIGSEGV just taken by the process.
 * With ptrace_faultinfo the host supplies it directly via
 * PTRACE_FAULTINFO; otherwise the process is continued into the stub's
 * SIGSEGV handler, which writes the faultinfo to the start of the stub
 * stack page, from where it is copied out.  Any failure is fatal.
 */
static void get_skas_faultinfo(int pid, struct faultinfo *fi)
{
	int err;

	if (ptrace_faultinfo) {
		err = ptrace(PTRACE_FAULTINFO, pid, 0, fi);
		if (err) {
			printk(UM_KERN_ERR "get_skas_faultinfo - "
			       "PTRACE_FAULTINFO failed, errno = %d\n", errno);
			fatal_sigsegv();
		}

		/* Special handling for i386, which has different structs */
		if (sizeof(struct ptrace_faultinfo) < sizeof(struct faultinfo))
			memset((char *)fi + sizeof(struct ptrace_faultinfo), 0,
			       sizeof(struct faultinfo) -
			       sizeof(struct ptrace_faultinfo));
	}
	else {
		unsigned long fpregs[FP_SIZE];

		/* Save the FP state around running the stub's handler */
		err = get_fp_registers(pid, fpregs);
		if (err < 0) {
			printk(UM_KERN_ERR "save_fp_registers returned %d\n",
			       err);
			fatal_sigsegv();
		}

		/* Deliver the SIGSEGV into the stub's handler */
		err = ptrace(PTRACE_CONT, pid, 0, SIGSEGV);
		if (err) {
			printk(UM_KERN_ERR "Failed to continue stub, pid = %d, "
			       "errno = %d\n", pid, errno);
			fatal_sigsegv();
		}
		wait_stub_done(pid);

		/*
		 * faultinfo is prepared by the stub-segv-handler at start of
		 * the stub stack page. We just have to copy it.
		 */
		memcpy(fi, (void *)current_stub_stack(), sizeof(*fi));

		/* Restore the FP state saved above */
		err = put_fp_registers(pid, fpregs);
		if (err < 0) {
			printk(UM_KERN_ERR "put_fp_registers returned %d\n",
			       err);
			fatal_sigsegv();
		}
	}
}
  120. static void handle_segv(int pid, struct uml_pt_regs * regs)
  121. {
  122. get_skas_faultinfo(pid, &regs->faultinfo);
  123. segv(regs->faultinfo, 0, 1, NULL);
  124. }
/*
 * To use the same value of using_sysemu as the caller, ask it that value
 * (in local_using_sysemu).
 */
static void handle_trap(int pid, struct uml_pt_regs *regs,
			int local_using_sysemu)
{
	int err, status;

	/* A trap from inside the stub's address range is fatal */
	if ((UPT_IP(regs) >= STUB_START) && (UPT_IP(regs) < STUB_END))
		fatal_sigsegv();

	/* Mark this as a syscall */
	UPT_SYSCALL_NR(regs) = PT_SYSCALL_NR(regs->gp);

	if (!local_using_sysemu)
	{
		/*
		 * Without sysemu the host would really execute the syscall,
		 * so replace its number with getpid and let that harmless
		 * call run to completion before emulating the original.
		 */
		err = ptrace(PTRACE_POKEUSR, pid, PT_SYSCALL_NR_OFFSET,
			     __NR_getpid);
		if (err < 0) {
			printk(UM_KERN_ERR "handle_trap - nullifying syscall "
			       "failed, errno = %d\n", errno);
			fatal_sigsegv();
		}

		err = ptrace(PTRACE_SYSCALL, pid, 0, 0);
		if (err < 0) {
			printk(UM_KERN_ERR "handle_trap - continuing to end of "
			       "syscall failed, errno = %d\n", errno);
			fatal_sigsegv();
		}

		/* Expect the syscall-exit stop (SIGTRAP + 0x80 with
		 * PTRACE_O_TRACESYSGOOD) */
		CATCH_EINTR(err = waitpid(pid, &status, WUNTRACED | __WALL));
		if ((err < 0) || !WIFSTOPPED(status) ||
		    (WSTOPSIG(status) != SIGTRAP + 0x80)) {
			err = ptrace_dump_regs(pid);
			if (err)
				printk(UM_KERN_ERR "Failed to get registers "
				       "from process, errno = %d\n", -err);
			printk(UM_KERN_ERR "handle_trap - failed to wait at "
			       "end of syscall, errno = %d, status = %d\n",
			       errno, status);
			fatal_sigsegv();
		}
	}

	handle_syscall(regs);
}
extern int __syscall_stub_start;

/*
 * Entry point of the cloned userspace process.  Requests tracing,
 * resets signal dispositions, starts the interval timer, maps the
 * syscall stub code and (if given) its data/stack page when /proc/mm
 * is unavailable, installs the stub SIGSEGV handler, and finally stops
 * itself so the tracer (start_userspace) can take over.
 * "stack" is the stub stack page, or NULL if none is needed.
 */
static int userspace_tramp(void *stack)
{
	void *addr;
	int err;

	ptrace(PTRACE_TRACEME, 0, 0, 0);

	signal(SIGTERM, SIG_DFL);
	signal(SIGWINCH, SIG_IGN);
	err = set_interval();
	if (err) {
		printk(UM_KERN_ERR "userspace_tramp - setting timer failed, "
		       "errno = %d\n", err);
		exit(1);
	}

	if (!proc_mm) {
		/*
		 * This has a pte, but it can't be mapped in with the usual
		 * tlb_flush mechanism because this is part of that mechanism
		 */
		int fd;
		unsigned long long offset;

		fd = phys_mapping(to_phys(&__syscall_stub_start), &offset);
		addr = mmap64((void *) STUB_CODE, UM_KERN_PAGE_SIZE,
			      PROT_EXEC, MAP_FIXED | MAP_PRIVATE, fd, offset);
		if (addr == MAP_FAILED) {
			printk(UM_KERN_ERR "mapping mmap stub at 0x%lx failed, "
			       "errno = %d\n", STUB_CODE, errno);
			exit(1);
		}

		if (stack != NULL) {
			fd = phys_mapping(to_phys(stack), &offset);
			addr = mmap((void *) STUB_DATA,
				    UM_KERN_PAGE_SIZE, PROT_READ | PROT_WRITE,
				    MAP_FIXED | MAP_SHARED, fd, offset);
			if (addr == MAP_FAILED) {
				printk(UM_KERN_ERR "mapping segfault stack "
				       "at 0x%lx failed, errno = %d\n",
				       STUB_DATA, errno);
				exit(1);
			}
		}
	}
	if (!ptrace_faultinfo && (stack != NULL)) {
		struct sigaction sa;
		/*
		 * The handler lives in the stub code page; compute its
		 * address relative to where that page was just mapped.
		 */
		unsigned long v = STUB_CODE +
				  (unsigned long) stub_segv_handler -
				  (unsigned long) &__syscall_stub_start;

		/* Run the SIGSEGV handler on the stub data page */
		set_sigstack((void *) STUB_DATA, UM_KERN_PAGE_SIZE);
		sigemptyset(&sa.sa_mask);
		sa.sa_flags = SA_ONSTACK | SA_NODEFER | SA_SIGINFO;
		sa.sa_sigaction = (void *) v;
		sa.sa_restorer = NULL;
		if (sigaction(SIGSEGV, &sa, NULL) < 0) {
			printk(UM_KERN_ERR "userspace_tramp - setting SIGSEGV "
			       "handler failed - errno = %d\n", errno);
			exit(1);
		}
	}

	/* Stop here; start_userspace waits for this SIGSTOP */
	kill(os_getpid(), SIGSTOP);
	return 0;
}
/* Each element set once, and only accessed by a single processor anyway */
#undef NR_CPUS
#define NR_CPUS 1
int userspace_pid[NR_CPUS];
/*
 * Clone a new userspace process running userspace_tramp, wait for its
 * initial SIGSTOP (skipping timer-signal stops on the way), enable
 * PTRACE_O_TRACESYSGOOD, and free the temporary clone stack.
 * "stub_stack" is handed through to the trampoline.
 * Returns the new pid, or a negative errno/-EINVAL on failure (in which
 * case the child is killed).
 */
int start_userspace(unsigned long stub_stack)
{
	void *stack;
	unsigned long sp;
	int pid, status, n, flags, err;

	/* Temporary stack for the trampoline to run on */
	stack = mmap(NULL, UM_KERN_PAGE_SIZE,
		     PROT_READ | PROT_WRITE | PROT_EXEC,
		     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (stack == MAP_FAILED) {
		err = -errno;
		printk(UM_KERN_ERR "start_userspace : mmap failed, "
		       "errno = %d\n", errno);
		return err;
	}

	sp = (unsigned long) stack + UM_KERN_PAGE_SIZE - sizeof(void *);

	flags = CLONE_FILES;
	if (proc_mm)
		flags |= CLONE_VM;
	else
		flags |= SIGCHLD;

	pid = clone(userspace_tramp, (void *) sp, flags, (void *) stub_stack);
	if (pid < 0) {
		err = -errno;
		printk(UM_KERN_ERR "start_userspace : clone failed, "
		       "errno = %d\n", errno);
		return err;
	}

	/* Swallow SIGVTALRM stops until the trampoline's SIGSTOP arrives */
	do {
		CATCH_EINTR(n = waitpid(pid, &status, WUNTRACED | __WALL));
		if (n < 0) {
			err = -errno;
			printk(UM_KERN_ERR "start_userspace : wait failed, "
			       "errno = %d\n", errno);
			goto out_kill;
		}
	} while (WIFSTOPPED(status) && (WSTOPSIG(status) == SIGVTALRM));

	if (!WIFSTOPPED(status) || (WSTOPSIG(status) != SIGSTOP)) {
		err = -EINVAL;
		printk(UM_KERN_ERR "start_userspace : expected SIGSTOP, got "
		       "status = %d\n", status);
		goto out_kill;
	}

	if (ptrace(PTRACE_OLDSETOPTIONS, pid, NULL,
		   (void *) PTRACE_O_TRACESYSGOOD) < 0) {
		err = -errno;
		printk(UM_KERN_ERR "start_userspace : PTRACE_OLDSETOPTIONS "
		       "failed, errno = %d\n", errno);
		goto out_kill;
	}

	if (munmap(stack, UM_KERN_PAGE_SIZE) < 0) {
		err = -errno;
		printk(UM_KERN_ERR "start_userspace : munmap failed, "
		       "errno = %d\n", errno);
		goto out_kill;
	}

	return pid;

 out_kill:
	os_kill_ptraced_process(pid, 1);
	return err;
}
/*
 * Main tracing loop: repeatedly load the process's registers, continue
 * it (with sysemu/singlestep selected per iteration), wait for the next
 * stop, read the registers back, and dispatch on the stopping signal.
 * Does not return; unrecoverable ptrace errors panic via fatal_sigsegv().
 */
void userspace(struct uml_pt_regs *regs)
{
	struct itimerval timer;
	unsigned long long nsecs, now;
	int err, status, op, pid = userspace_pid[0];
	/* To prevent races if using_sysemu changes under us.*/
	int local_using_sysemu;

	/* Compute the absolute time at which the next tick is due */
	if (getitimer(ITIMER_VIRTUAL, &timer))
		printk(UM_KERN_ERR "Failed to get itimer, errno = %d\n", errno);
	nsecs = timer.it_value.tv_sec * UM_NSEC_PER_SEC +
		timer.it_value.tv_usec * UM_NSEC_PER_USEC;
	nsecs += os_nsecs();

	while (1) {
		/*
		 * This can legitimately fail if the process loads a
		 * bogus value into a segment register. It will
		 * segfault and PTRACE_GETREGS will read that value
		 * out of the process. However, PTRACE_SETREGS will
		 * fail. In this case, there is nothing to do but
		 * just kill the process.
		 */
		if (ptrace(PTRACE_SETREGS, pid, 0, regs->gp))
			fatal_sigsegv();

		if (put_fp_registers(pid, regs->fp))
			fatal_sigsegv();

		/* Now we set local_using_sysemu to be used for one loop */
		local_using_sysemu = get_using_sysemu();

		op = SELECT_PTRACE_OPERATION(local_using_sysemu,
					     singlestepping(NULL));

		if (ptrace(op, pid, 0, 0)) {
			printk(UM_KERN_ERR "userspace - ptrace continue "
			       "failed, op = %d, errno = %d\n", op, errno);
			fatal_sigsegv();
		}

		CATCH_EINTR(err = waitpid(pid, &status, WUNTRACED | __WALL));
		if (err < 0) {
			printk(UM_KERN_ERR "userspace - wait failed, "
			       "errno = %d\n", errno);
			fatal_sigsegv();
		}

		regs->is_user = 1;
		if (ptrace(PTRACE_GETREGS, pid, 0, regs->gp)) {
			printk(UM_KERN_ERR "userspace - PTRACE_GETREGS failed, "
			       "errno = %d\n", errno);
			fatal_sigsegv();
		}

		if (get_fp_registers(pid, regs->fp)) {
			printk(UM_KERN_ERR "userspace - get_fp_registers failed, "
			       "errno = %d\n", errno);
			fatal_sigsegv();
		}

		UPT_SYSCALL_NR(regs) = -1; /* Assume: It's not a syscall */

		if (WIFSTOPPED(status)) {
			int sig = WSTOPSIG(status);
			switch (sig) {
			case SIGSEGV:
				if (PTRACE_FULL_FAULTINFO ||
				    !ptrace_faultinfo) {
					get_skas_faultinfo(pid,
							   &regs->faultinfo);
					(*sig_info[SIGSEGV])(SIGSEGV, regs);
				}
				else handle_segv(pid, regs);
				break;
			case SIGTRAP + 0x80:
				/* Syscall stop (PTRACE_O_TRACESYSGOOD) */
				handle_trap(pid, regs, local_using_sysemu);
				break;
			case SIGTRAP:
				relay_signal(SIGTRAP, regs);
				break;
			case SIGVTALRM:
				/* Ignore ticks arriving before the deadline */
				now = os_nsecs();
				if (now < nsecs)
					break;
				block_signals();
				(*sig_info[sig])(sig, regs);
				unblock_signals();
				/* Rearm: next deadline is one interval away */
				nsecs = timer.it_value.tv_sec *
					UM_NSEC_PER_SEC +
					timer.it_value.tv_usec *
					UM_NSEC_PER_USEC;
				nsecs += os_nsecs();
				break;
			case SIGIO:
			case SIGILL:
			case SIGBUS:
			case SIGFPE:
			case SIGWINCH:
				block_signals();
				(*sig_info[sig])(sig, regs);
				unblock_signals();
				break;
			default:
				printk(UM_KERN_ERR "userspace - child stopped "
				       "with signal %d\n", sig);
				fatal_sigsegv();
			}
			pid = userspace_pid[0];
			interrupt_end();

			/* Avoid -ERESTARTSYS handling in host */
			if (PT_SYSCALL_NR_OFFSET != PT_SYSCALL_RET_OFFSET)
				PT_SYSCALL_NR(regs->gp) = -1;
		}
	}
}
/* Register template used to start the stub in a newly forked process */
static unsigned long thread_regs[MAX_REG_NR];
static unsigned long thread_fp_regs[FP_SIZE];

/* One-time setup of the stub register template (IP at the clone stub,
 * SP at the top of the stub data page) */
static int __init init_thread_regs(void)
{
	get_safe_registers(thread_regs, thread_fp_regs);
	/* Set parent's instruction pointer to start of clone-stub */
	thread_regs[REGS_IP_INDEX] = STUB_CODE +
				(unsigned long) stub_clone_handler -
				(unsigned long) &__syscall_stub_start;
	thread_regs[REGS_SP_INDEX] = STUB_DATA + UM_KERN_PAGE_SIZE -
		sizeof(void *);
#ifdef __SIGNAL_FRAMESIZE
	thread_regs[REGS_SP_INDEX] -= __SIGNAL_FRAMESIZE;
#endif
	return 0;
}

__initcall(init_thread_regs);
/*
 * Fork a new address space by running the clone stub inside the traced
 * process "pid".  The current stub data page is seeded with the fd and
 * offset of the child's stub stack plus a timer value; the stub clones,
 * the parent stub reports the child pid in data->err, and the child
 * stub reports STUB_DATA in child_data->err on success.
 * Returns the child pid or a negative error.
 */
int copy_context_skas0(unsigned long new_stack, int pid)
{
	struct timeval tv = { .tv_sec = 0, .tv_usec = UM_USEC_PER_SEC / UM_HZ };
	int err;
	unsigned long current_stack = current_stub_stack();
	struct stub_data *data = (struct stub_data *) current_stack;
	struct stub_data *child_data = (struct stub_data *) new_stack;
	unsigned long long new_offset;
	int new_fd = phys_mapping(to_phys((void *)new_stack), &new_offset);

	/*
	 * prepare offset and fd of child's stack as argument for parent's
	 * and child's mmap2 calls
	 */
	*data = ((struct stub_data) { .offset = MMAP_OFFSET(new_offset),
				      .fd = new_fd,
				      .timer = ((struct itimerval)
						{ .it_value = tv,
						  .it_interval = tv }) });

	err = ptrace_setregs(pid, thread_regs);
	if (err < 0) {
		err = -errno;
		printk(UM_KERN_ERR "copy_context_skas0 : PTRACE_SETREGS "
		       "failed, pid = %d, errno = %d\n", pid, -err);
		return err;
	}

	err = put_fp_registers(pid, thread_fp_regs);
	if (err < 0) {
		printk(UM_KERN_ERR "copy_context_skas0 : put_fp_registers "
		       "failed, pid = %d, err = %d\n", pid, err);
		return err;
	}

	/* set a well known return code for detection of child write failure */
	child_data->err = 12345678;

	/*
	 * Wait, until parent has finished its work: read child's pid from
	 * parent's stack, and check, if bad result.
	 */
	err = ptrace(PTRACE_CONT, pid, 0, 0);
	if (err) {
		err = -errno;
		printk(UM_KERN_ERR "Failed to continue new process, pid = %d, "
		       "errno = %d\n", pid, errno);
		return err;
	}

	wait_stub_done(pid);

	pid = data->err;
	if (pid < 0) {
		printk(UM_KERN_ERR "copy_context_skas0 - stub-parent reports "
		       "error %d\n", -pid);
		return pid;
	}

	/*
	 * Wait, until child has finished too: read child's result from
	 * child's stack and check it.
	 */
	wait_stub_done(pid);
	if (child_data->err != STUB_DATA) {
		printk(UM_KERN_ERR "copy_context_skas0 - stub-child reports "
		       "error %ld\n", child_data->err);
		err = child_data->err;
		goto out_kill;
	}

	if (ptrace(PTRACE_OLDSETOPTIONS, pid, NULL,
		   (void *)PTRACE_O_TRACESYSGOOD) < 0) {
		err = -errno;
		printk(UM_KERN_ERR "copy_context_skas0 : PTRACE_OLDSETOPTIONS "
		       "failed, errno = %d\n", errno);
		goto out_kill;
	}

	return pid;

 out_kill:
	os_kill_ptraced_process(pid, 1);
	return err;
}
/*
 * This is used only, if stub pages are needed, while proc_mm is
 * available. Opening /proc/mm creates a new mm_context, which lacks
 * the stub-pages. Thus, we map them using /proc/mm-fd
 */
int map_stub_pages(int fd, unsigned long code, unsigned long data,
		   unsigned long stack)
{
	struct proc_mm_op mmop;
	int n;
	unsigned long long code_offset;
	int code_fd = phys_mapping(to_phys((void *) &__syscall_stub_start),
				   &code_offset);

	/* Map the stub code page: executable, private */
	mmop = ((struct proc_mm_op) { .op = MM_MMAP,
				      .u =
				      { .mmap =
					{ .addr = code,
					  .len = UM_KERN_PAGE_SIZE,
					  .prot = PROT_EXEC,
					  .flags = MAP_FIXED | MAP_PRIVATE,
					  .fd = code_fd,
					  .offset = code_offset
					} } });
	CATCH_EINTR(n = write(fd, &mmop, sizeof(mmop)));
	if (n != sizeof(mmop)) {
		n = errno;
		printk(UM_KERN_ERR "mmap args - addr = 0x%lx, fd = %d, "
		       "offset = %llx\n", code, code_fd,
		       (unsigned long long) code_offset);
		printk(UM_KERN_ERR "map_stub_pages : /proc/mm map for code "
		       "failed, err = %d\n", n);
		return -n;
	}

	if (stack) {
		/* Map the stub data/stack page: read-write, shared */
		unsigned long long map_offset;
		int map_fd = phys_mapping(to_phys((void *)stack), &map_offset);
		mmop = ((struct proc_mm_op)
				{ .op = MM_MMAP,
				  .u =
				  { .mmap =
				    { .addr = data,
				      .len = UM_KERN_PAGE_SIZE,
				      .prot = PROT_READ | PROT_WRITE,
				      .flags = MAP_FIXED | MAP_SHARED,
				      .fd = map_fd,
				      .offset = map_offset
				    } } });
		CATCH_EINTR(n = write(fd, &mmop, sizeof(mmop)));
		if (n != sizeof(mmop)) {
			n = errno;
			printk(UM_KERN_ERR "map_stub_pages : /proc/mm map for "
			       "data failed, err = %d\n", n);
			return -n;
		}
	}

	return 0;
}
  545. void new_thread(void *stack, jmp_buf *buf, void (*handler)(void))
  546. {
  547. (*buf)[0].JB_IP = (unsigned long) handler;
  548. (*buf)[0].JB_SP = (unsigned long) stack + UM_THREAD_SIZE -
  549. sizeof(void *);
  550. }
/* setjmp return codes on initial_jmpbuf, selecting the idle thread's
 * next action (see start_idle_thread) */
#define INIT_JMP_NEW_THREAD 0
#define INIT_JMP_CALLBACK 1
#define INIT_JMP_HALT 2
#define INIT_JMP_REBOOT 3
  555. void switch_threads(jmp_buf *me, jmp_buf *you)
  556. {
  557. if (UML_SETJMP(me) == 0)
  558. UML_LONGJMP(you, 1);
  559. }
/* Context of the idle loop set up in start_idle_thread */
static jmp_buf initial_jmpbuf;

/* XXX Make these percpu */
static void (*cb_proc)(void *arg);	/* callback to run on the initial stack */
static void *cb_arg;			/* its argument */
static jmp_buf *cb_back;		/* where to return after the callback */
/*
 * Set up initial_jmpbuf and dispatch on how it is re-entered: start a
 * new thread, run a queued callback, halt (returns 0) or reboot
 * (returns 1).  Otherwise longjmps into the thread in switch_buf.
 */
int start_idle_thread(void *stack, jmp_buf *switch_buf)
{
	int n;

	set_handler(SIGWINCH);

	/*
	 * Can't use UML_SETJMP or UML_LONGJMP here because they save
	 * and restore signals, with the possible side-effect of
	 * trying to handle any signals which came when they were
	 * blocked, which can't be done on this stack.
	 * Signals must be blocked when jumping back here and restored
	 * after returning to the jumper.
	 */
	n = setjmp(initial_jmpbuf);
	switch (n) {
	case INIT_JMP_NEW_THREAD:
		/* Aim the switch buffer at the generic thread entry */
		(*switch_buf)[0].JB_IP = (unsigned long) new_thread_handler;
		(*switch_buf)[0].JB_SP = (unsigned long) stack +
			UM_THREAD_SIZE - sizeof(void *);
		break;
	case INIT_JMP_CALLBACK:
		(*cb_proc)(cb_arg);
		longjmp(*cb_back, 1);
		break;
	case INIT_JMP_HALT:
		kmalloc_ok = 0;
		return 0;
	case INIT_JMP_REBOOT:
		kmalloc_ok = 0;
		return 1;
	default:
		printk(UM_KERN_ERR "Bad sigsetjmp return in "
		       "start_idle_thread - %d\n", n);
		fatal_sigsegv();
	}
	longjmp(*switch_buf, 1);
}
  601. void initial_thread_cb_skas(void (*proc)(void *), void *arg)
  602. {
  603. jmp_buf here;
  604. cb_proc = proc;
  605. cb_arg = arg;
  606. cb_back = &here;
  607. block_signals();
  608. if (UML_SETJMP(&here) == 0)
  609. UML_LONGJMP(&initial_jmpbuf, INIT_JMP_CALLBACK);
  610. unblock_signals();
  611. cb_proc = NULL;
  612. cb_arg = NULL;
  613. cb_back = NULL;
  614. }
/* Jump back into the idle loop with INIT_JMP_HALT; does not return */
void halt_skas(void)
{
	block_signals();
	UML_LONGJMP(&initial_jmpbuf, INIT_JMP_HALT);
}
/* Jump back into the idle loop with INIT_JMP_REBOOT; does not return */
void reboot_skas(void)
{
	block_signals();
	UML_LONGJMP(&initial_jmpbuf, INIT_JMP_REBOOT);
}
  625. void __switch_mm(struct mm_id *mm_idp)
  626. {
  627. int err;
  628. /* FIXME: need cpu pid in __switch_mm */
  629. if (proc_mm) {
  630. err = ptrace(PTRACE_SWITCH_MM, userspace_pid[0], 0,
  631. mm_idp->u.mm_fd);
  632. if (err) {
  633. printk(UM_KERN_ERR "__switch_mm - PTRACE_SWITCH_MM "
  634. "failed, errno = %d\n", errno);
  635. fatal_sigsegv();
  636. }
  637. }
  638. else userspace_pid[0] = mm_idp->u.pid;
  639. }