process.c 19 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749
  1. /*
  2. * Copyright (C) 2002- 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
  3. * Licensed under the GPL
  4. */
  5. #include <stdlib.h>
  6. #include <unistd.h>
  7. #include <sched.h>
  8. #include <errno.h>
  9. #include <string.h>
  10. #include <sys/mman.h>
  11. #include <sys/wait.h>
  12. #include <asm/unistd.h>
  13. #include <as-layout.h>
  14. #include <init.h>
  15. #include <kern_util.h>
  16. #include <mem.h>
  17. #include <os.h>
  18. #include <proc_mm.h>
  19. #include <ptrace_user.h>
  20. #include <registers.h>
  21. #include <skas.h>
  22. #include <skas_ptrace.h>
  23. #include <sysdep/stub.h>
  24. int is_skas_winch(int pid, int fd, void *data)
  25. {
  26. return pid == getpgrp();
  27. }
  28. static int ptrace_dump_regs(int pid)
  29. {
  30. unsigned long regs[MAX_REG_NR];
  31. int i;
  32. if (ptrace(PTRACE_GETREGS, pid, 0, regs) < 0)
  33. return -errno;
  34. printk(UM_KERN_ERR "Stub registers -\n");
  35. for (i = 0; i < ARRAY_SIZE(regs); i++)
  36. printk(UM_KERN_ERR "\t%d - %lx\n", i, regs[i]);
  37. return 0;
  38. }
  39. /*
  40. * Signals that are OK to receive in the stub - we'll just continue it.
  41. * SIGWINCH will happen when UML is inside a detached screen.
  42. */
  43. #define STUB_SIG_MASK ((1 << SIGVTALRM) | (1 << SIGWINCH))
  44. /* Signals that the stub will finish with - anything else is an error */
  45. #define STUB_DONE_MASK (1 << SIGTRAP)
/*
 * Wait until the stub process stops with SIGTRAP (STUB_DONE_MASK),
 * continuing it past any benign stops (STUB_SIG_MASK: SIGVTALRM,
 * SIGWINCH).  Any other outcome is an error: the registers are dumped
 * and either the stub is killed (unexpected stop signal) or
 * fatal_sigsegv() is called (wait itself failed).
 */
void wait_stub_done(int pid)
{
	int n, status, err, bad_stop = 0;

	while (1) {
		CATCH_EINTR(n = waitpid(pid, &status, WUNTRACED | __WALL));
		if ((n < 0) || !WIFSTOPPED(status))
			goto bad_wait;

		/* Anything outside STUB_SIG_MASK ends the loop */
		if (((1 << WSTOPSIG(status)) & STUB_SIG_MASK) == 0)
			break;

		err = ptrace(PTRACE_CONT, pid, 0, 0);
		if (err) {
			printk(UM_KERN_ERR "wait_stub_done : continue failed, "
			       "errno = %d\n", errno);
			fatal_sigsegv();
		}
	}

	/* SIGTRAP means the stub finished cleanly */
	if (((1 << WSTOPSIG(status)) & STUB_DONE_MASK) != 0)
		return;
	else
		bad_stop = 1;

bad_wait:
	err = ptrace_dump_regs(pid);
	if (err)
		printk(UM_KERN_ERR "Failed to get registers from stub, "
		       "errno = %d\n", -err);
	printk(UM_KERN_ERR "wait_stub_done : failed to wait for SIGTRAP, "
	       "pid = %d, n = %d, errno = %d, status = 0x%x\n", pid, n, errno,
	       status);
	if (bad_stop)
		kill(pid, SIGKILL);
	else
		fatal_sigsegv();
}
  79. extern unsigned long current_stub_stack(void);
/*
 * Fill *fi with the fault information for the last SIGSEGV taken by
 * process pid.  Two paths, depending on host ptrace support:
 *
 *  - ptrace_faultinfo set: read it directly with PTRACE_FAULTINFO,
 *    zero-padding the tail if the host struct is smaller than ours.
 *  - otherwise: save the FP registers, deliver the SIGSEGV to the
 *    process so its in-stub handler records faultinfo at the start of
 *    the stub stack page, wait for the stub to finish, copy the result
 *    out, and restore the FP registers.
 *
 * Any failure along the way is fatal (fatal_sigsegv()).
 */
static void get_skas_faultinfo(int pid, struct faultinfo *fi)
{
	int err;

	if (ptrace_faultinfo) {
		err = ptrace(PTRACE_FAULTINFO, pid, 0, fi);
		if (err) {
			printk(UM_KERN_ERR "get_skas_faultinfo - "
			       "PTRACE_FAULTINFO failed, errno = %d\n", errno);
			fatal_sigsegv();
		}

		/* Special handling for i386, which has different structs */
		if (sizeof(struct ptrace_faultinfo) < sizeof(struct faultinfo))
			memset((char *)fi + sizeof(struct ptrace_faultinfo), 0,
			       sizeof(struct faultinfo) -
			       sizeof(struct ptrace_faultinfo));
	}
	else {
		unsigned long fpregs[FP_SIZE];

		/* The stub may clobber FP state - save it around the trip */
		err = get_fp_registers(pid, fpregs);
		if (err < 0) {
			printk(UM_KERN_ERR "save_fp_registers returned %d\n",
			       err);
			fatal_sigsegv();
		}
		/* Re-deliver the SIGSEGV so the stub's handler runs */
		err = ptrace(PTRACE_CONT, pid, 0, SIGSEGV);
		if (err) {
			printk(UM_KERN_ERR "Failed to continue stub, pid = %d, "
			       "errno = %d\n", pid, errno);
			fatal_sigsegv();
		}
		wait_stub_done(pid);

		/*
		 * faultinfo is prepared by the stub-segv-handler at start of
		 * the stub stack page. We just have to copy it.
		 */
		memcpy(fi, (void *)current_stub_stack(), sizeof(*fi));

		err = put_fp_registers(pid, fpregs);
		if (err < 0) {
			printk(UM_KERN_ERR "put_fp_registers returned %d\n",
			       err);
			fatal_sigsegv();
		}
	}
}
  124. static void handle_segv(int pid, struct uml_pt_regs * regs)
  125. {
  126. get_skas_faultinfo(pid, &regs->faultinfo);
  127. segv(regs->faultinfo, 0, 1, NULL);
  128. }
/*
 * To use the same value of using_sysemu as the caller, ask it for that
 * value (passed in as local_using_sysemu).
 */
/*
 * Handle a syscall stop (SIGTRAP | 0x80) in the traced child: record
 * the syscall number, and - when not running under sysemu - nullify
 * the host syscall by rewriting its number to __NR_getpid, step the
 * child to the syscall exit, and wait for that stop.  Finally dispatch
 * the syscall into UML via handle_syscall().  A trap coming from
 * inside the stub's address range is fatal.
 */
static void handle_trap(int pid, struct uml_pt_regs *regs,
			int local_using_sysemu)
{
	int err, status;

	if ((UPT_IP(regs) >= STUB_START) && (UPT_IP(regs) < STUB_END))
		fatal_sigsegv();

	/* Mark this as a syscall */
	UPT_SYSCALL_NR(regs) = PT_SYSCALL_NR(regs->gp);

	if (!local_using_sysemu)
	{
		/* Turn the host syscall into a harmless getpid */
		err = ptrace(PTRACE_POKEUSER, pid, PT_SYSCALL_NR_OFFSET,
			     __NR_getpid);
		if (err < 0) {
			printk(UM_KERN_ERR "handle_trap - nullifying syscall "
			       "failed, errno = %d\n", errno);
			fatal_sigsegv();
		}

		/* Run the child to the syscall-exit stop */
		err = ptrace(PTRACE_SYSCALL, pid, 0, 0);
		if (err < 0) {
			printk(UM_KERN_ERR "handle_trap - continuing to end of "
			       "syscall failed, errno = %d\n", errno);
			fatal_sigsegv();
		}

		/* Expect SIGTRAP | 0x80 thanks to PTRACE_O_TRACESYSGOOD */
		CATCH_EINTR(err = waitpid(pid, &status, WUNTRACED | __WALL));
		if ((err < 0) || !WIFSTOPPED(status) ||
		    (WSTOPSIG(status) != SIGTRAP + 0x80)) {
			err = ptrace_dump_regs(pid);
			if (err)
				printk(UM_KERN_ERR "Failed to get registers "
				       "from process, errno = %d\n", -err);
			printk(UM_KERN_ERR "handle_trap - failed to wait at "
			       "end of syscall, errno = %d, status = %d\n",
			       errno, status);
			fatal_sigsegv();
		}
	}

	handle_syscall(regs);
}
  171. extern int __syscall_stub_start;
/*
 * Entry point of the clone()d child created by start_userspace().
 * Puts itself under ptrace, resets signal handling, starts the virtual
 * interval timer, maps the stub code/data pages when proc_mm is not
 * available, installs the stub SIGSEGV handler on an alternate stack
 * when PTRACE_FAULTINFO is not available, and finally stops itself
 * with SIGSTOP so the parent knows setup is complete.
 */
static int userspace_tramp(void *stack)
{
	void *addr;
	int err;

	ptrace(PTRACE_TRACEME, 0, 0, 0);

	signal(SIGTERM, SIG_DFL);
	signal(SIGWINCH, SIG_IGN);
	err = set_interval();
	if (err) {
		printk(UM_KERN_ERR "userspace_tramp - setting timer failed, "
		       "errno = %d\n", err);
		exit(1);
	}

	if (!proc_mm) {
		/*
		 * This has a pte, but it can't be mapped in with the usual
		 * tlb_flush mechanism because this is part of that mechanism
		 */
		int fd;
		unsigned long long offset;

		/* Map the stub code page (execute-only) at STUB_CODE */
		fd = phys_mapping(to_phys(&__syscall_stub_start), &offset);
		addr = mmap64((void *) STUB_CODE, UM_KERN_PAGE_SIZE,
			      PROT_EXEC, MAP_FIXED | MAP_PRIVATE, fd, offset);
		if (addr == MAP_FAILED) {
			printk(UM_KERN_ERR "mapping mmap stub at 0x%lx failed, "
			       "errno = %d\n", STUB_CODE, errno);
			exit(1);
		}

		if (stack != NULL) {
			/* Map the stub data/stack page (shared) at STUB_DATA */
			fd = phys_mapping(to_phys(stack), &offset);
			addr = mmap((void *) STUB_DATA,
				    UM_KERN_PAGE_SIZE, PROT_READ | PROT_WRITE,
				    MAP_FIXED | MAP_SHARED, fd, offset);
			if (addr == MAP_FAILED) {
				printk(UM_KERN_ERR "mapping segfault stack "
				       "at 0x%lx failed, errno = %d\n",
				       STUB_DATA, errno);
				exit(1);
			}
		}
	}
	if (!ptrace_faultinfo && (stack != NULL)) {
		struct sigaction sa;

		/*
		 * v = address of stub_segv_handler inside the mapped stub
		 * code page (offset from __syscall_stub_start)
		 */
		unsigned long v = STUB_CODE +
				  (unsigned long) stub_segv_handler -
				  (unsigned long) &__syscall_stub_start;

		/* Take SIGSEGV on the stub data page as an alternate stack */
		set_sigstack((void *) STUB_DATA, UM_KERN_PAGE_SIZE);
		sigemptyset(&sa.sa_mask);
		sa.sa_flags = SA_ONSTACK | SA_NODEFER | SA_SIGINFO;
		sa.sa_sigaction = (void *) v;
		sa.sa_restorer = NULL;
		if (sigaction(SIGSEGV, &sa, NULL) < 0) {
			printk(UM_KERN_ERR "userspace_tramp - setting SIGSEGV "
			       "handler failed - errno = %d\n", errno);
			exit(1);
		}
	}

	/* Signal the waiting parent that setup is done */
	kill(os_getpid(), SIGSTOP);
	return 0;
}
  232. /* Each element set once, and only accessed by a single processor anyway */
  233. #undef NR_CPUS
  234. #define NR_CPUS 1
  235. int userspace_pid[NR_CPUS];
  236. int start_userspace(unsigned long stub_stack)
  237. {
  238. void *stack;
  239. unsigned long sp;
  240. int pid, status, n, flags, err;
  241. stack = mmap(NULL, UM_KERN_PAGE_SIZE,
  242. PROT_READ | PROT_WRITE | PROT_EXEC,
  243. MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  244. if (stack == MAP_FAILED) {
  245. err = -errno;
  246. printk(UM_KERN_ERR "start_userspace : mmap failed, "
  247. "errno = %d\n", errno);
  248. return err;
  249. }
  250. sp = (unsigned long) stack + UM_KERN_PAGE_SIZE - sizeof(void *);
  251. flags = CLONE_FILES;
  252. if (proc_mm)
  253. flags |= CLONE_VM;
  254. else
  255. flags |= SIGCHLD;
  256. pid = clone(userspace_tramp, (void *) sp, flags, (void *) stub_stack);
  257. if (pid < 0) {
  258. err = -errno;
  259. printk(UM_KERN_ERR "start_userspace : clone failed, "
  260. "errno = %d\n", errno);
  261. return err;
  262. }
  263. do {
  264. CATCH_EINTR(n = waitpid(pid, &status, WUNTRACED | __WALL));
  265. if (n < 0) {
  266. err = -errno;
  267. printk(UM_KERN_ERR "start_userspace : wait failed, "
  268. "errno = %d\n", errno);
  269. goto out_kill;
  270. }
  271. } while (WIFSTOPPED(status) && (WSTOPSIG(status) == SIGVTALRM));
  272. if (!WIFSTOPPED(status) || (WSTOPSIG(status) != SIGSTOP)) {
  273. err = -EINVAL;
  274. printk(UM_KERN_ERR "start_userspace : expected SIGSTOP, got "
  275. "status = %d\n", status);
  276. goto out_kill;
  277. }
  278. if (ptrace(PTRACE_OLDSETOPTIONS, pid, NULL,
  279. (void *) PTRACE_O_TRACESYSGOOD) < 0) {
  280. err = -errno;
  281. printk(UM_KERN_ERR "start_userspace : PTRACE_OLDSETOPTIONS "
  282. "failed, errno = %d\n", errno);
  283. goto out_kill;
  284. }
  285. if (munmap(stack, UM_KERN_PAGE_SIZE) < 0) {
  286. err = -errno;
  287. printk(UM_KERN_ERR "start_userspace : munmap failed, "
  288. "errno = %d\n", errno);
  289. goto out_kill;
  290. }
  291. return pid;
  292. out_kill:
  293. os_kill_ptraced_process(pid, 1);
  294. return err;
  295. }
/*
 * The main tracing loop: repeatedly load regs into the traced child
 * (userspace_pid[0]), let it run, and service whatever stop it takes -
 * faults, syscall stops, timer ticks, and relayed host signals.  Only
 * returns via the signal handlers it dispatches into; unrecoverable
 * ptrace/wait failures end in fatal_sigsegv().
 */
void userspace(struct uml_pt_regs *regs)
{
	struct itimerval timer;
	unsigned long long nsecs, now;
	int err, status, op, pid = userspace_pid[0];
	/* To prevent races if using_sysemu changes under us.*/
	int local_using_sysemu;
	siginfo_t si;

	/* Handle any immediate reschedules or signals */
	interrupt_end();

	if (getitimer(ITIMER_VIRTUAL, &timer))
		printk(UM_KERN_ERR "Failed to get itimer, errno = %d\n", errno);
	/* Absolute deadline (ns) of the next virtual timer tick */
	nsecs = timer.it_value.tv_sec * UM_NSEC_PER_SEC +
		timer.it_value.tv_usec * UM_NSEC_PER_USEC;
	nsecs += os_nsecs();

	while (1) {
		/*
		 * This can legitimately fail if the process loads a
		 * bogus value into a segment register.  It will
		 * segfault and PTRACE_GETREGS will read that value
		 * out of the process.  However, PTRACE_SETREGS will
		 * fail.  In this case, there is nothing to do but
		 * just kill the process.
		 */
		if (ptrace(PTRACE_SETREGS, pid, 0, regs->gp))
			fatal_sigsegv();

		if (put_fp_registers(pid, regs->fp))
			fatal_sigsegv();

		/* Now we set local_using_sysemu to be used for one loop */
		local_using_sysemu = get_using_sysemu();

		/* Pick CONT/SYSCALL/SYSEMU(+singlestep) for this iteration */
		op = SELECT_PTRACE_OPERATION(local_using_sysemu,
					     singlestepping(NULL));

		if (ptrace(op, pid, 0, 0)) {
			printk(UM_KERN_ERR "userspace - ptrace continue "
			       "failed, op = %d, errno = %d\n", op, errno);
			fatal_sigsegv();
		}

		CATCH_EINTR(err = waitpid(pid, &status, WUNTRACED | __WALL));
		if (err < 0) {
			printk(UM_KERN_ERR "userspace - wait failed, "
			       "errno = %d\n", errno);
			fatal_sigsegv();
		}

		regs->is_user = 1;
		/* Pull the child's register state back into regs */
		if (ptrace(PTRACE_GETREGS, pid, 0, regs->gp)) {
			printk(UM_KERN_ERR "userspace - PTRACE_GETREGS failed, "
			       "errno = %d\n", errno);
			fatal_sigsegv();
		}

		if (get_fp_registers(pid, regs->fp)) {
			printk(UM_KERN_ERR "userspace - get_fp_registers failed, "
			       "errno = %d\n", errno);
			fatal_sigsegv();
		}

		UPT_SYSCALL_NR(regs) = -1; /* Assume: It's not a syscall */

		if (WIFSTOPPED(status)) {
			int sig = WSTOPSIG(status);

			ptrace(PTRACE_GETSIGINFO, pid, 0, (struct siginfo *)&si);

			switch (sig) {
			case SIGSEGV:
				if (PTRACE_FULL_FAULTINFO ||
				    !ptrace_faultinfo) {
					get_skas_faultinfo(pid,
							   &regs->faultinfo);
					(*sig_info[SIGSEGV])(SIGSEGV, (struct siginfo *)&si,
							     regs);
				}
				else handle_segv(pid, regs);
				break;
			case SIGTRAP + 0x80:
				/* Syscall stop (PTRACE_O_TRACESYSGOOD) */
				handle_trap(pid, regs, local_using_sysemu);
				break;
			case SIGTRAP:
				relay_signal(SIGTRAP, (struct siginfo *)&si, regs);
				break;
			case SIGVTALRM:
				now = os_nsecs();
				/* Ignore early ticks before the deadline */
				if (now < nsecs)
					break;
				block_signals();
				(*sig_info[sig])(sig, (struct siginfo *)&si, regs);
				unblock_signals();
				/* Recompute the next tick deadline */
				nsecs = timer.it_value.tv_sec *
					UM_NSEC_PER_SEC +
					timer.it_value.tv_usec *
					UM_NSEC_PER_USEC;
				nsecs += os_nsecs();
				break;
			case SIGIO:
			case SIGILL:
			case SIGBUS:
			case SIGFPE:
			case SIGWINCH:
				block_signals();
				(*sig_info[sig])(sig, (struct siginfo *)&si, regs);
				unblock_signals();
				break;
			default:
				printk(UM_KERN_ERR "userspace - child stopped "
				       "with signal %d\n", sig);
				fatal_sigsegv();
			}
			/* The handler may have switched contexts */
			pid = userspace_pid[0];
			interrupt_end();

			/* Avoid -ERESTARTSYS handling in host */
			if (PT_SYSCALL_NR_OFFSET != PT_SYSCALL_RET_OFFSET)
				PT_SYSCALL_NR(regs->gp) = -1;
		}
	}
}
  406. static unsigned long thread_regs[MAX_REG_NR];
  407. static unsigned long thread_fp_regs[FP_SIZE];
  408. static int __init init_thread_regs(void)
  409. {
  410. get_safe_registers(thread_regs, thread_fp_regs);
  411. /* Set parent's instruction pointer to start of clone-stub */
  412. thread_regs[REGS_IP_INDEX] = STUB_CODE +
  413. (unsigned long) stub_clone_handler -
  414. (unsigned long) &__syscall_stub_start;
  415. thread_regs[REGS_SP_INDEX] = STUB_DATA + UM_KERN_PAGE_SIZE -
  416. sizeof(void *);
  417. #ifdef __SIGNAL_FRAMESIZE
  418. thread_regs[REGS_SP_INDEX] -= __SIGNAL_FRAMESIZE;
  419. #endif
  420. return 0;
  421. }
  422. __initcall(init_thread_regs);
/*
 * Create a new address space by having the existing stub process (pid)
 * run the clone stub.  The mmap arguments for the child's stub data
 * page are written into the parent's stub data area, the parent's
 * registers are pointed at the clone stub, and it is continued.  The
 * stub parent reports the child's pid (or a negative error) through
 * its data page; the stub child reports STUB_DATA on success through
 * its own data page.  Returns the new child pid or a negative error.
 */
int copy_context_skas0(unsigned long new_stack, int pid)
{
	struct timeval tv = { .tv_sec = 0, .tv_usec = UM_USEC_PER_SEC / UM_HZ };
	int err;
	unsigned long current_stack = current_stub_stack();
	struct stub_data *data = (struct stub_data *) current_stack;
	struct stub_data *child_data = (struct stub_data *) new_stack;
	unsigned long long new_offset;
	int new_fd = phys_mapping(to_phys((void *)new_stack), &new_offset);

	/*
	 * prepare offset and fd of child's stack as argument for parent's
	 * and child's mmap2 calls
	 */
	*data = ((struct stub_data) { .offset = MMAP_OFFSET(new_offset),
				      .fd = new_fd,
				      .timer = ((struct itimerval)
						{ .it_value = tv,
						  .it_interval = tv }) });

	err = ptrace_setregs(pid, thread_regs);
	if (err < 0) {
		err = -errno;
		printk(UM_KERN_ERR "copy_context_skas0 : PTRACE_SETREGS "
		       "failed, pid = %d, errno = %d\n", pid, -err);
		return err;
	}

	err = put_fp_registers(pid, thread_fp_regs);
	if (err < 0) {
		printk(UM_KERN_ERR "copy_context_skas0 : put_fp_registers "
		       "failed, pid = %d, err = %d\n", pid, err);
		return err;
	}

	/* set a well known return code for detection of child write failure */
	child_data->err = 12345678;

	/*
	 * Wait, until parent has finished its work: read child's pid from
	 * parent's stack, and check, if bad result.
	 */
	err = ptrace(PTRACE_CONT, pid, 0, 0);
	if (err) {
		err = -errno;
		printk(UM_KERN_ERR "Failed to continue new process, pid = %d, "
		       "errno = %d\n", pid, errno);
		return err;
	}
	wait_stub_done(pid);

	/* The stub parent wrote the new child's pid (or -error) here */
	pid = data->err;
	if (pid < 0) {
		printk(UM_KERN_ERR "copy_context_skas0 - stub-parent reports "
		       "error %d\n", -pid);
		return pid;
	}

	/*
	 * Wait, until child has finished too: read child's result from
	 * child's stack and check it.
	 */
	wait_stub_done(pid);
	if (child_data->err != STUB_DATA) {
		printk(UM_KERN_ERR "copy_context_skas0 - stub-child reports "
		       "error %ld\n", child_data->err);
		err = child_data->err;
		goto out_kill;
	}

	/* Syscall stops in the new child must be distinguishable too */
	if (ptrace(PTRACE_OLDSETOPTIONS, pid, NULL,
		   (void *)PTRACE_O_TRACESYSGOOD) < 0) {
		err = -errno;
		printk(UM_KERN_ERR "copy_context_skas0 : PTRACE_OLDSETOPTIONS "
		       "failed, errno = %d\n", errno);
		goto out_kill;
	}

	return pid;

out_kill:
	os_kill_ptraced_process(pid, 1);
	return err;
}
/*
 * This is used only if stub pages are needed while proc_mm is
 * available. Opening /proc/mm creates a new mm_context, which lacks
 * the stub pages. Thus, we map them using the /proc/mm fd.
 */
/*
 * Map the stub code page (and, if stack != 0, the stub data page) into
 * the mm_context behind the /proc/mm file descriptor fd by writing
 * MM_MMAP operations to it.  code/data are the target addresses in the
 * new address space.  Returns 0 on success, -errno on a failed write.
 */
int map_stub_pages(int fd, unsigned long code, unsigned long data,
		   unsigned long stack)
{
	struct proc_mm_op mmop;
	int n;
	unsigned long long code_offset;
	int code_fd = phys_mapping(to_phys((void *) &__syscall_stub_start),
				   &code_offset);

	/* Stub code page: execute-only, private */
	mmop = ((struct proc_mm_op) { .op = MM_MMAP,
				      .u =
				      { .mmap =
					{ .addr = code,
					  .len = UM_KERN_PAGE_SIZE,
					  .prot = PROT_EXEC,
					  .flags = MAP_FIXED | MAP_PRIVATE,
					  .fd = code_fd,
					  .offset = code_offset
					} } });
	CATCH_EINTR(n = write(fd, &mmop, sizeof(mmop)));
	if (n != sizeof(mmop)) {
		n = errno;
		printk(UM_KERN_ERR "mmap args - addr = 0x%lx, fd = %d, "
		       "offset = %llx\n", code, code_fd,
		       (unsigned long long) code_offset);
		printk(UM_KERN_ERR "map_stub_pages : /proc/mm map for code "
		       "failed, err = %d\n", n);
		return -n;
	}

	if (stack) {
		unsigned long long map_offset;
		int map_fd = phys_mapping(to_phys((void *)stack), &map_offset);

		/* Stub data/stack page: read-write, shared */
		mmop = ((struct proc_mm_op)
				{ .op = MM_MMAP,
				  .u =
				  { .mmap =
				    { .addr = data,
				      .len = UM_KERN_PAGE_SIZE,
				      .prot = PROT_READ | PROT_WRITE,
				      .flags = MAP_FIXED | MAP_SHARED,
				      .fd = map_fd,
				      .offset = map_offset
				    } } });
		CATCH_EINTR(n = write(fd, &mmop, sizeof(mmop)));
		if (n != sizeof(mmop)) {
			n = errno;
			printk(UM_KERN_ERR "map_stub_pages : /proc/mm map for "
			       "data failed, err = %d\n", n);
			return -n;
		}
	}

	return 0;
}
  554. void new_thread(void *stack, jmp_buf *buf, void (*handler)(void))
  555. {
  556. (*buf)[0].JB_IP = (unsigned long) handler;
  557. (*buf)[0].JB_SP = (unsigned long) stack + UM_THREAD_SIZE -
  558. sizeof(void *);
  559. }
  560. #define INIT_JMP_NEW_THREAD 0
  561. #define INIT_JMP_CALLBACK 1
  562. #define INIT_JMP_HALT 2
  563. #define INIT_JMP_REBOOT 3
/*
 * Context switch: save the current context in *me and jump to *you.
 * Returns (with value 1 from the setjmp) when someone later switches
 * back to *me.
 */
void switch_threads(jmp_buf *me, jmp_buf *you)
{
	if (UML_SETJMP(me) == 0)
		UML_LONGJMP(you, 1);
}
  569. static jmp_buf initial_jmpbuf;
  570. /* XXX Make these percpu */
  571. static void (*cb_proc)(void *arg);
  572. static void *cb_arg;
  573. static jmp_buf *cb_back;
/*
 * Set up initial_jmpbuf as the dispatch point for thread creation,
 * callbacks, halt, and reboot, then jump to *switch_buf to start the
 * first thread.  Returns only on INIT_JMP_HALT (0) or INIT_JMP_REBOOT
 * (1), with kmalloc disabled for the shutdown path.
 */
int start_idle_thread(void *stack, jmp_buf *switch_buf)
{
	int n;

	set_handler(SIGWINCH);

	/*
	 * Can't use UML_SETJMP or UML_LONGJMP here because they save
	 * and restore signals, with the possible side-effect of
	 * trying to handle any signals which came when they were
	 * blocked, which can't be done on this stack.
	 * Signals must be blocked when jumping back here and restored
	 * after returning to the jumper.
	 */
	n = setjmp(initial_jmpbuf);
	switch (n) {
	case INIT_JMP_NEW_THREAD:
		/* Re-aim *switch_buf at a fresh thread on this stack */
		(*switch_buf)[0].JB_IP = (unsigned long) new_thread_handler;
		(*switch_buf)[0].JB_SP = (unsigned long) stack +
			UM_THREAD_SIZE - sizeof(void *);
		break;
	case INIT_JMP_CALLBACK:
		/* Run the callback registered by initial_thread_cb_skas() */
		(*cb_proc)(cb_arg);
		longjmp(*cb_back, 1);
		break;
	case INIT_JMP_HALT:
		kmalloc_ok = 0;
		return 0;
	case INIT_JMP_REBOOT:
		kmalloc_ok = 0;
		return 1;
	default:
		printk(UM_KERN_ERR "Bad sigsetjmp return in "
		       "start_idle_thread - %d\n", n);
		fatal_sigsegv();
	}
	longjmp(*switch_buf, 1);
}
/*
 * Run proc(arg) on the initial (idle) thread's stack: stash the
 * callback in cb_proc/cb_arg, jump to initial_jmpbuf with
 * INIT_JMP_CALLBACK, and resume here via cb_back when it is done.
 * Signals are blocked across the round trip.
 */
void initial_thread_cb_skas(void (*proc)(void *), void *arg)
{
	jmp_buf here;

	cb_proc = proc;
	cb_arg = arg;
	cb_back = &here;

	block_signals();
	if (UML_SETJMP(&here) == 0)
		UML_LONGJMP(&initial_jmpbuf, INIT_JMP_CALLBACK);
	unblock_signals();

	cb_proc = NULL;
	cb_arg = NULL;
	cb_back = NULL;
}
/*
 * Halt UML: jump back to start_idle_thread(), which will return 0.
 * Does not return here.
 */
void halt_skas(void)
{
	block_signals();
	UML_LONGJMP(&initial_jmpbuf, INIT_JMP_HALT);
}
/*
 * Reboot UML: jump back to start_idle_thread(), which will return 1.
 * Does not return here.
 */
void reboot_skas(void)
{
	block_signals();
	UML_LONGJMP(&initial_jmpbuf, INIT_JMP_REBOOT);
}
  634. void __switch_mm(struct mm_id *mm_idp)
  635. {
  636. int err;
  637. /* FIXME: need cpu pid in __switch_mm */
  638. if (proc_mm) {
  639. err = ptrace(PTRACE_SWITCH_MM, userspace_pid[0], 0,
  640. mm_idp->u.mm_fd);
  641. if (err) {
  642. printk(UM_KERN_ERR "__switch_mm - PTRACE_SWITCH_MM "
  643. "failed, errno = %d\n", errno);
  644. fatal_sigsegv();
  645. }
  646. }
  647. else userspace_pid[0] = mm_idp->u.pid;
  648. }