/* arch/um/os-Linux/skas/process.c */
  1. /*
  2. * Copyright (C) 2002- 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
  3. * Licensed under the GPL
  4. */
  5. #include <stdlib.h>
  6. #include <unistd.h>
  7. #include <sched.h>
  8. #include <errno.h>
  9. #include <string.h>
  10. #include <sys/mman.h>
  11. #include <sys/ptrace.h>
  12. #include <sys/wait.h>
  13. #include <asm/unistd.h>
  14. #include "as-layout.h"
  15. #include "chan_user.h"
  16. #include "kern_constants.h"
  17. #include "mem.h"
  18. #include "os.h"
  19. #include "process.h"
  20. #include "proc_mm.h"
  21. #include "ptrace_user.h"
  22. #include "registers.h"
  23. #include "skas.h"
  24. #include "skas_ptrace.h"
  25. #include "user.h"
  26. #include "sysdep/stub.h"
  27. int is_skas_winch(int pid, int fd, void *data)
  28. {
  29. if (pid != getpgrp())
  30. return 0;
  31. register_winch_irq(-1, fd, -1, data, 0);
  32. return 1;
  33. }
  34. static int ptrace_dump_regs(int pid)
  35. {
  36. unsigned long regs[MAX_REG_NR];
  37. int i;
  38. if (ptrace(PTRACE_GETREGS, pid, 0, regs) < 0)
  39. return -errno;
  40. printk(UM_KERN_ERR "Stub registers -\n");
  41. for (i = 0; i < ARRAY_SIZE(regs); i++)
  42. printk(UM_KERN_ERR "\t%d - %lx\n", i, regs[i]);
  43. return 0;
  44. }
/*
 * Signals that are OK to receive in the stub - we'll just continue it.
 * SIGWINCH will happen when UML is inside a detached screen.
 */
#define STUB_SIG_MASK ((1 << SIGVTALRM) | (1 << SIGWINCH))

/* Signals that the stub will finish with - anything else is an error */
#define STUB_DONE_MASK ((1 << SIGUSR1) | (1 << SIGTRAP))
/*
 * Wait for the stub process 'pid' to stop with one of the completion
 * signals in STUB_DONE_MASK (SIGUSR1/SIGTRAP).  Benign stops listed in
 * STUB_SIG_MASK are simply continued; any other outcome dumps the
 * stub's registers and panics.
 */
void wait_stub_done(int pid)
{
	int n, status, err;

	while (1) {
		CATCH_EINTR(n = waitpid(pid, &status, WUNTRACED | __WALL));
		if ((n < 0) || !WIFSTOPPED(status))
			goto bad_wait;

		/* Any stop signal outside STUB_SIG_MASK ends the loop */
		if (((1 << WSTOPSIG(status)) & STUB_SIG_MASK) == 0)
			break;

		err = ptrace(PTRACE_CONT, pid, 0, 0);
		if (err)
			panic("wait_stub_done : continue failed, errno = %d\n",
			      errno);
	}

	if (((1 << WSTOPSIG(status)) & STUB_DONE_MASK) != 0)
		return;

bad_wait:
	err = ptrace_dump_regs(pid);
	if (err)
		printk(UM_KERN_ERR "Failed to get registers from stub, "
		       "errno = %d\n", -err);
	panic("wait_stub_done : failed to wait for SIGUSR1/SIGTRAP, pid = %d, "
	      "n = %d, errno = %d, status = 0x%x\n", pid, n, errno, status);
}
extern unsigned long current_stub_stack(void);

/*
 * Retrieve page-fault information for the stub process 'pid' into *fi.
 *
 * Two mechanisms, depending on host support:
 *  - ptrace_faultinfo set: ask the (patched) host kernel directly via
 *    PTRACE_FAULTINFO;
 *  - otherwise: deliver SIGSEGV to the stub so its in-stub handler runs
 *    and writes the faultinfo to the start of the stub stack page, then
 *    copy it from there once the stub signals completion.
 */
void get_skas_faultinfo(int pid, struct faultinfo * fi)
{
	int err;

	if (ptrace_faultinfo) {
		err = ptrace(PTRACE_FAULTINFO, pid, 0, fi);
		if (err)
			panic("get_skas_faultinfo - PTRACE_FAULTINFO failed, "
			      "errno = %d\n", errno);

		/* Special handling for i386, which has different structs */
		if (sizeof(struct ptrace_faultinfo) < sizeof(struct faultinfo))
			/* zero the tail that PTRACE_FAULTINFO didn't fill */
			memset((char *)fi + sizeof(struct ptrace_faultinfo), 0,
			       sizeof(struct faultinfo) -
			       sizeof(struct ptrace_faultinfo));
	}
	else {
		err = ptrace(PTRACE_CONT, pid, 0, SIGSEGV);
		if (err)
			panic("Failed to continue stub, pid = %d, errno = %d\n",
			      pid, errno);
		wait_stub_done(pid);

		/*
		 * faultinfo is prepared by the stub-segv-handler at start of
		 * the stub stack page. We just have to copy it.
		 */
		memcpy(fi, (void *)current_stub_stack(), sizeof(*fi));
	}
}
  104. static void handle_segv(int pid, struct uml_pt_regs * regs)
  105. {
  106. get_skas_faultinfo(pid, &regs->faultinfo);
  107. segv(regs->faultinfo, 0, 1, NULL);
  108. }
/*
 * To use the same value of using_sysemu as the caller, ask it that value
 * (in local_using_sysemu
 */
static void handle_trap(int pid, struct uml_pt_regs *regs,
			int local_using_sysemu)
{
	int err, status;

	/* Mark this as a syscall */
	UPT_SYSCALL_NR(regs) = PT_SYSCALL_NR(regs->gp);

	if (!local_using_sysemu)
	{
		/*
		 * Without SYSEMU the host would actually execute the
		 * syscall, so rewrite it to the harmless getpid and run the
		 * child on to the syscall-exit stop (SIGTRAP | 0x80, thanks
		 * to PTRACE_O_TRACESYSGOOD).
		 */
		err = ptrace(PTRACE_POKEUSR, pid, PT_SYSCALL_NR_OFFSET,
			     __NR_getpid);
		if (err < 0)
			panic("handle_trap - nullifying syscall failed, "
			      "errno = %d\n", errno);

		err = ptrace(PTRACE_SYSCALL, pid, 0, 0);
		if (err < 0)
			panic("handle_trap - continuing to end of syscall "
			      "failed, errno = %d\n", errno);

		CATCH_EINTR(err = waitpid(pid, &status, WUNTRACED | __WALL));
		if ((err < 0) || !WIFSTOPPED(status) ||
		    (WSTOPSIG(status) != SIGTRAP + 0x80)) {
			err = ptrace_dump_regs(pid);
			if (err)
				printk(UM_KERN_ERR "Failed to get registers "
				       "from process, errno = %d\n", -err);
			panic("handle_trap - failed to wait at end of syscall, "
			      "errno = %d, status = %d\n", errno, status);
		}
	}

	/* Dispatch the already-saved syscall registers to the UML kernel */
	handle_syscall(regs);
}
extern int __syscall_stub_start;

/*
 * Child side of start_userspace()'s clone(): prepare this host process
 * to be traced and to host userspace execution, then SIGSTOP so the
 * tracer can attach its options.  'stack' is the stub stack page and
 * may be NULL.
 */
static int userspace_tramp(void *stack)
{
	void *addr;
	int err;

	ptrace(PTRACE_TRACEME, 0, 0, 0);

	signal(SIGTERM, SIG_DFL);
	err = set_interval();
	if (err)
		panic("userspace_tramp - setting timer failed, errno = %d\n",
		      err);

	if (!proc_mm) {
		/*
		 * This has a pte, but it can't be mapped in with the usual
		 * tlb_flush mechanism because this is part of that mechanism
		 */
		int fd;
		unsigned long long offset;

		/* Map the syscall stub code page at STUB_CODE */
		fd = phys_mapping(to_phys(&__syscall_stub_start), &offset);
		addr = mmap64((void *) STUB_CODE, UM_KERN_PAGE_SIZE,
			      PROT_EXEC, MAP_FIXED | MAP_PRIVATE, fd, offset);
		if (addr == MAP_FAILED) {
			printk(UM_KERN_ERR "mapping mmap stub at 0x%lx failed, "
			       "errno = %d\n", STUB_CODE, errno);
			exit(1);
		}

		if (stack != NULL) {
			/* Map the stub stack/data page at STUB_DATA */
			fd = phys_mapping(to_phys(stack), &offset);
			addr = mmap((void *) STUB_DATA,
				    UM_KERN_PAGE_SIZE, PROT_READ | PROT_WRITE,
				    MAP_FIXED | MAP_SHARED, fd, offset);
			if (addr == MAP_FAILED) {
				printk(UM_KERN_ERR "mapping segfault stack "
				       "at 0x%lx failed, errno = %d\n",
				       STUB_DATA, errno);
				exit(1);
			}
		}
	}
	if (!ptrace_faultinfo && (stack != NULL)) {
		struct sigaction sa;

		/*
		 * Address of stub_segv_handler as it appears inside the
		 * stub code page mapped above at STUB_CODE.
		 */
		unsigned long v = STUB_CODE +
				  (unsigned long) stub_segv_handler -
				  (unsigned long) &__syscall_stub_start;

		/* SIGSEGV runs on the stub stack with these signals held */
		set_sigstack((void *) STUB_DATA, UM_KERN_PAGE_SIZE);
		sigemptyset(&sa.sa_mask);
		sigaddset(&sa.sa_mask, SIGIO);
		sigaddset(&sa.sa_mask, SIGWINCH);
		sigaddset(&sa.sa_mask, SIGVTALRM);
		sigaddset(&sa.sa_mask, SIGUSR1);
		sa.sa_flags = SA_ONSTACK;
		sa.sa_handler = (void *) v;
		sa.sa_restorer = NULL;
		if (sigaction(SIGSEGV, &sa, NULL) < 0)
			panic("userspace_tramp - setting SIGSEGV handler "
			      "failed - errno = %d\n", errno);
	}

	/* Tell the parent (tracer) that setup is complete */
	kill(os_getpid(), SIGSTOP);
	return 0;
}
/* Each element set once, and only accessed by a single processor anyway */
#undef NR_CPUS
#define NR_CPUS 1
/* Host pid of the process running userspace for each (single) CPU */
int userspace_pid[NR_CPUS];
/*
 * Clone a new host process running userspace_tramp() with 'stub_stack'
 * as its stub stack page, wait for it to SIGSTOP itself, and set the
 * ptrace options UML relies on.  Returns the new pid; panics on any
 * failure.
 */
int start_userspace(unsigned long stub_stack)
{
	void *stack;
	unsigned long sp;
	int pid, status, n, flags;

	/* Temporary stack for the trampoline, unmapped again below */
	stack = mmap(NULL, UM_KERN_PAGE_SIZE,
		     PROT_READ | PROT_WRITE | PROT_EXEC,
		     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (stack == MAP_FAILED)
		panic("start_userspace : mmap failed, errno = %d", errno);
	sp = (unsigned long) stack + UM_KERN_PAGE_SIZE - sizeof(void *);

	flags = CLONE_FILES;
	if (proc_mm)
		flags |= CLONE_VM;	/* /proc/mm does the mm switching */
	else
		flags |= SIGCHLD;

	pid = clone(userspace_tramp, (void *) sp, flags, (void *) stub_stack);
	if (pid < 0)
		panic("start_userspace : clone failed, errno = %d", errno);

	/* Skip over any timer stops until the trampoline's SIGSTOP */
	do {
		CATCH_EINTR(n = waitpid(pid, &status, WUNTRACED | __WALL));
		if (n < 0)
			panic("start_userspace : wait failed, errno = %d",
			      errno);
	} while (WIFSTOPPED(status) && (WSTOPSIG(status) == SIGVTALRM));

	if (!WIFSTOPPED(status) || (WSTOPSIG(status) != SIGSTOP))
		panic("start_userspace : expected SIGSTOP, got status = %d",
		      status);

	/* Make syscall stops distinguishable as SIGTRAP | 0x80 */
	if (ptrace(PTRACE_OLDSETOPTIONS, pid, NULL,
		   (void *) PTRACE_O_TRACESYSGOOD) < 0)
		panic("start_userspace : PTRACE_OLDSETOPTIONS failed, "
		      "errno = %d\n", errno);

	if (munmap(stack, UM_KERN_PAGE_SIZE) < 0)
		panic("start_userspace : munmap failed, errno = %d\n", errno);

	return pid;
}
/*
 * The main tracing loop: resume the userspace process, wait for it to
 * stop, save its registers, and dispatch on the stop signal (fault,
 * syscall, timer, IO, ...).  Never returns; loops until the process is
 * switched or the machine halts via other paths.
 */
void userspace(struct uml_pt_regs *regs)
{
	struct itimerval timer;
	unsigned long long nsecs, now;
	int err, status, op, pid = userspace_pid[0];
	/* To prevent races if using_sysemu changes under us.*/
	int local_using_sysemu;

	/* Deadline for the next virtual timer tick to be delivered */
	if (getitimer(ITIMER_VIRTUAL, &timer))
		printk("Failed to get itimer, errno = %d\n", errno);
	nsecs = timer.it_value.tv_sec * UM_NSEC_PER_SEC +
		timer.it_value.tv_usec * UM_NSEC_PER_USEC;
	nsecs += os_nsecs();

	while (1) {
		restore_registers(pid, regs);

		/* Now we set local_using_sysemu to be used for one loop */
		local_using_sysemu = get_using_sysemu();

		op = SELECT_PTRACE_OPERATION(local_using_sysemu,
					     singlestepping(NULL));

		err = ptrace(op, pid, 0, 0);
		if (err)
			panic("userspace - could not resume userspace process, "
			      "pid=%d, ptrace operation = %d, errno = %d\n",
			      pid, op, errno);

		CATCH_EINTR(err = waitpid(pid, &status, WUNTRACED | __WALL));
		if (err < 0)
			panic("userspace - waitpid failed, errno = %d\n",
			      errno);

		regs->is_user = 1;
		save_registers(pid, regs);
		UPT_SYSCALL_NR(regs) = -1; /* Assume: It's not a syscall */

		if (WIFSTOPPED(status)) {
			int sig = WSTOPSIG(status);
			switch(sig) {
			case SIGSEGV:
				if (PTRACE_FULL_FAULTINFO ||
				    !ptrace_faultinfo) {
					get_skas_faultinfo(pid,
							   &regs->faultinfo);
					(*sig_info[SIGSEGV])(SIGSEGV, regs);
				}
				else handle_segv(pid, regs);
				break;
			case SIGTRAP + 0x80:
				/* syscall stop (PTRACE_O_TRACESYSGOOD) */
				handle_trap(pid, regs, local_using_sysemu);
				break;
			case SIGTRAP:
				relay_signal(SIGTRAP, regs);
				break;
			case SIGVTALRM:
				/* Ignore ticks arriving before the deadline */
				now = os_nsecs();
				if (now < nsecs)
					break;
				block_signals();
				(*sig_info[sig])(sig, regs);
				unblock_signals();
				/* Re-arm the deadline for the next tick */
				nsecs = timer.it_value.tv_sec *
					UM_NSEC_PER_SEC +
					timer.it_value.tv_usec *
					UM_NSEC_PER_USEC;
				nsecs += os_nsecs();
				break;
			case SIGIO:
			case SIGILL:
			case SIGBUS:
			case SIGFPE:
			case SIGWINCH:
				block_signals();
				(*sig_info[sig])(sig, regs);
				unblock_signals();
				break;
			default:
				printk(UM_KERN_ERR "userspace - child stopped "
				       "with signal %d\n", sig);
			}
			/* A handler may have switched processes under us */
			pid = userspace_pid[0];
			interrupt_end();

			/* Avoid -ERESTARTSYS handling in host */
			if (PT_SYSCALL_NR_OFFSET != PT_SYSCALL_RET_OFFSET)
				PT_SYSCALL_NR(regs->gp) = -1;
		}
	}
}
/* Register template for new stub threads, filled in once at boot */
static unsigned long thread_regs[MAX_REG_NR];

/*
 * Build the register set used to run the clone stub: safe defaults,
 * with IP at the clone stub inside the mapped stub code page and SP at
 * the top of the stub data page.
 */
static int __init init_thread_regs(void)
{
	get_safe_registers(thread_regs);
	/* Set parent's instruction pointer to start of clone-stub */
	thread_regs[REGS_IP_INDEX] = STUB_CODE +
				(unsigned long) stub_clone_handler -
				(unsigned long) &__syscall_stub_start;
	thread_regs[REGS_SP_INDEX] = STUB_DATA + UM_KERN_PAGE_SIZE -
		sizeof(void *);
#ifdef __SIGNAL_FRAMESIZE
	/* leave room for the host's signal frame on the stub stack */
	thread_regs[REGS_SP_INDEX] -= __SIGNAL_FRAMESIZE;
#endif
	return 0;
}

__initcall(init_thread_regs);
/*
 * Create a new process context in SKAS0 mode: run the clone stub in the
 * existing stub process 'pid'; the stub clones itself, and parent and
 * child each report their result through their stub data pages.
 * Returns the pid of the new stub child; panics on any failure.
 */
int copy_context_skas0(unsigned long new_stack, int pid)
{
	/* one-tick itimer for the child, matching UM_HZ */
	struct timeval tv = { .tv_sec = 0, .tv_usec = UM_USEC_PER_SEC / UM_HZ };
	int err;
	unsigned long current_stack = current_stub_stack();
	struct stub_data *data = (struct stub_data *) current_stack;
	struct stub_data *child_data = (struct stub_data *) new_stack;
	unsigned long long new_offset;
	int new_fd = phys_mapping(to_phys((void *)new_stack), &new_offset);

	/*
	 * prepare offset and fd of child's stack as argument for parent's
	 * and child's mmap2 calls
	 */
	*data = ((struct stub_data) { .offset = MMAP_OFFSET(new_offset),
				      .fd = new_fd,
				      .timer = ((struct itimerval)
						{ .it_value = tv,
						  .it_interval = tv }) });

	err = ptrace_setregs(pid, thread_regs);
	if (err < 0)
		panic("copy_context_skas0 : PTRACE_SETREGS failed, "
		      "pid = %d, errno = %d\n", pid, -err);

	/* set a well known return code for detection of child write failure */
	child_data->err = 12345678;

	/*
	 * Wait, until parent has finished its work: read child's pid from
	 * parent's stack, and check, if bad result.
	 */
	err = ptrace(PTRACE_CONT, pid, 0, 0);
	if (err)
		panic("Failed to continue new process, pid = %d, "
		      "errno = %d\n", pid, errno);
	wait_stub_done(pid);

	pid = data->err;
	if (pid < 0)
		panic("copy_context_skas0 - stub-parent reports error %d\n",
		      -pid);

	/*
	 * Wait, until child has finished too: read child's result from
	 * child's stack and check it.
	 */
	wait_stub_done(pid);
	if (child_data->err != STUB_DATA)
		panic("copy_context_skas0 - stub-child reports error %ld\n",
		      child_data->err);

	/* Make syscall stops in the new child distinguishable too */
	if (ptrace(PTRACE_OLDSETOPTIONS, pid, NULL,
		   (void *)PTRACE_O_TRACESYSGOOD) < 0)
		panic("copy_context_skas0 : PTRACE_OLDSETOPTIONS failed, "
		      "errno = %d\n", errno);

	return pid;
}
/*
 * This is used only, if stub pages are needed, while proc_mm is
 * available. Opening /proc/mm creates a new mm_context, which lacks
 * the stub-pages. Thus, we map them using /proc/mm-fd
 */
void map_stub_pages(int fd, unsigned long code,
		    unsigned long data, unsigned long stack)
{
	struct proc_mm_op mmop;
	int n;
	unsigned long long code_offset;
	int code_fd = phys_mapping(to_phys((void *) &__syscall_stub_start),
				   &code_offset);

	/* Map the stub code page: executable, private */
	mmop = ((struct proc_mm_op) { .op = MM_MMAP,
				      .u =
				      { .mmap =
					{ .addr = code,
					  .len = UM_KERN_PAGE_SIZE,
					  .prot = PROT_EXEC,
					  .flags = MAP_FIXED | MAP_PRIVATE,
					  .fd = code_fd,
					  .offset = code_offset
					} } });
	CATCH_EINTR(n = write(fd, &mmop, sizeof(mmop)));
	if (n != sizeof(mmop)) {
		n = errno;	/* save errno before printk can clobber it */
		printk(UM_KERN_ERR "mmap args - addr = 0x%lx, fd = %d, "
		       "offset = %llx\n", code, code_fd,
		       (unsigned long long) code_offset);
		panic("map_stub_pages : /proc/mm map for code failed, "
		      "err = %d\n", n);
	}

	if (stack) {
		/* Map the stub data/stack page: read-write, shared */
		unsigned long long map_offset;
		int map_fd = phys_mapping(to_phys((void *)stack), &map_offset);
		mmop = ((struct proc_mm_op)
				{ .op = MM_MMAP,
				  .u =
				  { .mmap =
				    { .addr = data,
				      .len = UM_KERN_PAGE_SIZE,
				      .prot = PROT_READ | PROT_WRITE,
				      .flags = MAP_FIXED | MAP_SHARED,
				      .fd = map_fd,
				      .offset = map_offset
				    } } });
		CATCH_EINTR(n = write(fd, &mmop, sizeof(mmop)));
		if (n != sizeof(mmop))
			panic("map_stub_pages : /proc/mm map for data failed, "
			      "err = %d\n", errno);
	}
}
  444. void new_thread(void *stack, jmp_buf *buf, void (*handler)(void))
  445. {
  446. (*buf)[0].JB_IP = (unsigned long) handler;
  447. (*buf)[0].JB_SP = (unsigned long) stack + UM_THREAD_SIZE -
  448. sizeof(void *);
  449. }
/*
 * Codes delivered to start_idle_thread()'s setjmp via longjmp;
 * INIT_JMP_NEW_THREAD (0) is the direct setjmp return.
 */
#define INIT_JMP_NEW_THREAD 0
#define INIT_JMP_CALLBACK 1
#define INIT_JMP_HALT 2
#define INIT_JMP_REBOOT 3
/*
 * Save the current context in *me and jump to the context in *you;
 * returns when some other thread later switches back to *me.
 */
void switch_threads(jmp_buf *me, jmp_buf *you)
{
	if (UML_SETJMP(me) == 0)
		UML_LONGJMP(you, 1);
}
/* Context of the initial (boot) stack, target of the INIT_JMP_* jumps */
static jmp_buf initial_jmpbuf;

/* XXX Make these percpu */
/* Callback and argument run on the initial stack by INIT_JMP_CALLBACK */
static void (*cb_proc)(void *arg);
static void *cb_arg;
/* Context to longjmp back to once the callback has run */
static jmp_buf *cb_back;
/*
 * Dispatch point on the initial host stack.  Returns 0 on halt and 1 on
 * reboot; for the other INIT_JMP_* codes it longjmps away and does not
 * return through the normal path.
 */
int start_idle_thread(void *stack, jmp_buf *switch_buf)
{
	int n;

	set_handler(SIGWINCH, (__sighandler_t) sig_handler,
		    SA_ONSTACK | SA_RESTART, SIGUSR1, SIGIO, SIGVTALRM, -1);

	/*
	 * Can't use UML_SETJMP or UML_LONGJMP here because they save
	 * and restore signals, with the possible side-effect of
	 * trying to handle any signals which came when they were
	 * blocked, which can't be done on this stack.
	 * Signals must be blocked when jumping back here and restored
	 * after returning to the jumper.
	 */
	n = setjmp(initial_jmpbuf);
	switch(n) {
	case INIT_JMP_NEW_THREAD:
		/* Direct return: prime switch_buf to run new_thread_handler */
		(*switch_buf)[0].JB_IP = (unsigned long) new_thread_handler;
		(*switch_buf)[0].JB_SP = (unsigned long) stack +
			UM_THREAD_SIZE - sizeof(void *);
		break;
	case INIT_JMP_CALLBACK:
		/* Run the queued callback on this stack, then jump back */
		(*cb_proc)(cb_arg);
		longjmp(*cb_back, 1);
		break;
	case INIT_JMP_HALT:
		kmalloc_ok = 0;
		return 0;
	case INIT_JMP_REBOOT:
		kmalloc_ok = 0;
		return 1;
	default:
		panic("Bad sigsetjmp return in start_idle_thread - %d\n", n);
	}
	longjmp(*switch_buf, 1);
}
/*
 * Run proc(arg) on the initial stack by bouncing through
 * initial_jmpbuf with INIT_JMP_CALLBACK; control returns here when the
 * callback longjmps back via cb_back.
 */
void initial_thread_cb_skas(void (*proc)(void *), void *arg)
{
	jmp_buf here;

	cb_proc = proc;
	cb_arg = arg;
	cb_back = &here;

	/* Signals stay blocked across the jump; see start_idle_thread() */
	block_signals();
	if (UML_SETJMP(&here) == 0)
		UML_LONGJMP(&initial_jmpbuf, INIT_JMP_CALLBACK);
	unblock_signals();

	cb_proc = NULL;
	cb_arg = NULL;
	cb_back = NULL;
}
/* Jump back to start_idle_thread(), making it return 0 (halt). */
void halt_skas(void)
{
	block_signals();
	UML_LONGJMP(&initial_jmpbuf, INIT_JMP_HALT);
}
/* Jump back to start_idle_thread(), making it return 1 (reboot). */
void reboot_skas(void)
{
	block_signals();
	UML_LONGJMP(&initial_jmpbuf, INIT_JMP_REBOOT);
}
  523. void __switch_mm(struct mm_id *mm_idp)
  524. {
  525. int err;
  526. /* FIXME: need cpu pid in __switch_mm */
  527. if (proc_mm) {
  528. err = ptrace(PTRACE_SWITCH_MM, userspace_pid[0], 0,
  529. mm_idp->u.mm_fd);
  530. if (err)
  531. panic("__switch_mm - PTRACE_SWITCH_MM failed, "
  532. "errno = %d\n", errno);
  533. }
  534. else userspace_pid[0] = mm_idp->u.pid;
  535. }