process.c
/*
 * Copyright (C) 2002 - 2004 Jeff Dike (jdike@addtoit.com)
 * Licensed under the GPL
 */

#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <errno.h>
#include <signal.h>
#include <sched.h>
#include "ptrace_user.h"
#include <sys/wait.h>
#include <sys/mman.h>
#include <sys/user.h>
#include <sys/time.h>
#include <sys/syscall.h>
#include <asm/types.h>
#include "user.h"
#include "sysdep/ptrace.h"
#include "kern_util.h"
#include "skas.h"
#include "stub-data.h"
#include "mm_id.h"
#include "sysdep/sigcontext.h"
#include "sysdep/stub.h"
#include "os.h"
#include "proc_mm.h"
#include "skas_ptrace.h"
#include "chan_user.h"
#include "registers.h"
#include "mem.h"
#include "uml-config.h"
#include "process.h"
#include "longjmp.h"
#include "kern_constants.h"
#include "as-layout.h"
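
/*
 * Handle a SIGWINCH notification on a descriptor - it is only acted on if it
 * came from UML's own process group, in which case a winch interrupt is
 * registered for the fd.
 */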
int is_skas_winch(int pid, int fd, void *data)
{
        if(pid != os_getpgrp())
                return(0);

        register_winch_irq(-1, fd, -1, data, 0);
        return(1);
}

static int ptrace_dump_regs(int pid)
{
        unsigned long regs[MAX_REG_NR];
        int i;

        if(ptrace(PTRACE_GETREGS, pid, 0, regs) < 0)
                return -errno;
        else {
                printk("Stub registers -\n");
                for(i = 0; i < ARRAY_SIZE(regs); i++)
                        printk("\t%d - %lx\n", i, regs[i]);
        }

        return 0;
}

/*
 * Signals that are OK to receive in the stub - we'll just continue it.
 * SIGWINCH will happen when UML is inside a detached screen.
 */
#define STUB_SIG_MASK ((1 << SIGVTALRM) | (1 << SIGWINCH))

/* Signals that the stub will finish with - anything else is an error */
#define STUB_DONE_MASK ((1 << SIGUSR1) | (1 << SIGTRAP))
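
/*
 * Wait for the stub to stop.  Stops with a signal in STUB_SIG_MASK are
 * harmless and the stub is simply continued; a stop with a signal in
 * STUB_DONE_MASK means the stub has finished.  Anything else dumps the
 * stub's registers and panics.
 */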
void wait_stub_done(int pid)
{
        int n, status, err;

        while(1){
                CATCH_EINTR(n = waitpid(pid, &status, WUNTRACED));
                if((n < 0) || !WIFSTOPPED(status))
                        goto bad_wait;

                if(((1 << WSTOPSIG(status)) & STUB_SIG_MASK) == 0)
                        break;

                err = ptrace(PTRACE_CONT, pid, 0, 0);
                if(err)
                        panic("wait_stub_done : continue failed, errno = %d\n",
                              errno);
        }

        if(((1 << WSTOPSIG(status)) & STUB_DONE_MASK) != 0)
                return;

bad_wait:
        err = ptrace_dump_regs(pid);
        if(err)
                printk("Failed to get registers from stub, errno = %d\n", -err);
        panic("wait_stub_done : failed to wait for SIGUSR1/SIGTRAP, pid = %d, "
              "n = %d, errno = %d, status = 0x%x\n", pid, n, errno, status);
}

extern unsigned long current_stub_stack(void);
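
/*
 * Fill in *fi for the fault that just stopped the process.  When
 * PTRACE_FAULTINFO is available this is a single ptrace call, padding the
 * rest of the struct with zeros where the host's ptrace_faultinfo is smaller
 * (the i386 case).  Otherwise the stub is continued with SIGSEGV so its segv
 * handler writes the faultinfo at the start of the stub stack page, from
 * where it is copied out.
 */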
void get_skas_faultinfo(int pid, struct faultinfo * fi)
{
        int err;

        if(ptrace_faultinfo){
                err = ptrace(PTRACE_FAULTINFO, pid, 0, fi);
                if(err)
                        panic("get_skas_faultinfo - PTRACE_FAULTINFO failed, "
                              "errno = %d\n", errno);

                /* Special handling for i386, which has different structs */
                if (sizeof(struct ptrace_faultinfo) < sizeof(struct faultinfo))
                        memset((char *)fi + sizeof(struct ptrace_faultinfo), 0,
                               sizeof(struct faultinfo) -
                               sizeof(struct ptrace_faultinfo));
        }
        else {
                err = ptrace(PTRACE_CONT, pid, 0, SIGSEGV);
                if(err)
                        panic("Failed to continue stub, pid = %d, errno = %d\n",
                              pid, errno);
                wait_stub_done(pid);

                /* faultinfo is prepared by the stub-segv-handler at start of
                 * the stub stack page. We just have to copy it.
                 */
                memcpy(fi, (void *)current_stub_stack(), sizeof(*fi));
        }
}

static void handle_segv(int pid, union uml_pt_regs * regs)
{
        get_skas_faultinfo(pid, &regs->skas.faultinfo);
        segv(regs->skas.faultinfo, 0, 1, NULL);
}

/*
 * To use the same value of using_sysemu as the caller, it is passed in as
 * local_using_sysemu.
 */
static void handle_trap(int pid, union uml_pt_regs *regs,
                        int local_using_sysemu)
{
        int err, status;

        /* Mark this as a syscall */
        UPT_SYSCALL_NR(regs) = PT_SYSCALL_NR(regs->skas.regs);

        if (!local_using_sysemu)
        {
                err = ptrace(PTRACE_POKEUSR, pid, PT_SYSCALL_NR_OFFSET,
                             __NR_getpid);
                if(err < 0)
                        panic("handle_trap - nullifying syscall failed errno = %d\n",
                              errno);

                err = ptrace(PTRACE_SYSCALL, pid, 0, 0);
                if(err < 0)
                        panic("handle_trap - continuing to end of syscall failed, "
                              "errno = %d\n", errno);

                CATCH_EINTR(err = waitpid(pid, &status, WUNTRACED));
                if((err < 0) || !WIFSTOPPED(status) ||
                   (WSTOPSIG(status) != SIGTRAP + 0x80)){
                        err = ptrace_dump_regs(pid);
                        if(err)
                                printk("Failed to get registers from process, "
                                       "errno = %d\n", -err);
                        panic("handle_trap - failed to wait at end of syscall, "
                              "errno = %d, status = %d\n", errno, status);
                }
        }

        handle_syscall(regs);
}

extern int __syscall_stub_start;
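
/*
 * Entry point of the child created by start_userspace.  It asks to be
 * traced, sets up the timer signal and, when /proc/mm is not in use, maps
 * the syscall stub code and the stub data/stack pages at their fixed
 * addresses.  Without PTRACE_FAULTINFO it also installs the stub's SIGSEGV
 * handler on the stub stack, then stops itself so the tracer can take over.
 */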
static int userspace_tramp(void *stack)
{
        void *addr;
        int err;

        ptrace(PTRACE_TRACEME, 0, 0, 0);

        init_new_thread_signals();
        err = set_interval(1);
        if(err)
                panic("userspace_tramp - setting timer failed, errno = %d\n",
                      err);

        if(!proc_mm){
                /* This has a pte, but it can't be mapped in with the usual
                 * tlb_flush mechanism because this is part of that mechanism
                 */
                int fd;
                __u64 offset;

                fd = phys_mapping(to_phys(&__syscall_stub_start), &offset);
                addr = mmap64((void *) UML_CONFIG_STUB_CODE, UM_KERN_PAGE_SIZE,
                              PROT_EXEC, MAP_FIXED | MAP_PRIVATE, fd, offset);
                if(addr == MAP_FAILED){
                        printk("mapping mmap stub failed, errno = %d\n",
                               errno);
                        exit(1);
                }

                if(stack != NULL){
                        fd = phys_mapping(to_phys(stack), &offset);
                        addr = mmap((void *) UML_CONFIG_STUB_DATA,
                                    UM_KERN_PAGE_SIZE, PROT_READ | PROT_WRITE,
                                    MAP_FIXED | MAP_SHARED, fd, offset);
                        if(addr == MAP_FAILED){
                                printk("mapping segfault stack failed, "
                                       "errno = %d\n", errno);
                                exit(1);
                        }
                }
        }
        if(!ptrace_faultinfo && (stack != NULL)){
                struct sigaction sa;
                unsigned long v = UML_CONFIG_STUB_CODE +
                                  (unsigned long) stub_segv_handler -
                                  (unsigned long) &__syscall_stub_start;

                set_sigstack((void *) UML_CONFIG_STUB_DATA, UM_KERN_PAGE_SIZE);
                sigemptyset(&sa.sa_mask);
                sigaddset(&sa.sa_mask, SIGIO);
                sigaddset(&sa.sa_mask, SIGWINCH);
                sigaddset(&sa.sa_mask, SIGALRM);
                sigaddset(&sa.sa_mask, SIGVTALRM);
                sigaddset(&sa.sa_mask, SIGUSR1);
                sa.sa_flags = SA_ONSTACK;
                sa.sa_handler = (void *) v;
                sa.sa_restorer = NULL;
                if(sigaction(SIGSEGV, &sa, NULL) < 0)
                        panic("userspace_tramp - setting SIGSEGV handler "
                              "failed - errno = %d\n", errno);
        }

        os_stop_process(os_getpid());
        return(0);
}

/* Each element set once, and only accessed by a single processor anyway */
#undef NR_CPUS
#define NR_CPUS 1
int userspace_pid[NR_CPUS];
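
/*
 * Create the traced userspace process.  A temporary stack page is mmapped
 * for the clone, the child runs userspace_tramp, and the parent waits until
 * the child stops itself with SIGSTOP (continuing through any SIGVTALRM
 * stops from the timer).  PTRACE_O_TRACESYSGOOD is then set so syscall stops
 * show up as SIGTRAP + 0x80.  Returns the child's pid.
 */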
int start_userspace(unsigned long stub_stack)
{
        void *stack;
        unsigned long sp;
        int pid, status, n, flags;

        stack = mmap(NULL, UM_KERN_PAGE_SIZE,
                     PROT_READ | PROT_WRITE | PROT_EXEC,
                     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if(stack == MAP_FAILED)
                panic("start_userspace : mmap failed, errno = %d", errno);
        sp = (unsigned long) stack + UM_KERN_PAGE_SIZE - sizeof(void *);

        flags = CLONE_FILES | SIGCHLD;
        if(proc_mm)
                flags |= CLONE_VM;
        pid = clone(userspace_tramp, (void *) sp, flags, (void *) stub_stack);
        if(pid < 0)
                panic("start_userspace : clone failed, errno = %d", errno);

        do {
                CATCH_EINTR(n = waitpid(pid, &status, WUNTRACED));
                if(n < 0)
                        panic("start_userspace : wait failed, errno = %d",
                              errno);
        } while(WIFSTOPPED(status) && (WSTOPSIG(status) == SIGVTALRM));

        if(!WIFSTOPPED(status) || (WSTOPSIG(status) != SIGSTOP))
                panic("start_userspace : expected SIGSTOP, got status = %d",
                      status);

        if (ptrace(PTRACE_OLDSETOPTIONS, pid, NULL,
                   (void *)PTRACE_O_TRACESYSGOOD) < 0)
                panic("start_userspace : PTRACE_OLDSETOPTIONS failed, errno=%d\n",
                      errno);

        if(munmap(stack, UM_KERN_PAGE_SIZE) < 0)
                panic("start_userspace : munmap failed, errno = %d\n", errno);

        return(pid);
}
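
/*
 * The main tracing loop: restore the process's registers, resume it with the
 * ptrace operation chosen from the sysemu/singlestep state, wait for the
 * next stop, save the registers back, and dispatch on the stop signal (page
 * faults, syscall traps, IO/timer signals, and so on).  This never returns.
 */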
void userspace(union uml_pt_regs *regs)
{
        int err, status, op, pid = userspace_pid[0];
        /* To prevent races if using_sysemu changes under us.*/
        int local_using_sysemu;

        while(1){
                restore_registers(pid, regs);

                /* Now we set local_using_sysemu to be used for one loop */
                local_using_sysemu = get_using_sysemu();

                op = SELECT_PTRACE_OPERATION(local_using_sysemu,
                                             singlestepping(NULL));

                err = ptrace(op, pid, 0, 0);
                if(err)
                        panic("userspace - could not resume userspace process, "
                              "pid=%d, ptrace operation = %d, errno = %d\n",
                              pid, op, errno);

                CATCH_EINTR(err = waitpid(pid, &status, WUNTRACED));
                if(err < 0)
                        panic("userspace - waitpid failed, errno = %d\n",
                              errno);

                regs->skas.is_user = 1;
                save_registers(pid, regs);
                UPT_SYSCALL_NR(regs) = -1; /* Assume: It's not a syscall */

                if(WIFSTOPPED(status)){
                        int sig = WSTOPSIG(status);
                        switch(sig){
                        case SIGSEGV:
                                if(PTRACE_FULL_FAULTINFO || !ptrace_faultinfo){
                                        get_skas_faultinfo(pid,
                                                           &regs->skas.faultinfo);
                                        (*sig_info[SIGSEGV])(SIGSEGV, regs);
                                }
                                else handle_segv(pid, regs);
                                break;
                        case SIGTRAP + 0x80:
                                handle_trap(pid, regs, local_using_sysemu);
                                break;
                        case SIGTRAP:
                                relay_signal(SIGTRAP, regs);
                                break;
                        case SIGIO:
                        case SIGVTALRM:
                        case SIGILL:
                        case SIGBUS:
                        case SIGFPE:
                        case SIGWINCH:
                                block_signals();
                                (*sig_info[sig])(sig, regs);
                                unblock_signals();
                                break;
                        default:
                                printk("userspace - child stopped with signal "
                                       "%d\n", sig);
                        }
                        pid = userspace_pid[0];
                        interrupt_end();

                        /* Avoid -ERESTARTSYS handling in host */
                        if(PT_SYSCALL_NR_OFFSET != PT_SYSCALL_RET_OFFSET)
                                PT_SYSCALL_NR(regs->skas.regs) = -1;
                }
        }
}

static unsigned long thread_regs[MAX_REG_NR];
static unsigned long thread_fp_regs[HOST_FP_SIZE];

static int __init init_thread_regs(void)
{
        get_safe_registers(thread_regs, thread_fp_regs);
        /* Set parent's instruction pointer to start of clone-stub */
        thread_regs[REGS_IP_INDEX] = UML_CONFIG_STUB_CODE +
                                     (unsigned long) stub_clone_handler -
                                     (unsigned long) &__syscall_stub_start;
        thread_regs[REGS_SP_INDEX] = UML_CONFIG_STUB_DATA + UM_KERN_PAGE_SIZE -
                                     sizeof(void *);
#ifdef __SIGNAL_FRAMESIZE
        thread_regs[REGS_SP_INDEX] -= __SIGNAL_FRAMESIZE;
#endif
        return 0;
}

__initcall(init_thread_regs);
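
/*
 * Fork a new userspace process by running the clone stub inside the existing
 * one.  The parent's registers are pointed at the clone stub (set up in
 * init_thread_regs), the mmap arguments for the child's stub stack are
 * written into the current stub data page, and both parent and child stubs
 * are waited for with wait_stub_done.  The child's pid is read back from the
 * parent's stub data; the child reports UML_CONFIG_STUB_DATA in its own err
 * field on success.  Returns the new child's pid.
 */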
int copy_context_skas0(unsigned long new_stack, int pid)
{
        int err;
        unsigned long current_stack = current_stub_stack();
        struct stub_data *data = (struct stub_data *) current_stack;
        struct stub_data *child_data = (struct stub_data *) new_stack;
        __u64 new_offset;
        int new_fd = phys_mapping(to_phys((void *)new_stack), &new_offset);

        /* prepare offset and fd of child's stack as argument for parent's
         * and child's mmap2 calls
         */
        *data = ((struct stub_data) { .offset = MMAP_OFFSET(new_offset),
                                      .fd     = new_fd,
                                      .timer  = ((struct itimerval)
                                                 { { 0, 1000000 / hz() },
                                                   { 0, 1000000 / hz() }})});

        err = ptrace_setregs(pid, thread_regs);
        if(err < 0)
                panic("copy_context_skas0 : PTRACE_SETREGS failed, "
                      "pid = %d, errno = %d\n", pid, -err);

        err = ptrace_setfpregs(pid, thread_fp_regs);
        if(err < 0)
                panic("copy_context_skas0 : PTRACE_SETFPREGS failed, "
                      "pid = %d, errno = %d\n", pid, -err);

        /* set a well known return code for detection of child write failure */
        child_data->err = 12345678;

        /* Wait until the parent has finished its work, read the child's pid
         * from the parent's stack, and check for a bad result.
         */
        err = ptrace(PTRACE_CONT, pid, 0, 0);
        if(err)
                panic("Failed to continue new process, pid = %d, "
                      "errno = %d\n", pid, errno);
        wait_stub_done(pid);

        pid = data->err;
        if(pid < 0)
                panic("copy_context_skas0 - stub-parent reports error %d\n",
                      -pid);

        /* Wait until the child has finished too, then read the child's
         * result from the child's stack and check it.
         */
        wait_stub_done(pid);
        if (child_data->err != UML_CONFIG_STUB_DATA)
                panic("copy_context_skas0 - stub-child reports error %ld\n",
                      child_data->err);

        if (ptrace(PTRACE_OLDSETOPTIONS, pid, NULL,
                   (void *)PTRACE_O_TRACESYSGOOD) < 0)
                panic("copy_context_skas0 : PTRACE_OLDSETOPTIONS failed, "
                      "errno = %d\n", errno);

        return pid;
}

/*
 * This is used only if stub pages are needed while proc_mm is available.
 * Opening /proc/mm creates a new mm_context which lacks the stub pages, so
 * we map them in through the /proc/mm fd.
 */
void map_stub_pages(int fd, unsigned long code,
                    unsigned long data, unsigned long stack)
{
        struct proc_mm_op mmop;
        int n;
        __u64 code_offset;
        int code_fd = phys_mapping(to_phys((void *) &__syscall_stub_start),
                                   &code_offset);

        mmop = ((struct proc_mm_op) { .op      = MM_MMAP,
                                      .u       =
                                      { .mmap  =
                                        { .addr   = code,
                                          .len    = UM_KERN_PAGE_SIZE,
                                          .prot   = PROT_EXEC,
                                          .flags  = MAP_FIXED | MAP_PRIVATE,
                                          .fd     = code_fd,
                                          .offset = code_offset
                                        } } });
        CATCH_EINTR(n = write(fd, &mmop, sizeof(mmop)));
        if(n != sizeof(mmop)){
                n = errno;
                printk("mmap args - addr = 0x%lx, fd = %d, offset = %llx\n",
                       code, code_fd, (unsigned long long) code_offset);
                panic("map_stub_pages : /proc/mm map for code failed, "
                      "err = %d\n", n);
        }

        if ( stack ) {
                __u64 map_offset;
                int map_fd = phys_mapping(to_phys((void *)stack), &map_offset);

                mmop = ((struct proc_mm_op)
                        { .op      = MM_MMAP,
                          .u       =
                          { .mmap  =
                            { .addr   = data,
                              .len    = UM_KERN_PAGE_SIZE,
                              .prot   = PROT_READ | PROT_WRITE,
                              .flags  = MAP_FIXED | MAP_SHARED,
                              .fd     = map_fd,
                              .offset = map_offset
                            } } });
                CATCH_EINTR(n = write(fd, &mmop, sizeof(mmop)));
                if(n != sizeof(mmop))
                        panic("map_stub_pages : /proc/mm map for data failed, "
                              "err = %d\n", errno);
        }
}
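
/*
 * Prepare a jmp_buf so that a later longjmp to it starts running handler()
 * on the given kernel stack.  Only the saved instruction and stack pointers
 * are set up here.
 */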
void new_thread(void *stack, jmp_buf *buf, void (*handler)(void))
{
        (*buf)[0].JB_IP = (unsigned long) handler;
        (*buf)[0].JB_SP = (unsigned long) stack + UM_THREAD_SIZE -
                          sizeof(void *);
}

#define INIT_JMP_NEW_THREAD 0
#define INIT_JMP_CALLBACK 1
#define INIT_JMP_HALT 2
#define INIT_JMP_REBOOT 3
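
/* Save the current context in *me and jump to the context saved in *you. */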
void switch_threads(jmp_buf *me, jmp_buf *you)
{
        if(UML_SETJMP(me) == 0)
                UML_LONGJMP(you, 1);
}

static jmp_buf initial_jmpbuf;

/* XXX Make these percpu */
static void (*cb_proc)(void *arg);
static void *cb_arg;
static jmp_buf *cb_back;
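
/*
 * The idle thread's setjmp anchor.  Jumps back into initial_jmpbuf either
 * start the first thread on the supplied stack, run a queued callback, or
 * unwind for halt/reboot, depending on the INIT_JMP_* code passed to
 * longjmp.
 */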
int start_idle_thread(void *stack, jmp_buf *switch_buf)
{
        int n;

        set_handler(SIGWINCH, (__sighandler_t) sig_handler,
                    SA_ONSTACK | SA_RESTART, SIGUSR1, SIGIO, SIGALRM,
                    SIGVTALRM, -1);

        /*
         * Can't use UML_SETJMP or UML_LONGJMP here because they save
         * and restore signals, with the possible side-effect of
         * trying to handle any signals which came when they were
         * blocked, which can't be done on this stack.
         * Signals must be blocked when jumping back here and restored
         * after returning to the jumper.
         */
        n = setjmp(initial_jmpbuf);
        switch(n){
        case INIT_JMP_NEW_THREAD:
                (*switch_buf)[0].JB_IP = (unsigned long) new_thread_handler;
                (*switch_buf)[0].JB_SP = (unsigned long) stack +
                                         UM_THREAD_SIZE - sizeof(void *);
                break;
        case INIT_JMP_CALLBACK:
                (*cb_proc)(cb_arg);
                longjmp(*cb_back, 1);
                break;
        case INIT_JMP_HALT:
                kmalloc_ok = 0;
                return(0);
        case INIT_JMP_REBOOT:
                kmalloc_ok = 0;
                return(1);
        default:
                panic("Bad sigsetjmp return in start_idle_thread - %d\n", n);
        }
        longjmp(*switch_buf, 1);
}
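
/*
 * Run proc(arg) on the initial (idle) thread's stack: record the callback,
 * jump to initial_jmpbuf with INIT_JMP_CALLBACK, and return here once the
 * callback has longjmp'd back.
 */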
void initial_thread_cb_skas(void (*proc)(void *), void *arg)
{
        jmp_buf here;

        cb_proc = proc;
        cb_arg = arg;
        cb_back = &here;

        block_signals();
        if(UML_SETJMP(&here) == 0)
                UML_LONGJMP(&initial_jmpbuf, INIT_JMP_CALLBACK);
        unblock_signals();

        cb_proc = NULL;
        cb_arg = NULL;
        cb_back = NULL;
}

void halt_skas(void)
{
        block_signals();
        UML_LONGJMP(&initial_jmpbuf, INIT_JMP_HALT);
}

void reboot_skas(void)
{
        block_signals();
        UML_LONGJMP(&initial_jmpbuf, INIT_JMP_REBOOT);
}
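
/*
 * Switch the userspace process to a new address space.  With proc_mm the
 * host is told to switch via PTRACE_SWITCH_MM; otherwise each mm has its own
 * process and we just switch which pid is traced.
 */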
void switch_mm_skas(struct mm_id *mm_idp)
{
        int err;

        /* FIXME: need cpu pid in switch_mm_skas */
        if(proc_mm){
                err = ptrace(PTRACE_SWITCH_MM, userspace_pid[0], 0,
                             mm_idp->u.mm_fd);
                if(err)
                        panic("switch_mm_skas - PTRACE_SWITCH_MM failed, "
                              "errno = %d\n", errno);
        }
        else userspace_pid[0] = mm_idp->u.pid;
}