signal_mm.c 27 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978979980981982983984985986987988989990991992993994995996997998999100010011002100310041005100610071008100910101011101210131014101510161017
  1. /*
  2. * linux/arch/m68k/kernel/signal.c
  3. *
  4. * Copyright (C) 1991, 1992 Linus Torvalds
  5. *
  6. * This file is subject to the terms and conditions of the GNU General Public
  7. * License. See the file COPYING in the main directory of this archive
  8. * for more details.
  9. */
  10. /*
  11. * Linux/m68k support by Hamish Macdonald
  12. *
  13. * 68060 fixes by Jesper Skov
  14. *
  15. * 1997-12-01 Modified for POSIX.1b signals by Andreas Schwab
  16. *
  17. * mathemu support by Roman Zippel
  18. * (Note: fpstate in the signal context is completely ignored for the emulator
  19. * and the internal floating point format is put on stack)
  20. */
  21. /*
  22. * ++roman (07/09/96): implemented signal stacks (specially for tosemu on
  23. * Atari :-) Current limitation: Only one sigstack can be active at one time.
  24. * If a second signal with SA_ONSTACK set arrives while working on a sigstack,
  25. * SA_ONSTACK is ignored. This behaviour avoids lots of trouble with nested
  26. * signal handlers!
  27. */
  28. #include <linux/sched.h>
  29. #include <linux/mm.h>
  30. #include <linux/kernel.h>
  31. #include <linux/signal.h>
  32. #include <linux/syscalls.h>
  33. #include <linux/errno.h>
  34. #include <linux/wait.h>
  35. #include <linux/ptrace.h>
  36. #include <linux/unistd.h>
  37. #include <linux/stddef.h>
  38. #include <linux/highuid.h>
  39. #include <linux/personality.h>
  40. #include <linux/tty.h>
  41. #include <linux/binfmts.h>
  42. #include <linux/module.h>
  43. #include <asm/setup.h>
  44. #include <asm/uaccess.h>
  45. #include <asm/pgtable.h>
  46. #include <asm/traps.h>
  47. #include <asm/ucontext.h>
  48. #define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
/*
 * Number of extra bytes each 680x0 exception stack-frame format carries
 * beyond the four-word base frame, indexed by the 4-bit frame format
 * code.  Formats marked -1 are never legal on signal delivery/return
 * paths and are rejected by the users of this table.  [0] is implicitly
 * zero (the short four-word frame has no extra words).
 */
static const int frame_extra_sizes[16] = {
	[1]	= -1, /* sizeof(((struct frame *)0)->un.fmt1), */
	[2]	= sizeof(((struct frame *)0)->un.fmt2),
	[3]	= sizeof(((struct frame *)0)->un.fmt3),
	[4]	= sizeof(((struct frame *)0)->un.fmt4),
	[5]	= -1, /* sizeof(((struct frame *)0)->un.fmt5), */
	[6]	= -1, /* sizeof(((struct frame *)0)->un.fmt6), */
	[7]	= sizeof(((struct frame *)0)->un.fmt7),
	[8]	= -1, /* sizeof(((struct frame *)0)->un.fmt8), */
	[9]	= sizeof(((struct frame *)0)->un.fmt9),
	[10]	= sizeof(((struct frame *)0)->un.fmta),
	[11]	= sizeof(((struct frame *)0)->un.fmtb),
	[12]	= -1, /* sizeof(((struct frame *)0)->un.fmtc), */
	[13]	= -1, /* sizeof(((struct frame *)0)->un.fmtd), */
	[14]	= -1, /* sizeof(((struct frame *)0)->un.fmte), */
	[15]	= -1, /* sizeof(((struct frame *)0)->un.fmtf), */
};
/*
 * Try to recover from a fault taken in kernel mode by redirecting
 * execution to the exception-table fixup handler for the faulting PC.
 *
 * Returns 0 if no fixup entry exists (the fault is fatal for the
 * caller to handle); otherwise rewrites the exception frame in place
 * to a plain format-0 four-word frame pointing at the fixup address
 * and returns 1.
 */
int handle_kernel_fault(struct pt_regs *regs)
{
	const struct exception_table_entry *fixup;
	struct pt_regs *tregs;

	/* Are we prepared to handle this kernel fault? */
	fixup = search_exception_tables(regs->pc);
	if (!fixup)
		return 0;

	/*
	 * Create a new four word stack frame, discarding the old one.
	 * stkadj records how many extra frame bytes are being dropped;
	 * tregs is the new, shifted location of the register frame.
	 */
	regs->stkadj = frame_extra_sizes[regs->format];
	tregs = (struct pt_regs *)((long)regs + regs->stkadj);
	tregs->vector = regs->vector;
	tregs->format = 0;
	tregs->pc = fixup->fixup;
	tregs->sr = regs->sr;

	return 1;
}
/*
 * Atomically swap in the new signal mask, and wait for a signal.
 *
 * The old mask is stashed in saved_sigmask and restored on the way
 * back to userspace (set_restore_sigmask), so the handler runs with
 * the temporary mask but the syscall returns with the original one.
 * Always returns -ERESTARTNOHAND, per sigsuspend(2) semantics.
 */
asmlinkage int
sys_sigsuspend(int unused0, int unused1, old_sigset_t mask)
{
	mask &= _BLOCKABLE;	/* SIGKILL/SIGSTOP can never be blocked */
	spin_lock_irq(&current->sighand->siglock);
	current->saved_sigmask = current->blocked;
	siginitset(&current->blocked, mask);
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);

	current->state = TASK_INTERRUPTIBLE;
	schedule();	/* sleep until a signal wakes us */
	set_restore_sigmask();

	return -ERESTARTNOHAND;
}
/*
 * Old-style sigaction(2): convert the legacy old_sigaction user layout
 * to a k_sigaction, install it via do_sigaction(), and copy the
 * previous action back out to @oact if requested.
 * Returns 0 on success, -EFAULT on bad user pointers, or the
 * do_sigaction() error.
 */
asmlinkage int
sys_sigaction(int sig, const struct old_sigaction __user *act,
	      struct old_sigaction __user *oact)
{
	struct k_sigaction new_ka, old_ka;
	int ret;

	if (act) {
		old_sigset_t mask;

		/* One access_ok() check, then cheap __get_user fetches. */
		if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
		    __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
		    __get_user(new_ka.sa.sa_restorer, &act->sa_restorer) ||
		    __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
		    __get_user(mask, &act->sa_mask))
			return -EFAULT;
		siginitset(&new_ka.sa.sa_mask, mask);
	}

	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);

	if (!ret && oact) {
		/* Only the first word of the mask fits the old ABI. */
		if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
		    __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
		    __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer) ||
		    __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
		    __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
			return -EFAULT;
	}

	return ret;
}
  127. asmlinkage int
  128. sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss)
  129. {
  130. return do_sigaltstack(uss, uoss, rdusp());
  131. }
/*
 * Do a signal return; undo the signal stack.
 *
 * Keep the return code on the stack quadword aligned!
 * That makes the cache flush below easier.
 *
 * This is the layout of a classic (non-RT) signal frame as pushed on
 * the user stack by setup_frame(); do_sigreturn() unwinds exactly it.
 */
struct sigframe
{
	char __user *pretcode;		/* handler return address -> retcode[] */
	int sig;			/* signal number seen by the handler */
	int code;			/* hardware vector (see setup_frame) */
	struct sigcontext __user *psc;	/* points at sc below */
	char retcode[8];		/* trampoline: traps into sigreturn */
	unsigned long extramask[_NSIG_WORDS-1];	/* upper words of blocked mask */
	struct sigcontext sc;		/* saved CPU/FPU context */
};
/*
 * Layout of an RT signal frame as pushed by setup_rt_frame();
 * do_rt_sigreturn() unwinds exactly this layout.
 */
struct rt_sigframe
{
	char __user *pretcode;		/* handler return address -> retcode[] */
	int sig;			/* signal number seen by the handler */
	struct siginfo __user *pinfo;	/* points at info below */
	void __user *puc;		/* points at uc below */
	char retcode[8];		/* trampoline: traps into rt_sigreturn */
	struct siginfo info;		/* copied-out siginfo */
	struct ucontext uc;		/* full user context incl. FPU state */
};
/* FPU frame version byte captured at save time; restore paths verify
 * a returned frame against it.  Set by save_fpu_state()/rt_save_fpu_state(). */
static unsigned char fpu_version;
/*
 * Reload FPU state from a (kernel copy of a) sigcontext on signal
 * return.  Returns 0 on success, 1 if the saved fsave frame fails
 * validation -- the caller then treats the whole frame as bad.
 *
 * The magic sizes checked below (0x18/0xb4 for 68881, 0x38/0xd4 for
 * 68882, etc.) are the documented idle/busy fsave frame sizes for
 * each FPU -- see the Motorola FPU manuals.
 */
static inline int restore_fpu_state(struct sigcontext *sc)
{
	int err = 1;

	if (FPU_IS_EMU) {
		/* restore registers */
		memcpy(current->thread.fpcntl, sc->sc_fpcntl, 12);
		memcpy(current->thread.fp, sc->sc_fpregs, 24);
		return 0;
	}

	/* A zero first byte (third on '060) means a null frame: nothing
	 * to reload beyond the frestore below. */
	if (CPU_IS_060 ? sc->sc_fpstate[2] : sc->sc_fpstate[0]) {
		/* Verify the frame format. */
		if (!CPU_IS_060 && (sc->sc_fpstate[0] != fpu_version))
			goto out;
		if (CPU_IS_020_OR_030) {
			if (m68k_fputype & FPU_68881 &&
			    !(sc->sc_fpstate[1] == 0x18 || sc->sc_fpstate[1] == 0xb4))
				goto out;
			if (m68k_fputype & FPU_68882 &&
			    !(sc->sc_fpstate[1] == 0x38 || sc->sc_fpstate[1] == 0xd4))
				goto out;
		} else if (CPU_IS_040) {
			if (!(sc->sc_fpstate[1] == 0x00 ||
			      sc->sc_fpstate[1] == 0x28 ||
			      sc->sc_fpstate[1] == 0x60))
				goto out;
		} else if (CPU_IS_060) {
			if (!(sc->sc_fpstate[3] == 0x00 ||
			      sc->sc_fpstate[3] == 0x60 ||
			      sc->sc_fpstate[3] == 0xe0))
				goto out;
		} else
			goto out;

		/* Only fp0/fp1 and the control regs live in a sigcontext. */
		__asm__ volatile (".chip 68k/68881\n\t"
				  "fmovemx %0,%%fp0-%%fp1\n\t"
				  "fmoveml %1,%%fpcr/%%fpsr/%%fpiar\n\t"
				  ".chip 68k"
				  : /* no outputs */
				  : "m" (*sc->sc_fpregs), "m" (*sc->sc_fpcntl));
	}

	/* Hand the (possibly null) internal frame back to the FPU. */
	__asm__ volatile (".chip 68k/68881\n\t"
			  "frestore %0\n\t"
			  ".chip 68k" : : "m" (*sc->sc_fpstate));
	err = 0;

out:
	return err;
}
/* The RT frame stashes the raw fsave image plus format/vector word and
 * exception-frame extras inside ucontext's uc_filler area. */
#define FPCONTEXT_SIZE	216
#define uc_fpstate	uc_filler[0]			/* raw fsave frame */
#define uc_formatvec	uc_filler[FPCONTEXT_SIZE/4]	/* format<<12 | vector */
#define uc_extra	uc_filler[FPCONTEXT_SIZE/4+1]	/* extra frame words */
/*
 * Reload FPU state from an RT signal frame's ucontext (user memory).
 * Returns 0 on success, 1 on a fault or failed frame validation.
 *
 * Unlike restore_fpu_state(), the full fp0-fp7 register set is kept in
 * uc_mcontext.fpregs, and the raw fsave image lives in uc_fpstate.
 */
static inline int rt_restore_fpu_state(struct ucontext __user *uc)
{
	unsigned char fpstate[FPCONTEXT_SIZE];
	int context_size = CPU_IS_060 ? 8 : 0;
	fpregset_t fpregs;
	int err = 1;

	if (FPU_IS_EMU) {
		/* restore fpu control register */
		if (__copy_from_user(current->thread.fpcntl,
				     uc->uc_mcontext.fpregs.f_fpcntl, 12))
			goto out;
		/* restore all other fpu register */
		if (__copy_from_user(current->thread.fp,
				     uc->uc_mcontext.fpregs.f_fpregs, 96))
			goto out;
		return 0;
	}

	/* Fetch the first longword of the fsave frame: version + size. */
	if (__get_user(*(long *)fpstate, (long __user *)&uc->uc_fpstate))
		goto out;
	if (CPU_IS_060 ? fpstate[2] : fpstate[0]) {
		if (!CPU_IS_060)
			context_size = fpstate[1];
		/* Verify the frame format. */
		if (!CPU_IS_060 && (fpstate[0] != fpu_version))
			goto out;
		if (CPU_IS_020_OR_030) {
			if (m68k_fputype & FPU_68881 &&
			    !(context_size == 0x18 || context_size == 0xb4))
				goto out;
			if (m68k_fputype & FPU_68882 &&
			    !(context_size == 0x38 || context_size == 0xd4))
				goto out;
		} else if (CPU_IS_040) {
			if (!(context_size == 0x00 ||
			      context_size == 0x28 ||
			      context_size == 0x60))
				goto out;
		} else if (CPU_IS_060) {
			if (!(fpstate[3] == 0x00 ||
			      fpstate[3] == 0x60 ||
			      fpstate[3] == 0xe0))
				goto out;
		} else
			goto out;
		if (__copy_from_user(&fpregs, &uc->uc_mcontext.fpregs,
				     sizeof(fpregs)))
			goto out;
		/* Full fp0-fp7 plus control registers for RT frames. */
		__asm__ volatile (".chip 68k/68881\n\t"
				  "fmovemx %0,%%fp0-%%fp7\n\t"
				  "fmoveml %1,%%fpcr/%%fpsr/%%fpiar\n\t"
				  ".chip 68k"
				  : /* no outputs */
				  : "m" (*fpregs.f_fpregs),
				    "m" (*fpregs.f_fpcntl));
	}

	/* Pull in the rest of the fsave image past the first longword. */
	if (context_size &&
	    __copy_from_user(fpstate + 4, (long __user *)&uc->uc_fpstate + 1,
			     context_size))
		goto out;
	__asm__ volatile (".chip 68k/68881\n\t"
			  "frestore %0\n\t"
			  ".chip 68k" : : "m" (*fpstate));
	err = 0;

out:
	return err;
}
  275. static int mangle_kernel_stack(struct pt_regs *regs, int formatvec,
  276. void __user *fp)
  277. {
  278. int fsize = frame_extra_sizes[formatvec >> 12];
  279. if (fsize < 0) {
  280. /*
  281. * user process trying to return with weird frame format
  282. */
  283. #ifdef DEBUG
  284. printk("user process returning with weird frame format\n");
  285. #endif
  286. return 1;
  287. }
  288. if (!fsize) {
  289. regs->format = formatvec >> 12;
  290. regs->vector = formatvec & 0xfff;
  291. } else {
  292. struct switch_stack *sw = (struct switch_stack *)regs - 1;
  293. unsigned long buf[fsize / 2]; /* yes, twice as much */
  294. /* that'll make sure that expansion won't crap over data */
  295. if (copy_from_user(buf + fsize / 4, fp, fsize))
  296. return 1;
  297. /* point of no return */
  298. regs->format = formatvec >> 12;
  299. regs->vector = formatvec & 0xfff;
  300. #define frame_offset (sizeof(struct pt_regs)+sizeof(struct switch_stack))
  301. __asm__ __volatile__
  302. (" movel %0,%/a0\n\t"
  303. " subl %1,%/a0\n\t" /* make room on stack */
  304. " movel %/a0,%/sp\n\t" /* set stack pointer */
  305. /* move switch_stack and pt_regs */
  306. "1: movel %0@+,%/a0@+\n\t"
  307. " dbra %2,1b\n\t"
  308. " lea %/sp@(%c3),%/a0\n\t" /* add offset of fmt */
  309. " lsrl #2,%1\n\t"
  310. " subql #1,%1\n\t"
  311. /* copy to the gap we'd made */
  312. "2: movel %4@+,%/a0@+\n\t"
  313. " dbra %1,2b\n\t"
  314. " bral ret_from_signal\n"
  315. : /* no outputs, it doesn't ever return */
  316. : "a" (sw), "d" (fsize), "d" (frame_offset/4-1),
  317. "n" (frame_offset), "a" (buf + fsize/4)
  318. : "a0");
  319. #undef frame_offset
  320. }
  321. return 0;
  322. }
/*
 * Restore CPU and FPU state from a classic sigcontext on sigreturn.
 * @fp points just past the sigframe, where setup_frame() stashed any
 * extra exception-frame words.  Returns 0 on success, 1 on a bad frame
 * (caller sends SIGSEGV).
 */
static inline int
restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *usc, void __user *fp)
{
	int formatvec;
	struct sigcontext context;
	int err;

	/* Always make any pending restarted system calls return -EINTR */
	current_thread_info()->restart_block.fn = do_no_restart_syscall;

	/* get previous context */
	if (copy_from_user(&context, usc, sizeof(context)))
		goto badframe;

	/* restore passed registers */
	regs->d0 = context.sc_d0;
	regs->d1 = context.sc_d1;
	regs->a0 = context.sc_a0;
	regs->a1 = context.sc_a1;
	/* Only the user byte of SR may be restored from userspace. */
	regs->sr = (regs->sr & 0xff00) | (context.sc_sr & 0xff);
	regs->pc = context.sc_pc;
	regs->orig_d0 = -1;		/* disable syscall checks */
	wrusp(context.sc_usp);
	formatvec = context.sc_formatvec;

	err = restore_fpu_state(&context);

	/* mangle_kernel_stack() may never return (see its comment). */
	if (err || mangle_kernel_stack(regs, formatvec, fp))
		goto badframe;

	return 0;

badframe:
	return 1;
}
/*
 * Restore CPU, FPU and sigaltstack state from an RT frame's ucontext
 * (user memory) on rt_sigreturn.  Returns 0 on success, 1 on a bad
 * frame (caller sends SIGSEGV).
 */
static inline int
rt_restore_ucontext(struct pt_regs *regs, struct switch_stack *sw,
		    struct ucontext __user *uc)
{
	int temp;
	greg_t __user *gregs = uc->uc_mcontext.gregs;
	unsigned long usp;
	int err;

	/* Always make any pending restarted system calls return -EINTR */
	current_thread_info()->restart_block.fn = do_no_restart_syscall;

	err = __get_user(temp, &uc->uc_mcontext.version);
	if (temp != MCONTEXT_VERSION)
		goto badframe;
	/* restore passed registers; callee-saved ones go via switch_stack */
	err |= __get_user(regs->d0, &gregs[0]);
	err |= __get_user(regs->d1, &gregs[1]);
	err |= __get_user(regs->d2, &gregs[2]);
	err |= __get_user(regs->d3, &gregs[3]);
	err |= __get_user(regs->d4, &gregs[4]);
	err |= __get_user(regs->d5, &gregs[5]);
	err |= __get_user(sw->d6, &gregs[6]);
	err |= __get_user(sw->d7, &gregs[7]);
	err |= __get_user(regs->a0, &gregs[8]);
	err |= __get_user(regs->a1, &gregs[9]);
	err |= __get_user(regs->a2, &gregs[10]);
	err |= __get_user(sw->a3, &gregs[11]);
	err |= __get_user(sw->a4, &gregs[12]);
	err |= __get_user(sw->a5, &gregs[13]);
	err |= __get_user(sw->a6, &gregs[14]);
	err |= __get_user(usp, &gregs[15]);
	wrusp(usp);
	err |= __get_user(regs->pc, &gregs[16]);
	err |= __get_user(temp, &gregs[17]);
	/* Only the user byte of SR may be restored from userspace. */
	regs->sr = (regs->sr & 0xff00) | (temp & 0xff);
	regs->orig_d0 = -1;		/* disable syscall checks */
	err |= __get_user(temp, &uc->uc_formatvec);

	err |= rt_restore_fpu_state(uc);

	if (err || do_sigaltstack(&uc->uc_stack, NULL, usp) == -EFAULT)
		goto badframe;

	/* mangle_kernel_stack() may never return (see its comment). */
	if (mangle_kernel_stack(regs, temp, &uc->uc_extra))
		goto badframe;

	return 0;

badframe:
	return 1;
}
/*
 * sigreturn(2) entry: unwind the classic signal frame built by
 * setup_frame().  The switch_stack/pt_regs layout is recovered from
 * the syscall argument's stack position.  usp - 4 accounts for the
 * pretcode word the handler's return already popped.
 */
asmlinkage int do_sigreturn(unsigned long __unused)
{
	struct switch_stack *sw = (struct switch_stack *) &__unused;
	struct pt_regs *regs = (struct pt_regs *) (sw + 1);
	unsigned long usp = rdusp();
	struct sigframe __user *frame = (struct sigframe __user *)(usp - 4);
	sigset_t set;

	if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
		goto badframe;
	if (__get_user(set.sig[0], &frame->sc.sc_mask) ||
	    (_NSIG_WORDS > 1 &&
	     __copy_from_user(&set.sig[1], &frame->extramask,
			      sizeof(frame->extramask))))
		goto badframe;

	sigdelsetmask(&set, ~_BLOCKABLE);
	current->blocked = set;
	recalc_sigpending();

	/* frame + 1 is where any extra exception-frame words were saved. */
	if (restore_sigcontext(regs, &frame->sc, frame + 1))
		goto badframe;
	return regs->d0;

badframe:
	force_sig(SIGSEGV, current);
	return 0;
}
/*
 * rt_sigreturn(2) entry: unwind the RT signal frame built by
 * setup_rt_frame().  See do_sigreturn() for the frame-pointer
 * arithmetic; usp - 4 accounts for the popped pretcode word.
 */
asmlinkage int do_rt_sigreturn(unsigned long __unused)
{
	struct switch_stack *sw = (struct switch_stack *) &__unused;
	struct pt_regs *regs = (struct pt_regs *) (sw + 1);
	unsigned long usp = rdusp();
	struct rt_sigframe __user *frame = (struct rt_sigframe __user *)(usp - 4);
	sigset_t set;

	if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
		goto badframe;
	if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
		goto badframe;

	sigdelsetmask(&set, ~_BLOCKABLE);
	current->blocked = set;
	recalc_sigpending();

	if (rt_restore_ucontext(regs, sw, &frame->uc))
		goto badframe;
	return regs->d0;

badframe:
	force_sig(SIGSEGV, current);
	return 0;
}
/*
 * Set up a signal frame.
 */

/*
 * Dump the current FPU state into a sigcontext: the raw fsave image
 * plus fp0/fp1 and the control registers (all a classic sigcontext
 * can hold).  Also records fpu_version for later frame validation.
 */
static inline void save_fpu_state(struct sigcontext *sc, struct pt_regs *regs)
{
	if (FPU_IS_EMU) {
		/* save registers */
		memcpy(sc->sc_fpcntl, current->thread.fpcntl, 12);
		memcpy(sc->sc_fpregs, current->thread.fp, 24);
		return;
	}

	__asm__ volatile (".chip 68k/68881\n\t"
			  "fsave %0\n\t"
			  ".chip 68k"
			  : : "m" (*sc->sc_fpstate) : "memory");

	/* Non-null frame: also dump the programmer-visible registers. */
	if (CPU_IS_060 ? sc->sc_fpstate[2] : sc->sc_fpstate[0]) {
		fpu_version = sc->sc_fpstate[0];
		if (CPU_IS_020_OR_030 &&
		    regs->vector >= (VEC_FPBRUC * 4) &&
		    regs->vector <= (VEC_FPNAN * 4)) {
			/* Clear pending exception in 68882 idle frame */
			if (*(unsigned short *) sc->sc_fpstate == 0x1f38)
				sc->sc_fpstate[0x38] |= 1 << 3;
		}
		__asm__ volatile (".chip 68k/68881\n\t"
				  "fmovemx %%fp0-%%fp1,%0\n\t"
				  "fmoveml %%fpcr/%%fpsr/%%fpiar,%1\n\t"
				  ".chip 68k"
				  : "=m" (*sc->sc_fpregs),
				    "=m" (*sc->sc_fpcntl)
				  : /* no inputs */
				  : "memory");
	}
}
/*
 * Dump the current FPU state into an RT frame's ucontext (user memory):
 * the raw fsave image goes into uc_fpstate and the full fp0-fp7 set
 * plus control registers into uc_mcontext.fpregs.
 * Returns non-zero on any faulting user copy.
 */
static inline int rt_save_fpu_state(struct ucontext __user *uc, struct pt_regs *regs)
{
	unsigned char fpstate[FPCONTEXT_SIZE];
	int context_size = CPU_IS_060 ? 8 : 0;
	int err = 0;

	if (FPU_IS_EMU) {
		/* save fpu control register */
		err |= copy_to_user(uc->uc_mcontext.fpregs.f_fpcntl,
				    current->thread.fpcntl, 12);
		/* save all other fpu register */
		err |= copy_to_user(uc->uc_mcontext.fpregs.f_fpregs,
				    current->thread.fp, 96);
		return err;
	}

	__asm__ volatile (".chip 68k/68881\n\t"
			  "fsave %0\n\t"
			  ".chip 68k"
			  : : "m" (*fpstate) : "memory");

	/* First longword (version + size) always goes out. */
	err |= __put_user(*(long *)fpstate, (long __user *)&uc->uc_fpstate);
	if (CPU_IS_060 ? fpstate[2] : fpstate[0]) {
		fpregset_t fpregs;
		if (!CPU_IS_060)
			context_size = fpstate[1];
		fpu_version = fpstate[0];
		if (CPU_IS_020_OR_030 &&
		    regs->vector >= (VEC_FPBRUC * 4) &&
		    regs->vector <= (VEC_FPNAN * 4)) {
			/* Clear pending exception in 68882 idle frame */
			if (*(unsigned short *) fpstate == 0x1f38)
				fpstate[0x38] |= 1 << 3;
		}
		__asm__ volatile (".chip 68k/68881\n\t"
				  "fmovemx %%fp0-%%fp7,%0\n\t"
				  "fmoveml %%fpcr/%%fpsr/%%fpiar,%1\n\t"
				  ".chip 68k"
				  : "=m" (*fpregs.f_fpregs),
				    "=m" (*fpregs.f_fpcntl)
				  : /* no inputs */
				  : "memory");
		err |= copy_to_user(&uc->uc_mcontext.fpregs, &fpregs,
				    sizeof(fpregs));
	}
	/* Rest of the fsave image past the first longword, if any. */
	if (context_size)
		err |= copy_to_user((long __user *)&uc->uc_fpstate + 1, fpstate + 4,
				    context_size);
	return err;
}
  522. static void setup_sigcontext(struct sigcontext *sc, struct pt_regs *regs,
  523. unsigned long mask)
  524. {
  525. sc->sc_mask = mask;
  526. sc->sc_usp = rdusp();
  527. sc->sc_d0 = regs->d0;
  528. sc->sc_d1 = regs->d1;
  529. sc->sc_a0 = regs->a0;
  530. sc->sc_a1 = regs->a1;
  531. sc->sc_sr = regs->sr;
  532. sc->sc_pc = regs->pc;
  533. sc->sc_formatvec = regs->format << 12 | regs->vector;
  534. save_fpu_state(sc, regs);
  535. }
/*
 * Fill an RT frame's ucontext (user memory) with the full register
 * state: scratch registers from pt_regs, callee-saved ones from the
 * switch_stack just below it, plus usp/pc/sr, the format/vector word,
 * and the FPU state.  Returns non-zero on any faulting user store.
 */
static inline int rt_setup_ucontext(struct ucontext __user *uc, struct pt_regs *regs)
{
	struct switch_stack *sw = (struct switch_stack *)regs - 1;
	greg_t __user *gregs = uc->uc_mcontext.gregs;
	int err = 0;

	err |= __put_user(MCONTEXT_VERSION, &uc->uc_mcontext.version);
	err |= __put_user(regs->d0, &gregs[0]);
	err |= __put_user(regs->d1, &gregs[1]);
	err |= __put_user(regs->d2, &gregs[2]);
	err |= __put_user(regs->d3, &gregs[3]);
	err |= __put_user(regs->d4, &gregs[4]);
	err |= __put_user(regs->d5, &gregs[5]);
	err |= __put_user(sw->d6, &gregs[6]);
	err |= __put_user(sw->d7, &gregs[7]);
	err |= __put_user(regs->a0, &gregs[8]);
	err |= __put_user(regs->a1, &gregs[9]);
	err |= __put_user(regs->a2, &gregs[10]);
	err |= __put_user(sw->a3, &gregs[11]);
	err |= __put_user(sw->a4, &gregs[12]);
	err |= __put_user(sw->a5, &gregs[13]);
	err |= __put_user(sw->a6, &gregs[14]);
	err |= __put_user(rdusp(), &gregs[15]);
	err |= __put_user(regs->pc, &gregs[16]);
	err |= __put_user(regs->sr, &gregs[17]);
	err |= __put_user((regs->format << 12) | regs->vector, &uc->uc_formatvec);
	err |= rt_save_fpu_state(uc, regs);
	return err;
}
/*
 * Flush the 8-byte signal trampoline at @vaddr out of the data cache
 * and invalidate it in the instruction cache, so the CPU fetches the
 * code we just wrote.  vaddr is long-word aligned by frame layout.
 */
static inline void push_cache (unsigned long vaddr)
{
	/*
	 * Using the old cache_push_v() was really a big waste.
	 *
	 * What we are trying to do is to flush 8 bytes to ram.
	 * Flushing 2 cache lines of 16 bytes is much cheaper than
	 * flushing 1 or 2 pages, as previously done in
	 * cache_push_v().
	 *   Jes
	 */
	if (CPU_IS_040) {
		unsigned long temp;

		/* ptestr + mmusr: translate vaddr to a physical address
		 * for cpushl, which takes a physical operand on '040. */
		__asm__ __volatile__ (".chip 68040\n\t"
				      "nop\n\t"
				      "ptestr (%1)\n\t"
				      "movec %%mmusr,%0\n\t"
				      ".chip 68k"
				      : "=r" (temp)
				      : "a" (vaddr));

		temp &= PAGE_MASK;
		temp |= vaddr & ~PAGE_MASK;

		__asm__ __volatile__ (".chip 68040\n\t"
				      "nop\n\t"
				      "cpushl %%bc,(%0)\n\t"
				      ".chip 68k"
				      : : "a" (temp));
	}
	else if (CPU_IS_060) {
		unsigned long temp;
		/* plpar: '060 equivalent of the translation above. */
		__asm__ __volatile__ (".chip 68060\n\t"
				      "plpar (%0)\n\t"
				      ".chip 68k"
				      : "=a" (temp)
				      : "0" (vaddr));
		__asm__ __volatile__ (".chip 68060\n\t"
				      "cpushl %%bc,(%0)\n\t"
				      ".chip 68k"
				      : : "a" (temp));
	}
	else {
		/*
		 * 68030/68020 have no writeback cache;
		 * still need to clear icache.
		 * Note that vaddr is guaranteed to be long word aligned.
		 */
		unsigned long temp;
		asm volatile ("movec %%cacr,%0" : "=r" (temp));
		temp += 4;	/* set the clear-cache-entry bit in cacr */
		asm volatile ("movec %0,%%caar\n\t"
			      "movec %1,%%cacr"
			      : : "r" (vaddr), "r" (temp));
		asm volatile ("movec %0,%%caar\n\t"
			      "movec %1,%%cacr"
			      : : "r" (vaddr + 4), "r" (temp));
	}
}
  621. static inline void __user *
  622. get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size)
  623. {
  624. unsigned long usp;
  625. /* Default to using normal stack. */
  626. usp = rdusp();
  627. /* This is the X/Open sanctioned signal stack switching. */
  628. if (ka->sa.sa_flags & SA_ONSTACK) {
  629. if (!sas_ss_flags(usp))
  630. usp = current->sas_ss_sp + current->sas_ss_size;
  631. }
  632. return (void __user *)((usp - frame_size) & -8UL);
  633. }
/*
 * Build a classic (non-RT) signal frame on the user stack and point
 * the registers at the handler.  Returns 0 on success; on failure
 * forces SIGSEGV and returns the error.
 */
static int setup_frame (int sig, struct k_sigaction *ka,
			sigset_t *set, struct pt_regs *regs)
{
	struct sigframe __user *frame;
	int fsize = frame_extra_sizes[regs->format];
	struct sigcontext context;
	int err = 0;

	if (fsize < 0) {
#ifdef DEBUG
		printk ("setup_frame: Unknown frame format %#x\n",
			regs->format);
#endif
		goto give_sigsegv;
	}

	frame = get_sigframe(ka, regs, sizeof(*frame) + fsize);

	/* Stash any extra exception-frame words just past the frame;
	 * restore_sigcontext() reads them back from there. */
	if (fsize)
		err |= copy_to_user (frame + 1, regs + 1, fsize);

	/* Translate the signal number through the exec domain, if any. */
	err |= __put_user((current_thread_info()->exec_domain
			   && current_thread_info()->exec_domain->signal_invmap
			   && sig < 32
			   ? current_thread_info()->exec_domain->signal_invmap[sig]
			   : sig),
			  &frame->sig);

	err |= __put_user(regs->vector, &frame->code);
	err |= __put_user(&frame->sc, &frame->psc);

	if (_NSIG_WORDS > 1)
		err |= copy_to_user(frame->extramask, &set->sig[1],
				    sizeof(frame->extramask));

	setup_sigcontext(&context, regs, set->sig[0]);
	err |= copy_to_user (&frame->sc, &context, sizeof(context));

	/* Set up to return from userspace. */
	err |= __put_user(frame->retcode, &frame->pretcode);
	/* moveq #,d0; trap #0 */
	err |= __put_user(0x70004e40 + (__NR_sigreturn << 16),
			  (long __user *)(frame->retcode));

	if (err)
		goto give_sigsegv;

	/* The trampoline was just written as data; sync the caches. */
	push_cache ((unsigned long) &frame->retcode);

	/*
	 * Set up registers for signal handler.  All the state we are about
	 * to destroy is successfully copied to sigframe.
	 */
	wrusp ((unsigned long) frame);
	regs->pc = (unsigned long) ka->sa.sa_handler;

	/*
	 * This is subtle; if we build more than one sigframe, all but the
	 * first one will see frame format 0 and have fsize == 0, so we won't
	 * screw stkadj.
	 */
	if (fsize)
		regs->stkadj = fsize;

	/* Prepare to skip over the extra stuff in the exception frame. */
	if (regs->stkadj) {
		struct pt_regs *tregs =
			(struct pt_regs *)((ulong)regs + regs->stkadj);
#ifdef DEBUG
		printk("Performing stackadjust=%04x\n", regs->stkadj);
#endif
		/* This must be copied with decreasing addresses to
		   handle overlaps.  */
		tregs->vector = 0;
		tregs->format = 0;
		tregs->pc = regs->pc;
		tregs->sr = regs->sr;
	}
	return 0;

give_sigsegv:
	force_sigsegv(sig, current);
	return err;
}
/*
 * Build an RT signal frame (siginfo + full ucontext) on the user stack
 * and point the registers at the handler.  Returns 0 on success; on
 * failure forces SIGSEGV and returns the error.
 */
static int setup_rt_frame (int sig, struct k_sigaction *ka, siginfo_t *info,
			   sigset_t *set, struct pt_regs *regs)
{
	struct rt_sigframe __user *frame;
	int fsize = frame_extra_sizes[regs->format];
	int err = 0;

	if (fsize < 0) {
#ifdef DEBUG
		printk ("setup_frame: Unknown frame format %#x\n",
			regs->format);
#endif
		goto give_sigsegv;
	}

	frame = get_sigframe(ka, regs, sizeof(*frame));

	/* Extra exception-frame words go into uc_extra;
	 * rt_restore_ucontext() reads them back from there. */
	if (fsize)
		err |= copy_to_user (&frame->uc.uc_extra, regs + 1, fsize);

	/* Translate the signal number through the exec domain, if any. */
	err |= __put_user((current_thread_info()->exec_domain
			   && current_thread_info()->exec_domain->signal_invmap
			   && sig < 32
			   ? current_thread_info()->exec_domain->signal_invmap[sig]
			   : sig),
			  &frame->sig);
	err |= __put_user(&frame->info, &frame->pinfo);
	err |= __put_user(&frame->uc, &frame->puc);
	err |= copy_siginfo_to_user(&frame->info, info);

	/* Create the ucontext. */
	err |= __put_user(0, &frame->uc.uc_flags);
	err |= __put_user(NULL, &frame->uc.uc_link);
	err |= __put_user((void __user *)current->sas_ss_sp,
			  &frame->uc.uc_stack.ss_sp);
	err |= __put_user(sas_ss_flags(rdusp()),
			  &frame->uc.uc_stack.ss_flags);
	err |= __put_user(current->sas_ss_size, &frame->uc.uc_stack.ss_size);
	err |= rt_setup_ucontext(&frame->uc, regs);
	err |= copy_to_user (&frame->uc.uc_sigmask, set, sizeof(*set));

	/* Set up to return from userspace. */
	err |= __put_user(frame->retcode, &frame->pretcode);
#ifdef __mcoldfire__
	/* movel #__NR_rt_sigreturn,d0; trap #0 */
	err |= __put_user(0x203c0000, (long __user *)(frame->retcode + 0));
	err |= __put_user(0x00004e40 + (__NR_rt_sigreturn << 16),
			  (long __user *)(frame->retcode + 4));
#else
	/* moveq #,d0; notb d0; trap #0 */
	err |= __put_user(0x70004600 + ((__NR_rt_sigreturn ^ 0xff) << 16),
			  (long __user *)(frame->retcode + 0));
	err |= __put_user(0x4e40, (short __user *)(frame->retcode + 4));
#endif

	if (err)
		goto give_sigsegv;

	/* The trampoline was just written as data; sync the caches. */
	push_cache ((unsigned long) &frame->retcode);

	/*
	 * Set up registers for signal handler.  All the state we are about
	 * to destroy is successfully copied to sigframe.
	 */
	wrusp ((unsigned long) frame);
	regs->pc = (unsigned long) ka->sa.sa_handler;

	/*
	 * This is subtle; if we build more than one sigframe, all but the
	 * first one will see frame format 0 and have fsize == 0, so we won't
	 * screw stkadj.
	 */
	if (fsize)
		regs->stkadj = fsize;

	/* Prepare to skip over the extra stuff in the exception frame. */
	if (regs->stkadj) {
		struct pt_regs *tregs =
			(struct pt_regs *)((ulong)regs + regs->stkadj);
#ifdef DEBUG
		printk("Performing stackadjust=%04x\n", regs->stkadj);
#endif
		/* This must be copied with decreasing addresses to
		   handle overlaps.  */
		tregs->vector = 0;
		tregs->format = 0;
		tregs->pc = regs->pc;
		tregs->sr = regs->sr;
	}
	return 0;

give_sigsegv:
	force_sigsegv(sig, current);
	return err;
}
/*
 * Apply syscall-restart semantics to the interrupted syscall's return
 * value in d0, either because a handler is about to run (@has_handler)
 * or because no handler exists.  @ka may be NULL when has_handler is 0.
 * Restarting rewinds pc by 2, the size of the trap instruction.
 */
static inline void
handle_restart(struct pt_regs *regs, struct k_sigaction *ka, int has_handler)
{
	switch (regs->d0) {
	case -ERESTARTNOHAND:
		/* Restart only if no handler runs; else fail with EINTR. */
		if (!has_handler)
			goto do_restart;
		regs->d0 = -EINTR;
		break;

	case -ERESTART_RESTARTBLOCK:
		/* Restart via the restart_syscall trampoline. */
		if (!has_handler) {
			regs->d0 = __NR_restart_syscall;
			regs->pc -= 2;
			break;
		}
		regs->d0 = -EINTR;
		break;

	case -ERESTARTSYS:
		/* Restart unless the handler opted out of SA_RESTART. */
		if (has_handler && !(ka->sa.sa_flags & SA_RESTART)) {
			regs->d0 = -EINTR;
			break;
		}
	/* fallthrough */
	case -ERESTARTNOINTR:
	do_restart:
		/* Re-execute the trap with the original d0 argument. */
		regs->d0 = regs->orig_d0;
		regs->pc -= 2;
		break;
	}
}
  817. void ptrace_signal_deliver(struct pt_regs *regs, void *cookie)
  818. {
  819. if (regs->orig_d0 < 0)
  820. return;
  821. switch (regs->d0) {
  822. case -ERESTARTNOHAND:
  823. case -ERESTARTSYS:
  824. case -ERESTARTNOINTR:
  825. regs->d0 = regs->orig_d0;
  826. regs->orig_d0 = -1;
  827. regs->pc -= 2;
  828. break;
  829. }
  830. }
/*
 * OK, we're invoking a handler: build the signal frame, update the
 * blocked mask, and arrange single-step SIGTRAP delivery if the
 * tracer asked for it.  Does nothing further if frame setup failed
 * (setup_*frame already forced SIGSEGV).
 */
static void
handle_signal(int sig, struct k_sigaction *ka, siginfo_t *info,
	      sigset_t *oldset, struct pt_regs *regs)
{
	int err;
	/* are we from a system call? */
	if (regs->orig_d0 >= 0)
		/* If so, check system call restarting.. */
		handle_restart(regs, ka, 1);

	/* set up the stack frame */
	if (ka->sa.sa_flags & SA_SIGINFO)
		err = setup_rt_frame(sig, ka, info, oldset, regs);
	else
		err = setup_frame(sig, ka, oldset, regs);

	if (err)
		return;

	/* Block the handler's sa_mask, and the signal itself unless
	 * SA_NODEFER was requested. */
	sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
	if (!(ka->sa.sa_flags & SA_NODEFER))
		sigaddset(&current->blocked,sig);
	recalc_sigpending();

	if (test_thread_flag(TIF_DELAYED_TRACE)) {
		regs->sr &= ~0x8000;	/* drop the trace bit in SR */
		send_sig(SIGTRAP, current, 1);
	}

	clear_thread_flag(TIF_RESTORE_SIGMASK);
}
/*
 * Note that 'init' is a special process: it doesn't get signals it doesn't
 * want to handle.  Thus you cannot kill init even with a SIGKILL even by
 * mistake.
 *
 * Main signal-delivery entry, called on the way back to user mode:
 * deliver one pending signal if there is one, otherwise handle
 * syscall restarting and restore any saved sigmask.
 */
asmlinkage void do_signal(struct pt_regs *regs)
{
	siginfo_t info;
	struct k_sigaction ka;
	int signr;
	sigset_t *oldset;

	/* Remember the exception frame location for ptrace et al. */
	current->thread.esp0 = (unsigned long) regs;

	if (test_thread_flag(TIF_RESTORE_SIGMASK))
		oldset = &current->saved_sigmask;
	else
		oldset = &current->blocked;

	signr = get_signal_to_deliver(&info, &ka, regs, NULL);
	if (signr > 0) {
		/* Whee!  Actually deliver the signal.  */
		handle_signal(signr, &ka, &info, oldset, regs);
		return;
	}

	/* Did we come from a system call? */
	if (regs->orig_d0 >= 0)
		/* Restart the system call - no handlers present */
		handle_restart(regs, NULL, 0);

	/* If there's no signal to deliver, we just restore the saved mask.  */
	if (test_thread_flag(TIF_RESTORE_SIGMASK)) {
		clear_thread_flag(TIF_RESTORE_SIGMASK);
		sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
	}
}