/* $Id: signal.c,v 1.60 2002/02/09 19:49:31 davem Exp $
 * arch/sparc64/kernel/signal.c
 *
 * Copyright (C) 1991, 1992 Linus Torvalds
 * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
 * Copyright (C) 1996 Miguel de Icaza (miguel@nuclecu.unam.mx)
 * Copyright (C) 1997 Eddie C. Dost (ecd@skynet.be)
 * Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 */
#include <linux/config.h>
#ifdef CONFIG_SPARC32_COMPAT
#include <linux/compat.h>	/* for compat_old_sigset_t */
#endif
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/errno.h>
#include <linux/wait.h>
#include <linux/ptrace.h>
#include <linux/unistd.h>
#include <linux/mm.h>
#include <linux/tty.h>
#include <linux/smp_lock.h>
#include <linux/binfmts.h>
#include <linux/bitops.h>

#include <asm/uaccess.h>
#include <asm/ptrace.h>
#include <asm/svr4.h>
#include <asm/pgtable.h>
#include <asm/fpumacro.h>
#include <asm/uctx.h>
#include <asm/siginfo.h>
#include <asm/visasm.h>
/* Mask of signals that may be blocked: everything except SIGKILL and SIGSTOP. */
#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))

/* Forward declaration; defined at the bottom of this file. */
static int do_signal(sigset_t *oldset, struct pt_regs * regs,
		     unsigned long orig_o0, int ret_from_syscall);
/* {set, get}context() needed for 64-bit SparcLinux userland. */

/* Install a user-supplied struct ucontext (pointed to by %o0) as the
 * current register state.  If %o1 is non-zero, the signal mask stored
 * in the ucontext is installed as well.  Any fault or malformed
 * context forces SIGSEGV on the caller.
 */
asmlinkage void sparc64_set_context(struct pt_regs *regs)
{
	struct ucontext __user *ucp = (struct ucontext __user *)
		regs->u_regs[UREG_I0];
	mc_gregset_t __user *grp;
	unsigned long pc, npc, tstate;
	unsigned long fp, i7;
	unsigned char fenab;
	int err;

	flush_user_windows();
	/* Refuse if user windows remain unflushed, the pointer is not
	 * long-aligned, or the ucontext is not fully user-accessible.
	 */
	if (get_thread_wsaved() ||
	    (((unsigned long)ucp) & (sizeof(unsigned long)-1)) ||
	    (!__access_ok(ucp, sizeof(*ucp))))
		goto do_sigsegv;
	grp = &ucp->uc_mcontext.mc_gregs;
	err = __get_user(pc, &((*grp)[MC_PC]));
	err |= __get_user(npc, &((*grp)[MC_NPC]));
	/* Instruction addresses must be 4-byte aligned. */
	if (err || ((pc | npc) & 3))
		goto do_sigsegv;
	/* Non-zero %o1 requests restoring the saved signal mask too. */
	if (regs->u_regs[UREG_I1]) {
		sigset_t set;

		if (_NSIG_WORDS == 1) {
			if (__get_user(set.sig[0], &ucp->uc_sigmask.sig[0]))
				goto do_sigsegv;
		} else {
			if (__copy_from_user(&set, &ucp->uc_sigmask, sizeof(sigset_t)))
				goto do_sigsegv;
		}
		/* SIGKILL and SIGSTOP can never be blocked. */
		sigdelsetmask(&set, ~_BLOCKABLE);
		spin_lock_irq(&current->sighand->siglock);
		current->blocked = set;
		recalc_sigpending();
		spin_unlock_irq(&current->sighand->siglock);
	}
	if (test_thread_flag(TIF_32BIT)) {
		pc &= 0xffffffff;
		npc &= 0xffffffff;
	}
	regs->tpc = pc;
	regs->tnpc = npc;
	err |= __get_user(regs->y, &((*grp)[MC_Y]));
	err |= __get_user(tstate, &((*grp)[MC_TSTATE]));
	/* User may only change condition codes and %asi in %tstate;
	 * all other (privileged) bits are preserved.
	 */
	regs->tstate &= ~(TSTATE_ASI | TSTATE_ICC | TSTATE_XCC);
	regs->tstate |= (tstate & (TSTATE_ASI | TSTATE_ICC | TSTATE_XCC));
	err |= __get_user(regs->u_regs[UREG_G1], (&(*grp)[MC_G1]));
	err |= __get_user(regs->u_regs[UREG_G2], (&(*grp)[MC_G2]));
	err |= __get_user(regs->u_regs[UREG_G3], (&(*grp)[MC_G3]));
	err |= __get_user(regs->u_regs[UREG_G4], (&(*grp)[MC_G4]));
	err |= __get_user(regs->u_regs[UREG_G5], (&(*grp)[MC_G5]));
	err |= __get_user(regs->u_regs[UREG_G6], (&(*grp)[MC_G6]));
	err |= __get_user(regs->u_regs[UREG_G7], (&(*grp)[MC_G7]));
	err |= __get_user(regs->u_regs[UREG_I0], (&(*grp)[MC_O0]));
	err |= __get_user(regs->u_regs[UREG_I1], (&(*grp)[MC_O1]));
	err |= __get_user(regs->u_regs[UREG_I2], (&(*grp)[MC_O2]));
	err |= __get_user(regs->u_regs[UREG_I3], (&(*grp)[MC_O3]));
	err |= __get_user(regs->u_regs[UREG_I4], (&(*grp)[MC_O4]));
	err |= __get_user(regs->u_regs[UREG_I5], (&(*grp)[MC_O5]));
	err |= __get_user(regs->u_regs[UREG_I6], (&(*grp)[MC_O6]));
	err |= __get_user(regs->u_regs[UREG_I7], (&(*grp)[MC_O7]));

	err |= __get_user(fp, &(ucp->uc_mcontext.mc_fp));
	err |= __get_user(i7, &(ucp->uc_mcontext.mc_i7));
	/* Write the restored frame pointer and return address into the
	 * register-window save area on the user stack.
	 */
	err |= __put_user(fp,
	      (&(((struct reg_window __user *)(STACK_BIAS+regs->u_regs[UREG_I6]))->ins[6])));
	err |= __put_user(i7,
	      (&(((struct reg_window __user *)(STACK_BIAS+regs->u_regs[UREG_I6]))->ins[7])));

	err |= __get_user(fenab, &(ucp->uc_mcontext.mc_fpregs.mcfpu_enab));
	if (fenab) {
		unsigned long *fpregs = current_thread_info()->fpregs;
		unsigned long fprs;

		fprs_write(0);
		err |= __get_user(fprs, &(ucp->uc_mcontext.mc_fpregs.mcfpu_fprs));
		/* Lower and upper halves of the FP register file are
		 * restored independently, per FPRS_DL/FPRS_DU.
		 */
		if (fprs & FPRS_DL)
			err |= copy_from_user(fpregs,
					      &(ucp->uc_mcontext.mc_fpregs.mcfpu_fregs),
					      (sizeof(unsigned int) * 32));
		if (fprs & FPRS_DU)
			err |= copy_from_user(fpregs+16,
			 ((unsigned long __user *)&(ucp->uc_mcontext.mc_fpregs.mcfpu_fregs))+16,
			 (sizeof(unsigned int) * 32));
		err |= __get_user(current_thread_info()->xfsr[0],
				  &(ucp->uc_mcontext.mc_fpregs.mcfpu_fsr));
		err |= __get_user(current_thread_info()->gsr[0],
				  &(ucp->uc_mcontext.mc_fpregs.mcfpu_gsr));
		/* Clear PEF so the restored FPU state is reloaded lazily
		 * on next FPU use -- presumably via the fpdis trap; confirm.
		 */
		regs->tstate &= ~TSTATE_PEF;
	}
	if (err)
		goto do_sigsegv;
	return;
do_sigsegv:
	force_sig(SIGSEGV, current);
}
/* Dump the current register state into a user-supplied struct ucontext
 * pointed to by %o0.  On any fault SIGSEGV is forced on the caller.
 */
asmlinkage void sparc64_get_context(struct pt_regs *regs)
{
	struct ucontext __user *ucp = (struct ucontext __user *)
		regs->u_regs[UREG_I0];
	mc_gregset_t __user *grp;
	mcontext_t __user *mcp;
	unsigned long fp, i7;
	unsigned char fenab;
	int err;

	synchronize_user_stack();
	/* Zero the whole destination first; fields not written below
	 * stay zero.
	 */
	if (get_thread_wsaved() || clear_user(ucp, sizeof(*ucp)))
		goto do_sigsegv;

#if 1
	fenab = 0; /* IMO get_context is like any other system call, thus modifies FPU state -jj */
#else
	fenab = (current_thread_info()->fpsaved[0] & FPRS_FEF);
#endif

	mcp = &ucp->uc_mcontext;
	grp = &mcp->mc_gregs;

	/* Skip over the trap instruction, first. */
	if (test_thread_flag(TIF_32BIT)) {
		regs->tpc = (regs->tnpc & 0xffffffff);
		regs->tnpc = (regs->tnpc + 4) & 0xffffffff;
	} else {
		regs->tpc = regs->tnpc;
		regs->tnpc += 4;
	}
	err = 0;
	if (_NSIG_WORDS == 1)
		err |= __put_user(current->blocked.sig[0],
				  (unsigned long __user *)&ucp->uc_sigmask);
	else
		err |= __copy_to_user(&ucp->uc_sigmask, &current->blocked,
				      sizeof(sigset_t));

	err |= __put_user(regs->tstate, &((*grp)[MC_TSTATE]));
	err |= __put_user(regs->tpc, &((*grp)[MC_PC]));
	err |= __put_user(regs->tnpc, &((*grp)[MC_NPC]));
	err |= __put_user(regs->y, &((*grp)[MC_Y]));
	err |= __put_user(regs->u_regs[UREG_G1], &((*grp)[MC_G1]));
	err |= __put_user(regs->u_regs[UREG_G2], &((*grp)[MC_G2]));
	err |= __put_user(regs->u_regs[UREG_G3], &((*grp)[MC_G3]));
	err |= __put_user(regs->u_regs[UREG_G4], &((*grp)[MC_G4]));
	err |= __put_user(regs->u_regs[UREG_G5], &((*grp)[MC_G5]));
	err |= __put_user(regs->u_regs[UREG_G6], &((*grp)[MC_G6]));
	err |= __put_user(regs->u_regs[UREG_G7], &((*grp)[MC_G7]));
	err |= __put_user(regs->u_regs[UREG_I0], &((*grp)[MC_O0]));
	err |= __put_user(regs->u_regs[UREG_I1], &((*grp)[MC_O1]));
	err |= __put_user(regs->u_regs[UREG_I2], &((*grp)[MC_O2]));
	err |= __put_user(regs->u_regs[UREG_I3], &((*grp)[MC_O3]));
	err |= __put_user(regs->u_regs[UREG_I4], &((*grp)[MC_O4]));
	err |= __put_user(regs->u_regs[UREG_I5], &((*grp)[MC_O5]));
	err |= __put_user(regs->u_regs[UREG_I6], &((*grp)[MC_O6]));
	err |= __put_user(regs->u_regs[UREG_I7], &((*grp)[MC_O7]));

	/* Read the frame pointer and return address out of the
	 * register-window save area on the user stack.
	 */
	err |= __get_user(fp,
		 (&(((struct reg_window __user *)(STACK_BIAS+regs->u_regs[UREG_I6]))->ins[6])));
	err |= __get_user(i7,
		 (&(((struct reg_window __user *)(STACK_BIAS+regs->u_regs[UREG_I6]))->ins[7])));
	err |= __put_user(fp, &(mcp->mc_fp));
	err |= __put_user(i7, &(mcp->mc_i7));

	err |= __put_user(fenab, &(mcp->mc_fpregs.mcfpu_enab));
	if (fenab) {
		/* Dead while the "#if 1" above forces fenab = 0. */
		unsigned long *fpregs = current_thread_info()->fpregs;
		unsigned long fprs;

		fprs = current_thread_info()->fpsaved[0];
		if (fprs & FPRS_DL)
			err |= copy_to_user(&(mcp->mc_fpregs.mcfpu_fregs), fpregs,
					    (sizeof(unsigned int) * 32));
		if (fprs & FPRS_DU)
			err |= copy_to_user(
				((unsigned long __user *)&(mcp->mc_fpregs.mcfpu_fregs))+16, fpregs+16,
				(sizeof(unsigned int) * 32));
		err |= __put_user(current_thread_info()->xfsr[0], &(mcp->mc_fpregs.mcfpu_fsr));
		err |= __put_user(current_thread_info()->gsr[0], &(mcp->mc_fpregs.mcfpu_gsr));
		err |= __put_user(fprs, &(mcp->mc_fpregs.mcfpu_fprs));
	}
	if (err)
		goto do_sigsegv;
	return;
do_sigsegv:
	force_sig(SIGSEGV, current);
}
/* Layout of the 64-bit rt signal frame built on the user stack by
 * setup_rt_frame() and unwound by do_rt_sigreturn().
 */
struct rt_signal_frame {
	struct sparc_stackf	ss;		/* register window save area */
	siginfo_t		info;
	struct pt_regs		regs;
	__siginfo_fpu_t __user	*fpu_save;	/* NULL when no FPU state follows */
	stack_t			stack;
	sigset_t		mask;		/* signal mask to restore on sigreturn */
	__siginfo_fpu_t		fpu_state;	/* omitted from the frame if FPU unused */
};

/* Align macros */
#define RT_ALIGNEDSZ  (((sizeof(struct rt_signal_frame) + 7) & (~7)))
/*
 * atomically swap in the new signal mask, and wait for a signal.
 * This is really tricky on the Sparc, watch out...
 */
asmlinkage void _sigpause_common(old_sigset_t set, struct pt_regs *regs)
{
	sigset_t saveset;

#ifdef CONFIG_SPARC32_COMPAT
	/* 32-bit tasks take the compat path with the narrow sigset type. */
	if (test_thread_flag(TIF_32BIT)) {
		extern asmlinkage void _sigpause32_common(compat_old_sigset_t,
							  struct pt_regs *);
		_sigpause32_common(set, regs);
		return;
	}
#endif
	set &= _BLOCKABLE;
	spin_lock_irq(&current->sighand->siglock);
	saveset = current->blocked;
	siginitset(&current->blocked, set);
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);

	/* Advance past the trap instruction before sleeping. */
	if (test_thread_flag(TIF_32BIT)) {
		regs->tpc = (regs->tnpc & 0xffffffff);
		regs->tnpc = (regs->tnpc + 4) & 0xffffffff;
	} else {
		regs->tpc = regs->tnpc;
		regs->tnpc += 4;
	}

	/* Condition codes and return value where set here for sigpause,
	 * and so got used by setup_frame, which again causes sigreturn()
	 * to return -EINTR.
	 */
	while (1) {
		current->state = TASK_INTERRUPTIBLE;
		schedule();
		/*
		 * Return -EINTR and set condition code here,
		 * so the interrupted system call actually returns
		 * these.
		 */
		regs->tstate |= (TSTATE_ICARRY|TSTATE_XCARRY);
		regs->u_regs[UREG_I0] = EINTR;
		if (do_signal(&saveset, regs, 0, 0))
			return;
	}
}
/* sigpause(2) entry: the new mask is passed explicitly in %o0. */
asmlinkage void do_sigpause(unsigned int set, struct pt_regs *regs)
{
	_sigpause_common(set, regs);
}
/* sigsuspend(2) entry: the new mask is fetched from the saved user %o0. */
asmlinkage void do_sigsuspend(struct pt_regs *regs)
{
	_sigpause_common(regs->u_regs[UREG_I0], regs);
}
/* rt_sigsuspend(2): install @uset as the blocked mask, sleep until a
 * signal is delivered, then let do_signal() restore the old mask.
 * Errors are reported sparc-style: positive errno in %o0 with the
 * carry bits set in %tstate.
 */
asmlinkage void do_rt_sigsuspend(sigset_t __user *uset, size_t sigsetsize, struct pt_regs *regs)
{
	sigset_t oldset, set;

	/* XXX: Don't preclude handling different sized sigset_t's. */
	if (sigsetsize != sizeof(sigset_t)) {
		regs->tstate |= (TSTATE_ICARRY|TSTATE_XCARRY);
		regs->u_regs[UREG_I0] = EINVAL;
		return;
	}
	if (copy_from_user(&set, uset, sizeof(set))) {
		regs->tstate |= (TSTATE_ICARRY|TSTATE_XCARRY);
		regs->u_regs[UREG_I0] = EFAULT;
		return;
	}
	/* SIGKILL and SIGSTOP can never be blocked. */
	sigdelsetmask(&set, ~_BLOCKABLE);
	spin_lock_irq(&current->sighand->siglock);
	oldset = current->blocked;
	current->blocked = set;
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);

	/* Advance past the trap instruction before sleeping. */
	if (test_thread_flag(TIF_32BIT)) {
		regs->tpc = (regs->tnpc & 0xffffffff);
		regs->tnpc = (regs->tnpc + 4) & 0xffffffff;
	} else {
		regs->tpc = regs->tnpc;
		regs->tnpc += 4;
	}

	/* Condition codes and return value where set here for sigpause,
	 * and so got used by setup_frame, which again causes sigreturn()
	 * to return -EINTR.
	 */
	while (1) {
		current->state = TASK_INTERRUPTIBLE;
		schedule();
		/*
		 * Return -EINTR and set condition code here,
		 * so the interrupted system call actually returns
		 * these.
		 */
		regs->tstate |= (TSTATE_ICARRY|TSTATE_XCARRY);
		regs->u_regs[UREG_I0] = EINTR;
		if (do_signal(&oldset, regs, 0, 0))
			return;
	}
}
/* Reload user FPU state saved in @fpu into the thread's FPU save area
 * and clear TSTATE_PEF so it is picked up lazily on next FPU use.
 * Returns non-zero on any user-copy fault.
 */
static inline int
restore_fpu_state(struct pt_regs *regs, __siginfo_fpu_t __user *fpu)
{
	unsigned long *fpregs = current_thread_info()->fpregs;
	unsigned long fprs;
	int err;

	err = __get_user(fprs, &fpu->si_fprs);
	fprs_write(0);
	regs->tstate &= ~TSTATE_PEF;
	/* Lower/upper halves of the FP register file are restored
	 * independently, per the FPRS_DL/FPRS_DU dirty bits.
	 */
	if (fprs & FPRS_DL)
		err |= copy_from_user(fpregs, &fpu->si_float_regs[0],
				      (sizeof(unsigned int) * 32));
	if (fprs & FPRS_DU)
		err |= copy_from_user(fpregs+16, &fpu->si_float_regs[32],
				      (sizeof(unsigned int) * 32));
	err |= __get_user(current_thread_info()->xfsr[0], &fpu->si_fsr);
	err |= __get_user(current_thread_info()->gsr[0], &fpu->si_gsr);
	current_thread_info()->fpsaved[0] |= fprs;
	return err;
}
/* rt_sigreturn(2): unwind the rt signal frame built by setup_rt_frame()
 * and restore registers, FPU state, sigaltstack and signal mask.  Any
 * fault or inconsistency forces SIGSEGV.
 */
void do_rt_sigreturn(struct pt_regs *regs)
{
	struct rt_signal_frame __user *sf;
	unsigned long tpc, tnpc, tstate;
	__siginfo_fpu_t __user *fpu_save;
	mm_segment_t old_fs;
	sigset_t set;
	stack_t st;
	int err;

	/* Always make any pending restarted system calls return -EINTR */
	current_thread_info()->restart_block.fn = do_no_restart_syscall;

	synchronize_user_stack ();
	/* The frame sits at the interrupted frame pointer. */
	sf = (struct rt_signal_frame __user *)
		(regs->u_regs [UREG_FP] + STACK_BIAS);

	/* 1. Make sure we are not getting garbage from the user */
	if (((unsigned long) sf) & 3)
		goto segv;

	err = get_user(tpc, &sf->regs.tpc);
	err |= __get_user(tnpc, &sf->regs.tnpc);
	if (test_thread_flag(TIF_32BIT)) {
		tpc &= 0xffffffff;
		tnpc &= 0xffffffff;
	}
	/* Both PC and NPC must be instruction-aligned. */
	err |= ((tpc | tnpc) & 3);

	/* 2. Restore the state */
	err |= __get_user(regs->y, &sf->regs.y);
	err |= __get_user(tstate, &sf->regs.tstate);
	err |= copy_from_user(regs->u_regs, sf->regs.u_regs, sizeof(regs->u_regs));

	/* User can only change condition codes and %asi in %tstate. */
	regs->tstate &= ~(TSTATE_ASI | TSTATE_ICC | TSTATE_XCC);
	regs->tstate |= (tstate & (TSTATE_ASI | TSTATE_ICC | TSTATE_XCC));

	err |= __get_user(fpu_save, &sf->fpu_save);
	if (fpu_save)
		err |= restore_fpu_state(regs, &sf->fpu_state);

	err |= __copy_from_user(&set, &sf->mask, sizeof(sigset_t));
	err |= __copy_from_user(&st, &sf->stack, sizeof(stack_t));
	if (err)
		goto segv;

	regs->tpc = tpc;
	regs->tnpc = tnpc;

	/* It is more difficult to avoid calling this function than to
	   call it and ignore errors. */
	old_fs = get_fs();
	set_fs(KERNEL_DS);
	/* st is a kernel copy, hence the temporary KERNEL_DS segment. */
	do_sigaltstack((const stack_t __user *) &st, NULL, (unsigned long)sf);
	set_fs(old_fs);

	sigdelsetmask(&set, ~_BLOCKABLE);
	spin_lock_irq(&current->sighand->siglock);
	current->blocked = set;
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);
	return;
segv:
	force_sig(SIGSEGV, current);
}
  395. /* Checks if the fp is valid */
  396. static int invalid_frame_pointer(void __user *fp, int fplen)
  397. {
  398. if (((unsigned long) fp) & 7)
  399. return 1;
  400. return 0;
  401. }
/* Copy the thread's saved FPU state out to the user signal frame.
 * Returns non-zero on any user-copy fault.
 */
static inline int
save_fpu_state(struct pt_regs *regs, __siginfo_fpu_t __user *fpu)
{
	/* The FP registers appear to be spilled directly after pt_regs on
	 * the kernel stack -- NOTE(review): confirm against the caller's
	 * save_and_clear_fpu() path.
	 */
	unsigned long *fpregs = (unsigned long *)(regs+1);
	unsigned long fprs;
	int err = 0;

	fprs = current_thread_info()->fpsaved[0];
	/* Only the halves marked dirty in %fprs are written out. */
	if (fprs & FPRS_DL)
		err |= copy_to_user(&fpu->si_float_regs[0], fpregs,
				    (sizeof(unsigned int) * 32));
	if (fprs & FPRS_DU)
		err |= copy_to_user(&fpu->si_float_regs[32], fpregs+16,
				    (sizeof(unsigned int) * 32));
	err |= __put_user(current_thread_info()->xfsr[0], &fpu->si_fsr);
	err |= __put_user(current_thread_info()->gsr[0], &fpu->si_gsr);
	err |= __put_user(fprs, &fpu->si_fprs);
	return err;
}
/* Compute the user stack address for a new signal frame of
 * @framesize bytes, honouring SA_ONSTACK altstack requests.
 */
static inline void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, unsigned long framesize)
{
	unsigned long sp;

	sp = regs->u_regs[UREG_FP] + STACK_BIAS;

	/* This is the X/Open sanctioned signal stack switching. */
	if (ka->sa.sa_flags & SA_ONSTACK) {
		/* Switch only if not already on the altstack and the
		 * altstack top is 8-byte aligned.
		 */
		if (!on_sig_stack(sp) &&
		    !((current->sas_ss_sp + current->sas_ss_size) & 7))
			sp = current->sas_ss_sp + current->sas_ss_size;
	}
	return (void __user *)(sp - framesize);
}
/* Build an rt signal frame on the user stack and redirect user
 * execution to the handler for @signo.  An unusable stack pointer
 * kills the task with SIGILL; a copy fault forces SIGSEGV.
 */
static inline void
setup_rt_frame(struct k_sigaction *ka, struct pt_regs *regs,
	       int signo, sigset_t *oldset, siginfo_t *info)
{
	struct rt_signal_frame __user *sf;
	int sigframe_size, err;

	/* 1. Make sure everything is clean */
	synchronize_user_stack();
	save_and_clear_fpu();

	/* The trailing fpu_state member is dropped from the frame when
	 * the FPU is not in use.
	 */
	sigframe_size = RT_ALIGNEDSZ;
	if (!(current_thread_info()->fpsaved[0] & FPRS_FEF))
		sigframe_size -= sizeof(__siginfo_fpu_t);

	sf = (struct rt_signal_frame __user *)
		get_sigframe(ka, regs, sigframe_size);

	if (invalid_frame_pointer (sf, sigframe_size))
		goto sigill;

	if (get_thread_wsaved() != 0)
		goto sigill;

	/* 2. Save the current process state */
	err = copy_to_user(&sf->regs, regs, sizeof (*regs));

	if (current_thread_info()->fpsaved[0] & FPRS_FEF) {
		err |= save_fpu_state(regs, &sf->fpu_state);
		err |= __put_user((u64)&sf->fpu_state, &sf->fpu_save);
	} else {
		err |= __put_user(0, &sf->fpu_save);
	}

	/* Setup sigaltstack */
	err |= __put_user(current->sas_ss_sp, &sf->stack.ss_sp);
	err |= __put_user(sas_ss_flags(regs->u_regs[UREG_FP]), &sf->stack.ss_flags);
	err |= __put_user(current->sas_ss_size, &sf->stack.ss_size);

	err |= copy_to_user(&sf->mask, oldset, sizeof(sigset_t));

	/* Copy the caller's register window into the frame so it can be
	 * recovered after the handler runs.
	 */
	err |= copy_in_user((u64 __user *)sf,
			    (u64 __user *)(regs->u_regs[UREG_FP]+STACK_BIAS),
			    sizeof(struct reg_window));

	if (info)
		err |= copy_siginfo_to_user(&sf->info, info);
	else {
		err |= __put_user(signo, &sf->info.si_signo);
		err |= __put_user(SI_NOINFO, &sf->info.si_code);
	}
	if (err)
		goto sigsegv;

	/* 3. signal handler back-trampoline and parameters */
	regs->u_regs[UREG_FP] = ((unsigned long) sf) - STACK_BIAS;
	regs->u_regs[UREG_I0] = signo;
	regs->u_regs[UREG_I1] = (unsigned long) &sf->info;

	/* The sigcontext is passed in this way because of how it
	 * is defined in GLIBC's /usr/include/bits/sigcontext.h
	 * for sparc64.  It includes the 128 bytes of siginfo_t.
	 */
	regs->u_regs[UREG_I2] = (unsigned long) &sf->info;

	/* 5. signal handler */
	regs->tpc = (unsigned long) ka->sa.sa_handler;
	regs->tnpc = (regs->tpc + 4);
	if (test_thread_flag(TIF_32BIT)) {
		regs->tpc &= 0xffffffff;
		regs->tnpc &= 0xffffffff;
	}
	/* 4. return to kernel instructions */
	regs->u_regs[UREG_I7] = (unsigned long)ka->ka_restorer;
	return;

sigill:
	do_exit(SIGILL);
sigsegv:
	force_sigsegv(signo, current);
}
/* Deliver one signal: build the user frame, then block the handler's
 * sa_mask (plus the signal itself unless SA_NOMASK is set).
 */
static inline void handle_signal(unsigned long signr, struct k_sigaction *ka,
				 siginfo_t *info,
				 sigset_t *oldset, struct pt_regs *regs)
{
	/* Only pass siginfo through when the handler asked for it. */
	setup_rt_frame(ka, regs, signr, oldset,
		       (ka->sa.sa_flags & SA_SIGINFO) ? info : NULL);
	spin_lock_irq(&current->sighand->siglock);
	sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
	if (!(ka->sa.sa_flags & SA_NOMASK))
		sigaddset(&current->blocked,signr);
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);
}
/* Decide how an interrupted system call resumes once the handler
 * returns.  %o0 holds the positive sparc-style error code left by the
 * interrupted syscall.
 */
static inline void syscall_restart(unsigned long orig_i0, struct pt_regs *regs,
				   struct sigaction *sa)
{
	switch (regs->u_regs[UREG_I0]) {
	case ERESTART_RESTARTBLOCK:
	case ERESTARTNOHAND:
	no_system_call_restart:
		/* Make the syscall fail with EINTR (carry bits flag the
		 * error to the return path).
		 */
		regs->u_regs[UREG_I0] = EINTR;
		regs->tstate |= (TSTATE_ICARRY|TSTATE_XCARRY);
		break;
	case ERESTARTSYS:
		if (!(sa->sa_flags & SA_RESTART))
			goto no_system_call_restart;
		/* fallthrough */
	case ERESTARTNOINTR:
		/* Back up PC/NPC to replay the trap with the original %o0. */
		regs->u_regs[UREG_I0] = orig_i0;
		regs->tpc -= 4;
		regs->tnpc -= 4;
	}
}
/* Note that 'init' is a special process: it doesn't get signals it doesn't
 * want to handle.  Thus you cannot kill init even with a SIGKILL even by
 * mistake.
 */

/* Dequeue and deliver one pending signal, or arrange syscall restart
 * when none is delivered.  Returns 1 if a handler was set up, else 0.
 */
static int do_signal(sigset_t *oldset, struct pt_regs * regs,
		     unsigned long orig_i0, int restart_syscall)
{
	siginfo_t info;
	struct signal_deliver_cookie cookie;
	struct k_sigaction ka;
	int signr;

	cookie.restart_syscall = restart_syscall;
	cookie.orig_i0 = orig_i0;

	if (!oldset)
		oldset = &current->blocked;

#ifdef CONFIG_SPARC32_COMPAT
	/* 32-bit tasks are handled entirely by the compat path. */
	if (test_thread_flag(TIF_32BIT)) {
		extern int do_signal32(sigset_t *, struct pt_regs *,
				       unsigned long, int);
		return do_signal32(oldset, regs, orig_i0,
				   cookie.restart_syscall);
	}
#endif

	signr = get_signal_to_deliver(&info, &ka, regs, &cookie);
	if (signr > 0) {
		/* cookie.restart_syscall may have been cleared via
		 * ptrace_signal_deliver() inside get_signal_to_deliver().
		 */
		if (cookie.restart_syscall)
			syscall_restart(orig_i0, regs, &ka.sa);
		handle_signal(signr, &ka, &info, oldset, regs);
		return 1;
	}
	/* No handler ran: transparently restart interrupted syscalls. */
	if (cookie.restart_syscall &&
	    (regs->u_regs[UREG_I0] == ERESTARTNOHAND ||
	     regs->u_regs[UREG_I0] == ERESTARTSYS ||
	     regs->u_regs[UREG_I0] == ERESTARTNOINTR)) {
		/* replay the system call when we are done */
		regs->u_regs[UREG_I0] = cookie.orig_i0;
		regs->tpc -= 4;
		regs->tnpc -= 4;
	}
	if (cookie.restart_syscall &&
	    regs->u_regs[UREG_I0] == ERESTART_RESTARTBLOCK) {
		/* Re-enter via the restart_syscall syscall number in %g1. */
		regs->u_regs[UREG_G1] = __NR_restart_syscall;
		regs->tpc -= 4;
		regs->tnpc -= 4;
	}
	return 0;
}
  578. void do_notify_resume(sigset_t *oldset, struct pt_regs *regs,
  579. unsigned long orig_i0, int restart_syscall,
  580. unsigned long thread_info_flags)
  581. {
  582. if (thread_info_flags & _TIF_SIGPENDING)
  583. do_signal(oldset, regs, orig_i0, restart_syscall);
  584. }
/* Hook used by the generic signal code when a traced task is about to
 * have a signal delivered: perform the syscall-restart fixups now and
 * clear the flag in the cookie so do_signal() does not repeat them.
 */
void ptrace_signal_deliver(struct pt_regs *regs, void *cookie)
{
	struct signal_deliver_cookie *cp = cookie;

	if (cp->restart_syscall &&
	    (regs->u_regs[UREG_I0] == ERESTARTNOHAND ||
	     regs->u_regs[UREG_I0] == ERESTARTSYS ||
	     regs->u_regs[UREG_I0] == ERESTARTNOINTR)) {
		/* replay the system call when we are done */
		regs->u_regs[UREG_I0] = cp->orig_i0;
		regs->tpc -= 4;
		regs->tnpc -= 4;
		cp->restart_syscall = 0;
	}
	if (cp->restart_syscall &&
	    regs->u_regs[UREG_I0] == ERESTART_RESTARTBLOCK) {
		/* Re-enter via the restart_syscall syscall number in %g1. */
		regs->u_regs[UREG_G1] = __NR_restart_syscall;
		regs->tpc -= 4;
		regs->tnpc -= 4;
		cp->restart_syscall = 0;
	}
}