/* $Id: signal.c,v 1.110 2002/02/08 03:57:14 davem Exp $
 * linux/arch/sparc/kernel/signal.c
 *
 * Copyright (C) 1991, 1992 Linus Torvalds
 * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
 * Copyright (C) 1996 Miguel de Icaza (miguel@nuclecu.unam.mx)
 * Copyright (C) 1997 Eddie C. Dost (ecd@skynet.be)
 */

#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/errno.h>
#include <linux/wait.h>
#include <linux/ptrace.h>
#include <linux/unistd.h>
#include <linux/mm.h>
#include <linux/tty.h>
#include <linux/smp.h>
#include <linux/binfmts.h>	/* do_coredump */
#include <linux/bitops.h>

#include <asm/uaccess.h>
#include <asm/ptrace.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/cacheflush.h>	/* flush_sig_insns */

#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
extern void fpsave(unsigned long *fpregs, unsigned long *fsr,
		   void *fpqueue, unsigned long *fpqdepth);
extern void fpload(unsigned long *fpregs, unsigned long *fsr);

/* Signal frames: the original one (compatible with SunOS):
 *
 * Set up a signal frame... Make the stack look the way SunOS
 * expects it to look which is basically:
 *
 * ---------------------------------- <-- %sp at signal time
 * struct sigcontext
 * Signal address
 * Ptr to sigcontext area above
 * Signal code
 * The signal number itself
 * One register window
 * ---------------------------------- <-- New %sp
 */
struct signal_sframe {
	struct reg_window sig_window;
	int sig_num;
	int sig_code;
	struct sigcontext __user *sig_scptr;
	int sig_address;
	struct sigcontext sig_context;
	unsigned int extramask[_NSIG_WORDS - 1];
};

/*
 * And the new one, intended to be used for Linux applications only
 * (we have enough in there to work with clone).
 * All the interesting bits are in the info field.
 */
struct new_signal_frame {
	struct sparc_stackf ss;
	__siginfo_t info;
	__siginfo_fpu_t __user *fpu_save;
	unsigned long insns[2] __attribute__ ((aligned (8)));
	unsigned int extramask[_NSIG_WORDS - 1];
	unsigned int extra_size; /* Should be 0 */
	__siginfo_fpu_t fpu_state;
};

struct rt_signal_frame {
	struct sparc_stackf ss;
	siginfo_t info;
	struct pt_regs regs;
	sigset_t mask;
	__siginfo_fpu_t __user *fpu_save;
	unsigned int insns[2];
	stack_t stack;
	unsigned int extra_size; /* Should be 0 */
	__siginfo_fpu_t fpu_state;
};

/* Align macros */
#define SF_ALIGNEDSZ  (((sizeof(struct signal_sframe) + 7) & (~7)))
#define NF_ALIGNEDSZ  (((sizeof(struct new_signal_frame) + 7) & (~7)))
#define RT_ALIGNEDSZ  (((sizeof(struct rt_signal_frame) + 7) & (~7)))
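
/* Common body of sigpause()/sigsuspend(): atomically install the caller's
 * temporary signal mask, sleep until a signal arrives, and arrange (via
 * TIF_RESTORE_SIGMASK) for the original mask to be put back on the way out.
 * Always returns -ERESTARTNOHAND, as these calls require.
 */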
static int _sigpause_common(old_sigset_t set)
{
	set &= _BLOCKABLE;
	spin_lock_irq(&current->sighand->siglock);
	current->saved_sigmask = current->blocked;
	siginitset(&current->blocked, set);
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);

	current->state = TASK_INTERRUPTIBLE;
	schedule();
	set_thread_flag(TIF_RESTORE_SIGMASK);

	return -ERESTARTNOHAND;
}

asmlinkage int sys_sigpause(unsigned int set)
{
	return _sigpause_common(set);
}

asmlinkage int sys_sigsuspend(old_sigset_t set)
{
	return _sigpause_common(set);
}
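
/* Reload the FPU state saved in a signal frame back into the thread
 * structure, first marking the FPU as "not in use" so the lazy-FPU
 * machinery cannot overwrite what we restore with stale registers.
 */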
static inline int
restore_fpu_state(struct pt_regs *regs, __siginfo_fpu_t __user *fpu)
{
	int err;
#ifdef CONFIG_SMP
	if (test_tsk_thread_flag(current, TIF_USEDFPU))
		regs->psr &= ~PSR_EF;
#else
	if (current == last_task_used_math) {
		last_task_used_math = NULL;
		regs->psr &= ~PSR_EF;
	}
#endif
	set_used_math();
	clear_tsk_thread_flag(current, TIF_USEDFPU);

	if (!access_ok(VERIFY_READ, fpu, sizeof(*fpu)))
		return -EFAULT;

	err = __copy_from_user(&current->thread.float_regs[0], &fpu->si_float_regs[0],
			       (sizeof(unsigned long) * 32));
	err |= __get_user(current->thread.fsr, &fpu->si_fsr);
	err |= __get_user(current->thread.fpqdepth, &fpu->si_fpqdepth);
	if (current->thread.fpqdepth != 0)
		err |= __copy_from_user(&current->thread.fpqueue[0],
					&fpu->si_fpqueue[0],
					((sizeof(unsigned long) +
					(sizeof(unsigned long *)))*16));
	return err;
}
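
/* Return path for the Linux-native frame laid out by new_setup_frame():
 * validate the user frame, restore pt_regs, the FPU state and the saved
 * signal mask, then resume at the restored pc/npc.  Any fault or
 * inconsistency kills the task with SIGSEGV.
 */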
static inline void do_new_sigreturn (struct pt_regs *regs)
{
	struct new_signal_frame __user *sf;
	unsigned long up_psr, pc, npc;
	sigset_t set;
	__siginfo_fpu_t __user *fpu_save;
	int err;

	sf = (struct new_signal_frame __user *) regs->u_regs[UREG_FP];

	/* 1. Make sure we are not getting garbage from the user */
	if (!access_ok(VERIFY_READ, sf, sizeof(*sf)))
		goto segv_and_exit;

	if (((unsigned long) sf) & 3)
		goto segv_and_exit;

	err = __get_user(pc, &sf->info.si_regs.pc);
	err |= __get_user(npc, &sf->info.si_regs.npc);

	if ((pc | npc) & 3)
		goto segv_and_exit;

	/* 2. Restore the state */
	up_psr = regs->psr;
	err |= __copy_from_user(regs, &sf->info.si_regs, sizeof(struct pt_regs));

	/* User can only change condition codes and FPU enabling in %psr. */
	regs->psr = (up_psr & ~(PSR_ICC | PSR_EF))
		  | (regs->psr & (PSR_ICC | PSR_EF));

	err |= __get_user(fpu_save, &sf->fpu_save);

	if (fpu_save)
		err |= restore_fpu_state(regs, fpu_save);

	/* This is pretty much atomic, no amount of locking would prevent
	 * the races which exist anyways.
	 */
	err |= __get_user(set.sig[0], &sf->info.si_mask);
	err |= __copy_from_user(&set.sig[1], &sf->extramask,
				(_NSIG_WORDS-1) * sizeof(unsigned int));
	if (err)
		goto segv_and_exit;

	sigdelsetmask(&set, ~_BLOCKABLE);
	spin_lock_irq(&current->sighand->siglock);
	current->blocked = set;
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);
	return;

segv_and_exit:
	force_sig(SIGSEGV, current);
}
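
/* sigreturn(2) entry point.  Dispatches to do_new_sigreturn() for tasks
 * using the Linux-native frame; otherwise unwinds the old SunOS-style
 * frame, restoring pc/npc, the signal mask, %sp, %o0, %g1 and the
 * condition codes from the user's sigcontext.
 */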
asmlinkage void do_sigreturn(struct pt_regs *regs)
{
	struct sigcontext __user *scptr;
	unsigned long pc, npc, psr;
	sigset_t set;
	int err;

	/* Always make any pending restarted system calls return -EINTR */
	current_thread_info()->restart_block.fn = do_no_restart_syscall;

	synchronize_user_stack();

	if (current->thread.new_signal) {
		do_new_sigreturn(regs);
		return;
	}

	scptr = (struct sigcontext __user *) regs->u_regs[UREG_I0];

	/* Check sanity of the user arg. */
	if (!access_ok(VERIFY_READ, scptr, sizeof(struct sigcontext)) ||
	    (((unsigned long) scptr) & 3))
		goto segv_and_exit;

	err = __get_user(pc, &scptr->sigc_pc);
	err |= __get_user(npc, &scptr->sigc_npc);

	if ((pc | npc) & 3)
		goto segv_and_exit;

	/* This is pretty much atomic, no amount of locking would prevent
	 * the races which exist anyways.
	 */
	err |= __get_user(set.sig[0], &scptr->sigc_mask);
	/* Note that scptr + 1 points to extramask */
	err |= __copy_from_user(&set.sig[1], scptr + 1,
				(_NSIG_WORDS - 1) * sizeof(unsigned int));

	if (err)
		goto segv_and_exit;

	sigdelsetmask(&set, ~_BLOCKABLE);
	spin_lock_irq(&current->sighand->siglock);
	current->blocked = set;
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);

	regs->pc = pc;
	regs->npc = npc;

	err = __get_user(regs->u_regs[UREG_FP], &scptr->sigc_sp);
	err |= __get_user(regs->u_regs[UREG_I0], &scptr->sigc_o0);
	err |= __get_user(regs->u_regs[UREG_G1], &scptr->sigc_g1);

	/* User can only change condition codes in %psr. */
	err |= __get_user(psr, &scptr->sigc_psr);
	if (err)
		goto segv_and_exit;

	regs->psr &= ~(PSR_ICC);
	regs->psr |= (psr & PSR_ICC);
	return;

segv_and_exit:
	force_sig(SIGSEGV, current);
}
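
/* rt_sigreturn(2) entry point: unwind the rt_signal_frame built by
 * new_setup_rt_frame().  Restores the general registers, %y and the
 * condition codes, the FPU state, the full sigset_t and the alternate
 * signal stack settings recorded in the frame.
 */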
asmlinkage void do_rt_sigreturn(struct pt_regs *regs)
{
	struct rt_signal_frame __user *sf;
	unsigned int psr, pc, npc;
	__siginfo_fpu_t __user *fpu_save;
	mm_segment_t old_fs;
	sigset_t set;
	stack_t st;
	int err;

	synchronize_user_stack();
	sf = (struct rt_signal_frame __user *) regs->u_regs[UREG_FP];
	if (!access_ok(VERIFY_READ, sf, sizeof(*sf)) ||
	    (((unsigned long) sf) & 0x03))
		goto segv;

	err = __get_user(pc, &sf->regs.pc);
	err |= __get_user(npc, &sf->regs.npc);
	err |= ((pc | npc) & 0x03);

	err |= __get_user(regs->y, &sf->regs.y);
	err |= __get_user(psr, &sf->regs.psr);

	err |= __copy_from_user(&regs->u_regs[UREG_G1],
				&sf->regs.u_regs[UREG_G1], 15 * sizeof(u32));

	regs->psr = (regs->psr & ~PSR_ICC) | (psr & PSR_ICC);

	err |= __get_user(fpu_save, &sf->fpu_save);

	if (fpu_save)
		err |= restore_fpu_state(regs, fpu_save);
	err |= __copy_from_user(&set, &sf->mask, sizeof(sigset_t));

	err |= __copy_from_user(&st, &sf->stack, sizeof(stack_t));

	if (err)
		goto segv;

	regs->pc = pc;
	regs->npc = npc;

	/* It is more difficult to avoid calling this function than to
	 * call it and ignore errors.
	 */
	old_fs = get_fs();
	set_fs(KERNEL_DS);
	do_sigaltstack((const stack_t __user *) &st, NULL, (unsigned long)sf);
	set_fs(old_fs);

	sigdelsetmask(&set, ~_BLOCKABLE);
	spin_lock_irq(&current->sighand->siglock);
	current->blocked = set;
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);
	return;
segv:
	force_sig(SIGSEGV, current);
}
/* Checks if the fp is valid */
static inline int invalid_frame_pointer(void __user *fp, int fplen)
{
	if ((((unsigned long) fp) & 7) ||
	    !__access_ok((unsigned long)fp, fplen) ||
	    ((sparc_cpu_model == sun4 || sparc_cpu_model == sun4c) &&
	     ((unsigned long) fp < 0xe0000000 && (unsigned long) fp >= 0x20000000)))
		return 1;

	return 0;
}
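
/* Pick the user stack address at which the signal frame will be built:
 * normally just below the interrupted frame pointer, but switched to the
 * top of the sigaltstack when SA_ONSTACK is requested, we are not already
 * on that stack, and the alternate stack is 8-byte aligned.
 */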
static inline void __user *get_sigframe(struct sigaction *sa, struct pt_regs *regs, unsigned long framesize)
{
	unsigned long sp;

	sp = regs->u_regs[UREG_FP];

	/* This is the X/Open sanctioned signal stack switching. */
	if (sa->sa_flags & SA_ONSTACK) {
		if (!on_sig_stack(sp) && !((current->sas_ss_sp + current->sas_ss_size) & 7))
			sp = current->sas_ss_sp + current->sas_ss_size;
	}
	return (void __user *)(sp - framesize);
}
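
/* Build the old SunOS-compatible signal frame on the user stack and
 * redirect the task to the handler.  The sigcontext captures the mask,
 * stack pointer, pc/npc, %psr, %g1/%o0 and any user register windows still
 * buffered in the kernel; for kernel-generated faults the SunOS "signal
 * code" is derived from the siginfo si_code.
 */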
static inline void
setup_frame(struct sigaction *sa, struct pt_regs *regs, int signr, sigset_t *oldset, siginfo_t *info)
{
	struct signal_sframe __user *sframep;
	struct sigcontext __user *sc;
	int window = 0, err;
	unsigned long pc = regs->pc;
	unsigned long npc = regs->npc;
	struct thread_info *tp = current_thread_info();
	void __user *sig_address;
	int sig_code;

	synchronize_user_stack();
	sframep = (struct signal_sframe __user *)
		get_sigframe(sa, regs, SF_ALIGNEDSZ);
	if (invalid_frame_pointer(sframep, sizeof(*sframep))){
		/* Don't change signal code and address, so that
		 * post mortem debuggers can have a look.
		 */
		goto sigill_and_return;
	}

	sc = &sframep->sig_context;

	/* We've already made sure frame pointer isn't in kernel space... */
	err = __put_user((sas_ss_flags(regs->u_regs[UREG_FP]) == SS_ONSTACK),
			 &sc->sigc_onstack);
	err |= __put_user(oldset->sig[0], &sc->sigc_mask);
	err |= __copy_to_user(sframep->extramask, &oldset->sig[1],
			      (_NSIG_WORDS - 1) * sizeof(unsigned int));
	err |= __put_user(regs->u_regs[UREG_FP], &sc->sigc_sp);
	err |= __put_user(pc, &sc->sigc_pc);
	err |= __put_user(npc, &sc->sigc_npc);
	err |= __put_user(regs->psr, &sc->sigc_psr);
	err |= __put_user(regs->u_regs[UREG_G1], &sc->sigc_g1);
	err |= __put_user(regs->u_regs[UREG_I0], &sc->sigc_o0);
	err |= __put_user(tp->w_saved, &sc->sigc_oswins);
	if (tp->w_saved)
		for (window = 0; window < tp->w_saved; window++) {
			put_user((char *)tp->rwbuf_stkptrs[window],
				 &sc->sigc_spbuf[window]);
			err |= __copy_to_user(&sc->sigc_wbuf[window],
					      &tp->reg_window[window],
					      sizeof(struct reg_window));
		}
	else
		err |= __copy_to_user(sframep, (char *) regs->u_regs[UREG_FP],
				      sizeof(struct reg_window));

	tp->w_saved = 0; /* So process is allowed to execute. */

	err |= __put_user(signr, &sframep->sig_num);
	sig_address = NULL;
	sig_code = 0;
	if (SI_FROMKERNEL (info) && (info->si_code & __SI_MASK) == __SI_FAULT) {
		sig_address = info->si_addr;
		switch (signr) {
		case SIGSEGV:
			switch (info->si_code) {
			case SEGV_MAPERR: sig_code = SUBSIG_NOMAPPING; break;
			default: sig_code = SUBSIG_PROTECTION; break;
			}
			break;
		case SIGILL:
			switch (info->si_code) {
			case ILL_ILLOPC: sig_code = SUBSIG_ILLINST; break;
			case ILL_PRVOPC: sig_code = SUBSIG_PRIVINST; break;
			case ILL_ILLTRP: sig_code = SUBSIG_BADTRAP(info->si_trapno); break;
			default: sig_code = SUBSIG_STACK; break;
			}
			break;
		case SIGFPE:
			switch (info->si_code) {
			case FPE_INTDIV: sig_code = SUBSIG_IDIVZERO; break;
			case FPE_INTOVF: sig_code = SUBSIG_FPINTOVFL; break;
			case FPE_FLTDIV: sig_code = SUBSIG_FPDIVZERO; break;
			case FPE_FLTOVF: sig_code = SUBSIG_FPOVFLOW; break;
			case FPE_FLTUND: sig_code = SUBSIG_FPUNFLOW; break;
			case FPE_FLTRES: sig_code = SUBSIG_FPINEXACT; break;
			case FPE_FLTINV: sig_code = SUBSIG_FPOPERROR; break;
			default: sig_code = SUBSIG_FPERROR; break;
			}
			break;
		case SIGBUS:
			switch (info->si_code) {
			case BUS_ADRALN: sig_code = SUBSIG_ALIGNMENT; break;
			case BUS_ADRERR: sig_code = SUBSIG_MISCERROR; break;
			default: sig_code = SUBSIG_BUSTIMEOUT; break;
			}
			break;
		case SIGEMT:
			switch (info->si_code) {
			case EMT_TAGOVF: sig_code = SUBSIG_TAG; break;
			}
			break;
		case SIGSYS:
			if (info->si_code == (__SI_FAULT|0x100)) {
				sig_code = info->si_trapno;
				break;
			}
		default:
			sig_address = NULL;
		}
	}
	err |= __put_user((unsigned long)sig_address, &sframep->sig_address);
	err |= __put_user(sig_code, &sframep->sig_code);
	err |= __put_user(sc, &sframep->sig_scptr);
	if (err)
		goto sigsegv;

	regs->u_regs[UREG_FP] = (unsigned long) sframep;
	regs->pc = (unsigned long) sa->sa_handler;
	regs->npc = (regs->pc + 4);
	return;

sigill_and_return:
	do_exit(SIGILL);
sigsegv:
	force_sigsegv(signr, current);
}
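
/* Dump the current FPU state into a signal frame.  If the FPU is live for
 * this task, flush it to the thread structure first (re-enabling PSR_EF
 * long enough for fpsave()), then copy the registers, %fsr and any pending
 * FP queue out to user space and mark the math state as discarded.
 */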
static inline int
save_fpu_state(struct pt_regs *regs, __siginfo_fpu_t __user *fpu)
{
	int err = 0;
#ifdef CONFIG_SMP
	if (test_tsk_thread_flag(current, TIF_USEDFPU)) {
		put_psr(get_psr() | PSR_EF);
		fpsave(&current->thread.float_regs[0], &current->thread.fsr,
		       &current->thread.fpqueue[0], &current->thread.fpqdepth);
		regs->psr &= ~(PSR_EF);
		clear_tsk_thread_flag(current, TIF_USEDFPU);
	}
#else
	if (current == last_task_used_math) {
		put_psr(get_psr() | PSR_EF);
		fpsave(&current->thread.float_regs[0], &current->thread.fsr,
		       &current->thread.fpqueue[0], &current->thread.fpqdepth);
		last_task_used_math = NULL;
		regs->psr &= ~(PSR_EF);
	}
#endif
	err |= __copy_to_user(&fpu->si_float_regs[0],
			      &current->thread.float_regs[0],
			      (sizeof(unsigned long) * 32));
	err |= __put_user(current->thread.fsr, &fpu->si_fsr);
	err |= __put_user(current->thread.fpqdepth, &fpu->si_fpqdepth);
	if (current->thread.fpqdepth != 0)
		err |= __copy_to_user(&fpu->si_fpqueue[0],
				      &current->thread.fpqueue[0],
				      ((sizeof(unsigned long) +
				      (sizeof(unsigned long *)))*16));
	clear_used_math();
	return err;
}
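
/* Build the Linux-native (non-RT) signal frame: save pt_regs, the FPU
 * state if any, the old signal mask and a copy of the interrupted register
 * window, then point the task at the handler.  If no restorer was
 * registered, a two-instruction sigreturn trampoline is written into the
 * frame itself.
 */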
static inline void
new_setup_frame(struct k_sigaction *ka, struct pt_regs *regs,
		int signo, sigset_t *oldset)
{
	struct new_signal_frame __user *sf;
	int sigframe_size, err;

	/* 1. Make sure everything is clean */
	synchronize_user_stack();

	sigframe_size = NF_ALIGNEDSZ;
	if (!used_math())
		sigframe_size -= sizeof(__siginfo_fpu_t);

	sf = (struct new_signal_frame __user *)
		get_sigframe(&ka->sa, regs, sigframe_size);

	if (invalid_frame_pointer(sf, sigframe_size))
		goto sigill_and_return;

	if (current_thread_info()->w_saved != 0)
		goto sigill_and_return;

	/* 2. Save the current process state */
	err = __copy_to_user(&sf->info.si_regs, regs, sizeof(struct pt_regs));

	err |= __put_user(0, &sf->extra_size);

	if (used_math()) {
		err |= save_fpu_state(regs, &sf->fpu_state);
		err |= __put_user(&sf->fpu_state, &sf->fpu_save);
	} else {
		err |= __put_user(0, &sf->fpu_save);
	}

	err |= __put_user(oldset->sig[0], &sf->info.si_mask);
	err |= __copy_to_user(sf->extramask, &oldset->sig[1],
			      (_NSIG_WORDS - 1) * sizeof(unsigned int));
	err |= __copy_to_user(sf, (char *) regs->u_regs[UREG_FP],
			      sizeof(struct reg_window));
	if (err)
		goto sigsegv;

	/* 3. signal handler back-trampoline and parameters */
	regs->u_regs[UREG_FP] = (unsigned long) sf;
	regs->u_regs[UREG_I0] = signo;
	regs->u_regs[UREG_I1] = (unsigned long) &sf->info;
	regs->u_regs[UREG_I2] = (unsigned long) &sf->info;

	/* 4. signal handler */
	regs->pc = (unsigned long) ka->sa.sa_handler;
	regs->npc = (regs->pc + 4);

	/* 5. return to kernel instructions */
	if (ka->ka_restorer)
		regs->u_regs[UREG_I7] = (unsigned long)ka->ka_restorer;
	else {
		regs->u_regs[UREG_I7] = (unsigned long)(&(sf->insns[0]) - 2);

		/* mov __NR_sigreturn, %g1 */
		err |= __put_user(0x821020d8, &sf->insns[0]);

		/* t 0x10 */
		err |= __put_user(0x91d02010, &sf->insns[1]);
		if (err)
			goto sigsegv;

		/* Flush instruction space. */
		flush_sig_insns(current->mm, (unsigned long) &(sf->insns[0]));
	}
	return;

sigill_and_return:
	do_exit(SIGILL);
sigsegv:
	force_sigsegv(signo, current);
}
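
/* Build the RT signal frame used for SA_SIGINFO handlers: in addition to
 * the register and FPU state this carries a full siginfo_t, the complete
 * sigset_t and the sigaltstack description, and the handler receives the
 * signal number, &sf->info and &sf->regs in %o0-%o2.
 */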
static inline void
new_setup_rt_frame(struct k_sigaction *ka, struct pt_regs *regs,
		   int signo, sigset_t *oldset, siginfo_t *info)
{
	struct rt_signal_frame __user *sf;
	int sigframe_size;
	unsigned int psr;
	int err;

	synchronize_user_stack();
	sigframe_size = RT_ALIGNEDSZ;
	if (!used_math())
		sigframe_size -= sizeof(__siginfo_fpu_t);
	sf = (struct rt_signal_frame __user *)
		get_sigframe(&ka->sa, regs, sigframe_size);
	if (invalid_frame_pointer(sf, sigframe_size))
		goto sigill;
	if (current_thread_info()->w_saved != 0)
		goto sigill;

	err = __put_user(regs->pc, &sf->regs.pc);
	err |= __put_user(regs->npc, &sf->regs.npc);
	err |= __put_user(regs->y, &sf->regs.y);
	psr = regs->psr;
	if (used_math())
		psr |= PSR_EF;
	err |= __put_user(psr, &sf->regs.psr);
	err |= __copy_to_user(&sf->regs.u_regs, regs->u_regs, sizeof(regs->u_regs));
	err |= __put_user(0, &sf->extra_size);

	if (psr & PSR_EF) {
		err |= save_fpu_state(regs, &sf->fpu_state);
		err |= __put_user(&sf->fpu_state, &sf->fpu_save);
	} else {
		err |= __put_user(0, &sf->fpu_save);
	}
	err |= __copy_to_user(&sf->mask, &oldset->sig[0], sizeof(sigset_t));

	/* Setup sigaltstack */
	err |= __put_user(current->sas_ss_sp, &sf->stack.ss_sp);
	err |= __put_user(sas_ss_flags(regs->u_regs[UREG_FP]), &sf->stack.ss_flags);
	err |= __put_user(current->sas_ss_size, &sf->stack.ss_size);

	err |= __copy_to_user(sf, (char *) regs->u_regs[UREG_FP],
			      sizeof(struct reg_window));

	err |= copy_siginfo_to_user(&sf->info, info);

	if (err)
		goto sigsegv;

	regs->u_regs[UREG_FP] = (unsigned long) sf;
	regs->u_regs[UREG_I0] = signo;
	regs->u_regs[UREG_I1] = (unsigned long) &sf->info;
	regs->u_regs[UREG_I2] = (unsigned long) &sf->regs;

	regs->pc = (unsigned long) ka->sa.sa_handler;
	regs->npc = (regs->pc + 4);

	if (ka->ka_restorer)
		regs->u_regs[UREG_I7] = (unsigned long)ka->ka_restorer;
	else {
		regs->u_regs[UREG_I7] = (unsigned long)(&(sf->insns[0]) - 2);

		/* mov __NR_sigreturn, %g1 */
		err |= __put_user(0x821020d8, &sf->insns[0]);

		/* t 0x10 */
		err |= __put_user(0x91d02010, &sf->insns[1]);
		if (err)
			goto sigsegv;

		/* Flush instruction space. */
		flush_sig_insns(current->mm, (unsigned long) &(sf->insns[0]));
	}
	return;

sigill:
	do_exit(SIGILL);
sigsegv:
	force_sigsegv(signo, current);
}
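
/* Deliver one signal: pick the appropriate frame builder (RT frame for
 * SA_SIGINFO handlers, otherwise new or SunOS-style depending on the
 * task), then add sa_mask to the blocked set and, unless SA_NOMASK is set,
 * block the signal being delivered as well.
 */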
static inline void
handle_signal(unsigned long signr, struct k_sigaction *ka,
	      siginfo_t *info, sigset_t *oldset, struct pt_regs *regs)
{
	if (ka->sa.sa_flags & SA_SIGINFO)
		new_setup_rt_frame(ka, regs, signr, oldset, info);
	else if (current->thread.new_signal)
		new_setup_frame(ka, regs, signr, oldset);
	else
		setup_frame(&ka->sa, regs, signr, oldset, info);

	spin_lock_irq(&current->sighand->siglock);
	sigorsets(&current->blocked, &current->blocked, &ka->sa.sa_mask);
	if (!(ka->sa.sa_flags & SA_NOMASK))
		sigaddset(&current->blocked, signr);
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);
}
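
/* Decide how an interrupted system call interacts with this handler:
 * ERESTARTNOHAND/ERESTART_RESTARTBLOCK (and ERESTARTSYS without SA_RESTART)
 * become EINTR with the carry bit set, while ERESTARTNOINTR (and
 * ERESTARTSYS with SA_RESTART) rewind pc/npc so the trap is re-executed.
 */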
static inline void syscall_restart(unsigned long orig_i0, struct pt_regs *regs,
				   struct sigaction *sa)
{
	switch(regs->u_regs[UREG_I0]) {
	case ERESTART_RESTARTBLOCK:
	case ERESTARTNOHAND:
	no_system_call_restart:
		regs->u_regs[UREG_I0] = EINTR;
		regs->psr |= PSR_C;
		break;
	case ERESTARTSYS:
		if (!(sa->sa_flags & SA_RESTART))
			goto no_system_call_restart;
		/* fallthrough */
	case ERESTARTNOINTR:
		regs->u_regs[UREG_I0] = orig_i0;
		regs->pc -= 4;
		regs->npc -= 4;
	}
}
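
/* Top-level signal dispatch, called on the way back to user space.  If a
 * signal is pending it is delivered via handle_signal() (after fixing up
 * any interrupted system call); otherwise pending restarts are replayed
 * and a saved sigmask from sigsuspend()/sigpause() is reinstated.
 */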
/* Note that 'init' is a special process: it doesn't get signals it doesn't
 * want to handle. Thus you cannot kill init even with a SIGKILL even by
 * mistake.
 */
asmlinkage void do_signal(struct pt_regs * regs, unsigned long orig_i0, int restart_syscall)
{
	siginfo_t info;
	struct sparc_deliver_cookie cookie;
	struct k_sigaction ka;
	int signr;
	sigset_t *oldset;

	cookie.restart_syscall = restart_syscall;
	cookie.orig_i0 = orig_i0;

	if (test_thread_flag(TIF_RESTORE_SIGMASK))
		oldset = &current->saved_sigmask;
	else
		oldset = &current->blocked;

	signr = get_signal_to_deliver(&info, &ka, regs, &cookie);
	if (signr > 0) {
		if (cookie.restart_syscall)
			syscall_restart(cookie.orig_i0, regs, &ka.sa);
		handle_signal(signr, &ka, &info, oldset, regs);

		/* a signal was successfully delivered; the saved
		 * sigmask will have been stored in the signal frame,
		 * and will be restored by sigreturn, so we can simply
		 * clear the TIF_RESTORE_SIGMASK flag.
		 */
		if (test_thread_flag(TIF_RESTORE_SIGMASK))
			clear_thread_flag(TIF_RESTORE_SIGMASK);
		return;
	}
	if (cookie.restart_syscall &&
	    (regs->u_regs[UREG_I0] == ERESTARTNOHAND ||
	     regs->u_regs[UREG_I0] == ERESTARTSYS ||
	     regs->u_regs[UREG_I0] == ERESTARTNOINTR)) {
		/* replay the system call when we are done */
		regs->u_regs[UREG_I0] = cookie.orig_i0;
		regs->pc -= 4;
		regs->npc -= 4;
	}
	if (cookie.restart_syscall &&
	    regs->u_regs[UREG_I0] == ERESTART_RESTARTBLOCK) {
		regs->u_regs[UREG_G1] = __NR_restart_syscall;
		regs->pc -= 4;
		regs->npc -= 4;
	}

	/* if there's no signal to deliver, we just put the saved sigmask
	 * back
	 */
	if (test_thread_flag(TIF_RESTORE_SIGMASK)) {
		clear_thread_flag(TIF_RESTORE_SIGMASK);
		sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
	}
}
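
/* Old SunOS-style sigstack(2) interface.  Since struct sigstack only
 * records a stack top and an on-stack flag, the base and size have to be
 * assumed: the alternate stack is taken to be SIGSTKSZ bytes ending at the
 * pointer supplied by the caller.
 */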
asmlinkage int
do_sys_sigstack(struct sigstack __user *ssptr, struct sigstack __user *ossptr,
		unsigned long sp)
{
	int ret = -EFAULT;

	/* First see if old state is wanted. */
	if (ossptr) {
		if (put_user(current->sas_ss_sp + current->sas_ss_size,
			     &ossptr->the_stack) ||
		    __put_user(on_sig_stack(sp), &ossptr->cur_status))
			goto out;
	}

	/* Now see if we want to update the new state. */
	if (ssptr) {
		char *ss_sp;

		if (get_user(ss_sp, &ssptr->the_stack))
			goto out;
		/* If the current stack was set with sigaltstack, don't
		   swap stacks while we are on it. */
		ret = -EPERM;
		if (current->sas_ss_sp && on_sig_stack(sp))
			goto out;

		/* Since we don't know the extent of the stack, and we don't
		   track onstack-ness, but rather calculate it, we must
		   presume a size. Ho hum this interface is lossy. */
		current->sas_ss_sp = (unsigned long)ss_sp - SIGSTKSZ;
		current->sas_ss_size = SIGSTKSZ;
	}

	ret = 0;
out:
	return ret;
}
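
/* Called from the ptrace signal path: if the traced task's interrupted
 * system call still needs restarting, rewind pc/npc (and set %g1 to
 * __NR_restart_syscall for ERESTART_RESTARTBLOCK) before the registers are
 * exposed, then mark the restart as handled in the cookie.
 */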
void ptrace_signal_deliver(struct pt_regs *regs, void *cookie)
{
	struct sparc_deliver_cookie *cp = cookie;

	if (cp->restart_syscall &&
	    (regs->u_regs[UREG_I0] == ERESTARTNOHAND ||
	     regs->u_regs[UREG_I0] == ERESTARTSYS ||
	     regs->u_regs[UREG_I0] == ERESTARTNOINTR)) {
		/* replay the system call when we are done */
		regs->u_regs[UREG_I0] = cp->orig_i0;
		regs->pc -= 4;
		regs->npc -= 4;
		cp->restart_syscall = 0;
	}

	if (cp->restart_syscall &&
	    regs->u_regs[UREG_I0] == ERESTART_RESTARTBLOCK) {
		regs->u_regs[UREG_G1] = __NR_restart_syscall;
		regs->pc -= 4;
		regs->npc -= 4;
		cp->restart_syscall = 0;
	}
}