/* $Id: signal.c,v 1.110 2002/02/08 03:57:14 davem Exp $
 * linux/arch/sparc/kernel/signal.c
 *
 * Copyright (C) 1991, 1992 Linus Torvalds
 * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
 * Copyright (C) 1996 Miguel de Icaza (miguel@nuclecu.unam.mx)
 * Copyright (C) 1997 Eddie C. Dost (ecd@skynet.be)
 */

#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/errno.h>
#include <linux/wait.h>
#include <linux/ptrace.h>
#include <linux/unistd.h>
#include <linux/mm.h>
#include <linux/tty.h>
#include <linux/smp.h>
#include <linux/binfmts.h>      /* do_coredump */
#include <linux/bitops.h>

#include <asm/uaccess.h>
#include <asm/ptrace.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/cacheflush.h>     /* flush_sig_insns */

#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))

extern void fpsave(unsigned long *fpregs, unsigned long *fsr,
                   void *fpqueue, unsigned long *fpqdepth);
extern void fpload(unsigned long *fpregs, unsigned long *fsr);

/* Signal frames: the original one (compatible with SunOS):
 *
 * Set up a signal frame... Make the stack look the way SunOS
 * expects it to look which is basically:
 *
 * ---------------------------------- <-- %sp at signal time
 * Struct sigcontext
 * Signal address
 * Ptr to sigcontext area above
 * Signal code
 * The signal number itself
 * One register window
 * ---------------------------------- <-- New %sp
 */
struct signal_sframe {
        struct reg_window sig_window;
        int sig_num;
        int sig_code;
        struct sigcontext __user *sig_scptr;
        int sig_address;
        struct sigcontext sig_context;
        unsigned int extramask[_NSIG_WORDS - 1];
};

/*
 * And the new one, intended to be used for Linux applications only
 * (we have enough in there to work with clone).
 * All the interesting bits are in the info field.
 */
struct new_signal_frame {
        struct sparc_stackf     ss;
        __siginfo_t             info;
        __siginfo_fpu_t __user  *fpu_save;
        unsigned long           insns[2] __attribute__ ((aligned (8)));
        unsigned int            extramask[_NSIG_WORDS - 1];
        unsigned int            extra_size; /* Should be 0 */
        __siginfo_fpu_t         fpu_state;
};
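
/* The RT signal frame, built by new_setup_rt_frame() and torn down by
 * do_rt_sigreturn(): a full siginfo_t and pt_regs plus the saved signal
 * mask, optional FPU state, the sigaltstack description and the
 * two-instruction sigreturn trampoline.
 */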
struct rt_signal_frame {
        struct sparc_stackf     ss;
        siginfo_t               info;
        struct pt_regs          regs;
        sigset_t                mask;
        __siginfo_fpu_t __user  *fpu_save;
        unsigned int            insns[2];
        stack_t                 stack;
        unsigned int            extra_size; /* Should be 0 */
        __siginfo_fpu_t         fpu_state;
};

/* Align macros */
#define SF_ALIGNEDSZ  (((sizeof(struct signal_sframe) + 7) & (~7)))
#define NF_ALIGNEDSZ  (((sizeof(struct new_signal_frame) + 7) & (~7)))
#define RT_ALIGNEDSZ  (((sizeof(struct rt_signal_frame) + 7) & (~7)))
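
/* Common body of sigsuspend()/sigpause(): install the given mask
 * (SIGKILL/SIGSTOP can never be blocked), sleep until a signal is
 * delivered, and arrange for the old mask to be put back via
 * TIF_RESTORE_SIGMASK on the way out of do_signal().
 */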
static int _sigpause_common(old_sigset_t set)
{
        set &= _BLOCKABLE;
        spin_lock_irq(&current->sighand->siglock);
        current->saved_sigmask = current->blocked;
        siginitset(&current->blocked, set);
        recalc_sigpending();
        spin_unlock_irq(&current->sighand->siglock);

        current->state = TASK_INTERRUPTIBLE;
        schedule();
        set_thread_flag(TIF_RESTORE_SIGMASK);

        return -ERESTARTNOHAND;
}

asmlinkage int sys_sigsuspend(old_sigset_t set)
{
        return _sigpause_common(set);
}
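
/* Pull the FPU state saved in a user signal frame back into
 * current->thread, dropping any lazy-FPU ownership this task still
 * holds (TIF_USEDFPU on SMP, last_task_used_math on UP) so the
 * registers are reloaded from thread state on the next FPU trap.
 */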
static inline int
restore_fpu_state(struct pt_regs *regs, __siginfo_fpu_t __user *fpu)
{
        int err;
#ifdef CONFIG_SMP
        if (test_tsk_thread_flag(current, TIF_USEDFPU))
                regs->psr &= ~PSR_EF;
#else
        if (current == last_task_used_math) {
                last_task_used_math = NULL;
                regs->psr &= ~PSR_EF;
        }
#endif
        set_used_math();
        clear_tsk_thread_flag(current, TIF_USEDFPU);

        if (!access_ok(VERIFY_READ, fpu, sizeof(*fpu)))
                return -EFAULT;

        err = __copy_from_user(&current->thread.float_regs[0], &fpu->si_float_regs[0],
                               (sizeof(unsigned long) * 32));
        err |= __get_user(current->thread.fsr, &fpu->si_fsr);
        err |= __get_user(current->thread.fpqdepth, &fpu->si_fpqdepth);
        if (current->thread.fpqdepth != 0)
                err |= __copy_from_user(&current->thread.fpqueue[0],
                                        &fpu->si_fpqueue[0],
                                        ((sizeof(unsigned long) +
                                        (sizeof(unsigned long *)))*16));
        return err;
}
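
/* sigreturn path for the Linux-native new_signal_frame layout:
 * validate the frame at %fp, restore pt_regs (only the condition
 * codes and the FPU-enable bit of %psr may be changed by the user),
 * restore any saved FPU state, and reinstate the old signal mask.
 */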
static inline void do_new_sigreturn (struct pt_regs *regs)
{
        struct new_signal_frame __user *sf;
        unsigned long up_psr, pc, npc;
        sigset_t set;
        __siginfo_fpu_t __user *fpu_save;
        int err;

        sf = (struct new_signal_frame __user *) regs->u_regs[UREG_FP];

        /* 1. Make sure we are not getting garbage from the user */
        if (!access_ok(VERIFY_READ, sf, sizeof(*sf)))
                goto segv_and_exit;

        if (((unsigned long) sf) & 3)
                goto segv_and_exit;

        err = __get_user(pc, &sf->info.si_regs.pc);
        err |= __get_user(npc, &sf->info.si_regs.npc);

        if ((pc | npc) & 3)
                goto segv_and_exit;

        /* 2. Restore the state */
        up_psr = regs->psr;
        err |= __copy_from_user(regs, &sf->info.si_regs, sizeof(struct pt_regs));

        /* User can only change condition codes and FPU enabling in %psr. */
        regs->psr = (up_psr & ~(PSR_ICC | PSR_EF))
                  | (regs->psr & (PSR_ICC | PSR_EF));

        err |= __get_user(fpu_save, &sf->fpu_save);

        if (fpu_save)
                err |= restore_fpu_state(regs, fpu_save);

        /* This is pretty much atomic, no amount of locking would prevent
         * the races which exist anyways.
         */
        err |= __get_user(set.sig[0], &sf->info.si_mask);
        err |= __copy_from_user(&set.sig[1], &sf->extramask,
                                (_NSIG_WORDS-1) * sizeof(unsigned int));
        if (err)
                goto segv_and_exit;

        sigdelsetmask(&set, ~_BLOCKABLE);
        spin_lock_irq(&current->sighand->siglock);
        current->blocked = set;
        recalc_sigpending();
        spin_unlock_irq(&current->sighand->siglock);
        return;

segv_and_exit:
        force_sig(SIGSEGV, current);
}
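
/* Entry point for the sigreturn system call.  Tasks using the
 * Linux-native frame layout are handed to do_new_sigreturn();
 * otherwise the SunOS-compatible sigcontext pointed to by %o0 is
 * validated and used to restore the pre-signal register state and
 * signal mask.
 */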
asmlinkage void do_sigreturn(struct pt_regs *regs)
{
        struct sigcontext __user *scptr;
        unsigned long pc, npc, psr;
        sigset_t set;
        int err;

        /* Always make any pending restarted system calls return -EINTR */
        current_thread_info()->restart_block.fn = do_no_restart_syscall;

        synchronize_user_stack();

        if (current->thread.new_signal) {
                do_new_sigreturn(regs);
                return;
        }

        scptr = (struct sigcontext __user *) regs->u_regs[UREG_I0];

        /* Check sanity of the user arg. */
        if (!access_ok(VERIFY_READ, scptr, sizeof(struct sigcontext)) ||
            (((unsigned long) scptr) & 3))
                goto segv_and_exit;

        err = __get_user(pc, &scptr->sigc_pc);
        err |= __get_user(npc, &scptr->sigc_npc);

        if ((pc | npc) & 3)
                goto segv_and_exit;

        /* This is pretty much atomic, no amount of locking would prevent
         * the races which exist anyways.
         */
        err |= __get_user(set.sig[0], &scptr->sigc_mask);
        /* Note that scptr + 1 points to extramask */
        err |= __copy_from_user(&set.sig[1], scptr + 1,
                                (_NSIG_WORDS - 1) * sizeof(unsigned int));

        if (err)
                goto segv_and_exit;

        sigdelsetmask(&set, ~_BLOCKABLE);
        spin_lock_irq(&current->sighand->siglock);
        current->blocked = set;
        recalc_sigpending();
        spin_unlock_irq(&current->sighand->siglock);

        regs->pc = pc;
        regs->npc = npc;

        err = __get_user(regs->u_regs[UREG_FP], &scptr->sigc_sp);
        err |= __get_user(regs->u_regs[UREG_I0], &scptr->sigc_o0);
        err |= __get_user(regs->u_regs[UREG_G1], &scptr->sigc_g1);

        /* User can only change condition codes in %psr. */
        err |= __get_user(psr, &scptr->sigc_psr);
        if (err)
                goto segv_and_exit;

        regs->psr &= ~(PSR_ICC);
        regs->psr |= (psr & PSR_ICC);
        return;

segv_and_exit:
        force_sig(SIGSEGV, current);
}
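
/* rt_sigreturn: tear down an rt_signal_frame.  Restores pt_regs, the
 * full sigset_t, any saved FPU state and the sigaltstack settings
 * that new_setup_rt_frame() placed on the user stack.
 */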
asmlinkage void do_rt_sigreturn(struct pt_regs *regs)
{
        struct rt_signal_frame __user *sf;
        unsigned int psr, pc, npc;
        __siginfo_fpu_t __user *fpu_save;
        mm_segment_t old_fs;
        sigset_t set;
        stack_t st;
        int err;

        synchronize_user_stack();
        sf = (struct rt_signal_frame __user *) regs->u_regs[UREG_FP];
        if (!access_ok(VERIFY_READ, sf, sizeof(*sf)) ||
            (((unsigned long) sf) & 0x03))
                goto segv;

        err = __get_user(pc, &sf->regs.pc);
        err |= __get_user(npc, &sf->regs.npc);
        err |= ((pc | npc) & 0x03);

        err |= __get_user(regs->y, &sf->regs.y);
        err |= __get_user(psr, &sf->regs.psr);

        err |= __copy_from_user(&regs->u_regs[UREG_G1],
                                &sf->regs.u_regs[UREG_G1], 15 * sizeof(u32));

        regs->psr = (regs->psr & ~PSR_ICC) | (psr & PSR_ICC);

        err |= __get_user(fpu_save, &sf->fpu_save);

        if (fpu_save)
                err |= restore_fpu_state(regs, fpu_save);
        err |= __copy_from_user(&set, &sf->mask, sizeof(sigset_t));

        err |= __copy_from_user(&st, &sf->stack, sizeof(stack_t));

        if (err)
                goto segv;

        regs->pc = pc;
        regs->npc = npc;

        /* It is more difficult to avoid calling this function than to
         * call it and ignore errors.
         */
        old_fs = get_fs();
        set_fs(KERNEL_DS);
        do_sigaltstack((const stack_t __user *) &st, NULL, (unsigned long)sf);
        set_fs(old_fs);

        sigdelsetmask(&set, ~_BLOCKABLE);
        spin_lock_irq(&current->sighand->siglock);
        current->blocked = set;
        recalc_sigpending();
        spin_unlock_irq(&current->sighand->siglock);
        return;
segv:
        force_sig(SIGSEGV, current);
}

/* Checks if the fp is valid */
static inline int invalid_frame_pointer(void __user *fp, int fplen)
{
        if ((((unsigned long) fp) & 7) ||
            !__access_ok((unsigned long)fp, fplen) ||
            ((sparc_cpu_model == sun4 || sparc_cpu_model == sun4c) &&
             ((unsigned long) fp < 0xe0000000 && (unsigned long) fp >= 0x20000000)))
                return 1;

        return 0;
}
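
/* Pick the location for a new signal frame: normally just below the
 * interrupted %sp, but on the alternate signal stack when SA_ONSTACK
 * is set, we are not already running on it, and the sigaltstack area
 * ends on an 8-byte boundary.
 */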
static inline void __user *get_sigframe(struct sigaction *sa, struct pt_regs *regs, unsigned long framesize)
{
        unsigned long sp;

        sp = regs->u_regs[UREG_FP];

        /* This is the X/Open sanctioned signal stack switching. */
        if (sa->sa_flags & SA_ONSTACK) {
                if (!on_sig_stack(sp) && !((current->sas_ss_sp + current->sas_ss_size) & 7))
                        sp = current->sas_ss_sp + current->sas_ss_size;
        }
        return (void __user *)(sp - framesize);
}
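
/* Build the old SunOS-compatible signal frame (struct signal_sframe)
 * on the user stack: the saved register window, a sigcontext, and a
 * SunOS-style (signal number, code, address) triple derived from
 * siginfo, then point %pc/%npc at the handler.
 */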
static inline void
setup_frame(struct sigaction *sa, struct pt_regs *regs, int signr, sigset_t *oldset, siginfo_t *info)
{
        struct signal_sframe __user *sframep;
        struct sigcontext __user *sc;
        int window = 0, err;
        unsigned long pc = regs->pc;
        unsigned long npc = regs->npc;
        struct thread_info *tp = current_thread_info();
        void __user *sig_address;
        int sig_code;

        synchronize_user_stack();
        sframep = (struct signal_sframe __user *)
                get_sigframe(sa, regs, SF_ALIGNEDSZ);
        if (invalid_frame_pointer(sframep, sizeof(*sframep))){
                /* Don't change signal code and address, so that
                 * post mortem debuggers can have a look.
                 */
                goto sigill_and_return;
        }

        sc = &sframep->sig_context;

        /* We've already made sure frame pointer isn't in kernel space... */
        err = __put_user((sas_ss_flags(regs->u_regs[UREG_FP]) == SS_ONSTACK),
                         &sc->sigc_onstack);
        err |= __put_user(oldset->sig[0], &sc->sigc_mask);
        err |= __copy_to_user(sframep->extramask, &oldset->sig[1],
                              (_NSIG_WORDS - 1) * sizeof(unsigned int));
        err |= __put_user(regs->u_regs[UREG_FP], &sc->sigc_sp);
        err |= __put_user(pc, &sc->sigc_pc);
        err |= __put_user(npc, &sc->sigc_npc);
        err |= __put_user(regs->psr, &sc->sigc_psr);
        err |= __put_user(regs->u_regs[UREG_G1], &sc->sigc_g1);
        err |= __put_user(regs->u_regs[UREG_I0], &sc->sigc_o0);
        err |= __put_user(tp->w_saved, &sc->sigc_oswins);
        if (tp->w_saved)
                for (window = 0; window < tp->w_saved; window++) {
                        put_user((char *)tp->rwbuf_stkptrs[window],
                                 &sc->sigc_spbuf[window]);
                        err |= __copy_to_user(&sc->sigc_wbuf[window],
                                              &tp->reg_window[window],
                                              sizeof(struct reg_window));
                }
        else
                err |= __copy_to_user(sframep, (char *) regs->u_regs[UREG_FP],
                                      sizeof(struct reg_window));

        tp->w_saved = 0; /* So process is allowed to execute. */

        err |= __put_user(signr, &sframep->sig_num);
        sig_address = NULL;
        sig_code = 0;
        if (SI_FROMKERNEL (info) && (info->si_code & __SI_MASK) == __SI_FAULT) {
                sig_address = info->si_addr;
                switch (signr) {
                case SIGSEGV:
                        switch (info->si_code) {
                        case SEGV_MAPERR: sig_code = SUBSIG_NOMAPPING; break;
                        default: sig_code = SUBSIG_PROTECTION; break;
                        }
                        break;
                case SIGILL:
                        switch (info->si_code) {
                        case ILL_ILLOPC: sig_code = SUBSIG_ILLINST; break;
                        case ILL_PRVOPC: sig_code = SUBSIG_PRIVINST; break;
                        case ILL_ILLTRP: sig_code = SUBSIG_BADTRAP(info->si_trapno); break;
                        default: sig_code = SUBSIG_STACK; break;
                        }
                        break;
                case SIGFPE:
                        switch (info->si_code) {
                        case FPE_INTDIV: sig_code = SUBSIG_IDIVZERO; break;
                        case FPE_INTOVF: sig_code = SUBSIG_FPINTOVFL; break;
                        case FPE_FLTDIV: sig_code = SUBSIG_FPDIVZERO; break;
                        case FPE_FLTOVF: sig_code = SUBSIG_FPOVFLOW; break;
                        case FPE_FLTUND: sig_code = SUBSIG_FPUNFLOW; break;
                        case FPE_FLTRES: sig_code = SUBSIG_FPINEXACT; break;
                        case FPE_FLTINV: sig_code = SUBSIG_FPOPERROR; break;
                        default: sig_code = SUBSIG_FPERROR; break;
                        }
                        break;
                case SIGBUS:
                        switch (info->si_code) {
                        case BUS_ADRALN: sig_code = SUBSIG_ALIGNMENT; break;
                        case BUS_ADRERR: sig_code = SUBSIG_MISCERROR; break;
                        default: sig_code = SUBSIG_BUSTIMEOUT; break;
                        }
                        break;
                case SIGEMT:
                        switch (info->si_code) {
                        case EMT_TAGOVF: sig_code = SUBSIG_TAG; break;
                        }
                        break;
                case SIGSYS:
                        if (info->si_code == (__SI_FAULT|0x100)) {
                                sig_code = info->si_trapno;
                                break;
                        }
                default:
                        sig_address = NULL;
                }
        }
        err |= __put_user((unsigned long)sig_address, &sframep->sig_address);
        err |= __put_user(sig_code, &sframep->sig_code);
        err |= __put_user(sc, &sframep->sig_scptr);
        if (err)
                goto sigsegv;

        regs->u_regs[UREG_FP] = (unsigned long) sframep;
        regs->pc = (unsigned long) sa->sa_handler;
        regs->npc = (regs->pc + 4);
        return;

sigill_and_return:
        do_exit(SIGILL);
sigsegv:
        force_sigsegv(signr, current);
}
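
/* Dump the current FPU state into the user signal frame.  If this
 * task still owns the FPU, temporarily enable it in the live %psr so
 * fpsave() can drain the registers and queue into current->thread,
 * then clear PSR_EF in the saved pt_regs and copy the state out.
 */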
static inline int
save_fpu_state(struct pt_regs *regs, __siginfo_fpu_t __user *fpu)
{
        int err = 0;
#ifdef CONFIG_SMP
        if (test_tsk_thread_flag(current, TIF_USEDFPU)) {
                put_psr(get_psr() | PSR_EF);
                fpsave(&current->thread.float_regs[0], &current->thread.fsr,
                       &current->thread.fpqueue[0], &current->thread.fpqdepth);
                regs->psr &= ~(PSR_EF);
                clear_tsk_thread_flag(current, TIF_USEDFPU);
        }
#else
        if (current == last_task_used_math) {
                put_psr(get_psr() | PSR_EF);
                fpsave(&current->thread.float_regs[0], &current->thread.fsr,
                       &current->thread.fpqueue[0], &current->thread.fpqdepth);
                last_task_used_math = NULL;
                regs->psr &= ~(PSR_EF);
        }
#endif
        err |= __copy_to_user(&fpu->si_float_regs[0],
                              &current->thread.float_regs[0],
                              (sizeof(unsigned long) * 32));
        err |= __put_user(current->thread.fsr, &fpu->si_fsr);
        err |= __put_user(current->thread.fpqdepth, &fpu->si_fpqdepth);
        if (current->thread.fpqdepth != 0)
                err |= __copy_to_user(&fpu->si_fpqueue[0],
                                      &current->thread.fpqueue[0],
                                      ((sizeof(unsigned long) +
                                      (sizeof(unsigned long *)))*16));
        clear_used_math();
        return err;
}
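
/* Build a Linux-native new_signal_frame on the user stack: saved
 * pt_regs, the old signal mask, optional FPU state, the interrupted
 * register window and, if no ka_restorer is registered, a two
 * instruction sigreturn trampoline that the handler returns through.
 */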
static inline void
new_setup_frame(struct k_sigaction *ka, struct pt_regs *regs,
                int signo, sigset_t *oldset)
{
        struct new_signal_frame __user *sf;
        int sigframe_size, err;

        /* 1. Make sure everything is clean */
        synchronize_user_stack();

        sigframe_size = NF_ALIGNEDSZ;
        if (!used_math())
                sigframe_size -= sizeof(__siginfo_fpu_t);

        sf = (struct new_signal_frame __user *)
                get_sigframe(&ka->sa, regs, sigframe_size);

        if (invalid_frame_pointer(sf, sigframe_size))
                goto sigill_and_return;

        if (current_thread_info()->w_saved != 0)
                goto sigill_and_return;

        /* 2. Save the current process state */
        err = __copy_to_user(&sf->info.si_regs, regs, sizeof(struct pt_regs));

        err |= __put_user(0, &sf->extra_size);

        if (used_math()) {
                err |= save_fpu_state(regs, &sf->fpu_state);
                err |= __put_user(&sf->fpu_state, &sf->fpu_save);
        } else {
                err |= __put_user(0, &sf->fpu_save);
        }

        err |= __put_user(oldset->sig[0], &sf->info.si_mask);
        err |= __copy_to_user(sf->extramask, &oldset->sig[1],
                              (_NSIG_WORDS - 1) * sizeof(unsigned int));
        err |= __copy_to_user(sf, (char *) regs->u_regs[UREG_FP],
                              sizeof(struct reg_window));
        if (err)
                goto sigsegv;

        /* 3. signal handler back-trampoline and parameters */
        regs->u_regs[UREG_FP] = (unsigned long) sf;
        regs->u_regs[UREG_I0] = signo;
        regs->u_regs[UREG_I1] = (unsigned long) &sf->info;
        regs->u_regs[UREG_I2] = (unsigned long) &sf->info;

        /* 4. signal handler */
        regs->pc = (unsigned long) ka->sa.sa_handler;
        regs->npc = (regs->pc + 4);

        /* 5. return to kernel instructions */
        if (ka->ka_restorer)
                regs->u_regs[UREG_I7] = (unsigned long)ka->ka_restorer;
        else {
                regs->u_regs[UREG_I7] = (unsigned long)(&(sf->insns[0]) - 2);

                /* mov __NR_sigreturn, %g1 */
                err |= __put_user(0x821020d8, &sf->insns[0]);

                /* t 0x10 */
                err |= __put_user(0x91d02010, &sf->insns[1]);
                if (err)
                        goto sigsegv;

                /* Flush instruction space. */
                flush_sig_insns(current->mm, (unsigned long) &(sf->insns[0]));
        }
        return;

sigill_and_return:
        do_exit(SIGILL);
sigsegv:
        force_sigsegv(signo, current);
}
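
/* Like new_setup_frame(), but for SA_SIGINFO handlers: the frame also
 * carries a full siginfo_t, the complete sigset_t and the current
 * sigaltstack description, and %i2 points at the saved registers so
 * the handler sees a (signo, siginfo, context) style argument list.
 */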
static inline void
new_setup_rt_frame(struct k_sigaction *ka, struct pt_regs *regs,
                   int signo, sigset_t *oldset, siginfo_t *info)
{
        struct rt_signal_frame __user *sf;
        int sigframe_size;
        unsigned int psr;
        int err;

        synchronize_user_stack();
        sigframe_size = RT_ALIGNEDSZ;
        if (!used_math())
                sigframe_size -= sizeof(__siginfo_fpu_t);
        sf = (struct rt_signal_frame __user *)
                get_sigframe(&ka->sa, regs, sigframe_size);
        if (invalid_frame_pointer(sf, sigframe_size))
                goto sigill;
        if (current_thread_info()->w_saved != 0)
                goto sigill;

        err = __put_user(regs->pc, &sf->regs.pc);
        err |= __put_user(regs->npc, &sf->regs.npc);
        err |= __put_user(regs->y, &sf->regs.y);
        psr = regs->psr;
        if (used_math())
                psr |= PSR_EF;
        err |= __put_user(psr, &sf->regs.psr);
        err |= __copy_to_user(&sf->regs.u_regs, regs->u_regs, sizeof(regs->u_regs));
        err |= __put_user(0, &sf->extra_size);

        if (psr & PSR_EF) {
                err |= save_fpu_state(regs, &sf->fpu_state);
                err |= __put_user(&sf->fpu_state, &sf->fpu_save);
        } else {
                err |= __put_user(0, &sf->fpu_save);
        }
        err |= __copy_to_user(&sf->mask, &oldset->sig[0], sizeof(sigset_t));

        /* Setup sigaltstack */
        err |= __put_user(current->sas_ss_sp, &sf->stack.ss_sp);
        err |= __put_user(sas_ss_flags(regs->u_regs[UREG_FP]), &sf->stack.ss_flags);
        err |= __put_user(current->sas_ss_size, &sf->stack.ss_size);

        err |= __copy_to_user(sf, (char *) regs->u_regs[UREG_FP],
                              sizeof(struct reg_window));

        err |= copy_siginfo_to_user(&sf->info, info);

        if (err)
                goto sigsegv;

        regs->u_regs[UREG_FP] = (unsigned long) sf;
        regs->u_regs[UREG_I0] = signo;
        regs->u_regs[UREG_I1] = (unsigned long) &sf->info;
        regs->u_regs[UREG_I2] = (unsigned long) &sf->regs;

        regs->pc = (unsigned long) ka->sa.sa_handler;
        regs->npc = (regs->pc + 4);

        if (ka->ka_restorer)
                regs->u_regs[UREG_I7] = (unsigned long)ka->ka_restorer;
        else {
                regs->u_regs[UREG_I7] = (unsigned long)(&(sf->insns[0]) - 2);

                /* mov __NR_sigreturn, %g1 */
                err |= __put_user(0x821020d8, &sf->insns[0]);

                /* t 0x10 */
                err |= __put_user(0x91d02010, &sf->insns[1]);
                if (err)
                        goto sigsegv;

                /* Flush instruction space. */
                flush_sig_insns(current->mm, (unsigned long) &(sf->insns[0]));
        }
        return;

sigill:
        do_exit(SIGILL);
sigsegv:
        force_sigsegv(signo, current);
}
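
/* Deliver one signal: pick the frame format (rt frame for SA_SIGINFO
 * handlers, new or SunOS-compatible frame otherwise), set it up on
 * the user stack, then block the handler's sa_mask (plus the signal
 * itself unless SA_NOMASK is set) for the duration of the handler.
 */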
static inline void
handle_signal(unsigned long signr, struct k_sigaction *ka,
              siginfo_t *info, sigset_t *oldset, struct pt_regs *regs)
{
        if (ka->sa.sa_flags & SA_SIGINFO)
                new_setup_rt_frame(ka, regs, signr, oldset, info);
        else if (current->thread.new_signal)
                new_setup_frame(ka, regs, signr, oldset);
        else
                setup_frame(&ka->sa, regs, signr, oldset, info);

        spin_lock_irq(&current->sighand->siglock);
        sigorsets(&current->blocked, &current->blocked, &ka->sa.sa_mask);
        if (!(ka->sa.sa_flags & SA_NOMASK))
                sigaddset(&current->blocked, signr);
        recalc_sigpending();
        spin_unlock_irq(&current->sighand->siglock);
}
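
/* Decide how an interrupted system call interacts with the handler
 * about to run: either fail it with EINTR (carry bit set) or back
 * %pc/%npc up over the trap instruction so the call is reissued,
 * depending on the error code in %i0 and the handler's SA_RESTART flag.
 */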
static inline void syscall_restart(unsigned long orig_i0, struct pt_regs *regs,
                                   struct sigaction *sa)
{
        switch(regs->u_regs[UREG_I0]) {
        case ERESTART_RESTARTBLOCK:
        case ERESTARTNOHAND:
        no_system_call_restart:
                regs->u_regs[UREG_I0] = EINTR;
                regs->psr |= PSR_C;
                break;
        case ERESTARTSYS:
                if (!(sa->sa_flags & SA_RESTART))
                        goto no_system_call_restart;
                /* fallthrough */
        case ERESTARTNOINTR:
                regs->u_regs[UREG_I0] = orig_i0;
                regs->pc -= 4;
                regs->npc -= 4;
        }
}
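
/* Main signal-delivery routine, run on the way back to user mode:
 * dequeue one signal (if any), fix up an interrupted system call and
 * hand the signal to handle_signal(); with nothing to deliver, arrange
 * the syscall restart itself and put back any saved sigmask.
 */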
/* Note that 'init' is a special process: it doesn't get signals it doesn't
 * want to handle. Thus you cannot kill init even with a SIGKILL even by
 * mistake.
 */
asmlinkage void do_signal(struct pt_regs * regs, unsigned long orig_i0, int restart_syscall)
{
        siginfo_t info;
        struct sparc_deliver_cookie cookie;
        struct k_sigaction ka;
        int signr;
        sigset_t *oldset;

        cookie.restart_syscall = restart_syscall;
        cookie.orig_i0 = orig_i0;

        if (test_thread_flag(TIF_RESTORE_SIGMASK))
                oldset = &current->saved_sigmask;
        else
                oldset = &current->blocked;

        signr = get_signal_to_deliver(&info, &ka, regs, &cookie);
        if (signr > 0) {
                if (cookie.restart_syscall)
                        syscall_restart(cookie.orig_i0, regs, &ka.sa);
                handle_signal(signr, &ka, &info, oldset, regs);

                /* a signal was successfully delivered; the saved
                 * sigmask will have been stored in the signal frame,
                 * and will be restored by sigreturn, so we can simply
                 * clear the TIF_RESTORE_SIGMASK flag.
                 */
                if (test_thread_flag(TIF_RESTORE_SIGMASK))
                        clear_thread_flag(TIF_RESTORE_SIGMASK);
                return;
        }
        if (cookie.restart_syscall &&
            (regs->u_regs[UREG_I0] == ERESTARTNOHAND ||
             regs->u_regs[UREG_I0] == ERESTARTSYS ||
             regs->u_regs[UREG_I0] == ERESTARTNOINTR)) {
                /* replay the system call when we are done */
                regs->u_regs[UREG_I0] = cookie.orig_i0;
                regs->pc -= 4;
                regs->npc -= 4;
        }
        if (cookie.restart_syscall &&
            regs->u_regs[UREG_I0] == ERESTART_RESTARTBLOCK) {
                regs->u_regs[UREG_G1] = __NR_restart_syscall;
                regs->pc -= 4;
                regs->npc -= 4;
        }

        /* if there's no signal to deliver, we just put the saved sigmask
         * back
         */
        if (test_thread_flag(TIF_RESTORE_SIGMASK)) {
                clear_thread_flag(TIF_RESTORE_SIGMASK);
                sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
        }
}
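
/* SunOS-style sigstack(2) emulation.  struct sigstack only records a
 * stack pointer and an on-stack flag, so the extent of the area has
 * to be guessed: the code assumes SIGSTKSZ bytes below the supplied
 * pointer.
 */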
asmlinkage int
do_sys_sigstack(struct sigstack __user *ssptr, struct sigstack __user *ossptr,
                unsigned long sp)
{
        int ret = -EFAULT;

        /* First see if old state is wanted. */
        if (ossptr) {
                if (put_user(current->sas_ss_sp + current->sas_ss_size,
                             &ossptr->the_stack) ||
                    __put_user(on_sig_stack(sp), &ossptr->cur_status))
                        goto out;
        }

        /* Now see if we want to update the new state. */
        if (ssptr) {
                char *ss_sp;

                if (get_user(ss_sp, &ssptr->the_stack))
                        goto out;

                /* If the current stack was set with sigaltstack, don't
                 * swap stacks while we are on it.
                 */
                ret = -EPERM;
                if (current->sas_ss_sp && on_sig_stack(sp))
                        goto out;

                /* Since we don't know the extent of the stack, and we don't
                 * track onstack-ness, but rather calculate it, we must
                 * presume a size. Ho hum this interface is lossy.
                 */
                current->sas_ss_sp = (unsigned long)ss_sp - SIGSTKSZ;
                current->sas_ss_size = SIGSTKSZ;
        }

        ret = 0;
out:
        return ret;
}
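
/* Called from the ptrace signal path.  If the traced task was in the
 * middle of a restartable system call, rewind %pc/%npc (and, for
 * ERESTART_RESTARTBLOCK, load %g1 with __NR_restart_syscall) exactly
 * once, then clear the restart flag in the cookie.
 */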
void ptrace_signal_deliver(struct pt_regs *regs, void *cookie)
{
        struct sparc_deliver_cookie *cp = cookie;

        if (cp->restart_syscall &&
            (regs->u_regs[UREG_I0] == ERESTARTNOHAND ||
             regs->u_regs[UREG_I0] == ERESTARTSYS ||
             regs->u_regs[UREG_I0] == ERESTARTNOINTR)) {
                /* replay the system call when we are done */
                regs->u_regs[UREG_I0] = cp->orig_i0;
                regs->pc -= 4;
                regs->npc -= 4;
                cp->restart_syscall = 0;
        }

        if (cp->restart_syscall &&
            regs->u_regs[UREG_I0] == ERESTART_RESTARTBLOCK) {
                regs->u_regs[UREG_G1] = __NR_restart_syscall;
                regs->pc -= 4;
                regs->npc -= 4;
                cp->restart_syscall = 0;
        }
}