signal.c

/*
 * linux/kernel/signal.c
 *
 * Copyright (C) 1991, 1992 Linus Torvalds
 *
 * 1997-11-02 Modified for POSIX.1b signals by Richard Henderson
 *
 * 2003-06-02 Jim Houston - Concurrent Computer Corp.
 *            Changes to use preallocated sigqueue structures
 *            to allow signals to be sent reliably.
 */

#include <linux/slab.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/tty.h>
#include <linux/binfmts.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/ptrace.h>
#include <linux/signal.h>
#include <linux/signalfd.h>
#include <linux/ratelimit.h>
#include <linux/tracehook.h>
#include <linux/capability.h>
#include <linux/freezer.h>
#include <linux/pid_namespace.h>
#include <linux/nsproxy.h>
#define CREATE_TRACE_POINTS
#include <trace/events/signal.h>

#include <asm/param.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm/siginfo.h>
#include "audit.h"	/* audit_signal_info() */
/*
 * SLAB caches for signal bits.
 */

static struct kmem_cache *sigqueue_cachep;

int print_fatal_signals __read_mostly;

static void __user *sig_handler(struct task_struct *t, int sig)
{
        return t->sighand->action[sig - 1].sa.sa_handler;
}

static int sig_handler_ignored(void __user *handler, int sig)
{
        /* Is it explicitly or implicitly ignored? */
        return handler == SIG_IGN ||
                (handler == SIG_DFL && sig_kernel_ignore(sig));
}
static int sig_task_ignored(struct task_struct *t, int sig,
                int from_ancestor_ns)
{
        void __user *handler;

        handler = sig_handler(t, sig);

        if (unlikely(t->signal->flags & SIGNAL_UNKILLABLE) &&
                        handler == SIG_DFL && !from_ancestor_ns)
                return 1;

        return sig_handler_ignored(handler, sig);
}

static int sig_ignored(struct task_struct *t, int sig, int from_ancestor_ns)
{
        /*
         * Blocked signals are never ignored, since the
         * signal handler may change by the time it is
         * unblocked.
         */
        if (sigismember(&t->blocked, sig) || sigismember(&t->real_blocked, sig))
                return 0;

        if (!sig_task_ignored(t, sig, from_ancestor_ns))
                return 0;

        /*
         * Tracers may want to know about even ignored signals.
         */
        return !tracehook_consider_ignored_signal(t, sig);
}
/*
 * Re-calculate pending state from the set of locally pending
 * signals, globally pending signals, and blocked signals.
 */
static inline int has_pending_signals(sigset_t *signal, sigset_t *blocked)
{
        unsigned long ready;
        long i;

        switch (_NSIG_WORDS) {
        default:
                for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;)
                        ready |= signal->sig[i] &~ blocked->sig[i];
                break;

        case 4: ready  = signal->sig[3] &~ blocked->sig[3];
                ready |= signal->sig[2] &~ blocked->sig[2];
                ready |= signal->sig[1] &~ blocked->sig[1];
                ready |= signal->sig[0] &~ blocked->sig[0];
                break;

        case 2: ready  = signal->sig[1] &~ blocked->sig[1];
                ready |= signal->sig[0] &~ blocked->sig[0];
                break;

        case 1: ready  = signal->sig[0] &~ blocked->sig[0];
        }
        return ready != 0;
}
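
/*
 * In other words: a signal is "ready" iff its bit is set in ->signal and
 * clear in ->blocked. Worked example for the one-word case: with
 * signal->sig[0] == 0x5 (signals 1 and 3 pending) and blocked->sig[0] ==
 * 0x1 (signal 1 blocked), ready == 0x4 and the function returns non-zero.
 */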
#define PENDING(p,b) has_pending_signals(&(p)->signal, (b))

static int recalc_sigpending_tsk(struct task_struct *t)
{
        if ((t->group_stop & GROUP_STOP_PENDING) ||
            PENDING(&t->pending, &t->blocked) ||
            PENDING(&t->signal->shared_pending, &t->blocked)) {
                set_tsk_thread_flag(t, TIF_SIGPENDING);
                return 1;
        }
        /*
         * We must never clear the flag in another thread, or in current
         * when it's possible the current syscall is returning -ERESTART*.
         * So we don't clear it here, and only the callers who know they
         * should clear it do so.
         */
        return 0;
}
/*
 * After recalculating TIF_SIGPENDING, we need to make sure the task wakes up.
 * This is superfluous when called on current, the wakeup is a harmless no-op.
 */
void recalc_sigpending_and_wake(struct task_struct *t)
{
        if (recalc_sigpending_tsk(t))
                signal_wake_up(t, 0);
}

void recalc_sigpending(void)
{
        if (unlikely(tracehook_force_sigpending()))
                set_thread_flag(TIF_SIGPENDING);
        else if (!recalc_sigpending_tsk(current) && !freezing(current))
                clear_thread_flag(TIF_SIGPENDING);
}
/* Given the mask, find the first available signal that should be serviced. */

#define SYNCHRONOUS_MASK \
        (sigmask(SIGSEGV) | sigmask(SIGBUS) | sigmask(SIGILL) | \
         sigmask(SIGTRAP) | sigmask(SIGFPE))

int next_signal(struct sigpending *pending, sigset_t *mask)
{
        unsigned long i, *s, *m, x;
        int sig = 0;

        s = pending->signal.sig;
        m = mask->sig;

        /*
         * Handle the first word specially: it contains the
         * synchronous signals that need to be dequeued first.
         */
        x = *s &~ *m;
        if (x) {
                if (x & SYNCHRONOUS_MASK)
                        x &= SYNCHRONOUS_MASK;
                sig = ffz(~x) + 1;
                return sig;
        }

        switch (_NSIG_WORDS) {
        default:
                for (i = 1; i < _NSIG_WORDS; ++i) {
                        x = *++s &~ *++m;
                        if (!x)
                                continue;
                        sig = ffz(~x) + i*_NSIG_BPW + 1;
                        break;
                }
                break;

        case 2:
                x = s[1] &~ m[1];
                if (!x)
                        break;
                sig = ffz(~x) + _NSIG_BPW + 1;
                break;

        case 1:
                /* Nothing to do */
                break;
        }

        return sig;
}
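
/*
 * Note on the bit trick above: ffz(~x) yields the index of the lowest set
 * bit of x (the lowest zero bit of ~x). With x == 0b10100, for instance,
 * ffz(~x) == 2 and next_signal() reports signal 3: signal numbers are
 * 1-based while bit positions are 0-based, hence the "+ 1".
 */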
static inline void print_dropped_signal(int sig)
{
        static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 10);

        if (!print_fatal_signals)
                return;

        if (!__ratelimit(&ratelimit_state))
                return;

        printk(KERN_INFO "%s/%d: reached RLIMIT_SIGPENDING, dropped signal %d\n",
                        current->comm, current->pid, sig);
}
/**
 * task_clear_group_stop_trapping - clear group stop trapping bit
 * @task: target task
 *
 * If GROUP_STOP_TRAPPING is set, a ptracer is waiting for us. Clear it
 * and wake up the ptracer. Note that we don't need any further locking.
 * @task->siglock guarantees that @task->parent points to the ptracer.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 */
static void task_clear_group_stop_trapping(struct task_struct *task)
{
        if (unlikely(task->group_stop & GROUP_STOP_TRAPPING)) {
                task->group_stop &= ~GROUP_STOP_TRAPPING;
                __wake_up_sync(&task->parent->signal->wait_chldexit,
                               TASK_UNINTERRUPTIBLE, 1);
        }
}

/**
 * task_clear_group_stop_pending - clear pending group stop
 * @task: target task
 *
 * Clear group stop states for @task.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 */
void task_clear_group_stop_pending(struct task_struct *task)
{
        task->group_stop &= ~(GROUP_STOP_PENDING | GROUP_STOP_CONSUME);
}

/**
 * task_participate_group_stop - participate in a group stop
 * @task: task participating in a group stop
 *
 * @task has GROUP_STOP_PENDING set and is participating in a group stop.
 * Group stop states are cleared and the group stop count is consumed if
 * %GROUP_STOP_CONSUME was set. If the consumption completes the group
 * stop, the appropriate %SIGNAL_* flags are set.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 */
static bool task_participate_group_stop(struct task_struct *task)
{
        struct signal_struct *sig = task->signal;
        bool consume = task->group_stop & GROUP_STOP_CONSUME;

        WARN_ON_ONCE(!(task->group_stop & GROUP_STOP_PENDING));

        task_clear_group_stop_pending(task);

        if (!consume)
                return false;

        if (!WARN_ON_ONCE(sig->group_stop_count == 0))
                sig->group_stop_count--;

        if (!sig->group_stop_count) {
                sig->flags = SIGNAL_STOP_STOPPED;
                return true;
        }
        return false;
}
/*
 * allocate a new signal queue record
 * - this may be called without locks if and only if t == current, otherwise an
 *   appropriate lock must be held to stop the target task from exiting
 */
static struct sigqueue *
__sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimit)
{
        struct sigqueue *q = NULL;
        struct user_struct *user;

        /*
         * Protect access to @t credentials. This can go away when all
         * callers hold rcu read lock.
         */
        rcu_read_lock();
        user = get_uid(__task_cred(t)->user);
        atomic_inc(&user->sigpending);
        rcu_read_unlock();

        if (override_rlimit ||
            atomic_read(&user->sigpending) <=
                        task_rlimit(t, RLIMIT_SIGPENDING)) {
                q = kmem_cache_alloc(sigqueue_cachep, flags);
        } else {
                print_dropped_signal(sig);
        }

        if (unlikely(q == NULL)) {
                atomic_dec(&user->sigpending);
                free_uid(user);
        } else {
                INIT_LIST_HEAD(&q->list);
                q->flags = 0;
                q->user = user;
        }

        return q;
}
static void __sigqueue_free(struct sigqueue *q)
{
        if (q->flags & SIGQUEUE_PREALLOC)
                return;
        atomic_dec(&q->user->sigpending);
        free_uid(q->user);
        kmem_cache_free(sigqueue_cachep, q);
}

void flush_sigqueue(struct sigpending *queue)
{
        struct sigqueue *q;

        sigemptyset(&queue->signal);
        while (!list_empty(&queue->list)) {
                q = list_entry(queue->list.next, struct sigqueue, list);
                list_del_init(&q->list);
                __sigqueue_free(q);
        }
}
/*
 * Flush all pending signals for a task.
 */
void __flush_signals(struct task_struct *t)
{
        clear_tsk_thread_flag(t, TIF_SIGPENDING);
        flush_sigqueue(&t->pending);
        flush_sigqueue(&t->signal->shared_pending);
}

void flush_signals(struct task_struct *t)
{
        unsigned long flags;

        spin_lock_irqsave(&t->sighand->siglock, flags);
        __flush_signals(t);
        spin_unlock_irqrestore(&t->sighand->siglock, flags);
}

static void __flush_itimer_signals(struct sigpending *pending)
{
        sigset_t signal, retain;
        struct sigqueue *q, *n;

        signal = pending->signal;
        sigemptyset(&retain);

        list_for_each_entry_safe(q, n, &pending->list, list) {
                int sig = q->info.si_signo;

                if (likely(q->info.si_code != SI_TIMER)) {
                        sigaddset(&retain, sig);
                } else {
                        sigdelset(&signal, sig);
                        list_del_init(&q->list);
                        __sigqueue_free(q);
                }
        }

        sigorsets(&pending->signal, &signal, &retain);
}

void flush_itimer_signals(void)
{
        struct task_struct *tsk = current;
        unsigned long flags;

        spin_lock_irqsave(&tsk->sighand->siglock, flags);
        __flush_itimer_signals(&tsk->pending);
        __flush_itimer_signals(&tsk->signal->shared_pending);
        spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
}

void ignore_signals(struct task_struct *t)
{
        int i;

        for (i = 0; i < _NSIG; ++i)
                t->sighand->action[i].sa.sa_handler = SIG_IGN;

        flush_signals(t);
}
/*
 * Flush all handlers for a task.
 */
void
flush_signal_handlers(struct task_struct *t, int force_default)
{
        int i;
        struct k_sigaction *ka = &t->sighand->action[0];

        for (i = _NSIG ; i != 0 ; i--) {
                if (force_default || ka->sa.sa_handler != SIG_IGN)
                        ka->sa.sa_handler = SIG_DFL;
                ka->sa.sa_flags = 0;
                sigemptyset(&ka->sa.sa_mask);
                ka++;
        }
}

int unhandled_signal(struct task_struct *tsk, int sig)
{
        void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;

        if (is_global_init(tsk))
                return 1;
        if (handler != SIG_IGN && handler != SIG_DFL)
                return 0;
        return !tracehook_consider_fatal_signal(tsk, sig);
}
/*
 * Notify the system that a driver wants to block all signals for this
 * process, and wants to be notified if any signals at all were to be
 * sent/acted upon. If the notifier routine returns non-zero, then the
 * signal will be acted upon after all. If the notifier routine returns 0,
 * then the signal will be blocked. Only one block per process is
 * allowed. priv is a pointer to private data that the notifier routine
 * can use to determine if the signal should be blocked or not.
 */
void
block_all_signals(int (*notifier)(void *priv), void *priv, sigset_t *mask)
{
        unsigned long flags;

        spin_lock_irqsave(&current->sighand->siglock, flags);
        current->notifier_mask = mask;
        current->notifier_data = priv;
        current->notifier = notifier;
        spin_unlock_irqrestore(&current->sighand->siglock, flags);
}

/* Notify the system that blocking has ended. */
void
unblock_all_signals(void)
{
        unsigned long flags;

        spin_lock_irqsave(&current->sighand->siglock, flags);
        current->notifier = NULL;
        current->notifier_data = NULL;
        recalc_sigpending();
        spin_unlock_irqrestore(&current->sighand->siglock, flags);
}
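
/*
 * Usage sketch for the pair above (hypothetical caller, not code from
 * this file; my_notifier and my_dev are purely illustrative names):
 *
 *        static int my_notifier(void *priv)
 *        {
 *                struct my_dev *dev = priv;
 *                return dev->shutting_down;   (non-zero: deliver after all)
 *        }
 *
 *        sigfillset(&mask);
 *        block_all_signals(my_notifier, dev, &mask);
 *        ... critical section ...
 *        unblock_all_signals();
 */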
static void collect_signal(int sig, struct sigpending *list, siginfo_t *info)
{
        struct sigqueue *q, *first = NULL;

        /*
         * Collect the siginfo appropriate to this signal. Check if
         * there is another siginfo for the same signal.
         */
        list_for_each_entry(q, &list->list, list) {
                if (q->info.si_signo == sig) {
                        if (first)
                                goto still_pending;
                        first = q;
                }
        }

        sigdelset(&list->signal, sig);

        if (first) {
still_pending:
                list_del_init(&first->list);
                copy_siginfo(info, &first->info);
                __sigqueue_free(first);
        } else {
                /*
                 * Ok, it wasn't in the queue. This must be
                 * a fast-pathed signal or we must have been
                 * out of queue space. So zero out the info.
                 */
                info->si_signo = sig;
                info->si_errno = 0;
                info->si_code = SI_USER;
                info->si_pid = 0;
                info->si_uid = 0;
        }
}

static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
                        siginfo_t *info)
{
        int sig = next_signal(pending, mask);

        if (sig) {
                if (current->notifier) {
                        if (sigismember(current->notifier_mask, sig)) {
                                if (!(current->notifier)(current->notifier_data)) {
                                        clear_thread_flag(TIF_SIGPENDING);
                                        return 0;
                                }
                        }
                }

                collect_signal(sig, pending, info);
        }

        return sig;
}
/*
 * Dequeue a signal and return the element to the caller, which is
 * expected to free it.
 *
 * All callers have to hold the siglock.
 */
int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
{
        int signr;

        /* We only dequeue private signals from ourselves, we don't let
         * signalfd steal them
         */
        signr = __dequeue_signal(&tsk->pending, mask, info);
        if (!signr) {
                signr = __dequeue_signal(&tsk->signal->shared_pending,
                                         mask, info);
                /*
                 * itimer signal ?
                 *
                 * itimers are process shared and we restart periodic
                 * itimers in the signal delivery path to prevent DoS
                 * attacks in the high resolution timer case. This is
                 * compliant with the old way of self-restarting
                 * itimers, as the SIGALRM is a legacy signal and only
                 * queued once. Changing the restart behaviour to
                 * restart the timer in the signal dequeue path is
                 * reducing the timer noise on heavily loaded !highres
                 * systems too.
                 */
                if (unlikely(signr == SIGALRM)) {
                        struct hrtimer *tmr = &tsk->signal->real_timer;

                        if (!hrtimer_is_queued(tmr) &&
                            tsk->signal->it_real_incr.tv64 != 0) {
                                hrtimer_forward(tmr, tmr->base->get_time(),
                                                tsk->signal->it_real_incr);
                                hrtimer_restart(tmr);
                        }
                }
        }

        recalc_sigpending();
        if (!signr)
                return 0;

        if (unlikely(sig_kernel_stop(signr))) {
                /*
                 * Set a marker that we have dequeued a stop signal. Our
                 * caller might release the siglock and then the pending
                 * stop signal it is about to process is no longer in the
                 * pending bitmasks, but must still be cleared by a SIGCONT
                 * (and overruled by a SIGKILL). So those cases clear this
                 * shared flag after we've set it. Note that this flag may
                 * remain set after the signal we return is ignored or
                 * handled. That doesn't matter because its only purpose
                 * is to alert stop-signal processing code when another
                 * processor has come along and cleared the flag.
                 */
                tsk->signal->flags |= SIGNAL_STOP_DEQUEUED;
        }
        if ((info->si_code & __SI_MASK) == __SI_TIMER && info->si_sys_private) {
                /*
                 * Release the siglock to ensure proper locking order
                 * of timer locks outside of siglocks. Note, we leave
                 * irqs disabled here, since the posix-timers code is
                 * about to disable them again anyway.
                 */
                spin_unlock(&tsk->sighand->siglock);
                do_schedule_next_timer(info);
                spin_lock(&tsk->sighand->siglock);
        }
        return signr;
}
/*
 * Tell a process that it has a new active signal.
 *
 * NOTE! we rely on the previous spin_lock to
 * lock interrupts for us! We can only be called with
 * "siglock" held, and the local interrupt must
 * have been disabled when that got acquired!
 *
 * No need to set need_resched since signal event passing
 * goes through ->blocked
 */
void signal_wake_up(struct task_struct *t, int resume)
{
        unsigned int mask;

        set_tsk_thread_flag(t, TIF_SIGPENDING);

        /*
         * For SIGKILL, we want to wake it up in the stopped/traced/killable
         * case. We don't check t->state here because there is a race with it
         * executing on another processor and just now entering stopped state.
         * By using wake_up_state, we ensure the process will wake up and
         * handle its death signal.
         */
        mask = TASK_INTERRUPTIBLE;
        if (resume)
                mask |= TASK_WAKEKILL;
        if (!wake_up_state(t, mask))
                kick_process(t);
}
/*
 * Remove signals in mask from the pending set and queue.
 * Returns 1 if any signals were found.
 *
 * All callers must be holding the siglock.
 *
 * This version takes a sigset mask and looks at all signals,
 * not just those in the first mask word.
 */
static int rm_from_queue_full(sigset_t *mask, struct sigpending *s)
{
        struct sigqueue *q, *n;
        sigset_t m;

        sigandsets(&m, mask, &s->signal);
        if (sigisemptyset(&m))
                return 0;

        signandsets(&s->signal, &s->signal, mask);
        list_for_each_entry_safe(q, n, &s->list, list) {
                if (sigismember(mask, q->info.si_signo)) {
                        list_del_init(&q->list);
                        __sigqueue_free(q);
                }
        }
        return 1;
}

/*
 * Remove signals in mask from the pending set and queue.
 * Returns 1 if any signals were found.
 *
 * All callers must be holding the siglock.
 */
static int rm_from_queue(unsigned long mask, struct sigpending *s)
{
        struct sigqueue *q, *n;

        if (!sigtestsetmask(&s->signal, mask))
                return 0;

        sigdelsetmask(&s->signal, mask);
        list_for_each_entry_safe(q, n, &s->list, list) {
                if (q->info.si_signo < SIGRTMIN &&
                    (mask & sigmask(q->info.si_signo))) {
                        list_del_init(&q->list);
                        __sigqueue_free(q);
                }
        }
        return 1;
}

static inline int is_si_special(const struct siginfo *info)
{
        return info <= SEND_SIG_FORCED;
}

static inline bool si_fromuser(const struct siginfo *info)
{
        return info == SEND_SIG_NOINFO ||
                (!is_si_special(info) && SI_FROMUSER(info));
}
/*
 * Bad permissions for sending the signal
 * - the caller must hold the RCU read lock
 */
static int check_kill_permission(int sig, struct siginfo *info,
                                 struct task_struct *t)
{
        const struct cred *cred, *tcred;
        struct pid *sid;
        int error;

        if (!valid_signal(sig))
                return -EINVAL;

        if (!si_fromuser(info))
                return 0;

        error = audit_signal_info(sig, t); /* Let audit system see the signal */
        if (error)
                return error;

        cred = current_cred();
        tcred = __task_cred(t);
        if (!same_thread_group(current, t) &&
            (cred->euid ^ tcred->suid) &&
            (cred->euid ^ tcred->uid) &&
            (cred->uid  ^ tcred->suid) &&
            (cred->uid  ^ tcred->uid) &&
            !capable(CAP_KILL)) {
                switch (sig) {
                case SIGCONT:
                        sid = task_session(t);
                        /*
                         * We don't return the error if sid == NULL. The
                         * task was unhashed, the caller must notice this.
                         */
                        if (!sid || sid == task_session(current))
                                break;
                default:
                        return -EPERM;
                }
        }

        return security_task_kill(t, info, sig, 0);
}
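
/*
 * The XOR chain above is an inverted equality test: (a ^ b) is zero
 * exactly when a == b, so the whole conjunction holds only if the
 * sender's euid and uid each differ from both the target's uid and suid.
 * Only then is CAP_KILL required, with SIGCONT to a task in the sender's
 * own session excepted by the switch.
 */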
/*
 * Handle magic process-wide effects of stop/continue signals. Unlike
 * the signal actions, these happen immediately at signal-generation
 * time regardless of blocking, ignoring, or handling. This does the
 * actual continuing for SIGCONT, but not the actual stopping for stop
 * signals. The process stop is done as a signal action for SIG_DFL.
 *
 * Returns true if the signal should be actually delivered, otherwise
 * it should be dropped.
 */
static int prepare_signal(int sig, struct task_struct *p, int from_ancestor_ns)
{
        struct signal_struct *signal = p->signal;
        struct task_struct *t;

        if (unlikely(signal->flags & SIGNAL_GROUP_EXIT)) {
                /*
                 * The process is in the middle of dying, nothing to do.
                 */
        } else if (sig_kernel_stop(sig)) {
                /*
                 * This is a stop signal. Remove SIGCONT from all queues.
                 */
                rm_from_queue(sigmask(SIGCONT), &signal->shared_pending);
                t = p;
                do {
                        rm_from_queue(sigmask(SIGCONT), &t->pending);
                } while_each_thread(p, t);
        } else if (sig == SIGCONT) {
                unsigned int why;
                /*
                 * Remove all stop signals from all queues,
                 * and wake all threads.
                 */
                rm_from_queue(SIG_KERNEL_STOP_MASK, &signal->shared_pending);
                t = p;
                do {
                        unsigned int state;

                        task_clear_group_stop_pending(t);
                        rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending);
                        /*
                         * If there is a handler for SIGCONT, we must make
                         * sure that no thread returns to user mode before
                         * we post the signal, in case it was the only
                         * thread eligible to run the signal handler--then
                         * it must not do anything between resuming and
                         * running the handler. With the TIF_SIGPENDING
                         * flag set, the thread will pause and acquire the
                         * siglock that we hold now and until we've queued
                         * the pending signal.
                         *
                         * Wake up the stopped thread _after_ setting
                         * TIF_SIGPENDING
                         */
                        state = __TASK_STOPPED;
                        if (sig_user_defined(t, SIGCONT) && !sigismember(&t->blocked, SIGCONT)) {
                                set_tsk_thread_flag(t, TIF_SIGPENDING);
                                state |= TASK_INTERRUPTIBLE;
                        }
                        wake_up_state(t, state);
                } while_each_thread(p, t);

                /*
                 * Notify the parent with CLD_CONTINUED if we were stopped.
                 *
                 * If we were in the middle of a group stop, we pretend it
                 * was already finished, and then continued. Since SIGCHLD
                 * doesn't queue we report only CLD_STOPPED, as if the next
                 * CLD_CONTINUED was dropped.
                 */
                why = 0;
                if (signal->flags & SIGNAL_STOP_STOPPED)
                        why |= SIGNAL_CLD_CONTINUED;
                else if (signal->group_stop_count)
                        why |= SIGNAL_CLD_STOPPED;

                if (why) {
                        /*
                         * The first thread which returns from do_signal_stop()
                         * will take ->siglock, notice SIGNAL_CLD_MASK, and
                         * notify its parent. See get_signal_to_deliver().
                         */
                        signal->flags = why | SIGNAL_STOP_CONTINUED;
                        signal->group_stop_count = 0;
                        signal->group_exit_code = 0;
                } else {
                        /*
                         * We are not stopped, but there could be a stop
                         * signal in the middle of being processed after
                         * being removed from the queue. Clear that too.
                         */
                        signal->flags &= ~SIGNAL_STOP_DEQUEUED;
                }
        }

        return !sig_ignored(p, sig, from_ancestor_ns);
}
/*
 * Test if P wants to take SIG. After we've checked all threads with this,
 * it's equivalent to finding no threads not blocking SIG. Any threads not
 * blocking SIG were ruled out because they are not running and already
 * have pending signals. Such threads will dequeue from the shared queue
 * as soon as they're available, so putting the signal on the shared queue
 * will be equivalent to sending it to one such thread.
 */
static inline int wants_signal(int sig, struct task_struct *p)
{
        if (sigismember(&p->blocked, sig))
                return 0;
        if (p->flags & PF_EXITING)
                return 0;
        if (sig == SIGKILL)
                return 1;
        if (task_is_stopped_or_traced(p))
                return 0;
        return task_curr(p) || !signal_pending(p);
}

static void complete_signal(int sig, struct task_struct *p, int group)
{
        struct signal_struct *signal = p->signal;
        struct task_struct *t;

        /*
         * Now find a thread we can wake up to take the signal off the queue.
         *
         * If the main thread wants the signal, it gets first crack.
         * Probably the least surprising to the average bear.
         */
        if (wants_signal(sig, p))
                t = p;
        else if (!group || thread_group_empty(p))
                /*
                 * There is just one thread and it does not need to be woken.
                 * It will dequeue unblocked signals before it runs again.
                 */
                return;
        else {
                /*
                 * Otherwise try to find a suitable thread.
                 */
                t = signal->curr_target;
                while (!wants_signal(sig, t)) {
                        t = next_thread(t);
                        if (t == signal->curr_target)
                                /*
                                 * No thread needs to be woken.
                                 * Any eligible threads will see
                                 * the signal in the queue soon.
                                 */
                                return;
                }
                signal->curr_target = t;
        }

        /*
         * Found a killable thread. If the signal will be fatal,
         * then start taking the whole group down immediately.
         */
        if (sig_fatal(p, sig) &&
            !(signal->flags & (SIGNAL_UNKILLABLE | SIGNAL_GROUP_EXIT)) &&
            !sigismember(&t->real_blocked, sig) &&
            (sig == SIGKILL ||
             !tracehook_consider_fatal_signal(t, sig))) {
                /*
                 * This signal will be fatal to the whole group.
                 */
                if (!sig_kernel_coredump(sig)) {
                        /*
                         * Start a group exit and wake everybody up.
                         * This way we don't have other threads
                         * running and doing things after a slower
                         * thread has the fatal signal pending.
                         */
                        signal->flags = SIGNAL_GROUP_EXIT;
                        signal->group_exit_code = sig;
                        signal->group_stop_count = 0;
                        t = p;
                        do {
                                task_clear_group_stop_pending(t);
                                sigaddset(&t->pending.signal, SIGKILL);
                                signal_wake_up(t, 1);
                        } while_each_thread(p, t);
                        return;
                }
        }

        /*
         * The signal is already in the shared-pending queue.
         * Tell the chosen thread to wake up and dequeue it.
         */
        signal_wake_up(t, sig == SIGKILL);
        return;
}
static inline int legacy_queue(struct sigpending *signals, int sig)
{
        return (sig < SIGRTMIN) && sigismember(&signals->signal, sig);
}
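
/*
 * Example of the legacy coalescing rule implemented by legacy_queue():
 * if SIGCHLD (< SIGRTMIN) is already pending for the target, a second
 * SIGCHLD is silently dropped by __send_signal() below; realtime signals
 * (>= SIGRTMIN) are queued individually instead.
 */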
static int __send_signal(int sig, struct siginfo *info, struct task_struct *t,
                        int group, int from_ancestor_ns)
{
        struct sigpending *pending;
        struct sigqueue *q;
        int override_rlimit;

        trace_signal_generate(sig, info, t);

        assert_spin_locked(&t->sighand->siglock);

        if (!prepare_signal(sig, t, from_ancestor_ns))
                return 0;

        pending = group ? &t->signal->shared_pending : &t->pending;
        /*
         * Short-circuit ignored signals and support queuing
         * exactly one non-rt signal, so that we can get more
         * detailed information about the cause of the signal.
         */
        if (legacy_queue(pending, sig))
                return 0;
        /*
         * fast-pathed signals for kernel-internal things like SIGSTOP
         * or SIGKILL.
         */
        if (info == SEND_SIG_FORCED)
                goto out_set;

        /*
         * Real-time signals must be queued if sent by sigqueue, or
         * some other real-time mechanism. It is implementation
         * defined whether kill() does so. We attempt to do so, on
         * the principle of least surprise, but since kill is not
         * allowed to fail with EAGAIN when low on memory we just
         * make sure at least one signal gets delivered and don't
         * pass on the info struct.
         */
        if (sig < SIGRTMIN)
                override_rlimit = (is_si_special(info) || info->si_code >= 0);
        else
                override_rlimit = 0;

        q = __sigqueue_alloc(sig, t, GFP_ATOMIC | __GFP_NOTRACK_FALSE_POSITIVE,
                override_rlimit);
        if (q) {
                list_add_tail(&q->list, &pending->list);
                switch ((unsigned long) info) {
                case (unsigned long) SEND_SIG_NOINFO:
                        q->info.si_signo = sig;
                        q->info.si_errno = 0;
                        q->info.si_code = SI_USER;
                        q->info.si_pid = task_tgid_nr_ns(current,
                                                        task_active_pid_ns(t));
                        q->info.si_uid = current_uid();
                        break;
                case (unsigned long) SEND_SIG_PRIV:
                        q->info.si_signo = sig;
                        q->info.si_errno = 0;
                        q->info.si_code = SI_KERNEL;
                        q->info.si_pid = 0;
                        q->info.si_uid = 0;
                        break;
                default:
                        copy_siginfo(&q->info, info);
                        if (from_ancestor_ns)
                                q->info.si_pid = 0;
                        break;
                }
        } else if (!is_si_special(info)) {
                if (sig >= SIGRTMIN && info->si_code != SI_USER) {
                        /*
                         * Queue overflow, abort. We may abort if the
                         * signal was rt and sent by user using something
                         * other than kill().
                         */
                        trace_signal_overflow_fail(sig, group, info);
                        return -EAGAIN;
                } else {
                        /*
                         * This is a silent loss of information. We still
                         * send the signal, but the *info bits are lost.
                         */
                        trace_signal_lose_info(sig, group, info);
                }
        }

out_set:
        signalfd_notify(t, sig);
        sigaddset(&pending->signal, sig);
        complete_signal(sig, t, group);
        return 0;
}
static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
                        int group)
{
        int from_ancestor_ns = 0;

#ifdef CONFIG_PID_NS
        from_ancestor_ns = si_fromuser(info) &&
                           !task_pid_nr_ns(current, task_active_pid_ns(t));
#endif

        return __send_signal(sig, info, t, group, from_ancestor_ns);
}

static void print_fatal_signal(struct pt_regs *regs, int signr)
{
        printk("%s/%d: potentially unexpected fatal signal %d.\n",
                current->comm, task_pid_nr(current), signr);

#if defined(__i386__) && !defined(__arch_um__)
        printk("code at %08lx: ", regs->ip);
        {
                int i;
                for (i = 0; i < 16; i++) {
                        unsigned char insn;

                        if (get_user(insn, (unsigned char *)(regs->ip + i)))
                                break;
                        printk("%02x ", insn);
                }
        }
#endif
        printk("\n");
        preempt_disable();
        show_regs(regs);
        preempt_enable();
}

static int __init setup_print_fatal_signals(char *str)
{
        get_option (&str, &print_fatal_signals);

        return 1;
}

__setup("print-fatal-signals=", setup_print_fatal_signals);
int
__group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
        return send_signal(sig, info, p, 1);
}

static int
specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
{
        return send_signal(sig, info, t, 0);
}

int do_send_sig_info(int sig, struct siginfo *info, struct task_struct *p,
                        bool group)
{
        unsigned long flags;
        int ret = -ESRCH;

        if (lock_task_sighand(p, &flags)) {
                ret = send_signal(sig, info, p, group);
                unlock_task_sighand(p, &flags);
        }

        return ret;
}

/*
 * Force a signal that the process can't ignore: if necessary
 * we unblock the signal and change any SIG_IGN to SIG_DFL.
 *
 * Note: If we unblock the signal, we always reset it to SIG_DFL,
 * since we do not want to have a signal handler that was blocked
 * be invoked when user space had explicitly blocked it.
 *
 * We don't want to have recursive SIGSEGV's etc, for example,
 * that is why we also clear SIGNAL_UNKILLABLE.
 */
int
force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
{
        unsigned long int flags;
        int ret, blocked, ignored;
        struct k_sigaction *action;

        spin_lock_irqsave(&t->sighand->siglock, flags);
        action = &t->sighand->action[sig-1];
        ignored = action->sa.sa_handler == SIG_IGN;
        blocked = sigismember(&t->blocked, sig);
        if (blocked || ignored) {
                action->sa.sa_handler = SIG_DFL;
                if (blocked) {
                        sigdelset(&t->blocked, sig);
                        recalc_sigpending_and_wake(t);
                }
        }
        if (action->sa.sa_handler == SIG_DFL)
                t->signal->flags &= ~SIGNAL_UNKILLABLE;
        ret = specific_send_sig_info(sig, info, t);
        spin_unlock_irqrestore(&t->sighand->siglock, flags);

        return ret;
}
/*
 * Nuke all other threads in the group.
 */
int zap_other_threads(struct task_struct *p)
{
        struct task_struct *t = p;
        int count = 0;

        p->signal->group_stop_count = 0;

        while_each_thread(p, t) {
                task_clear_group_stop_pending(t);
                count++;

                /* Don't bother with already dead threads */
                if (t->exit_state)
                        continue;
                sigaddset(&t->pending.signal, SIGKILL);
                signal_wake_up(t, 1);
        }

        return count;
}

struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
                                           unsigned long *flags)
{
        struct sighand_struct *sighand;

        rcu_read_lock();
        for (;;) {
                sighand = rcu_dereference(tsk->sighand);
                if (unlikely(sighand == NULL))
                        break;

                spin_lock_irqsave(&sighand->siglock, *flags);
                if (likely(sighand == tsk->sighand))
                        break;
                spin_unlock_irqrestore(&sighand->siglock, *flags);
        }
        rcu_read_unlock();

        return sighand;
}
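
/*
 * The loop above is the usual RCU lock-then-revalidate pattern:
 * ->sighand can be changed out from under us (e.g. during exec), so
 * after taking the lock we re-check that tsk->sighand still points at
 * the structure we locked, and retry if it does not.
 */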
/*
 * send signal info to all the members of a group
 */
int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
        int ret;

        rcu_read_lock();
        ret = check_kill_permission(sig, info, p);
        rcu_read_unlock();

        if (!ret && sig)
                ret = do_send_sig_info(sig, info, p, true);

        return ret;
}

/*
 * __kill_pgrp_info() sends a signal to a process group: this is what the tty
 * control characters do (^C, ^Z etc)
 * - the caller must hold at least a readlock on tasklist_lock
 */
int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp)
{
        struct task_struct *p = NULL;
        int retval, success;

        success = 0;
        retval = -ESRCH;
        do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
                int err = group_send_sig_info(sig, info, p);
                success |= !err;
                retval = err;
        } while_each_pid_task(pgrp, PIDTYPE_PGID, p);
        return success ? 0 : retval;
}

int kill_pid_info(int sig, struct siginfo *info, struct pid *pid)
{
        int error = -ESRCH;
        struct task_struct *p;

        rcu_read_lock();
retry:
        p = pid_task(pid, PIDTYPE_PID);
        if (p) {
                error = group_send_sig_info(sig, info, p);
                if (unlikely(error == -ESRCH))
                        /*
                         * The task was unhashed in between, try again.
                         * If it is dead, pid_task() will return NULL,
                         * if we race with de_thread() it will find the
                         * new leader.
                         */
                        goto retry;
        }
        rcu_read_unlock();

        return error;
}

int
kill_proc_info(int sig, struct siginfo *info, pid_t pid)
{
        int error;

        rcu_read_lock();
        error = kill_pid_info(sig, info, find_vpid(pid));
        rcu_read_unlock();
        return error;
}
/* like kill_pid_info(), but doesn't use uid/euid of "current" */
int kill_pid_info_as_uid(int sig, struct siginfo *info, struct pid *pid,
                      uid_t uid, uid_t euid, u32 secid)
{
        int ret = -EINVAL;
        struct task_struct *p;
        const struct cred *pcred;
        unsigned long flags;

        if (!valid_signal(sig))
                return ret;

        rcu_read_lock();
        p = pid_task(pid, PIDTYPE_PID);
        if (!p) {
                ret = -ESRCH;
                goto out_unlock;
        }
        pcred = __task_cred(p);
        if (si_fromuser(info) &&
            euid != pcred->suid && euid != pcred->uid &&
            uid  != pcred->suid && uid  != pcred->uid) {
                ret = -EPERM;
                goto out_unlock;
        }
        ret = security_task_kill(p, info, sig, secid);
        if (ret)
                goto out_unlock;

        if (sig) {
                if (lock_task_sighand(p, &flags)) {
                        ret = __send_signal(sig, info, p, 1, 0);
                        unlock_task_sighand(p, &flags);
                } else
                        ret = -ESRCH;
        }
out_unlock:
        rcu_read_unlock();
        return ret;
}
EXPORT_SYMBOL_GPL(kill_pid_info_as_uid);
/*
 * kill_something_info() interprets pid in interesting ways just like kill(2).
 *
 * POSIX specifies that kill(-1,sig) is unspecified, but what we have
 * is probably wrong. Should make it like BSD or SYSV.
 */
static int kill_something_info(int sig, struct siginfo *info, pid_t pid)
{
        int ret;

        if (pid > 0) {
                rcu_read_lock();
                ret = kill_pid_info(sig, info, find_vpid(pid));
                rcu_read_unlock();
                return ret;
        }

        read_lock(&tasklist_lock);
        if (pid != -1) {
                ret = __kill_pgrp_info(sig, info,
                                pid ? find_vpid(-pid) : task_pgrp(current));
        } else {
                int retval = 0, count = 0;
                struct task_struct * p;

                for_each_process(p) {
                        if (task_pid_vnr(p) > 1 &&
                                        !same_thread_group(p, current)) {
                                int err = group_send_sig_info(sig, info, p);
                                ++count;
                                if (err != -EPERM)
                                        retval = err;
                        }
                }
                ret = count ? retval : -ESRCH;
        }
        read_unlock(&tasklist_lock);

        return ret;
}
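
/*
 * Summary of the kill(2) pid conventions handled above:
 *   pid > 0    signal the single process with that pid;
 *   pid == 0   signal every process in the caller's process group;
 *   pid < -1   signal every process in the group -pid;
 *   pid == -1  signal every process the caller may signal, except
 *              pid 1 (init) and the caller's own thread group.
 */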
/*
 * These are for backward compatibility with the rest of the kernel source.
 */

int
send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
        /*
         * Make sure legacy kernel users don't send in bad values
         * (normal paths check this in check_kill_permission).
         */
        if (!valid_signal(sig))
                return -EINVAL;

        return do_send_sig_info(sig, info, p, false);
}

#define __si_special(priv) \
        ((priv) ? SEND_SIG_PRIV : SEND_SIG_NOINFO)

int
send_sig(int sig, struct task_struct *p, int priv)
{
        return send_sig_info(sig, __si_special(priv), p);
}

void
force_sig(int sig, struct task_struct *p)
{
        force_sig_info(sig, SEND_SIG_PRIV, p);
}

/*
 * When things go south during signal handling, we
 * will force a SIGSEGV. And if the signal that caused
 * the problem was already a SIGSEGV, we'll want to
 * make sure we don't even try to deliver the signal.
 */
int
force_sigsegv(int sig, struct task_struct *p)
{
        if (sig == SIGSEGV) {
                unsigned long flags;

                spin_lock_irqsave(&p->sighand->siglock, flags);
                p->sighand->action[sig - 1].sa.sa_handler = SIG_DFL;
                spin_unlock_irqrestore(&p->sighand->siglock, flags);
        }
        force_sig(SIGSEGV, p);
        return 0;
}

int kill_pgrp(struct pid *pid, int sig, int priv)
{
        int ret;

        read_lock(&tasklist_lock);
        ret = __kill_pgrp_info(sig, __si_special(priv), pid);
        read_unlock(&tasklist_lock);

        return ret;
}
EXPORT_SYMBOL(kill_pgrp);

int kill_pid(struct pid *pid, int sig, int priv)
{
        return kill_pid_info(sig, __si_special(priv), pid);
}
EXPORT_SYMBOL(kill_pid);
/*
 * These functions support sending signals using preallocated sigqueue
 * structures. This is needed "because realtime applications cannot
 * afford to lose notifications of asynchronous events, like timer
 * expirations or I/O completions". In the case of POSIX timers
 * we allocate the sigqueue structure from the timer_create. If this
 * allocation fails we are able to report the failure to the
 * application with an EAGAIN error.
 */
struct sigqueue *sigqueue_alloc(void)
{
        struct sigqueue *q = __sigqueue_alloc(-1, current, GFP_KERNEL, 0);

        if (q)
                q->flags |= SIGQUEUE_PREALLOC;

        return q;
}

void sigqueue_free(struct sigqueue *q)
{
        unsigned long flags;
        spinlock_t *lock = &current->sighand->siglock;

        BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
        /*
         * We must hold ->siglock while testing q->list
         * to serialize with collect_signal() or with
         * __exit_signal()->flush_sigqueue().
         */
        spin_lock_irqsave(lock, flags);
        q->flags &= ~SIGQUEUE_PREALLOC;
        /*
         * If it is queued it will be freed when dequeued,
         * like the "regular" sigqueue.
         */
        if (!list_empty(&q->list))
                q = NULL;
        spin_unlock_irqrestore(lock, flags);

        if (q)
                __sigqueue_free(q);
}
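
/*
 * Lifecycle sketch (caller side, illustrative only): a preallocated
 * entry is typically obtained once, reused across firings, and freed
 * explicitly:
 *
 *        q = sigqueue_alloc();           (at setup, e.g. timer_create)
 *        if (!q)
 *                return -EAGAIN;
 *        ...
 *        send_sigqueue(q, task, group);  (each time the event fires)
 *        ...
 *        sigqueue_free(q);               (at teardown)
 */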
int send_sigqueue(struct sigqueue *q, struct task_struct *t, int group)
{
        int sig = q->info.si_signo;
        struct sigpending *pending;
        unsigned long flags;
        int ret;

        BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));

        ret = -1;
        if (!likely(lock_task_sighand(t, &flags)))
                goto ret;

        ret = 1; /* the signal is ignored */
        if (!prepare_signal(sig, t, 0))
                goto out;

        ret = 0;
        if (unlikely(!list_empty(&q->list))) {
                /*
                 * If an SI_TIMER entry is already queued, just increment
                 * the overrun count.
                 */
                BUG_ON(q->info.si_code != SI_TIMER);
                q->info.si_overrun++;
                goto out;
        }
        q->info.si_overrun = 0;

        signalfd_notify(t, sig);
        pending = group ? &t->signal->shared_pending : &t->pending;
        list_add_tail(&q->list, &pending->list);
        sigaddset(&pending->signal, sig);
        complete_signal(sig, t, group);
out:
        unlock_task_sighand(t, &flags);
ret:
        return ret;
}
/*
 * Let a parent know about the death of a child.
 * For a stopped/continued status change, use do_notify_parent_cldstop instead.
 *
 * Returns -1 if our parent ignored us and so we've switched to
 * self-reaping, or else @sig.
 */
int do_notify_parent(struct task_struct *tsk, int sig)
{
        struct siginfo info;
        unsigned long flags;
        struct sighand_struct *psig;
        int ret = sig;

        BUG_ON(sig == -1);

        /* do_notify_parent_cldstop should have been called instead. */
        BUG_ON(task_is_stopped_or_traced(tsk));

        BUG_ON(!task_ptrace(tsk) &&
               (tsk->group_leader != tsk || !thread_group_empty(tsk)));

        info.si_signo = sig;
        info.si_errno = 0;
        /*
         * we are under tasklist_lock here so our parent is tied to
         * us and cannot exit and release its namespace.
         *
         * the only thing it can do is to switch its nsproxy with
         * sys_unshare, but unsharing pid namespaces is not allowed,
         * so we'll always see the relevant namespace.
         *
         * write_lock() currently calls preempt_disable() which is the
         * same as rcu_read_lock(), but according to Oleg, it is not
         * correct to rely on this.
         */
        rcu_read_lock();
        info.si_pid = task_pid_nr_ns(tsk, tsk->parent->nsproxy->pid_ns);
        info.si_uid = __task_cred(tsk)->uid;
        rcu_read_unlock();

        info.si_utime = cputime_to_clock_t(cputime_add(tsk->utime,
                                tsk->signal->utime));
        info.si_stime = cputime_to_clock_t(cputime_add(tsk->stime,
                                tsk->signal->stime));

        info.si_status = tsk->exit_code & 0x7f;
        if (tsk->exit_code & 0x80)
                info.si_code = CLD_DUMPED;
        else if (tsk->exit_code & 0x7f)
                info.si_code = CLD_KILLED;
        else {
                info.si_code = CLD_EXITED;
                info.si_status = tsk->exit_code >> 8;
        }

        psig = tsk->parent->sighand;
        spin_lock_irqsave(&psig->siglock, flags);
        if (!task_ptrace(tsk) && sig == SIGCHLD &&
            (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
             (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) {
                /*
                 * We are exiting and our parent doesn't care. POSIX.1
                 * defines special semantics for setting SIGCHLD to SIG_IGN
                 * or setting the SA_NOCLDWAIT flag: we should be reaped
                 * automatically and not left for our parent's wait4 call.
                 * Rather than having the parent do it as a magic kind of
                 * signal handler, we just set this to tell do_exit that we
                 * can be cleaned up without becoming a zombie. Note that
                 * we still call __wake_up_parent in this case, because a
                 * blocked sys_wait4 might now return -ECHILD.
                 *
                 * Whether we send SIGCHLD or not for SA_NOCLDWAIT
                 * is implementation-defined: we do (if you don't want
                 * it, just use SIG_IGN instead).
                 */
                ret = tsk->exit_signal = -1;
                if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
                        sig = -1;
        }
        if (valid_signal(sig) && sig > 0)
                __group_send_sig_info(sig, &info, tsk->parent);
        __wake_up_parent(tsk, tsk->parent);
        spin_unlock_irqrestore(&psig->siglock, flags);

        return ret;
}
  1392. static void do_notify_parent_cldstop(struct task_struct *tsk, int why)
  1393. {
  1394. struct siginfo info;
  1395. unsigned long flags;
  1396. struct task_struct *parent;
  1397. struct sighand_struct *sighand;
  1398. if (task_ptrace(tsk))
  1399. parent = tsk->parent;
  1400. else {
  1401. tsk = tsk->group_leader;
  1402. parent = tsk->real_parent;
  1403. }
  1404. info.si_signo = SIGCHLD;
  1405. info.si_errno = 0;
  1406. /*
  1407. * see comment in do_notify_parent() abot the following 3 lines
  1408. */
  1409. rcu_read_lock();
  1410. info.si_pid = task_pid_nr_ns(tsk, parent->nsproxy->pid_ns);
  1411. info.si_uid = __task_cred(tsk)->uid;
  1412. rcu_read_unlock();
  1413. info.si_utime = cputime_to_clock_t(tsk->utime);
  1414. info.si_stime = cputime_to_clock_t(tsk->stime);
  1415. info.si_code = why;
  1416. switch (why) {
  1417. case CLD_CONTINUED:
  1418. info.si_status = SIGCONT;
  1419. break;
  1420. case CLD_STOPPED:
  1421. info.si_status = tsk->signal->group_exit_code & 0x7f;
  1422. break;
  1423. case CLD_TRAPPED:
  1424. info.si_status = tsk->exit_code & 0x7f;
  1425. break;
  1426. default:
  1427. BUG();
  1428. }
  1429. sighand = parent->sighand;
  1430. spin_lock_irqsave(&sighand->siglock, flags);
  1431. if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN &&
  1432. !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
  1433. __group_send_sig_info(SIGCHLD, &info, parent);
  1434. /*
  1435. * Even if SIGCHLD is not generated, we must wake up wait4 calls.
  1436. */
  1437. __wake_up_parent(tsk, parent);
  1438. spin_unlock_irqrestore(&sighand->siglock, flags);
  1439. }
  1440. static inline int may_ptrace_stop(void)
  1441. {
  1442. if (!likely(task_ptrace(current)))
  1443. return 0;
  1444. /*
  1445. * Are we in the middle of do_coredump?
  1446. * If so and our tracer is also part of the coredump stopping
  1447. * is a deadlock situation, and pointless because our tracer
  1448. * is dead so don't allow us to stop.
  1449. * If SIGKILL was already sent before the caller unlocked
  1450. * ->siglock we must see ->core_state != NULL. Otherwise it
  1451. * is safe to enter schedule().
  1452. */
  1453. if (unlikely(current->mm->core_state) &&
  1454. unlikely(current->mm == current->parent->mm))
  1455. return 0;
  1456. return 1;
  1457. }
  1458. /*
  1459. * Return nonzero if there is a SIGKILL that should be waking us up.
  1460. * Called with the siglock held.
  1461. */
  1462. static int sigkill_pending(struct task_struct *tsk)
  1463. {
  1464. return sigismember(&tsk->pending.signal, SIGKILL) ||
  1465. sigismember(&tsk->signal->shared_pending.signal, SIGKILL);
  1466. }
  1467. /*
  1468. * This must be called with current->sighand->siglock held.
  1469. *
  1470. * This should be the path for all ptrace stops.
  1471. * We always set current->last_siginfo while stopped here.
  1472. * That makes it a way to test a stopped process for
  1473. * being ptrace-stopped vs being job-control-stopped.
  1474. *
  1475. * If we actually decide not to stop at all because the tracer
  1476. * is gone, we keep current->exit_code unless clear_code.
  1477. */
  1478. static void ptrace_stop(int exit_code, int why, int clear_code, siginfo_t *info)
  1479. __releases(&current->sighand->siglock)
  1480. __acquires(&current->sighand->siglock)
  1481. {
  1482. if (arch_ptrace_stop_needed(exit_code, info)) {
  1483. /*
  1484. * The arch code has something special to do before a
  1485. * ptrace stop. This is allowed to block, e.g. for faults
  1486. * on user stack pages. We can't keep the siglock while
  1487. * calling arch_ptrace_stop, so we must release it now.
  1488. * To preserve proper semantics, we must do this before
  1489. * any signal bookkeeping like checking group_stop_count.
  1490. * Meanwhile, a SIGKILL could come in before we retake the
  1491. * siglock. That must prevent us from sleeping in TASK_TRACED.
  1492. * So after regaining the lock, we must check for SIGKILL.
  1493. */
  1494. spin_unlock_irq(&current->sighand->siglock);
  1495. arch_ptrace_stop(exit_code, info);
  1496. spin_lock_irq(&current->sighand->siglock);
  1497. if (sigkill_pending(current))
  1498. return;
  1499. }
  1500. /*
  1501. * If @why is CLD_STOPPED, we're trapping to participate in a group
  1502. * stop. Do the bookkeeping. Note that if SIGCONT was delievered
  1503. * while siglock was released for the arch hook, PENDING could be
  1504. * clear now. We act as if SIGCONT is received after TASK_TRACED
  1505. * is entered - ignore it.
  1506. */
  1507. if (why == CLD_STOPPED && (current->group_stop & GROUP_STOP_PENDING))
  1508. task_participate_group_stop(current);
  1509. current->last_siginfo = info;
  1510. current->exit_code = exit_code;
  1511. /*
  1512. * TRACED should be visible before TRAPPING is cleared; otherwise,
  1513. * the tracer might fail do_wait().
  1514. */
  1515. set_current_state(TASK_TRACED);
  1516. /*
  1517. * We're committing to trapping. Clearing GROUP_STOP_TRAPPING and
  1518. * transition to TASK_TRACED should be atomic with respect to
  1519. * siglock. This hsould be done after the arch hook as siglock is
  1520. * released and regrabbed across it.
  1521. */
  1522. task_clear_group_stop_trapping(current);
  1523. spin_unlock_irq(&current->sighand->siglock);
  1524. read_lock(&tasklist_lock);
  1525. if (may_ptrace_stop()) {
  1526. do_notify_parent_cldstop(current, why);
  1527. /*
  1528. * Don't want to allow preemption here, because
  1529. * sys_ptrace() needs this task to be inactive.
  1530. *
  1531. * XXX: implement read_unlock_no_resched().
  1532. */
  1533. preempt_disable();
  1534. read_unlock(&tasklist_lock);
  1535. preempt_enable_no_resched();
  1536. schedule();
  1537. } else {
  1538. /*
  1539. * By the time we got the lock, our tracer went away.
  1540. * Don't drop the lock yet, another tracer may come.
  1541. */
  1542. __set_current_state(TASK_RUNNING);
  1543. if (clear_code)
  1544. current->exit_code = 0;
  1545. read_unlock(&tasklist_lock);
  1546. }
  1547. /*
  1548. * While in TASK_TRACED, we were considered "frozen enough".
  1549. * Now that we woke up, it's crucial if we're supposed to be
  1550. * frozen that we freeze now before running anything substantial.
  1551. */
  1552. try_to_freeze();
  1553. /*
  1554. * We are back. Now reacquire the siglock before touching
  1555. * last_siginfo, so that we are sure to have synchronized with
  1556. * any signal-sending on another CPU that wants to examine it.
  1557. */
  1558. spin_lock_irq(&current->sighand->siglock);
  1559. current->last_siginfo = NULL;
  1560. /*
  1561. * Queued signals ignored us while we were stopped for tracing.
  1562. * So check for any that we should take before resuming user mode.
  1563. * This sets TIF_SIGPENDING, but never clears it.
  1564. */
  1565. recalc_sigpending_tsk(current);
  1566. }
  1567. void ptrace_notify(int exit_code)
  1568. {
  1569. siginfo_t info;
  1570. BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
  1571. memset(&info, 0, sizeof info);
  1572. info.si_signo = SIGTRAP;
  1573. info.si_code = exit_code;
  1574. info.si_pid = task_pid_vnr(current);
  1575. info.si_uid = current_uid();
  1576. /* Let the debugger run. */
  1577. spin_lock_irq(&current->sighand->siglock);
  1578. ptrace_stop(exit_code, CLD_TRAPPED, 1, &info);
  1579. spin_unlock_irq(&current->sighand->siglock);
  1580. }
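
/*
 * Usage sketch (illustrative): callers report ptrace events by packing
 * the event number into bits 8-15 of the SIGTRAP exit code, which is
 * exactly what the BUG_ON above enforces (low 7 bits must be SIGTRAP,
 * nothing above bit 15).  For example, reporting a fork when the
 * tracer asked for PTRACE_O_TRACEFORK:
 *
 *	ptrace_notify((PTRACE_EVENT_FORK << 8) | SIGTRAP);
 *
 * The tracer then sees the event in the wait status via
 * (status >> 16) == PTRACE_EVENT_FORK.
 */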
/*
 * This performs the stopping for SIGSTOP and other stop signals.
 * We have to stop all threads in the thread group.
 * Returns nonzero if we've actually stopped and released the siglock.
 * Returns zero if we didn't stop and still hold the siglock.
 */
static int do_signal_stop(int signr)
{
        struct signal_struct *sig = current->signal;

        if (!(current->group_stop & GROUP_STOP_PENDING)) {
                unsigned int gstop = GROUP_STOP_PENDING | GROUP_STOP_CONSUME;
                struct task_struct *t;

                /* signr will be recorded in task->group_stop for retries */
                WARN_ON_ONCE(signr & ~GROUP_STOP_SIGMASK);

                if (!likely(sig->flags & SIGNAL_STOP_DEQUEUED) ||
                    unlikely(signal_group_exit(sig)))
                        return 0;
                /*
                 * There is no group stop already in progress.
                 * We must initiate one now.
                 */
                sig->group_exit_code = signr;

                current->group_stop &= ~GROUP_STOP_SIGMASK;
                current->group_stop |= signr | gstop;
                sig->group_stop_count = 1;
                for (t = next_thread(current); t != current;
                     t = next_thread(t)) {
                        t->group_stop &= ~GROUP_STOP_SIGMASK;
                        /*
                         * Setting state to TASK_STOPPED for a group
                         * stop is always done with the siglock held,
                         * so this check has no races.
                         */
                        if (!(t->flags & PF_EXITING) && !task_is_stopped(t)) {
                                t->group_stop |= signr | gstop;
                                sig->group_stop_count++;
                                signal_wake_up(t, 0);
                        } else {
                                task_clear_group_stop_pending(t);
                        }
                }
        }
retry:
        if (likely(!task_ptrace(current))) {
                int notify = 0;

                /*
                 * If there are no other threads in the group, or if there
                 * is a group stop in progress and we are the last to stop,
                 * report to the parent.
                 */
                if (task_participate_group_stop(current))
                        notify = CLD_STOPPED;

                __set_current_state(TASK_STOPPED);
                spin_unlock_irq(&current->sighand->siglock);

                if (notify) {
                        read_lock(&tasklist_lock);
                        do_notify_parent_cldstop(current, notify);
                        read_unlock(&tasklist_lock);
                }

                /* Now we don't run again until woken by SIGCONT or SIGKILL */
                schedule();

                spin_lock_irq(&current->sighand->siglock);
        } else {
                ptrace_stop(current->group_stop & GROUP_STOP_SIGMASK,
                            CLD_STOPPED, 0, NULL);
                current->exit_code = 0;
        }

        /*
         * GROUP_STOP_PENDING could be set if another group stop has
         * started since being woken up or ptrace wants us to transit
         * between TASK_STOPPED and TRACED. Retry group stop.
         */
        if (current->group_stop & GROUP_STOP_PENDING) {
                WARN_ON_ONCE(!(current->group_stop & GROUP_STOP_SIGMASK));
                goto retry;
        }

        /* PTRACE_ATTACH might have raced with task killing, clear trapping */
        task_clear_group_stop_trapping(current);

        spin_unlock_irq(&current->sighand->siglock);

        tracehook_finish_jctl();

        return 1;
}

static int ptrace_signal(int signr, siginfo_t *info,
                         struct pt_regs *regs, void *cookie)
{
        if (!task_ptrace(current))
                return signr;

        ptrace_signal_deliver(regs, cookie);

        /* Let the debugger run. */
        ptrace_stop(signr, CLD_TRAPPED, 0, info);

        /* We're back. Did the debugger cancel the sig? */
        signr = current->exit_code;
        if (signr == 0)
                return signr;

        current->exit_code = 0;

        /*
         * Update the siginfo structure if the signal has changed.
         * If the debugger wanted something specific in the siginfo
         * structure then it should have updated *info via
         * PTRACE_SETSIGINFO.
         */
        if (signr != info->si_signo) {
                info->si_signo = signr;
                info->si_errno = 0;
                info->si_code = SI_USER;
                info->si_pid = task_pid_vnr(current->parent);
                info->si_uid = task_uid(current->parent);
        }

        /* If the (new) signal is now blocked, requeue it. */
        if (sigismember(&current->blocked, signr)) {
                specific_send_sig_info(signr, info, current);
                signr = 0;
        }

        return signr;
}

int get_signal_to_deliver(siginfo_t *info, struct k_sigaction *return_ka,
                          struct pt_regs *regs, void *cookie)
{
        struct sighand_struct *sighand = current->sighand;
        struct signal_struct *signal = current->signal;
        int signr;

relock:
        /*
         * We'll jump back here after any time we were stopped in TASK_STOPPED.
         * While in TASK_STOPPED, we were considered "frozen enough".
         * Now that we woke up, it's crucial if we're supposed to be
         * frozen that we freeze now before running anything substantial.
         */
        try_to_freeze();

        spin_lock_irq(&sighand->siglock);
        /*
         * Every stopped thread goes here after wakeup. Check to see if
         * we should notify the parent, prepare_signal(SIGCONT) encodes
         * the CLD_ si_code into SIGNAL_CLD_MASK bits.
         */
        if (unlikely(signal->flags & SIGNAL_CLD_MASK)) {
                int why;

                if (signal->flags & SIGNAL_CLD_CONTINUED)
                        why = CLD_CONTINUED;
                else
                        why = CLD_STOPPED;

                signal->flags &= ~SIGNAL_CLD_MASK;

                spin_unlock_irq(&sighand->siglock);

                read_lock(&tasklist_lock);
                do_notify_parent_cldstop(current->group_leader, why);
                read_unlock(&tasklist_lock);
                goto relock;
        }

        for (;;) {
                struct k_sigaction *ka;
                /*
                 * Tracing can induce an artificial signal and choose sigaction.
                 * The return value in @signr determines the default action,
                 * but @info->si_signo is the signal number we will report.
                 */
                signr = tracehook_get_signal(current, regs, info, return_ka);
                if (unlikely(signr < 0))
                        goto relock;
                if (unlikely(signr != 0))
                        ka = return_ka;
                else {
                        if (unlikely(current->group_stop &
                                     GROUP_STOP_PENDING) && do_signal_stop(0))
                                goto relock;

                        signr = dequeue_signal(current, &current->blocked,
                                               info);

                        if (!signr)
                                break; /* will return 0 */

                        if (signr != SIGKILL) {
                                signr = ptrace_signal(signr, info,
                                                      regs, cookie);
                                if (!signr)
                                        continue;
                        }

                        ka = &sighand->action[signr-1];
                }

                /* Trace actually delivered signals. */
                trace_signal_deliver(signr, info, ka);

                if (ka->sa.sa_handler == SIG_IGN) /* Do nothing. */
                        continue;
                if (ka->sa.sa_handler != SIG_DFL) {
                        /* Run the handler. */
                        *return_ka = *ka;

                        if (ka->sa.sa_flags & SA_ONESHOT)
                                ka->sa.sa_handler = SIG_DFL;

                        break; /* will return non-zero "signr" value */
                }

                /*
                 * Now we are doing the default action for this signal.
                 */
                if (sig_kernel_ignore(signr)) /* Default is nothing. */
                        continue;

                /*
                 * Global init gets no signals it doesn't want.
                 * Container-init gets no signals it doesn't want from same
                 * container.
                 *
                 * Note that if global/container-init sees a sig_kernel_only()
                 * signal here, the signal must have been generated internally
                 * or must have come from an ancestor namespace. In either
                 * case, the signal cannot be dropped.
                 */
                if (unlikely(signal->flags & SIGNAL_UNKILLABLE) &&
                    !sig_kernel_only(signr))
                        continue;

                if (sig_kernel_stop(signr)) {
                        /*
                         * The default action is to stop all threads in
                         * the thread group. The job control signals
                         * do nothing in an orphaned pgrp, but SIGSTOP
                         * always works. Note that siglock needs to be
                         * dropped during the call to is_orphaned_pgrp()
                         * because of lock ordering with tasklist_lock.
                         * This allows an intervening SIGCONT to be posted.
                         * We need to check for that and bail out if necessary.
                         */
                        if (signr != SIGSTOP) {
                                spin_unlock_irq(&sighand->siglock);

                                /* signals can be posted during this window */

                                if (is_current_pgrp_orphaned())
                                        goto relock;

                                spin_lock_irq(&sighand->siglock);
                        }

                        if (likely(do_signal_stop(info->si_signo))) {
                                /* It released the siglock. */
                                goto relock;
                        }

                        /*
                         * We didn't actually stop, due to a race
                         * with SIGCONT or something like that.
                         */
                        continue;
                }

                spin_unlock_irq(&sighand->siglock);

                /*
                 * Anything else is fatal, maybe with a core dump.
                 */
                current->flags |= PF_SIGNALED;

                if (sig_kernel_coredump(signr)) {
                        if (print_fatal_signals)
                                print_fatal_signal(regs, info->si_signo);
                        /*
                         * If it was able to dump core, this kills all
                         * other threads in the group and synchronizes with
                         * their demise. If we lost the race with another
                         * thread getting here, it set group_exit_code
                         * first and our do_group_exit call below will use
                         * that value and ignore the one we pass it.
                         */
                        do_coredump(info->si_signo, info->si_signo, regs);
                }

                /*
                 * Death signals, no core dump.
                 */
                do_group_exit(info->si_signo);
                /* NOTREACHED */
        }
        spin_unlock_irq(&sighand->siglock);
        return signr;
}
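
/*
 * Usage sketch (illustrative, simplified from a typical
 * arch/<arch>/kernel/signal.c delivery loop; do_signal() and
 * handle_signal() are assumed arch-local helpers, not defined here):
 *
 *	static void do_signal(struct pt_regs *regs)
 *	{
 *		siginfo_t info;
 *		struct k_sigaction ka;
 *		int signr;
 *
 *		signr = get_signal_to_deliver(&info, &ka, regs, NULL);
 *		if (signr > 0) {
 *			// build the user-mode handler frame for 'ka'
 *			handle_signal(signr, &ka, &info, regs);
 *			return;
 *		}
 *		// no handler to run: handle syscall restart, restore
 *		// the saved sigmask, etc.
 *	}
 */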
void exit_signals(struct task_struct *tsk)
{
        int group_stop = 0;
        struct task_struct *t;

        if (thread_group_empty(tsk) || signal_group_exit(tsk->signal)) {
                tsk->flags |= PF_EXITING;
                return;
        }

        spin_lock_irq(&tsk->sighand->siglock);
        /*
         * From now this task is not visible for group-wide signals,
         * see wants_signal(), do_signal_stop().
         */
        tsk->flags |= PF_EXITING;
        if (!signal_pending(tsk))
                goto out;

        /*
         * It could be that __group_complete_signal() chose us to
         * notify about a group-wide signal. Another thread should be
         * woken now to take the signal since we will not.
         */
        for (t = tsk; (t = next_thread(t)) != tsk; )
                if (!signal_pending(t) && !(t->flags & PF_EXITING))
                        recalc_sigpending_and_wake(t);

        if (unlikely(tsk->group_stop & GROUP_STOP_PENDING) &&
            task_participate_group_stop(tsk))
                group_stop = CLD_STOPPED;
out:
        spin_unlock_irq(&tsk->sighand->siglock);

        if (unlikely(group_stop)) {
                read_lock(&tasklist_lock);
                do_notify_parent_cldstop(tsk, group_stop);
                read_unlock(&tasklist_lock);
        }
}

EXPORT_SYMBOL(recalc_sigpending);
EXPORT_SYMBOL_GPL(dequeue_signal);
EXPORT_SYMBOL(flush_signals);
EXPORT_SYMBOL(force_sig);
EXPORT_SYMBOL(send_sig);
EXPORT_SYMBOL(send_sig_info);
EXPORT_SYMBOL(sigprocmask);
EXPORT_SYMBOL(block_all_signals);
EXPORT_SYMBOL(unblock_all_signals);

/*
 * System call entry points.
 */

SYSCALL_DEFINE0(restart_syscall)
{
        struct restart_block *restart = &current_thread_info()->restart_block;
        return restart->fn(restart);
}

long do_no_restart_syscall(struct restart_block *param)
{
        return -EINTR;
}

/*
 * We don't need to get the kernel lock - this is all local to this
 * particular thread. (and that's good, because this is _heavily_
 * used by various programs)
 */

/*
 * This is also useful for kernel threads that want to temporarily
 * (or permanently) block certain signals.
 *
 * NOTE! Unlike the user-mode sys_sigprocmask(), the kernel
 * interface happily blocks "unblockable" signals like SIGKILL
 * and friends.
 */
int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
{
        int error;

        spin_lock_irq(&current->sighand->siglock);
        if (oldset)
                *oldset = current->blocked;

        error = 0;
        switch (how) {
        case SIG_BLOCK:
                sigorsets(&current->blocked, &current->blocked, set);
                break;
        case SIG_UNBLOCK:
                signandsets(&current->blocked, &current->blocked, set);
                break;
        case SIG_SETMASK:
                current->blocked = *set;
                break;
        default:
                error = -EINVAL;
        }
        recalc_sigpending();
        spin_unlock_irq(&current->sighand->siglock);

        return error;
}
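
/*
 * Usage sketch (illustrative): per the NOTE above, this in-kernel
 * interface will block even SIGKILL, so a kernel thread that wants to
 * remain killable while ignoring everything else must carve SIGKILL
 * out of the set itself:
 *
 *	sigset_t all;
 *
 *	sigfillset(&all);
 *	sigdelset(&all, SIGKILL);
 *	sigprocmask(SIG_BLOCK, &all, NULL);
 */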
SYSCALL_DEFINE4(rt_sigprocmask, int, how, sigset_t __user *, set,
                sigset_t __user *, oset, size_t, sigsetsize)
{
        int error = -EINVAL;
        sigset_t old_set, new_set;

        /* XXX: Don't preclude handling different sized sigset_t's. */
        if (sigsetsize != sizeof(sigset_t))
                goto out;

        if (set) {
                error = -EFAULT;
                if (copy_from_user(&new_set, set, sizeof(*set)))
                        goto out;
                sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));

                error = sigprocmask(how, &new_set, &old_set);
                if (error)
                        goto out;
                if (oset)
                        goto set_old;
        } else if (oset) {
                spin_lock_irq(&current->sighand->siglock);
                old_set = current->blocked;
                spin_unlock_irq(&current->sighand->siglock);

        set_old:
                error = -EFAULT;
                if (copy_to_user(oset, &old_set, sizeof(*oset)))
                        goto out;
        }
        error = 0;
out:
        return error;
}

long do_sigpending(void __user *set, unsigned long sigsetsize)
{
        long error = -EINVAL;
        sigset_t pending;

        if (sigsetsize > sizeof(sigset_t))
                goto out;

        spin_lock_irq(&current->sighand->siglock);
        sigorsets(&pending, &current->pending.signal,
                  &current->signal->shared_pending.signal);
        spin_unlock_irq(&current->sighand->siglock);

        /* Outside the lock because only this thread touches it. */
        sigandsets(&pending, &current->blocked, &pending);

        error = -EFAULT;
        if (!copy_to_user(set, &pending, sigsetsize))
                error = 0;
out:
        return error;
}

SYSCALL_DEFINE2(rt_sigpending, sigset_t __user *, set, size_t, sigsetsize)
{
        return do_sigpending(set, sigsetsize);
}

#ifndef HAVE_ARCH_COPY_SIGINFO_TO_USER

int copy_siginfo_to_user(siginfo_t __user *to, siginfo_t *from)
{
        int err;

        if (!access_ok(VERIFY_WRITE, to, sizeof(siginfo_t)))
                return -EFAULT;
        if (from->si_code < 0)
                return __copy_to_user(to, from, sizeof(siginfo_t))
                        ? -EFAULT : 0;
        /*
         * If you change siginfo_t structure, please be sure
         * this code is fixed accordingly.
         * Please remember to update the signalfd_copyinfo() function
         * inside fs/signalfd.c too, in case siginfo_t changes.
         * It should never copy any pad contained in the structure
         * to avoid security leaks, but must copy the generic
         * 3 ints plus the relevant union member.
         */
        err = __put_user(from->si_signo, &to->si_signo);
        err |= __put_user(from->si_errno, &to->si_errno);
        err |= __put_user((short)from->si_code, &to->si_code);
        switch (from->si_code & __SI_MASK) {
        case __SI_KILL:
                err |= __put_user(from->si_pid, &to->si_pid);
                err |= __put_user(from->si_uid, &to->si_uid);
                break;
        case __SI_TIMER:
                err |= __put_user(from->si_tid, &to->si_tid);
                err |= __put_user(from->si_overrun, &to->si_overrun);
                err |= __put_user(from->si_ptr, &to->si_ptr);
                break;
        case __SI_POLL:
                err |= __put_user(from->si_band, &to->si_band);
                err |= __put_user(from->si_fd, &to->si_fd);
                break;
        case __SI_FAULT:
                err |= __put_user(from->si_addr, &to->si_addr);
#ifdef __ARCH_SI_TRAPNO
                err |= __put_user(from->si_trapno, &to->si_trapno);
#endif
#ifdef BUS_MCEERR_AO
                /*
                 * Other callers might not initialize the si_lsb field,
                 * so check explicitly for the right codes here.
                 */
                if (from->si_code == BUS_MCEERR_AR || from->si_code == BUS_MCEERR_AO)
                        err |= __put_user(from->si_addr_lsb, &to->si_addr_lsb);
#endif
                break;
        case __SI_CHLD:
                err |= __put_user(from->si_pid, &to->si_pid);
                err |= __put_user(from->si_uid, &to->si_uid);
                err |= __put_user(from->si_status, &to->si_status);
                err |= __put_user(from->si_utime, &to->si_utime);
                err |= __put_user(from->si_stime, &to->si_stime);
                break;
        case __SI_RT: /* This is not generated by the kernel as of now. */
        case __SI_MESGQ: /* But this is. */
                err |= __put_user(from->si_pid, &to->si_pid);
                err |= __put_user(from->si_uid, &to->si_uid);
                err |= __put_user(from->si_ptr, &to->si_ptr);
                break;
        default: /* this is just in case for now ... */
                err |= __put_user(from->si_pid, &to->si_pid);
                err |= __put_user(from->si_uid, &to->si_uid);
                break;
        }
        return err;
}

#endif

SYSCALL_DEFINE4(rt_sigtimedwait, const sigset_t __user *, uthese,
                siginfo_t __user *, uinfo, const struct timespec __user *, uts,
                size_t, sigsetsize)
{
        int ret, sig;
        sigset_t these;
        struct timespec ts;
        siginfo_t info;
        long timeout = 0;

        /* XXX: Don't preclude handling different sized sigset_t's. */
        if (sigsetsize != sizeof(sigset_t))
                return -EINVAL;

        if (copy_from_user(&these, uthese, sizeof(these)))
                return -EFAULT;

        /*
         * Invert the set of allowed signals to get those we
         * want to block.
         */
        sigdelsetmask(&these, sigmask(SIGKILL)|sigmask(SIGSTOP));
        signotset(&these);

        if (uts) {
                if (copy_from_user(&ts, uts, sizeof(ts)))
                        return -EFAULT;
                if (ts.tv_nsec >= 1000000000L || ts.tv_nsec < 0
                    || ts.tv_sec < 0)
                        return -EINVAL;
        }

        spin_lock_irq(&current->sighand->siglock);
        sig = dequeue_signal(current, &these, &info);
        if (!sig) {
                timeout = MAX_SCHEDULE_TIMEOUT;
                if (uts)
                        timeout = (timespec_to_jiffies(&ts)
                                   + (ts.tv_sec || ts.tv_nsec));

                if (timeout) {
                        /*
                         * None ready -- temporarily unblock those we're
                         * interested in while we sleep, so that we'll
                         * be awakened when they arrive.
                         */
                        current->real_blocked = current->blocked;
                        sigandsets(&current->blocked, &current->blocked, &these);
                        recalc_sigpending();
                        spin_unlock_irq(&current->sighand->siglock);

                        timeout = schedule_timeout_interruptible(timeout);

                        spin_lock_irq(&current->sighand->siglock);
                        sig = dequeue_signal(current, &these, &info);
                        current->blocked = current->real_blocked;
                        siginitset(&current->real_blocked, 0);
                        recalc_sigpending();
                }
        }
        spin_unlock_irq(&current->sighand->siglock);

        if (sig) {
                ret = sig;
                if (uinfo) {
                        if (copy_siginfo_to_user(uinfo, &info))
                                ret = -EFAULT;
                }
        } else {
                ret = -EAGAIN;
                if (timeout)
                        ret = -EINTR;
        }

        return ret;
}
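
/*
 * Usage sketch (illustrative, the userspace side of this syscall via
 * the glibc sigtimedwait() wrapper): block SIGUSR1 so it stays pending,
 * then accept it synchronously with a 5 second timeout:
 *
 *	sigset_t set;
 *	siginfo_t si;
 *	struct timespec ts = { .tv_sec = 5, .tv_nsec = 0 };
 *
 *	sigemptyset(&set);
 *	sigaddset(&set, SIGUSR1);
 *	sigprocmask(SIG_BLOCK, &set, NULL);
 *	if (sigtimedwait(&set, &si, &ts) < 0 && errno == EAGAIN)
 *		;	// timed out with nothing pending
 *
 * The kernel side above mirrors this: it inverts the set, temporarily
 * installs it as ->blocked and sleeps in dequeue_signal().
 */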
SYSCALL_DEFINE2(kill, pid_t, pid, int, sig)
{
        struct siginfo info;

        info.si_signo = sig;
        info.si_errno = 0;
        info.si_code = SI_USER;
        info.si_pid = task_tgid_vnr(current);
        info.si_uid = current_uid();

        return kill_something_info(sig, &info, pid);
}

static int
do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info)
{
        struct task_struct *p;
        int error = -ESRCH;

        rcu_read_lock();
        p = find_task_by_vpid(pid);
        if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
                error = check_kill_permission(sig, info, p);
                /*
                 * The null signal is a permissions and process existence
                 * probe. No signal is actually delivered.
                 */
                if (!error && sig) {
                        error = do_send_sig_info(sig, info, p, false);
                        /*
                         * If lock_task_sighand() failed we pretend the task
                         * dies after receiving the signal. The window is tiny,
                         * and the signal is private anyway.
                         */
                        if (unlikely(error == -ESRCH))
                                error = 0;
                }
        }
        rcu_read_unlock();

        return error;
}

static int do_tkill(pid_t tgid, pid_t pid, int sig)
{
        struct siginfo info;

        info.si_signo = sig;
        info.si_errno = 0;
        info.si_code = SI_TKILL;
        info.si_pid = task_tgid_vnr(current);
        info.si_uid = current_uid();

        return do_send_specific(tgid, pid, sig, &info);
}

/**
 * sys_tgkill - send signal to one specific thread
 * @tgid: the thread group ID of the thread
 * @pid: the PID of the thread
 * @sig: signal to be sent
 *
 * This syscall also checks the @tgid and returns -ESRCH even if the PID
 * exists but no longer belongs to the target thread group. This solves
 * the problem of threads exiting and PIDs getting reused.
 */
SYSCALL_DEFINE3(tgkill, pid_t, tgid, pid_t, pid, int, sig)
{
        /* This is only valid for single tasks */
        if (pid <= 0 || tgid <= 0)
                return -EINVAL;

        return do_tkill(tgid, pid, sig);
}

/*
 * Send a signal to only one task, even if it's a CLONE_THREAD task.
 */
SYSCALL_DEFINE2(tkill, pid_t, pid, int, sig)
{
        /* This is only valid for single tasks */
        if (pid <= 0)
                return -EINVAL;

        return do_tkill(0, pid, sig);
}
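
/*
 * Usage sketch (illustrative, userspace side): tgkill is usually
 * reached through syscall(2) (needs <unistd.h> and <sys/syscall.h>);
 * 'some_thread_tid' is a hypothetical tid obtained from gettid() in
 * the target thread:
 *
 *	pid_t tgid = getpid();
 *	pid_t tid  = some_thread_tid;
 *
 *	syscall(SYS_tgkill, tgid, tid, SIGUSR1);
 *
 * Supplying the tgid is what prevents a recycled tid in some other
 * process from being signalled by mistake, which is why tgkill() is
 * preferred over the older tkill().
 */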
SYSCALL_DEFINE3(rt_sigqueueinfo, pid_t, pid, int, sig,
                siginfo_t __user *, uinfo)
{
        siginfo_t info;

        if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
                return -EFAULT;

        /*
         * Not even root can pretend to send signals from the kernel.
         * Nor can they impersonate a kill()/tgkill(), which adds source info.
         */
        if (info.si_code != SI_QUEUE) {
                /* We used to allow any < 0 si_code */
                WARN_ON_ONCE(info.si_code < 0);
                return -EPERM;
        }
        info.si_signo = sig;

        /* POSIX.1b doesn't mention process groups. */
        return kill_proc_info(sig, &info, pid);
}

long do_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig, siginfo_t *info)
{
        /* This is only valid for single tasks */
        if (pid <= 0 || tgid <= 0)
                return -EINVAL;

        /*
         * Not even root can pretend to send signals from the kernel.
         * Nor can they impersonate a kill()/tgkill(), which adds source info.
         */
        if (info->si_code != SI_QUEUE) {
                /* We used to allow any < 0 si_code */
                WARN_ON_ONCE(info->si_code < 0);
                return -EPERM;
        }
        info->si_signo = sig;

        return do_send_specific(tgid, pid, sig, info);
}

SYSCALL_DEFINE4(rt_tgsigqueueinfo, pid_t, tgid, pid_t, pid, int, sig,
                siginfo_t __user *, uinfo)
{
        siginfo_t info;

        if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
                return -EFAULT;

        return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
}

int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
{
        struct task_struct *t = current;
        struct k_sigaction *k;
        sigset_t mask;

        if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig)))
                return -EINVAL;

        k = &t->sighand->action[sig-1];

        spin_lock_irq(&current->sighand->siglock);
        if (oact)
                *oact = *k;

        if (act) {
                sigdelsetmask(&act->sa.sa_mask,
                              sigmask(SIGKILL) | sigmask(SIGSTOP));
                *k = *act;
                /*
                 * POSIX 3.3.1.3:
                 *  "Setting a signal action to SIG_IGN for a signal that is
                 *   pending shall cause the pending signal to be discarded,
                 *   whether or not it is blocked."
                 *
                 *  "Setting a signal action to SIG_DFL for a signal that is
                 *   pending and whose default action is to ignore the signal
                 *   (for example, SIGCHLD), shall cause the pending signal to
                 *   be discarded, whether or not it is blocked."
                 */
                if (sig_handler_ignored(sig_handler(t, sig), sig)) {
                        sigemptyset(&mask);
                        sigaddset(&mask, sig);
                        rm_from_queue_full(&mask, &t->signal->shared_pending);
                        do {
                                rm_from_queue_full(&mask, &t->pending);
                                t = next_thread(t);
                        } while (t != current);
                }
        }

        spin_unlock_irq(&current->sighand->siglock);
        return 0;
}

int
do_sigaltstack (const stack_t __user *uss, stack_t __user *uoss, unsigned long sp)
{
        stack_t oss;
        int error;

        oss.ss_sp = (void __user *) current->sas_ss_sp;
        oss.ss_size = current->sas_ss_size;
        oss.ss_flags = sas_ss_flags(sp);

        if (uss) {
                void __user *ss_sp;
                size_t ss_size;
                int ss_flags;

                error = -EFAULT;
                if (!access_ok(VERIFY_READ, uss, sizeof(*uss)))
                        goto out;
                error = __get_user(ss_sp, &uss->ss_sp) |
                        __get_user(ss_flags, &uss->ss_flags) |
                        __get_user(ss_size, &uss->ss_size);
                if (error)
                        goto out;

                error = -EPERM;
                if (on_sig_stack(sp))
                        goto out;

                error = -EINVAL;
                /*
                 * Note - this code used to test ss_flags incorrectly:
                 * old code may have been written using ss_flags == 0
                 * to mean ss_flags == SS_ONSTACK (as this was the only
                 * way that worked), so this fix preserves that older
                 * mechanism.
                 */
                if (ss_flags != SS_DISABLE && ss_flags != SS_ONSTACK && ss_flags != 0)
                        goto out;

                if (ss_flags == SS_DISABLE) {
                        ss_size = 0;
                        ss_sp = NULL;
                } else {
                        error = -ENOMEM;
                        if (ss_size < MINSIGSTKSZ)
                                goto out;
                }

                current->sas_ss_sp = (unsigned long) ss_sp;
                current->sas_ss_size = ss_size;
        }

        error = 0;
        if (uoss) {
                error = -EFAULT;
                if (!access_ok(VERIFY_WRITE, uoss, sizeof(*uoss)))
                        goto out;
                error = __put_user(oss.ss_sp, &uoss->ss_sp) |
                        __put_user(oss.ss_size, &uoss->ss_size) |
                        __put_user(oss.ss_flags, &uoss->ss_flags);
        }

out:
        return error;
}
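
/*
 * Usage sketch (illustrative, the userspace side via sigaltstack(2)):
 * install an alternate stack so a SIGSEGV handler can still run after
 * the normal stack has overflowed:
 *
 *	static char stack_mem[SIGSTKSZ];
 *	stack_t ss = {
 *		.ss_sp    = stack_mem,
 *		.ss_size  = sizeof(stack_mem),
 *		.ss_flags = 0,
 *	};
 *
 *	sigaltstack(&ss, NULL);
 *	// then register the handler with SA_ONSTACK in sa_flags
 *
 * The -EPERM check above (on_sig_stack()) is why this must be done
 * while *not* already executing on the alternate stack.
 */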
#ifdef __ARCH_WANT_SYS_SIGPENDING

SYSCALL_DEFINE1(sigpending, old_sigset_t __user *, set)
{
        return do_sigpending(set, sizeof(*set));
}

#endif

#ifdef __ARCH_WANT_SYS_SIGPROCMASK
/*
 * Some platforms have their own version with special arguments;
 * others support only sys_rt_sigprocmask.
 */
SYSCALL_DEFINE3(sigprocmask, int, how, old_sigset_t __user *, set,
                old_sigset_t __user *, oset)
{
        int error;
        old_sigset_t old_set, new_set;

        if (set) {
                error = -EFAULT;
                if (copy_from_user(&new_set, set, sizeof(*set)))
                        goto out;
                new_set &= ~(sigmask(SIGKILL) | sigmask(SIGSTOP));

                spin_lock_irq(&current->sighand->siglock);
                old_set = current->blocked.sig[0];

                error = 0;
                switch (how) {
                default:
                        error = -EINVAL;
                        break;
                case SIG_BLOCK:
                        sigaddsetmask(&current->blocked, new_set);
                        break;
                case SIG_UNBLOCK:
                        sigdelsetmask(&current->blocked, new_set);
                        break;
                case SIG_SETMASK:
                        current->blocked.sig[0] = new_set;
                        break;
                }

                recalc_sigpending();
                spin_unlock_irq(&current->sighand->siglock);
                if (error)
                        goto out;
                if (oset)
                        goto set_old;
        } else if (oset) {
                old_set = current->blocked.sig[0];
        set_old:
                error = -EFAULT;
                if (copy_to_user(oset, &old_set, sizeof(*oset)))
                        goto out;
        }
        error = 0;
out:
        return error;
}
#endif /* __ARCH_WANT_SYS_SIGPROCMASK */

#ifdef __ARCH_WANT_SYS_RT_SIGACTION
SYSCALL_DEFINE4(rt_sigaction, int, sig,
                const struct sigaction __user *, act,
                struct sigaction __user *, oact,
                size_t, sigsetsize)
{
        struct k_sigaction new_sa, old_sa;
        int ret = -EINVAL;

        /* XXX: Don't preclude handling different sized sigset_t's. */
        if (sigsetsize != sizeof(sigset_t))
                goto out;

        if (act) {
                if (copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa)))
                        return -EFAULT;
        }

        ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);

        if (!ret && oact) {
                if (copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa)))
                        return -EFAULT;
        }
out:
        return ret;
}
#endif /* __ARCH_WANT_SYS_RT_SIGACTION */
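
/*
 * Usage sketch (illustrative, the userspace side reached through the
 * glibc sigaction() wrapper; my_handler is a hypothetical handler):
 * install a SIGTERM handler that also blocks SIGINT while it runs and
 * restarts interrupted syscalls:
 *
 *	struct sigaction sa = { 0 };
 *
 *	sa.sa_handler = my_handler;
 *	sa.sa_flags = SA_RESTART;
 *	sigemptyset(&sa.sa_mask);
 *	sigaddset(&sa.sa_mask, SIGINT);
 *	sigaction(SIGTERM, &sa, NULL);
 *
 * Note that do_sigaction() above silently strips SIGKILL/SIGSTOP from
 * sa_mask and rejects attempts to change those two signals outright.
 */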
#ifdef __ARCH_WANT_SYS_SGETMASK

/*
 * For backwards compatibility. Functionality superseded by sigprocmask.
 */
SYSCALL_DEFINE0(sgetmask)
{
        /* SMP safe */
        return current->blocked.sig[0];
}

SYSCALL_DEFINE1(ssetmask, int, newmask)
{
        int old;

        spin_lock_irq(&current->sighand->siglock);
        old = current->blocked.sig[0];

        siginitset(&current->blocked, newmask & ~(sigmask(SIGKILL)|
                                                  sigmask(SIGSTOP)));
        recalc_sigpending();
        spin_unlock_irq(&current->sighand->siglock);

        return old;
}
#endif /* __ARCH_WANT_SYS_SGETMASK */

#ifdef __ARCH_WANT_SYS_SIGNAL
/*
 * For backwards compatibility. Functionality superseded by sigaction.
 */
SYSCALL_DEFINE2(signal, int, sig, __sighandler_t, handler)
{
        struct k_sigaction new_sa, old_sa;
        int ret;

        new_sa.sa.sa_handler = handler;
        new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;
        sigemptyset(&new_sa.sa.sa_mask);

        ret = do_sigaction(sig, &new_sa, &old_sa);

        return ret ? ret : (unsigned long)old_sa.sa.sa_handler;
}
#endif /* __ARCH_WANT_SYS_SIGNAL */

#ifdef __ARCH_WANT_SYS_PAUSE

SYSCALL_DEFINE0(pause)
{
        current->state = TASK_INTERRUPTIBLE;
        schedule();
        return -ERESTARTNOHAND;
}

#endif

#ifdef __ARCH_WANT_SYS_RT_SIGSUSPEND
SYSCALL_DEFINE2(rt_sigsuspend, sigset_t __user *, unewset, size_t, sigsetsize)
{
        sigset_t newset;

        /* XXX: Don't preclude handling different sized sigset_t's. */
        if (sigsetsize != sizeof(sigset_t))
                return -EINVAL;

        if (copy_from_user(&newset, unewset, sizeof(newset)))
                return -EFAULT;
        sigdelsetmask(&newset, sigmask(SIGKILL)|sigmask(SIGSTOP));

        spin_lock_irq(&current->sighand->siglock);
        current->saved_sigmask = current->blocked;
        current->blocked = newset;
        recalc_sigpending();
        spin_unlock_irq(&current->sighand->siglock);

        current->state = TASK_INTERRUPTIBLE;
        schedule();
        set_restore_sigmask();
        return -ERESTARTNOHAND;
}
#endif /* __ARCH_WANT_SYS_RT_SIGSUSPEND */
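
/*
 * Usage sketch (illustrative, userspace side via sigsuspend(3)): the
 * classic race-free "wait for a signal" idiom, which relies on the
 * atomic mask-swap-then-sleep implemented above; flag_set_by_handler
 * is a hypothetical volatile sig_atomic_t set by the handler:
 *
 *	sigset_t block, old;
 *
 *	sigemptyset(&block);
 *	sigaddset(&block, SIGUSR1);
 *	sigprocmask(SIG_BLOCK, &block, &old);	// close the race window
 *	while (!flag_set_by_handler)
 *		sigsuspend(&old);		// atomically unblock + sleep
 *	sigprocmask(SIG_SETMASK, &old, NULL);
 */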
__attribute__((weak)) const char *arch_vma_name(struct vm_area_struct *vma)
{
        return NULL;
}

void __init signals_init(void)
{
        sigqueue_cachep = KMEM_CACHE(sigqueue, SLAB_PANIC);
}

#ifdef CONFIG_KGDB_KDB
#include <linux/kdb.h>
/*
 * kdb_send_sig_info - Allows kdb to send signals without exposing
 * signal internals. This function checks if the required locks are
 * available before calling the main signal code, to avoid kdb
 * deadlocks.
 */
void
kdb_send_sig_info(struct task_struct *t, struct siginfo *info)
{
        static struct task_struct *kdb_prev_t;
        int sig, new_t;

        if (!spin_trylock(&t->sighand->siglock)) {
                kdb_printf("Can't do kill command now.\n"
                           "The sigmask lock is held somewhere else in "
                           "the kernel; try again later\n");
                return;
        }
        spin_unlock(&t->sighand->siglock);
        new_t = kdb_prev_t != t;
        kdb_prev_t = t;
        if (t->state != TASK_RUNNING && new_t) {
                kdb_printf("Process is not RUNNING, sending a signal from "
                           "kdb risks deadlock\n"
                           "on the run queue locks. "
                           "The signal has _not_ been sent.\n"
                           "Reissue the kill command if you want to risk "
                           "the deadlock.\n");
                return;
        }
        sig = info->si_signo;
        if (send_sig_info(sig, info, t))
                kdb_printf("Failed to deliver signal %d to process %d.\n",
                           sig, t->pid);
        else
                kdb_printf("Signal %d sent to process %d.\n", sig, t->pid);
}
#endif /* CONFIG_KGDB_KDB */