
/*
 *  linux/kernel/signal.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  1997-11-02  Modified for POSIX.1b signals by Richard Henderson
 *
 *  2003-06-02  Jim Houston - Concurrent Computer Corp.
 *		Changes to use preallocated sigqueue structures
 *		to allow signals to be sent reliably.
 */

#include <linux/slab.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/tty.h>
#include <linux/binfmts.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/ptrace.h>
#include <linux/signal.h>
#include <linux/signalfd.h>
#include <linux/ratelimit.h>
#include <linux/tracehook.h>
#include <linux/capability.h>
#include <linux/freezer.h>
#include <linux/pid_namespace.h>
#include <linux/nsproxy.h>
#define CREATE_TRACE_POINTS
#include <trace/events/signal.h>

#include <asm/param.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm/siginfo.h>
#include "audit.h"	/* audit_signal_info() */

/*
 * SLAB caches for signal bits.
 */

static struct kmem_cache *sigqueue_cachep;

int print_fatal_signals __read_mostly;
static void __user *sig_handler(struct task_struct *t, int sig)
{
	return t->sighand->action[sig - 1].sa.sa_handler;
}

static int sig_handler_ignored(void __user *handler, int sig)
{
	/* Is it explicitly or implicitly ignored? */
	return handler == SIG_IGN ||
		(handler == SIG_DFL && sig_kernel_ignore(sig));
}

static int sig_task_ignored(struct task_struct *t, int sig,
			    int from_ancestor_ns)
{
	void __user *handler;

	handler = sig_handler(t, sig);

	if (unlikely(t->signal->flags & SIGNAL_UNKILLABLE) &&
	    handler == SIG_DFL && !from_ancestor_ns)
		return 1;

	return sig_handler_ignored(handler, sig);
}

static int sig_ignored(struct task_struct *t, int sig, int from_ancestor_ns)
{
	/*
	 * Blocked signals are never ignored, since the
	 * signal handler may change by the time it is
	 * unblocked.
	 */
	if (sigismember(&t->blocked, sig) || sigismember(&t->real_blocked, sig))
		return 0;

	if (!sig_task_ignored(t, sig, from_ancestor_ns))
		return 0;

	/*
	 * Tracers may want to know about even ignored signals.
	 */
	return !t->ptrace;
}
/*
 * Re-calculate pending state from the set of locally pending
 * signals, globally pending signals, and blocked signals.
 */
static inline int has_pending_signals(sigset_t *signal, sigset_t *blocked)
{
	unsigned long ready;
	long i;

	switch (_NSIG_WORDS) {
	default:
		for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;)
			ready |= signal->sig[i] &~ blocked->sig[i];
		break;

	case 4: ready  = signal->sig[3] &~ blocked->sig[3];
		ready |= signal->sig[2] &~ blocked->sig[2];
		ready |= signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 2: ready  = signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 1: ready  = signal->sig[0] &~ blocked->sig[0];
	}
	return ready != 0;
}
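/*
 * Illustrative example (editorial, not in the original source): with
 * _NSIG_WORDS == 1, a pending-but-blocked signal yields no ready bits:
 *
 *	signal->sig[0]  = sigmask(SIGINT);
 *	blocked->sig[0] = sigmask(SIGINT);
 *	has_pending_signals(signal, blocked);	// computes 0
 *
 * The switch above just unrolls this word-wise AND-NOT for the common
 * values of _NSIG_WORDS.
 */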
#define PENDING(p,b) has_pending_signals(&(p)->signal, (b))

static int recalc_sigpending_tsk(struct task_struct *t)
{
	if ((t->jobctl & JOBCTL_PENDING_MASK) ||
	    PENDING(&t->pending, &t->blocked) ||
	    PENDING(&t->signal->shared_pending, &t->blocked)) {
		set_tsk_thread_flag(t, TIF_SIGPENDING);
		return 1;
	}
	/*
	 * We must never clear the flag in another thread, or in current
	 * when it's possible the current syscall is returning -ERESTART*.
	 * So we don't clear it here; only the callers that know they
	 * should clear it do so.
	 */
	return 0;
}

/*
 * After recalculating TIF_SIGPENDING, we need to make sure the task wakes up.
 * This is superfluous when called on current: the wakeup is a harmless no-op.
 */
void recalc_sigpending_and_wake(struct task_struct *t)
{
	if (recalc_sigpending_tsk(t))
		signal_wake_up(t, 0);
}

void recalc_sigpending(void)
{
	if (!recalc_sigpending_tsk(current) && !freezing(current))
		clear_thread_flag(TIF_SIGPENDING);
}
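/*
 * Usage sketch (editorial, illustrative only): a typical caller changes
 * ->blocked under ->siglock and then recomputes TIF_SIGPENDING:
 *
 *	spin_lock_irq(&current->sighand->siglock);
 *	sigaddset(&current->blocked, SIGUSR1);	// SIGUSR1 is just an example
 *	recalc_sigpending();
 *	spin_unlock_irq(&current->sighand->siglock);
 */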
/* Given the mask, find the first available signal that should be serviced. */

#define SYNCHRONOUS_MASK \
	(sigmask(SIGSEGV) | sigmask(SIGBUS) | sigmask(SIGILL) | \
	 sigmask(SIGTRAP) | sigmask(SIGFPE))

int next_signal(struct sigpending *pending, sigset_t *mask)
{
	unsigned long i, *s, *m, x;
	int sig = 0;

	s = pending->signal.sig;
	m = mask->sig;

	/*
	 * Handle the first word specially: it contains the
	 * synchronous signals that need to be dequeued first.
	 */
	x = *s &~ *m;
	if (x) {
		if (x & SYNCHRONOUS_MASK)
			x &= SYNCHRONOUS_MASK;
		sig = ffz(~x) + 1;
		return sig;
	}

	switch (_NSIG_WORDS) {
	default:
		for (i = 1; i < _NSIG_WORDS; ++i) {
			x = *++s &~ *++m;
			if (!x)
				continue;
			sig = ffz(~x) + i*_NSIG_BPW + 1;
			break;
		}
		break;

	case 2:
		x = s[1] &~ m[1];
		if (!x)
			break;
		sig = ffz(~x) + _NSIG_BPW + 1;
		break;

	case 1:
		/* Nothing to do */
		break;
	}

	return sig;
}
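/*
 * Editorial example: if both SIGSEGV and SIGHUP are pending and unblocked,
 * the SYNCHRONOUS_MASK filtering above makes next_signal() report SIGSEGV
 * first, so a synchronous fault signal is serviced before ordinary
 * asynchronous signals sharing the first word.
 */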
static inline void print_dropped_signal(int sig)
{
	static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 10);

	if (!print_fatal_signals)
		return;

	if (!__ratelimit(&ratelimit_state))
		return;

	printk(KERN_INFO "%s/%d: reached RLIMIT_SIGPENDING, dropped signal %d\n",
				current->comm, current->pid, sig);
}
/**
 * task_set_jobctl_pending - set jobctl pending bits
 * @task: target task
 * @mask: pending bits to set
 *
 * Set @mask in @task->jobctl.  @mask must be a subset of
 * %JOBCTL_PENDING_MASK | %JOBCTL_STOP_CONSUME | %JOBCTL_STOP_SIGMASK |
 * %JOBCTL_TRAPPING.  If a stop signo is being set, the existing signo is
 * cleared.  If @task is already being killed or exiting, this function
 * becomes a no-op.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 *
 * RETURNS:
 * %true if @mask is set, %false if made a no-op because @task was dying.
 */
bool task_set_jobctl_pending(struct task_struct *task, unsigned int mask)
{
	BUG_ON(mask & ~(JOBCTL_PENDING_MASK | JOBCTL_STOP_CONSUME |
			JOBCTL_STOP_SIGMASK | JOBCTL_TRAPPING));
	BUG_ON((mask & JOBCTL_TRAPPING) && !(mask & JOBCTL_PENDING_MASK));

	if (unlikely(fatal_signal_pending(task) || (task->flags & PF_EXITING)))
		return false;

	if (mask & JOBCTL_STOP_SIGMASK)
		task->jobctl &= ~JOBCTL_STOP_SIGMASK;

	task->jobctl |= mask;
	return true;
}
/**
 * task_clear_jobctl_trapping - clear jobctl trapping bit
 * @task: target task
 *
 * If %JOBCTL_TRAPPING is set, a ptracer is waiting for us to enter TRACED.
 * Clear it and wake up the ptracer.  Note that we don't need any further
 * locking; @task->sighand->siglock guarantees that @task->parent points
 * to the ptracer.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 */
void task_clear_jobctl_trapping(struct task_struct *task)
{
	if (unlikely(task->jobctl & JOBCTL_TRAPPING)) {
		task->jobctl &= ~JOBCTL_TRAPPING;
		wake_up_bit(&task->jobctl, JOBCTL_TRAPPING_BIT);
	}
}

/**
 * task_clear_jobctl_pending - clear jobctl pending bits
 * @task: target task
 * @mask: pending bits to clear
 *
 * Clear @mask from @task->jobctl.  @mask must be a subset of
 * %JOBCTL_PENDING_MASK.  If %JOBCTL_STOP_PENDING is being cleared, the
 * other STOP bits are cleared together.
 *
 * If clearing of @mask leaves no stop or trap pending, this function calls
 * task_clear_jobctl_trapping().
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 */
void task_clear_jobctl_pending(struct task_struct *task, unsigned int mask)
{
	BUG_ON(mask & ~JOBCTL_PENDING_MASK);

	if (mask & JOBCTL_STOP_PENDING)
		mask |= JOBCTL_STOP_CONSUME | JOBCTL_STOP_DEQUEUED;

	task->jobctl &= ~mask;

	if (!(task->jobctl & JOBCTL_PENDING_MASK))
		task_clear_jobctl_trapping(task);
}
/**
 * task_participate_group_stop - participate in a group stop
 * @task: task participating in a group stop
 *
 * @task has %JOBCTL_STOP_PENDING set and is participating in a group stop.
 * Group stop states are cleared and the group stop count is consumed if
 * %JOBCTL_STOP_CONSUME was set.  If the consumption completes the group
 * stop, the appropriate %SIGNAL_* flags are set.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 *
 * RETURNS:
 * %true if group stop completion should be notified to the parent, %false
 * otherwise.
 */
static bool task_participate_group_stop(struct task_struct *task)
{
	struct signal_struct *sig = task->signal;
	bool consume = task->jobctl & JOBCTL_STOP_CONSUME;

	WARN_ON_ONCE(!(task->jobctl & JOBCTL_STOP_PENDING));

	task_clear_jobctl_pending(task, JOBCTL_STOP_PENDING);

	if (!consume)
		return false;

	if (!WARN_ON_ONCE(sig->group_stop_count == 0))
		sig->group_stop_count--;

	/*
	 * Tell the caller to notify completion iff we are entering into a
	 * fresh group stop.  Read the comment in do_signal_stop() for details.
	 */
	if (!sig->group_stop_count && !(sig->flags & SIGNAL_STOP_STOPPED)) {
		sig->flags = SIGNAL_STOP_STOPPED;
		return true;
	}
	return false;
}
/*
 * allocate a new signal queue record
 * - this may be called without locks if and only if t == current, otherwise an
 *   appropriate lock must be held to stop the target task from exiting
 */
static struct sigqueue *
__sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimit)
{
	struct sigqueue *q = NULL;
	struct user_struct *user;

	/*
	 * Protect access to @t credentials.  This can go away when all
	 * callers hold rcu read lock.
	 */
	rcu_read_lock();
	user = get_uid(__task_cred(t)->user);
	atomic_inc(&user->sigpending);
	rcu_read_unlock();

	if (override_rlimit ||
	    atomic_read(&user->sigpending) <=
			task_rlimit(t, RLIMIT_SIGPENDING)) {
		q = kmem_cache_alloc(sigqueue_cachep, flags);
	} else {
		print_dropped_signal(sig);
	}

	if (unlikely(q == NULL)) {
		atomic_dec(&user->sigpending);
		free_uid(user);
	} else {
		INIT_LIST_HEAD(&q->list);
		q->flags = 0;
		q->user = user;
	}

	return q;
}

static void __sigqueue_free(struct sigqueue *q)
{
	if (q->flags & SIGQUEUE_PREALLOC)
		return;
	atomic_dec(&q->user->sigpending);
	free_uid(q->user);
	kmem_cache_free(sigqueue_cachep, q);
}

void flush_sigqueue(struct sigpending *queue)
{
	struct sigqueue *q;

	sigemptyset(&queue->signal);
	while (!list_empty(&queue->list)) {
		q = list_entry(queue->list.next, struct sigqueue, list);
		list_del_init(&q->list);
		__sigqueue_free(q);
	}
}
/*
 * Flush all pending signals for a task.
 */
void __flush_signals(struct task_struct *t)
{
	clear_tsk_thread_flag(t, TIF_SIGPENDING);
	flush_sigqueue(&t->pending);
	flush_sigqueue(&t->signal->shared_pending);
}

void flush_signals(struct task_struct *t)
{
	unsigned long flags;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	__flush_signals(t);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);
}

static void __flush_itimer_signals(struct sigpending *pending)
{
	sigset_t signal, retain;
	struct sigqueue *q, *n;

	signal = pending->signal;
	sigemptyset(&retain);

	list_for_each_entry_safe(q, n, &pending->list, list) {
		int sig = q->info.si_signo;

		if (likely(q->info.si_code != SI_TIMER)) {
			sigaddset(&retain, sig);
		} else {
			sigdelset(&signal, sig);
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}

	sigorsets(&pending->signal, &signal, &retain);
}

void flush_itimer_signals(void)
{
	struct task_struct *tsk = current;
	unsigned long flags;

	spin_lock_irqsave(&tsk->sighand->siglock, flags);
	__flush_itimer_signals(&tsk->pending);
	__flush_itimer_signals(&tsk->signal->shared_pending);
	spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
}

void ignore_signals(struct task_struct *t)
{
	int i;

	for (i = 0; i < _NSIG; ++i)
		t->sighand->action[i].sa.sa_handler = SIG_IGN;

	flush_signals(t);
}
/*
 * Flush all handlers for a task.
 */
void
flush_signal_handlers(struct task_struct *t, int force_default)
{
	int i;
	struct k_sigaction *ka = &t->sighand->action[0];
	for (i = _NSIG ; i != 0 ; i--) {
		if (force_default || ka->sa.sa_handler != SIG_IGN)
			ka->sa.sa_handler = SIG_DFL;
		ka->sa.sa_flags = 0;
		sigemptyset(&ka->sa.sa_mask);
		ka++;
	}
}

int unhandled_signal(struct task_struct *tsk, int sig)
{
	void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;

	if (is_global_init(tsk))
		return 1;

	if (handler != SIG_IGN && handler != SIG_DFL)
		return 0;

	/* if ptraced, let the tracer determine */
	return !tsk->ptrace;
}
/*
 * Notify the system that a driver wants to block all signals for this
 * process, and wants to be notified if any signals at all were to be
 * sent/acted upon.  If the notifier routine returns non-zero, then the
 * signal will be acted upon after all.  If the notifier routine returns 0,
 * then the signal will be blocked.  Only one block per process is
 * allowed.  priv is a pointer to private data that the notifier routine
 * can use to determine if the signal should be blocked or not.
 */
void
block_all_signals(int (*notifier)(void *priv), void *priv, sigset_t *mask)
{
	unsigned long flags;

	spin_lock_irqsave(&current->sighand->siglock, flags);
	current->notifier_mask = mask;
	current->notifier_data = priv;
	current->notifier = notifier;
	spin_unlock_irqrestore(&current->sighand->siglock, flags);
}

/* Notify the system that blocking has ended. */
void
unblock_all_signals(void)
{
	unsigned long flags;

	spin_lock_irqsave(&current->sighand->siglock, flags);
	current->notifier = NULL;
	current->notifier_data = NULL;
	recalc_sigpending();
	spin_unlock_irqrestore(&current->sighand->siglock, flags);
}
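/*
 * Usage sketch (editorial; my_notifier and my_dev are hypothetical):
 *
 *	static int my_notifier(void *priv)
 *	{
 *		struct my_dev *dev = priv;
 *		return !dev->op_in_flight;	// 0 keeps the signal blocked
 *	}
 *
 *	block_all_signals(my_notifier, dev, &mask);
 *	// ...critical device operation...
 *	unblock_all_signals();
 */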
static void collect_signal(int sig, struct sigpending *list, siginfo_t *info)
{
	struct sigqueue *q, *first = NULL;

	/*
	 * Collect the siginfo appropriate to this signal.  Check if
	 * there is another siginfo for the same signal.
	 */
	list_for_each_entry(q, &list->list, list) {
		if (q->info.si_signo == sig) {
			if (first)
				goto still_pending;
			first = q;
		}
	}

	sigdelset(&list->signal, sig);

	if (first) {
still_pending:
		list_del_init(&first->list);
		copy_siginfo(info, &first->info);
		__sigqueue_free(first);
	} else {
		/*
		 * Ok, it wasn't in the queue.  This must be
		 * a fast-pathed signal or we must have been
		 * out of queue space.  So zero out the info.
		 */
		info->si_signo = sig;
		info->si_errno = 0;
		info->si_code = SI_USER;
		info->si_pid = 0;
		info->si_uid = 0;
	}
}

static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
			siginfo_t *info)
{
	int sig = next_signal(pending, mask);

	if (sig) {
		if (current->notifier) {
			if (sigismember(current->notifier_mask, sig)) {
				if (!(current->notifier)(current->notifier_data)) {
					clear_thread_flag(TIF_SIGPENDING);
					return 0;
				}
			}
		}

		collect_signal(sig, pending, info);
	}

	return sig;
}
/*
 * Dequeue a signal and return the element to the caller, which is
 * expected to free it.
 *
 * All callers have to hold the siglock.
 */
int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
{
	int signr;

	/*
	 * We only dequeue private signals from ourselves, we don't let
	 * signalfd steal them.
	 */
	signr = __dequeue_signal(&tsk->pending, mask, info);
	if (!signr) {
		signr = __dequeue_signal(&tsk->signal->shared_pending,
					 mask, info);
		/*
		 * itimer signal?
		 *
		 * itimers are process shared and we restart periodic
		 * itimers in the signal delivery path to prevent DoS
		 * attacks in the high resolution timer case.  This is
		 * compliant with the old way of self-restarting
		 * itimers, as the SIGALRM is a legacy signal and only
		 * queued once.  Changing the restart behaviour to
		 * restart the timer in the signal dequeue path also
		 * reduces the timer noise on heavily loaded !highres
		 * systems.
		 */
		if (unlikely(signr == SIGALRM)) {
			struct hrtimer *tmr = &tsk->signal->real_timer;

			if (!hrtimer_is_queued(tmr) &&
			    tsk->signal->it_real_incr.tv64 != 0) {
				hrtimer_forward(tmr, tmr->base->get_time(),
						tsk->signal->it_real_incr);
				hrtimer_restart(tmr);
			}
		}
	}

	recalc_sigpending();
	if (!signr)
		return 0;

	if (unlikely(sig_kernel_stop(signr))) {
		/*
		 * Set a marker that we have dequeued a stop signal.  Our
		 * caller might release the siglock and then the pending
		 * stop signal it is about to process is no longer in the
		 * pending bitmasks, but must still be cleared by a SIGCONT
		 * (and overruled by a SIGKILL).  So those cases clear this
		 * shared flag after we've set it.  Note that this flag may
		 * remain set after the signal we return is ignored or
		 * handled.  That doesn't matter because its only purpose
		 * is to alert stop-signal processing code when another
		 * processor has come along and cleared the flag.
		 */
		current->jobctl |= JOBCTL_STOP_DEQUEUED;
	}
	if ((info->si_code & __SI_MASK) == __SI_TIMER && info->si_sys_private) {
		/*
		 * Release the siglock to ensure proper locking order
		 * of timer locks outside of siglocks.  Note, we leave
		 * irqs disabled here, since the posix-timers code is
		 * about to disable them again anyway.
		 */
		spin_unlock(&tsk->sighand->siglock);
		do_schedule_next_timer(info);
		spin_lock(&tsk->sighand->siglock);
	}
	return signr;
}
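/*
 * Caller sketch (editorial, illustrative only): dequeue_signal() always
 * runs under ->siglock, e.g.
 *
 *	spin_lock_irq(&current->sighand->siglock);
 *	signr = dequeue_signal(current, &current->blocked, &info);
 *	spin_unlock_irq(&current->sighand->siglock);
 */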
/*
 * Tell a process that it has a new active signal.
 *
 * NOTE! we rely on the previous spin_lock to
 * lock interrupts for us! We can only be called with
 * "siglock" held, and local interrupts must
 * have been disabled when that got acquired!
 *
 * No need to set need_resched since signal event passing
 * goes through ->blocked
 */
void signal_wake_up(struct task_struct *t, int resume)
{
	unsigned int mask;

	set_tsk_thread_flag(t, TIF_SIGPENDING);

	/*
	 * For SIGKILL, we want to wake it up in the stopped/traced/killable
	 * case.  We don't check t->state here because there is a race with it
	 * executing on another processor and just now entering stopped state.
	 * By using wake_up_state, we ensure the process will wake up and
	 * handle its death signal.
	 */
	mask = TASK_INTERRUPTIBLE;
	if (resume)
		mask |= TASK_WAKEKILL;
	if (!wake_up_state(t, mask))
		kick_process(t);
}
/*
 * Remove signals in mask from the pending set and queue.
 * Returns 1 if any signals were found.
 *
 * All callers must be holding the siglock.
 *
 * This version takes a sigset mask and looks at all signals,
 * not just those in the first mask word.
 */
static int rm_from_queue_full(sigset_t *mask, struct sigpending *s)
{
	struct sigqueue *q, *n;
	sigset_t m;

	sigandsets(&m, mask, &s->signal);
	if (sigisemptyset(&m))
		return 0;

	sigandnsets(&s->signal, &s->signal, mask);
	list_for_each_entry_safe(q, n, &s->list, list) {
		if (sigismember(mask, q->info.si_signo)) {
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}
	return 1;
}

/*
 * Remove signals in mask from the pending set and queue.
 * Returns 1 if any signals were found.
 *
 * All callers must be holding the siglock.
 */
static int rm_from_queue(unsigned long mask, struct sigpending *s)
{
	struct sigqueue *q, *n;

	if (!sigtestsetmask(&s->signal, mask))
		return 0;

	sigdelsetmask(&s->signal, mask);
	list_for_each_entry_safe(q, n, &s->list, list) {
		if (q->info.si_signo < SIGRTMIN &&
		    (mask & sigmask(q->info.si_signo))) {
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}
	return 1;
}
static inline int is_si_special(const struct siginfo *info)
{
	return info <= SEND_SIG_FORCED;
}

static inline bool si_fromuser(const struct siginfo *info)
{
	return info == SEND_SIG_NOINFO ||
		(!is_si_special(info) && SI_FROMUSER(info));
}

/*
 * called with RCU read lock from check_kill_permission()
 */
static int kill_ok_by_cred(struct task_struct *t)
{
	const struct cred *cred = current_cred();
	const struct cred *tcred = __task_cred(t);

	if (cred->user->user_ns == tcred->user->user_ns &&
	    (cred->euid == tcred->suid ||
	     cred->euid == tcred->uid ||
	     cred->uid  == tcred->suid ||
	     cred->uid  == tcred->uid))
		return 1;

	if (ns_capable(tcred->user->user_ns, CAP_KILL))
		return 1;

	return 0;
}
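/*
 * Editorial example of the rule above: a sender may signal a target when
 * the two share a user namespace and the sender's uid or euid matches the
 * target's real or saved uid; any other combination requires CAP_KILL in
 * the target's user namespace.
 */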
/*
 * Bad permissions for sending the signal
 * - the caller must hold the RCU read lock
 */
static int check_kill_permission(int sig, struct siginfo *info,
				 struct task_struct *t)
{
	struct pid *sid;
	int error;

	if (!valid_signal(sig))
		return -EINVAL;

	if (!si_fromuser(info))
		return 0;

	error = audit_signal_info(sig, t); /* Let audit system see the signal */
	if (error)
		return error;

	if (!same_thread_group(current, t) &&
	    !kill_ok_by_cred(t)) {
		switch (sig) {
		case SIGCONT:
			sid = task_session(t);
			/*
			 * We don't return the error if sid == NULL. The
			 * task was unhashed, the caller must notice this.
			 */
			if (!sid || sid == task_session(current))
				break;
		default:
			return -EPERM;
		}
	}

	return security_task_kill(t, info, sig, 0);
}
/**
 * ptrace_trap_notify - schedule trap to notify ptracer
 * @t: tracee wanting to notify tracer
 *
 * This function schedules a sticky ptrace trap, cleared on the next
 * TRAP_STOP, to notify the ptracer of an event.  @t must have been seized
 * by the ptracer.
 *
 * If @t is running, a STOP trap will be taken.  If trapped for STOP and
 * the ptracer is listening for events, the tracee is woken up so that it
 * can re-trap for the new event.  If trapped otherwise, the STOP trap will
 * eventually be taken without returning to userland after the existing
 * traps are finished by PTRACE_CONT.
 *
 * CONTEXT:
 * Must be called with @t->sighand->siglock held.
 */
static void ptrace_trap_notify(struct task_struct *t)
{
	WARN_ON_ONCE(!(t->ptrace & PT_SEIZED));
	assert_spin_locked(&t->sighand->siglock);

	task_set_jobctl_pending(t, JOBCTL_TRAP_NOTIFY);
	signal_wake_up(t, t->jobctl & JOBCTL_LISTENING);
}
/*
 * Handle magic process-wide effects of stop/continue signals.  Unlike
 * the signal actions, these happen immediately at signal-generation
 * time regardless of blocking, ignoring, or handling.  This does the
 * actual continuing for SIGCONT, but not the actual stopping for stop
 * signals.  The process stop is done as a signal action for SIG_DFL.
 *
 * Returns true if the signal should be actually delivered, otherwise
 * it should be dropped.
 */
static int prepare_signal(int sig, struct task_struct *p, int from_ancestor_ns)
{
	struct signal_struct *signal = p->signal;
	struct task_struct *t;

	if (unlikely(signal->flags & SIGNAL_GROUP_EXIT)) {
		/*
		 * The process is in the middle of dying, nothing to do.
		 */
	} else if (sig_kernel_stop(sig)) {
		/*
		 * This is a stop signal.  Remove SIGCONT from all queues.
		 */
		rm_from_queue(sigmask(SIGCONT), &signal->shared_pending);
		t = p;
		do {
			rm_from_queue(sigmask(SIGCONT), &t->pending);
		} while_each_thread(p, t);
	} else if (sig == SIGCONT) {
		unsigned int why;
		/*
		 * Remove all stop signals from all queues, wake all threads.
		 */
		rm_from_queue(SIG_KERNEL_STOP_MASK, &signal->shared_pending);
		t = p;
		do {
			task_clear_jobctl_pending(t, JOBCTL_STOP_PENDING);
			rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending);
			if (likely(!(t->ptrace & PT_SEIZED)))
				wake_up_state(t, __TASK_STOPPED);
			else
				ptrace_trap_notify(t);
		} while_each_thread(p, t);

		/*
		 * Notify the parent with CLD_CONTINUED if we were stopped.
		 *
		 * If we were in the middle of a group stop, we pretend it
		 * was already finished, and then continued.  Since SIGCHLD
		 * doesn't queue we report only CLD_STOPPED, as if the next
		 * CLD_CONTINUED was dropped.
		 */
		why = 0;
		if (signal->flags & SIGNAL_STOP_STOPPED)
			why |= SIGNAL_CLD_CONTINUED;
		else if (signal->group_stop_count)
			why |= SIGNAL_CLD_STOPPED;

		if (why) {
			/*
			 * The first thread which returns from do_signal_stop()
			 * will take ->siglock, notice SIGNAL_CLD_MASK, and
			 * notify its parent.  See get_signal_to_deliver().
			 */
			signal->flags = why | SIGNAL_STOP_CONTINUED;
			signal->group_stop_count = 0;
			signal->group_exit_code = 0;
		}
	}

	return !sig_ignored(p, sig, from_ancestor_ns);
}
/*
 * Test if P wants to take SIG.  After we've checked all threads with this,
 * it's equivalent to finding no threads not blocking SIG.  Any threads not
 * blocking SIG were ruled out because they are not running and already
 * have pending signals.  Such threads will dequeue from the shared queue
 * as soon as they're available, so putting the signal on the shared queue
 * will be equivalent to sending it to one such thread.
 */
static inline int wants_signal(int sig, struct task_struct *p)
{
	if (sigismember(&p->blocked, sig))
		return 0;
	if (p->flags & PF_EXITING)
		return 0;
	if (sig == SIGKILL)
		return 1;
	if (task_is_stopped_or_traced(p))
		return 0;
	return task_curr(p) || !signal_pending(p);
}
static void complete_signal(int sig, struct task_struct *p, int group)
{
	struct signal_struct *signal = p->signal;
	struct task_struct *t;

	/*
	 * Now find a thread we can wake up to take the signal off the queue.
	 *
	 * If the main thread wants the signal, it gets first crack.
	 * Probably the least surprising to the average bear.
	 */
	if (wants_signal(sig, p))
		t = p;
	else if (!group || thread_group_empty(p))
		/*
		 * There is just one thread and it does not need to be woken.
		 * It will dequeue unblocked signals before it runs again.
		 */
		return;
	else {
		/*
		 * Otherwise try to find a suitable thread.
		 */
		t = signal->curr_target;
		while (!wants_signal(sig, t)) {
			t = next_thread(t);
			if (t == signal->curr_target)
				/*
				 * No thread needs to be woken.
				 * Any eligible threads will see
				 * the signal in the queue soon.
				 */
				return;
		}
		signal->curr_target = t;
	}

	/*
	 * Found a killable thread.  If the signal will be fatal,
	 * then start taking the whole group down immediately.
	 */
	if (sig_fatal(p, sig) &&
	    !(signal->flags & (SIGNAL_UNKILLABLE | SIGNAL_GROUP_EXIT)) &&
	    !sigismember(&t->real_blocked, sig) &&
	    (sig == SIGKILL || !t->ptrace)) {
		/*
		 * This signal will be fatal to the whole group.
		 */
		if (!sig_kernel_coredump(sig)) {
			/*
			 * Start a group exit and wake everybody up.
			 * This way we don't have other threads
			 * running and doing things after a slower
			 * thread has the fatal signal pending.
			 */
			signal->flags = SIGNAL_GROUP_EXIT;
			signal->group_exit_code = sig;
			signal->group_stop_count = 0;
			t = p;
			do {
				task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
				sigaddset(&t->pending.signal, SIGKILL);
				signal_wake_up(t, 1);
			} while_each_thread(p, t);
			return;
		}
	}

	/*
	 * The signal is already in the shared-pending queue.
	 * Tell the chosen thread to wake up and dequeue it.
	 */
	signal_wake_up(t, sig == SIGKILL);
	return;
}

static inline int legacy_queue(struct sigpending *signals, int sig)
{
	return (sig < SIGRTMIN) && sigismember(&signals->signal, sig);
}
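/*
 * Editorial example of the legacy coalescing check: sending SIGUSR1 twice
 * before the target dequeues it leaves exactly one pending instance, while
 * two instances of a real-time signal such as SIGRTMIN+1 (sig >= SIGRTMIN)
 * are both queued.
 */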
static int __send_signal(int sig, struct siginfo *info, struct task_struct *t,
			int group, int from_ancestor_ns)
{
	struct sigpending *pending;
	struct sigqueue *q;
	int override_rlimit;

	trace_signal_generate(sig, info, t);

	assert_spin_locked(&t->sighand->siglock);

	if (!prepare_signal(sig, t, from_ancestor_ns))
		return 0;

	pending = group ? &t->signal->shared_pending : &t->pending;
	/*
	 * Short-circuit ignored signals and support queuing
	 * exactly one non-rt signal, so that we can get more
	 * detailed information about the cause of the signal.
	 */
	if (legacy_queue(pending, sig))
		return 0;
	/*
	 * fast-pathed signals for kernel-internal things like SIGSTOP
	 * or SIGKILL.
	 */
	if (info == SEND_SIG_FORCED)
		goto out_set;

	/*
	 * Real-time signals must be queued if sent by sigqueue, or
	 * some other real-time mechanism.  It is implementation
	 * defined whether kill() does so.  We attempt to do so, on
	 * the principle of least surprise, but since kill is not
	 * allowed to fail with EAGAIN when low on memory we just
	 * make sure at least one signal gets delivered and don't
	 * pass on the info struct.
	 */
	if (sig < SIGRTMIN)
		override_rlimit = (is_si_special(info) || info->si_code >= 0);
	else
		override_rlimit = 0;

	q = __sigqueue_alloc(sig, t, GFP_ATOMIC | __GFP_NOTRACK_FALSE_POSITIVE,
		override_rlimit);
	if (q) {
		list_add_tail(&q->list, &pending->list);
		switch ((unsigned long) info) {
		case (unsigned long) SEND_SIG_NOINFO:
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_USER;
			q->info.si_pid = task_tgid_nr_ns(current,
							task_active_pid_ns(t));
			q->info.si_uid = current_uid();
			break;
		case (unsigned long) SEND_SIG_PRIV:
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_KERNEL;
			q->info.si_pid = 0;
			q->info.si_uid = 0;
			break;
		default:
			copy_siginfo(&q->info, info);
			if (from_ancestor_ns)
				q->info.si_pid = 0;
			break;
		}
	} else if (!is_si_special(info)) {
		if (sig >= SIGRTMIN && info->si_code != SI_USER) {
			/*
			 * Queue overflow, abort.  We may abort if the
			 * signal was rt and sent by user using something
			 * other than kill().
			 */
			trace_signal_overflow_fail(sig, group, info);
			return -EAGAIN;
		} else {
			/*
			 * This is a silent loss of information.  We still
			 * send the signal, but the *info bits are lost.
			 */
			trace_signal_lose_info(sig, group, info);
		}
	}

out_set:
	signalfd_notify(t, sig);
	sigaddset(&pending->signal, sig);
	complete_signal(sig, t, group);
	return 0;
}

static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
			int group)
{
	int from_ancestor_ns = 0;

#ifdef CONFIG_PID_NS
	from_ancestor_ns = si_fromuser(info) &&
			   !task_pid_nr_ns(current, task_active_pid_ns(t));
#endif

	return __send_signal(sig, info, t, group, from_ancestor_ns);
}
static void print_fatal_signal(struct pt_regs *regs, int signr)
{
	printk("%s/%d: potentially unexpected fatal signal %d.\n",
		current->comm, task_pid_nr(current), signr);

#if defined(__i386__) && !defined(__arch_um__)
	printk("code at %08lx: ", regs->ip);
	{
		int i;
		for (i = 0; i < 16; i++) {
			unsigned char insn;

			if (get_user(insn, (unsigned char *)(regs->ip + i)))
				break;
			printk("%02x ", insn);
		}
	}
#endif
	printk("\n");
	preempt_disable();
	show_regs(regs);
	preempt_enable();
}

static int __init setup_print_fatal_signals(char *str)
{
	get_option(&str, &print_fatal_signals);

	return 1;
}

__setup("print-fatal-signals=", setup_print_fatal_signals);
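/*
 * Editorial note: booting with "print-fatal-signals=1" on the kernel
 * command line sets print_fatal_signals, enabling the register/opcode
 * dump in print_fatal_signal() above as well as the RLIMIT_SIGPENDING
 * drop message in print_dropped_signal().
 */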
int
__group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	return send_signal(sig, info, p, 1);
}

static int
specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
{
	return send_signal(sig, info, t, 0);
}

int do_send_sig_info(int sig, struct siginfo *info, struct task_struct *p,
			bool group)
{
	unsigned long flags;
	int ret = -ESRCH;

	if (lock_task_sighand(p, &flags)) {
		ret = send_signal(sig, info, p, group);
		unlock_task_sighand(p, &flags);
	}

	return ret;
}
/*
 * Force a signal that the process can't ignore: if necessary
 * we unblock the signal and change any SIG_IGN to SIG_DFL.
 *
 * Note: If we unblock the signal, we always reset it to SIG_DFL,
 * since we do not want to have a signal handler that was blocked
 * be invoked when user space had explicitly blocked it.
 *
 * We don't want to have recursive SIGSEGV's etc, for example,
 * that is why we also clear SIGNAL_UNKILLABLE.
 */
int
force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
{
	unsigned long int flags;
	int ret, blocked, ignored;
	struct k_sigaction *action;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	action = &t->sighand->action[sig-1];
	ignored = action->sa.sa_handler == SIG_IGN;
	blocked = sigismember(&t->blocked, sig);
	if (blocked || ignored) {
		action->sa.sa_handler = SIG_DFL;
		if (blocked) {
			sigdelset(&t->blocked, sig);
			recalc_sigpending_and_wake(t);
		}
	}
	if (action->sa.sa_handler == SIG_DFL)
		t->signal->flags &= ~SIGNAL_UNKILLABLE;
	ret = specific_send_sig_info(sig, info, t);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);

	return ret;
}
/*
 * Nuke all other threads in the group.
 */
int zap_other_threads(struct task_struct *p)
{
	struct task_struct *t = p;
	int count = 0;

	p->signal->group_stop_count = 0;

	while_each_thread(p, t) {
		task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
		count++;

		/* Don't bother with already dead threads */
		if (t->exit_state)
			continue;
		sigaddset(&t->pending.signal, SIGKILL);
		signal_wake_up(t, 1);
	}

	return count;
}

struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
					   unsigned long *flags)
{
	struct sighand_struct *sighand;

	rcu_read_lock();
	for (;;) {
		sighand = rcu_dereference(tsk->sighand);
		if (unlikely(sighand == NULL))
			break;

		spin_lock_irqsave(&sighand->siglock, *flags);
		if (likely(sighand == tsk->sighand))
			break;
		spin_unlock_irqrestore(&sighand->siglock, *flags);
	}
	rcu_read_unlock();

	return sighand;
}
/*
 * send signal info to all the members of a group
 */
int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	int ret;

	rcu_read_lock();
	ret = check_kill_permission(sig, info, p);
	rcu_read_unlock();

	if (!ret && sig)
		ret = do_send_sig_info(sig, info, p, true);

	return ret;
}

/*
 * __kill_pgrp_info() sends a signal to a process group: this is what the tty
 * control characters do (^C, ^Z etc)
 * - the caller must hold at least a readlock on tasklist_lock
 */
int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp)
{
	struct task_struct *p = NULL;
	int retval, success;

	success = 0;
	retval = -ESRCH;
	do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
		int err = group_send_sig_info(sig, info, p);
		success |= !err;
		retval = err;
	} while_each_pid_task(pgrp, PIDTYPE_PGID, p);
	return success ? 0 : retval;
}
int kill_pid_info(int sig, struct siginfo *info, struct pid *pid)
{
	int error = -ESRCH;
	struct task_struct *p;

	rcu_read_lock();
retry:
	p = pid_task(pid, PIDTYPE_PID);
	if (p) {
		error = group_send_sig_info(sig, info, p);
		if (unlikely(error == -ESRCH))
			/*
			 * The task was unhashed in between, try again.
			 * If it is dead, pid_task() will return NULL,
			 * if we race with de_thread() it will find the
			 * new leader.
			 */
			goto retry;
	}
	rcu_read_unlock();

	return error;
}

int kill_proc_info(int sig, struct siginfo *info, pid_t pid)
{
	int error;
	rcu_read_lock();
	error = kill_pid_info(sig, info, find_vpid(pid));
	rcu_read_unlock();
	return error;
}
/* like kill_pid_info(), but doesn't use uid/euid of "current" */
int kill_pid_info_as_uid(int sig, struct siginfo *info, struct pid *pid,
		      uid_t uid, uid_t euid, u32 secid)
{
	int ret = -EINVAL;
	struct task_struct *p;
	const struct cred *pcred;
	unsigned long flags;

	if (!valid_signal(sig))
		return ret;

	rcu_read_lock();
	p = pid_task(pid, PIDTYPE_PID);
	if (!p) {
		ret = -ESRCH;
		goto out_unlock;
	}
	pcred = __task_cred(p);
	if (si_fromuser(info) &&
	    euid != pcred->suid && euid != pcred->uid &&
	    uid  != pcred->suid && uid  != pcred->uid) {
		ret = -EPERM;
		goto out_unlock;
	}
	ret = security_task_kill(p, info, sig, secid);
	if (ret)
		goto out_unlock;

	if (sig) {
		if (lock_task_sighand(p, &flags)) {
			ret = __send_signal(sig, info, p, 1, 0);
			unlock_task_sighand(p, &flags);
		} else
			ret = -ESRCH;
	}
out_unlock:
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(kill_pid_info_as_uid);
/*
 * kill_something_info() interprets pid in interesting ways just like kill(2).
 *
 * POSIX specifies that kill(-1,sig) is unspecified, but what we have
 * is probably wrong.  Should make it like BSD or SYSV.
 */
static int kill_something_info(int sig, struct siginfo *info, pid_t pid)
{
	int ret;

	if (pid > 0) {
		rcu_read_lock();
		ret = kill_pid_info(sig, info, find_vpid(pid));
		rcu_read_unlock();
		return ret;
	}

	read_lock(&tasklist_lock);
	if (pid != -1) {
		ret = __kill_pgrp_info(sig, info,
				pid ? find_vpid(-pid) : task_pgrp(current));
	} else {
		int retval = 0, count = 0;
		struct task_struct * p;

		for_each_process(p) {
			if (task_pid_vnr(p) > 1 &&
					!same_thread_group(p, current)) {
				int err = group_send_sig_info(sig, info, p);
				++count;
				if (err != -EPERM)
					retval = err;
			}
		}
		ret = count ? retval : -ESRCH;
	}
	read_unlock(&tasklist_lock);

	return ret;
}
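/*
 * Editorial summary of the pid semantics implemented above, mirroring
 * kill(2):
 *
 *	pid > 0		signal the single process with that pid
 *	pid == 0	signal every process in the caller's process group
 *	pid == -1	signal every process the caller may signal, except
 *			pid 1 and the caller's own thread group
 *	pid < -1	signal every process in process group -pid
 */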
/*
 * These are for backward compatibility with the rest of the kernel source.
 */

int send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	/*
	 * Make sure legacy kernel users don't send in bad values
	 * (normal paths check this in check_kill_permission).
	 */
	if (!valid_signal(sig))
		return -EINVAL;

	return do_send_sig_info(sig, info, p, false);
}

#define __si_special(priv) \
	((priv) ? SEND_SIG_PRIV : SEND_SIG_NOINFO)

int
send_sig(int sig, struct task_struct *p, int priv)
{
	return send_sig_info(sig, __si_special(priv), p);
}

void
force_sig(int sig, struct task_struct *p)
{
	force_sig_info(sig, SEND_SIG_PRIV, p);
}

/*
 * When things go south during signal handling, we
 * will force a SIGSEGV.  And if the signal that caused
 * the problem was already a SIGSEGV, we'll want to
 * make sure we don't even try to deliver the signal.
 */
int
force_sigsegv(int sig, struct task_struct *p)
{
	if (sig == SIGSEGV) {
		unsigned long flags;
		spin_lock_irqsave(&p->sighand->siglock, flags);
		p->sighand->action[sig - 1].sa.sa_handler = SIG_DFL;
		spin_unlock_irqrestore(&p->sighand->siglock, flags);
	}
	force_sig(SIGSEGV, p);
	return 0;
}

int kill_pgrp(struct pid *pid, int sig, int priv)
{
	int ret;

	read_lock(&tasklist_lock);
	ret = __kill_pgrp_info(sig, __si_special(priv), pid);
	read_unlock(&tasklist_lock);

	return ret;
}
EXPORT_SYMBOL(kill_pgrp);

int kill_pid(struct pid *pid, int sig, int priv)
{
	return kill_pid_info(sig, __si_special(priv), pid);
}
EXPORT_SYMBOL(kill_pid);
/*
 * These functions support sending signals using preallocated sigqueue
 * structures.  This is needed "because realtime applications cannot
 * afford to lose notifications of asynchronous events, like timer
 * expirations or I/O completions".  In the case of POSIX Timers
 * we allocate the sigqueue structure from the timer_create.  If this
 * allocation fails we are able to report the failure to the application
 * with an EAGAIN error.
 */
struct sigqueue *sigqueue_alloc(void)
{
        struct sigqueue *q = __sigqueue_alloc(-1, current, GFP_KERNEL, 0);

        if (q)
                q->flags |= SIGQUEUE_PREALLOC;

        return q;
}

void sigqueue_free(struct sigqueue *q)
{
        unsigned long flags;
        spinlock_t *lock = &current->sighand->siglock;

        BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
        /*
         * We must hold ->siglock while testing q->list
         * to serialize with collect_signal() or with
         * __exit_signal()->flush_sigqueue().
         */
        spin_lock_irqsave(lock, flags);
        q->flags &= ~SIGQUEUE_PREALLOC;
        /*
         * If it is queued it will be freed when dequeued,
         * like the "regular" sigqueue.
         */
        if (!list_empty(&q->list))
                q = NULL;
        spin_unlock_irqrestore(lock, flags);

        if (q)
                __sigqueue_free(q);
}
int send_sigqueue(struct sigqueue *q, struct task_struct *t, int group)
{
        int sig = q->info.si_signo;
        struct sigpending *pending;
        unsigned long flags;
        int ret;

        BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));

        ret = -1;
        if (!likely(lock_task_sighand(t, &flags)))
                goto ret;

        ret = 1; /* the signal is ignored */
        if (!prepare_signal(sig, t, 0))
                goto out;

        ret = 0;
        if (unlikely(!list_empty(&q->list))) {
                /*
                 * If an SI_TIMER entry is already queued just increment
                 * the overrun count.
                 */
                BUG_ON(q->info.si_code != SI_TIMER);
                q->info.si_overrun++;
                goto out;
        }
        q->info.si_overrun = 0;

        signalfd_notify(t, sig);
        pending = group ? &t->signal->shared_pending : &t->pending;
        list_add_tail(&q->list, &pending->list);
        sigaddset(&pending->signal, sig);
        complete_signal(sig, t, group);
out:
        unlock_task_sighand(t, &flags);
ret:
        return ret;
}
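
/*
 * Hedged sketch of the preallocation pattern described above (the
 * POSIX timer code does something along these lines; details here are
 * simplified):
 *
 *      // at timer_create() time: fail early with EAGAIN, not at expiry
 *      q = sigqueue_alloc();
 *      if (!q)
 *              return -EAGAIN;
 *      ...
 *      // at each expiry: cannot fail; only bumps si_overrun if the
 *      // previous expiry is still queued
 *      send_sigqueue(q, target, group);
 *      ...
 *      // at timer_delete() time
 *      sigqueue_free(q);
 */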
/*
 * Let a parent know about the death of a child.
 * For a stopped/continued status change, use do_notify_parent_cldstop instead.
 *
 * Returns true if our parent ignored us and so we've switched to
 * self-reaping.
 */
bool do_notify_parent(struct task_struct *tsk, int sig)
{
        struct siginfo info;
        unsigned long flags;
        struct sighand_struct *psig;
        bool autoreap = false;

        BUG_ON(sig == -1);

        /* do_notify_parent_cldstop should have been called instead. */
        BUG_ON(task_is_stopped_or_traced(tsk));

        BUG_ON(!tsk->ptrace &&
               (tsk->group_leader != tsk || !thread_group_empty(tsk)));

        info.si_signo = sig;
        info.si_errno = 0;
        /*
         * We are under tasklist_lock here so our parent is tied to
         * us and cannot exit and release its namespace.
         *
         * The only thing it can do is switch its nsproxy with sys_unshare,
         * but unsharing pid namespaces is not allowed, so we will always
         * see the relevant namespace.
         *
         * write_lock() currently calls preempt_disable() which is the
         * same as rcu_read_lock(), but according to Oleg it is not
         * correct to rely on this.
         */
        rcu_read_lock();
        info.si_pid = task_pid_nr_ns(tsk, tsk->parent->nsproxy->pid_ns);
        info.si_uid = __task_cred(tsk)->uid;
        rcu_read_unlock();

        info.si_utime = cputime_to_clock_t(cputime_add(tsk->utime,
                                tsk->signal->utime));
        info.si_stime = cputime_to_clock_t(cputime_add(tsk->stime,
                                tsk->signal->stime));

        info.si_status = tsk->exit_code & 0x7f;
        if (tsk->exit_code & 0x80)
                info.si_code = CLD_DUMPED;
        else if (tsk->exit_code & 0x7f)
                info.si_code = CLD_KILLED;
        else {
                info.si_code = CLD_EXITED;
                info.si_status = tsk->exit_code >> 8;
        }

        psig = tsk->parent->sighand;
        spin_lock_irqsave(&psig->siglock, flags);
        if (!tsk->ptrace && sig == SIGCHLD &&
            (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
             (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) {
                /*
                 * We are exiting and our parent doesn't care.  POSIX.1
                 * defines special semantics for setting SIGCHLD to SIG_IGN
                 * or setting the SA_NOCLDWAIT flag: we should be reaped
                 * automatically and not left for our parent's wait4 call.
                 * Rather than having the parent do it as a magic kind of
                 * signal handler, we just set this to tell do_exit that we
                 * can be cleaned up without becoming a zombie.  Note that
                 * we still call __wake_up_parent in this case, because a
                 * blocked sys_wait4 might now return -ECHILD.
                 *
                 * Whether we send SIGCHLD or not for SA_NOCLDWAIT
                 * is implementation-defined: we do (if you don't want
                 * it, just use SIG_IGN instead).
                 */
                autoreap = true;
                tsk->exit_signal = -1;
                if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
                        sig = 0;
        }
        if (valid_signal(sig) && sig)
                __group_send_sig_info(sig, &info, tsk->parent);
        __wake_up_parent(tsk, tsk->parent);
        spin_unlock_irqrestore(&psig->siglock, flags);

        return autoreap;
}
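
/*
 * Worked example of the exit_code decoding above (hedged; the values
 * follow the conventional wait(2) status layout):
 *
 *      exit(3)           -> exit_code 0x0300 -> CLD_EXITED, si_status 3
 *      killed by SIGKILL -> exit_code 0x0009 -> CLD_KILLED, si_status 9
 *      SIGQUIT + core    -> exit_code 0x0083 -> CLD_DUMPED, si_status 3
 */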
/**
 * do_notify_parent_cldstop - notify parent of stopped/continued state change
 * @tsk: task reporting the state change
 * @for_ptracer: the notification is for ptracer
 * @why: CLD_{CONTINUED|STOPPED|TRAPPED} to report
 *
 * Notify @tsk's parent that the stopped/continued state has changed.  If
 * @for_ptracer is %false, @tsk's group leader notifies its real parent.
 * If %true, @tsk reports to @tsk->parent, which should be the ptracer.
 *
 * CONTEXT:
 * Must be called with tasklist_lock at least read locked.
 */
static void do_notify_parent_cldstop(struct task_struct *tsk,
                                     bool for_ptracer, int why)
{
        struct siginfo info;
        unsigned long flags;
        struct task_struct *parent;
        struct sighand_struct *sighand;

        if (for_ptracer) {
                parent = tsk->parent;
        } else {
                tsk = tsk->group_leader;
                parent = tsk->real_parent;
        }

        info.si_signo = SIGCHLD;
        info.si_errno = 0;
        /*
         * See comment in do_notify_parent() about the following 4 lines.
         */
        rcu_read_lock();
        info.si_pid = task_pid_nr_ns(tsk, parent->nsproxy->pid_ns);
        info.si_uid = __task_cred(tsk)->uid;
        rcu_read_unlock();

        info.si_utime = cputime_to_clock_t(tsk->utime);
        info.si_stime = cputime_to_clock_t(tsk->stime);

        info.si_code = why;
        switch (why) {
        case CLD_CONTINUED:
                info.si_status = SIGCONT;
                break;
        case CLD_STOPPED:
                info.si_status = tsk->signal->group_exit_code & 0x7f;
                break;
        case CLD_TRAPPED:
                info.si_status = tsk->exit_code & 0x7f;
                break;
        default:
                BUG();
        }

        sighand = parent->sighand;
        spin_lock_irqsave(&sighand->siglock, flags);
        if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN &&
            !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
                __group_send_sig_info(SIGCHLD, &info, parent);
        /*
         * Even if SIGCHLD is not generated, we must wake up wait4 calls.
         */
        __wake_up_parent(tsk, parent);
        spin_unlock_irqrestore(&sighand->siglock, flags);
}
static inline int may_ptrace_stop(void)
{
        if (!likely(current->ptrace))
                return 0;
        /*
         * Are we in the middle of do_coredump?
         * If so, and our tracer is also part of the coredump, then
         * stopping is a deadlock situation and pointless because our
         * tracer is dead, so don't allow us to stop.
         * If SIGKILL was already sent before the caller unlocked
         * ->siglock we must see ->core_state != NULL. Otherwise it
         * is safe to enter schedule().
         */
        if (unlikely(current->mm->core_state) &&
            unlikely(current->mm == current->parent->mm))
                return 0;

        return 1;
}

/*
 * Return non-zero if there is a SIGKILL that should be waking us up.
 * Called with the siglock held.
 */
static int sigkill_pending(struct task_struct *tsk)
{
        return  sigismember(&tsk->pending.signal, SIGKILL) ||
                sigismember(&tsk->signal->shared_pending.signal, SIGKILL);
}

/*
 * Test whether the target task of the usual cldstop notification - the
 * real_parent of @child - is in the same group as the ptracer.
 */
static bool real_parent_is_ptracer(struct task_struct *child)
{
        return same_thread_group(child->parent, child->real_parent);
}
/*
 * This must be called with current->sighand->siglock held.
 *
 * This should be the path for all ptrace stops.
 * We always set current->last_siginfo while stopped here.
 * That makes it a way to test a stopped process for
 * being ptrace-stopped vs being job-control-stopped.
 *
 * If we actually decide not to stop at all because the tracer
 * is gone, we keep current->exit_code unless clear_code.
 */
static void ptrace_stop(int exit_code, int why, int clear_code, siginfo_t *info)
        __releases(&current->sighand->siglock)
        __acquires(&current->sighand->siglock)
{
        bool gstop_done = false;

        if (arch_ptrace_stop_needed(exit_code, info)) {
                /*
                 * The arch code has something special to do before a
                 * ptrace stop.  This is allowed to block, e.g. for faults
                 * on user stack pages.  We can't keep the siglock while
                 * calling arch_ptrace_stop, so we must release it now.
                 * To preserve proper semantics, we must do this before
                 * any signal bookkeeping like checking group_stop_count.
                 * Meanwhile, a SIGKILL could come in before we retake the
                 * siglock.  That must prevent us from sleeping in TASK_TRACED.
                 * So after regaining the lock, we must check for SIGKILL.
                 */
                spin_unlock_irq(&current->sighand->siglock);
                arch_ptrace_stop(exit_code, info);
                spin_lock_irq(&current->sighand->siglock);
                if (sigkill_pending(current))
                        return;
        }

        /*
         * We're committing to trapping.  TRACED should be visible before
         * TRAPPING is cleared; otherwise, the tracer might fail do_wait().
         * Also, transition to TRACED and updates to ->jobctl should be
         * atomic with respect to siglock and should be done after the arch
         * hook as siglock is released and regrabbed across it.
         */
        set_current_state(TASK_TRACED);

        current->last_siginfo = info;
        current->exit_code = exit_code;

        /*
         * If @why is CLD_STOPPED, we're trapping to participate in a group
         * stop.  Do the bookkeeping.  Note that if SIGCONT was delivered
         * across siglock relocks since INTERRUPT was scheduled, PENDING
         * could be clear now.  We act as if SIGCONT is received after
         * TASK_TRACED is entered - ignore it.
         */
        if (why == CLD_STOPPED && (current->jobctl & JOBCTL_STOP_PENDING))
                gstop_done = task_participate_group_stop(current);

        /* any trap clears pending STOP trap, STOP trap clears NOTIFY */
        task_clear_jobctl_pending(current, JOBCTL_TRAP_STOP);
        if (info && info->si_code >> 8 == PTRACE_EVENT_STOP)
                task_clear_jobctl_pending(current, JOBCTL_TRAP_NOTIFY);

        /* entering a trap, clear TRAPPING */
        task_clear_jobctl_trapping(current);

        spin_unlock_irq(&current->sighand->siglock);
        read_lock(&tasklist_lock);
        if (may_ptrace_stop()) {
                /*
                 * Notify parents of the stop.
                 *
                 * While ptraced, there are two parents - the ptracer and
                 * the real_parent of the group_leader.  The ptracer should
                 * know about every stop while the real parent is only
                 * interested in the completion of group stop.  The states
                 * for the two don't interact with each other.  Notify
                 * separately unless they're gonna be duplicates.
                 */
                do_notify_parent_cldstop(current, true, why);
                if (gstop_done && !real_parent_is_ptracer(current))
                        do_notify_parent_cldstop(current, false, why);

                /*
                 * Don't want to allow preemption here, because
                 * sys_ptrace() needs this task to be inactive.
                 *
                 * XXX: implement read_unlock_no_resched().
                 */
                preempt_disable();
                read_unlock(&tasklist_lock);
                preempt_enable_no_resched();
                schedule();
        } else {
                /*
                 * By the time we got the lock, our tracer went away.
                 * Don't drop the lock yet, another tracer may come.
                 *
                 * If @gstop_done, the ptracer went away between group stop
                 * completion and here.  During detach, it would have set
                 * JOBCTL_STOP_PENDING on us and we'll re-enter
                 * TASK_STOPPED in do_signal_stop() on return, so notifying
                 * the real parent of the group stop completion is enough.
                 */
                if (gstop_done)
                        do_notify_parent_cldstop(current, false, why);

                __set_current_state(TASK_RUNNING);
                if (clear_code)
                        current->exit_code = 0;
                read_unlock(&tasklist_lock);
        }

        /*
         * While in TASK_TRACED, we were considered "frozen enough".
         * Now that we woke up, it's crucial if we're supposed to be
         * frozen that we freeze now before running anything substantial.
         */
        try_to_freeze();

        /*
         * We are back.  Now reacquire the siglock before touching
         * last_siginfo, so that we are sure to have synchronized with
         * any signal-sending on another CPU that wants to examine it.
         */
        spin_lock_irq(&current->sighand->siglock);
        current->last_siginfo = NULL;

        /* LISTENING can be set only during STOP traps, clear it */
        current->jobctl &= ~JOBCTL_LISTENING;

        /*
         * Queued signals ignored us while we were stopped for tracing.
         * So check for any that we should take before resuming user mode.
         * This sets TIF_SIGPENDING, but never clears it.
         */
        recalc_sigpending_tsk(current);
}
static void ptrace_do_notify(int signr, int exit_code, int why)
{
        siginfo_t info;

        memset(&info, 0, sizeof info);
        info.si_signo = signr;
        info.si_code = exit_code;
        info.si_pid = task_pid_vnr(current);
        info.si_uid = current_uid();

        /* Let the debugger run.  */
        ptrace_stop(exit_code, why, 1, &info);
}

void ptrace_notify(int exit_code)
{
        BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);

        spin_lock_irq(&current->sighand->siglock);
        ptrace_do_notify(SIGTRAP, exit_code, CLD_TRAPPED);
        spin_unlock_irq(&current->sighand->siglock);
}
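
/*
 * Hedged example of the exit_code packing that the BUG_ON above
 * enforces: the low byte must be SIGTRAP and the next byte carries the
 * ptrace event, so an exec report would look roughly like
 *
 *      ptrace_notify((PTRACE_EVENT_EXEC << 8) | SIGTRAP);
 *
 * which is what the tracer later reads back out of the wait status.
 */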
/**
 * do_signal_stop - handle group stop for SIGSTOP and other stop signals
 * @signr: signr causing group stop if initiating
 *
 * If %JOBCTL_STOP_PENDING is not set yet, initiate group stop with @signr
 * and participate in it.  If already set, participate in the existing
 * group stop.  If participated in a group stop (and thus slept), %true is
 * returned with siglock released.
 *
 * If ptraced, this function doesn't handle stop itself.  Instead,
 * %JOBCTL_TRAP_STOP is scheduled and %false is returned with siglock
 * untouched.  The caller must ensure that INTERRUPT trap handling takes
 * place afterwards.
 *
 * CONTEXT:
 * Must be called with @current->sighand->siglock held, which is released
 * on %true return.
 *
 * RETURNS:
 * %false if group stop is already cancelled or ptrace trap is scheduled.
 * %true if participated in group stop.
 */
static bool do_signal_stop(int signr)
        __releases(&current->sighand->siglock)
{
        struct signal_struct *sig = current->signal;

        if (!(current->jobctl & JOBCTL_STOP_PENDING)) {
                unsigned int gstop = JOBCTL_STOP_PENDING | JOBCTL_STOP_CONSUME;
                struct task_struct *t;

                /* signr will be recorded in task->jobctl for retries */
                WARN_ON_ONCE(signr & ~JOBCTL_STOP_SIGMASK);

                if (!likely(current->jobctl & JOBCTL_STOP_DEQUEUED) ||
                    unlikely(signal_group_exit(sig)))
                        return false;
                /*
                 * There is no group stop already in progress.  We must
                 * initiate one now.
                 *
                 * While ptraced, a task may be resumed while group stop is
                 * still in effect and then receive a stop signal and
                 * initiate another group stop.  This deviates from the
                 * usual behavior as two consecutive stop signals can't
                 * cause two group stops when !ptraced.  That is why we
                 * also check !task_is_stopped(t) below.
                 *
                 * The condition can be distinguished by testing whether
                 * SIGNAL_STOP_STOPPED is already set.  Don't generate
                 * group_exit_code in such case.
                 *
                 * This is not necessary for SIGNAL_STOP_CONTINUED because
                 * an intervening stop signal is required to cause two
                 * continued events regardless of ptrace.
                 */
                if (!(sig->flags & SIGNAL_STOP_STOPPED))
                        sig->group_exit_code = signr;
                else
                        WARN_ON_ONCE(!current->ptrace);

                sig->group_stop_count = 0;

                if (task_set_jobctl_pending(current, signr | gstop))
                        sig->group_stop_count++;

                for (t = next_thread(current); t != current;
                     t = next_thread(t)) {
                        /*
                         * Setting state to TASK_STOPPED for a group
                         * stop is always done with the siglock held,
                         * so this check has no races.
                         */
                        if (!task_is_stopped(t) &&
                            task_set_jobctl_pending(t, signr | gstop)) {
                                sig->group_stop_count++;
                                if (likely(!(t->ptrace & PT_SEIZED)))
                                        signal_wake_up(t, 0);
                                else
                                        ptrace_trap_notify(t);
                        }
                }
        }

        if (likely(!current->ptrace)) {
                int notify = 0;

                /*
                 * If there are no other threads in the group, or if there
                 * is a group stop in progress and we are the last to stop,
                 * report to the parent.
                 */
                if (task_participate_group_stop(current))
                        notify = CLD_STOPPED;

                __set_current_state(TASK_STOPPED);
                spin_unlock_irq(&current->sighand->siglock);

                /*
                 * Notify the parent of the group stop completion.  Because
                 * we're not holding either the siglock or tasklist_lock
                 * here, ptracer may attach in between; however, this is for
                 * group stop and should always be delivered to the real
                 * parent of the group leader.  The new ptracer will get
                 * its notification when this task transitions into
                 * TASK_TRACED.
                 */
                if (notify) {
                        read_lock(&tasklist_lock);
                        do_notify_parent_cldstop(current, false, notify);
                        read_unlock(&tasklist_lock);
                }

                /* Now we don't run again until woken by SIGCONT or SIGKILL */
                schedule();
                return true;
        } else {
                /*
                 * While ptraced, group stop is handled by STOP trap.
                 * Schedule it and let the caller deal with it.
                 */
                task_set_jobctl_pending(current, JOBCTL_TRAP_STOP);
                return false;
        }
}
/**
 * do_jobctl_trap - take care of ptrace jobctl traps
 *
 * When PT_SEIZED, it's used for both group stop and explicit
 * SEIZE/INTERRUPT traps.  Both generate PTRACE_EVENT_STOP trap with
 * accompanying siginfo.  If stopped, the lower eight bits of exit_code
 * contain the stop signal; otherwise, %SIGTRAP.
 *
 * When !PT_SEIZED, it's used only for group stop trap with stop signal
 * number as exit_code and no siginfo.
 *
 * CONTEXT:
 * Must be called with @current->sighand->siglock held, which may be
 * released and re-acquired before returning with intervening sleep.
 */
static void do_jobctl_trap(void)
{
        struct signal_struct *signal = current->signal;
        int signr = current->jobctl & JOBCTL_STOP_SIGMASK;

        if (current->ptrace & PT_SEIZED) {
                if (!signal->group_stop_count &&
                    !(signal->flags & SIGNAL_STOP_STOPPED))
                        signr = SIGTRAP;
                WARN_ON_ONCE(!signr);
                ptrace_do_notify(signr, signr | (PTRACE_EVENT_STOP << 8),
                                 CLD_STOPPED);
        } else {
                WARN_ON_ONCE(!signr);
                ptrace_stop(signr, CLD_STOPPED, 0, NULL);
                current->exit_code = 0;
        }
}
static int ptrace_signal(int signr, siginfo_t *info,
                         struct pt_regs *regs, void *cookie)
{
        if (!current->ptrace)
                return signr;

        ptrace_signal_deliver(regs, cookie);

        /* Let the debugger run.  */
        ptrace_stop(signr, CLD_TRAPPED, 0, info);

        /* We're back.  Did the debugger cancel the sig?  */
        signr = current->exit_code;
        if (signr == 0)
                return signr;

        current->exit_code = 0;

        /*
         * Update the siginfo structure if the signal has
         * changed.  If the debugger wanted something
         * specific in the siginfo structure then it should
         * have updated *info via PTRACE_SETSIGINFO.
         */
        if (signr != info->si_signo) {
                info->si_signo = signr;
                info->si_errno = 0;
                info->si_code = SI_USER;
                info->si_pid = task_pid_vnr(current->parent);
                info->si_uid = task_uid(current->parent);
        }

        /* If the (new) signal is now blocked, requeue it.  */
        if (sigismember(&current->blocked, signr)) {
                specific_send_sig_info(signr, info, current);
                signr = 0;
        }

        return signr;
}
int get_signal_to_deliver(siginfo_t *info, struct k_sigaction *return_ka,
                          struct pt_regs *regs, void *cookie)
{
        struct sighand_struct *sighand = current->sighand;
        struct signal_struct *signal = current->signal;
        int signr;

relock:
        /*
         * We'll jump back here after any time we were stopped in TASK_STOPPED.
         * While in TASK_STOPPED, we were considered "frozen enough".
         * Now that we woke up, it's crucial if we're supposed to be
         * frozen that we freeze now before running anything substantial.
         */
        try_to_freeze();

        spin_lock_irq(&sighand->siglock);
        /*
         * Every stopped thread goes here after wakeup.  Check to see if
         * we should notify the parent, prepare_signal(SIGCONT) encodes
         * the CLD_ si_code into SIGNAL_CLD_MASK bits.
         */
        if (unlikely(signal->flags & SIGNAL_CLD_MASK)) {
                struct task_struct *leader;
                int why;

                if (signal->flags & SIGNAL_CLD_CONTINUED)
                        why = CLD_CONTINUED;
                else
                        why = CLD_STOPPED;

                signal->flags &= ~SIGNAL_CLD_MASK;

                spin_unlock_irq(&sighand->siglock);

                /*
                 * Notify the parent that we're continuing.  This event is
                 * always per-process and doesn't make a whole lot of sense
                 * for ptracers, who shouldn't consume the state via
                 * wait(2) either, but, for backward compatibility, notify
                 * the ptracer of the group leader too unless it's gonna be
                 * a duplicate.
                 */
                read_lock(&tasklist_lock);
                do_notify_parent_cldstop(current, false, why);

                leader = current->group_leader;
                if (leader->ptrace && !real_parent_is_ptracer(leader))
                        do_notify_parent_cldstop(leader, true, why);
                read_unlock(&tasklist_lock);

                goto relock;
        }

        for (;;) {
                struct k_sigaction *ka;

                if (unlikely(current->jobctl & JOBCTL_STOP_PENDING) &&
                    do_signal_stop(0))
                        goto relock;

                if (unlikely(current->jobctl & JOBCTL_TRAP_MASK)) {
                        do_jobctl_trap();
                        spin_unlock_irq(&sighand->siglock);
                        goto relock;
                }

                signr = dequeue_signal(current, &current->blocked, info);

                if (!signr)
                        break; /* will return 0 */

                if (signr != SIGKILL) {
                        signr = ptrace_signal(signr, info,
                                              regs, cookie);
                        if (!signr)
                                continue;
                }

                ka = &sighand->action[signr-1];

                /* Trace actually delivered signals. */
                trace_signal_deliver(signr, info, ka);

                if (ka->sa.sa_handler == SIG_IGN) /* Do nothing.  */
                        continue;
                if (ka->sa.sa_handler != SIG_DFL) {
                        /* Run the handler.  */
                        *return_ka = *ka;

                        if (ka->sa.sa_flags & SA_ONESHOT)
                                ka->sa.sa_handler = SIG_DFL;

                        break; /* will return non-zero "signr" value */
                }

                /*
                 * Now we are doing the default action for this signal.
                 */
                if (sig_kernel_ignore(signr)) /* Default is nothing. */
                        continue;

                /*
                 * Global init gets no signals it doesn't want.
                 * Container-init gets no signals it doesn't want from same
                 * container.
                 *
                 * Note that if global/container-init sees a sig_kernel_only()
                 * signal here, the signal must have been generated internally
                 * or must have come from an ancestor namespace. In either
                 * case, the signal cannot be dropped.
                 */
                if (unlikely(signal->flags & SIGNAL_UNKILLABLE) &&
                    !sig_kernel_only(signr))
                        continue;

                if (sig_kernel_stop(signr)) {
                        /*
                         * The default action is to stop all threads in
                         * the thread group.  The job control signals
                         * do nothing in an orphaned pgrp, but SIGSTOP
                         * always works.  Note that siglock needs to be
                         * dropped during the call to is_orphaned_pgrp()
                         * because of lock ordering with tasklist_lock.
                         * This allows an intervening SIGCONT to be posted.
                         * We need to check for that and bail out if necessary.
                         */
                        if (signr != SIGSTOP) {
                                spin_unlock_irq(&sighand->siglock);

                                /* signals can be posted during this window */

                                if (is_current_pgrp_orphaned())
                                        goto relock;

                                spin_lock_irq(&sighand->siglock);
                        }

                        if (likely(do_signal_stop(info->si_signo))) {
                                /* It released the siglock.  */
                                goto relock;
                        }

                        /*
                         * We didn't actually stop, due to a race
                         * with SIGCONT or something like that.
                         */
                        continue;
                }

                spin_unlock_irq(&sighand->siglock);

                /*
                 * Anything else is fatal, maybe with a core dump.
                 */
                current->flags |= PF_SIGNALED;

                if (sig_kernel_coredump(signr)) {
                        if (print_fatal_signals)
                                print_fatal_signal(regs, info->si_signo);
                        /*
                         * If it was able to dump core, this kills all
                         * other threads in the group and synchronizes with
                         * their demise.  If we lost the race with another
                         * thread getting here, it set group_exit_code
                         * first and our do_group_exit call below will use
                         * that value and ignore the one we pass it.
                         */
                        do_coredump(info->si_signo, info->si_signo, regs);
                }

                /*
                 * Death signals, no core dump.
                 */
                do_group_exit(info->si_signo);
                /* NOTREACHED */
        }
        spin_unlock_irq(&sighand->siglock);
        return signr;
}
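
/*
 * Hedged sketch of a typical caller (architectures differ; names like
 * do_signal() and handle_signal() here stand in for the arch-specific
 * pieces):
 *
 *      static void do_signal(struct pt_regs *regs)
 *      {
 *              struct k_sigaction ka;
 *              siginfo_t info;
 *              int signr;
 *
 *              signr = get_signal_to_deliver(&info, &ka, regs, NULL);
 *              if (signr > 0) {
 *                      handle_signal(signr, &info, &ka, regs);
 *                      return;
 *              }
 *              // no signal: fall through to syscall-restart handling
 *      }
 */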
/*
 * It could be that complete_signal() picked us to notify about the
 * group-wide signal.  Other threads should be notified now to take
 * the shared signals in @which since we will not.
 */
static void retarget_shared_pending(struct task_struct *tsk, sigset_t *which)
{
        sigset_t retarget;
        struct task_struct *t;

        sigandsets(&retarget, &tsk->signal->shared_pending.signal, which);
        if (sigisemptyset(&retarget))
                return;

        t = tsk;
        while_each_thread(tsk, t) {
                if (t->flags & PF_EXITING)
                        continue;

                if (!has_pending_signals(&retarget, &t->blocked))
                        continue;
                /* Remove the signals this thread can handle. */
                sigandsets(&retarget, &retarget, &t->blocked);

                if (!signal_pending(t))
                        signal_wake_up(t, 0);

                if (sigisemptyset(&retarget))
                        break;
        }
}
void exit_signals(struct task_struct *tsk)
{
        int group_stop = 0;
        sigset_t unblocked;

        if (thread_group_empty(tsk) || signal_group_exit(tsk->signal)) {
                tsk->flags |= PF_EXITING;
                return;
        }

        spin_lock_irq(&tsk->sighand->siglock);
        /*
         * From now this task is not visible for group-wide signals,
         * see wants_signal(), do_signal_stop().
         */
        tsk->flags |= PF_EXITING;
        if (!signal_pending(tsk))
                goto out;

        unblocked = tsk->blocked;
        signotset(&unblocked);
        retarget_shared_pending(tsk, &unblocked);

        if (unlikely(tsk->jobctl & JOBCTL_STOP_PENDING) &&
            task_participate_group_stop(tsk))
                group_stop = CLD_STOPPED;
out:
        spin_unlock_irq(&tsk->sighand->siglock);

        /*
         * If group stop has completed, deliver the notification.  This
         * should always go to the real parent of the group leader.
         */
        if (unlikely(group_stop)) {
                read_lock(&tasklist_lock);
                do_notify_parent_cldstop(tsk, false, group_stop);
                read_unlock(&tasklist_lock);
        }
}

EXPORT_SYMBOL(recalc_sigpending);
EXPORT_SYMBOL_GPL(dequeue_signal);
EXPORT_SYMBOL(flush_signals);
EXPORT_SYMBOL(force_sig);
EXPORT_SYMBOL(send_sig);
EXPORT_SYMBOL(send_sig_info);
EXPORT_SYMBOL(sigprocmask);
EXPORT_SYMBOL(block_all_signals);
EXPORT_SYMBOL(unblock_all_signals);
/*
 * System call entry points.
 */

/**
 * sys_restart_syscall - restart a system call
 */
SYSCALL_DEFINE0(restart_syscall)
{
        struct restart_block *restart = &current_thread_info()->restart_block;
        return restart->fn(restart);
}

long do_no_restart_syscall(struct restart_block *param)
{
        return -EINTR;
}

static void __set_task_blocked(struct task_struct *tsk, const sigset_t *newset)
{
        if (signal_pending(tsk) && !thread_group_empty(tsk)) {
                sigset_t newblocked;
                /* A set of now blocked but previously unblocked signals. */
                sigandnsets(&newblocked, newset, &current->blocked);
                retarget_shared_pending(tsk, &newblocked);
        }
        tsk->blocked = *newset;
        recalc_sigpending();
}

/**
 * set_current_blocked - change current->blocked mask
 * @newset: new mask
 *
 * It is wrong to change ->blocked directly, this helper should be used
 * to ensure the process can't miss a shared signal we are going to block.
 */
void set_current_blocked(const sigset_t *newset)
{
        struct task_struct *tsk = current;

        spin_lock_irq(&tsk->sighand->siglock);
        __set_task_blocked(tsk, newset);
        spin_unlock_irq(&tsk->sighand->siglock);
}
/*
 * This is also useful for kernel threads that want to temporarily
 * (or permanently) block certain signals.
 *
 * NOTE! Unlike the user-mode sys_sigprocmask(), the kernel
 * interface happily blocks "unblockable" signals like SIGKILL
 * and friends.
 */
int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
{
        struct task_struct *tsk = current;
        sigset_t newset;

        /* Lockless, only current can change ->blocked, never from irq */
        if (oldset)
                *oldset = tsk->blocked;

        switch (how) {
        case SIG_BLOCK:
                sigorsets(&newset, &tsk->blocked, set);
                break;
        case SIG_UNBLOCK:
                sigandnsets(&newset, &tsk->blocked, set);
                break;
        case SIG_SETMASK:
                newset = *set;
                break;
        default:
                return -EINVAL;
        }

        set_current_blocked(&newset);
        return 0;
}
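
/*
 * Hedged in-kernel usage sketch (not from this file): a kernel thread
 * that wants to see SIGHUP and nothing else might do
 *
 *      sigset_t set;
 *
 *      siginitsetinv(&set, sigmask(SIGHUP));
 *      sigprocmask(SIG_SETMASK, &set, NULL);
 *
 * Note that, per the comment above, this interface would happily block
 * even SIGKILL if asked to.
 */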
/**
 * sys_rt_sigprocmask - change the list of currently blocked signals
 * @how: whether to add, remove, or set signals
 * @nset: new set of blocked signals, or NULL to leave the mask unchanged
 * @oset: previous value of signal mask if non-null
 * @sigsetsize: size of sigset_t type
 */
SYSCALL_DEFINE4(rt_sigprocmask, int, how, sigset_t __user *, nset,
                sigset_t __user *, oset, size_t, sigsetsize)
{
        sigset_t old_set, new_set;
        int error;

        /* XXX: Don't preclude handling different sized sigset_t's.  */
        if (sigsetsize != sizeof(sigset_t))
                return -EINVAL;

        old_set = current->blocked;

        if (nset) {
                if (copy_from_user(&new_set, nset, sizeof(sigset_t)))
                        return -EFAULT;
                sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));

                error = sigprocmask(how, &new_set, NULL);
                if (error)
                        return error;
        }

        if (oset) {
                if (copy_to_user(oset, &old_set, sizeof(sigset_t)))
                        return -EFAULT;
        }

        return 0;
}
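
/*
 * Hedged userspace counterpart of the syscall above, for orientation
 * (not code from this file):
 *
 *      sigset_t set;
 *
 *      sigemptyset(&set);
 *      sigaddset(&set, SIGINT);
 *      sigprocmask(SIG_BLOCK, &set, NULL);     // libc wraps rt_sigprocmask
 *
 * Attempts to block SIGKILL/SIGSTOP are silently dropped by the
 * sigdelsetmask() above rather than reported as errors.
 */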
long do_sigpending(void __user *set, unsigned long sigsetsize)
{
        long error = -EINVAL;
        sigset_t pending;

        if (sigsetsize > sizeof(sigset_t))
                goto out;

        spin_lock_irq(&current->sighand->siglock);
        sigorsets(&pending, &current->pending.signal,
                  &current->signal->shared_pending.signal);
        spin_unlock_irq(&current->sighand->siglock);

        /* Outside the lock because only this thread touches it.  */
        sigandsets(&pending, &current->blocked, &pending);

        error = -EFAULT;
        if (!copy_to_user(set, &pending, sigsetsize))
                error = 0;
out:
        return error;
}

/**
 * sys_rt_sigpending - examine a pending signal that has been raised
 *                     while blocked
 * @set: stores pending signals
 * @sigsetsize: size of sigset_t type or larger
 */
SYSCALL_DEFINE2(rt_sigpending, sigset_t __user *, set, size_t, sigsetsize)
{
        return do_sigpending(set, sigsetsize);
}
#ifndef HAVE_ARCH_COPY_SIGINFO_TO_USER

int copy_siginfo_to_user(siginfo_t __user *to, siginfo_t *from)
{
        int err;

        if (!access_ok(VERIFY_WRITE, to, sizeof(siginfo_t)))
                return -EFAULT;
        if (from->si_code < 0)
                return __copy_to_user(to, from, sizeof(siginfo_t))
                        ? -EFAULT : 0;
        /*
         * If you change siginfo_t structure, please be sure
         * this code is fixed accordingly.
         * Please remember to update the signalfd_copyinfo() function
         * inside fs/signalfd.c too, in case siginfo_t changes.
         * It should never copy any pad contained in the structure
         * to avoid security leaks, but must copy the generic
         * 3 ints plus the relevant union member.
         */
        err = __put_user(from->si_signo, &to->si_signo);
        err |= __put_user(from->si_errno, &to->si_errno);
        err |= __put_user((short)from->si_code, &to->si_code);
        switch (from->si_code & __SI_MASK) {
        case __SI_KILL:
                err |= __put_user(from->si_pid, &to->si_pid);
                err |= __put_user(from->si_uid, &to->si_uid);
                break;
        case __SI_TIMER:
                err |= __put_user(from->si_tid, &to->si_tid);
                err |= __put_user(from->si_overrun, &to->si_overrun);
                err |= __put_user(from->si_ptr, &to->si_ptr);
                break;
        case __SI_POLL:
                err |= __put_user(from->si_band, &to->si_band);
                err |= __put_user(from->si_fd, &to->si_fd);
                break;
        case __SI_FAULT:
                err |= __put_user(from->si_addr, &to->si_addr);
#ifdef __ARCH_SI_TRAPNO
                err |= __put_user(from->si_trapno, &to->si_trapno);
#endif
#ifdef BUS_MCEERR_AO
                /*
                 * Other callers might not initialize the si_lsb field,
                 * so check explicitly for the right codes here.
                 */
                if (from->si_code == BUS_MCEERR_AR || from->si_code == BUS_MCEERR_AO)
                        err |= __put_user(from->si_addr_lsb, &to->si_addr_lsb);
#endif
                break;
        case __SI_CHLD:
                err |= __put_user(from->si_pid, &to->si_pid);
                err |= __put_user(from->si_uid, &to->si_uid);
                err |= __put_user(from->si_status, &to->si_status);
                err |= __put_user(from->si_utime, &to->si_utime);
                err |= __put_user(from->si_stime, &to->si_stime);
                break;
        case __SI_RT: /* This is not generated by the kernel as of now. */
        case __SI_MESGQ: /* But this is */
                err |= __put_user(from->si_pid, &to->si_pid);
                err |= __put_user(from->si_uid, &to->si_uid);
                err |= __put_user(from->si_ptr, &to->si_ptr);
                break;
        default: /* this is just in case for now ... */
                err |= __put_user(from->si_pid, &to->si_pid);
                err |= __put_user(from->si_uid, &to->si_uid);
                break;
        }
        return err;
}

#endif
/**
 * do_sigtimedwait - wait for queued signals specified in @which
 * @which: queued signals to wait for
 * @info: if non-null, the signal's siginfo is returned here
 * @ts: upper bound on process time suspension
 */
int do_sigtimedwait(const sigset_t *which, siginfo_t *info,
                        const struct timespec *ts)
{
        struct task_struct *tsk = current;
        long timeout = MAX_SCHEDULE_TIMEOUT;
        sigset_t mask = *which;
        int sig;

        if (ts) {
                if (!timespec_valid(ts))
                        return -EINVAL;
                timeout = timespec_to_jiffies(ts);
                /*
                 * We can be close to the next tick, add another one
                 * to ensure we will wait at least the time asked for.
                 */
                if (ts->tv_sec || ts->tv_nsec)
                        timeout++;
        }

        /*
         * Invert the set of allowed signals to get those we want to block.
         */
        sigdelsetmask(&mask, sigmask(SIGKILL) | sigmask(SIGSTOP));
        signotset(&mask);

        spin_lock_irq(&tsk->sighand->siglock);
        sig = dequeue_signal(tsk, &mask, info);
        if (!sig && timeout) {
                /*
                 * None ready, temporarily unblock those we're interested
                 * in while we are sleeping, so that we'll be awakened when
                 * they arrive.  Unblocking is always fine, we can avoid
                 * set_current_blocked().
                 */
                tsk->real_blocked = tsk->blocked;
                sigandsets(&tsk->blocked, &tsk->blocked, &mask);
                recalc_sigpending();
                spin_unlock_irq(&tsk->sighand->siglock);

                timeout = schedule_timeout_interruptible(timeout);

                spin_lock_irq(&tsk->sighand->siglock);
                __set_task_blocked(tsk, &tsk->real_blocked);
                siginitset(&tsk->real_blocked, 0);
                sig = dequeue_signal(tsk, &mask, info);
        }
        spin_unlock_irq(&tsk->sighand->siglock);

        if (sig)
                return sig;
        return timeout ? -EINTR : -EAGAIN;
}
/**
 * sys_rt_sigtimedwait - synchronously wait for queued signals specified
 *                       in @uthese
 * @uthese: queued signals to wait for
 * @uinfo: if non-null, the signal's siginfo is returned here
 * @uts: upper bound on process time suspension
 * @sigsetsize: size of sigset_t type
 */
SYSCALL_DEFINE4(rt_sigtimedwait, const sigset_t __user *, uthese,
                siginfo_t __user *, uinfo, const struct timespec __user *, uts,
                size_t, sigsetsize)
{
        sigset_t these;
        struct timespec ts;
        siginfo_t info;
        int ret;

        /* XXX: Don't preclude handling different sized sigset_t's.  */
        if (sigsetsize != sizeof(sigset_t))
                return -EINVAL;

        if (copy_from_user(&these, uthese, sizeof(these)))
                return -EFAULT;

        if (uts) {
                if (copy_from_user(&ts, uts, sizeof(ts)))
                        return -EFAULT;
        }

        ret = do_sigtimedwait(&these, &info, uts ? &ts : NULL);

        if (ret > 0 && uinfo) {
                if (copy_siginfo_to_user(uinfo, &info))
                        ret = -EFAULT;
        }

        return ret;
}
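
/*
 * Hedged userspace view of the call above (not code from this file):
 * wait up to five seconds for SIGUSR1 while it is blocked:
 *
 *      sigset_t set;
 *      siginfo_t si;
 *      struct timespec ts = { .tv_sec = 5, .tv_nsec = 0 };
 *
 *      sigemptyset(&set);
 *      sigaddset(&set, SIGUSR1);
 *      sigprocmask(SIG_BLOCK, &set, NULL);
 *      if (sigtimedwait(&set, &si, &ts) < 0 && errno == EAGAIN)
 *              ...     // timed out, per the -EAGAIN return above
 */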
/**
 * sys_kill - send a signal to a process
 * @pid: the PID of the process
 * @sig: signal to be sent
 */
SYSCALL_DEFINE2(kill, pid_t, pid, int, sig)
{
        struct siginfo info;

        info.si_signo = sig;
        info.si_errno = 0;
        info.si_code = SI_USER;
        info.si_pid = task_tgid_vnr(current);
        info.si_uid = current_uid();

        return kill_something_info(sig, &info, pid);
}
static int
do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info)
{
        struct task_struct *p;
        int error = -ESRCH;

        rcu_read_lock();
        p = find_task_by_vpid(pid);
        if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
                error = check_kill_permission(sig, info, p);
                /*
                 * The null signal is a permissions and process existence
                 * probe.  No signal is actually delivered.
                 */
                if (!error && sig) {
                        error = do_send_sig_info(sig, info, p, false);
                        /*
                         * If lock_task_sighand() failed we pretend the task
                         * dies after receiving the signal. The window is tiny,
                         * and the signal is private anyway.
                         */
                        if (unlikely(error == -ESRCH))
                                error = 0;
                }
        }
        rcu_read_unlock();

        return error;
}

static int do_tkill(pid_t tgid, pid_t pid, int sig)
{
        struct siginfo info;

        info.si_signo = sig;
        info.si_errno = 0;
        info.si_code = SI_TKILL;
        info.si_pid = task_tgid_vnr(current);
        info.si_uid = current_uid();

        return do_send_specific(tgid, pid, sig, &info);
}

/**
 * sys_tgkill - send signal to one specific thread
 * @tgid: the thread group ID of the thread
 * @pid: the PID of the thread
 * @sig: signal to be sent
 *
 * This syscall also checks the @tgid and returns -ESRCH even if the PID
 * exists but no longer belongs to the target process.  This method
 * solves the problem of threads exiting and PIDs getting reused.
 */
SYSCALL_DEFINE3(tgkill, pid_t, tgid, pid_t, pid, int, sig)
{
        /* This is only valid for single tasks */
        if (pid <= 0 || tgid <= 0)
                return -EINVAL;

        return do_tkill(tgid, pid, sig);
}
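
/*
 * Hedged usage note: many libc versions ship no tgkill() wrapper, so
 * callers typically go through syscall(2); this is also roughly how
 * pthread_kill() is implemented for same-process threads:
 *
 *      syscall(SYS_tgkill, getpid(), tid, SIGUSR1);
 */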
/**
 * sys_tkill - send signal to one specific task
 * @pid: the PID of the task
 * @sig: signal to be sent
 *
 * Send a signal to only one task, even if it's a CLONE_THREAD task.
 */
SYSCALL_DEFINE2(tkill, pid_t, pid, int, sig)
{
        /* This is only valid for single tasks */
        if (pid <= 0)
                return -EINVAL;

        return do_tkill(0, pid, sig);
}
/**
 * sys_rt_sigqueueinfo - queue a signal and accompanying data to a process
 * @pid: the PID of the process
 * @sig: signal to be sent
 * @uinfo: signal info to be sent
 */
SYSCALL_DEFINE3(rt_sigqueueinfo, pid_t, pid, int, sig,
                siginfo_t __user *, uinfo)
{
        siginfo_t info;

        if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
                return -EFAULT;

        /* Not even root can pretend to send signals from the kernel.
         * Nor can they impersonate a kill()/tgkill(), which adds source info.
         */
        if (info.si_code >= 0 || info.si_code == SI_TKILL) {
                /* We used to allow any < 0 si_code */
                WARN_ON_ONCE(info.si_code < 0);
                return -EPERM;
        }
        info.si_signo = sig;

        /* POSIX.1b doesn't mention process groups.  */
        return kill_proc_info(sig, &info, pid);
}

long do_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig, siginfo_t *info)
{
        /* This is only valid for single tasks */
        if (pid <= 0 || tgid <= 0)
                return -EINVAL;

        /* Not even root can pretend to send signals from the kernel.
         * Nor can they impersonate a kill()/tgkill(), which adds source info.
         */
        if (info->si_code >= 0 || info->si_code == SI_TKILL) {
                /* We used to allow any < 0 si_code */
                WARN_ON_ONCE(info->si_code < 0);
                return -EPERM;
        }
        info->si_signo = sig;

        return do_send_specific(tgid, pid, sig, info);
}

SYSCALL_DEFINE4(rt_tgsigqueueinfo, pid_t, tgid, pid_t, pid, int, sig,
                siginfo_t __user *, uinfo)
{
        siginfo_t info;

        if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
                return -EFAULT;

        return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
}
int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
{
        struct task_struct *t = current;
        struct k_sigaction *k;
        sigset_t mask;

        if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig)))
                return -EINVAL;

        k = &t->sighand->action[sig-1];

        spin_lock_irq(&current->sighand->siglock);
        if (oact)
                *oact = *k;

        if (act) {
                sigdelsetmask(&act->sa.sa_mask,
                              sigmask(SIGKILL) | sigmask(SIGSTOP));
                *k = *act;
                /*
                 * POSIX 3.3.1.3:
                 *  "Setting a signal action to SIG_IGN for a signal that is
                 *   pending shall cause the pending signal to be discarded,
                 *   whether or not it is blocked."
                 *
                 *  "Setting a signal action to SIG_DFL for a signal that is
                 *   pending and whose default action is to ignore the signal
                 *   (for example, SIGCHLD), shall cause the pending signal to
                 *   be discarded, whether or not it is blocked"
                 */
                if (sig_handler_ignored(sig_handler(t, sig), sig)) {
                        sigemptyset(&mask);
                        sigaddset(&mask, sig);
                        rm_from_queue_full(&mask, &t->signal->shared_pending);
                        do {
                                rm_from_queue_full(&mask, &t->pending);
                                t = next_thread(t);
                        } while (t != current);
                }
        }

        spin_unlock_irq(&current->sighand->siglock);
        return 0;
}
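
/*
 * Hedged illustration of the POSIX rule quoted above: if SIGCHLD is
 * pending and a process does
 *
 *      struct sigaction sa = { .sa_handler = SIG_IGN };
 *      sigaction(SIGCHLD, &sa, NULL);
 *
 * the pending SIGCHLD is discarded from every thread's queue by the
 * rm_from_queue_full() loop above, whether or not it was blocked.
 */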
int
do_sigaltstack (const stack_t __user *uss, stack_t __user *uoss, unsigned long sp)
{
        stack_t oss;
        int error;

        oss.ss_sp = (void __user *) current->sas_ss_sp;
        oss.ss_size = current->sas_ss_size;
        oss.ss_flags = sas_ss_flags(sp);

        if (uss) {
                void __user *ss_sp;
                size_t ss_size;
                int ss_flags;

                error = -EFAULT;
                if (!access_ok(VERIFY_READ, uss, sizeof(*uss)))
                        goto out;
                error = __get_user(ss_sp, &uss->ss_sp) |
                        __get_user(ss_flags, &uss->ss_flags) |
                        __get_user(ss_size, &uss->ss_size);
                if (error)
                        goto out;

                error = -EPERM;
                if (on_sig_stack(sp))
                        goto out;

                error = -EINVAL;
                /*
                 * Note - this code used to test ss_flags incorrectly:
                 * old code may have been written using ss_flags==0
                 * to mean ss_flags==SS_ONSTACK (as this was the only
                 * way that worked) - this fix preserves that older
                 * mechanism.
                 */
                if (ss_flags != SS_DISABLE && ss_flags != SS_ONSTACK && ss_flags != 0)
                        goto out;

                if (ss_flags == SS_DISABLE) {
                        ss_size = 0;
                        ss_sp = NULL;
                } else {
                        error = -ENOMEM;
                        if (ss_size < MINSIGSTKSZ)
                                goto out;
                }

                current->sas_ss_sp = (unsigned long) ss_sp;
                current->sas_ss_size = ss_size;
        }

        error = 0;
        if (uoss) {
                error = -EFAULT;
                if (!access_ok(VERIFY_WRITE, uoss, sizeof(*uoss)))
                        goto out;
                error = __put_user(oss.ss_sp, &uoss->ss_sp) |
                        __put_user(oss.ss_size, &uoss->ss_size) |
                        __put_user(oss.ss_flags, &uoss->ss_flags);
        }

out:
        return error;
}
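
/*
 * Hedged userspace counterpart (not code from this file; handler is a
 * hypothetical function): install an alternate stack and a handler
 * that runs on it, which is how a process can survive handling a
 * SIGSEGV caused by stack overflow:
 *
 *      stack_t ss = { .ss_sp = malloc(SIGSTKSZ),
 *                     .ss_size = SIGSTKSZ, .ss_flags = 0 };
 *      struct sigaction sa = { .sa_handler = handler,
 *                              .sa_flags = SA_ONSTACK };
 *
 *      sigaltstack(&ss, NULL);
 *      sigaction(SIGSEGV, &sa, NULL);
 */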
#ifdef __ARCH_WANT_SYS_SIGPENDING

/**
 * sys_sigpending - examine pending signals
 * @set: where the mask of pending signals is returned
 */
SYSCALL_DEFINE1(sigpending, old_sigset_t __user *, set)
{
        return do_sigpending(set, sizeof(*set));
}

#endif

#ifdef __ARCH_WANT_SYS_SIGPROCMASK
/**
 * sys_sigprocmask - examine and change blocked signals
 * @how: whether to add, remove, or set signals
 * @nset: signals to add or remove (if non-null)
 * @oset: previous value of signal mask if non-null
 *
 * Some platforms have their own version with special arguments;
 * others support only sys_rt_sigprocmask.
 */
SYSCALL_DEFINE3(sigprocmask, int, how, old_sigset_t __user *, nset,
                old_sigset_t __user *, oset)
{
        old_sigset_t old_set, new_set;
        sigset_t new_blocked;

        old_set = current->blocked.sig[0];

        if (nset) {
                if (copy_from_user(&new_set, nset, sizeof(*nset)))
                        return -EFAULT;
                new_set &= ~(sigmask(SIGKILL) | sigmask(SIGSTOP));

                new_blocked = current->blocked;

                switch (how) {
                case SIG_BLOCK:
                        sigaddsetmask(&new_blocked, new_set);
                        break;
                case SIG_UNBLOCK:
                        sigdelsetmask(&new_blocked, new_set);
                        break;
                case SIG_SETMASK:
                        new_blocked.sig[0] = new_set;
                        break;
                default:
                        return -EINVAL;
                }

                set_current_blocked(&new_blocked);
        }

        if (oset) {
                if (copy_to_user(oset, &old_set, sizeof(*oset)))
                        return -EFAULT;
        }

        return 0;
}
#endif /* __ARCH_WANT_SYS_SIGPROCMASK */
#ifdef __ARCH_WANT_SYS_RT_SIGACTION
/**
 * sys_rt_sigaction - alter an action taken by a process
 * @sig: signal to be sent
 * @act: new sigaction
 * @oact: used to save the previous sigaction
 * @sigsetsize: size of sigset_t type
 */
SYSCALL_DEFINE4(rt_sigaction, int, sig,
                const struct sigaction __user *, act,
                struct sigaction __user *, oact,
                size_t, sigsetsize)
{
        struct k_sigaction new_sa, old_sa;
        int ret = -EINVAL;

        /* XXX: Don't preclude handling different sized sigset_t's.  */
        if (sigsetsize != sizeof(sigset_t))
                goto out;

        if (act) {
                if (copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa)))
                        return -EFAULT;
        }

        ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);

        if (!ret && oact) {
                if (copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa)))
                        return -EFAULT;
        }
out:
        return ret;
}
#endif /* __ARCH_WANT_SYS_RT_SIGACTION */
#ifdef __ARCH_WANT_SYS_SGETMASK

/*
 * For backwards compatibility.  Functionality superseded by sigprocmask.
 */
SYSCALL_DEFINE0(sgetmask)
{
        /* SMP safe */
        return current->blocked.sig[0];
}

SYSCALL_DEFINE1(ssetmask, int, newmask)
{
        int old;

        spin_lock_irq(&current->sighand->siglock);
        old = current->blocked.sig[0];

        siginitset(&current->blocked, newmask & ~(sigmask(SIGKILL)|
                                                  sigmask(SIGSTOP)));
        recalc_sigpending();
        spin_unlock_irq(&current->sighand->siglock);

        return old;
}
#endif /* __ARCH_WANT_SYS_SGETMASK */

#ifdef __ARCH_WANT_SYS_SIGNAL
/*
 * For backwards compatibility.  Functionality superseded by sigaction.
 */
SYSCALL_DEFINE2(signal, int, sig, __sighandler_t, handler)
{
        struct k_sigaction new_sa, old_sa;
        int ret;

        new_sa.sa.sa_handler = handler;
        new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;
        sigemptyset(&new_sa.sa.sa_mask);

        ret = do_sigaction(sig, &new_sa, &old_sa);

        return ret ? ret : (unsigned long)old_sa.sa.sa_handler;
}
#endif /* __ARCH_WANT_SYS_SIGNAL */
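
/*
 * Hedged note on the flags above: SA_ONESHOT and SA_NOMASK are the old
 * names for SA_RESETHAND and SA_NODEFER, so the userspace equivalent of
 * signal(2) here is roughly
 *
 *      struct sigaction sa = { .sa_handler = handler,
 *                              .sa_flags = SA_RESETHAND | SA_NODEFER };
 *      sigaction(sig, &sa, &old);
 */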
#ifdef __ARCH_WANT_SYS_PAUSE

SYSCALL_DEFINE0(pause)
{
        while (!signal_pending(current)) {
                current->state = TASK_INTERRUPTIBLE;
                schedule();
        }
        return -ERESTARTNOHAND;
}

#endif

#ifdef __ARCH_WANT_SYS_RT_SIGSUSPEND
/**
 * sys_rt_sigsuspend - replace the signal mask with @unewset until a
 *                     signal is received
 * @unewset: new signal mask value
 * @sigsetsize: size of sigset_t type
 */
SYSCALL_DEFINE2(rt_sigsuspend, sigset_t __user *, unewset, size_t, sigsetsize)
{
        sigset_t newset;

        /* XXX: Don't preclude handling different sized sigset_t's.  */
        if (sigsetsize != sizeof(sigset_t))
                return -EINVAL;

        if (copy_from_user(&newset, unewset, sizeof(newset)))
                return -EFAULT;
        sigdelsetmask(&newset, sigmask(SIGKILL)|sigmask(SIGSTOP));

        spin_lock_irq(&current->sighand->siglock);
        current->saved_sigmask = current->blocked;
        current->blocked = newset;
        recalc_sigpending();
        spin_unlock_irq(&current->sighand->siglock);

        current->state = TASK_INTERRUPTIBLE;
        schedule();
        set_restore_sigmask();
        return -ERESTARTNOHAND;
}
#endif /* __ARCH_WANT_SYS_RT_SIGSUSPEND */
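
/*
 * Hedged example of the classic race-free wait that sigsuspend enables
 * (got_signal is a hypothetical flag set by the handler):
 *
 *      sigset_t block, old;
 *
 *      sigemptyset(&block);
 *      sigaddset(&block, SIGUSR1);
 *      sigprocmask(SIG_BLOCK, &block, &old);
 *      while (!got_signal)
 *              sigsuspend(&old);       // atomically unblock and sleep
 *
 * The saved_sigmask/set_restore_sigmask() dance above is what restores
 * the original mask after the handler has run.
 */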
__attribute__((weak)) const char *arch_vma_name(struct vm_area_struct *vma)
{
        return NULL;
}

void __init signals_init(void)
{
        sigqueue_cachep = KMEM_CACHE(sigqueue, SLAB_PANIC);
}
#ifdef CONFIG_KGDB_KDB
#include <linux/kdb.h>
/*
 * kdb_send_sig_info - Allows kdb to send signals without exposing
 * signal internals.  This function checks if the required locks are
 * available before calling the main signal code, to avoid kdb
 * deadlocks.
 */
void
kdb_send_sig_info(struct task_struct *t, struct siginfo *info)
{
        static struct task_struct *kdb_prev_t;
        int sig, new_t;
        if (!spin_trylock(&t->sighand->siglock)) {
                kdb_printf("Can't do kill command now.\n"
                           "The sigmask lock is held somewhere else in "
                           "kernel, try again later\n");
                return;
        }
        spin_unlock(&t->sighand->siglock);
        new_t = kdb_prev_t != t;
        kdb_prev_t = t;
        if (t->state != TASK_RUNNING && new_t) {
                kdb_printf("Process is not RUNNING, sending a signal from "
                           "kdb risks deadlock\n"
                           "on the run queue locks. "
                           "The signal has _not_ been sent.\n"
                           "Reissue the kill command if you want to risk "
                           "the deadlock.\n");
                return;
        }
        sig = info->si_signo;
        if (send_sig_info(sig, info, t))
                kdb_printf("Fail to deliver Signal %d to process %d.\n",
                           sig, t->pid);
        else
                kdb_printf("Signal %d is sent to process %d.\n", sig, t->pid);
}
#endif  /* CONFIG_KGDB_KDB */