/*
 *  linux/kernel/exit.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/capability.h>
#include <linux/completion.h>
#include <linux/personality.h>
#include <linux/tty.h>
#include <linux/mnt_namespace.h>
#include <linux/iocontext.h>
#include <linux/key.h>
#include <linux/security.h>
#include <linux/cpu.h>
#include <linux/acct.h>
#include <linux/tsacct_kern.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/binfmts.h>
#include <linux/nsproxy.h>
#include <linux/pid_namespace.h>
#include <linux/ptrace.h>
#include <linux/profile.h>
#include <linux/mount.h>
#include <linux/proc_fs.h>
#include <linux/kthread.h>
#include <linux/mempolicy.h>
#include <linux/taskstats_kern.h>
#include <linux/delayacct.h>
#include <linux/freezer.h>
#include <linux/cgroup.h>
#include <linux/syscalls.h>
#include <linux/signal.h>
#include <linux/posix-timers.h>
#include <linux/cn_proc.h>
#include <linux/mutex.h>
#include <linux/futex.h>
#include <linux/pipe_fs_i.h>
#include <linux/audit.h> /* for audit_free() */
#include <linux/resource.h>
#include <linux/blkdev.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/tracehook.h>
#include <linux/init_task.h>
#include <trace/sched.h>

#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm/pgtable.h>
#include <asm/mmu_context.h>
#include "cred-internals.h"

DEFINE_TRACE(sched_process_free);
DEFINE_TRACE(sched_process_exit);
DEFINE_TRACE(sched_process_wait);

static void exit_mm(struct task_struct * tsk);
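
/*
 * exit_signal == -1 marks a task as "detached": its parent gets no
 * death notification and the task is reaped without anyone calling
 * wait() on it.
 */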
static inline int task_detached(struct task_struct *p)
{
    return p->exit_signal == -1;
}

static void __unhash_process(struct task_struct *p)
{
    nr_threads--;
    detach_pid(p, PIDTYPE_PID);
    if (thread_group_leader(p)) {
        detach_pid(p, PIDTYPE_PGID);
        detach_pid(p, PIDTYPE_SID);

        list_del_rcu(&p->tasks);
        __get_cpu_var(process_counts)--;
    }
    list_del_rcu(&p->thread_group);
    list_del_init(&p->sibling);
}

/*
 * This function expects the tasklist_lock write-locked.
 */
static void __exit_signal(struct task_struct *tsk)
{
    struct signal_struct *sig = tsk->signal;
    struct sighand_struct *sighand;

    BUG_ON(!sig);
    BUG_ON(!atomic_read(&sig->count));

    sighand = rcu_dereference(tsk->sighand);
    spin_lock(&sighand->siglock);

    posix_cpu_timers_exit(tsk);
    if (atomic_dec_and_test(&sig->count))
        posix_cpu_timers_exit_group(tsk);
    else {
        /*
         * If there is any task waiting for the group exit
         * then notify it:
         */
        if (sig->group_exit_task && atomic_read(&sig->count) == sig->notify_count)
            wake_up_process(sig->group_exit_task);

        if (tsk == sig->curr_target)
            sig->curr_target = next_thread(tsk);
        /*
         * Accumulate here the counters for all threads but the
         * group leader as they die, so they can be added into
         * the process-wide totals when those are taken.
         * The group leader stays around as a zombie as long
         * as there are other threads.  When it gets reaped,
         * the exit.c code will add its counts into these totals.
         * We won't ever get here for the group leader, since it
         * will have been the last reference on the signal_struct.
         */
        sig->utime = cputime_add(sig->utime, task_utime(tsk));
        sig->stime = cputime_add(sig->stime, task_stime(tsk));
        sig->gtime = cputime_add(sig->gtime, task_gtime(tsk));
        sig->min_flt += tsk->min_flt;
        sig->maj_flt += tsk->maj_flt;
        sig->nvcsw += tsk->nvcsw;
        sig->nivcsw += tsk->nivcsw;
        sig->inblock += task_io_get_inblock(tsk);
        sig->oublock += task_io_get_oublock(tsk);
        task_io_accounting_add(&sig->ioac, &tsk->ioac);
        sig->sum_sched_runtime += tsk->se.sum_exec_runtime;
        sig = NULL; /* Marker for below. */
    }

    __unhash_process(tsk);

    /*
     * Do this under ->siglock, we can race with another thread
     * doing sigqueue_free() if we have SIGQUEUE_PREALLOC signals.
     */
    flush_sigqueue(&tsk->pending);

    tsk->signal = NULL;
    tsk->sighand = NULL;
    spin_unlock(&sighand->siglock);

    __cleanup_sighand(sighand);
    clear_tsk_thread_flag(tsk, TIF_SIGPENDING);
    if (sig) {
        flush_sigqueue(&sig->shared_pending);
        taskstats_tgid_free(sig);
        /*
         * Make sure ->signal can't go away under rq->lock,
         * see account_group_exec_runtime().
         */
        task_rq_unlock_wait(tsk);
        __cleanup_signal(sig);
    }
}

static void delayed_put_task_struct(struct rcu_head *rhp)
{
    struct task_struct *tsk = container_of(rhp, struct task_struct, rcu);

    trace_sched_process_free(tsk);
    put_task_struct(tsk);
}

void release_task(struct task_struct * p)
{
    struct task_struct *leader;
    int zap_leader;
repeat:
    tracehook_prepare_release_task(p);
    /* don't need to get the RCU readlock here - the process is dead and
     * can't be modifying its own credentials */
    atomic_dec(&__task_cred(p)->user->processes);

    proc_flush_task(p);
    write_lock_irq(&tasklist_lock);
    tracehook_finish_release_task(p);
    __exit_signal(p);

    /*
     * If we are the last non-leader member of the thread
     * group, and the leader is zombie, then notify the
     * group leader's parent process. (if it wants notification.)
     */
    zap_leader = 0;
    leader = p->group_leader;
    if (leader != p && thread_group_empty(leader) && leader->exit_state == EXIT_ZOMBIE) {
        BUG_ON(task_detached(leader));
        do_notify_parent(leader, leader->exit_signal);
        /*
         * If we were the last child thread and the leader has
         * exited already, and the leader's parent ignores SIGCHLD,
         * then we are the one who should release the leader.
         *
         * do_notify_parent() will have marked it self-reaping in
         * that case.
         */
        zap_leader = task_detached(leader);

        /*
         * This maintains the invariant that release_task()
         * only runs on a task in EXIT_DEAD, just for sanity.
         */
        if (zap_leader)
            leader->exit_state = EXIT_DEAD;
    }

    write_unlock_irq(&tasklist_lock);
    release_thread(p);
    call_rcu(&p->rcu, delayed_put_task_struct);

    p = leader;
    if (unlikely(zap_leader))
        goto repeat;
}
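
/*
 * Note: when the final sub-thread releases a self-reaping zombie leader,
 * the "goto repeat" above runs release_task() once more for the leader
 * itself, since nobody is going to wait() for it.
 */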
/*
 * This checks not only the pgrp, but falls back on the pid if no
 * satisfactory pgrp is found. I dunno - gdb doesn't work correctly
 * without this...
 *
 * The caller must hold rcu lock or the tasklist lock.
 */
struct pid *session_of_pgrp(struct pid *pgrp)
{
    struct task_struct *p;
    struct pid *sid = NULL;

    p = pid_task(pgrp, PIDTYPE_PGID);
    if (p == NULL)
        p = pid_task(pgrp, PIDTYPE_PID);
    if (p != NULL)
        sid = task_session(p);

    return sid;
}

/*
 * Determine if a process group is "orphaned", according to the POSIX
 * definition in 2.2.2.52.  Orphaned process groups are not to be affected
 * by terminal-generated stop signals.  Newly orphaned process groups are
 * to receive a SIGHUP and a SIGCONT.
 *
 * "I ask you, have you ever known what it is to be an orphan?"
 */
static int will_become_orphaned_pgrp(struct pid *pgrp, struct task_struct *ignored_task)
{
    struct task_struct *p;

    do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
        if ((p == ignored_task) ||
            (p->exit_state && thread_group_empty(p)) ||
            is_global_init(p->real_parent))
            continue;

        if (task_pgrp(p->real_parent) != pgrp &&
            task_session(p->real_parent) == task_session(p))
            return 0;
    } while_each_pid_task(pgrp, PIDTYPE_PGID, p);

    return 1;
}
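
/*
 * Example: a login shell exits while a stopped pipeline it spawned is
 * still around.  Once the children are reparented to init (a different
 * session), no member of the pipeline's process group has a parent in
 * another group of the same session, so the group is orphaned and must
 * get SIGHUP followed by SIGCONT if it has stopped jobs.
 */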
int is_current_pgrp_orphaned(void)
{
    int retval;

    read_lock(&tasklist_lock);
    retval = will_become_orphaned_pgrp(task_pgrp(current), NULL);
    read_unlock(&tasklist_lock);

    return retval;
}

static int has_stopped_jobs(struct pid *pgrp)
{
    int retval = 0;
    struct task_struct *p;

    do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
        if (!task_is_stopped(p))
            continue;
        retval = 1;
        break;
    } while_each_pid_task(pgrp, PIDTYPE_PGID, p);

    return retval;
}

/*
 * Check to see if any process groups have become orphaned as
 * a result of our exiting, and if they have any stopped jobs,
 * send them a SIGHUP and then a SIGCONT.  (POSIX 3.2.2.2)
 */
static void
kill_orphaned_pgrp(struct task_struct *tsk, struct task_struct *parent)
{
    struct pid *pgrp = task_pgrp(tsk);
    struct task_struct *ignored_task = tsk;

    if (!parent)
        /* exit: our father is in a different pgrp than
         * we are and we were the only connection outside.
         */
        parent = tsk->real_parent;
    else
        /* reparent: our child is in a different pgrp than
         * we are, and it was the only connection outside.
         */
        ignored_task = NULL;

    if (task_pgrp(parent) != pgrp &&
        task_session(parent) == task_session(tsk) &&
        will_become_orphaned_pgrp(pgrp, ignored_task) &&
        has_stopped_jobs(pgrp)) {
        __kill_pgrp_info(SIGHUP, SEND_SIG_PRIV, pgrp);
        __kill_pgrp_info(SIGCONT, SEND_SIG_PRIV, pgrp);
    }
}

/**
 * reparent_to_kthreadd - Reparent the calling kernel thread to kthreadd
 *
 * If a kernel thread is launched as a result of a system call, or if
 * it ever exits, it should generally reparent itself to kthreadd so it
 * isn't in the way of other processes and is correctly cleaned up on exit.
 *
 * The various task state such as scheduling policy and priority may have
 * been inherited from a user process, so we reset them to sane values here.
 *
 * NOTE that reparent_to_kthreadd() gives the caller full capabilities.
 */
static void reparent_to_kthreadd(void)
{
    write_lock_irq(&tasklist_lock);

    ptrace_unlink(current);
    /* Reparent to init */
    current->real_parent = current->parent = kthreadd_task;
    list_move_tail(&current->sibling, &current->real_parent->children);

    /* Set the exit signal to SIGCHLD so we signal init on exit */
    current->exit_signal = SIGCHLD;

    if (task_nice(current) < 0)
        set_user_nice(current, 0);
    /* cpus_allowed? */
    /* rt_priority? */
    /* signals? */
    memcpy(current->signal->rlim, init_task.signal->rlim,
           sizeof(current->signal->rlim));

    atomic_inc(&init_cred.usage);
    commit_creds(&init_cred);
    write_unlock_irq(&tasklist_lock);
}

void __set_special_pids(struct pid *pid)
{
    struct task_struct *curr = current->group_leader;
    pid_t nr = pid_nr(pid);

    if (task_session(curr) != pid) {
        change_pid(curr, PIDTYPE_SID, pid);
        set_task_session(curr, nr);
    }
    if (task_pgrp(curr) != pid) {
        change_pid(curr, PIDTYPE_PGID, pid);
        set_task_pgrp(curr, nr);
    }
}

static void set_special_pids(struct pid *pid)
{
    write_lock_irq(&tasklist_lock);
    __set_special_pids(pid);
    write_unlock_irq(&tasklist_lock);
}

/*
 * Let kernel threads use this to say that they
 * allow a certain signal (since daemonize() will
 * have disabled all of them by default).
 */
int allow_signal(int sig)
{
    if (!valid_signal(sig) || sig < 1)
        return -EINVAL;

    spin_lock_irq(&current->sighand->siglock);
    sigdelset(&current->blocked, sig);
    if (!current->mm) {
        /* Kernel threads handle their own signals.
           Let the signal code know it'll be handled, so
           that they don't get converted to SIGKILL or
           just silently dropped */
        current->sighand->action[(sig)-1].sa.sa_handler = (void __user *)2;
    }
    recalc_sigpending();
    spin_unlock_irq(&current->sighand->siglock);
    return 0;
}

EXPORT_SYMBOL(allow_signal);

int disallow_signal(int sig)
{
    if (!valid_signal(sig) || sig < 1)
        return -EINVAL;

    spin_lock_irq(&current->sighand->siglock);
    current->sighand->action[(sig)-1].sa.sa_handler = SIG_IGN;
    recalc_sigpending();
    spin_unlock_irq(&current->sighand->siglock);
    return 0;
}

EXPORT_SYMBOL(disallow_signal);

/*
 * Put all the gunge required to become a kernel thread without
 * attached user resources in one place where it belongs.
 */
void daemonize(const char *name, ...)
{
    va_list args;
    struct fs_struct *fs;
    sigset_t blocked;

    va_start(args, name);
    vsnprintf(current->comm, sizeof(current->comm), name, args);
    va_end(args);

    /*
     * If we were started as result of loading a module, close all of the
     * user space pages.  We don't need them, and if we didn't close them
     * they would be locked into memory.
     */
    exit_mm(current);
    /*
     * We don't want to have TIF_FREEZE set if the system-wide hibernation
     * or suspend transition begins right now.
     */
    current->flags |= (PF_NOFREEZE | PF_KTHREAD);

    if (current->nsproxy != &init_nsproxy) {
        get_nsproxy(&init_nsproxy);
        switch_task_namespaces(current, &init_nsproxy);
    }
    set_special_pids(&init_struct_pid);
    proc_clear_tty(current);

    /* Block and flush all signals */
    sigfillset(&blocked);
    sigprocmask(SIG_BLOCK, &blocked, NULL);
    flush_signals(current);

    /* Become as one with the init task */
    exit_fs(current);       /* current->fs->count--; */
    fs = init_task.fs;
    current->fs = fs;
    atomic_inc(&fs->count);

    exit_files(current);
    current->files = init_task.files;
    atomic_inc(&current->files->count);

    reparent_to_kthreadd();
}

EXPORT_SYMBOL(daemonize);
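
/*
 * Illustrative sketch (not part of this file): the classic shape of a
 * legacy kernel thread built on daemonize()/allow_signal().  The thread
 * name and loop body here are hypothetical:
 *
 *	static int my_kthread(void *unused)
 *	{
 *		daemonize("my_kthread");
 *		allow_signal(SIGKILL);
 *		while (!signal_pending(current))
 *			schedule_timeout_interruptible(HZ);
 *		return 0;
 *	}
 *
 * New code should prefer the kthread_create()/kthread_run() API, which
 * starts threads already detached from user resources and does not need
 * daemonize() at all.
 */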
static void close_files(struct files_struct * files)
{
    int i, j;
    struct fdtable *fdt;

    j = 0;

    /*
     * It is safe to dereference the fd table without RCU or
     * ->file_lock because this is the last reference to the
     * files structure.
     */
    fdt = files_fdtable(files);
    for (;;) {
        unsigned long set;
        i = j * __NFDBITS;
        if (i >= fdt->max_fds)
            break;
        set = fdt->open_fds->fds_bits[j++];
        while (set) {
            if (set & 1) {
                struct file * file = xchg(&fdt->fd[i], NULL);
                if (file) {
                    filp_close(file, files);
                    cond_resched();
                }
            }
            i++;
            set >>= 1;
        }
    }
}

struct files_struct *get_files_struct(struct task_struct *task)
{
    struct files_struct *files;

    task_lock(task);
    files = task->files;
    if (files)
        atomic_inc(&files->count);
    task_unlock(task);

    return files;
}

void put_files_struct(struct files_struct *files)
{
    struct fdtable *fdt;

    if (atomic_dec_and_test(&files->count)) {
        close_files(files);
        /*
         * Free the fd and fdset arrays if we expanded them.
         * If the fdtable was embedded, pass files for freeing
         * at the end of the RCU grace period. Otherwise,
         * you can free files immediately.
         */
        fdt = files_fdtable(files);
        if (fdt != &files->fdtab)
            kmem_cache_free(files_cachep, files);
        free_fdtable(fdt);
    }
}

void reset_files_struct(struct files_struct *files)
{
    struct task_struct *tsk = current;
    struct files_struct *old;

    old = tsk->files;
    task_lock(tsk);
    tsk->files = files;
    task_unlock(tsk);
    put_files_struct(old);
}

void exit_files(struct task_struct *tsk)
{
    struct files_struct * files = tsk->files;

    if (files) {
        task_lock(tsk);
        tsk->files = NULL;
        task_unlock(tsk);
        put_files_struct(files);
    }
}

void put_fs_struct(struct fs_struct *fs)
{
    /* No need to hold fs->lock if we are killing it */
    if (atomic_dec_and_test(&fs->count)) {
        path_put(&fs->root);
        path_put(&fs->pwd);
        kmem_cache_free(fs_cachep, fs);
    }
}

void exit_fs(struct task_struct *tsk)
{
    struct fs_struct * fs = tsk->fs;

    if (fs) {
        task_lock(tsk);
        tsk->fs = NULL;
        task_unlock(tsk);
        put_fs_struct(fs);
    }
}

EXPORT_SYMBOL_GPL(exit_fs);
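
/*
 * Background for the block below: with CONFIG_MM_OWNER, mm->owner points
 * at some live task using this mm; it is consumed by callers such as the
 * memory cgroup controller to find the cgroup an mm should be charged to
 * once its original owner has exited.
 */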
#ifdef CONFIG_MM_OWNER
/*
 * Task p is exiting and it owned mm, let's find a new owner for it
 */
static inline int
mm_need_new_owner(struct mm_struct *mm, struct task_struct *p)
{
    /*
     * If there are other users of the mm and the owner (us) is exiting
     * we need to find a new owner to take on the responsibility.
     */
    if (atomic_read(&mm->mm_users) <= 1)
        return 0;
    if (mm->owner != p)
        return 0;
    return 1;
}

void mm_update_next_owner(struct mm_struct *mm)
{
    struct task_struct *c, *g, *p = current;

retry:
    if (!mm_need_new_owner(mm, p))
        return;

    read_lock(&tasklist_lock);
    /*
     * Search in the children
     */
    list_for_each_entry(c, &p->children, sibling) {
        if (c->mm == mm)
            goto assign_new_owner;
    }

    /*
     * Search in the siblings
     */
    list_for_each_entry(c, &p->parent->children, sibling) {
        if (c->mm == mm)
            goto assign_new_owner;
    }

    /*
     * Search through everything else. We should not get
     * here often.
     */
    do_each_thread(g, c) {
        if (c->mm == mm)
            goto assign_new_owner;
    } while_each_thread(g, c);
    read_unlock(&tasklist_lock);
    /*
     * We found no owner yet mm_users > 1: this implies that we are
     * most likely racing with swapoff (try_to_unuse()) or /proc or
     * ptrace or page migration (get_task_mm()).  Mark owner as NULL.
     */
    mm->owner = NULL;
    return;

assign_new_owner:
    BUG_ON(c == p);
    get_task_struct(c);
    /*
     * The task_lock protects c->mm from changing.
     * We always want mm->owner->mm == mm
     */
    task_lock(c);
    /*
     * Delay read_unlock() till we have the task_lock()
     * to ensure that c does not slip away underneath us
     */
    read_unlock(&tasklist_lock);
    if (c->mm != mm) {
        task_unlock(c);
        put_task_struct(c);
        goto retry;
    }
    mm->owner = c;
    task_unlock(c);
    put_task_struct(c);
}
#endif /* CONFIG_MM_OWNER */
/*
 * Turn us into a lazy TLB process if we
 * aren't already..
 */
static void exit_mm(struct task_struct * tsk)
{
    struct mm_struct *mm = tsk->mm;
    struct core_state *core_state;

    mm_release(tsk, mm);
    if (!mm)
        return;
    /*
     * Serialize with any possible pending coredump.
     * We must hold mmap_sem around checking core_state
     * and clearing tsk->mm.  The core-inducing thread
     * will increment ->nr_threads for each thread in the
     * group with ->mm != NULL.
     */
    down_read(&mm->mmap_sem);
    core_state = mm->core_state;
    if (core_state) {
        struct core_thread self;
        up_read(&mm->mmap_sem);

        self.task = tsk;
        self.next = xchg(&core_state->dumper.next, &self);
        /*
         * Implies mb(), the result of xchg() must be visible
         * to core_state->dumper.
         */
        if (atomic_dec_and_test(&core_state->nr_threads))
            complete(&core_state->startup);

        for (;;) {
            set_task_state(tsk, TASK_UNINTERRUPTIBLE);
            if (!self.task) /* see coredump_finish() */
                break;
            schedule();
        }
        __set_task_state(tsk, TASK_RUNNING);
        down_read(&mm->mmap_sem);
    }
    atomic_inc(&mm->mm_count);
    BUG_ON(mm != tsk->active_mm);
    /* more a memory barrier than a real lock */
    task_lock(tsk);
    tsk->mm = NULL;
    up_read(&mm->mmap_sem);
    enter_lazy_tlb(mm, current);
    /* We don't want this task to be frozen prematurely */
    clear_freeze_flag(tsk);
    task_unlock(tsk);
    mm_update_next_owner(mm);
    mmput(mm);
}

/*
 * Called with irqs disabled, returns true if children should reap themselves.
 */
static int ignoring_children(struct sighand_struct *sigh)
{
    int ret;

    spin_lock(&sigh->siglock);
    ret = (sigh->action[SIGCHLD-1].sa.sa_handler == SIG_IGN) ||
          (sigh->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT);
    spin_unlock(&sigh->siglock);
    return ret;
}
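
/*
 * This mirrors the POSIX rule that a parent which sets SIGCHLD to SIG_IGN
 * or uses SA_NOCLDWAIT never accumulates zombies: its children skip the
 * notification step and reap themselves.
 */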
/*
 * Detach all tasks we were using ptrace on.
 * Any that need to be release_task'd are put on the @dead list.
 *
 * Called with write_lock(&tasklist_lock) held.
 */
static void ptrace_exit(struct task_struct *parent, struct list_head *dead)
{
    struct task_struct *p, *n;

    list_for_each_entry_safe(p, n, &parent->ptraced, ptrace_entry) {
        __ptrace_unlink(p);

        if (p->exit_state != EXIT_ZOMBIE)
            continue;

        /*
         * If it's a zombie, our attachedness prevented normal
         * parent notification or self-reaping.  Do notification
         * now if it would have happened earlier.  If it should
         * reap itself, add it to the @dead list.  We can't call
         * release_task() here because we already hold tasklist_lock.
         *
         * If it's our own child, there is no notification to do.
         * But if our normal children self-reap, then this child
         * was prevented by ptrace and we must reap it now.
         */
        if (!task_detached(p) && thread_group_empty(p)) {
            if (!same_thread_group(p->real_parent, parent))
                do_notify_parent(p, p->exit_signal);
            else if (ignoring_children(parent->sighand))
                p->exit_signal = -1;
        }

        if (task_detached(p)) {
            /*
             * Mark it as in the process of being reaped.
             */
            p->exit_state = EXIT_DEAD;
            list_add(&p->ptrace_entry, dead);
        }
    }
}

/*
 * Finish up exit-time ptrace cleanup.
 *
 * Called without locks.
 */
static void ptrace_exit_finish(struct task_struct *parent,
                               struct list_head *dead)
{
    struct task_struct *p, *n;

    BUG_ON(!list_empty(&parent->ptraced));

    list_for_each_entry_safe(p, n, dead, ptrace_entry) {
        list_del_init(&p->ptrace_entry);
        release_task(p);
    }
}

static void reparent_thread(struct task_struct *p, struct task_struct *father)
{
    if (p->pdeath_signal)
        /* We already hold the tasklist_lock here.  */
        group_send_sig_info(p->pdeath_signal, SEND_SIG_NOINFO, p);

    list_move_tail(&p->sibling, &p->real_parent->children);

    /* If this is a threaded reparent there is no need to
     * notify anyone anything has happened.
     */
    if (same_thread_group(p->real_parent, father))
        return;

    /* We don't want people slaying init.  */
    if (!task_detached(p))
        p->exit_signal = SIGCHLD;

    /* If we'd notified the old parent about this child's death,
     * also notify the new parent.
     */
    if (!ptrace_reparented(p) &&
        p->exit_state == EXIT_ZOMBIE &&
        !task_detached(p) && thread_group_empty(p))
        do_notify_parent(p, p->exit_signal);

    kill_orphaned_pgrp(p, father);
}

/*
 * When we die, we re-parent all our children.
 * Try to give them to another thread in our thread
 * group, and if no such member exists, give it to
 * the child reaper process (ie "init") in our pid
 * space.
 */
static struct task_struct *find_new_reaper(struct task_struct *father)
{
    struct pid_namespace *pid_ns = task_active_pid_ns(father);
    struct task_struct *thread;

    thread = father;
    while_each_thread(father, thread) {
        if (thread->flags & PF_EXITING)
            continue;
        if (unlikely(pid_ns->child_reaper == father))
            pid_ns->child_reaper = thread;
        return thread;
    }

    if (unlikely(pid_ns->child_reaper == father)) {
        write_unlock_irq(&tasklist_lock);
        if (unlikely(pid_ns == &init_pid_ns))
            panic("Attempted to kill init!");

        zap_pid_ns_processes(pid_ns);
        write_lock_irq(&tasklist_lock);
        /*
         * We can not clear ->child_reaper or leave it alone.
         * There may be stealth EXIT_DEAD tasks on ->children,
         * forget_original_parent() must move them somewhere.
         */
        pid_ns->child_reaper = init_pid_ns.child_reaper;
    }

    return pid_ns->child_reaper;
}
static void forget_original_parent(struct task_struct *father)
{
    struct task_struct *p, *n, *reaper;
    LIST_HEAD(ptrace_dead);

    write_lock_irq(&tasklist_lock);
    reaper = find_new_reaper(father);
    /*
     * First clean up ptrace if we were using it.
     */
    ptrace_exit(father, &ptrace_dead);

    list_for_each_entry_safe(p, n, &father->children, sibling) {
        p->real_parent = reaper;
        if (p->parent == father) {
            BUG_ON(p->ptrace);
            p->parent = p->real_parent;
        }
        reparent_thread(p, father);
    }

    write_unlock_irq(&tasklist_lock);
    BUG_ON(!list_empty(&father->children));

    ptrace_exit_finish(father, &ptrace_dead);
}

/*
 * Send signals to all our closest relatives so that they know
 * to properly mourn us..
 */
static void exit_notify(struct task_struct *tsk, int group_dead)
{
    int signal;
    void *cookie;

    /*
     * This does two things:
     *
     * A.  Make init inherit all the child processes
     * B.  Check to see if any process groups have become orphaned
     *     as a result of our exiting, and if they have any stopped
     *     jobs, send them a SIGHUP and then a SIGCONT.  (POSIX 3.2.2.2)
     */
    forget_original_parent(tsk);
    exit_task_namespaces(tsk);

    write_lock_irq(&tasklist_lock);
    if (group_dead)
        kill_orphaned_pgrp(tsk->group_leader, NULL);

    /* Let father know we died
     *
     * Thread signals are configurable, but you aren't going to use
     * that to send signals to arbitrary processes.
     * That stops right now.
     *
     * If the parent exec id doesn't match the exec id we saved
     * when we started then we know the parent has changed security
     * domain.
     *
     * If our self_exec id doesn't match our parent_exec_id then
     * we have changed execution domain as these two values started
     * the same after a fork.
     */
    if (tsk->exit_signal != SIGCHLD && !task_detached(tsk) &&
        (tsk->parent_exec_id != tsk->real_parent->self_exec_id ||
         tsk->self_exec_id != tsk->parent_exec_id) &&
        !capable(CAP_KILL))
        tsk->exit_signal = SIGCHLD;

    signal = tracehook_notify_death(tsk, &cookie, group_dead);
    if (signal >= 0)
        signal = do_notify_parent(tsk, signal);

    tsk->exit_state = signal == DEATH_REAP ? EXIT_DEAD : EXIT_ZOMBIE;

    /* mt-exec, de_thread() is waiting for us */
    if (thread_group_leader(tsk) &&
        tsk->signal->group_exit_task &&
        tsk->signal->notify_count < 0)
        wake_up_process(tsk->signal->group_exit_task);

    write_unlock_irq(&tasklist_lock);

    tracehook_report_death(tsk, signal, cookie, group_dead);

    /* If the process is dead, release it - nobody will wait for it */
    if (signal == DEATH_REAP)
        release_task(tsk);
}
#ifdef CONFIG_DEBUG_STACK_USAGE
static void check_stack_usage(void)
{
    static DEFINE_SPINLOCK(low_water_lock);
    static int lowest_to_date = THREAD_SIZE;
    unsigned long free;

    free = stack_not_used(current);

    if (free >= lowest_to_date)
        return;

    spin_lock(&low_water_lock);
    if (free < lowest_to_date) {
        printk(KERN_WARNING "%s used greatest stack depth: %lu bytes "
               "left\n",
               current->comm, free);
        lowest_to_date = free;
    }
    spin_unlock(&low_water_lock);
}
#else
static inline void check_stack_usage(void) {}
#endif
NORET_TYPE void do_exit(long code)
{
    struct task_struct *tsk = current;
    int group_dead;

    profile_task_exit(tsk);

    WARN_ON(atomic_read(&tsk->fs_excl));

    if (unlikely(in_interrupt()))
        panic("Aiee, killing interrupt handler!");
    if (unlikely(!tsk->pid))
        panic("Attempted to kill the idle task!");

    tracehook_report_exit(&code);

    /*
     * We're taking recursive faults here in do_exit. Safest is to just
     * leave this task alone and wait for reboot.
     */
    if (unlikely(tsk->flags & PF_EXITING)) {
        printk(KERN_ALERT
               "Fixing recursive fault but reboot is needed!\n");
        /*
         * We can do this unlocked here. The futex code uses
         * this flag just to verify whether the pi state
         * cleanup has been done or not. In the worst case it
         * loops once more. We pretend that the cleanup was
         * done as there is no way to return. Either the
         * OWNER_DIED bit is set by now or we push the blocked
         * task into the wait-forever nirvana as well.
         */
        tsk->flags |= PF_EXITPIDONE;
        set_current_state(TASK_UNINTERRUPTIBLE);
        schedule();
    }

    exit_signals(tsk);  /* sets PF_EXITING */

    /*
     * tsk->flags are checked in the futex code to protect against
     * an exiting task cleaning up the robust pi futexes.
     */
    smp_mb();
    spin_unlock_wait(&tsk->pi_lock);

    if (unlikely(in_atomic()))
        printk(KERN_INFO "note: %s[%d] exited with preempt_count %d\n",
               current->comm, task_pid_nr(current),
               preempt_count());

    acct_update_integrals(tsk);

    group_dead = atomic_dec_and_test(&tsk->signal->live);
    if (group_dead) {
        hrtimer_cancel(&tsk->signal->real_timer);
        exit_itimers(tsk->signal);
    }
    acct_collect(code, group_dead);
    if (group_dead)
        tty_audit_exit();
    if (unlikely(tsk->audit_context))
        audit_free(tsk);

    tsk->exit_code = code;
    taskstats_exit(tsk, group_dead);

    exit_mm(tsk);

    if (group_dead)
        acct_process();
    trace_sched_process_exit(tsk);

    exit_sem(tsk);
    exit_files(tsk);
    exit_fs(tsk);
    check_stack_usage();
    exit_thread();
    cgroup_exit(tsk, 1);

    if (group_dead && tsk->signal->leader)
        disassociate_ctty(1);

    module_put(task_thread_info(tsk)->exec_domain->module);
    if (tsk->binfmt)
        module_put(tsk->binfmt->module);

    proc_exit_connector(tsk);
    exit_notify(tsk, group_dead);
#ifdef CONFIG_NUMA
    mpol_put(tsk->mempolicy);
    tsk->mempolicy = NULL;
#endif
#ifdef CONFIG_FUTEX
    /*
     * This must happen late, after the PID is not
     * hashed anymore:
     */
    if (unlikely(!list_empty(&tsk->pi_state_list)))
        exit_pi_state_list(tsk);
    if (unlikely(current->pi_state_cache))
        kfree(current->pi_state_cache);
#endif
    /*
     * Make sure we are holding no locks:
     */
    debug_check_no_locks_held(tsk);
    /*
     * We can do this unlocked here. The futex code uses this flag
     * just to verify whether the pi state cleanup has been done
     * or not. In the worst case it loops once more.
     */
    tsk->flags |= PF_EXITPIDONE;

    if (tsk->io_context)
        exit_io_context();

    if (tsk->splice_pipe)
        __free_pipe_info(tsk->splice_pipe);

    preempt_disable();
    /* causes final put_task_struct in finish_task_switch(). */
    tsk->state = TASK_DEAD;
    schedule();
    BUG();
    /* Avoid "noreturn function does return".  */
    for (;;)
        cpu_relax();    /* For when BUG is null */
}
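
/*
 * Note on ordering in do_exit(): exit_notify() runs after the mm, files
 * and fs have been dropped, because once the parent is notified the task
 * may be reaped at any moment; the futex pi-state cleanup in turn must
 * precede setting PF_EXITPIDONE, which tells the futex code that cleanup
 * is complete.
 */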
EXPORT_SYMBOL_GPL(do_exit);

NORET_TYPE void complete_and_exit(struct completion *comp, long code)
{
    if (comp)
        complete(comp);

    do_exit(code);
}

EXPORT_SYMBOL(complete_and_exit);

SYSCALL_DEFINE1(exit, int, error_code)
{
    do_exit((error_code&0xff)<<8);
}
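
/*
 * Example of the encoding above: exit(3) from userspace arrives here as
 * error_code == 3 and is stored as exit_code 0x0300.  A waiter then sees
 * status 0x0300, for which WIFEXITED(status) is true and
 * WEXITSTATUS(status) == 3; for killed tasks the low seven bits carry the
 * terminating signal number instead.
 */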
/*
 * Take down every thread in the group.  This is called by fatal signals
 * as well as by sys_exit_group (below).
 */
NORET_TYPE void
do_group_exit(int exit_code)
{
    struct signal_struct *sig = current->signal;

    BUG_ON(exit_code & 0x80); /* core dumps don't get here */

    if (signal_group_exit(sig))
        exit_code = sig->group_exit_code;
    else if (!thread_group_empty(current)) {
        struct sighand_struct *const sighand = current->sighand;
        spin_lock_irq(&sighand->siglock);
        if (signal_group_exit(sig))
            /* Another thread got here before we took the lock.  */
            exit_code = sig->group_exit_code;
        else {
            sig->group_exit_code = exit_code;
            sig->flags = SIGNAL_GROUP_EXIT;
            zap_other_threads(current);
        }
        spin_unlock_irq(&sighand->siglock);
    }

    do_exit(exit_code);
    /* NOTREACHED */
}

/*
 * this kills every thread in the thread group. Note that any externally
 * wait4()-ing process will get the correct exit code - even if this
 * thread is not the thread group leader.
 */
SYSCALL_DEFINE1(exit_group, int, error_code)
{
    do_group_exit((error_code & 0xff) << 8);
    /* NOTREACHED */
    return 0;
}

static struct pid *task_pid_type(struct task_struct *task, enum pid_type type)
{
    struct pid *pid = NULL;

    if (type == PIDTYPE_PID)
        pid = task->pids[type].pid;
    else if (type < PIDTYPE_MAX)
        pid = task->group_leader->pids[type].pid;
    return pid;
}

static int eligible_child(enum pid_type type, struct pid *pid, int options,
                          struct task_struct *p)
{
    int err;

    if (type < PIDTYPE_MAX) {
        if (task_pid_type(p, type) != pid)
            return 0;
    }

    /* Wait for all children (clone and not) if __WALL is set;
     * otherwise, wait for clone children *only* if __WCLONE is
     * set; otherwise, wait for non-clone children *only*.  (Note:
     * A "clone" child here is one that reports to its parent
     * using a signal other than SIGCHLD.) */
    if (((p->exit_signal != SIGCHLD) ^ ((options & __WCLONE) != 0))
        && !(options & __WALL))
        return 0;

    err = security_task_wait(p);
    if (err)
        return err;

    return 1;
}
static int wait_noreap_copyout(struct task_struct *p, pid_t pid, uid_t uid,
                               int why, int status,
                               struct siginfo __user *infop,
                               struct rusage __user *rusagep)
{
    int retval = rusagep ? getrusage(p, RUSAGE_BOTH, rusagep) : 0;

    put_task_struct(p);
    if (!retval)
        retval = put_user(SIGCHLD, &infop->si_signo);
    if (!retval)
        retval = put_user(0, &infop->si_errno);
    if (!retval)
        retval = put_user((short)why, &infop->si_code);
    if (!retval)
        retval = put_user(pid, &infop->si_pid);
    if (!retval)
        retval = put_user(uid, &infop->si_uid);
    if (!retval)
        retval = put_user(status, &infop->si_status);
    if (!retval)
        retval = pid;
    return retval;
}

/*
 * Handle sys_wait4 work for one task in state EXIT_ZOMBIE.  We hold
 * read_lock(&tasklist_lock) on entry.  If we return zero, we still hold
 * the lock and this task is uninteresting.  If we return nonzero, we have
 * released the lock and the system call should return.
 */
static int wait_task_zombie(struct task_struct *p, int options,
                            struct siginfo __user *infop,
                            int __user *stat_addr, struct rusage __user *ru)
{
    unsigned long state;
    int retval, status, traced;
    pid_t pid = task_pid_vnr(p);
    uid_t uid = __task_cred(p)->uid;

    if (!likely(options & WEXITED))
        return 0;

    if (unlikely(options & WNOWAIT)) {
        int exit_code = p->exit_code;
        int why, status;

        get_task_struct(p);
        read_unlock(&tasklist_lock);
        if ((exit_code & 0x7f) == 0) {
            why = CLD_EXITED;
            status = exit_code >> 8;
        } else {
            why = (exit_code & 0x80) ? CLD_DUMPED : CLD_KILLED;
            status = exit_code & 0x7f;
        }
        return wait_noreap_copyout(p, pid, uid, why,
                                   status, infop, ru);
    }

    /*
     * Try to move the task's state to DEAD
     * only one thread is allowed to do this:
     */
    state = xchg(&p->exit_state, EXIT_DEAD);
    if (state != EXIT_ZOMBIE) {
        BUG_ON(state != EXIT_DEAD);
        return 0;
    }

    traced = ptrace_reparented(p);

    if (likely(!traced)) {
        struct signal_struct *psig;
        struct signal_struct *sig;
        struct task_cputime cputime;

        /*
         * The resource counters for the group leader are in its
         * own task_struct.  Those for dead threads in the group
         * are in its signal_struct, as are those for the child
         * processes it has previously reaped.  All these
         * accumulate in the parent's signal_struct c* fields.
         *
         * We don't bother to take a lock here to protect these
         * p->signal fields, because they are only touched by
         * __exit_signal, which runs with tasklist_lock
         * write-locked anyway, and so is excluded here.  We do
         * need to protect the access to p->parent->signal fields,
         * as other threads in the parent group can be right
         * here reaping other children at the same time.
         *
         * We use thread_group_cputime() to get times for the thread
         * group, which consolidates times for all threads in the
         * group including the group leader.
         */
        thread_group_cputime(p, &cputime);
        spin_lock_irq(&p->parent->sighand->siglock);
        psig = p->parent->signal;
        sig = p->signal;
        psig->cutime =
            cputime_add(psig->cutime,
                        cputime_add(cputime.utime,
                                    sig->cutime));
        psig->cstime =
            cputime_add(psig->cstime,
                        cputime_add(cputime.stime,
                                    sig->cstime));
        psig->cgtime =
            cputime_add(psig->cgtime,
                        cputime_add(p->gtime,
                                    cputime_add(sig->gtime,
                                                sig->cgtime)));
        psig->cmin_flt +=
            p->min_flt + sig->min_flt + sig->cmin_flt;
        psig->cmaj_flt +=
            p->maj_flt + sig->maj_flt + sig->cmaj_flt;
        psig->cnvcsw +=
            p->nvcsw + sig->nvcsw + sig->cnvcsw;
        psig->cnivcsw +=
            p->nivcsw + sig->nivcsw + sig->cnivcsw;
        psig->cinblock +=
            task_io_get_inblock(p) +
            sig->inblock + sig->cinblock;
        psig->coublock +=
            task_io_get_oublock(p) +
            sig->oublock + sig->coublock;
        task_io_accounting_add(&psig->ioac, &p->ioac);
        task_io_accounting_add(&psig->ioac, &sig->ioac);
        spin_unlock_irq(&p->parent->sighand->siglock);
    }

    /*
     * Now we are sure this task is interesting, and no other
     * thread can reap it because we set its state to EXIT_DEAD.
     */
    read_unlock(&tasklist_lock);

    retval = ru ? getrusage(p, RUSAGE_BOTH, ru) : 0;
    status = (p->signal->flags & SIGNAL_GROUP_EXIT)
        ? p->signal->group_exit_code : p->exit_code;
    if (!retval && stat_addr)
        retval = put_user(status, stat_addr);
    if (!retval && infop)
        retval = put_user(SIGCHLD, &infop->si_signo);
    if (!retval && infop)
        retval = put_user(0, &infop->si_errno);
    if (!retval && infop) {
        int why;

        if ((status & 0x7f) == 0) {
            why = CLD_EXITED;
            status >>= 8;
        } else {
            why = (status & 0x80) ? CLD_DUMPED : CLD_KILLED;
            status &= 0x7f;
        }
        retval = put_user((short)why, &infop->si_code);
        if (!retval)
            retval = put_user(status, &infop->si_status);
    }
    if (!retval && infop)
        retval = put_user(pid, &infop->si_pid);
    if (!retval && infop)
        retval = put_user(uid, &infop->si_uid);
    if (!retval)
        retval = pid;

    if (traced) {
        write_lock_irq(&tasklist_lock);
        /* We dropped tasklist, ptracer could die and untrace */
        ptrace_unlink(p);
        /*
         * If this is not a detached task, notify the parent.
         * If it's still not detached after that, don't release
         * it now.
         */
        if (!task_detached(p)) {
            do_notify_parent(p, p->exit_signal);
            if (!task_detached(p)) {
                p->exit_state = EXIT_ZOMBIE;
                p = NULL;
            }
        }
        write_unlock_irq(&tasklist_lock);
    }
    if (p != NULL)
        release_task(p);

    return retval;
}
static int *task_stopped_code(struct task_struct *p, bool ptrace)
{
    if (ptrace) {
        if (task_is_stopped_or_traced(p))
            return &p->exit_code;
    } else {
        if (p->signal->flags & SIGNAL_STOP_STOPPED)
            return &p->signal->group_exit_code;
    }
    return NULL;
}

/*
 * Handle sys_wait4 work for one task in state TASK_STOPPED.  We hold
 * read_lock(&tasklist_lock) on entry.  If we return zero, we still hold
 * the lock and this task is uninteresting.  If we return nonzero, we have
 * released the lock and the system call should return.
 */
static int wait_task_stopped(int ptrace, struct task_struct *p,
                             int options, struct siginfo __user *infop,
                             int __user *stat_addr, struct rusage __user *ru)
{
    int retval, exit_code, *p_code, why;
    uid_t uid = 0; /* unneeded, required by compiler */
    pid_t pid;

    if (!(options & WUNTRACED))
        return 0;

    exit_code = 0;
    spin_lock_irq(&p->sighand->siglock);

    p_code = task_stopped_code(p, ptrace);
    if (unlikely(!p_code))
        goto unlock_sig;

    exit_code = *p_code;
    if (!exit_code)
        goto unlock_sig;

    if (!unlikely(options & WNOWAIT))
        *p_code = 0;

    /* don't need the RCU readlock here as we're holding a spinlock */
    uid = __task_cred(p)->uid;
unlock_sig:
    spin_unlock_irq(&p->sighand->siglock);
    if (!exit_code)
        return 0;

    /*
     * Now we are pretty sure this task is interesting.
     * Make sure it doesn't get reaped out from under us while we
     * give up the lock and then examine it below.  We don't want to
     * keep holding onto the tasklist_lock while we call getrusage and
     * possibly take page faults for user memory.
     */
    get_task_struct(p);
    pid = task_pid_vnr(p);
    why = ptrace ? CLD_TRAPPED : CLD_STOPPED;
    read_unlock(&tasklist_lock);

    if (unlikely(options & WNOWAIT))
        return wait_noreap_copyout(p, pid, uid,
                                   why, exit_code,
                                   infop, ru);

    retval = ru ? getrusage(p, RUSAGE_BOTH, ru) : 0;
    if (!retval && stat_addr)
        retval = put_user((exit_code << 8) | 0x7f, stat_addr);
    if (!retval && infop)
        retval = put_user(SIGCHLD, &infop->si_signo);
    if (!retval && infop)
        retval = put_user(0, &infop->si_errno);
    if (!retval && infop)
        retval = put_user((short)why, &infop->si_code);
    if (!retval && infop)
        retval = put_user(exit_code, &infop->si_status);
    if (!retval && infop)
        retval = put_user(pid, &infop->si_pid);
    if (!retval && infop)
        retval = put_user(uid, &infop->si_uid);
    if (!retval)
        retval = pid;
    put_task_struct(p);

    BUG_ON(!retval);
    return retval;
}

/*
 * Handle do_wait work for one task in a live, non-stopped state.
 * read_lock(&tasklist_lock) on entry.  If we return zero, we still hold
 * the lock and this task is uninteresting.  If we return nonzero, we have
 * released the lock and the system call should return.
 */
static int wait_task_continued(struct task_struct *p, int options,
                               struct siginfo __user *infop,
                               int __user *stat_addr, struct rusage __user *ru)
{
    int retval;
    pid_t pid;
    uid_t uid;

    if (!unlikely(options & WCONTINUED))
        return 0;

    if (!(p->signal->flags & SIGNAL_STOP_CONTINUED))
        return 0;

    spin_lock_irq(&p->sighand->siglock);
    /* Re-check with the lock held.  */
    if (!(p->signal->flags & SIGNAL_STOP_CONTINUED)) {
        spin_unlock_irq(&p->sighand->siglock);
        return 0;
    }
    if (!unlikely(options & WNOWAIT))
        p->signal->flags &= ~SIGNAL_STOP_CONTINUED;
    uid = __task_cred(p)->uid;
    spin_unlock_irq(&p->sighand->siglock);

    pid = task_pid_vnr(p);
    get_task_struct(p);
    read_unlock(&tasklist_lock);

    if (!infop) {
        retval = ru ? getrusage(p, RUSAGE_BOTH, ru) : 0;
        put_task_struct(p);
        if (!retval && stat_addr)
            retval = put_user(0xffff, stat_addr);
        if (!retval)
            retval = pid;
    } else {
        retval = wait_noreap_copyout(p, pid, uid,
                                     CLD_CONTINUED, SIGCONT,
                                     infop, ru);
        BUG_ON(retval == 0);
    }

    return retval;
}

/*
 * Consider @p for a wait by @parent.
 *
 * -ECHILD should be in *@notask_error before the first call.
 * Returns nonzero for a final return, when we have unlocked tasklist_lock.
 * Returns zero if the search for a child should continue;
 * then *@notask_error is 0 if @p is an eligible child,
 * or another error from security_task_wait(), or still -ECHILD.
 */
static int wait_consider_task(struct task_struct *parent, int ptrace,
                              struct task_struct *p, int *notask_error,
                              enum pid_type type, struct pid *pid, int options,
                              struct siginfo __user *infop,
                              int __user *stat_addr, struct rusage __user *ru)
{
    int ret = eligible_child(type, pid, options, p);
    if (!ret)
        return ret;

    if (unlikely(ret < 0)) {
        /*
         * If we have not yet seen any eligible child,
         * then let this error code replace -ECHILD.
         * A permission error will give the user a clue
         * to look for security policy problems, rather
         * than for mysterious wait bugs.
         */
        if (*notask_error)
            *notask_error = ret;
    }

    if (likely(!ptrace) && unlikely(p->ptrace)) {
        /*
         * This child is hidden by ptrace.
         * We aren't allowed to see it now, but eventually we will.
         */
        *notask_error = 0;
        return 0;
    }

    if (p->exit_state == EXIT_DEAD)
        return 0;

    /*
     * We don't reap group leaders with subthreads.
     */
    if (p->exit_state == EXIT_ZOMBIE && !delay_group_leader(p))
        return wait_task_zombie(p, options, infop, stat_addr, ru);

    /*
     * It's stopped or running now, so it might
     * later continue, exit, or stop again.
     */
    *notask_error = 0;

    if (task_stopped_code(p, ptrace))
        return wait_task_stopped(ptrace, p, options,
                                 infop, stat_addr, ru);

    return wait_task_continued(p, options, infop, stat_addr, ru);
}

/*
 * Do the work of do_wait() for one thread in the group, @tsk.
 *
 * -ECHILD should be in *@notask_error before the first call.
 * Returns nonzero for a final return, when we have unlocked tasklist_lock.
 * Returns zero if the search for a child should continue; then
 * *@notask_error is 0 if there were any eligible children,
 * or another error from security_task_wait(), or still -ECHILD.
 */
static int do_wait_thread(struct task_struct *tsk, int *notask_error,
                          enum pid_type type, struct pid *pid, int options,
                          struct siginfo __user *infop, int __user *stat_addr,
                          struct rusage __user *ru)
{
    struct task_struct *p;

    list_for_each_entry(p, &tsk->children, sibling) {
        /*
         * Do not consider detached threads.
         */
        if (!task_detached(p)) {
            int ret = wait_consider_task(tsk, 0, p, notask_error,
                                         type, pid, options,
                                         infop, stat_addr, ru);
            if (ret)
                return ret;
        }
    }

    return 0;
}

static int ptrace_do_wait(struct task_struct *tsk, int *notask_error,
                          enum pid_type type, struct pid *pid, int options,
                          struct siginfo __user *infop, int __user *stat_addr,
                          struct rusage __user *ru)
{
    struct task_struct *p;

    /*
     * Traditionally we see ptrace'd stopped tasks regardless of options.
     */
    options |= WUNTRACED;

    list_for_each_entry(p, &tsk->ptraced, ptrace_entry) {
        int ret = wait_consider_task(tsk, 1, p, notask_error,
                                     type, pid, options,
                                     infop, stat_addr, ru);
        if (ret)
            return ret;
    }

    return 0;
}
static long do_wait(enum pid_type type, struct pid *pid, int options,
                    struct siginfo __user *infop, int __user *stat_addr,
                    struct rusage __user *ru)
{
    DECLARE_WAITQUEUE(wait, current);
    struct task_struct *tsk;
    int retval;

    trace_sched_process_wait(pid);

    add_wait_queue(&current->signal->wait_chldexit, &wait);
repeat:
    /*
     * If there is nothing that can match our criteria just get out.
     * We will clear @retval to zero if we see any child that might later
     * match our criteria, even if we are not able to reap it yet.
     */
    retval = -ECHILD;
    if ((type < PIDTYPE_MAX) && (!pid || hlist_empty(&pid->tasks[type])))
        goto end;

    current->state = TASK_INTERRUPTIBLE;
    read_lock(&tasklist_lock);
    tsk = current;
    do {
        int tsk_result = do_wait_thread(tsk, &retval,
                                        type, pid, options,
                                        infop, stat_addr, ru);
        if (!tsk_result)
            tsk_result = ptrace_do_wait(tsk, &retval,
                                        type, pid, options,
                                        infop, stat_addr, ru);
        if (tsk_result) {
            /*
             * tasklist_lock is unlocked and we have a final result.
             */
            retval = tsk_result;
            goto end;
        }

        if (options & __WNOTHREAD)
            break;
        tsk = next_thread(tsk);
        BUG_ON(tsk->signal != current->signal);
    } while (tsk != current);
    read_unlock(&tasklist_lock);

    if (!retval && !(options & WNOHANG)) {
        retval = -ERESTARTSYS;
        if (!signal_pending(current)) {
            schedule();
            goto repeat;
        }
    }

end:
    current->state = TASK_RUNNING;
    remove_wait_queue(&current->signal->wait_chldexit, &wait);
    if (infop) {
        if (retval > 0)
            retval = 0;
        else {
            /*
             * For a WNOHANG return, clear out all the fields
             * we would set so the user can easily tell the
             * difference.
             */
            if (!retval)
                retval = put_user(0, &infop->si_signo);
            if (!retval)
                retval = put_user(0, &infop->si_errno);
            if (!retval)
                retval = put_user(0, &infop->si_code);
            if (!retval)
                retval = put_user(0, &infop->si_pid);
            if (!retval)
                retval = put_user(0, &infop->si_uid);
            if (!retval)
                retval = put_user(0, &infop->si_status);
        }
    }
    return retval;
}
SYSCALL_DEFINE5(waitid, int, which, pid_t, upid, struct siginfo __user *,
                infop, int, options, struct rusage __user *, ru)
{
    struct pid *pid = NULL;
    enum pid_type type;
    long ret;

    if (options & ~(WNOHANG|WNOWAIT|WEXITED|WSTOPPED|WCONTINUED))
        return -EINVAL;
    if (!(options & (WEXITED|WSTOPPED|WCONTINUED)))
        return -EINVAL;

    switch (which) {
    case P_ALL:
        type = PIDTYPE_MAX;
        break;
    case P_PID:
        type = PIDTYPE_PID;
        if (upid <= 0)
            return -EINVAL;
        break;
    case P_PGID:
        type = PIDTYPE_PGID;
        if (upid <= 0)
            return -EINVAL;
        break;
    default:
        return -EINVAL;
    }

    if (type < PIDTYPE_MAX)
        pid = find_get_pid(upid);
    ret = do_wait(type, pid, options, infop, NULL, ru);
    put_pid(pid);

    /* avoid REGPARM breakage on x86: */
    asmlinkage_protect(5, ret, which, upid, infop, options, ru);
    return ret;
}

SYSCALL_DEFINE4(wait4, pid_t, upid, int __user *, stat_addr,
                int, options, struct rusage __user *, ru)
{
    struct pid *pid = NULL;
    enum pid_type type;
    long ret;

    if (options & ~(WNOHANG|WUNTRACED|WCONTINUED|
                    __WNOTHREAD|__WCLONE|__WALL))
        return -EINVAL;

    if (upid == -1)
        type = PIDTYPE_MAX;
    else if (upid < 0) {
        type = PIDTYPE_PGID;
        pid = find_get_pid(-upid);
    } else if (upid == 0) {
        type = PIDTYPE_PGID;
        pid = get_pid(task_pgrp(current));
    } else /* upid > 0 */ {
        type = PIDTYPE_PID;
        pid = find_get_pid(upid);
    }

    ret = do_wait(type, pid, options | WEXITED, NULL, stat_addr, ru);
    put_pid(pid);

    /* avoid REGPARM breakage on x86: */
    asmlinkage_protect(4, ret, upid, stat_addr, options, ru);
    return ret;
}
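
/*
 * Userspace view of the upid mapping above (illustrative):
 *
 *	wait4(-1,    &status, 0, NULL);    any child
 *	wait4(0,     &status, 0, NULL);    any child in our process group
 *	wait4(-pgid, &status, 0, NULL);    any child in process group pgid
 *	wait4(pid,   &status, 0, NULL);    the specific child pid
 */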
#ifdef __ARCH_WANT_SYS_WAITPID

/*
 * sys_waitpid() remains for compatibility. waitpid() should be
 * implemented by calling sys_wait4() from libc.a.
 */
SYSCALL_DEFINE3(waitpid, pid_t, pid, int __user *, stat_addr, int, options)
{
    return sys_wait4(pid, stat_addr, options, NULL);
}

#endif