/*
 *  linux/kernel/exit.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/capability.h>
#include <linux/completion.h>
#include <linux/personality.h>
#include <linux/tty.h>
#include <linux/iocontext.h>
#include <linux/key.h>
#include <linux/security.h>
#include <linux/cpu.h>
#include <linux/acct.h>
#include <linux/tsacct_kern.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/binfmts.h>
#include <linux/nsproxy.h>
#include <linux/pid_namespace.h>
#include <linux/ptrace.h>
#include <linux/profile.h>
#include <linux/mount.h>
#include <linux/proc_fs.h>
#include <linux/kthread.h>
#include <linux/mempolicy.h>
#include <linux/taskstats_kern.h>
#include <linux/delayacct.h>
#include <linux/freezer.h>
#include <linux/cgroup.h>
#include <linux/syscalls.h>
#include <linux/signal.h>
#include <linux/posix-timers.h>
#include <linux/cn_proc.h>
#include <linux/mutex.h>
#include <linux/futex.h>
#include <linux/pipe_fs_i.h>
#include <linux/audit.h> /* for audit_free() */
#include <linux/resource.h>
#include <linux/blkdev.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/tracehook.h>
#include <linux/fs_struct.h>
#include <linux/init_task.h>
#include <linux/perf_event.h>
#include <trace/events/sched.h>

#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm/pgtable.h>
#include <asm/mmu_context.h>
#include "cred-internals.h"
static void exit_mm(struct task_struct * tsk);

static void __unhash_process(struct task_struct *p)
{
	nr_threads--;
	detach_pid(p, PIDTYPE_PID);
	if (thread_group_leader(p)) {
		detach_pid(p, PIDTYPE_PGID);
		detach_pid(p, PIDTYPE_SID);

		list_del_rcu(&p->tasks);
		__get_cpu_var(process_counts)--;
	}
	list_del_rcu(&p->thread_group);
	list_del_init(&p->sibling);
}
/*
 * This function expects the tasklist_lock write-locked.
 */
static void __exit_signal(struct task_struct *tsk)
{
	struct signal_struct *sig = tsk->signal;
	struct sighand_struct *sighand;

	BUG_ON(!sig);
	BUG_ON(!atomic_read(&sig->count));

	sighand = rcu_dereference(tsk->sighand);
	spin_lock(&sighand->siglock);

	posix_cpu_timers_exit(tsk);
	if (atomic_dec_and_test(&sig->count))
		posix_cpu_timers_exit_group(tsk);
	else {
		/*
		 * If there is any task waiting for the group exit
		 * then notify it:
		 */
		if (sig->group_exit_task && atomic_read(&sig->count) == sig->notify_count)
			wake_up_process(sig->group_exit_task);

		if (tsk == sig->curr_target)
			sig->curr_target = next_thread(tsk);
		/*
		 * Accumulate here the counters for all threads but the
		 * group leader as they die, so they can be added into
		 * the process-wide totals when those are taken.
		 * The group leader stays around as a zombie as long
		 * as there are other threads.  When it gets reaped,
		 * the exit.c code will add its counts into these totals.
		 * We won't ever get here for the group leader, since it
		 * will have been the last reference on the signal_struct.
		 */
		sig->utime = cputime_add(sig->utime, task_utime(tsk));
		sig->stime = cputime_add(sig->stime, task_stime(tsk));
		sig->gtime = cputime_add(sig->gtime, task_gtime(tsk));
		sig->min_flt += tsk->min_flt;
		sig->maj_flt += tsk->maj_flt;
		sig->nvcsw += tsk->nvcsw;
		sig->nivcsw += tsk->nivcsw;
		sig->inblock += task_io_get_inblock(tsk);
		sig->oublock += task_io_get_oublock(tsk);
		task_io_accounting_add(&sig->ioac, &tsk->ioac);
		sig->sum_sched_runtime += tsk->se.sum_exec_runtime;
		sig = NULL; /* Marker for below. */
	}

	__unhash_process(tsk);

	/*
	 * Do this under ->siglock, we can race with another thread
	 * doing sigqueue_free() if we have SIGQUEUE_PREALLOC signals.
	 */
	flush_sigqueue(&tsk->pending);

	tsk->signal = NULL;
	tsk->sighand = NULL;
	spin_unlock(&sighand->siglock);

	__cleanup_sighand(sighand);
	clear_tsk_thread_flag(tsk, TIF_SIGPENDING);
	if (sig) {
		flush_sigqueue(&sig->shared_pending);
		taskstats_tgid_free(sig);
		/*
		 * Make sure ->signal can't go away under rq->lock,
		 * see account_group_exec_runtime().
		 */
		task_rq_unlock_wait(tsk);
		__cleanup_signal(sig);
	}
}
static void delayed_put_task_struct(struct rcu_head *rhp)
{
	struct task_struct *tsk = container_of(rhp, struct task_struct, rcu);

#ifdef CONFIG_PERF_EVENTS
	WARN_ON_ONCE(tsk->perf_event_ctxp);
#endif
	trace_sched_process_free(tsk);
	put_task_struct(tsk);
}
void release_task(struct task_struct * p)
{
	struct task_struct *leader;
	int zap_leader;
repeat:
	tracehook_prepare_release_task(p);
	/* don't need to get the RCU readlock here - the process is dead and
	 * can't be modifying its own credentials */
	atomic_dec(&__task_cred(p)->user->processes);

	proc_flush_task(p);

	write_lock_irq(&tasklist_lock);
	tracehook_finish_release_task(p);
	__exit_signal(p);

	/*
	 * If we are the last non-leader member of the thread
	 * group, and the leader is zombie, then notify the
	 * group leader's parent process. (if it wants notification.)
	 */
	zap_leader = 0;
	leader = p->group_leader;
	if (leader != p && thread_group_empty(leader) && leader->exit_state == EXIT_ZOMBIE) {
		BUG_ON(task_detached(leader));
		do_notify_parent(leader, leader->exit_signal);
		/*
		 * If we were the last child thread and the leader has
		 * exited already, and the leader's parent ignores SIGCHLD,
		 * then we are the one who should release the leader.
		 *
		 * do_notify_parent() will have marked it self-reaping in
		 * that case.
		 */
		zap_leader = task_detached(leader);

		/*
		 * This maintains the invariant that release_task()
		 * only runs on a task in EXIT_DEAD, just for sanity.
		 */
		if (zap_leader)
			leader->exit_state = EXIT_DEAD;
	}

	write_unlock_irq(&tasklist_lock);
	release_thread(p);
	call_rcu(&p->rcu, delayed_put_task_struct);

	p = leader;
	if (unlikely(zap_leader))
		goto repeat;
}
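
/*
 * Note: release_task() is reached two ways.  Normally the parent calls
 * it from the wait paths below (see wait_task_zombie()) once it has
 * collected the child's status.  A task nobody will wait for - one
 * with exit_signal == -1, i.e. task_detached() - instead releases
 * itself from exit_notify() when tracehook_notify_death() says
 * DEATH_REAP.
 */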

/*
 * This checks not only the pgrp, but falls back on the pid if no
 * satisfactory pgrp is found. I dunno - gdb doesn't work correctly
 * without this...
 *
 * The caller must hold rcu lock or the tasklist lock.
 */
struct pid *session_of_pgrp(struct pid *pgrp)
{
	struct task_struct *p;
	struct pid *sid = NULL;

	p = pid_task(pgrp, PIDTYPE_PGID);
	if (p == NULL)
		p = pid_task(pgrp, PIDTYPE_PID);
	if (p != NULL)
		sid = task_session(p);

	return sid;
}
/*
 * Determine if a process group is "orphaned", according to the POSIX
 * definition in 2.2.2.52.  Orphaned process groups are not to be affected
 * by terminal-generated stop signals.  Newly orphaned process groups are
 * to receive a SIGHUP and a SIGCONT.
 *
 * "I ask you, have you ever known what it is to be an orphan?"
 */
static int will_become_orphaned_pgrp(struct pid *pgrp, struct task_struct *ignored_task)
{
	struct task_struct *p;

	do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
		if ((p == ignored_task) ||
		    (p->exit_state && thread_group_empty(p)) ||
		    is_global_init(p->real_parent))
			continue;

		if (task_pgrp(p->real_parent) != pgrp &&
		    task_session(p->real_parent) == task_session(p))
			return 0;
	} while_each_pid_task(pgrp, PIDTYPE_PGID, p);

	return 1;
}
int is_current_pgrp_orphaned(void)
{
	int retval;

	read_lock(&tasklist_lock);
	retval = will_become_orphaned_pgrp(task_pgrp(current), NULL);
	read_unlock(&tasklist_lock);

	return retval;
}

static int has_stopped_jobs(struct pid *pgrp)
{
	int retval = 0;
	struct task_struct *p;

	do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
		if (!task_is_stopped(p))
			continue;
		retval = 1;
		break;
	} while_each_pid_task(pgrp, PIDTYPE_PGID, p);

	return retval;
}
/*
 * Check to see if any process groups have become orphaned as
 * a result of our exiting, and if they have any stopped jobs,
 * send them a SIGHUP and then a SIGCONT.  (POSIX 3.2.2.2)
 */
static void
kill_orphaned_pgrp(struct task_struct *tsk, struct task_struct *parent)
{
	struct pid *pgrp = task_pgrp(tsk);
	struct task_struct *ignored_task = tsk;

	if (!parent)
		/* exit: our father is in a different pgrp than
		 * we are and we were the only connection outside.
		 */
		parent = tsk->real_parent;
	else
		/* reparent: our child is in a different pgrp than
		 * we are, and it was the only connection outside.
		 */
		ignored_task = NULL;

	if (task_pgrp(parent) != pgrp &&
	    task_session(parent) == task_session(tsk) &&
	    will_become_orphaned_pgrp(pgrp, ignored_task) &&
	    has_stopped_jobs(pgrp)) {
		__kill_pgrp_info(SIGHUP, SEND_SIG_PRIV, pgrp);
		__kill_pgrp_info(SIGCONT, SEND_SIG_PRIV, pgrp);
	}
}
/**
 * reparent_to_kthreadd - Reparent the calling kernel thread to kthreadd
 *
 * If a kernel thread is launched as a result of a system call, or if
 * it ever exits, it should generally reparent itself to kthreadd so it
 * isn't in the way of other processes and is correctly cleaned up on exit.
 *
 * The various task state such as scheduling policy and priority may have
 * been inherited from a user process, so we reset them to sane values here.
 *
 * NOTE that reparent_to_kthreadd() gives the caller full capabilities.
 */
static void reparent_to_kthreadd(void)
{
	write_lock_irq(&tasklist_lock);

	ptrace_unlink(current);
	/* Reparent to kthreadd */
	current->real_parent = current->parent = kthreadd_task;
	list_move_tail(&current->sibling, &current->real_parent->children);

	/* Set the exit signal to SIGCHLD so we signal our new parent on exit */
	current->exit_signal = SIGCHLD;

	if (task_nice(current) < 0)
		set_user_nice(current, 0);
	/* cpus_allowed? */
	/* rt_priority? */
	/* signals? */
	memcpy(current->signal->rlim, init_task.signal->rlim,
	       sizeof(current->signal->rlim));

	atomic_inc(&init_cred.usage);
	commit_creds(&init_cred);
	write_unlock_irq(&tasklist_lock);
}
void __set_special_pids(struct pid *pid)
{
	struct task_struct *curr = current->group_leader;

	if (task_session(curr) != pid) {
		change_pid(curr, PIDTYPE_SID, pid);
		proc_sid_connector(curr);
	}

	if (task_pgrp(curr) != pid)
		change_pid(curr, PIDTYPE_PGID, pid);
}

static void set_special_pids(struct pid *pid)
{
	write_lock_irq(&tasklist_lock);
	__set_special_pids(pid);
	write_unlock_irq(&tasklist_lock);
}
/*
 * Let kernel threads use this to say that they allow a certain signal.
 * Must not be used if kthread was cloned with CLONE_SIGHAND.
 */
int allow_signal(int sig)
{
	if (!valid_signal(sig) || sig < 1)
		return -EINVAL;

	spin_lock_irq(&current->sighand->siglock);
	/* This is only needed for daemonize()'ed kthreads */
	sigdelset(&current->blocked, sig);
	/*
	 * Kernel threads handle their own signals.  Let the signal code
	 * know the signal will be handled, so that it doesn't get
	 * converted to SIGKILL or just silently dropped.
	 */
	current->sighand->action[(sig)-1].sa.sa_handler = (void __user *)2;
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);
	return 0;
}

EXPORT_SYMBOL(allow_signal);

int disallow_signal(int sig)
{
	if (!valid_signal(sig) || sig < 1)
		return -EINVAL;

	spin_lock_irq(&current->sighand->siglock);
	current->sighand->action[(sig)-1].sa.sa_handler = SIG_IGN;
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);
	return 0;
}

EXPORT_SYMBOL(disallow_signal);
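
/*
 * Illustrative use from a kernel thread (a sketch, not taken from any
 * particular caller): after allow_signal() the thread must notice and
 * consume the signal itself, e.g.
 *
 *	allow_signal(SIGTERM);
 *	while (!kthread_should_stop()) {
 *		do_work();
 *		if (signal_pending(current))
 *			break;
 *	}
 *
 * where do_work() stands in for the thread's real job.
 */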

/*
 *	Put all the gunge required to become a kernel thread without
 *	attached user resources in one place where it belongs.
 */
void daemonize(const char *name, ...)
{
	va_list args;
	sigset_t blocked;

	va_start(args, name);
	vsnprintf(current->comm, sizeof(current->comm), name, args);
	va_end(args);

	/*
	 * If we were started as result of loading a module, close all of the
	 * user space pages.  We don't need them, and if we didn't close them
	 * they would be locked into memory.
	 */
	exit_mm(current);
	/*
	 * We don't want to have TIF_FREEZE set if the system-wide hibernation
	 * or suspend transition begins right now.
	 */
	current->flags |= (PF_NOFREEZE | PF_KTHREAD);

	if (current->nsproxy != &init_nsproxy) {
		get_nsproxy(&init_nsproxy);
		switch_task_namespaces(current, &init_nsproxy);
	}
	set_special_pids(&init_struct_pid);
	proc_clear_tty(current);

	/* Block and flush all signals */
	sigfillset(&blocked);
	sigprocmask(SIG_BLOCK, &blocked, NULL);
	flush_signals(current);

	/* Become as one with the init task */

	daemonize_fs_struct();
	exit_files(current);
	current->files = init_task.files;
	atomic_inc(&current->files->count);

	reparent_to_kthreadd();
}

EXPORT_SYMBOL(daemonize);
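
/*
 * Note: daemonize() is a legacy helper for threads spawned with
 * kernel_thread(); such a thread typically calls it as its very first
 * action, e.g. daemonize("myd/%d", id) - the name here is only an
 * example.  Threads created through the kthread API are already
 * parented to kthreadd and carry no user resources, so they don't
 * need this.
 */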

static void close_files(struct files_struct * files)
{
	int i, j;
	struct fdtable *fdt;

	j = 0;

	/*
	 * It is safe to dereference the fd table without RCU or
	 * ->file_lock because this is the last reference to the
	 * files structure.
	 */
	fdt = files_fdtable(files);
	for (;;) {
		unsigned long set;
		i = j * __NFDBITS;
		if (i >= fdt->max_fds)
			break;
		set = fdt->open_fds->fds_bits[j++];
		while (set) {
			if (set & 1) {
				struct file * file = xchg(&fdt->fd[i], NULL);
				if (file) {
					filp_close(file, files);
					cond_resched();
				}
			}
			i++;
			set >>= 1;
		}
	}
}
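
/*
 * Note on the loop above: ->open_fds is a bitmap with one bit per
 * descriptor, stored as an array of unsigned longs.  Word j covers
 * descriptors j * __NFDBITS .. (j + 1) * __NFDBITS - 1, so i tracks
 * the descriptor number while successive bits are shifted out of
 * "set"; the shift also lets the inner loop stop early once the
 * remaining bits of the word are all zero.
 */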

struct files_struct *get_files_struct(struct task_struct *task)
{
	struct files_struct *files;

	task_lock(task);
	files = task->files;
	if (files)
		atomic_inc(&files->count);
	task_unlock(task);

	return files;
}

void put_files_struct(struct files_struct *files)
{
	struct fdtable *fdt;

	if (atomic_dec_and_test(&files->count)) {
		close_files(files);
		/*
		 * Free the fd and fdset arrays if we expanded them.
		 * If the fdtable was embedded, pass files for freeing
		 * at the end of the RCU grace period. Otherwise,
		 * you can free files immediately.
		 */
		fdt = files_fdtable(files);
		if (fdt != &files->fdtab)
			kmem_cache_free(files_cachep, files);
		free_fdtable(fdt);
	}
}

void reset_files_struct(struct files_struct *files)
{
	struct task_struct *tsk = current;
	struct files_struct *old;

	old = tsk->files;
	task_lock(tsk);
	tsk->files = files;
	task_unlock(tsk);
	put_files_struct(old);
}

void exit_files(struct task_struct *tsk)
{
	struct files_struct * files = tsk->files;

	if (files) {
		task_lock(tsk);
		tsk->files = NULL;
		task_unlock(tsk);
		put_files_struct(files);
	}
}

#ifdef CONFIG_MM_OWNER
/*
 * Task p is exiting and it owned mm, let's find a new owner for it
 */
static inline int
mm_need_new_owner(struct mm_struct *mm, struct task_struct *p)
{
	/*
	 * If there are other users of the mm and the owner (us) is exiting
	 * we need to find a new owner to take on the responsibility.
	 */
	if (atomic_read(&mm->mm_users) <= 1)
		return 0;
	if (mm->owner != p)
		return 0;
	return 1;
}

void mm_update_next_owner(struct mm_struct *mm)
{
	struct task_struct *c, *g, *p = current;

retry:
	if (!mm_need_new_owner(mm, p))
		return;

	read_lock(&tasklist_lock);
	/*
	 * Search in the children
	 */
	list_for_each_entry(c, &p->children, sibling) {
		if (c->mm == mm)
			goto assign_new_owner;
	}

	/*
	 * Search in the siblings
	 */
	list_for_each_entry(c, &p->real_parent->children, sibling) {
		if (c->mm == mm)
			goto assign_new_owner;
	}

	/*
	 * Search through everything else. We should not get
	 * here often
	 */
	do_each_thread(g, c) {
		if (c->mm == mm)
			goto assign_new_owner;
	} while_each_thread(g, c);
	read_unlock(&tasklist_lock);
	/*
	 * We found no owner yet mm_users > 1: this implies that we are
	 * most likely racing with swapoff (try_to_unuse()) or /proc or
	 * ptrace or page migration (get_task_mm()).  Mark owner as NULL.
	 */
	mm->owner = NULL;
	return;

assign_new_owner:
	BUG_ON(c == p);
	get_task_struct(c);
	/*
	 * The task_lock protects c->mm from changing.
	 * We always want mm->owner->mm == mm
	 */
	task_lock(c);
	/*
	 * Delay read_unlock() till we have the task_lock()
	 * to ensure that c does not slip away underneath us
	 */
	read_unlock(&tasklist_lock);
	if (c->mm != mm) {
		task_unlock(c);
		put_task_struct(c);
		goto retry;
	}
	mm->owner = c;
	task_unlock(c);
	put_task_struct(c);
}
#endif /* CONFIG_MM_OWNER */
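
/*
 * Note: mm->owner exists so that per-mm state can be charged to a
 * single task; the memory cgroup controller is its main consumer,
 * which is why the whole block above is under CONFIG_MM_OWNER.
 */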

/*
 * Turn us into a lazy TLB process if we
 * aren't already..
 */
static void exit_mm(struct task_struct * tsk)
{
	struct mm_struct *mm = tsk->mm;
	struct core_state *core_state;

	mm_release(tsk, mm);
	if (!mm)
		return;
	/*
	 * Serialize with any possible pending coredump.
	 * We must hold mmap_sem around checking core_state
	 * and clearing tsk->mm.  The core-inducing thread
	 * will increment ->nr_threads for each thread in the
	 * group with ->mm != NULL.
	 */
	down_read(&mm->mmap_sem);
	core_state = mm->core_state;
	if (core_state) {
		struct core_thread self;
		up_read(&mm->mmap_sem);

		self.task = tsk;
		self.next = xchg(&core_state->dumper.next, &self);
		/*
		 * Implies mb(), the result of xchg() must be visible
		 * to core_state->dumper.
		 */
		if (atomic_dec_and_test(&core_state->nr_threads))
			complete(&core_state->startup);

		for (;;) {
			set_task_state(tsk, TASK_UNINTERRUPTIBLE);
			if (!self.task) /* see coredump_finish() */
				break;
			schedule();
		}
		__set_task_state(tsk, TASK_RUNNING);
		down_read(&mm->mmap_sem);
	}
	atomic_inc(&mm->mm_count);
	BUG_ON(mm != tsk->active_mm);
	/* more a memory barrier than a real lock */
	task_lock(tsk);
	tsk->mm = NULL;
	up_read(&mm->mmap_sem);
	enter_lazy_tlb(mm, current);
	/* We don't want this task to be frozen prematurely */
	clear_freeze_flag(tsk);
	task_unlock(tsk);
	mm_update_next_owner(mm);
	mmput(mm);
}
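
/*
 * Note: exit_mm() drops a "user" reference (mm_users, via mmput())
 * but first takes an extra "struct" reference (mm_count).  That extra
 * reference keeps the mm_struct itself valid while it is still this
 * task's active_mm for lazy TLB; it is dropped by mmdrop() once the
 * scheduler has done the final context switch away from this task.
 */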

/*
 * When we die, we re-parent all our children.
 * Try to give them to another thread in our thread
 * group, and if no such member exists, give it to
 * the child reaper process (ie "init") in our pid
 * space.
 */
static struct task_struct *find_new_reaper(struct task_struct *father)
{
	struct pid_namespace *pid_ns = task_active_pid_ns(father);
	struct task_struct *thread;

	thread = father;
	while_each_thread(father, thread) {
		if (thread->flags & PF_EXITING)
			continue;
		if (unlikely(pid_ns->child_reaper == father))
			pid_ns->child_reaper = thread;
		return thread;
	}

	if (unlikely(pid_ns->child_reaper == father)) {
		write_unlock_irq(&tasklist_lock);
		if (unlikely(pid_ns == &init_pid_ns))
			panic("Attempted to kill init!");

		zap_pid_ns_processes(pid_ns);
		write_lock_irq(&tasklist_lock);
		/*
		 * We cannot clear ->child_reaper or leave it alone.
		 * There may be stealth EXIT_DEAD tasks on ->children,
		 * forget_original_parent() must move them somewhere.
		 */
		pid_ns->child_reaper = init_pid_ns.child_reaper;
	}

	return pid_ns->child_reaper;
}

/*
 * Any that need to be release_task'd are put on the @dead list.
 */
static void reparent_thread(struct task_struct *father, struct task_struct *p,
				struct list_head *dead)
{
	if (p->pdeath_signal)
		group_send_sig_info(p->pdeath_signal, SEND_SIG_NOINFO, p);

	list_move_tail(&p->sibling, &p->real_parent->children);

	if (task_detached(p))
		return;
	/*
	 * If this is a threaded reparent there is no need to
	 * notify anyone anything has happened.
	 */
	if (same_thread_group(p->real_parent, father))
		return;

	/* We don't want people slaying init.  */
	p->exit_signal = SIGCHLD;

	/* If it has exited notify the new parent about this child's death. */
	if (!task_ptrace(p) &&
	    p->exit_state == EXIT_ZOMBIE && thread_group_empty(p)) {
		do_notify_parent(p, p->exit_signal);
		if (task_detached(p)) {
			p->exit_state = EXIT_DEAD;
			list_move_tail(&p->sibling, dead);
		}
	}

	kill_orphaned_pgrp(p, father);
}

static void forget_original_parent(struct task_struct *father)
{
	struct task_struct *p, *n, *reaper;
	LIST_HEAD(dead_children);

	exit_ptrace(father);

	write_lock_irq(&tasklist_lock);
	reaper = find_new_reaper(father);

	list_for_each_entry_safe(p, n, &father->children, sibling) {
		p->real_parent = reaper;
		if (p->parent == father) {
			BUG_ON(task_ptrace(p));
			p->parent = p->real_parent;
		}
		reparent_thread(father, p, &dead_children);
	}
	write_unlock_irq(&tasklist_lock);

	BUG_ON(!list_empty(&father->children));

	list_for_each_entry_safe(p, n, &dead_children, sibling) {
		list_del_init(&p->sibling);
		release_task(p);
	}
}

/*
 * Send signals to all our closest relatives so that they know
 * to properly mourn us..
 */
static void exit_notify(struct task_struct *tsk, int group_dead)
{
	int signal;
	void *cookie;

	/*
	 * This does two things:
	 *
	 * A.  Make init inherit all the child processes
	 * B.  Check to see if any process groups have become orphaned
	 *	as a result of our exiting, and if they have any stopped
	 *	jobs, send them a SIGHUP and then a SIGCONT.  (POSIX 3.2.2.2)
	 */
	forget_original_parent(tsk);
	exit_task_namespaces(tsk);

	write_lock_irq(&tasklist_lock);
	if (group_dead)
		kill_orphaned_pgrp(tsk->group_leader, NULL);

	/* Let father know we died
	 *
	 * Thread signals are configurable, but you aren't going to use
	 * that to send signals to arbitrary processes.
	 * That stops right now.
	 *
	 * If the parent exec id doesn't match the exec id we saved
	 * when we started then we know the parent has changed security
	 * domain.
	 *
	 * If our self_exec id doesn't match our parent_exec_id then
	 * we have changed execution domain as these two values started
	 * the same after a fork.
	 */
	if (tsk->exit_signal != SIGCHLD && !task_detached(tsk) &&
	    (tsk->parent_exec_id != tsk->real_parent->self_exec_id ||
	     tsk->self_exec_id != tsk->parent_exec_id))
		tsk->exit_signal = SIGCHLD;

	signal = tracehook_notify_death(tsk, &cookie, group_dead);
	if (signal >= 0)
		signal = do_notify_parent(tsk, signal);

	tsk->exit_state = signal == DEATH_REAP ? EXIT_DEAD : EXIT_ZOMBIE;

	/* mt-exec, de_thread() is waiting for us */
	if (thread_group_leader(tsk) &&
	    tsk->signal->group_exit_task &&
	    tsk->signal->notify_count < 0)
		wake_up_process(tsk->signal->group_exit_task);
	write_unlock_irq(&tasklist_lock);

	tracehook_report_death(tsk, signal, cookie, group_dead);

	/* If the process is dead, release it - nobody will wait for it */
	if (signal == DEATH_REAP)
		release_task(tsk);
}

#ifdef CONFIG_DEBUG_STACK_USAGE
static void check_stack_usage(void)
{
	static DEFINE_SPINLOCK(low_water_lock);
	static int lowest_to_date = THREAD_SIZE;
	unsigned long free;

	free = stack_not_used(current);

	if (free >= lowest_to_date)
		return;

	spin_lock(&low_water_lock);
	if (free < lowest_to_date) {
		printk(KERN_WARNING "%s used greatest stack depth: %lu bytes "
				"left\n",
				current->comm, free);
		lowest_to_date = free;
	}
	spin_unlock(&low_water_lock);
}
#else
static inline void check_stack_usage(void) {}
#endif

NORET_TYPE void do_exit(long code)
{
	struct task_struct *tsk = current;
	int group_dead;

	profile_task_exit(tsk);

	WARN_ON(atomic_read(&tsk->fs_excl));

	if (unlikely(in_interrupt()))
		panic("Aiee, killing interrupt handler!");
	if (unlikely(!tsk->pid))
		panic("Attempted to kill the idle task!");

	tracehook_report_exit(&code);

	validate_creds_for_do_exit(tsk);

	/*
	 * We're taking recursive faults here in do_exit. Safest is to just
	 * leave this task alone and wait for reboot.
	 */
	if (unlikely(tsk->flags & PF_EXITING)) {
		printk(KERN_ALERT
			"Fixing recursive fault but reboot is needed!\n");
		/*
		 * We can do this unlocked here. The futex code uses
		 * this flag just to verify whether the pi state
		 * cleanup has been done or not. In the worst case it
		 * loops once more. We pretend that the cleanup was
		 * done as there is no way to return. Either the
		 * OWNER_DIED bit is set by now or we push the blocked
		 * task into the wait-forever nirvana as well.
		 */
		tsk->flags |= PF_EXITPIDONE;
		set_current_state(TASK_UNINTERRUPTIBLE);
		schedule();
	}

	exit_irq_thread();

	exit_signals(tsk);  /* sets PF_EXITING */

	/*
	 * tsk->flags are checked in the futex code to protect against
	 * an exiting task cleaning up the robust pi futexes.
	 */
	smp_mb();
	spin_unlock_wait(&tsk->pi_lock);

	if (unlikely(in_atomic()))
		printk(KERN_INFO "note: %s[%d] exited with preempt_count %d\n",
				current->comm, task_pid_nr(current),
				preempt_count());

	acct_update_integrals(tsk);

	group_dead = atomic_dec_and_test(&tsk->signal->live);
	if (group_dead) {
		hrtimer_cancel(&tsk->signal->real_timer);
		exit_itimers(tsk->signal);
		if (tsk->mm)
			setmax_mm_hiwater_rss(&tsk->signal->maxrss, tsk->mm);
	}
	acct_collect(code, group_dead);
	if (group_dead)
		tty_audit_exit();
	if (unlikely(tsk->audit_context))
		audit_free(tsk);

	tsk->exit_code = code;
	taskstats_exit(tsk, group_dead);

	exit_mm(tsk);

	if (group_dead)
		acct_process();
	trace_sched_process_exit(tsk);

	exit_sem(tsk);
	exit_files(tsk);
	exit_fs(tsk);
	check_stack_usage();
	exit_thread();
	cgroup_exit(tsk, 1);

	if (group_dead && tsk->signal->leader)
		disassociate_ctty(1);

	module_put(task_thread_info(tsk)->exec_domain->module);
	if (tsk->binfmt)
		module_put(tsk->binfmt->module);

	proc_exit_connector(tsk);

	/*
	 * Flush inherited counters to the parent - before the parent
	 * gets woken up by child-exit notifications.
	 */
	perf_event_exit_task(tsk);

	exit_notify(tsk, group_dead);
#ifdef CONFIG_NUMA
	mpol_put(tsk->mempolicy);
	tsk->mempolicy = NULL;
#endif
#ifdef CONFIG_FUTEX
	if (unlikely(!list_empty(&tsk->pi_state_list)))
		exit_pi_state_list(tsk);
	if (unlikely(current->pi_state_cache))
		kfree(current->pi_state_cache);
#endif
	/*
	 * Make sure we are holding no locks:
	 */
	debug_check_no_locks_held(tsk);
	/*
	 * We can do this unlocked here. The futex code uses this flag
	 * just to verify whether the pi state cleanup has been done
	 * or not. In the worst case it loops once more.
	 */
	tsk->flags |= PF_EXITPIDONE;

	if (tsk->io_context)
		exit_io_context();

	if (tsk->splice_pipe)
		__free_pipe_info(tsk->splice_pipe);

	validate_creds_for_do_exit(tsk);

	preempt_disable();
	exit_rcu();
	/* causes final put_task_struct in finish_task_switch(). */
	tsk->state = TASK_DEAD;
	schedule();
	BUG();
	/* Avoid "noreturn function does return".  */
	for (;;)
		cpu_relax();	/* For when BUG is null */
}

EXPORT_SYMBOL_GPL(do_exit);

NORET_TYPE void complete_and_exit(struct completion *comp, long code)
{
	if (comp)
		complete(comp);

	do_exit(code);
}

EXPORT_SYMBOL(complete_and_exit);

SYSCALL_DEFINE1(exit, int, error_code)
{
	do_exit((error_code&0xff)<<8);
}
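
/*
 * Note on the encoding above: (error_code & 0xff) << 8 places the low
 * byte of the exit code in bits 8-15 of ->exit_code, matching the
 * wait() status layout decoded below in wait_task_zombie(): bits 0-6
 * hold the terminating signal (zero for a normal exit) and bit 7 the
 * core-dump flag.
 */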

/*
 * Take down every thread in the group.  This is called by fatal signals
 * as well as by sys_exit_group (below).
 */
NORET_TYPE void
do_group_exit(int exit_code)
{
	struct signal_struct *sig = current->signal;

	BUG_ON(exit_code & 0x80); /* core dumps don't get here */

	if (signal_group_exit(sig))
		exit_code = sig->group_exit_code;
	else if (!thread_group_empty(current)) {
		struct sighand_struct *const sighand = current->sighand;
		spin_lock_irq(&sighand->siglock);
		if (signal_group_exit(sig))
			/* Another thread got here before we took the lock.  */
			exit_code = sig->group_exit_code;
		else {
			sig->group_exit_code = exit_code;
			sig->flags = SIGNAL_GROUP_EXIT;
			zap_other_threads(current);
		}
		spin_unlock_irq(&sighand->siglock);
	}

	do_exit(exit_code);
	/* NOTREACHED */
}

/*
 * this kills every thread in the thread group. Note that any externally
 * wait4()-ing process will get the correct exit code - even if this
 * thread is not the thread group leader.
 */
SYSCALL_DEFINE1(exit_group, int, error_code)
{
	do_group_exit((error_code & 0xff) << 8);
	/* NOTREACHED */
	return 0;
}

struct wait_opts {
	enum pid_type		wo_type;
	int			wo_flags;
	struct pid		*wo_pid;

	struct siginfo __user	*wo_info;
	int __user		*wo_stat;
	struct rusage __user	*wo_rusage;

	wait_queue_t		child_wait;
	int			notask_error;
};

static struct pid *task_pid_type(struct task_struct *task, enum pid_type type)
{
	struct pid *pid = NULL;
	if (type == PIDTYPE_PID)
		pid = task->pids[type].pid;
	else if (type < PIDTYPE_MAX)
		pid = task->group_leader->pids[type].pid;
	return pid;
}

static int eligible_child(struct wait_opts *wo, struct task_struct *p)
{
	if (wo->wo_type < PIDTYPE_MAX) {
		if (task_pid_type(p, wo->wo_type) != wo->wo_pid)
			return 0;
	}

	/* Wait for all children (clone and not) if __WALL is set;
	 * otherwise, wait for clone children *only* if __WCLONE is
	 * set; otherwise, wait for non-clone children *only*.  (Note:
	 * A "clone" child here is one that reports to its parent
	 * using a signal other than SIGCHLD.) */
	if (((p->exit_signal != SIGCHLD) ^ !!(wo->wo_flags & __WCLONE))
	    && !(wo->wo_flags & __WALL))
		return 0;

	return 1;
}
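
/*
 * The XOR test above works out to:
 *
 *	__WALL set	- any child is eligible
 *	__WCLONE set	- only children whose exit_signal is not
 *			  SIGCHLD are eligible
 *	neither set	- only children whose exit_signal is SIGCHLD
 *			  are eligible
 */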

static int wait_noreap_copyout(struct wait_opts *wo, struct task_struct *p,
				pid_t pid, uid_t uid, int why, int status)
{
	struct siginfo __user *infop;
	int retval = wo->wo_rusage
		? getrusage(p, RUSAGE_BOTH, wo->wo_rusage) : 0;

	put_task_struct(p);
	infop = wo->wo_info;
	if (!retval)
		retval = put_user(SIGCHLD, &infop->si_signo);
	if (!retval)
		retval = put_user(0, &infop->si_errno);
	if (!retval)
		retval = put_user((short)why, &infop->si_code);
	if (!retval)
		retval = put_user(pid, &infop->si_pid);
	if (!retval)
		retval = put_user(uid, &infop->si_uid);
	if (!retval)
		retval = put_user(status, &infop->si_status);
	if (!retval)
		retval = pid;
	return retval;
}

/*
 * Handle sys_wait4 work for one task in state EXIT_ZOMBIE.  We hold
 * read_lock(&tasklist_lock) on entry.  If we return zero, we still hold
 * the lock and this task is uninteresting.  If we return nonzero, we have
 * released the lock and the system call should return.
 */
static int wait_task_zombie(struct wait_opts *wo, struct task_struct *p)
{
	unsigned long state;
	int retval, status, traced;
	pid_t pid = task_pid_vnr(p);
	uid_t uid = __task_cred(p)->uid;
	struct siginfo __user *infop;

	if (!likely(wo->wo_flags & WEXITED))
		return 0;

	if (unlikely(wo->wo_flags & WNOWAIT)) {
		int exit_code = p->exit_code;
		int why, status;

		get_task_struct(p);
		read_unlock(&tasklist_lock);
		if ((exit_code & 0x7f) == 0) {
			why = CLD_EXITED;
			status = exit_code >> 8;
		} else {
			why = (exit_code & 0x80) ? CLD_DUMPED : CLD_KILLED;
			status = exit_code & 0x7f;
		}
		return wait_noreap_copyout(wo, p, pid, uid, why, status);
	}

	/*
	 * Try to move the task's state to DEAD
	 * only one thread is allowed to do this:
	 */
	state = xchg(&p->exit_state, EXIT_DEAD);
	if (state != EXIT_ZOMBIE) {
		BUG_ON(state != EXIT_DEAD);
		return 0;
	}

	traced = ptrace_reparented(p);
	/*
	 * It can be ptraced but not reparented, check
	 * !task_detached() to filter out sub-threads.
	 */
	if (likely(!traced) && likely(!task_detached(p))) {
		struct signal_struct *psig;
		struct signal_struct *sig;
		unsigned long maxrss;

		/*
		 * The resource counters for the group leader are in its
		 * own task_struct.  Those for dead threads in the group
		 * are in its signal_struct, as are those for the child
		 * processes it has previously reaped.  All these
		 * accumulate in the parent's signal_struct c* fields.
		 *
		 * We don't bother to take a lock here to protect these
		 * p->signal fields, because they are only touched by
		 * __exit_signal, which runs with tasklist_lock
		 * write-locked anyway, and so is excluded here.  We do
		 * need to protect the access to parent->signal fields,
		 * as other threads in the parent group can be right
		 * here reaping other children at the same time.
		 */
		spin_lock_irq(&p->real_parent->sighand->siglock);
		psig = p->real_parent->signal;
		sig = p->signal;
		psig->cutime =
			cputime_add(psig->cutime,
			cputime_add(p->utime,
			cputime_add(sig->utime,
				    sig->cutime)));
		psig->cstime =
			cputime_add(psig->cstime,
			cputime_add(p->stime,
			cputime_add(sig->stime,
				    sig->cstime)));
		psig->cgtime =
			cputime_add(psig->cgtime,
			cputime_add(p->gtime,
			cputime_add(sig->gtime,
				    sig->cgtime)));
		psig->cmin_flt +=
			p->min_flt + sig->min_flt + sig->cmin_flt;
		psig->cmaj_flt +=
			p->maj_flt + sig->maj_flt + sig->cmaj_flt;
		psig->cnvcsw +=
			p->nvcsw + sig->nvcsw + sig->cnvcsw;
		psig->cnivcsw +=
			p->nivcsw + sig->nivcsw + sig->cnivcsw;
		psig->cinblock +=
			task_io_get_inblock(p) +
			sig->inblock + sig->cinblock;
		psig->coublock +=
			task_io_get_oublock(p) +
			sig->oublock + sig->coublock;
		maxrss = max(sig->maxrss, sig->cmaxrss);
		if (psig->cmaxrss < maxrss)
			psig->cmaxrss = maxrss;
		task_io_accounting_add(&psig->ioac, &p->ioac);
		task_io_accounting_add(&psig->ioac, &sig->ioac);
		spin_unlock_irq(&p->real_parent->sighand->siglock);
	}

	/*
	 * Now we are sure this task is interesting, and no other
	 * thread can reap it because we set its state to EXIT_DEAD.
	 */
	read_unlock(&tasklist_lock);

	retval = wo->wo_rusage
		? getrusage(p, RUSAGE_BOTH, wo->wo_rusage) : 0;
	status = (p->signal->flags & SIGNAL_GROUP_EXIT)
		? p->signal->group_exit_code : p->exit_code;
	if (!retval && wo->wo_stat)
		retval = put_user(status, wo->wo_stat);

	infop = wo->wo_info;
	if (!retval && infop)
		retval = put_user(SIGCHLD, &infop->si_signo);
	if (!retval && infop)
		retval = put_user(0, &infop->si_errno);
	if (!retval && infop) {
		int why;

		if ((status & 0x7f) == 0) {
			why = CLD_EXITED;
			status >>= 8;
		} else {
			why = (status & 0x80) ? CLD_DUMPED : CLD_KILLED;
			status &= 0x7f;
		}
		retval = put_user((short)why, &infop->si_code);
		if (!retval)
			retval = put_user(status, &infop->si_status);
	}
	if (!retval && infop)
		retval = put_user(pid, &infop->si_pid);
	if (!retval && infop)
		retval = put_user(uid, &infop->si_uid);
	if (!retval)
		retval = pid;

	if (traced) {
		write_lock_irq(&tasklist_lock);
		/* We dropped tasklist, ptracer could die and untrace */
		ptrace_unlink(p);
		/*
		 * If this is not a detached task, notify the parent.
		 * If it's still not detached after that, don't release
		 * it now.
		 */
		if (!task_detached(p)) {
			do_notify_parent(p, p->exit_signal);
			if (!task_detached(p)) {
				p->exit_state = EXIT_ZOMBIE;
				p = NULL;
			}
		}
		write_unlock_irq(&tasklist_lock);
	}
	if (p != NULL)
		release_task(p);

	return retval;
}

static int *task_stopped_code(struct task_struct *p, bool ptrace)
{
	if (ptrace) {
		if (task_is_stopped_or_traced(p))
			return &p->exit_code;
	} else {
		if (p->signal->flags & SIGNAL_STOP_STOPPED)
			return &p->signal->group_exit_code;
	}
	return NULL;
}

/*
 * Handle sys_wait4 work for one task in state TASK_STOPPED.  We hold
 * read_lock(&tasklist_lock) on entry.  If we return zero, we still hold
 * the lock and this task is uninteresting.  If we return nonzero, we have
 * released the lock and the system call should return.
 */
static int wait_task_stopped(struct wait_opts *wo,
				int ptrace, struct task_struct *p)
{
	struct siginfo __user *infop;
	int retval, exit_code, *p_code, why;
	uid_t uid = 0; /* unneeded, required by compiler */
	pid_t pid;

	/*
	 * Traditionally we see ptrace'd stopped tasks regardless of options.
	 */
	if (!ptrace && !(wo->wo_flags & WUNTRACED))
		return 0;

	exit_code = 0;
	spin_lock_irq(&p->sighand->siglock);

	p_code = task_stopped_code(p, ptrace);
	if (unlikely(!p_code))
		goto unlock_sig;

	exit_code = *p_code;
	if (!exit_code)
		goto unlock_sig;

	if (!unlikely(wo->wo_flags & WNOWAIT))
		*p_code = 0;

	/* don't need the RCU readlock here as we're holding a spinlock */
	uid = __task_cred(p)->uid;
unlock_sig:
	spin_unlock_irq(&p->sighand->siglock);
	if (!exit_code)
		return 0;

	/*
	 * Now we are pretty sure this task is interesting.
	 * Make sure it doesn't get reaped out from under us while we
	 * give up the lock and then examine it below.  We don't want to
	 * keep holding onto the tasklist_lock while we call getrusage and
	 * possibly take page faults for user memory.
	 */
	get_task_struct(p);
	pid = task_pid_vnr(p);
	why = ptrace ? CLD_TRAPPED : CLD_STOPPED;
	read_unlock(&tasklist_lock);

	if (unlikely(wo->wo_flags & WNOWAIT))
		return wait_noreap_copyout(wo, p, pid, uid, why, exit_code);

	retval = wo->wo_rusage
		? getrusage(p, RUSAGE_BOTH, wo->wo_rusage) : 0;
	if (!retval && wo->wo_stat)
		retval = put_user((exit_code << 8) | 0x7f, wo->wo_stat);

	infop = wo->wo_info;
	if (!retval && infop)
		retval = put_user(SIGCHLD, &infop->si_signo);
	if (!retval && infop)
		retval = put_user(0, &infop->si_errno);
	if (!retval && infop)
		retval = put_user((short)why, &infop->si_code);
	if (!retval && infop)
		retval = put_user(exit_code, &infop->si_status);
	if (!retval && infop)
		retval = put_user(pid, &infop->si_pid);
	if (!retval && infop)
		retval = put_user(uid, &infop->si_uid);
	if (!retval)
		retval = pid;
	put_task_struct(p);

	BUG_ON(!retval);
	return retval;
}
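
/*
 * Note: (exit_code << 8) | 0x7f is the classic "stopped" wait status:
 * the 0x7f low byte is what WIFSTOPPED() tests for, and the stop
 * signal lands where WSTOPSIG() expects it.
 */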

/*
 * Handle do_wait work for one task in a live, non-stopped state.
 * read_lock(&tasklist_lock) on entry.  If we return zero, we still hold
 * the lock and this task is uninteresting.  If we return nonzero, we have
 * released the lock and the system call should return.
 */
static int wait_task_continued(struct wait_opts *wo, struct task_struct *p)
{
	int retval;
	pid_t pid;
	uid_t uid;

	if (!unlikely(wo->wo_flags & WCONTINUED))
		return 0;

	if (!(p->signal->flags & SIGNAL_STOP_CONTINUED))
		return 0;

	spin_lock_irq(&p->sighand->siglock);
	/* Re-check with the lock held.  */
	if (!(p->signal->flags & SIGNAL_STOP_CONTINUED)) {
		spin_unlock_irq(&p->sighand->siglock);
		return 0;
	}
	if (!unlikely(wo->wo_flags & WNOWAIT))
		p->signal->flags &= ~SIGNAL_STOP_CONTINUED;
	uid = __task_cred(p)->uid;
	spin_unlock_irq(&p->sighand->siglock);

	pid = task_pid_vnr(p);
	get_task_struct(p);
	read_unlock(&tasklist_lock);

	if (!wo->wo_info) {
		retval = wo->wo_rusage
			? getrusage(p, RUSAGE_BOTH, wo->wo_rusage) : 0;
		put_task_struct(p);
		if (!retval && wo->wo_stat)
			retval = put_user(0xffff, wo->wo_stat);
		if (!retval)
			retval = pid;
	} else {
		retval = wait_noreap_copyout(wo, p, pid, uid,
					     CLD_CONTINUED, SIGCONT);
		BUG_ON(retval == 0);
	}

	return retval;
}
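
/*
 * Note: 0xffff is the "continued" wait status, i.e. the value that
 * userspace's WIFCONTINUED() recognizes.
 */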

/*
 * Consider @p for a wait by @parent.
 *
 * -ECHILD should be in ->notask_error before the first call.
 * Returns nonzero for a final return, when we have unlocked tasklist_lock.
 * Returns zero if the search for a child should continue;
 * then ->notask_error is 0 if @p is an eligible child,
 * or another error from security_task_wait(), or still -ECHILD.
 */
static int wait_consider_task(struct wait_opts *wo, struct task_struct *parent,
				int ptrace, struct task_struct *p)
{
	int ret = eligible_child(wo, p);
	if (!ret)
		return ret;

	ret = security_task_wait(p);
	if (unlikely(ret < 0)) {
		/*
		 * If we have not yet seen any eligible child,
		 * then let this error code replace -ECHILD.
		 * A permission error will give the user a clue
		 * to look for security policy problems, rather
		 * than for mysterious wait bugs.
		 */
		if (wo->notask_error)
			wo->notask_error = ret;
		return 0;
	}

	if (likely(!ptrace) && unlikely(task_ptrace(p))) {
		/*
		 * This child is hidden by ptrace.
		 * We aren't allowed to see it now, but eventually we will.
		 */
		wo->notask_error = 0;
		return 0;
	}

	if (p->exit_state == EXIT_DEAD)
		return 0;

	/*
	 * We don't reap group leaders with subthreads.
	 */
	if (p->exit_state == EXIT_ZOMBIE && !delay_group_leader(p))
		return wait_task_zombie(wo, p);

	/*
	 * It's stopped or running now, so it might
	 * later continue, exit, or stop again.
	 */
	wo->notask_error = 0;

	if (task_stopped_code(p, ptrace))
		return wait_task_stopped(wo, ptrace, p);

	return wait_task_continued(wo, p);
}

/*
 * Do the work of do_wait() for one thread in the group, @tsk.
 *
 * -ECHILD should be in ->notask_error before the first call.
 * Returns nonzero for a final return, when we have unlocked tasklist_lock.
 * Returns zero if the search for a child should continue; then
 * ->notask_error is 0 if there were any eligible children,
 * or another error from security_task_wait(), or still -ECHILD.
 */
static int do_wait_thread(struct wait_opts *wo, struct task_struct *tsk)
{
	struct task_struct *p;

	list_for_each_entry(p, &tsk->children, sibling) {
		/*
		 * Do not consider detached threads.
		 */
		if (!task_detached(p)) {
			int ret = wait_consider_task(wo, tsk, 0, p);
			if (ret)
				return ret;
		}
	}

	return 0;
}

static int ptrace_do_wait(struct wait_opts *wo, struct task_struct *tsk)
{
	struct task_struct *p;

	list_for_each_entry(p, &tsk->ptraced, ptrace_entry) {
		int ret = wait_consider_task(wo, tsk, 1, p);
		if (ret)
			return ret;
	}

	return 0;
}

static int child_wait_callback(wait_queue_t *wait, unsigned mode,
				int sync, void *key)
{
	struct wait_opts *wo = container_of(wait, struct wait_opts,
						child_wait);
	struct task_struct *p = key;

	if (!eligible_child(wo, p))
		return 0;

	if ((wo->wo_flags & __WNOTHREAD) && wait->private != p->parent)
		return 0;

	return default_wake_function(wait, mode, sync, key);
}
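
/*
 * Note: this callback sits on the parent's ->signal->wait_chldexit
 * queue.  __wake_up_parent() below passes the exiting/stopping child
 * as the wakeup key, so a waiter is only woken when that child
 * actually matches its wait criteria - a cheap filter that avoids
 * spurious wakeups of unrelated wait4()/waitid() callers.
 */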

void __wake_up_parent(struct task_struct *p, struct task_struct *parent)
{
	__wake_up_sync_key(&parent->signal->wait_chldexit,
				TASK_INTERRUPTIBLE, 1, p);
}

static long do_wait(struct wait_opts *wo)
{
	struct task_struct *tsk;
	int retval;

	trace_sched_process_wait(wo->wo_pid);

	init_waitqueue_func_entry(&wo->child_wait, child_wait_callback);
	wo->child_wait.private = current;
	add_wait_queue(&current->signal->wait_chldexit, &wo->child_wait);
repeat:
	/*
	 * If there is nothing that can match our criteria just get out.
	 * We will clear ->notask_error to zero if we see any child that
	 * might later match our criteria, even if we are not able to reap
	 * it yet.
	 */
	wo->notask_error = -ECHILD;
	if ((wo->wo_type < PIDTYPE_MAX) &&
	   (!wo->wo_pid || hlist_empty(&wo->wo_pid->tasks[wo->wo_type])))
		goto notask;

	set_current_state(TASK_INTERRUPTIBLE);
	read_lock(&tasklist_lock);
	tsk = current;
	do {
		retval = do_wait_thread(wo, tsk);
		if (retval)
			goto end;

		retval = ptrace_do_wait(wo, tsk);
		if (retval)
			goto end;

		if (wo->wo_flags & __WNOTHREAD)
			break;
	} while_each_thread(current, tsk);
	read_unlock(&tasklist_lock);

notask:
	retval = wo->notask_error;
	if (!retval && !(wo->wo_flags & WNOHANG)) {
		retval = -ERESTARTSYS;
		if (!signal_pending(current)) {
			schedule();
			goto repeat;
		}
	}
end:
	__set_current_state(TASK_RUNNING);
	remove_wait_queue(&current->signal->wait_chldexit, &wo->child_wait);
	if (wo->wo_info) {
		struct siginfo __user *infop = wo->wo_info;

		if (retval > 0)
			retval = 0;
		else {
			/*
			 * For a WNOHANG return, clear out all the fields
			 * we would set so the user can easily tell the
			 * difference.
			 */
			if (!retval)
				retval = put_user(0, &infop->si_signo);
			if (!retval)
				retval = put_user(0, &infop->si_errno);
			if (!retval)
				retval = put_user(0, &infop->si_code);
			if (!retval)
				retval = put_user(0, &infop->si_pid);
			if (!retval)
				retval = put_user(0, &infop->si_uid);
			if (!retval)
				retval = put_user(0, &infop->si_status);
		}
	}
	return retval;
}
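
/*
 * Note: do_wait() queues itself on wait_chldexit before each scan and
 * sets TASK_INTERRUPTIBLE before walking the children, so a child that
 * becomes interesting after the walk wakes us via child_wait_callback()
 * and there is no window in which an exit can be missed between the
 * scan and the schedule().
 */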

SYSCALL_DEFINE5(waitid, int, which, pid_t, upid, struct siginfo __user *,
		infop, int, options, struct rusage __user *, ru)
{
	struct wait_opts wo;
	struct pid *pid = NULL;
	enum pid_type type;
	long ret;

	if (options & ~(WNOHANG|WNOWAIT|WEXITED|WSTOPPED|WCONTINUED))
		return -EINVAL;
	if (!(options & (WEXITED|WSTOPPED|WCONTINUED)))
		return -EINVAL;

	switch (which) {
	case P_ALL:
		type = PIDTYPE_MAX;
		break;
	case P_PID:
		type = PIDTYPE_PID;
		if (upid <= 0)
			return -EINVAL;
		break;
	case P_PGID:
		type = PIDTYPE_PGID;
		if (upid <= 0)
			return -EINVAL;
		break;
	default:
		return -EINVAL;
	}

	if (type < PIDTYPE_MAX)
		pid = find_get_pid(upid);

	wo.wo_type	= type;
	wo.wo_pid	= pid;
	wo.wo_flags	= options;
	wo.wo_info	= infop;
	wo.wo_stat	= NULL;
	wo.wo_rusage	= ru;
	ret = do_wait(&wo);
	put_pid(pid);

	/* avoid REGPARM breakage on x86: */
	asmlinkage_protect(5, ret, which, upid, infop, options, ru);
	return ret;
}

SYSCALL_DEFINE4(wait4, pid_t, upid, int __user *, stat_addr,
		int, options, struct rusage __user *, ru)
{
	struct wait_opts wo;
	struct pid *pid = NULL;
	enum pid_type type;
	long ret;

	if (options & ~(WNOHANG|WUNTRACED|WCONTINUED|
			__WNOTHREAD|__WCLONE|__WALL))
		return -EINVAL;

	if (upid == -1)
		type = PIDTYPE_MAX;
	else if (upid < 0) {
		type = PIDTYPE_PGID;
		pid = find_get_pid(-upid);
	} else if (upid == 0) {
		type = PIDTYPE_PGID;
		pid = get_task_pid(current, PIDTYPE_PGID);
	} else /* upid > 0 */ {
		type = PIDTYPE_PID;
		pid = find_get_pid(upid);
	}

	wo.wo_type	= type;
	wo.wo_pid	= pid;
	wo.wo_flags	= options | WEXITED;
	wo.wo_info	= NULL;
	wo.wo_stat	= stat_addr;
	wo.wo_rusage	= ru;
	ret = do_wait(&wo);
	put_pid(pid);

	/* avoid REGPARM breakage on x86: */
	asmlinkage_protect(4, ret, upid, stat_addr, options, ru);
	return ret;
}

#ifdef __ARCH_WANT_SYS_WAITPID

/*
 * sys_waitpid() remains for compatibility. waitpid() should be
 * implemented by calling sys_wait4() from libc.a.
 */
SYSCALL_DEFINE3(waitpid, pid_t, pid, int __user *, stat_addr, int, options)
{
	return sys_wait4(pid, stat_addr, options, NULL);
}

#endif