/*
 *  linux/kernel/exit.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/capability.h>
#include <linux/completion.h>
#include <linux/personality.h>
#include <linux/tty.h>
#include <linux/iocontext.h>
#include <linux/key.h>
#include <linux/security.h>
#include <linux/cpu.h>
#include <linux/acct.h>
#include <linux/tsacct_kern.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/binfmts.h>
#include <linux/nsproxy.h>
#include <linux/pid_namespace.h>
#include <linux/ptrace.h>
#include <linux/profile.h>
#include <linux/mount.h>
#include <linux/proc_fs.h>
#include <linux/kthread.h>
#include <linux/mempolicy.h>
#include <linux/taskstats_kern.h>
#include <linux/delayacct.h>
#include <linux/freezer.h>
#include <linux/cgroup.h>
#include <linux/syscalls.h>
#include <linux/signal.h>
#include <linux/posix-timers.h>
#include <linux/cn_proc.h>
#include <linux/mutex.h>
#include <linux/futex.h>
#include <linux/pipe_fs_i.h>
#include <linux/audit.h> /* for audit_free() */
#include <linux/resource.h>
#include <linux/blkdev.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/tracehook.h>
#include <linux/fs_struct.h>
#include <linux/init_task.h>
#include <linux/perf_event.h>
#include <trace/events/sched.h>
#include <linux/hw_breakpoint.h>
#include <linux/oom.h>
#include <linux/writeback.h>

#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm/pgtable.h>
#include <asm/mmu_context.h>
static void exit_mm(struct task_struct *tsk);

static void __unhash_process(struct task_struct *p, bool group_dead)
{
        nr_threads--;
        detach_pid(p, PIDTYPE_PID);
        if (group_dead) {
                detach_pid(p, PIDTYPE_PGID);
                detach_pid(p, PIDTYPE_SID);

                list_del_rcu(&p->tasks);
                list_del_init(&p->sibling);
                __this_cpu_dec(process_counts);
        }
        list_del_rcu(&p->thread_group);
}
/*
 * This function expects the tasklist_lock write-locked.
 */
static void __exit_signal(struct task_struct *tsk)
{
        struct signal_struct *sig = tsk->signal;
        bool group_dead = thread_group_leader(tsk);
        struct sighand_struct *sighand;
        struct tty_struct *uninitialized_var(tty);

        sighand = rcu_dereference_check(tsk->sighand,
                                        lockdep_tasklist_lock_is_held());
        spin_lock(&sighand->siglock);

        posix_cpu_timers_exit(tsk);
        if (group_dead) {
                posix_cpu_timers_exit_group(tsk);
                tty = sig->tty;
                sig->tty = NULL;
        } else {
                /*
                 * This can only happen if the caller is de_thread().
                 * FIXME: this is the temporary hack, we should teach
                 * posix-cpu-timers to handle this case correctly.
                 */
                if (unlikely(has_group_leader_pid(tsk)))
                        posix_cpu_timers_exit_group(tsk);

                /*
                 * If there is any task waiting for the group exit
                 * then notify it:
                 */
                if (sig->notify_count > 0 && !--sig->notify_count)
                        wake_up_process(sig->group_exit_task);

                if (tsk == sig->curr_target)
                        sig->curr_target = next_thread(tsk);
                /*
                 * Accumulate here the counters for all threads but the
                 * group leader as they die, so they can be added into
                 * the process-wide totals when those are taken.
                 * The group leader stays around as a zombie as long
                 * as there are other threads.  When it gets reaped,
                 * the exit.c code will add its counts into these totals.
                 * We won't ever get here for the group leader, since it
                 * will have been the last reference on the signal_struct.
                 */
                sig->utime += tsk->utime;
                sig->stime += tsk->stime;
                sig->gtime += tsk->gtime;
                sig->min_flt += tsk->min_flt;
                sig->maj_flt += tsk->maj_flt;
                sig->nvcsw += tsk->nvcsw;
                sig->nivcsw += tsk->nivcsw;
                sig->inblock += task_io_get_inblock(tsk);
                sig->oublock += task_io_get_oublock(tsk);
                task_io_accounting_add(&sig->ioac, &tsk->ioac);
                sig->sum_sched_runtime += tsk->se.sum_exec_runtime;
        }

        sig->nr_threads--;
        __unhash_process(tsk, group_dead);

        /*
         * Do this under ->siglock, we can race with another thread
         * doing sigqueue_free() if we have SIGQUEUE_PREALLOC signals.
         */
        flush_sigqueue(&tsk->pending);
        tsk->sighand = NULL;
        spin_unlock(&sighand->siglock);

        __cleanup_sighand(sighand);
        clear_tsk_thread_flag(tsk, TIF_SIGPENDING);
        if (group_dead) {
                flush_sigqueue(&sig->shared_pending);
                tty_kref_put(tty);
        }
}
static void delayed_put_task_struct(struct rcu_head *rhp)
{
        struct task_struct *tsk = container_of(rhp, struct task_struct, rcu);

        perf_event_delayed_put(tsk);
        trace_sched_process_free(tsk);
        put_task_struct(tsk);
}
void release_task(struct task_struct *p)
{
        struct task_struct *leader;
        int zap_leader;
repeat:
        /* don't need to get the RCU readlock here - the process is dead and
         * can't be modifying its own credentials. But shut RCU-lockdep up */
        rcu_read_lock();
        atomic_dec(&__task_cred(p)->user->processes);
        rcu_read_unlock();

        proc_flush_task(p);

        write_lock_irq(&tasklist_lock);
        ptrace_release_task(p);
        __exit_signal(p);

        /*
         * If we are the last non-leader member of the thread
         * group, and the leader is zombie, then notify the
         * group leader's parent process. (if it wants notification.)
         */
        zap_leader = 0;
        leader = p->group_leader;
        if (leader != p && thread_group_empty(leader) &&
            leader->exit_state == EXIT_ZOMBIE) {
                /*
                 * If we were the last child thread and the leader has
                 * exited already, and the leader's parent ignores SIGCHLD,
                 * then we are the one who should release the leader.
                 */
                zap_leader = do_notify_parent(leader, leader->exit_signal);
                if (zap_leader)
                        leader->exit_state = EXIT_DEAD;
        }

        write_unlock_irq(&tasklist_lock);
        release_thread(p);
        call_rcu(&p->rcu, delayed_put_task_struct);

        p = leader;
        if (unlikely(zap_leader))
                goto repeat;
}
/*
 * This checks not only the pgrp, but falls back on the pid if no
 * satisfactory pgrp is found. I dunno - gdb doesn't work correctly
 * without this...
 *
 * The caller must hold rcu lock or the tasklist lock.
 */
struct pid *session_of_pgrp(struct pid *pgrp)
{
        struct task_struct *p;
        struct pid *sid = NULL;

        p = pid_task(pgrp, PIDTYPE_PGID);
        if (p == NULL)
                p = pid_task(pgrp, PIDTYPE_PID);
        if (p != NULL)
                sid = task_session(p);

        return sid;
}
/*
 * Determine if a process group is "orphaned", according to the POSIX
 * definition in 2.2.2.52.  Orphaned process groups are not to be affected
 * by terminal-generated stop signals.  Newly orphaned process groups are
 * to receive a SIGHUP and a SIGCONT.
 *
 * "I ask you, have you ever known what it is to be an orphan?"
 */
static int will_become_orphaned_pgrp(struct pid *pgrp, struct task_struct *ignored_task)
{
        struct task_struct *p;

        do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
                if ((p == ignored_task) ||
                    (p->exit_state && thread_group_empty(p)) ||
                    is_global_init(p->real_parent))
                        continue;

                if (task_pgrp(p->real_parent) != pgrp &&
                    task_session(p->real_parent) == task_session(p))
                        return 0;
        } while_each_pid_task(pgrp, PIDTYPE_PGID, p);

        return 1;
}
int is_current_pgrp_orphaned(void)
{
        int retval;

        read_lock(&tasklist_lock);
        retval = will_become_orphaned_pgrp(task_pgrp(current), NULL);
        read_unlock(&tasklist_lock);

        return retval;
}

static bool has_stopped_jobs(struct pid *pgrp)
{
        struct task_struct *p;

        do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
                if (p->signal->flags & SIGNAL_STOP_STOPPED)
                        return true;
        } while_each_pid_task(pgrp, PIDTYPE_PGID, p);

        return false;
}
/*
 * Check to see if any process groups have become orphaned as
 * a result of our exiting, and if they have any stopped jobs,
 * send them a SIGHUP and then a SIGCONT.  (POSIX 3.2.2.2)
 */
static void
kill_orphaned_pgrp(struct task_struct *tsk, struct task_struct *parent)
{
        struct pid *pgrp = task_pgrp(tsk);
        struct task_struct *ignored_task = tsk;

        if (!parent)
                /* exit: our father is in a different pgrp than
                 * we are and we were the only connection outside.
                 */
                parent = tsk->real_parent;
        else
                /* reparent: our child is in a different pgrp than
                 * we are, and it was the only connection outside.
                 */
                ignored_task = NULL;

        if (task_pgrp(parent) != pgrp &&
            task_session(parent) == task_session(tsk) &&
            will_become_orphaned_pgrp(pgrp, ignored_task) &&
            has_stopped_jobs(pgrp)) {
                __kill_pgrp_info(SIGHUP, SEND_SIG_PRIV, pgrp);
                __kill_pgrp_info(SIGCONT, SEND_SIG_PRIV, pgrp);
        }
}
/**
 * reparent_to_kthreadd - Reparent the calling kernel thread to kthreadd
 *
 * If a kernel thread is launched as a result of a system call, or if
 * it ever exits, it should generally reparent itself to kthreadd so it
 * isn't in the way of other processes and is correctly cleaned up on exit.
 *
 * The various task state such as scheduling policy and priority may have
 * been inherited from a user process, so we reset them to sane values here.
 *
 * NOTE that reparent_to_kthreadd() gives the caller full capabilities.
 */
static void reparent_to_kthreadd(void)
{
        write_lock_irq(&tasklist_lock);

        ptrace_unlink(current);
        /* Reparent to kthreadd */
        current->real_parent = current->parent = kthreadd_task;
        list_move_tail(&current->sibling, &current->real_parent->children);

        /* Set the exit signal to SIGCHLD so we signal the new parent on exit */
        current->exit_signal = SIGCHLD;

        if (task_nice(current) < 0)
                set_user_nice(current, 0);
        /* cpus_allowed? */
        /* rt_priority? */
        /* signals? */
        memcpy(current->signal->rlim, init_task.signal->rlim,
               sizeof(current->signal->rlim));

        atomic_inc(&init_cred.usage);
        commit_creds(&init_cred);
        write_unlock_irq(&tasklist_lock);
}
void __set_special_pids(struct pid *pid)
{
        struct task_struct *curr = current->group_leader;

        if (task_session(curr) != pid)
                change_pid(curr, PIDTYPE_SID, pid);

        if (task_pgrp(curr) != pid)
                change_pid(curr, PIDTYPE_PGID, pid);
}

static void set_special_pids(struct pid *pid)
{
        write_lock_irq(&tasklist_lock);
        __set_special_pids(pid);
        write_unlock_irq(&tasklist_lock);
}
/*
 * Let kernel threads use this to say that they allow a certain signal.
 * Must not be used if kthread was cloned with CLONE_SIGHAND.
 */
int allow_signal(int sig)
{
        if (!valid_signal(sig) || sig < 1)
                return -EINVAL;

        spin_lock_irq(&current->sighand->siglock);
        /* This is only needed for daemonize()'ed kthreads */
        sigdelset(&current->blocked, sig);
        /*
         * Kernel threads handle their own signals. Let the signal code
         * know it'll be handled, so that they don't get converted to
         * SIGKILL or just silently dropped.
         */
        current->sighand->action[(sig)-1].sa.sa_handler = (void __user *)2;
        recalc_sigpending();
        spin_unlock_irq(&current->sighand->siglock);
        return 0;
}
EXPORT_SYMBOL(allow_signal);

int disallow_signal(int sig)
{
        if (!valid_signal(sig) || sig < 1)
                return -EINVAL;

        spin_lock_irq(&current->sighand->siglock);
        current->sighand->action[(sig)-1].sa.sa_handler = SIG_IGN;
        recalc_sigpending();
        spin_unlock_irq(&current->sighand->siglock);
        return 0;
}
EXPORT_SYMBOL(disallow_signal);
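
/*
 * Illustrative use from a kernel thread (a sketch added for this
 * listing, not code from the original file; do_work() is a placeholder
 * for the thread's real work):
 *
 *      allow_signal(SIGKILL);
 *      while (!kthread_should_stop()) {
 *              do_work();
 *              if (signal_pending(current))
 *                      break;
 *      }
 *
 * Without allow_signal(), the signal code would convert or silently
 * drop the signal instead of leaving it pending for the thread to see.
 */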
/*
 * Put all the gunge required to become a kernel thread without
 * attached user resources in one place where it belongs.
 */
void daemonize(const char *name, ...)
{
        va_list args;
        sigset_t blocked;

        va_start(args, name);
        vsnprintf(current->comm, sizeof(current->comm), name, args);
        va_end(args);

        /*
         * If we were started as result of loading a module, close all of the
         * user space pages.  We don't need them, and if we didn't close them
         * they would be locked into memory.
         */
        exit_mm(current);
        /*
         * We don't want to get frozen, in case system-wide hibernation
         * or suspend transition begins right now.
         */
        current->flags |= (PF_NOFREEZE | PF_KTHREAD);

        if (current->nsproxy != &init_nsproxy) {
                get_nsproxy(&init_nsproxy);
                switch_task_namespaces(current, &init_nsproxy);
        }
        set_special_pids(&init_struct_pid);
        proc_clear_tty(current);

        /* Block and flush all signals */
        sigfillset(&blocked);
        sigprocmask(SIG_BLOCK, &blocked, NULL);
        flush_signals(current);

        /* Become as one with the init task */
        daemonize_fs_struct();
        exit_files(current);
        current->files = init_task.files;
        atomic_inc(&current->files->count);

        reparent_to_kthreadd();
}
EXPORT_SYMBOL(daemonize);
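
/*
 * Historical note (added for this listing, not from the original
 * file): daemonize() only matters for legacy callers that turned a
 * user process into a kernel thread by hand. Threads created through
 * kthread_create() start without user-space resources and never need
 * it, which is why new code should not call this function.
 */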
static void close_files(struct files_struct *files)
{
        int i, j;
        struct fdtable *fdt;

        j = 0;

        /*
         * It is safe to dereference the fd table without RCU or
         * ->file_lock because this is the last reference to the
         * files structure.  But use RCU to shut RCU-lockdep up.
         */
        rcu_read_lock();
        fdt = files_fdtable(files);
        rcu_read_unlock();
        for (;;) {
                unsigned long set;
                i = j * __NFDBITS;
                if (i >= fdt->max_fds)
                        break;
                set = fdt->open_fds->fds_bits[j++];
                while (set) {
                        if (set & 1) {
                                struct file *file = xchg(&fdt->fd[i], NULL);
                                if (file) {
                                        filp_close(file, files);
                                        cond_resched();
                                }
                        }
                        i++;
                        set >>= 1;
                }
        }
}
struct files_struct *get_files_struct(struct task_struct *task)
{
        struct files_struct *files;

        task_lock(task);
        files = task->files;
        if (files)
                atomic_inc(&files->count);
        task_unlock(task);

        return files;
}

void put_files_struct(struct files_struct *files)
{
        struct fdtable *fdt;

        if (atomic_dec_and_test(&files->count)) {
                close_files(files);
                /*
                 * Free the fd and fdset arrays if we expanded them.
                 * If the fdtable was embedded, pass files for freeing
                 * at the end of the RCU grace period. Otherwise,
                 * you can free files immediately.
                 */
                rcu_read_lock();
                fdt = files_fdtable(files);
                if (fdt != &files->fdtab)
                        kmem_cache_free(files_cachep, files);
                free_fdtable(fdt);
                rcu_read_unlock();
        }
}

void reset_files_struct(struct files_struct *files)
{
        struct task_struct *tsk = current;
        struct files_struct *old;

        old = tsk->files;
        task_lock(tsk);
        tsk->files = files;
        task_unlock(tsk);
        put_files_struct(old);
}

void exit_files(struct task_struct *tsk)
{
        struct files_struct *files = tsk->files;

        if (files) {
                task_lock(tsk);
                tsk->files = NULL;
                task_unlock(tsk);
                put_files_struct(files);
        }
}
#ifdef CONFIG_MM_OWNER
/*
 * A task is exiting.  If it owned this mm, find a new owner for the mm.
 */
void mm_update_next_owner(struct mm_struct *mm)
{
        struct task_struct *c, *g, *p = current;

retry:
        /*
         * If the exiting or execing task is not the owner, it's
         * someone else's problem.
         */
        if (mm->owner != p)
                return;
        /*
         * The current owner is exiting/execing and there are no other
         * candidates.  Do not leave the mm pointing to a possibly
         * freed task structure.
         */
        if (atomic_read(&mm->mm_users) <= 1) {
                mm->owner = NULL;
                return;
        }

        read_lock(&tasklist_lock);
        /*
         * Search in the children
         */
        list_for_each_entry(c, &p->children, sibling) {
                if (c->mm == mm)
                        goto assign_new_owner;
        }

        /*
         * Search in the siblings
         */
        list_for_each_entry(c, &p->real_parent->children, sibling) {
                if (c->mm == mm)
                        goto assign_new_owner;
        }

        /*
         * Search through everything else. We should not get
         * here often
         */
        do_each_thread(g, c) {
                if (c->mm == mm)
                        goto assign_new_owner;
        } while_each_thread(g, c);
        read_unlock(&tasklist_lock);
        /*
         * We found no owner yet mm_users > 1: this implies that we are
         * most likely racing with swapoff (try_to_unuse()) or /proc or
         * ptrace or page migration (get_task_mm()).  Mark owner as NULL.
         */
        mm->owner = NULL;
        return;

assign_new_owner:
        BUG_ON(c == p);
        get_task_struct(c);
        /*
         * The task_lock protects c->mm from changing.
         * We always want mm->owner->mm == mm
         */
        task_lock(c);
        /*
         * Delay read_unlock() till we have the task_lock()
         * to ensure that c does not slip away underneath us
         */
        read_unlock(&tasklist_lock);
        if (c->mm != mm) {
                task_unlock(c);
                put_task_struct(c);
                goto retry;
        }
        mm->owner = c;
        task_unlock(c);
        put_task_struct(c);
}
#endif /* CONFIG_MM_OWNER */
/*
 * Turn us into a lazy TLB process if we
 * aren't already..
 */
static void exit_mm(struct task_struct *tsk)
{
        struct mm_struct *mm = tsk->mm;
        struct core_state *core_state;

        mm_release(tsk, mm);
        if (!mm)
                return;
        /*
         * Serialize with any possible pending coredump.
         * We must hold mmap_sem around checking core_state
         * and clearing tsk->mm.  The core-inducing thread
         * will increment ->nr_threads for each thread in the
         * group with ->mm != NULL.
         */
        down_read(&mm->mmap_sem);
        core_state = mm->core_state;
        if (core_state) {
                struct core_thread self;
                up_read(&mm->mmap_sem);

                self.task = tsk;
                self.next = xchg(&core_state->dumper.next, &self);
                /*
                 * Implies mb(), the result of xchg() must be visible
                 * to core_state->dumper.
                 */
                if (atomic_dec_and_test(&core_state->nr_threads))
                        complete(&core_state->startup);

                for (;;) {
                        set_task_state(tsk, TASK_UNINTERRUPTIBLE);
                        if (!self.task) /* see coredump_finish() */
                                break;
                        schedule();
                }
                __set_task_state(tsk, TASK_RUNNING);
                down_read(&mm->mmap_sem);
        }
        atomic_inc(&mm->mm_count);
        BUG_ON(mm != tsk->active_mm);
        /* more a memory barrier than a real lock */
        task_lock(tsk);
        tsk->mm = NULL;
        up_read(&mm->mmap_sem);
        enter_lazy_tlb(mm, current);
        task_unlock(tsk);
        mm_update_next_owner(mm);
        mmput(mm);
}
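
/*
 * Note (added for this listing, not from the original file): exit_mm()
 * clears tsk->mm but first pins the mm_struct with
 * atomic_inc(&mm->mm_count), so the dying task can keep running on
 * those page tables as tsk->active_mm ("lazy TLB" mode) even after the
 * mmput() drops the mm_users reference. The pinning reference is
 * dropped via mmdrop() once the task has scheduled away for the last
 * time.
 */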
/*
 * When we die, we re-parent all our children.
 * Try to give them to another thread in our thread
 * group, and if no such member exists, give it to
 * the child reaper process (ie "init") in our pid
 * space.
 */
static struct task_struct *find_new_reaper(struct task_struct *father)
        __releases(&tasklist_lock)
        __acquires(&tasklist_lock)
{
        struct pid_namespace *pid_ns = task_active_pid_ns(father);
        struct task_struct *thread;

        thread = father;
        while_each_thread(father, thread) {
                if (thread->flags & PF_EXITING)
                        continue;
                if (unlikely(pid_ns->child_reaper == father))
                        pid_ns->child_reaper = thread;
                return thread;
        }

        if (unlikely(pid_ns->child_reaper == father)) {
                write_unlock_irq(&tasklist_lock);
                if (unlikely(pid_ns == &init_pid_ns))
                        panic("Attempted to kill init!");

                zap_pid_ns_processes(pid_ns);
                write_lock_irq(&tasklist_lock);
                /*
                 * We can not clear ->child_reaper or leave it alone.
                 * There may be stealth EXIT_DEAD tasks on ->children,
                 * forget_original_parent() must move them somewhere.
                 */
                pid_ns->child_reaper = init_pid_ns.child_reaper;
        }

        return pid_ns->child_reaper;
}
/*
 * Any that need to be release_task'd are put on the @dead list.
 */
static void reparent_leader(struct task_struct *father, struct task_struct *p,
                            struct list_head *dead)
{
        list_move_tail(&p->sibling, &p->real_parent->children);

        if (p->exit_state == EXIT_DEAD)
                return;
        /*
         * If this is a threaded reparent there is no need to
         * notify anyone anything has happened.
         */
        if (same_thread_group(p->real_parent, father))
                return;

        /* We don't want people slaying init. */
        p->exit_signal = SIGCHLD;

        /* If it has exited notify the new parent about this child's death. */
        if (!p->ptrace &&
            p->exit_state == EXIT_ZOMBIE && thread_group_empty(p)) {
                if (do_notify_parent(p, p->exit_signal)) {
                        p->exit_state = EXIT_DEAD;
                        list_move_tail(&p->sibling, dead);
                }
        }

        kill_orphaned_pgrp(p, father);
}
static void forget_original_parent(struct task_struct *father)
{
        struct task_struct *p, *n, *reaper;
        LIST_HEAD(dead_children);

        write_lock_irq(&tasklist_lock);
        /*
         * Note that exit_ptrace() and find_new_reaper() might
         * drop tasklist_lock and reacquire it.
         */
        exit_ptrace(father);
        reaper = find_new_reaper(father);

        list_for_each_entry_safe(p, n, &father->children, sibling) {
                struct task_struct *t = p;
                do {
                        t->real_parent = reaper;
                        if (t->parent == father) {
                                BUG_ON(t->ptrace);
                                t->parent = t->real_parent;
                        }
                        if (t->pdeath_signal)
                                group_send_sig_info(t->pdeath_signal,
                                                    SEND_SIG_NOINFO, t);
                } while_each_thread(p, t);
                reparent_leader(father, p, &dead_children);
        }
        write_unlock_irq(&tasklist_lock);

        BUG_ON(!list_empty(&father->children));

        list_for_each_entry_safe(p, n, &dead_children, sibling) {
                list_del_init(&p->sibling);
                release_task(p);
        }
}
/*
 * Send signals to all our closest relatives so that they know
 * to properly mourn us..
 */
static void exit_notify(struct task_struct *tsk, int group_dead)
{
        bool autoreap;

        /*
         * This does two things:
         *
         * A.  Make init inherit all the child processes
         * B.  Check to see if any process groups have become orphaned
         *     as a result of our exiting, and if they have any stopped
         *     jobs, send them a SIGHUP and then a SIGCONT.  (POSIX 3.2.2.2)
         */
        forget_original_parent(tsk);
        exit_task_namespaces(tsk);

        write_lock_irq(&tasklist_lock);
        if (group_dead)
                kill_orphaned_pgrp(tsk->group_leader, NULL);

        if (unlikely(tsk->ptrace)) {
                int sig = thread_group_leader(tsk) &&
                          thread_group_empty(tsk) &&
                          !ptrace_reparented(tsk) ?
                        tsk->exit_signal : SIGCHLD;
                autoreap = do_notify_parent(tsk, sig);
        } else if (thread_group_leader(tsk)) {
                autoreap = thread_group_empty(tsk) &&
                           do_notify_parent(tsk, tsk->exit_signal);
        } else {
                autoreap = true;
        }

        tsk->exit_state = autoreap ? EXIT_DEAD : EXIT_ZOMBIE;

        /* mt-exec, de_thread() is waiting for group leader */
        if (unlikely(tsk->signal->notify_count < 0))
                wake_up_process(tsk->signal->group_exit_task);
        write_unlock_irq(&tasklist_lock);

        /* If the process is dead, release it - nobody will wait for it */
        if (autoreap)
                release_task(tsk);
}
#ifdef CONFIG_DEBUG_STACK_USAGE
static void check_stack_usage(void)
{
        static DEFINE_SPINLOCK(low_water_lock);
        static int lowest_to_date = THREAD_SIZE;
        unsigned long free;

        free = stack_not_used(current);

        if (free >= lowest_to_date)
                return;

        spin_lock(&low_water_lock);
        if (free < lowest_to_date) {
                printk(KERN_WARNING "%s used greatest stack depth: %lu bytes "
                                "left\n",
                                current->comm, free);
                lowest_to_date = free;
        }
        spin_unlock(&low_water_lock);
}
#else
static inline void check_stack_usage(void) {}
#endif
void do_exit(long code)
{
        struct task_struct *tsk = current;
        int group_dead;

        profile_task_exit(tsk);

        WARN_ON(blk_needs_flush_plug(tsk));

        if (unlikely(in_interrupt()))
                panic("Aiee, killing interrupt handler!");
        if (unlikely(!tsk->pid))
                panic("Attempted to kill the idle task!");

        /*
         * If do_exit is called because this process oopsed, it's possible
         * that get_fs() was left as KERNEL_DS, so reset it to USER_DS before
         * continuing. Amongst other possible reasons, this is to prevent
         * mm_release()->clear_child_tid() from writing to a user-controlled
         * kernel address.
         */
        set_fs(USER_DS);

        ptrace_event(PTRACE_EVENT_EXIT, code);

        validate_creds_for_do_exit(tsk);

        /*
         * We're taking recursive faults here in do_exit. Safest is to just
         * leave this task alone and wait for reboot.
         */
        if (unlikely(tsk->flags & PF_EXITING)) {
                printk(KERN_ALERT
                        "Fixing recursive fault but reboot is needed!\n");
                /*
                 * We can do this unlocked here. The futex code uses
                 * this flag just to verify whether the pi state
                 * cleanup has been done or not. In the worst case it
                 * loops once more. We pretend that the cleanup was
                 * done as there is no way to return. Either the
                 * OWNER_DIED bit is set by now or we push the blocked
                 * task into the wait-forever nirvana as well.
                 */
                tsk->flags |= PF_EXITPIDONE;
                set_current_state(TASK_UNINTERRUPTIBLE);
                schedule();
        }

        exit_signals(tsk);  /* sets PF_EXITING */
        /*
         * tsk->flags are checked in the futex code to protect against
         * an exiting task cleaning up the robust pi futexes.
         */
        smp_mb();
        raw_spin_unlock_wait(&tsk->pi_lock);

        exit_irq_thread();

        if (unlikely(in_atomic()))
                printk(KERN_INFO "note: %s[%d] exited with preempt_count %d\n",
                                current->comm, task_pid_nr(current),
                                preempt_count());

        acct_update_integrals(tsk);
        /* sync mm's RSS info before statistics gathering */
        if (tsk->mm)
                sync_mm_rss(tsk, tsk->mm);
        group_dead = atomic_dec_and_test(&tsk->signal->live);
        if (group_dead) {
                hrtimer_cancel(&tsk->signal->real_timer);
                exit_itimers(tsk->signal);
                if (tsk->mm)
                        setmax_mm_hiwater_rss(&tsk->signal->maxrss, tsk->mm);
        }
        acct_collect(code, group_dead);
        if (group_dead)
                tty_audit_exit();
        audit_free(tsk);

        tsk->exit_code = code;
        taskstats_exit(tsk, group_dead);

        exit_mm(tsk);

        if (group_dead)
                acct_process();
        trace_sched_process_exit(tsk);

        exit_sem(tsk);
        exit_shm(tsk);
        exit_files(tsk);
        exit_fs(tsk);
        check_stack_usage();
        exit_thread();

        /*
         * Flush inherited counters to the parent - before the parent
         * gets woken up by child-exit notifications.
         *
         * because of cgroup mode, must be called before cgroup_exit()
         */
        perf_event_exit_task(tsk);

        cgroup_exit(tsk, 1);

        if (group_dead)
                disassociate_ctty(1);

        module_put(task_thread_info(tsk)->exec_domain->module);

        proc_exit_connector(tsk);

        /*
         * FIXME: do that only when needed, using sched_exit tracepoint
         */
        ptrace_put_breakpoints(tsk);

        exit_notify(tsk, group_dead);
#ifdef CONFIG_NUMA
        task_lock(tsk);
        mpol_put(tsk->mempolicy);
        tsk->mempolicy = NULL;
        task_unlock(tsk);
#endif
#ifdef CONFIG_FUTEX
        if (unlikely(current->pi_state_cache))
                kfree(current->pi_state_cache);
#endif
        /*
         * Make sure we are holding no locks:
         */
        debug_check_no_locks_held(tsk);
        /*
         * We can do this unlocked here. The futex code uses this flag
         * just to verify whether the pi state cleanup has been done
         * or not. In the worst case it loops once more.
         */
        tsk->flags |= PF_EXITPIDONE;

        if (tsk->io_context)
                exit_io_context(tsk);

        if (tsk->splice_pipe)
                __free_pipe_info(tsk->splice_pipe);

        validate_creds_for_do_exit(tsk);

        preempt_disable();
        if (tsk->nr_dirtied)
                __this_cpu_add(dirty_throttle_leaks, tsk->nr_dirtied);
        exit_rcu();

        /*
         * The setting of TASK_RUNNING by try_to_wake_up() may be delayed
         * when the following two conditions become true.
         *   - There is race condition of mmap_sem (It is acquired by
         *     exit_mm()), and
         *   - SMI occurs before setting TASK_RUNNING.
         *     (or hypervisor of virtual machine switches to other guest)
         * As a result, we may become TASK_RUNNING after becoming TASK_DEAD
         *
         * To avoid it, we have to wait for releasing tsk->pi_lock which
         * is held by try_to_wake_up()
         */
        smp_mb();
        raw_spin_unlock_wait(&tsk->pi_lock);

        /* causes final put_task_struct in finish_task_switch(). */
        tsk->state = TASK_DEAD;
        tsk->flags |= PF_NOFREEZE;      /* tell freezer to ignore us */
        schedule();
        BUG();
        /* Avoid "noreturn function does return".  */
        for (;;)
                cpu_relax();    /* For when BUG is null */
}
EXPORT_SYMBOL_GPL(do_exit);
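
/*
 * Note (added for this listing, not from the original file): the final
 * schedule() in do_exit() never returns. With tsk->state set to
 * TASK_DEAD, finish_task_switch() on the next task detects the dead
 * predecessor and performs the final put_task_struct(), so control
 * must never come back here; the BUG() and cpu_relax() loop only
 * guard against that impossible return.
 */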
void complete_and_exit(struct completion *comp, long code)
{
        if (comp)
                complete(comp);

        do_exit(code);
}
EXPORT_SYMBOL(complete_and_exit);

SYSCALL_DEFINE1(exit, int, error_code)
{
        do_exit((error_code&0xff)<<8);
}
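
/*
 * Note (added for this listing, not from the original file): the shift
 * packs the user-supplied status into the classic wait-status layout,
 * e.g. exit(1) yields an exit_code of 0x0100, from which userspace
 * WEXITSTATUS() recovers the 1 by reading bits 8-15.
 */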
/*
 * Take down every thread in the group.  This is called by fatal signals
 * as well as by sys_exit_group (below).
 */
void
do_group_exit(int exit_code)
{
        struct signal_struct *sig = current->signal;

        BUG_ON(exit_code & 0x80); /* core dumps don't get here */

        if (signal_group_exit(sig))
                exit_code = sig->group_exit_code;
        else if (!thread_group_empty(current)) {
                struct sighand_struct *const sighand = current->sighand;
                spin_lock_irq(&sighand->siglock);
                if (signal_group_exit(sig))
                        /* Another thread got here before we took the lock.  */
                        exit_code = sig->group_exit_code;
                else {
                        sig->group_exit_code = exit_code;
                        sig->flags = SIGNAL_GROUP_EXIT;
                        zap_other_threads(current);
                }
                spin_unlock_irq(&sighand->siglock);
        }

        do_exit(exit_code);
        /* NOTREACHED */
}
/*
 * this kills every thread in the thread group. Note that any externally
 * wait4()-ing process will get the correct exit code - even if this
 * thread is not the thread group leader.
 */
SYSCALL_DEFINE1(exit_group, int, error_code)
{
        do_group_exit((error_code & 0xff) << 8);
        /* NOTREACHED */
        return 0;
}
struct wait_opts {
        enum pid_type           wo_type;
        int                     wo_flags;
        struct pid              *wo_pid;

        struct siginfo __user   *wo_info;
        int __user              *wo_stat;
        struct rusage __user    *wo_rusage;

        wait_queue_t            child_wait;
        int                     notask_error;
};

static inline
struct pid *task_pid_type(struct task_struct *task, enum pid_type type)
{
        if (type != PIDTYPE_PID)
                task = task->group_leader;
        return task->pids[type].pid;
}

static int eligible_pid(struct wait_opts *wo, struct task_struct *p)
{
        return wo->wo_type == PIDTYPE_MAX ||
               task_pid_type(p, wo->wo_type) == wo->wo_pid;
}

static int eligible_child(struct wait_opts *wo, struct task_struct *p)
{
        if (!eligible_pid(wo, p))
                return 0;
        /* Wait for all children (clone and not) if __WALL is set;
         * otherwise, wait for clone children *only* if __WCLONE is
         * set; otherwise, wait for non-clone children *only*.  (Note:
         * A "clone" child here is one that reports to its parent
         * using a signal other than SIGCHLD.) */
        if (((p->exit_signal != SIGCHLD) ^ !!(wo->wo_flags & __WCLONE))
            && !(wo->wo_flags & __WALL))
                return 0;

        return 1;
}
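
/*
 * Note (added for this listing, not from the original file): the XOR
 * above implements the waitpid(2) matching rule. A child whose
 * exit_signal is SIGCHLD (the normal fork() case) matches a plain
 * wait; a child cloned with a different exit signal (or none) matches
 * only when the waiter passes __WCLONE; __WALL matches both kinds.
 */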
static int wait_noreap_copyout(struct wait_opts *wo, struct task_struct *p,
                               pid_t pid, uid_t uid, int why, int status)
{
        struct siginfo __user *infop;
        int retval = wo->wo_rusage
                ? getrusage(p, RUSAGE_BOTH, wo->wo_rusage) : 0;

        put_task_struct(p);
        infop = wo->wo_info;
        if (infop) {
                if (!retval)
                        retval = put_user(SIGCHLD, &infop->si_signo);
                if (!retval)
                        retval = put_user(0, &infop->si_errno);
                if (!retval)
                        retval = put_user((short)why, &infop->si_code);
                if (!retval)
                        retval = put_user(pid, &infop->si_pid);
                if (!retval)
                        retval = put_user(uid, &infop->si_uid);
                if (!retval)
                        retval = put_user(status, &infop->si_status);
        }
        if (!retval)
                retval = pid;
        return retval;
}
/*
 * Handle sys_wait4 work for one task in state EXIT_ZOMBIE.  We hold
 * read_lock(&tasklist_lock) on entry.  If we return zero, we still hold
 * the lock and this task is uninteresting.  If we return nonzero, we have
 * released the lock and the system call should return.
 */
static int wait_task_zombie(struct wait_opts *wo, struct task_struct *p)
{
        unsigned long state;
        int retval, status, traced;
        pid_t pid = task_pid_vnr(p);
        uid_t uid = __task_cred(p)->uid;
        struct siginfo __user *infop;

        if (!likely(wo->wo_flags & WEXITED))
                return 0;

        if (unlikely(wo->wo_flags & WNOWAIT)) {
                int exit_code = p->exit_code;
                int why;

                get_task_struct(p);
                read_unlock(&tasklist_lock);
                if ((exit_code & 0x7f) == 0) {
                        why = CLD_EXITED;
                        status = exit_code >> 8;
                } else {
                        why = (exit_code & 0x80) ? CLD_DUMPED : CLD_KILLED;
                        status = exit_code & 0x7f;
                }
                return wait_noreap_copyout(wo, p, pid, uid, why, status);
        }

        /*
         * Try to move the task's state to DEAD
         * only one thread is allowed to do this:
         */
        state = xchg(&p->exit_state, EXIT_DEAD);
        if (state != EXIT_ZOMBIE) {
                BUG_ON(state != EXIT_DEAD);
                return 0;
        }

        traced = ptrace_reparented(p);
        /*
         * It can be ptraced but not reparented, check
         * thread_group_leader() to filter out sub-threads.
         */
        if (likely(!traced) && thread_group_leader(p)) {
                struct signal_struct *psig;
                struct signal_struct *sig;
                unsigned long maxrss;
                cputime_t tgutime, tgstime;

                /*
                 * The resource counters for the group leader are in its
                 * own task_struct.  Those for dead threads in the group
                 * are in its signal_struct, as are those for the child
                 * processes it has previously reaped.  All these
                 * accumulate in the parent's signal_struct c* fields.
                 *
                 * We don't bother to take a lock here to protect these
                 * p->signal fields, because they are only touched by
                 * __exit_signal, which runs with tasklist_lock
                 * write-locked anyway, and so is excluded here.  We do
                 * need to protect the access to parent->signal fields,
                 * as other threads in the parent group can be right
                 * here reaping other children at the same time.
                 *
                 * We use thread_group_times() to get times for the thread
                 * group, which consolidates times for all threads in the
                 * group including the group leader.
                 */
                thread_group_times(p, &tgutime, &tgstime);
                spin_lock_irq(&p->real_parent->sighand->siglock);
                psig = p->real_parent->signal;
                sig = p->signal;
                psig->cutime += tgutime + sig->cutime;
                psig->cstime += tgstime + sig->cstime;
                psig->cgtime += p->gtime + sig->gtime + sig->cgtime;
                psig->cmin_flt +=
                        p->min_flt + sig->min_flt + sig->cmin_flt;
                psig->cmaj_flt +=
                        p->maj_flt + sig->maj_flt + sig->cmaj_flt;
                psig->cnvcsw +=
                        p->nvcsw + sig->nvcsw + sig->cnvcsw;
                psig->cnivcsw +=
                        p->nivcsw + sig->nivcsw + sig->cnivcsw;
                psig->cinblock +=
                        task_io_get_inblock(p) +
                        sig->inblock + sig->cinblock;
                psig->coublock +=
                        task_io_get_oublock(p) +
                        sig->oublock + sig->coublock;
                maxrss = max(sig->maxrss, sig->cmaxrss);
                if (psig->cmaxrss < maxrss)
                        psig->cmaxrss = maxrss;
                task_io_accounting_add(&psig->ioac, &p->ioac);
                task_io_accounting_add(&psig->ioac, &sig->ioac);
                spin_unlock_irq(&p->real_parent->sighand->siglock);
        }

        /*
         * Now we are sure this task is interesting, and no other
         * thread can reap it because we set its state to EXIT_DEAD.
         */
        read_unlock(&tasklist_lock);

        retval = wo->wo_rusage
                ? getrusage(p, RUSAGE_BOTH, wo->wo_rusage) : 0;
        status = (p->signal->flags & SIGNAL_GROUP_EXIT)
                ? p->signal->group_exit_code : p->exit_code;
        if (!retval && wo->wo_stat)
                retval = put_user(status, wo->wo_stat);

        infop = wo->wo_info;
        if (!retval && infop)
                retval = put_user(SIGCHLD, &infop->si_signo);
        if (!retval && infop)
                retval = put_user(0, &infop->si_errno);
        if (!retval && infop) {
                int why;

                if ((status & 0x7f) == 0) {
                        why = CLD_EXITED;
                        status >>= 8;
                } else {
                        why = (status & 0x80) ? CLD_DUMPED : CLD_KILLED;
                        status &= 0x7f;
                }
                retval = put_user((short)why, &infop->si_code);
                if (!retval)
                        retval = put_user(status, &infop->si_status);
        }
        if (!retval && infop)
                retval = put_user(pid, &infop->si_pid);
        if (!retval && infop)
                retval = put_user(uid, &infop->si_uid);
        if (!retval)
                retval = pid;

        if (traced) {
                write_lock_irq(&tasklist_lock);
                /* We dropped tasklist, ptracer could die and untrace */
                ptrace_unlink(p);
                /*
                 * If this is not a sub-thread, notify the parent.
                 * If parent wants a zombie, don't release it now.
                 */
                if (thread_group_leader(p) &&
                    !do_notify_parent(p, p->exit_signal)) {
                        p->exit_state = EXIT_ZOMBIE;
                        p = NULL;
                }
                write_unlock_irq(&tasklist_lock);
        }
        if (p != NULL)
                release_task(p);

        return retval;
}
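
/*
 * Note (added for this listing, not from the original file): the
 * exit-code decode above follows the classic wait-status layout. Bits
 * 0-6 hold the terminating signal (zero means a normal exit whose
 * status lives in bits 8-15) and bit 7 records whether a core dump was
 * written; e.g. 0x008b means killed by signal 11 (SIGSEGV) with a core
 * dump, reported as CLD_DUMPED.
 */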
static int *task_stopped_code(struct task_struct *p, bool ptrace)
{
        if (ptrace) {
                if (task_is_stopped_or_traced(p) &&
                    !(p->jobctl & JOBCTL_LISTENING))
                        return &p->exit_code;
        } else {
                if (p->signal->flags & SIGNAL_STOP_STOPPED)
                        return &p->signal->group_exit_code;
        }
        return NULL;
}
/**
 * wait_task_stopped - Wait for %TASK_STOPPED or %TASK_TRACED
 * @wo: wait options
 * @ptrace: is the wait for ptrace
 * @p: task to wait for
 *
 * Handle sys_wait4() work for %p in state %TASK_STOPPED or %TASK_TRACED.
 *
 * CONTEXT:
 * read_lock(&tasklist_lock), which is released if return value is
 * non-zero.  Also, grabs and releases @p->sighand->siglock.
 *
 * RETURNS:
 * 0 if wait condition didn't exist and search for other wait conditions
 * should continue.  Non-zero return, -errno on failure and @p's pid on
 * success, implies that tasklist_lock is released and wait condition
 * search should terminate.
 */
static int wait_task_stopped(struct wait_opts *wo,
                             int ptrace, struct task_struct *p)
{
        struct siginfo __user *infop;
        int retval, exit_code, *p_code, why;
        uid_t uid = 0; /* unneeded, required by compiler */
        pid_t pid;

        /*
         * Traditionally we see ptrace'd stopped tasks regardless of options.
         */
        if (!ptrace && !(wo->wo_flags & WUNTRACED))
                return 0;

        if (!task_stopped_code(p, ptrace))
                return 0;

        exit_code = 0;
        spin_lock_irq(&p->sighand->siglock);

        p_code = task_stopped_code(p, ptrace);
        if (unlikely(!p_code))
                goto unlock_sig;

        exit_code = *p_code;
        if (!exit_code)
                goto unlock_sig;

        if (!unlikely(wo->wo_flags & WNOWAIT))
                *p_code = 0;

        uid = task_uid(p);
unlock_sig:
        spin_unlock_irq(&p->sighand->siglock);
        if (!exit_code)
                return 0;

        /*
         * Now we are pretty sure this task is interesting.
         * Make sure it doesn't get reaped out from under us while we
         * give up the lock and then examine it below.  We don't want to
         * keep holding onto the tasklist_lock while we call getrusage and
         * possibly take page faults for user memory.
         */
        get_task_struct(p);
        pid = task_pid_vnr(p);
        why = ptrace ? CLD_TRAPPED : CLD_STOPPED;
        read_unlock(&tasklist_lock);

        if (unlikely(wo->wo_flags & WNOWAIT))
                return wait_noreap_copyout(wo, p, pid, uid, why, exit_code);

        retval = wo->wo_rusage
                ? getrusage(p, RUSAGE_BOTH, wo->wo_rusage) : 0;
        if (!retval && wo->wo_stat)
                retval = put_user((exit_code << 8) | 0x7f, wo->wo_stat);

        infop = wo->wo_info;
        if (!retval && infop)
                retval = put_user(SIGCHLD, &infop->si_signo);
        if (!retval && infop)
                retval = put_user(0, &infop->si_errno);
        if (!retval && infop)
                retval = put_user((short)why, &infop->si_code);
        if (!retval && infop)
                retval = put_user(exit_code, &infop->si_status);
        if (!retval && infop)
                retval = put_user(pid, &infop->si_pid);
        if (!retval && infop)
                retval = put_user(uid, &infop->si_uid);
        if (!retval)
                retval = pid;
        put_task_struct(p);

        BUG_ON(!retval);
        return retval;
}
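
/*
 * Note (added for this listing, not from the original file):
 * (exit_code << 8) | 0x7f is the "stopped" wait status. A child
 * stopped by SIGSTOP (19, i.e. 0x13) reports 0x137f, which userspace
 * decodes with WIFSTOPPED() and WSTOPSIG().
 */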
/*
 * Handle do_wait work for one task in a live, non-stopped state.
 * read_lock(&tasklist_lock) on entry.  If we return zero, we still hold
 * the lock and this task is uninteresting.  If we return nonzero, we have
 * released the lock and the system call should return.
 */
static int wait_task_continued(struct wait_opts *wo, struct task_struct *p)
{
        int retval;
        pid_t pid;
        uid_t uid;

        if (!unlikely(wo->wo_flags & WCONTINUED))
                return 0;

        if (!(p->signal->flags & SIGNAL_STOP_CONTINUED))
                return 0;

        spin_lock_irq(&p->sighand->siglock);
        /* Re-check with the lock held.  */
        if (!(p->signal->flags & SIGNAL_STOP_CONTINUED)) {
                spin_unlock_irq(&p->sighand->siglock);
                return 0;
        }
        if (!unlikely(wo->wo_flags & WNOWAIT))
                p->signal->flags &= ~SIGNAL_STOP_CONTINUED;
        uid = task_uid(p);
        spin_unlock_irq(&p->sighand->siglock);

        pid = task_pid_vnr(p);
        get_task_struct(p);
        read_unlock(&tasklist_lock);

        if (!wo->wo_info) {
                retval = wo->wo_rusage
                        ? getrusage(p, RUSAGE_BOTH, wo->wo_rusage) : 0;
                put_task_struct(p);
                if (!retval && wo->wo_stat)
                        retval = put_user(0xffff, wo->wo_stat);
                if (!retval)
                        retval = pid;
        } else {
                retval = wait_noreap_copyout(wo, p, pid, uid,
                                             CLD_CONTINUED, SIGCONT);
                BUG_ON(retval == 0);
        }

        return retval;
}
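
/*
 * Note (added for this listing, not from the original file): 0xffff is
 * the distinguished "continued" wait status that userspace
 * WIFCONTINUED() tests for; it cannot collide with any exited or
 * stopped encoding.
 */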
/*
 * Consider @p for a wait by @parent.
 *
 * -ECHILD should be in ->notask_error before the first call.
 * Returns nonzero for a final return, when we have unlocked tasklist_lock.
 * Returns zero if the search for a child should continue;
 * then ->notask_error is 0 if @p is an eligible child,
 * or another error from security_task_wait(), or still -ECHILD.
 */
static int wait_consider_task(struct wait_opts *wo, int ptrace,
                              struct task_struct *p)
{
        int ret = eligible_child(wo, p);
        if (!ret)
                return ret;

        ret = security_task_wait(p);
        if (unlikely(ret < 0)) {
                /*
                 * If we have not yet seen any eligible child,
                 * then let this error code replace -ECHILD.
                 * A permission error will give the user a clue
                 * to look for security policy problems, rather
                 * than for mysterious wait bugs.
                 */
                if (wo->notask_error)
                        wo->notask_error = ret;
                return 0;
        }

        /* dead body doesn't have much to contribute */
        if (unlikely(p->exit_state == EXIT_DEAD)) {
                /*
                 * But do not ignore this task until the tracer does
                 * wait_task_zombie()->do_notify_parent().
                 */
                if (likely(!ptrace) && unlikely(ptrace_reparented(p)))
                        wo->notask_error = 0;
                return 0;
        }

        /* slay zombie? */
        if (p->exit_state == EXIT_ZOMBIE) {
                /*
                 * A zombie ptracee is only visible to its ptracer.
                 * Notification and reaping will be cascaded to the real
                 * parent when the ptracer detaches.
                 */
                if (likely(!ptrace) && unlikely(p->ptrace)) {
                        /* it will become visible, clear notask_error */
                        wo->notask_error = 0;
                        return 0;
                }

                /* we don't reap group leaders with subthreads */
                if (!delay_group_leader(p))
                        return wait_task_zombie(wo, p);

                /*
                 * Allow access to stopped/continued state via zombie by
                 * falling through.  Clearing of notask_error is complex.
                 *
                 * When !@ptrace:
                 *
                 * If WEXITED is set, notask_error should naturally be
                 * cleared.  If not, subset of WSTOPPED|WCONTINUED is set,
                 * so, if there are live subthreads, there are events to
                 * wait for.  If all subthreads are dead, it's still safe
                 * to clear - this function will be called again in a
                 * finite amount of time once all the subthreads are
                 * released and will then return without clearing.
                 *
                 * When @ptrace:
                 *
                 * Stopped state is per-task and thus can't change once the
                 * target task dies.  Only continued and exited can happen.
                 * Clear notask_error if WCONTINUED | WEXITED.
                 */
                if (likely(!ptrace) || (wo->wo_flags & (WCONTINUED | WEXITED)))
                        wo->notask_error = 0;
        } else {
                /*
                 * If @p is ptraced by a task in its real parent's group,
                 * hide group stop/continued state when looking at @p as
                 * the real parent; otherwise, a single stop can be
                 * reported twice as group and ptrace stops.
                 *
                 * If a ptracer wants to distinguish the two events for its
                 * own children, it should create a separate process which
                 * takes the role of real parent.
                 */
                if (likely(!ptrace) && p->ptrace && !ptrace_reparented(p))
                        return 0;

                /*
                 * @p is alive and it's gonna stop, continue or exit, so
                 * there always is something to wait for.
                 */
                wo->notask_error = 0;
        }

        /*
         * Wait for stopped.  Depending on @ptrace, different stopped state
         * is used and the two don't interact with each other.
         */
        ret = wait_task_stopped(wo, ptrace, p);
        if (ret)
                return ret;

        /*
         * Wait for continued.  There's only one continued state and the
         * ptracer can consume it which can confuse the real parent.  Don't
         * use WCONTINUED from ptracer.  You don't need or want it.
         */
        return wait_task_continued(wo, p);
}
/*
 * Do the work of do_wait() for one thread in the group, @tsk.
 *
 * -ECHILD should be in ->notask_error before the first call.
 * Returns nonzero for a final return, when we have unlocked tasklist_lock.
 * Returns zero if the search for a child should continue; then
 * ->notask_error is 0 if there were any eligible children,
 * or another error from security_task_wait(), or still -ECHILD.
 */
static int do_wait_thread(struct wait_opts *wo, struct task_struct *tsk)
{
        struct task_struct *p;

        list_for_each_entry(p, &tsk->children, sibling) {
                int ret = wait_consider_task(wo, 0, p);
                if (ret)
                        return ret;
        }

        return 0;
}

static int ptrace_do_wait(struct wait_opts *wo, struct task_struct *tsk)
{
        struct task_struct *p;

        list_for_each_entry(p, &tsk->ptraced, ptrace_entry) {
                int ret = wait_consider_task(wo, 1, p);
                if (ret)
                        return ret;
        }

        return 0;
}
static int child_wait_callback(wait_queue_t *wait, unsigned mode,
                               int sync, void *key)
{
        struct wait_opts *wo = container_of(wait, struct wait_opts,
                                            child_wait);
        struct task_struct *p = key;

        if (!eligible_pid(wo, p))
                return 0;

        if ((wo->wo_flags & __WNOTHREAD) && wait->private != p->parent)
                return 0;

        return default_wake_function(wait, mode, sync, key);
}

void __wake_up_parent(struct task_struct *p, struct task_struct *parent)
{
        __wake_up_sync_key(&parent->signal->wait_chldexit,
                           TASK_INTERRUPTIBLE, 1, p);
}
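
/*
 * Note (added for this listing, not from the original file): do_wait()
 * below parks on ->signal->wait_chldexit with child_wait_callback() as
 * its wake function. When a child changes state, the notification path
 * calls __wake_up_parent() with the child as the key, so only waiters
 * whose pid criteria match that child are actually woken.
 */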
static long do_wait(struct wait_opts *wo)
{
        struct task_struct *tsk;
        int retval;

        trace_sched_process_wait(wo->wo_pid);

        init_waitqueue_func_entry(&wo->child_wait, child_wait_callback);
        wo->child_wait.private = current;
        add_wait_queue(&current->signal->wait_chldexit, &wo->child_wait);
repeat:
        /*
         * If there is nothing that can match our criteria just get out.
         * We will clear ->notask_error to zero if we see any child that
         * might later match our criteria, even if we are not able to reap
         * it yet.
         */
        wo->notask_error = -ECHILD;
        if ((wo->wo_type < PIDTYPE_MAX) &&
            (!wo->wo_pid || hlist_empty(&wo->wo_pid->tasks[wo->wo_type])))
                goto notask;

        set_current_state(TASK_INTERRUPTIBLE);
        read_lock(&tasklist_lock);
        tsk = current;
        do {
                retval = do_wait_thread(wo, tsk);
                if (retval)
                        goto end;

                retval = ptrace_do_wait(wo, tsk);
                if (retval)
                        goto end;

                if (wo->wo_flags & __WNOTHREAD)
                        break;
        } while_each_thread(current, tsk);
        read_unlock(&tasklist_lock);

notask:
        retval = wo->notask_error;
        if (!retval && !(wo->wo_flags & WNOHANG)) {
                retval = -ERESTARTSYS;
                if (!signal_pending(current)) {
                        schedule();
                        goto repeat;
                }
        }
end:
        __set_current_state(TASK_RUNNING);
        remove_wait_queue(&current->signal->wait_chldexit, &wo->child_wait);
        return retval;
}
SYSCALL_DEFINE5(waitid, int, which, pid_t, upid, struct siginfo __user *,
                infop, int, options, struct rusage __user *, ru)
{
        struct wait_opts wo;
        struct pid *pid = NULL;
        enum pid_type type;
        long ret;

        if (options & ~(WNOHANG|WNOWAIT|WEXITED|WSTOPPED|WCONTINUED))
                return -EINVAL;
        if (!(options & (WEXITED|WSTOPPED|WCONTINUED)))
                return -EINVAL;

        switch (which) {
        case P_ALL:
                type = PIDTYPE_MAX;
                break;
        case P_PID:
                type = PIDTYPE_PID;
                if (upid <= 0)
                        return -EINVAL;
                break;
        case P_PGID:
                type = PIDTYPE_PGID;
                if (upid <= 0)
                        return -EINVAL;
                break;
        default:
                return -EINVAL;
        }

        if (type < PIDTYPE_MAX)
                pid = find_get_pid(upid);

        wo.wo_type   = type;
        wo.wo_pid    = pid;
        wo.wo_flags  = options;
        wo.wo_info   = infop;
        wo.wo_stat   = NULL;
        wo.wo_rusage = ru;
        ret = do_wait(&wo);

        if (ret > 0) {
                ret = 0;
        } else if (infop) {
                /*
                 * For a WNOHANG return, clear out all the fields
                 * we would set so the user can easily tell the
                 * difference.
                 */
                if (!ret)
                        ret = put_user(0, &infop->si_signo);
                if (!ret)
                        ret = put_user(0, &infop->si_errno);
                if (!ret)
                        ret = put_user(0, &infop->si_code);
                if (!ret)
                        ret = put_user(0, &infop->si_pid);
                if (!ret)
                        ret = put_user(0, &infop->si_uid);
                if (!ret)
                        ret = put_user(0, &infop->si_status);
        }

        put_pid(pid);

        /* avoid REGPARM breakage on x86: */
        asmlinkage_protect(5, ret, which, upid, infop, options, ru);
        return ret;
}
SYSCALL_DEFINE4(wait4, pid_t, upid, int __user *, stat_addr,
                int, options, struct rusage __user *, ru)
{
        struct wait_opts wo;
        struct pid *pid = NULL;
        enum pid_type type;
        long ret;

        if (options & ~(WNOHANG|WUNTRACED|WCONTINUED|
                        __WNOTHREAD|__WCLONE|__WALL))
                return -EINVAL;

        if (upid == -1)
                type = PIDTYPE_MAX;
        else if (upid < 0) {
                type = PIDTYPE_PGID;
                pid = find_get_pid(-upid);
        } else if (upid == 0) {
                type = PIDTYPE_PGID;
                pid = get_task_pid(current, PIDTYPE_PGID);
        } else /* upid > 0 */ {
                type = PIDTYPE_PID;
                pid = find_get_pid(upid);
        }

        wo.wo_type   = type;
        wo.wo_pid    = pid;
        wo.wo_flags  = options | WEXITED;
        wo.wo_info   = NULL;
        wo.wo_stat   = stat_addr;
        wo.wo_rusage = ru;
        ret = do_wait(&wo);
        put_pid(pid);

        /* avoid REGPARM breakage on x86: */
        asmlinkage_protect(4, ret, upid, stat_addr, options, ru);
        return ret;
}
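
/*
 * Note (added for this listing, not from the original file): the upid
 * decode above mirrors the waitpid(2) convention - -1 waits for any
 * child, 0 for the caller's own process group, a negative value for
 * process group -upid, and a positive value for exactly that pid.
 */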
#ifdef __ARCH_WANT_SYS_WAITPID

/*
 * sys_waitpid() remains for compatibility. waitpid() should be
 * implemented by calling sys_wait4() from libc.a.
 */
SYSCALL_DEFINE3(waitpid, pid_t, pid, int __user *, stat_addr, int, options)
{
        return sys_wait4(pid, stat_addr, options, NULL);
}

#endif