/*
 *  linux/kernel/exit.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/capability.h>
#include <linux/completion.h>
#include <linux/personality.h>
#include <linux/tty.h>
#include <linux/iocontext.h>
#include <linux/key.h>
#include <linux/security.h>
#include <linux/cpu.h>
#include <linux/acct.h>
#include <linux/tsacct_kern.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/binfmts.h>
#include <linux/nsproxy.h>
#include <linux/pid_namespace.h>
#include <linux/ptrace.h>
#include <linux/profile.h>
#include <linux/mount.h>
#include <linux/proc_fs.h>
#include <linux/kthread.h>
#include <linux/mempolicy.h>
#include <linux/taskstats_kern.h>
#include <linux/delayacct.h>
#include <linux/freezer.h>
#include <linux/cgroup.h>
#include <linux/syscalls.h>
#include <linux/signal.h>
#include <linux/posix-timers.h>
#include <linux/cn_proc.h>
#include <linux/mutex.h>
#include <linux/futex.h>
#include <linux/pipe_fs_i.h>
#include <linux/audit.h> /* for audit_free() */
#include <linux/resource.h>
#include <linux/blkdev.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/tracehook.h>
#include <linux/fs_struct.h>
#include <linux/init_task.h>
#include <linux/perf_event.h>
#include <trace/events/sched.h>
#include <linux/hw_breakpoint.h>
#include <linux/oom.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm/pgtable.h>
#include <asm/mmu_context.h>

static void exit_mm(struct task_struct *tsk);
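
/*
 * Drop a task from the pid hashes and the global bookkeeping lists.
 * Called from __exit_signal() with tasklist_lock write-locked; when
 * @group_dead, the group-wide PGID/SID pids are detached and the task
 * leaves the global tasks and sibling lists as well.
 */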
static void __unhash_process(struct task_struct *p, bool group_dead)
{
        nr_threads--;
        detach_pid(p, PIDTYPE_PID);
        if (group_dead) {
                detach_pid(p, PIDTYPE_PGID);
                detach_pid(p, PIDTYPE_SID);

                list_del_rcu(&p->tasks);
                list_del_init(&p->sibling);
                __this_cpu_dec(process_counts);
        }
        list_del_rcu(&p->thread_group);
}

/*
 * This function expects the tasklist_lock write-locked.
 */
static void __exit_signal(struct task_struct *tsk)
{
        struct signal_struct *sig = tsk->signal;
        bool group_dead = thread_group_leader(tsk);
        struct sighand_struct *sighand;
        struct tty_struct *uninitialized_var(tty);

        sighand = rcu_dereference_check(tsk->sighand,
                                        lockdep_tasklist_lock_is_held());
        spin_lock(&sighand->siglock);

        posix_cpu_timers_exit(tsk);
        if (group_dead) {
                posix_cpu_timers_exit_group(tsk);
                tty = sig->tty;
                sig->tty = NULL;
        } else {
                /*
                 * This can only happen if the caller is de_thread().
                 * FIXME: this is a temporary hack, we should teach
                 * posix-cpu-timers to handle this case correctly.
                 */
                if (unlikely(has_group_leader_pid(tsk)))
                        posix_cpu_timers_exit_group(tsk);

                /*
                 * If there is any task waiting for the group exit
                 * then notify it:
                 */
                if (sig->notify_count > 0 && !--sig->notify_count)
                        wake_up_process(sig->group_exit_task);

                if (tsk == sig->curr_target)
                        sig->curr_target = next_thread(tsk);
                /*
                 * Accumulate here the counters for all threads but the
                 * group leader as they die, so they can be added into
                 * the process-wide totals when those are taken.
                 * The group leader stays around as a zombie as long
                 * as there are other threads.  When it gets reaped,
                 * the exit.c code will add its counts into these totals.
                 * We won't ever get here for the group leader, since it
                 * will have been the last reference on the signal_struct.
                 */
                sig->utime = cputime_add(sig->utime, tsk->utime);
                sig->stime = cputime_add(sig->stime, tsk->stime);
                sig->gtime = cputime_add(sig->gtime, tsk->gtime);
                sig->min_flt += tsk->min_flt;
                sig->maj_flt += tsk->maj_flt;
                sig->nvcsw += tsk->nvcsw;
                sig->nivcsw += tsk->nivcsw;
                sig->inblock += task_io_get_inblock(tsk);
                sig->oublock += task_io_get_oublock(tsk);
                task_io_accounting_add(&sig->ioac, &tsk->ioac);
                sig->sum_sched_runtime += tsk->se.sum_exec_runtime;
        }

        sig->nr_threads--;
        __unhash_process(tsk, group_dead);

        /*
         * Do this under ->siglock, we can race with another thread
         * doing sigqueue_free() if we have SIGQUEUE_PREALLOC signals.
         */
        flush_sigqueue(&tsk->pending);
        tsk->sighand = NULL;
        spin_unlock(&sighand->siglock);

        __cleanup_sighand(sighand);
        clear_tsk_thread_flag(tsk, TIF_SIGPENDING);
        if (group_dead) {
                flush_sigqueue(&sig->shared_pending);
                tty_kref_put(tty);
        }
}
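
/*
 * RCU callback used by release_task(): runs after a grace period, so
 * RCU readers can no longer see the task, then drops the final
 * task_struct reference.
 */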
static void delayed_put_task_struct(struct rcu_head *rhp)
{
        struct task_struct *tsk = container_of(rhp, struct task_struct, rcu);

        perf_event_delayed_put(tsk);
        trace_sched_process_free(tsk);
        put_task_struct(tsk);
}
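
/*
 * Final cleanup of a dead task: drop the user's process count, flush
 * the /proc entries, detach pids and signal state, and hand the
 * task_struct to RCU for freeing.  Loops back once if reaping the last
 * thread also makes a zombie group leader releasable.
 */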
void release_task(struct task_struct *p)
{
        struct task_struct *leader;
        int zap_leader;
repeat:
        /* don't need to get the RCU readlock here - the process is dead and
         * can't be modifying its own credentials. But shut RCU-lockdep up */
        rcu_read_lock();
        atomic_dec(&__task_cred(p)->user->processes);
        rcu_read_unlock();

        proc_flush_task(p);

        write_lock_irq(&tasklist_lock);
        ptrace_release_task(p);
        __exit_signal(p);

        /*
         * If we are the last non-leader member of the thread
         * group, and the leader is zombie, then notify the
         * group leader's parent process. (if it wants notification.)
         */
        zap_leader = 0;
        leader = p->group_leader;
        if (leader != p && thread_group_empty(leader) &&
            leader->exit_state == EXIT_ZOMBIE) {
                /*
                 * If we were the last child thread and the leader has
                 * exited already, and the leader's parent ignores SIGCHLD,
                 * then we are the one who should release the leader.
                 */
                zap_leader = do_notify_parent(leader, leader->exit_signal);
                if (zap_leader)
                        leader->exit_state = EXIT_DEAD;
        }

        write_unlock_irq(&tasklist_lock);
        release_thread(p);
        call_rcu(&p->rcu, delayed_put_task_struct);

        p = leader;
        if (unlikely(zap_leader))
                goto repeat;
}

/*
 * This checks not only the pgrp, but falls back on the pid if no
 * satisfactory pgrp is found. I dunno - gdb doesn't work correctly
 * without this...
 *
 * The caller must hold rcu lock or the tasklist lock.
 */
struct pid *session_of_pgrp(struct pid *pgrp)
{
        struct task_struct *p;
        struct pid *sid = NULL;

        p = pid_task(pgrp, PIDTYPE_PGID);
        if (p == NULL)
                p = pid_task(pgrp, PIDTYPE_PID);
        if (p != NULL)
                sid = task_session(p);

        return sid;
}

/*
 * Determine if a process group is "orphaned", according to the POSIX
 * definition in 2.2.2.52.  Orphaned process groups are not to be affected
 * by terminal-generated stop signals.  Newly orphaned process groups are
 * to receive a SIGHUP and a SIGCONT.
 *
 * "I ask you, have you ever known what it is to be an orphan?"
 */
static int will_become_orphaned_pgrp(struct pid *pgrp, struct task_struct *ignored_task)
{
        struct task_struct *p;

        do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
                if ((p == ignored_task) ||
                    (p->exit_state && thread_group_empty(p)) ||
                    is_global_init(p->real_parent))
                        continue;

                if (task_pgrp(p->real_parent) != pgrp &&
                    task_session(p->real_parent) == task_session(p))
                        return 0;
        } while_each_pid_task(pgrp, PIDTYPE_PGID, p);

        return 1;
}

int is_current_pgrp_orphaned(void)
{
        int retval;

        read_lock(&tasklist_lock);
        retval = will_become_orphaned_pgrp(task_pgrp(current), NULL);
        read_unlock(&tasklist_lock);

        return retval;
}
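
/*
 * Returns true if the process group has at least one member whose
 * thread group is stopped (SIGNAL_STOP_STOPPED).
 */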
static bool has_stopped_jobs(struct pid *pgrp)
{
        struct task_struct *p;

        do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
                if (p->signal->flags & SIGNAL_STOP_STOPPED)
                        return true;
        } while_each_pid_task(pgrp, PIDTYPE_PGID, p);

        return false;
}

/*
 * Check to see if any process groups have become orphaned as
 * a result of our exiting, and if they have any stopped jobs,
 * send them a SIGHUP and then a SIGCONT.  (POSIX 3.2.2.2)
 */
static void
kill_orphaned_pgrp(struct task_struct *tsk, struct task_struct *parent)
{
        struct pid *pgrp = task_pgrp(tsk);
        struct task_struct *ignored_task = tsk;

        if (!parent)
                /* exit: our father is in a different pgrp than
                 * we are and we were the only connection outside.
                 */
                parent = tsk->real_parent;
        else
                /* reparent: our child is in a different pgrp than
                 * we are, and it was the only connection outside.
                 */
                ignored_task = NULL;

        if (task_pgrp(parent) != pgrp &&
            task_session(parent) == task_session(tsk) &&
            will_become_orphaned_pgrp(pgrp, ignored_task) &&
            has_stopped_jobs(pgrp)) {
                __kill_pgrp_info(SIGHUP, SEND_SIG_PRIV, pgrp);
                __kill_pgrp_info(SIGCONT, SEND_SIG_PRIV, pgrp);
        }
}

/**
 * reparent_to_kthreadd - Reparent the calling kernel thread to kthreadd
 *
 * If a kernel thread is launched as a result of a system call, or if
 * it ever exits, it should generally reparent itself to kthreadd so it
 * isn't in the way of other processes and is correctly cleaned up on exit.
 *
 * The various task state such as scheduling policy and priority may have
 * been inherited from a user process, so we reset them to sane values here.
 *
 * NOTE that reparent_to_kthreadd() gives the caller full capabilities.
 */
static void reparent_to_kthreadd(void)
{
        write_lock_irq(&tasklist_lock);

        ptrace_unlink(current);
        /* Reparent to kthreadd */
        current->real_parent = current->parent = kthreadd_task;
        list_move_tail(&current->sibling, &current->real_parent->children);

        /* Set the exit signal to SIGCHLD so we signal our new parent on exit */
        current->exit_signal = SIGCHLD;

        if (task_nice(current) < 0)
                set_user_nice(current, 0);
        /* cpus_allowed? */
        /* rt_priority? */
        /* signals? */
        memcpy(current->signal->rlim, init_task.signal->rlim,
               sizeof(current->signal->rlim));

        atomic_inc(&init_cred.usage);
        commit_creds(&init_cred);
        write_unlock_irq(&tasklist_lock);
}
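
/*
 * Move the caller's thread group leader into the session and process
 * group given by @pid.  Callers must hold tasklist_lock; the
 * set_special_pids() wrapper below takes it.
 */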
void __set_special_pids(struct pid *pid)
{
        struct task_struct *curr = current->group_leader;

        if (task_session(curr) != pid)
                change_pid(curr, PIDTYPE_SID, pid);

        if (task_pgrp(curr) != pid)
                change_pid(curr, PIDTYPE_PGID, pid);
}

static void set_special_pids(struct pid *pid)
{
        write_lock_irq(&tasklist_lock);
        __set_special_pids(pid);
        write_unlock_irq(&tasklist_lock);
}

/*
 * Let kernel threads use this to say that they allow a certain signal.
 * Must not be used if kthread was cloned with CLONE_SIGHAND.
 */
int allow_signal(int sig)
{
        if (!valid_signal(sig) || sig < 1)
                return -EINVAL;

        spin_lock_irq(&current->sighand->siglock);
        /* This is only needed for daemonize()'ed kthreads */
        sigdelset(&current->blocked, sig);
        /*
         * Kernel threads handle their own signals. Let the signal code
         * know it'll be handled, so that they don't get converted to
         * SIGKILL or just silently dropped.
         */
        current->sighand->action[(sig)-1].sa.sa_handler = (void __user *)2;
        recalc_sigpending();
        spin_unlock_irq(&current->sighand->siglock);
        return 0;
}

EXPORT_SYMBOL(allow_signal);
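
/*
 * The inverse of allow_signal(): restore the "ignore" disposition for
 * @sig so the kernel thread stops receiving it.
 */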
int disallow_signal(int sig)
{
        if (!valid_signal(sig) || sig < 1)
                return -EINVAL;

        spin_lock_irq(&current->sighand->siglock);
        current->sighand->action[(sig)-1].sa.sa_handler = SIG_IGN;
        recalc_sigpending();
        spin_unlock_irq(&current->sighand->siglock);
        return 0;
}

EXPORT_SYMBOL(disallow_signal);

/*
 * Put all the gunge required to become a kernel thread without
 * attached user resources in one place where it belongs.
 */
void daemonize(const char *name, ...)
{
        va_list args;
        sigset_t blocked;

        va_start(args, name);
        vsnprintf(current->comm, sizeof(current->comm), name, args);
        va_end(args);

        /*
         * If we were started as a result of loading a module, close all of the
         * user space pages.  We don't need them, and if we didn't close them
         * they would be locked into memory.
         */
        exit_mm(current);
        /*
         * We don't want to have TIF_FREEZE set if the system-wide hibernation
         * or suspend transition begins right now.
         */
        current->flags |= (PF_NOFREEZE | PF_KTHREAD);

        if (current->nsproxy != &init_nsproxy) {
                get_nsproxy(&init_nsproxy);
                switch_task_namespaces(current, &init_nsproxy);
        }
        set_special_pids(&init_struct_pid);
        proc_clear_tty(current);

        /* Block and flush all signals */
        sigfillset(&blocked);
        sigprocmask(SIG_BLOCK, &blocked, NULL);
        flush_signals(current);

        /* Become as one with the init task */
        daemonize_fs_struct();
        exit_files(current);
        current->files = init_task.files;
        atomic_inc(&current->files->count);

        reparent_to_kthreadd();
}

EXPORT_SYMBOL(daemonize);
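
/*
 * Walk the fd bitmap and close every file that is still open.  Only
 * called from put_files_struct() once the last reference to @files is
 * gone, hence the lockless fdtable access.
 */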
static void close_files(struct files_struct *files)
{
        int i, j;
        struct fdtable *fdt;

        j = 0;

        /*
         * It is safe to dereference the fd table without RCU or
         * ->file_lock because this is the last reference to the
         * files structure.  But use RCU to shut RCU-lockdep up.
         */
        rcu_read_lock();
        fdt = files_fdtable(files);
        rcu_read_unlock();
        for (;;) {
                unsigned long set;
                i = j * __NFDBITS;
                if (i >= fdt->max_fds)
                        break;
                set = fdt->open_fds->fds_bits[j++];
                while (set) {
                        if (set & 1) {
                                struct file *file = xchg(&fdt->fd[i], NULL);
                                if (file) {
                                        filp_close(file, files);
                                        cond_resched();
                                }
                        }
                        i++;
                        set >>= 1;
                }
        }
}
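
/*
 * Take a counted reference on @task's files_struct, or return NULL if
 * it has already been detached by exit_files().
 */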
struct files_struct *get_files_struct(struct task_struct *task)
{
        struct files_struct *files;

        task_lock(task);
        files = task->files;
        if (files)
                atomic_inc(&files->count);
        task_unlock(task);

        return files;
}

void put_files_struct(struct files_struct *files)
{
        struct fdtable *fdt;

        if (atomic_dec_and_test(&files->count)) {
                close_files(files);
                /*
                 * Free the fd and fdset arrays if we expanded them.
                 * If the fdtable was embedded, pass files for freeing
                 * at the end of the RCU grace period. Otherwise,
                 * you can free files immediately.
                 */
                rcu_read_lock();
                fdt = files_fdtable(files);
                if (fdt != &files->fdtab)
                        kmem_cache_free(files_cachep, files);
                free_fdtable(fdt);
                rcu_read_unlock();
        }
}

void reset_files_struct(struct files_struct *files)
{
        struct task_struct *tsk = current;
        struct files_struct *old;

        old = tsk->files;
        task_lock(tsk);
        tsk->files = files;
        task_unlock(tsk);
        put_files_struct(old);
}

void exit_files(struct task_struct *tsk)
{
        struct files_struct *files = tsk->files;

        if (files) {
                task_lock(tsk);
                tsk->files = NULL;
                task_unlock(tsk);
                put_files_struct(files);
        }
}

#ifdef CONFIG_MM_OWNER
/*
 * A task is exiting.  If it owned this mm, find a new owner for the mm.
 */
void mm_update_next_owner(struct mm_struct *mm)
{
        struct task_struct *c, *g, *p = current;

retry:
        /*
         * If the exiting or execing task is not the owner, it's
         * someone else's problem.
         */
        if (mm->owner != p)
                return;
        /*
         * The current owner is exiting/execing and there are no other
         * candidates.  Do not leave the mm pointing to a possibly
         * freed task structure.
         */
        if (atomic_read(&mm->mm_users) <= 1) {
                mm->owner = NULL;
                return;
        }

        read_lock(&tasklist_lock);
        /*
         * Search in the children
         */
        list_for_each_entry(c, &p->children, sibling) {
                if (c->mm == mm)
                        goto assign_new_owner;
        }

        /*
         * Search in the siblings
         */
        list_for_each_entry(c, &p->real_parent->children, sibling) {
                if (c->mm == mm)
                        goto assign_new_owner;
        }

        /*
         * Search through everything else. We should not get
         * here often
         */
        do_each_thread(g, c) {
                if (c->mm == mm)
                        goto assign_new_owner;
        } while_each_thread(g, c);
        read_unlock(&tasklist_lock);
        /*
         * We found no owner yet mm_users > 1: this implies that we are
         * most likely racing with swapoff (try_to_unuse()) or /proc or
         * ptrace or page migration (get_task_mm()).  Mark owner as NULL.
         */
        mm->owner = NULL;
        return;

assign_new_owner:
        BUG_ON(c == p);
        get_task_struct(c);
        /*
         * The task_lock protects c->mm from changing.
         * We always want mm->owner->mm == mm
         */
        task_lock(c);
        /*
         * Delay read_unlock() till we have the task_lock()
         * to ensure that c does not slip away underneath us
         */
        read_unlock(&tasklist_lock);
        if (c->mm != mm) {
                task_unlock(c);
                put_task_struct(c);
                goto retry;
        }
        mm->owner = c;
        task_unlock(c);
        put_task_struct(c);
}
#endif /* CONFIG_MM_OWNER */

/*
 * Turn us into a lazy TLB process if we
 * aren't already..
 */
static void exit_mm(struct task_struct *tsk)
{
        struct mm_struct *mm = tsk->mm;
        struct core_state *core_state;

        mm_release(tsk, mm);
        if (!mm)
                return;
        /*
         * Serialize with any possible pending coredump.
         * We must hold mmap_sem around checking core_state
         * and clearing tsk->mm.  The core-inducing thread
         * will increment ->nr_threads for each thread in the
         * group with ->mm != NULL.
         */
        down_read(&mm->mmap_sem);
        core_state = mm->core_state;
        if (core_state) {
                struct core_thread self;
                up_read(&mm->mmap_sem);

                self.task = tsk;
                self.next = xchg(&core_state->dumper.next, &self);
                /*
                 * Implies mb(), the result of xchg() must be visible
                 * to core_state->dumper.
                 */
                if (atomic_dec_and_test(&core_state->nr_threads))
                        complete(&core_state->startup);

                for (;;) {
                        set_task_state(tsk, TASK_UNINTERRUPTIBLE);
                        if (!self.task) /* see coredump_finish() */
                                break;
                        schedule();
                }
                __set_task_state(tsk, TASK_RUNNING);
                down_read(&mm->mmap_sem);
        }
        atomic_inc(&mm->mm_count);
        BUG_ON(mm != tsk->active_mm);
        /* more a memory barrier than a real lock */
        task_lock(tsk);
        tsk->mm = NULL;
        up_read(&mm->mmap_sem);
        enter_lazy_tlb(mm, current);
        /* We don't want this task to be frozen prematurely */
        clear_freeze_flag(tsk);
        task_unlock(tsk);
        mm_update_next_owner(mm);
        mmput(mm);
}

/*
 * When we die, we re-parent all our children.
 * Try to give them to another thread in our thread
 * group, and if no such member exists, give it to
 * the child reaper process (ie "init") in our pid
 * space.
 */
static struct task_struct *find_new_reaper(struct task_struct *father)
        __releases(&tasklist_lock)
        __acquires(&tasklist_lock)
{
        struct pid_namespace *pid_ns = task_active_pid_ns(father);
        struct task_struct *thread;

        thread = father;
        while_each_thread(father, thread) {
                if (thread->flags & PF_EXITING)
                        continue;
                if (unlikely(pid_ns->child_reaper == father))
                        pid_ns->child_reaper = thread;
                return thread;
        }

        if (unlikely(pid_ns->child_reaper == father)) {
                write_unlock_irq(&tasklist_lock);
                if (unlikely(pid_ns == &init_pid_ns))
                        panic("Attempted to kill init!");

                zap_pid_ns_processes(pid_ns);
                write_lock_irq(&tasklist_lock);
                /*
                 * We can not clear ->child_reaper or leave it alone.
                 * There may be stealth EXIT_DEAD tasks on ->children,
                 * forget_original_parent() must move them somewhere.
                 */
                pid_ns->child_reaper = init_pid_ns.child_reaper;
        }

        return pid_ns->child_reaper;
}

/*
 * Any that need to be release_task'd are put on the @dead list.
 */
static void reparent_leader(struct task_struct *father, struct task_struct *p,
                            struct list_head *dead)
{
        list_move_tail(&p->sibling, &p->real_parent->children);

        if (p->exit_state == EXIT_DEAD)
                return;
        /*
         * If this is a threaded reparent there is no need to
         * notify anyone anything has happened.
         */
        if (same_thread_group(p->real_parent, father))
                return;

        /* We don't want people slaying init. */
        p->exit_signal = SIGCHLD;

        /* If it has exited, notify the new parent about this child's death. */
        if (!p->ptrace &&
            p->exit_state == EXIT_ZOMBIE && thread_group_empty(p)) {
                if (do_notify_parent(p, p->exit_signal)) {
                        p->exit_state = EXIT_DEAD;
                        list_move_tail(&p->sibling, dead);
                }
        }

        kill_orphaned_pgrp(p, father);
}
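
/*
 * Reparent all of @father's children (and each of their threads) to
 * the reaper chosen by find_new_reaper(), then release any children
 * that became EXIT_DEAD while being moved.
 */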
static void forget_original_parent(struct task_struct *father)
{
        struct task_struct *p, *n, *reaper;
        LIST_HEAD(dead_children);

        write_lock_irq(&tasklist_lock);
        /*
         * Note that exit_ptrace() and find_new_reaper() might
         * drop tasklist_lock and reacquire it.
         */
        exit_ptrace(father);
        reaper = find_new_reaper(father);

        list_for_each_entry_safe(p, n, &father->children, sibling) {
                struct task_struct *t = p;
                do {
                        t->real_parent = reaper;
                        if (t->parent == father) {
                                BUG_ON(t->ptrace);
                                t->parent = t->real_parent;
                        }
                        if (t->pdeath_signal)
                                group_send_sig_info(t->pdeath_signal,
                                                    SEND_SIG_NOINFO, t);
                } while_each_thread(p, t);
                reparent_leader(father, p, &dead_children);
        }
        write_unlock_irq(&tasklist_lock);

        BUG_ON(!list_empty(&father->children));

        list_for_each_entry_safe(p, n, &dead_children, sibling) {
                list_del_init(&p->sibling);
                release_task(p);
        }
}

/*
 * Send signals to all our closest relatives so that they know
 * to properly mourn us..
 */
static void exit_notify(struct task_struct *tsk, int group_dead)
{
        bool autoreap;

        /*
         * This does two things:
         *
         * A.  Make init inherit all the child processes
         * B.  Check to see if any process groups have become orphaned
         *     as a result of our exiting, and if they have any stopped
         *     jobs, send them a SIGHUP and then a SIGCONT.  (POSIX 3.2.2.2)
         */
        forget_original_parent(tsk);
        exit_task_namespaces(tsk);

        write_lock_irq(&tasklist_lock);
        if (group_dead)
                kill_orphaned_pgrp(tsk->group_leader, NULL);

        /* Let father know we died
         *
         * Thread signals are configurable, but you aren't going to use
         * that to send signals to arbitrary processes.
         * That stops right now.
         *
         * If the parent exec id doesn't match the exec id we saved
         * when we started then we know the parent has changed security
         * domain.
         *
         * If our self_exec id doesn't match our parent_exec_id then
         * we have changed execution domain as these two values started
         * the same after a fork.
         */
        if (thread_group_leader(tsk) && tsk->exit_signal != SIGCHLD &&
            (tsk->parent_exec_id != tsk->real_parent->self_exec_id ||
             tsk->self_exec_id != tsk->parent_exec_id))
                tsk->exit_signal = SIGCHLD;

        if (unlikely(tsk->ptrace)) {
                int sig = thread_group_leader(tsk) &&
                          thread_group_empty(tsk) &&
                          !ptrace_reparented(tsk) ?
                        tsk->exit_signal : SIGCHLD;
                autoreap = do_notify_parent(tsk, sig);
        } else if (thread_group_leader(tsk)) {
                autoreap = thread_group_empty(tsk) &&
                           do_notify_parent(tsk, tsk->exit_signal);
        } else {
                autoreap = true;
        }

        tsk->exit_state = autoreap ? EXIT_DEAD : EXIT_ZOMBIE;

        /* mt-exec, de_thread() is waiting for group leader */
        if (unlikely(tsk->signal->notify_count < 0))
                wake_up_process(tsk->signal->group_exit_task);
        write_unlock_irq(&tasklist_lock);

        /* If the process is dead, release it - nobody will wait for it */
        if (autoreap)
                release_task(tsk);
}

#ifdef CONFIG_DEBUG_STACK_USAGE
static void check_stack_usage(void)
{
        static DEFINE_SPINLOCK(low_water_lock);
        static int lowest_to_date = THREAD_SIZE;
        unsigned long free;

        free = stack_not_used(current);

        if (free >= lowest_to_date)
                return;

        spin_lock(&low_water_lock);
        if (free < lowest_to_date) {
                printk(KERN_WARNING "%s used greatest stack depth: %lu bytes "
                       "left\n",
                       current->comm, free);
                lowest_to_date = free;
        }
        spin_unlock(&low_water_lock);
}
#else
static inline void check_stack_usage(void) {}
#endif
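
/*
 * The final exit path for a single task: tear down timers, mm, IPC,
 * files and fs state, flush accounting, notify the parent via
 * exit_notify(), and finally schedule away as TASK_DEAD so that
 * finish_task_switch() drops the last reference.
 */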
NORET_TYPE void do_exit(long code)
{
        struct task_struct *tsk = current;
        int group_dead;

        profile_task_exit(tsk);

        WARN_ON(blk_needs_flush_plug(tsk));

        if (unlikely(in_interrupt()))
                panic("Aiee, killing interrupt handler!");
        if (unlikely(!tsk->pid))
                panic("Attempted to kill the idle task!");

        /*
         * If do_exit is called because this process oopsed, it's possible
         * that get_fs() was left as KERNEL_DS, so reset it to USER_DS before
         * continuing. Amongst other possible reasons, this is to prevent
         * mm_release()->clear_child_tid() from writing to a user-controlled
         * kernel address.
         */
        set_fs(USER_DS);

        ptrace_event(PTRACE_EVENT_EXIT, code);

        validate_creds_for_do_exit(tsk);

        /*
         * We're taking recursive faults here in do_exit. Safest is to just
         * leave this task alone and wait for reboot.
         */
        if (unlikely(tsk->flags & PF_EXITING)) {
                printk(KERN_ALERT
                       "Fixing recursive fault but reboot is needed!\n");
                /*
                 * We can do this unlocked here. The futex code uses
                 * this flag just to verify whether the pi state
                 * cleanup has been done or not. In the worst case it
                 * loops once more. We pretend that the cleanup was
                 * done as there is no way to return. Either the
                 * OWNER_DIED bit is set by now or we push the blocked
                 * task into the wait-forever nirvana as well.
                 */
                tsk->flags |= PF_EXITPIDONE;
                set_current_state(TASK_UNINTERRUPTIBLE);
                schedule();
        }

        exit_irq_thread();

        exit_signals(tsk);  /* sets PF_EXITING */

        /*
         * tsk->flags are checked in the futex code to protect against
         * an exiting task cleaning up the robust pi futexes.
         */
        smp_mb();
        raw_spin_unlock_wait(&tsk->pi_lock);

        if (unlikely(in_atomic()))
                printk(KERN_INFO "note: %s[%d] exited with preempt_count %d\n",
                       current->comm, task_pid_nr(current),
                       preempt_count());

        acct_update_integrals(tsk);
        /* sync mm's RSS info before statistics gathering */
        if (tsk->mm)
                sync_mm_rss(tsk, tsk->mm);
        group_dead = atomic_dec_and_test(&tsk->signal->live);
        if (group_dead) {
                hrtimer_cancel(&tsk->signal->real_timer);
                exit_itimers(tsk->signal);
                if (tsk->mm)
                        setmax_mm_hiwater_rss(&tsk->signal->maxrss, tsk->mm);
        }
        acct_collect(code, group_dead);
        if (group_dead)
                tty_audit_exit();
        if (unlikely(tsk->audit_context))
                audit_free(tsk);

        tsk->exit_code = code;
        taskstats_exit(tsk, group_dead);

        exit_mm(tsk);

        if (group_dead)
                acct_process();
        trace_sched_process_exit(tsk);

        exit_sem(tsk);
        exit_shm(tsk);
        exit_files(tsk);
        exit_fs(tsk);
        check_stack_usage();
        exit_thread();

        /*
         * Flush inherited counters to the parent - before the parent
         * gets woken up by child-exit notifications.
         *
         * because of cgroup mode, must be called before cgroup_exit()
         */
        perf_event_exit_task(tsk);

        cgroup_exit(tsk, 1);

        if (group_dead)
                disassociate_ctty(1);

        module_put(task_thread_info(tsk)->exec_domain->module);

        proc_exit_connector(tsk);

        /*
         * FIXME: do that only when needed, using sched_exit tracepoint
         */
        ptrace_put_breakpoints(tsk);

        exit_notify(tsk, group_dead);
#ifdef CONFIG_NUMA
        task_lock(tsk);
        mpol_put(tsk->mempolicy);
        tsk->mempolicy = NULL;
        task_unlock(tsk);
#endif
#ifdef CONFIG_FUTEX
        if (unlikely(current->pi_state_cache))
                kfree(current->pi_state_cache);
#endif
        /*
         * Make sure we are holding no locks:
         */
        debug_check_no_locks_held(tsk);
        /*
         * We can do this unlocked here. The futex code uses this flag
         * just to verify whether the pi state cleanup has been done
         * or not. In the worst case it loops once more.
         */
        tsk->flags |= PF_EXITPIDONE;

        if (tsk->io_context)
                exit_io_context(tsk);

        if (tsk->splice_pipe)
                __free_pipe_info(tsk->splice_pipe);

        validate_creds_for_do_exit(tsk);

        preempt_disable();
        exit_rcu();
        /* causes final put_task_struct in finish_task_switch(). */
        tsk->state = TASK_DEAD;
        schedule();
        BUG();
        /* Avoid "noreturn function does return". */
        for (;;)
                cpu_relax();    /* For when BUG is null */
}

EXPORT_SYMBOL_GPL(do_exit);

NORET_TYPE void complete_and_exit(struct completion *comp, long code)
{
        if (comp)
                complete(comp);

        do_exit(code);
}

EXPORT_SYMBOL(complete_and_exit);

SYSCALL_DEFINE1(exit, int, error_code)
{
        do_exit((error_code&0xff)<<8);
}

/*
 * Take down every thread in the group.  This is called by fatal signals
 * as well as by sys_exit_group (below).
 */
NORET_TYPE void
do_group_exit(int exit_code)
{
        struct signal_struct *sig = current->signal;

        BUG_ON(exit_code & 0x80); /* core dumps don't get here */

        if (signal_group_exit(sig))
                exit_code = sig->group_exit_code;
        else if (!thread_group_empty(current)) {
                struct sighand_struct *const sighand = current->sighand;
                spin_lock_irq(&sighand->siglock);
                if (signal_group_exit(sig))
                        /* Another thread got here before we took the lock. */
                        exit_code = sig->group_exit_code;
                else {
                        sig->group_exit_code = exit_code;
                        sig->flags = SIGNAL_GROUP_EXIT;
                        zap_other_threads(current);
                }
                spin_unlock_irq(&sighand->siglock);
        }

        do_exit(exit_code);
        /* NOTREACHED */
}

/*
 * this kills every thread in the thread group. Note that any externally
 * wait4()-ing process will get the correct exit code - even if this
 * thread is not the thread group leader.
 */
SYSCALL_DEFINE1(exit_group, int, error_code)
{
        do_group_exit((error_code & 0xff) << 8);
        /* NOTREACHED */
        return 0;
}
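
/*
 * Bundles the parameters of a wait*() call so that the do_wait()
 * helpers can pass them around as a unit: wo_type/wo_pid select which
 * children qualify, wo_flags carries the W* options, and notask_error
 * holds the pending error (-ECHILD until an eligible child is seen).
 */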
struct wait_opts {
        enum pid_type           wo_type;
        int                     wo_flags;
        struct pid              *wo_pid;

        struct siginfo __user   *wo_info;
        int __user              *wo_stat;
        struct rusage __user    *wo_rusage;

        wait_queue_t            child_wait;
        int                     notask_error;
};

static inline
struct pid *task_pid_type(struct task_struct *task, enum pid_type type)
{
        if (type != PIDTYPE_PID)
                task = task->group_leader;
        return task->pids[type].pid;
}

static int eligible_pid(struct wait_opts *wo, struct task_struct *p)
{
        return wo->wo_type == PIDTYPE_MAX ||
               task_pid_type(p, wo->wo_type) == wo->wo_pid;
}

static int eligible_child(struct wait_opts *wo, struct task_struct *p)
{
        if (!eligible_pid(wo, p))
                return 0;
        /* Wait for all children (clone and not) if __WALL is set;
         * otherwise, wait for clone children *only* if __WCLONE is
         * set; otherwise, wait for non-clone children *only*.  (Note:
         * A "clone" child here is one that reports to its parent
         * using a signal other than SIGCHLD.) */
        if (((p->exit_signal != SIGCHLD) ^ !!(wo->wo_flags & __WCLONE))
            && !(wo->wo_flags & __WALL))
                return 0;

        return 1;
}
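
/*
 * Report a child's state without reaping it (the WNOWAIT and
 * stopped/continued paths): fill in the rusage and siginfo fields and
 * return the child's pid, or an error from put_user()/getrusage().
 */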
static int wait_noreap_copyout(struct wait_opts *wo, struct task_struct *p,
                               pid_t pid, uid_t uid, int why, int status)
{
        struct siginfo __user *infop;
        int retval = wo->wo_rusage
                ? getrusage(p, RUSAGE_BOTH, wo->wo_rusage) : 0;

        put_task_struct(p);
        infop = wo->wo_info;
        if (infop) {
                if (!retval)
                        retval = put_user(SIGCHLD, &infop->si_signo);
                if (!retval)
                        retval = put_user(0, &infop->si_errno);
                if (!retval)
                        retval = put_user((short)why, &infop->si_code);
                if (!retval)
                        retval = put_user(pid, &infop->si_pid);
                if (!retval)
                        retval = put_user(uid, &infop->si_uid);
                if (!retval)
                        retval = put_user(status, &infop->si_status);
        }
        if (!retval)
                retval = pid;
        return retval;
}

/*
 * Handle sys_wait4 work for one task in state EXIT_ZOMBIE.  We hold
 * read_lock(&tasklist_lock) on entry.  If we return zero, we still hold
 * the lock and this task is uninteresting.  If we return nonzero, we have
 * released the lock and the system call should return.
 */
static int wait_task_zombie(struct wait_opts *wo, struct task_struct *p)
{
        unsigned long state;
        int retval, status, traced;
        pid_t pid = task_pid_vnr(p);
        uid_t uid = __task_cred(p)->uid;
        struct siginfo __user *infop;

        if (!likely(wo->wo_flags & WEXITED))
                return 0;

        if (unlikely(wo->wo_flags & WNOWAIT)) {
                int exit_code = p->exit_code;
                int why;

                get_task_struct(p);
                read_unlock(&tasklist_lock);
                if ((exit_code & 0x7f) == 0) {
                        why = CLD_EXITED;
                        status = exit_code >> 8;
                } else {
                        why = (exit_code & 0x80) ? CLD_DUMPED : CLD_KILLED;
                        status = exit_code & 0x7f;
                }
                return wait_noreap_copyout(wo, p, pid, uid, why, status);
        }

        /*
         * Try to move the task's state to DEAD
         * only one thread is allowed to do this:
         */
        state = xchg(&p->exit_state, EXIT_DEAD);
        if (state != EXIT_ZOMBIE) {
                BUG_ON(state != EXIT_DEAD);
                return 0;
        }

        traced = ptrace_reparented(p);
        /*
         * It can be ptraced but not reparented, check
         * thread_group_leader() to filter out sub-threads.
         */
        if (likely(!traced) && thread_group_leader(p)) {
                struct signal_struct *psig;
                struct signal_struct *sig;
                unsigned long maxrss;
                cputime_t tgutime, tgstime;

                /*
                 * The resource counters for the group leader are in its
                 * own task_struct.  Those for dead threads in the group
                 * are in its signal_struct, as are those for the child
                 * processes it has previously reaped.  All these
                 * accumulate in the parent's signal_struct c* fields.
                 *
                 * We don't bother to take a lock here to protect these
                 * p->signal fields, because they are only touched by
                 * __exit_signal, which runs with tasklist_lock
                 * write-locked anyway, and so is excluded here.  We do
                 * need to protect the access to parent->signal fields,
                 * as other threads in the parent group can be right
                 * here reaping other children at the same time.
                 *
                 * We use thread_group_times() to get times for the thread
                 * group, which consolidates times for all threads in the
                 * group including the group leader.
                 */
                thread_group_times(p, &tgutime, &tgstime);
                spin_lock_irq(&p->real_parent->sighand->siglock);
                psig = p->real_parent->signal;
                sig = p->signal;
                psig->cutime =
                        cputime_add(psig->cutime,
                        cputime_add(tgutime,
                                    sig->cutime));
                psig->cstime =
                        cputime_add(psig->cstime,
                        cputime_add(tgstime,
                                    sig->cstime));
                psig->cgtime =
                        cputime_add(psig->cgtime,
                        cputime_add(p->gtime,
                        cputime_add(sig->gtime,
                                    sig->cgtime)));
                psig->cmin_flt +=
                        p->min_flt + sig->min_flt + sig->cmin_flt;
                psig->cmaj_flt +=
                        p->maj_flt + sig->maj_flt + sig->cmaj_flt;
                psig->cnvcsw +=
                        p->nvcsw + sig->nvcsw + sig->cnvcsw;
                psig->cnivcsw +=
                        p->nivcsw + sig->nivcsw + sig->cnivcsw;
                psig->cinblock +=
                        task_io_get_inblock(p) +
                        sig->inblock + sig->cinblock;
                psig->coublock +=
                        task_io_get_oublock(p) +
                        sig->oublock + sig->coublock;
                maxrss = max(sig->maxrss, sig->cmaxrss);
                if (psig->cmaxrss < maxrss)
                        psig->cmaxrss = maxrss;
                task_io_accounting_add(&psig->ioac, &p->ioac);
                task_io_accounting_add(&psig->ioac, &sig->ioac);
                spin_unlock_irq(&p->real_parent->sighand->siglock);
        }

        /*
         * Now we are sure this task is interesting, and no other
         * thread can reap it because we set its state to EXIT_DEAD.
         */
        read_unlock(&tasklist_lock);

        retval = wo->wo_rusage
                ? getrusage(p, RUSAGE_BOTH, wo->wo_rusage) : 0;
        status = (p->signal->flags & SIGNAL_GROUP_EXIT)
                ? p->signal->group_exit_code : p->exit_code;
        if (!retval && wo->wo_stat)
                retval = put_user(status, wo->wo_stat);

        infop = wo->wo_info;
        if (!retval && infop)
                retval = put_user(SIGCHLD, &infop->si_signo);
        if (!retval && infop)
                retval = put_user(0, &infop->si_errno);
        if (!retval && infop) {
                int why;

                if ((status & 0x7f) == 0) {
                        why = CLD_EXITED;
                        status >>= 8;
                } else {
                        why = (status & 0x80) ? CLD_DUMPED : CLD_KILLED;
                        status &= 0x7f;
                }
                retval = put_user((short)why, &infop->si_code);
                if (!retval)
                        retval = put_user(status, &infop->si_status);
        }
        if (!retval && infop)
                retval = put_user(pid, &infop->si_pid);
        if (!retval && infop)
                retval = put_user(uid, &infop->si_uid);
        if (!retval)
                retval = pid;

        if (traced) {
                write_lock_irq(&tasklist_lock);
                /* We dropped tasklist, ptracer could die and untrace */
                ptrace_unlink(p);
                /*
                 * If this is not a sub-thread, notify the parent.
                 * If parent wants a zombie, don't release it now.
                 */
                if (thread_group_leader(p) &&
                    !do_notify_parent(p, p->exit_signal)) {
                        p->exit_state = EXIT_ZOMBIE;
                        p = NULL;
                }
                write_unlock_irq(&tasklist_lock);
        }
        if (p != NULL)
                release_task(p);

        return retval;
}

static int *task_stopped_code(struct task_struct *p, bool ptrace)
{
        if (ptrace) {
                if (task_is_stopped_or_traced(p) &&
                    !(p->jobctl & JOBCTL_LISTENING))
                        return &p->exit_code;
        } else {
                if (p->signal->flags & SIGNAL_STOP_STOPPED)
                        return &p->signal->group_exit_code;
        }
        return NULL;
}

/**
 * wait_task_stopped - Wait for %TASK_STOPPED or %TASK_TRACED
 * @wo: wait options
 * @ptrace: is the wait for ptrace
 * @p: task to wait for
 *
 * Handle sys_wait4() work for %p in state %TASK_STOPPED or %TASK_TRACED.
 *
 * CONTEXT:
 * read_lock(&tasklist_lock), which is released if return value is
 * non-zero.  Also, grabs and releases @p->sighand->siglock.
 *
 * RETURNS:
 * 0 if wait condition didn't exist and search for other wait conditions
 * should continue.  Non-zero return, -errno on failure and @p's pid on
 * success, implies that tasklist_lock is released and wait condition
 * search should terminate.
 */
static int wait_task_stopped(struct wait_opts *wo,
                             int ptrace, struct task_struct *p)
{
        struct siginfo __user *infop;
        int retval, exit_code, *p_code, why;
        uid_t uid = 0; /* unneeded, required by compiler */
        pid_t pid;

        /*
         * Traditionally we see ptrace'd stopped tasks regardless of options.
         */
        if (!ptrace && !(wo->wo_flags & WUNTRACED))
                return 0;

        if (!task_stopped_code(p, ptrace))
                return 0;

        exit_code = 0;
        spin_lock_irq(&p->sighand->siglock);

        p_code = task_stopped_code(p, ptrace);
        if (unlikely(!p_code))
                goto unlock_sig;

        exit_code = *p_code;
        if (!exit_code)
                goto unlock_sig;

        if (!unlikely(wo->wo_flags & WNOWAIT))
                *p_code = 0;

        uid = task_uid(p);
unlock_sig:
        spin_unlock_irq(&p->sighand->siglock);
        if (!exit_code)
                return 0;

        /*
         * Now we are pretty sure this task is interesting.
         * Make sure it doesn't get reaped out from under us while we
         * give up the lock and then examine it below.  We don't want to
         * keep holding onto the tasklist_lock while we call getrusage and
         * possibly take page faults for user memory.
         */
        get_task_struct(p);
        pid = task_pid_vnr(p);
        why = ptrace ? CLD_TRAPPED : CLD_STOPPED;
        read_unlock(&tasklist_lock);

        if (unlikely(wo->wo_flags & WNOWAIT))
                return wait_noreap_copyout(wo, p, pid, uid, why, exit_code);

        retval = wo->wo_rusage
                ? getrusage(p, RUSAGE_BOTH, wo->wo_rusage) : 0;
        if (!retval && wo->wo_stat)
                retval = put_user((exit_code << 8) | 0x7f, wo->wo_stat);

        infop = wo->wo_info;
        if (!retval && infop)
                retval = put_user(SIGCHLD, &infop->si_signo);
        if (!retval && infop)
                retval = put_user(0, &infop->si_errno);
        if (!retval && infop)
                retval = put_user((short)why, &infop->si_code);
        if (!retval && infop)
                retval = put_user(exit_code, &infop->si_status);
        if (!retval && infop)
                retval = put_user(pid, &infop->si_pid);
        if (!retval && infop)
                retval = put_user(uid, &infop->si_uid);
        if (!retval)
                retval = pid;
        put_task_struct(p);

        BUG_ON(!retval);
        return retval;
}

/*
 * Handle do_wait work for one task in a live, non-stopped state.
 * read_lock(&tasklist_lock) on entry.  If we return zero, we still hold
 * the lock and this task is uninteresting.  If we return nonzero, we have
 * released the lock and the system call should return.
 */
static int wait_task_continued(struct wait_opts *wo, struct task_struct *p)
{
        int retval;
        pid_t pid;
        uid_t uid;

        if (!unlikely(wo->wo_flags & WCONTINUED))
                return 0;

        if (!(p->signal->flags & SIGNAL_STOP_CONTINUED))
                return 0;

        spin_lock_irq(&p->sighand->siglock);
        /* Re-check with the lock held. */
        if (!(p->signal->flags & SIGNAL_STOP_CONTINUED)) {
                spin_unlock_irq(&p->sighand->siglock);
                return 0;
        }
        if (!unlikely(wo->wo_flags & WNOWAIT))
                p->signal->flags &= ~SIGNAL_STOP_CONTINUED;
        uid = task_uid(p);
        spin_unlock_irq(&p->sighand->siglock);

        pid = task_pid_vnr(p);
        get_task_struct(p);
        read_unlock(&tasklist_lock);

        if (!wo->wo_info) {
                retval = wo->wo_rusage
                        ? getrusage(p, RUSAGE_BOTH, wo->wo_rusage) : 0;
                put_task_struct(p);
                if (!retval && wo->wo_stat)
                        retval = put_user(0xffff, wo->wo_stat);
                if (!retval)
                        retval = pid;
        } else {
                retval = wait_noreap_copyout(wo, p, pid, uid,
                                             CLD_CONTINUED, SIGCONT);
                BUG_ON(retval == 0);
        }

        return retval;
}

/*
 * Consider @p for a wait by @parent.
 *
 * -ECHILD should be in ->notask_error before the first call.
 * Returns nonzero for a final return, when we have unlocked tasklist_lock.
 * Returns zero if the search for a child should continue;
 * then ->notask_error is 0 if @p is an eligible child,
 * or another error from security_task_wait(), or still -ECHILD.
 */
static int wait_consider_task(struct wait_opts *wo, int ptrace,
                              struct task_struct *p)
{
        int ret = eligible_child(wo, p);
        if (!ret)
                return ret;

        ret = security_task_wait(p);
        if (unlikely(ret < 0)) {
                /*
                 * If we have not yet seen any eligible child,
                 * then let this error code replace -ECHILD.
                 * A permission error will give the user a clue
                 * to look for security policy problems, rather
                 * than for mysterious wait bugs.
                 */
                if (wo->notask_error)
                        wo->notask_error = ret;
                return 0;
        }

        /* dead body doesn't have much to contribute */
        if (p->exit_state == EXIT_DEAD)
                return 0;

        /* slay zombie? */
        if (p->exit_state == EXIT_ZOMBIE) {
                /*
                 * A zombie ptracee is only visible to its ptracer.
                 * Notification and reaping will be cascaded to the real
                 * parent when the ptracer detaches.
                 */
                if (likely(!ptrace) && unlikely(p->ptrace)) {
                        /* it will become visible, clear notask_error */
                        wo->notask_error = 0;
                        return 0;
                }

                /* we don't reap group leaders with subthreads */
                if (!delay_group_leader(p))
                        return wait_task_zombie(wo, p);

                /*
                 * Allow access to stopped/continued state via zombie by
                 * falling through.  Clearing of notask_error is complex.
                 *
                 * When !@ptrace:
                 *
                 * If WEXITED is set, notask_error should naturally be
                 * cleared.  If not, subset of WSTOPPED|WCONTINUED is set,
                 * so, if there are live subthreads, there are events to
                 * wait for.  If all subthreads are dead, it's still safe
                 * to clear - this function will be called again in a finite
                 * amount of time once all the subthreads are released and
                 * will then return without clearing.
                 *
                 * When @ptrace:
                 *
                 * Stopped state is per-task and thus can't change once the
                 * target task dies.  Only continued and exited can happen.
                 * Clear notask_error if WCONTINUED | WEXITED.
                 */
                if (likely(!ptrace) || (wo->wo_flags & (WCONTINUED | WEXITED)))
                        wo->notask_error = 0;
        } else {
                /*
                 * If @p is ptraced by a task in its real parent's group,
                 * hide group stop/continued state when looking at @p as
                 * the real parent; otherwise, a single stop can be
                 * reported twice as group and ptrace stops.
                 *
                 * If a ptracer wants to distinguish the two events for its
                 * own children, it should create a separate process which
                 * takes the role of real parent.
                 */
                if (likely(!ptrace) && p->ptrace && !ptrace_reparented(p))
                        return 0;

                /*
                 * @p is alive and it's gonna stop, continue or exit, so
                 * there always is something to wait for.
                 */
                wo->notask_error = 0;
        }

        /*
         * Wait for stopped.  Depending on @ptrace, different stopped state
         * is used and the two don't interact with each other.
         */
        ret = wait_task_stopped(wo, ptrace, p);
        if (ret)
                return ret;

        /*
         * Wait for continued.  There's only one continued state and the
         * ptracer can consume it which can confuse the real parent.  Don't
         * use WCONTINUED from ptracer.  You don't need or want it.
         */
        return wait_task_continued(wo, p);
}

/*
 * Do the work of do_wait() for one thread in the group, @tsk.
 *
 * -ECHILD should be in ->notask_error before the first call.
 * Returns nonzero for a final return, when we have unlocked tasklist_lock.
 * Returns zero if the search for a child should continue; then
 * ->notask_error is 0 if there were any eligible children,
 * or another error from security_task_wait(), or still -ECHILD.
 */
static int do_wait_thread(struct wait_opts *wo, struct task_struct *tsk)
{
        struct task_struct *p;

        list_for_each_entry(p, &tsk->children, sibling) {
                int ret = wait_consider_task(wo, 0, p);
                if (ret)
                        return ret;
        }

        return 0;
}

static int ptrace_do_wait(struct wait_opts *wo, struct task_struct *tsk)
{
        struct task_struct *p;

        list_for_each_entry(p, &tsk->ptraced, ptrace_entry) {
                int ret = wait_consider_task(wo, 1, p);
                if (ret)
                        return ret;
        }

        return 0;
}
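
/*
 * Wake-up callback for the wait_chldexit queue: only wake the waiter
 * in do_wait() if the child (@key) matches its wait criteria.
 */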
static int child_wait_callback(wait_queue_t *wait, unsigned mode,
                               int sync, void *key)
{
        struct wait_opts *wo = container_of(wait, struct wait_opts,
                                            child_wait);
        struct task_struct *p = key;

        if (!eligible_pid(wo, p))
                return 0;

        if ((wo->wo_flags & __WNOTHREAD) && wait->private != p->parent)
                return 0;

        return default_wake_function(wait, mode, sync, key);
}

void __wake_up_parent(struct task_struct *p, struct task_struct *parent)
{
        __wake_up_sync_key(&parent->signal->wait_chldexit,
                           TASK_INTERRUPTIBLE, 1, p);
}
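
/*
 * Core of the wait*() family: repeatedly scan our children and ptrace
 * children for a task matching @wo, sleeping on signal->wait_chldexit
 * between scans unless WNOHANG was given.
 */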
static long do_wait(struct wait_opts *wo)
{
        struct task_struct *tsk;
        int retval;

        trace_sched_process_wait(wo->wo_pid);

        init_waitqueue_func_entry(&wo->child_wait, child_wait_callback);
        wo->child_wait.private = current;
        add_wait_queue(&current->signal->wait_chldexit, &wo->child_wait);
repeat:
        /*
         * If there is nothing that can match our criteria, just get out.
         * We will clear ->notask_error to zero if we see any child that
         * might later match our criteria, even if we are not able to reap
         * it yet.
         */
        wo->notask_error = -ECHILD;
        if ((wo->wo_type < PIDTYPE_MAX) &&
            (!wo->wo_pid || hlist_empty(&wo->wo_pid->tasks[wo->wo_type])))
                goto notask;

        set_current_state(TASK_INTERRUPTIBLE);
        read_lock(&tasklist_lock);
        tsk = current;
        do {
                retval = do_wait_thread(wo, tsk);
                if (retval)
                        goto end;

                retval = ptrace_do_wait(wo, tsk);
                if (retval)
                        goto end;

                if (wo->wo_flags & __WNOTHREAD)
                        break;
        } while_each_thread(current, tsk);
        read_unlock(&tasklist_lock);

notask:
        retval = wo->notask_error;
        if (!retval && !(wo->wo_flags & WNOHANG)) {
                retval = -ERESTARTSYS;
                if (!signal_pending(current)) {
                        schedule();
                        goto repeat;
                }
        }
end:
        __set_current_state(TASK_RUNNING);
        remove_wait_queue(&current->signal->wait_chldexit, &wo->child_wait);
        return retval;
}

SYSCALL_DEFINE5(waitid, int, which, pid_t, upid, struct siginfo __user *,
                infop, int, options, struct rusage __user *, ru)
{
        struct wait_opts wo;
        struct pid *pid = NULL;
        enum pid_type type;
        long ret;

        if (options & ~(WNOHANG|WNOWAIT|WEXITED|WSTOPPED|WCONTINUED))
                return -EINVAL;
        if (!(options & (WEXITED|WSTOPPED|WCONTINUED)))
                return -EINVAL;

        switch (which) {
        case P_ALL:
                type = PIDTYPE_MAX;
                break;
        case P_PID:
                type = PIDTYPE_PID;
                if (upid <= 0)
                        return -EINVAL;
                break;
        case P_PGID:
                type = PIDTYPE_PGID;
                if (upid <= 0)
                        return -EINVAL;
                break;
        default:
                return -EINVAL;
        }

        if (type < PIDTYPE_MAX)
                pid = find_get_pid(upid);

        wo.wo_type      = type;
        wo.wo_pid       = pid;
        wo.wo_flags     = options;
        wo.wo_info      = infop;
        wo.wo_stat      = NULL;
        wo.wo_rusage    = ru;
        ret = do_wait(&wo);

        if (ret > 0) {
                ret = 0;
        } else if (infop) {
                /*
                 * For a WNOHANG return, clear out all the fields
                 * we would set so the user can easily tell the
                 * difference.
                 */
                if (!ret)
                        ret = put_user(0, &infop->si_signo);
                if (!ret)
                        ret = put_user(0, &infop->si_errno);
                if (!ret)
                        ret = put_user(0, &infop->si_code);
                if (!ret)
                        ret = put_user(0, &infop->si_pid);
                if (!ret)
                        ret = put_user(0, &infop->si_uid);
                if (!ret)
                        ret = put_user(0, &infop->si_status);
        }

        put_pid(pid);

        /* avoid REGPARM breakage on x86: */
        asmlinkage_protect(5, ret, which, upid, infop, options, ru);
        return ret;
}

SYSCALL_DEFINE4(wait4, pid_t, upid, int __user *, stat_addr,
                int, options, struct rusage __user *, ru)
{
        struct wait_opts wo;
        struct pid *pid = NULL;
        enum pid_type type;
        long ret;

        if (options & ~(WNOHANG|WUNTRACED|WCONTINUED|
                        __WNOTHREAD|__WCLONE|__WALL))
                return -EINVAL;

        if (upid == -1)
                type = PIDTYPE_MAX;
        else if (upid < 0) {
                type = PIDTYPE_PGID;
                pid = find_get_pid(-upid);
        } else if (upid == 0) {
                type = PIDTYPE_PGID;
                pid = get_task_pid(current, PIDTYPE_PGID);
        } else /* upid > 0 */ {
                type = PIDTYPE_PID;
                pid = find_get_pid(upid);
        }

        wo.wo_type      = type;
        wo.wo_pid       = pid;
        wo.wo_flags     = options | WEXITED;
        wo.wo_info      = NULL;
        wo.wo_stat      = stat_addr;
        wo.wo_rusage    = ru;
        ret = do_wait(&wo);
        put_pid(pid);

        /* avoid REGPARM breakage on x86: */
        asmlinkage_protect(4, ret, upid, stat_addr, options, ru);

        return ret;
}

#ifdef __ARCH_WANT_SYS_WAITPID

/*
 * sys_waitpid() remains for compatibility. waitpid() should be
 * implemented by calling sys_wait4() from libc.a.
 */
SYSCALL_DEFINE3(waitpid, pid_t, pid, int __user *, stat_addr, int, options)
{
        return sys_wait4(pid, stat_addr, options, NULL);
}

#endif