/*
 *  linux/kernel/exit.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/capability.h>
#include <linux/completion.h>
#include <linux/personality.h>
#include <linux/tty.h>
#include <linux/mnt_namespace.h>
#include <linux/iocontext.h>
#include <linux/key.h>
#include <linux/security.h>
#include <linux/cpu.h>
#include <linux/acct.h>
#include <linux/tsacct_kern.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/binfmts.h>
#include <linux/nsproxy.h>
#include <linux/pid_namespace.h>
#include <linux/ptrace.h>
#include <linux/profile.h>
#include <linux/mount.h>
#include <linux/proc_fs.h>
#include <linux/kthread.h>
#include <linux/mempolicy.h>
#include <linux/taskstats_kern.h>
#include <linux/delayacct.h>
#include <linux/freezer.h>
#include <linux/cgroup.h>
#include <linux/syscalls.h>
#include <linux/signal.h>
#include <linux/posix-timers.h>
#include <linux/cn_proc.h>
#include <linux/mutex.h>
#include <linux/futex.h>
#include <linux/pipe_fs_i.h>
#include <linux/audit.h> /* for audit_free() */
#include <linux/resource.h>
#include <linux/blkdev.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/tracehook.h>
#include <linux/init_task.h>
#include <trace/sched.h>

#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm/pgtable.h>
#include <asm/mmu_context.h>
#include "cred-internals.h"

DEFINE_TRACE(sched_process_free);
DEFINE_TRACE(sched_process_exit);
DEFINE_TRACE(sched_process_wait);

static void exit_mm(struct task_struct * tsk);

static void __unhash_process(struct task_struct *p)
{
	nr_threads--;
	detach_pid(p, PIDTYPE_PID);
	if (thread_group_leader(p)) {
		detach_pid(p, PIDTYPE_PGID);
		detach_pid(p, PIDTYPE_SID);

		list_del_rcu(&p->tasks);
		__get_cpu_var(process_counts)--;
	}
	list_del_rcu(&p->thread_group);
	list_del_init(&p->sibling);
}

/*
 * This function expects the tasklist_lock write-locked.
 */
static void __exit_signal(struct task_struct *tsk)
{
	struct signal_struct *sig = tsk->signal;
	struct sighand_struct *sighand;

	BUG_ON(!sig);
	BUG_ON(!atomic_read(&sig->count));

	sighand = rcu_dereference(tsk->sighand);
	spin_lock(&sighand->siglock);

	posix_cpu_timers_exit(tsk);
	if (atomic_dec_and_test(&sig->count))
		posix_cpu_timers_exit_group(tsk);
	else {
		/*
		 * If there is any task waiting for the group exit
		 * then notify it:
		 */
		if (sig->group_exit_task && atomic_read(&sig->count) == sig->notify_count)
			wake_up_process(sig->group_exit_task);

		if (tsk == sig->curr_target)
			sig->curr_target = next_thread(tsk);
		/*
		 * Accumulate here the counters for all threads but the
		 * group leader as they die, so they can be added into
		 * the process-wide totals when those are taken.
		 * The group leader stays around as a zombie as long
		 * as there are other threads.  When it gets reaped,
		 * the exit.c code will add its counts into these totals.
		 * We won't ever get here for the group leader, since it
		 * will have been the last reference on the signal_struct.
		 */
		sig->utime = cputime_add(sig->utime, task_utime(tsk));
		sig->stime = cputime_add(sig->stime, task_stime(tsk));
		sig->gtime = cputime_add(sig->gtime, task_gtime(tsk));
		sig->min_flt += tsk->min_flt;
		sig->maj_flt += tsk->maj_flt;
		sig->nvcsw += tsk->nvcsw;
		sig->nivcsw += tsk->nivcsw;
		sig->inblock += task_io_get_inblock(tsk);
		sig->oublock += task_io_get_oublock(tsk);
		task_io_accounting_add(&sig->ioac, &tsk->ioac);
		sig->sum_sched_runtime += tsk->se.sum_exec_runtime;
		sig = NULL; /* Marker for below. */
	}

	__unhash_process(tsk);

	/*
	 * Do this under ->siglock, we can race with another thread
	 * doing sigqueue_free() if we have SIGQUEUE_PREALLOC signals.
	 */
	flush_sigqueue(&tsk->pending);

	tsk->signal = NULL;
	tsk->sighand = NULL;
	spin_unlock(&sighand->siglock);

	__cleanup_sighand(sighand);
	clear_tsk_thread_flag(tsk, TIF_SIGPENDING);
	if (sig) {
		/*
		 * sig was reset to NULL above unless this was the last
		 * thread in the group; if it survived, the whole group
		 * is gone and we tear down the shared state.
		 */
		flush_sigqueue(&sig->shared_pending);
		taskstats_tgid_free(sig);
		/*
		 * Make sure ->signal can't go away under rq->lock,
		 * see account_group_exec_runtime().
		 */
		task_rq_unlock_wait(tsk);
		__cleanup_signal(sig);
	}
}

static void delayed_put_task_struct(struct rcu_head *rhp)
{
	struct task_struct *tsk = container_of(rhp, struct task_struct, rcu);

	trace_sched_process_free(tsk);
	put_task_struct(tsk);
}
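/*
 * release_task - the final reaping step, called from the wait paths or,
 * for self-reaping tasks, from exit_notify().  It detaches the task from
 * the pid hashes and frees the task_struct after an RCU grace period.
 * If this was the last non-leader thread and the leader is a detached
 * zombie, it loops around and reaps the leader as well.
 */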
void release_task(struct task_struct * p)
{
	struct task_struct *leader;
	int zap_leader;
repeat:
	tracehook_prepare_release_task(p);
	/* don't need to get the RCU readlock here - the process is dead and
	 * can't be modifying its own credentials */
	atomic_dec(&__task_cred(p)->user->processes);

	proc_flush_task(p);
	write_lock_irq(&tasklist_lock);
	tracehook_finish_release_task(p);
	__exit_signal(p);

	/*
	 * If we are the last non-leader member of the thread
	 * group, and the leader is zombie, then notify the
	 * group leader's parent process. (if it wants notification.)
	 */
	zap_leader = 0;
	leader = p->group_leader;
	if (leader != p && thread_group_empty(leader) && leader->exit_state == EXIT_ZOMBIE) {
		BUG_ON(task_detached(leader));
		do_notify_parent(leader, leader->exit_signal);
		/*
		 * If we were the last child thread and the leader has
		 * exited already, and the leader's parent ignores SIGCHLD,
		 * then we are the one who should release the leader.
		 *
		 * do_notify_parent() will have marked it self-reaping in
		 * that case.
		 */
		zap_leader = task_detached(leader);

		/*
		 * This maintains the invariant that release_task()
		 * only runs on a task in EXIT_DEAD, just for sanity.
		 */
		if (zap_leader)
			leader->exit_state = EXIT_DEAD;
	}

	write_unlock_irq(&tasklist_lock);
	release_thread(p);
	call_rcu(&p->rcu, delayed_put_task_struct);

	p = leader;
	if (unlikely(zap_leader))
		goto repeat;
}
/*
 * This checks not only the pgrp, but falls back on the pid if no
 * satisfactory pgrp is found. I dunno - gdb doesn't work correctly
 * without this...
 *
 * The caller must hold rcu lock or the tasklist lock.
 */
struct pid *session_of_pgrp(struct pid *pgrp)
{
	struct task_struct *p;
	struct pid *sid = NULL;

	p = pid_task(pgrp, PIDTYPE_PGID);
	if (p == NULL)
		p = pid_task(pgrp, PIDTYPE_PID);
	if (p != NULL)
		sid = task_session(p);

	return sid;
}

/*
 * Determine if a process group is "orphaned", according to the POSIX
 * definition in 2.2.2.52.  Orphaned process groups are not to be affected
 * by terminal-generated stop signals.  Newly orphaned process groups are
 * to receive a SIGHUP and a SIGCONT.
 *
 * "I ask you, have you ever known what it is to be an orphan?"
 */
static int will_become_orphaned_pgrp(struct pid *pgrp, struct task_struct *ignored_task)
{
	struct task_struct *p;

	do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
		if ((p == ignored_task) ||
		    (p->exit_state && thread_group_empty(p)) ||
		    is_global_init(p->real_parent))
			continue;

		if (task_pgrp(p->real_parent) != pgrp &&
		    task_session(p->real_parent) == task_session(p))
			return 0;
	} while_each_pid_task(pgrp, PIDTYPE_PGID, p);

	return 1;
}
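/*
 * A concrete instance of the rule above: a pipeline is stopped with ^Z
 * and its shell (the session leader, in a different pgrp) then exits.
 * The stopped jobs' new parent is outside the session, so the pgrp
 * becomes orphaned and kill_orphaned_pgrp() sends it SIGHUP + SIGCONT
 * rather than leaving it stopped forever.
 */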
int is_current_pgrp_orphaned(void)
{
	int retval;

	read_lock(&tasklist_lock);
	retval = will_become_orphaned_pgrp(task_pgrp(current), NULL);
	read_unlock(&tasklist_lock);

	return retval;
}

static int has_stopped_jobs(struct pid *pgrp)
{
	int retval = 0;
	struct task_struct *p;

	do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
		if (!task_is_stopped(p))
			continue;
		retval = 1;
		break;
	} while_each_pid_task(pgrp, PIDTYPE_PGID, p);

	return retval;
}

/*
 * Check to see if any process groups have become orphaned as
 * a result of our exiting, and if they have any stopped jobs,
 * send them a SIGHUP and then a SIGCONT.  (POSIX 3.2.2.2)
 */
static void
kill_orphaned_pgrp(struct task_struct *tsk, struct task_struct *parent)
{
	struct pid *pgrp = task_pgrp(tsk);
	struct task_struct *ignored_task = tsk;

	if (!parent)
		/* exit: our father is in a different pgrp than
		 * we are and we were the only connection outside.
		 */
		parent = tsk->real_parent;
	else
		/* reparent: our child is in a different pgrp than
		 * we are, and it was the only connection outside.
		 */
		ignored_task = NULL;

	if (task_pgrp(parent) != pgrp &&
	    task_session(parent) == task_session(tsk) &&
	    will_become_orphaned_pgrp(pgrp, ignored_task) &&
	    has_stopped_jobs(pgrp)) {
		__kill_pgrp_info(SIGHUP, SEND_SIG_PRIV, pgrp);
		__kill_pgrp_info(SIGCONT, SEND_SIG_PRIV, pgrp);
	}
}
/**
 * reparent_to_kthreadd - Reparent the calling kernel thread to kthreadd
 *
 * If a kernel thread is launched as a result of a system call, or if
 * it ever exits, it should generally reparent itself to kthreadd so it
 * isn't in the way of other processes and is correctly cleaned up on exit.
 *
 * The various task state such as scheduling policy and priority may have
 * been inherited from a user process, so we reset them to sane values here.
 *
 * NOTE that reparent_to_kthreadd() gives the caller full capabilities.
 */
static void reparent_to_kthreadd(void)
{
	write_lock_irq(&tasklist_lock);

	ptrace_unlink(current);
	/* Reparent to kthreadd */
	current->real_parent = current->parent = kthreadd_task;
	list_move_tail(&current->sibling, &current->real_parent->children);

	/* Set the exit signal to SIGCHLD so we signal our new parent on exit */
	current->exit_signal = SIGCHLD;

	if (task_nice(current) < 0)
		set_user_nice(current, 0);
	/* cpus_allowed? */
	/* rt_priority? */
	/* signals? */
	memcpy(current->signal->rlim, init_task.signal->rlim,
	       sizeof(current->signal->rlim));

	atomic_inc(&init_cred.usage);
	commit_creds(&init_cred);
	write_unlock_irq(&tasklist_lock);
}
void __set_special_pids(struct pid *pid)
{
	struct task_struct *curr = current->group_leader;
	pid_t nr = pid_nr(pid);

	if (task_session(curr) != pid) {
		change_pid(curr, PIDTYPE_SID, pid);
		set_task_session(curr, nr);
	}
	if (task_pgrp(curr) != pid) {
		change_pid(curr, PIDTYPE_PGID, pid);
		set_task_pgrp(curr, nr);
	}
}

static void set_special_pids(struct pid *pid)
{
	write_lock_irq(&tasklist_lock);
	__set_special_pids(pid);
	write_unlock_irq(&tasklist_lock);
}
/*
 * Let kernel threads use this to say that they
 * allow a certain signal (since daemonize() will
 * have disabled all of them by default).
 */
int allow_signal(int sig)
{
	if (!valid_signal(sig) || sig < 1)
		return -EINVAL;

	spin_lock_irq(&current->sighand->siglock);
	sigdelset(&current->blocked, sig);
	if (!current->mm) {
		/* Kernel threads handle their own signals.
		   Let the signal code know it'll be handled, so
		   that they don't get converted to SIGKILL or
		   just silently dropped */
		current->sighand->action[(sig)-1].sa.sa_handler = (void __user *)2;
	}
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);
	return 0;
}

EXPORT_SYMBOL(allow_signal);

int disallow_signal(int sig)
{
	if (!valid_signal(sig) || sig < 1)
		return -EINVAL;

	spin_lock_irq(&current->sighand->siglock);
	current->sighand->action[(sig)-1].sa.sa_handler = SIG_IGN;
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);
	return 0;
}

EXPORT_SYMBOL(disallow_signal);
/*
 * Put all the gunge required to become a kernel thread without
 * attached user resources in one place where it belongs.
 */
void daemonize(const char *name, ...)
{
	va_list args;
	struct fs_struct *fs;
	sigset_t blocked;

	va_start(args, name);
	vsnprintf(current->comm, sizeof(current->comm), name, args);
	va_end(args);

	/*
	 * If we were started as result of loading a module, close all of the
	 * user space pages.  We don't need them, and if we didn't close them
	 * they would be locked into memory.
	 */
	exit_mm(current);
	/*
	 * We don't want to have TIF_FREEZE set if the system-wide hibernation
	 * or suspend transition begins right now.
	 */
	current->flags |= (PF_NOFREEZE | PF_KTHREAD);

	if (current->nsproxy != &init_nsproxy) {
		get_nsproxy(&init_nsproxy);
		switch_task_namespaces(current, &init_nsproxy);
	}
	set_special_pids(&init_struct_pid);
	proc_clear_tty(current);

	/* Block and flush all signals */
	sigfillset(&blocked);
	sigprocmask(SIG_BLOCK, &blocked, NULL);
	flush_signals(current);

	/* Become as one with the init task */
	exit_fs(current);	/* current->fs->count--; */
	fs = init_task.fs;
	current->fs = fs;
	atomic_inc(&fs->count);

	exit_files(current);
	current->files = init_task.files;
	atomic_inc(&current->files->count);

	reparent_to_kthreadd();
}

EXPORT_SYMBOL(daemonize);
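/*
 * A minimal usage sketch for daemonize() above, assuming a thread spawned
 * via kernel_thread() (my_daemon is a hypothetical thread function, not
 * part of this file):
 *
 *	static int my_daemon(void *unused)
 *	{
 *		daemonize("my_daemon");
 *		allow_signal(SIGKILL);
 *		while (!signal_pending(current)) {
 *			... do work ...
 *			schedule();
 *		}
 *		return 0;
 *	}
 */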
static void close_files(struct files_struct * files)
{
	int i, j;
	struct fdtable *fdt;

	j = 0;

	/*
	 * It is safe to dereference the fd table without RCU or
	 * ->file_lock because this is the last reference to the
	 * files structure.
	 */
	fdt = files_fdtable(files);
	for (;;) {
		unsigned long set;
		i = j * __NFDBITS;	/* first fd number covered by word j */
		if (i >= fdt->max_fds)
			break;
		set = fdt->open_fds->fds_bits[j++];
		while (set) {
			if (set & 1) {
				struct file * file = xchg(&fdt->fd[i], NULL);
				if (file) {
					filp_close(file, files);
					cond_resched();
				}
			}
			i++;
			set >>= 1;	/* each set bit in the word is an open fd */
		}
	}
}
struct files_struct *get_files_struct(struct task_struct *task)
{
	struct files_struct *files;

	task_lock(task);
	files = task->files;
	if (files)
		atomic_inc(&files->count);
	task_unlock(task);

	return files;
}

void put_files_struct(struct files_struct *files)
{
	struct fdtable *fdt;

	if (atomic_dec_and_test(&files->count)) {
		close_files(files);
		/*
		 * Free the fd and fdset arrays if we expanded them.
		 * If the fdtable was embedded, pass files for freeing
		 * at the end of the RCU grace period. Otherwise,
		 * the files structure can be freed immediately.
		 */
		fdt = files_fdtable(files);
		if (fdt != &files->fdtab)
			kmem_cache_free(files_cachep, files);
		free_fdtable(fdt);
	}
}
void reset_files_struct(struct files_struct *files)
{
	struct task_struct *tsk = current;
	struct files_struct *old;

	old = tsk->files;
	task_lock(tsk);
	tsk->files = files;
	task_unlock(tsk);
	put_files_struct(old);
}

void exit_files(struct task_struct *tsk)
{
	struct files_struct * files = tsk->files;

	if (files) {
		task_lock(tsk);
		tsk->files = NULL;
		task_unlock(tsk);
		put_files_struct(files);
	}
}

void put_fs_struct(struct fs_struct *fs)
{
	/* No need to hold fs->lock if we are killing it */
	if (atomic_dec_and_test(&fs->count)) {
		path_put(&fs->root);
		path_put(&fs->pwd);
		kmem_cache_free(fs_cachep, fs);
	}
}

void exit_fs(struct task_struct *tsk)
{
	struct fs_struct * fs = tsk->fs;

	if (fs) {
		task_lock(tsk);
		tsk->fs = NULL;
		task_unlock(tsk);
		put_fs_struct(fs);
	}
}

EXPORT_SYMBOL_GPL(exit_fs);
#ifdef CONFIG_MM_OWNER
/*
 * Task p is exiting and it owned mm, let's find a new owner for it
 */
static inline int
mm_need_new_owner(struct mm_struct *mm, struct task_struct *p)
{
	/*
	 * If there are other users of the mm and the owner (us) is exiting
	 * we need to find a new owner to take on the responsibility.
	 */
	if (atomic_read(&mm->mm_users) <= 1)
		return 0;
	if (mm->owner != p)
		return 0;
	return 1;
}

void mm_update_next_owner(struct mm_struct *mm)
{
	struct task_struct *c, *g, *p = current;

retry:
	if (!mm_need_new_owner(mm, p))
		return;

	read_lock(&tasklist_lock);
	/*
	 * Search in the children
	 */
	list_for_each_entry(c, &p->children, sibling) {
		if (c->mm == mm)
			goto assign_new_owner;
	}

	/*
	 * Search in the siblings
	 */
	list_for_each_entry(c, &p->parent->children, sibling) {
		if (c->mm == mm)
			goto assign_new_owner;
	}

	/*
	 * Search through everything else. We should not get
	 * here often.
	 */
	do_each_thread(g, c) {
		if (c->mm == mm)
			goto assign_new_owner;
	} while_each_thread(g, c);
	read_unlock(&tasklist_lock);
	/*
	 * We found no owner yet mm_users > 1: this implies that we are
	 * most likely racing with swapoff (try_to_unuse()) or /proc or
	 * ptrace or page migration (get_task_mm()). Mark owner as NULL.
	 */
	mm->owner = NULL;
	return;

assign_new_owner:
	BUG_ON(c == p);
	get_task_struct(c);
	/*
	 * The task_lock protects c->mm from changing.
	 * We always want mm->owner->mm == mm
	 */
	task_lock(c);
	/*
	 * Delay read_unlock() till we have the task_lock()
	 * to ensure that c does not slip away underneath us
	 */
	read_unlock(&tasklist_lock);
	if (c->mm != mm) {
		task_unlock(c);
		put_task_struct(c);
		goto retry;
	}
	mm->owner = c;
	task_unlock(c);
	put_task_struct(c);
}
#endif /* CONFIG_MM_OWNER */
/*
 * Detach the exiting task from its address space, turning us into a
 * lazy TLB process if we aren't one already.
 */
static void exit_mm(struct task_struct * tsk)
{
	struct mm_struct *mm = tsk->mm;
	struct core_state *core_state;

	mm_release(tsk, mm);
	if (!mm)
		return;
	/*
	 * Serialize with any possible pending coredump.
	 * We must hold mmap_sem around checking core_state
	 * and clearing tsk->mm.  The core-inducing thread
	 * will increment ->nr_threads for each thread in the
	 * group with ->mm != NULL.
	 */
	down_read(&mm->mmap_sem);
	core_state = mm->core_state;
	if (core_state) {
		struct core_thread self;
		up_read(&mm->mmap_sem);

		self.task = tsk;
		self.next = xchg(&core_state->dumper.next, &self);
		/*
		 * Implies mb(), the result of xchg() must be visible
		 * to core_state->dumper.
		 */
		if (atomic_dec_and_test(&core_state->nr_threads))
			complete(&core_state->startup);

		for (;;) {
			set_task_state(tsk, TASK_UNINTERRUPTIBLE);
			if (!self.task) /* see coredump_finish() */
				break;
			schedule();
		}
		__set_task_state(tsk, TASK_RUNNING);
		down_read(&mm->mmap_sem);
	}
	atomic_inc(&mm->mm_count);	/* keep the mm alive as tsk->active_mm */
	BUG_ON(mm != tsk->active_mm);
	/* more a memory barrier than a real lock */
	task_lock(tsk);
	tsk->mm = NULL;
	up_read(&mm->mmap_sem);
	enter_lazy_tlb(mm, current);
	/* We don't want this task to be frozen prematurely */
	clear_freeze_flag(tsk);
	task_unlock(tsk);
	mm_update_next_owner(mm);
	mmput(mm);
}
/* Returns nonzero if the child should be released. */
static int reparent_thread(struct task_struct *p, struct task_struct *father)
{
	int dead;

	if (p->pdeath_signal)
		/* We already hold the tasklist_lock here.  */
		group_send_sig_info(p->pdeath_signal, SEND_SIG_NOINFO, p);

	list_move_tail(&p->sibling, &p->real_parent->children);

	if (task_detached(p))
		return 0;
	/* If this is a threaded reparent there is no need to
	 * notify anyone anything has happened.
	 */
	if (same_thread_group(p->real_parent, father))
		return 0;

	/* We don't want people slaying init. */
	p->exit_signal = SIGCHLD;

	/* If we'd notified the old parent about this child's death,
	 * also notify the new parent.
	 */
	dead = 0;
	if (!p->ptrace &&
	    p->exit_state == EXIT_ZOMBIE && thread_group_empty(p)) {
		do_notify_parent(p, p->exit_signal);
		if (task_detached(p)) {
			p->exit_state = EXIT_DEAD;
			dead = 1;
		}
	}

	kill_orphaned_pgrp(p, father);

	return dead;
}
/*
 * When we die, we re-parent all our children.
 * Try to give them to another thread in our thread
 * group, and if no such member exists, give it to
 * the child reaper process (ie "init") in our pid
 * space.
 */
static struct task_struct *find_new_reaper(struct task_struct *father)
{
	struct pid_namespace *pid_ns = task_active_pid_ns(father);
	struct task_struct *thread;

	thread = father;
	while_each_thread(father, thread) {
		if (thread->flags & PF_EXITING)
			continue;
		if (unlikely(pid_ns->child_reaper == father))
			pid_ns->child_reaper = thread;
		return thread;
	}

	if (unlikely(pid_ns->child_reaper == father)) {
		write_unlock_irq(&tasklist_lock);
		if (unlikely(pid_ns == &init_pid_ns))
			panic("Attempted to kill init!");

		zap_pid_ns_processes(pid_ns);
		write_lock_irq(&tasklist_lock);
		/*
		 * We can not clear ->child_reaper or leave it alone.
		 * There may be stealth EXIT_DEAD tasks on ->children,
		 * forget_original_parent() must move them somewhere.
		 */
		pid_ns->child_reaper = init_pid_ns.child_reaper;
	}

	return pid_ns->child_reaper;
}
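/*
 * Reparent every child of the dying task to the reaper chosen by
 * find_new_reaper().  Children that become self-reaping zombies along
 * the way are collected on a local list and released only after
 * tasklist_lock has been dropped, since release_task() takes that lock
 * itself.
 */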
static void forget_original_parent(struct task_struct *father)
{
	struct task_struct *p, *n, *reaper;
	LIST_HEAD(ptrace_dead);

	exit_ptrace(father);
	write_lock_irq(&tasklist_lock);
	reaper = find_new_reaper(father);

	list_for_each_entry_safe(p, n, &father->children, sibling) {
		p->real_parent = reaper;
		if (p->parent == father) {
			BUG_ON(p->ptrace);
			p->parent = p->real_parent;
		}
		if (reparent_thread(p, father))
			list_add(&p->ptrace_entry, &ptrace_dead);
	}
	write_unlock_irq(&tasklist_lock);

	BUG_ON(!list_empty(&father->children));

	list_for_each_entry_safe(p, n, &ptrace_dead, ptrace_entry) {
		list_del_init(&p->ptrace_entry);
		release_task(p);
	}
}
/*
 * Send signals to all our closest relatives so that they know
 * to properly mourn us..
 */
static void exit_notify(struct task_struct *tsk, int group_dead)
{
	int signal;
	void *cookie;

	/*
	 * This does two things:
	 *
	 * A.  Make init inherit all the child processes
	 * B.  Check to see if any process groups have become orphaned
	 *	as a result of our exiting, and if they have any stopped
	 *	jobs, send them a SIGHUP and then a SIGCONT.  (POSIX 3.2.2.2)
	 */
	forget_original_parent(tsk);
	exit_task_namespaces(tsk);

	write_lock_irq(&tasklist_lock);
	if (group_dead)
		kill_orphaned_pgrp(tsk->group_leader, NULL);

	/* Let father know we died
	 *
	 * Thread signals are configurable, but you aren't going to use
	 * that to send signals to arbitrary processes.
	 * That stops right now.
	 *
	 * If the parent exec id doesn't match the exec id we saved
	 * when we started then we know the parent has changed security
	 * domain.
	 *
	 * If our self_exec id doesn't match our parent_exec_id then
	 * we have changed execution domain as these two values started
	 * the same after a fork.
	 */
	if (tsk->exit_signal != SIGCHLD && !task_detached(tsk) &&
	    (tsk->parent_exec_id != tsk->real_parent->self_exec_id ||
	     tsk->self_exec_id != tsk->parent_exec_id) &&
	    !capable(CAP_KILL))
		tsk->exit_signal = SIGCHLD;

	signal = tracehook_notify_death(tsk, &cookie, group_dead);
	if (signal >= 0)
		signal = do_notify_parent(tsk, signal);

	tsk->exit_state = signal == DEATH_REAP ? EXIT_DEAD : EXIT_ZOMBIE;

	/* mt-exec, de_thread() is waiting for us */
	if (thread_group_leader(tsk) &&
	    tsk->signal->group_exit_task &&
	    tsk->signal->notify_count < 0)
		wake_up_process(tsk->signal->group_exit_task);
	write_unlock_irq(&tasklist_lock);

	tracehook_report_death(tsk, signal, cookie, group_dead);

	/* If the process is dead, release it - nobody will wait for it */
	if (signal == DEATH_REAP)
		release_task(tsk);
}
#ifdef CONFIG_DEBUG_STACK_USAGE
static void check_stack_usage(void)
{
	static DEFINE_SPINLOCK(low_water_lock);
	static int lowest_to_date = THREAD_SIZE;
	unsigned long free;

	free = stack_not_used(current);

	if (free >= lowest_to_date)
		return;

	spin_lock(&low_water_lock);
	if (free < lowest_to_date) {
		printk(KERN_WARNING "%s used greatest stack depth: %lu bytes "
				"left\n",
				current->comm, free);
		lowest_to_date = free;
	}
	spin_unlock(&low_water_lock);
}
#else
static inline void check_stack_usage(void) {}
#endif
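/*
 * do_exit - the heart of process destruction.  Roughly, in order: mark
 * the task PF_EXITING, cancel per-process timers, detach the address
 * space (exit_mm), drop IPC/file/fs references, then exit_notify()
 * reparents our children and tells our parent we died.  The final
 * schedule() with state TASK_DEAD never returns; the task_struct itself
 * is freed later via finish_task_switch()/release_task().
 */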
NORET_TYPE void do_exit(long code)
{
	struct task_struct *tsk = current;
	int group_dead;

	profile_task_exit(tsk);

	WARN_ON(atomic_read(&tsk->fs_excl));

	if (unlikely(in_interrupt()))
		panic("Aiee, killing interrupt handler!");
	if (unlikely(!tsk->pid))
		panic("Attempted to kill the idle task!");

	tracehook_report_exit(&code);

	/*
	 * We're taking recursive faults here in do_exit. Safest is to just
	 * leave this task alone and wait for reboot.
	 */
	if (unlikely(tsk->flags & PF_EXITING)) {
		printk(KERN_ALERT
			"Fixing recursive fault but reboot is needed!\n");
		/*
		 * We can do this unlocked here. The futex code uses
		 * this flag just to verify whether the pi state
		 * cleanup has been done or not. In the worst case it
		 * loops once more. We pretend that the cleanup was
		 * done as there is no way to return. Either the
		 * OWNER_DIED bit is set by now or we push the blocked
		 * task into the wait-forever nirvana as well.
		 */
		tsk->flags |= PF_EXITPIDONE;
		set_current_state(TASK_UNINTERRUPTIBLE);
		schedule();
	}

	exit_signals(tsk);  /* sets PF_EXITING */

	/*
	 * tsk->flags are checked in the futex code to protect against
	 * an exiting task cleaning up the robust pi futexes.
	 */
	smp_mb();
	spin_unlock_wait(&tsk->pi_lock);

	if (unlikely(in_atomic()))
		printk(KERN_INFO "note: %s[%d] exited with preempt_count %d\n",
				current->comm, task_pid_nr(current),
				preempt_count());

	acct_update_integrals(tsk);

	group_dead = atomic_dec_and_test(&tsk->signal->live);
	if (group_dead) {
		hrtimer_cancel(&tsk->signal->real_timer);
		exit_itimers(tsk->signal);
	}
	acct_collect(code, group_dead);
	if (group_dead)
		tty_audit_exit();
	if (unlikely(tsk->audit_context))
		audit_free(tsk);

	tsk->exit_code = code;
	taskstats_exit(tsk, group_dead);

	exit_mm(tsk);

	if (group_dead)
		acct_process();
	trace_sched_process_exit(tsk);

	exit_sem(tsk);
	exit_files(tsk);
	exit_fs(tsk);
	check_stack_usage();
	exit_thread();
	cgroup_exit(tsk, 1);

	if (group_dead && tsk->signal->leader)
		disassociate_ctty(1);

	module_put(task_thread_info(tsk)->exec_domain->module);
	if (tsk->binfmt)
		module_put(tsk->binfmt->module);

	proc_exit_connector(tsk);
	exit_notify(tsk, group_dead);
#ifdef CONFIG_NUMA
	mpol_put(tsk->mempolicy);
	tsk->mempolicy = NULL;
#endif
#ifdef CONFIG_FUTEX
	/*
	 * This must happen late, after the PID is not
	 * hashed anymore:
	 */
	if (unlikely(!list_empty(&tsk->pi_state_list)))
		exit_pi_state_list(tsk);
	if (unlikely(current->pi_state_cache))
		kfree(current->pi_state_cache);
#endif
	/*
	 * Make sure we are holding no locks:
	 */
	debug_check_no_locks_held(tsk);
	/*
	 * We can do this unlocked here. The futex code uses this flag
	 * just to verify whether the pi state cleanup has been done
	 * or not. In the worst case it loops once more.
	 */
	tsk->flags |= PF_EXITPIDONE;

	if (tsk->io_context)
		exit_io_context();

	if (tsk->splice_pipe)
		__free_pipe_info(tsk->splice_pipe);

	preempt_disable();
	/* causes final put_task_struct in finish_task_switch(). */
	tsk->state = TASK_DEAD;
	schedule();
	BUG();
	/* Avoid "noreturn function does return".  */
	for (;;)
		cpu_relax();	/* For when BUG is null */
}

EXPORT_SYMBOL_GPL(do_exit);

NORET_TYPE void complete_and_exit(struct completion *comp, long code)
{
	if (comp)
		complete(comp);

	do_exit(code);
}

EXPORT_SYMBOL(complete_and_exit);

SYSCALL_DEFINE1(exit, int, error_code)
{
	do_exit((error_code&0xff)<<8);
}
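/*
 * Note on the (error_code & 0xff) << 8 encoding above: a task's
 * exit_code keeps the userspace exit status in bits 8-15, the
 * terminating signal in bits 0-6 and the core-dump flag in bit 7.
 * Worked example: exit(1) stores 0x0100, so a waiter sees
 * WIFEXITED() true and WEXITSTATUS() == 1; a SIGKILLed task stores
 * 0x0009, giving WIFSIGNALED() true and WTERMSIG() == 9.
 */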
/*
 * Take down every thread in the group.  This is called by fatal signals
 * as well as by sys_exit_group (below).
 */
NORET_TYPE void
do_group_exit(int exit_code)
{
	struct signal_struct *sig = current->signal;

	BUG_ON(exit_code & 0x80); /* core dumps don't get here */

	if (signal_group_exit(sig))
		exit_code = sig->group_exit_code;
	else if (!thread_group_empty(current)) {
		struct sighand_struct *const sighand = current->sighand;
		spin_lock_irq(&sighand->siglock);
		if (signal_group_exit(sig))
			/* Another thread got here before we took the lock.  */
			exit_code = sig->group_exit_code;
		else {
			sig->group_exit_code = exit_code;
			sig->flags = SIGNAL_GROUP_EXIT;
			zap_other_threads(current);
		}
		spin_unlock_irq(&sighand->siglock);
	}

	do_exit(exit_code);
	/* NOTREACHED */
}

/*
 * This kills every thread in the thread group. Note that any externally
 * wait4()-ing process will get the correct exit code - even if this
 * thread is not the thread group leader.
 */
SYSCALL_DEFINE1(exit_group, int, error_code)
{
	do_group_exit((error_code & 0xff) << 8);
	/* NOTREACHED */
	return 0;
}
static struct pid *task_pid_type(struct task_struct *task, enum pid_type type)
{
	struct pid *pid = NULL;
	if (type == PIDTYPE_PID)
		pid = task->pids[type].pid;
	else if (type < PIDTYPE_MAX)
		pid = task->group_leader->pids[type].pid;
	return pid;
}

static int eligible_child(enum pid_type type, struct pid *pid, int options,
			  struct task_struct *p)
{
	int err;

	if (type < PIDTYPE_MAX) {
		if (task_pid_type(p, type) != pid)
			return 0;
	}

	/* Wait for all children (clone and not) if __WALL is set;
	 * otherwise, wait for clone children *only* if __WCLONE is
	 * set; otherwise, wait for non-clone children *only*.  (Note:
	 * A "clone" child here is one that reports to its parent
	 * using a signal other than SIGCHLD.) */
	if (((p->exit_signal != SIGCHLD) ^ ((options & __WCLONE) != 0))
	    && !(options & __WALL))
		return 0;

	err = security_task_wait(p);
	if (err)
		return err;

	return 1;
}
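/*
 * Example of the clone-child rule above: a child created with
 * clone(... | SIGCHLD) has exit_signal == SIGCHLD and is reported by a
 * plain wait4(); one created with no exit signal (or a non-SIGCHLD one)
 * is a "clone" child and is only seen when the waiter passes __WCLONE
 * or __WALL.
 */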
static int wait_noreap_copyout(struct task_struct *p, pid_t pid, uid_t uid,
			       int why, int status,
			       struct siginfo __user *infop,
			       struct rusage __user *rusagep)
{
	int retval = rusagep ? getrusage(p, RUSAGE_BOTH, rusagep) : 0;

	put_task_struct(p);
	if (!retval)
		retval = put_user(SIGCHLD, &infop->si_signo);
	if (!retval)
		retval = put_user(0, &infop->si_errno);
	if (!retval)
		retval = put_user((short)why, &infop->si_code);
	if (!retval)
		retval = put_user(pid, &infop->si_pid);
	if (!retval)
		retval = put_user(uid, &infop->si_uid);
	if (!retval)
		retval = put_user(status, &infop->si_status);
	if (!retval)
		retval = pid;
	return retval;
}
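/*
 * How wait_task_zombie() below maps an exit_code to siginfo:
 *	(exit_code & 0x7f) == 0  ->  why = CLD_EXITED, status = code >> 8
 *	(exit_code & 0x80) set   ->  why = CLD_DUMPED, status = signal
 *	otherwise                ->  why = CLD_KILLED, status = signal
 */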
/*
 * Handle sys_wait4 work for one task in state EXIT_ZOMBIE.  We hold
 * read_lock(&tasklist_lock) on entry.  If we return zero, we still hold
 * the lock and this task is uninteresting.  If we return nonzero, we have
 * released the lock and the system call should return.
 */
static int wait_task_zombie(struct task_struct *p, int options,
			    struct siginfo __user *infop,
			    int __user *stat_addr, struct rusage __user *ru)
{
	unsigned long state;
	int retval, status, traced;
	pid_t pid = task_pid_vnr(p);
	uid_t uid = __task_cred(p)->uid;

	if (!likely(options & WEXITED))
		return 0;

	if (unlikely(options & WNOWAIT)) {
		int exit_code = p->exit_code;
		int why, status;

		get_task_struct(p);
		read_unlock(&tasklist_lock);
		if ((exit_code & 0x7f) == 0) {
			why = CLD_EXITED;
			status = exit_code >> 8;
		} else {
			why = (exit_code & 0x80) ? CLD_DUMPED : CLD_KILLED;
			status = exit_code & 0x7f;
		}
		return wait_noreap_copyout(p, pid, uid, why,
					   status, infop, ru);
	}

	/*
	 * Try to move the task's state to DEAD
	 * only one thread is allowed to do this:
	 */
	state = xchg(&p->exit_state, EXIT_DEAD);
	if (state != EXIT_ZOMBIE) {
		BUG_ON(state != EXIT_DEAD);
		return 0;
	}

	traced = ptrace_reparented(p);

	if (likely(!traced)) {
		struct signal_struct *psig;
		struct signal_struct *sig;
		struct task_cputime cputime;

		/*
		 * The resource counters for the group leader are in its
		 * own task_struct.  Those for dead threads in the group
		 * are in its signal_struct, as are those for the child
		 * processes it has previously reaped.  All these
		 * accumulate in the parent's signal_struct c* fields.
		 *
		 * We don't bother to take a lock here to protect these
		 * p->signal fields, because they are only touched by
		 * __exit_signal, which runs with tasklist_lock
		 * write-locked anyway, and so is excluded here.  We do
		 * need to protect the access to p->parent->signal fields,
		 * as other threads in the parent group can be right
		 * here reaping other children at the same time.
		 *
		 * We use thread_group_cputime() to get times for the thread
		 * group, which consolidates times for all threads in the
		 * group including the group leader.
		 */
		thread_group_cputime(p, &cputime);
		spin_lock_irq(&p->parent->sighand->siglock);
		psig = p->parent->signal;
		sig = p->signal;
		psig->cutime =
			cputime_add(psig->cutime,
			cputime_add(cputime.utime,
				    sig->cutime));
		psig->cstime =
			cputime_add(psig->cstime,
			cputime_add(cputime.stime,
				    sig->cstime));
		psig->cgtime =
			cputime_add(psig->cgtime,
			cputime_add(p->gtime,
			cputime_add(sig->gtime,
				    sig->cgtime)));
		psig->cmin_flt +=
			p->min_flt + sig->min_flt + sig->cmin_flt;
		psig->cmaj_flt +=
			p->maj_flt + sig->maj_flt + sig->cmaj_flt;
		psig->cnvcsw +=
			p->nvcsw + sig->nvcsw + sig->cnvcsw;
		psig->cnivcsw +=
			p->nivcsw + sig->nivcsw + sig->cnivcsw;
		psig->cinblock +=
			task_io_get_inblock(p) +
			sig->inblock + sig->cinblock;
		psig->coublock +=
			task_io_get_oublock(p) +
			sig->oublock + sig->coublock;
		task_io_accounting_add(&psig->ioac, &p->ioac);
		task_io_accounting_add(&psig->ioac, &sig->ioac);
		spin_unlock_irq(&p->parent->sighand->siglock);
	}

	/*
	 * Now we are sure this task is interesting, and no other
	 * thread can reap it because we set its state to EXIT_DEAD.
	 */
	read_unlock(&tasklist_lock);

	retval = ru ? getrusage(p, RUSAGE_BOTH, ru) : 0;
	status = (p->signal->flags & SIGNAL_GROUP_EXIT)
		? p->signal->group_exit_code : p->exit_code;
	if (!retval && stat_addr)
		retval = put_user(status, stat_addr);
	if (!retval && infop)
		retval = put_user(SIGCHLD, &infop->si_signo);
	if (!retval && infop)
		retval = put_user(0, &infop->si_errno);
	if (!retval && infop) {
		int why;

		if ((status & 0x7f) == 0) {
			why = CLD_EXITED;
			status >>= 8;
		} else {
			why = (status & 0x80) ? CLD_DUMPED : CLD_KILLED;
			status &= 0x7f;
		}
		retval = put_user((short)why, &infop->si_code);
		if (!retval)
			retval = put_user(status, &infop->si_status);
	}
	if (!retval && infop)
		retval = put_user(pid, &infop->si_pid);
	if (!retval && infop)
		retval = put_user(uid, &infop->si_uid);
	if (!retval)
		retval = pid;

	if (traced) {
		write_lock_irq(&tasklist_lock);
		/* We dropped tasklist, ptracer could die and untrace */
		ptrace_unlink(p);
		/*
		 * If this is not a detached task, notify the parent.
		 * If it's still not detached after that, don't release
		 * it now.
		 */
		if (!task_detached(p)) {
			do_notify_parent(p, p->exit_signal);
			if (!task_detached(p)) {
				p->exit_state = EXIT_ZOMBIE;
				p = NULL;
			}
		}
		write_unlock_irq(&tasklist_lock);
	}
	if (p != NULL)
		release_task(p);

	return retval;
}
static int *task_stopped_code(struct task_struct *p, bool ptrace)
{
	if (ptrace) {
		if (task_is_stopped_or_traced(p))
			return &p->exit_code;
	} else {
		if (p->signal->flags & SIGNAL_STOP_STOPPED)
			return &p->signal->group_exit_code;
	}
	return NULL;
}

/*
 * Handle sys_wait4 work for one task in state TASK_STOPPED.  We hold
 * read_lock(&tasklist_lock) on entry.  If we return zero, we still hold
 * the lock and this task is uninteresting.  If we return nonzero, we have
 * released the lock and the system call should return.
 */
static int wait_task_stopped(int ptrace, struct task_struct *p,
			     int options, struct siginfo __user *infop,
			     int __user *stat_addr, struct rusage __user *ru)
{
	int retval, exit_code, *p_code, why;
	uid_t uid = 0; /* unneeded, required by compiler */
	pid_t pid;

	if (!(options & WUNTRACED))
		return 0;

	exit_code = 0;
	spin_lock_irq(&p->sighand->siglock);

	p_code = task_stopped_code(p, ptrace);
	if (unlikely(!p_code))
		goto unlock_sig;

	exit_code = *p_code;
	if (!exit_code)
		goto unlock_sig;

	if (!unlikely(options & WNOWAIT))
		*p_code = 0;

	/* don't need the RCU readlock here as we're holding a spinlock */
	uid = __task_cred(p)->uid;
unlock_sig:
	spin_unlock_irq(&p->sighand->siglock);
	if (!exit_code)
		return 0;

	/*
	 * Now we are pretty sure this task is interesting.
	 * Make sure it doesn't get reaped out from under us while we
	 * give up the lock and then examine it below.  We don't want to
	 * keep holding onto the tasklist_lock while we call getrusage and
	 * possibly take page faults for user memory.
	 */
	get_task_struct(p);
	pid = task_pid_vnr(p);
	why = ptrace ? CLD_TRAPPED : CLD_STOPPED;
	read_unlock(&tasklist_lock);

	if (unlikely(options & WNOWAIT))
		return wait_noreap_copyout(p, pid, uid,
					   why, exit_code,
					   infop, ru);

	retval = ru ? getrusage(p, RUSAGE_BOTH, ru) : 0;
	if (!retval && stat_addr)
		retval = put_user((exit_code << 8) | 0x7f, stat_addr);
	if (!retval && infop)
		retval = put_user(SIGCHLD, &infop->si_signo);
	if (!retval && infop)
		retval = put_user(0, &infop->si_errno);
	if (!retval && infop)
		retval = put_user((short)why, &infop->si_code);
	if (!retval && infop)
		retval = put_user(exit_code, &infop->si_status);
	if (!retval && infop)
		retval = put_user(pid, &infop->si_pid);
	if (!retval && infop)
		retval = put_user(uid, &infop->si_uid);
	if (!retval)
		retval = pid;
	put_task_struct(p);

	BUG_ON(!retval);
	return retval;
}
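/*
 * Status encodings seen by a wait4() caller: a stopped child is reported
 * as (stop_signal << 8) | 0x7f above (WIFSTOPPED() matches the low
 * byte), and wait_task_continued() below reports the magic value 0xffff,
 * which WIFCONTINUED() matches.
 */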
/*
 * Handle do_wait work for one task in a live, non-stopped state.
 * read_lock(&tasklist_lock) on entry.  If we return zero, we still hold
 * the lock and this task is uninteresting.  If we return nonzero, we have
 * released the lock and the system call should return.
 */
static int wait_task_continued(struct task_struct *p, int options,
			       struct siginfo __user *infop,
			       int __user *stat_addr, struct rusage __user *ru)
{
	int retval;
	pid_t pid;
	uid_t uid;

	if (!unlikely(options & WCONTINUED))
		return 0;

	if (!(p->signal->flags & SIGNAL_STOP_CONTINUED))
		return 0;

	spin_lock_irq(&p->sighand->siglock);
	/* Re-check with the lock held.  */
	if (!(p->signal->flags & SIGNAL_STOP_CONTINUED)) {
		spin_unlock_irq(&p->sighand->siglock);
		return 0;
	}
	if (!unlikely(options & WNOWAIT))
		p->signal->flags &= ~SIGNAL_STOP_CONTINUED;
	uid = __task_cred(p)->uid;
	spin_unlock_irq(&p->sighand->siglock);

	pid = task_pid_vnr(p);
	get_task_struct(p);
	read_unlock(&tasklist_lock);

	if (!infop) {
		retval = ru ? getrusage(p, RUSAGE_BOTH, ru) : 0;
		put_task_struct(p);
		if (!retval && stat_addr)
			retval = put_user(0xffff, stat_addr);
		if (!retval)
			retval = pid;
	} else {
		retval = wait_noreap_copyout(p, pid, uid,
					     CLD_CONTINUED, SIGCONT,
					     infop, ru);
		BUG_ON(retval == 0);
	}

	return retval;
}
/*
 * Consider @p for a wait by @parent.
 *
 * -ECHILD should be in *@notask_error before the first call.
 * Returns nonzero for a final return, when we have unlocked tasklist_lock.
 * Returns zero if the search for a child should continue;
 * then *@notask_error is 0 if @p is an eligible child,
 * or another error from security_task_wait(), or still -ECHILD.
 */
static int wait_consider_task(struct task_struct *parent, int ptrace,
			      struct task_struct *p, int *notask_error,
			      enum pid_type type, struct pid *pid, int options,
			      struct siginfo __user *infop,
			      int __user *stat_addr, struct rusage __user *ru)
{
	int ret = eligible_child(type, pid, options, p);
	if (!ret)
		return ret;

	if (unlikely(ret < 0)) {
		/*
		 * If we have not yet seen any eligible child,
		 * then let this error code replace -ECHILD.
		 * A permission error will give the user a clue
		 * to look for security policy problems, rather
		 * than for mysterious wait bugs.
		 */
		if (*notask_error)
			*notask_error = ret;
	}

	if (likely(!ptrace) && unlikely(p->ptrace)) {
		/*
		 * This child is hidden by ptrace.
		 * We aren't allowed to see it now, but eventually we will.
		 */
		*notask_error = 0;
		return 0;
	}

	if (p->exit_state == EXIT_DEAD)
		return 0;

	/*
	 * We don't reap group leaders with subthreads.
	 */
	if (p->exit_state == EXIT_ZOMBIE && !delay_group_leader(p))
		return wait_task_zombie(p, options, infop, stat_addr, ru);

	/*
	 * It's stopped or running now, so it might
	 * later continue, exit, or stop again.
	 */
	*notask_error = 0;

	if (task_stopped_code(p, ptrace))
		return wait_task_stopped(ptrace, p, options,
					 infop, stat_addr, ru);

	return wait_task_continued(p, options, infop, stat_addr, ru);
}
/*
 * Do the work of do_wait() for one thread in the group, @tsk.
 *
 * -ECHILD should be in *@notask_error before the first call.
 * Returns nonzero for a final return, when we have unlocked tasklist_lock.
 * Returns zero if the search for a child should continue; then
 * *@notask_error is 0 if there were any eligible children,
 * or another error from security_task_wait(), or still -ECHILD.
 */
static int do_wait_thread(struct task_struct *tsk, int *notask_error,
			  enum pid_type type, struct pid *pid, int options,
			  struct siginfo __user *infop, int __user *stat_addr,
			  struct rusage __user *ru)
{
	struct task_struct *p;

	list_for_each_entry(p, &tsk->children, sibling) {
		/*
		 * Do not consider detached threads.
		 */
		if (!task_detached(p)) {
			int ret = wait_consider_task(tsk, 0, p, notask_error,
						     type, pid, options,
						     infop, stat_addr, ru);
			if (ret)
				return ret;
		}
	}

	return 0;
}

static int ptrace_do_wait(struct task_struct *tsk, int *notask_error,
			  enum pid_type type, struct pid *pid, int options,
			  struct siginfo __user *infop, int __user *stat_addr,
			  struct rusage __user *ru)
{
	struct task_struct *p;

	/*
	 * Traditionally we see ptrace'd stopped tasks regardless of options.
	 */
	options |= WUNTRACED;

	list_for_each_entry(p, &tsk->ptraced, ptrace_entry) {
		int ret = wait_consider_task(tsk, 1, p, notask_error,
					     type, pid, options,
					     infop, stat_addr, ru);
		if (ret)
			return ret;
	}

	return 0;
}
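/*
 * do_wait() return protocol: -ECHILD when nothing can ever match; 0 from
 * the helpers above means "keep scanning"; a positive pid (or an error
 * from a copyout) means tasklist_lock is already dropped and we are
 * done.  With nothing reapable yet and no WNOHANG, the caller sleeps on
 * ->signal->wait_chldexit and retries when a child changes state.
 */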
static long do_wait(enum pid_type type, struct pid *pid, int options,
		    struct siginfo __user *infop, int __user *stat_addr,
		    struct rusage __user *ru)
{
	DECLARE_WAITQUEUE(wait, current);
	struct task_struct *tsk;
	int retval;

	trace_sched_process_wait(pid);

	add_wait_queue(&current->signal->wait_chldexit, &wait);
repeat:
	/*
	 * If there is nothing that can match our criteria just get out.
	 * We will clear @retval to zero if we see any child that might later
	 * match our criteria, even if we are not able to reap it yet.
	 */
	retval = -ECHILD;
	if ((type < PIDTYPE_MAX) && (!pid || hlist_empty(&pid->tasks[type])))
		goto end;

	current->state = TASK_INTERRUPTIBLE;
	read_lock(&tasklist_lock);
	tsk = current;
	do {
		int tsk_result = do_wait_thread(tsk, &retval,
						type, pid, options,
						infop, stat_addr, ru);
		if (!tsk_result)
			tsk_result = ptrace_do_wait(tsk, &retval,
						    type, pid, options,
						    infop, stat_addr, ru);
		if (tsk_result) {
			/*
			 * tasklist_lock is unlocked and we have a final result.
			 */
			retval = tsk_result;
			goto end;
		}

		if (options & __WNOTHREAD)
			break;
		tsk = next_thread(tsk);
		BUG_ON(tsk->signal != current->signal);
	} while (tsk != current);
	read_unlock(&tasklist_lock);

	if (!retval && !(options & WNOHANG)) {
		retval = -ERESTARTSYS;
		if (!signal_pending(current)) {
			schedule();
			goto repeat;
		}
	}

end:
	current->state = TASK_RUNNING;
	remove_wait_queue(&current->signal->wait_chldexit, &wait);
	if (infop) {
		if (retval > 0)
			retval = 0;
		else {
			/*
			 * For a WNOHANG return, clear out all the fields
			 * we would set so the user can easily tell the
			 * difference.
			 */
			if (!retval)
				retval = put_user(0, &infop->si_signo);
			if (!retval)
				retval = put_user(0, &infop->si_errno);
			if (!retval)
				retval = put_user(0, &infop->si_code);
			if (!retval)
				retval = put_user(0, &infop->si_pid);
			if (!retval)
				retval = put_user(0, &infop->si_uid);
			if (!retval)
				retval = put_user(0, &infop->si_status);
		}
	}
	return retval;
}
SYSCALL_DEFINE5(waitid, int, which, pid_t, upid, struct siginfo __user *,
		infop, int, options, struct rusage __user *, ru)
{
	struct pid *pid = NULL;
	enum pid_type type;
	long ret;

	if (options & ~(WNOHANG|WNOWAIT|WEXITED|WSTOPPED|WCONTINUED))
		return -EINVAL;
	if (!(options & (WEXITED|WSTOPPED|WCONTINUED)))
		return -EINVAL;

	switch (which) {
	case P_ALL:
		type = PIDTYPE_MAX;
		break;
	case P_PID:
		type = PIDTYPE_PID;
		if (upid <= 0)
			return -EINVAL;
		break;
	case P_PGID:
		type = PIDTYPE_PGID;
		if (upid <= 0)
			return -EINVAL;
		break;
	default:
		return -EINVAL;
	}

	if (type < PIDTYPE_MAX)
		pid = find_get_pid(upid);
	ret = do_wait(type, pid, options, infop, NULL, ru);
	put_pid(pid);

	/* avoid REGPARM breakage on x86: */
	asmlinkage_protect(5, ret, which, upid, infop, options, ru);
	return ret;
}

SYSCALL_DEFINE4(wait4, pid_t, upid, int __user *, stat_addr,
		int, options, struct rusage __user *, ru)
{
	struct pid *pid = NULL;
	enum pid_type type;
	long ret;

	if (options & ~(WNOHANG|WUNTRACED|WCONTINUED|
			__WNOTHREAD|__WCLONE|__WALL))
		return -EINVAL;

	if (upid == -1)
		type = PIDTYPE_MAX;
	else if (upid < 0) {
		type = PIDTYPE_PGID;
		pid = find_get_pid(-upid);
	} else if (upid == 0) {
		type = PIDTYPE_PGID;
		pid = get_pid(task_pgrp(current));
	} else /* upid > 0 */ {
		type = PIDTYPE_PID;
		pid = find_get_pid(upid);
	}

	ret = do_wait(type, pid, options | WEXITED, NULL, stat_addr, ru);
	put_pid(pid);

	/* avoid REGPARM breakage on x86: */
	asmlinkage_protect(4, ret, upid, stat_addr, options, ru);
	return ret;
}

#ifdef __ARCH_WANT_SYS_WAITPID

/*
 * sys_waitpid() remains for compatibility. waitpid() should be
 * implemented by calling sys_wait4() from libc.a.
 */
SYSCALL_DEFINE3(waitpid, pid_t, pid, int __user *, stat_addr, int, options)
{
	return sys_wait4(pid, stat_addr, options, NULL);
}

#endif