/*
 *  linux/kernel/exit.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/capability.h>
#include <linux/completion.h>
#include <linux/personality.h>
#include <linux/tty.h>
#include <linux/mnt_namespace.h>
#include <linux/key.h>
#include <linux/security.h>
#include <linux/cpu.h>
#include <linux/acct.h>
#include <linux/tsacct_kern.h>
#include <linux/file.h>
#include <linux/binfmts.h>
#include <linux/nsproxy.h>
#include <linux/pid_namespace.h>
#include <linux/ptrace.h>
#include <linux/profile.h>
#include <linux/mount.h>
#include <linux/proc_fs.h>
#include <linux/kthread.h>
#include <linux/mempolicy.h>
#include <linux/taskstats_kern.h>
#include <linux/delayacct.h>
#include <linux/freezer.h>
#include <linux/cgroup.h>
#include <linux/syscalls.h>
#include <linux/signal.h>
#include <linux/posix-timers.h>
#include <linux/cn_proc.h>
#include <linux/mutex.h>
#include <linux/futex.h>
#include <linux/compat.h>
#include <linux/pipe_fs_i.h>
#include <linux/audit.h> /* for audit_free() */
#include <linux/resource.h>
#include <linux/blkdev.h>
#include <linux/task_io_accounting_ops.h>

#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm/pgtable.h>
#include <asm/mmu_context.h>

extern void sem_exit(void);

static void exit_mm(struct task_struct *tsk);

static void __unhash_process(struct task_struct *p)
{
	nr_threads--;
	detach_pid(p, PIDTYPE_PID);
	if (thread_group_leader(p)) {
		detach_pid(p, PIDTYPE_PGID);
		detach_pid(p, PIDTYPE_SID);

		list_del_rcu(&p->tasks);
		__get_cpu_var(process_counts)--;
	}
	list_del_rcu(&p->thread_group);
	remove_parent(p);
}

/*
 * This function expects the tasklist_lock write-locked.
 */
static void __exit_signal(struct task_struct *tsk)
{
	struct signal_struct *sig = tsk->signal;
	struct sighand_struct *sighand;

	BUG_ON(!sig);
	BUG_ON(!atomic_read(&sig->count));

	rcu_read_lock();
	sighand = rcu_dereference(tsk->sighand);
	spin_lock(&sighand->siglock);

	posix_cpu_timers_exit(tsk);
	if (atomic_dec_and_test(&sig->count))
		posix_cpu_timers_exit_group(tsk);
	else {
		/*
		 * If there is any task waiting for the group exit
		 * then notify it:
		 */
		if (sig->group_exit_task && atomic_read(&sig->count) == sig->notify_count)
			wake_up_process(sig->group_exit_task);

		if (tsk == sig->curr_target)
			sig->curr_target = next_thread(tsk);
		/*
		 * Accumulate here the counters for all threads but the
		 * group leader as they die, so they can be added into
		 * the process-wide totals when those are taken.
		 * The group leader stays around as a zombie as long
		 * as there are other threads.  When it gets reaped,
		 * the exit.c code will add its counts into these totals.
		 * We won't ever get here for the group leader, since it
		 * will have been the last reference on the signal_struct.
		 */
		sig->utime = cputime_add(sig->utime, tsk->utime);
		sig->stime = cputime_add(sig->stime, tsk->stime);
		sig->gtime = cputime_add(sig->gtime, tsk->gtime);
		sig->min_flt += tsk->min_flt;
		sig->maj_flt += tsk->maj_flt;
		sig->nvcsw += tsk->nvcsw;
		sig->nivcsw += tsk->nivcsw;
		sig->inblock += task_io_get_inblock(tsk);
		sig->oublock += task_io_get_oublock(tsk);
		sig->sum_sched_runtime += tsk->se.sum_exec_runtime;
		sig = NULL; /* Marker for below. */
	}

	__unhash_process(tsk);

	tsk->signal = NULL;
	tsk->sighand = NULL;
	spin_unlock(&sighand->siglock);
	rcu_read_unlock();

	__cleanup_sighand(sighand);
	clear_tsk_thread_flag(tsk, TIF_SIGPENDING);
	flush_sigqueue(&tsk->pending);
	if (sig) {
		flush_sigqueue(&sig->shared_pending);
		taskstats_tgid_free(sig);
		__cleanup_signal(sig);
	}
}

static void delayed_put_task_struct(struct rcu_head *rhp)
{
	put_task_struct(container_of(rhp, struct task_struct, rcu));
}

void release_task(struct task_struct *p)
{
	struct task_struct *leader;
	int zap_leader;
repeat:
	atomic_dec(&p->user->processes);
	write_lock_irq(&tasklist_lock);
	ptrace_unlink(p);
	BUG_ON(!list_empty(&p->ptrace_list) || !list_empty(&p->ptrace_children));
	__exit_signal(p);

	/*
	 * If we are the last non-leader member of the thread
	 * group, and the leader is zombie, then notify the
	 * group leader's parent process. (if it wants notification.)
	 */
	zap_leader = 0;
	leader = p->group_leader;
	if (leader != p && thread_group_empty(leader) && leader->exit_state == EXIT_ZOMBIE) {
		BUG_ON(leader->exit_signal == -1);
		do_notify_parent(leader, leader->exit_signal);
		/*
		 * If we were the last child thread and the leader has
		 * exited already, and the leader's parent ignores SIGCHLD,
		 * then we are the one who should release the leader.
		 *
		 * do_notify_parent() will have marked it self-reaping in
		 * that case.
		 */
		zap_leader = (leader->exit_signal == -1);
	}

	write_unlock_irq(&tasklist_lock);
	proc_flush_task(p);
	release_thread(p);
	call_rcu(&p->rcu, delayed_put_task_struct);

	p = leader;
	if (unlikely(zap_leader))
		goto repeat;
}

/*
 * This checks not only the pgrp, but falls back on the pid if no
 * satisfactory pgrp is found. I dunno - gdb doesn't work correctly
 * without this...
 *
 * The caller must hold rcu lock or the tasklist lock.
 */
struct pid *session_of_pgrp(struct pid *pgrp)
{
	struct task_struct *p;
	struct pid *sid = NULL;

	p = pid_task(pgrp, PIDTYPE_PGID);
	if (p == NULL)
		p = pid_task(pgrp, PIDTYPE_PID);
	if (p != NULL)
		sid = task_session(p);

	return sid;
}

/*
 * Determine if a process group is "orphaned", according to the POSIX
 * definition in 2.2.2.52.  Orphaned process groups are not to be affected
 * by terminal-generated stop signals.  Newly orphaned process groups are
 * to receive a SIGHUP and a SIGCONT.
 *
 * "I ask you, have you ever known what it is to be an orphan?"
 */
static int will_become_orphaned_pgrp(struct pid *pgrp, struct task_struct *ignored_task)
{
	struct task_struct *p;
	int ret = 1;

	do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
		if (p == ignored_task
				|| p->exit_state
				|| is_global_init(p->real_parent))
			continue;
		if (task_pgrp(p->real_parent) != pgrp &&
		    task_session(p->real_parent) == task_session(p)) {
			ret = 0;
			break;
		}
	} while_each_pid_task(pgrp, PIDTYPE_PGID, p);
	return ret;	/* (sighing) "Often!" */
}

int is_current_pgrp_orphaned(void)
{
	int retval;

	read_lock(&tasklist_lock);
	retval = will_become_orphaned_pgrp(task_pgrp(current), NULL);
	read_unlock(&tasklist_lock);

	return retval;
}

static int has_stopped_jobs(struct pid *pgrp)
{
	int retval = 0;
	struct task_struct *p;

	do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
		if (p->state != TASK_STOPPED)
			continue;
		retval = 1;
		break;
	} while_each_pid_task(pgrp, PIDTYPE_PGID, p);
	return retval;
}
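
/*
 * Illustrative userspace sketch (not compiled here): the POSIX rule that
 * will_become_orphaned_pgrp() and has_stopped_jobs() implement.  When a
 * parent's exit leaves a process group with no member whose parent is in
 * a different group of the same session, and that group contains a
 * stopped job, the group receives SIGHUP followed by SIGCONT.  The demo
 * program below is hypothetical test code, not part of this file.
 */
#if 0
#include <signal.h>
#include <stdio.h>
#include <unistd.h>

static void on_sighup(int sig)
{
	static const char msg[] = "child: SIGHUP, our pgrp was orphaned\n";
	write(1, msg, sizeof(msg) - 1);
}

int main(void)
{
	if (fork() == 0) {
		signal(SIGHUP, on_sighup);
		setpgid(0, 0);		/* become our own process group */
		raise(SIGSTOP);		/* a "stopped job" in that group */
		/* We resume here only via the SIGCONT that follows SIGHUP. */
		printf("child: continued\n");
		return 0;
	}
	sleep(1);	/* let the child stop itself */
	return 0;	/* parent exit orphans the child's pgrp */
}
#endif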
/**
 * reparent_to_kthreadd - Reparent the calling kernel thread to kthreadd
 *
 * If a kernel thread is launched as a result of a system call, or if
 * it ever exits, it should generally reparent itself to kthreadd so it
 * isn't in the way of other processes and is correctly cleaned up on exit.
 *
 * The various task state such as scheduling policy and priority may have
 * been inherited from a user process, so we reset them to sane values here.
 *
 * NOTE that reparent_to_kthreadd() gives the caller full capabilities.
 */
static void reparent_to_kthreadd(void)
{
	write_lock_irq(&tasklist_lock);

	ptrace_unlink(current);
	/* Reparent to init */
	remove_parent(current);
	current->real_parent = current->parent = kthreadd_task;
	add_parent(current);

	/* Set the exit signal to SIGCHLD so we signal init on exit */
	current->exit_signal = SIGCHLD;

	if (task_nice(current) < 0)
		set_user_nice(current, 0);
	/* cpus_allowed? */
	/* rt_priority? */
	/* signals? */
	security_task_reparent_to_init(current);
	memcpy(current->signal->rlim, init_task.signal->rlim,
	       sizeof(current->signal->rlim));
	atomic_inc(&(INIT_USER->__count));
	write_unlock_irq(&tasklist_lock);
	switch_uid(INIT_USER);
}

void __set_special_pids(pid_t session, pid_t pgrp)
{
	struct task_struct *curr = current->group_leader;

	if (task_session_nr(curr) != session) {
		detach_pid(curr, PIDTYPE_SID);
		set_task_session(curr, session);
		attach_pid(curr, PIDTYPE_SID, find_pid(session));
	}
	if (task_pgrp_nr(curr) != pgrp) {
		detach_pid(curr, PIDTYPE_PGID);
		curr->signal->pgrp = pgrp;
		attach_pid(curr, PIDTYPE_PGID, find_pid(pgrp));
	}
}

static void set_special_pids(pid_t session, pid_t pgrp)
{
	write_lock_irq(&tasklist_lock);
	__set_special_pids(session, pgrp);
	write_unlock_irq(&tasklist_lock);
}

/*
 * Let kernel threads use this to say that they
 * allow a certain signal (since daemonize() will
 * have disabled all of them by default).
 */
int allow_signal(int sig)
{
	if (!valid_signal(sig) || sig < 1)
		return -EINVAL;

	spin_lock_irq(&current->sighand->siglock);
	sigdelset(&current->blocked, sig);
	if (!current->mm) {
		/* Kernel threads handle their own signals.
		   Let the signal code know it'll be handled, so
		   that they don't get converted to SIGKILL or
		   just silently dropped */
		current->sighand->action[(sig)-1].sa.sa_handler = (void __user *)2;
	}
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);
	return 0;
}

EXPORT_SYMBOL(allow_signal);

int disallow_signal(int sig)
{
	if (!valid_signal(sig) || sig < 1)
		return -EINVAL;

	spin_lock_irq(&current->sighand->siglock);
	current->sighand->action[(sig)-1].sa.sa_handler = SIG_IGN;
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);
	return 0;
}

EXPORT_SYMBOL(disallow_signal);

/*
 * Put all the gunge required to become a kernel thread without
 * attached user resources in one place where it belongs.
 */
void daemonize(const char *name, ...)
{
	va_list args;
	struct fs_struct *fs;
	sigset_t blocked;

	va_start(args, name);
	vsnprintf(current->comm, sizeof(current->comm), name, args);
	va_end(args);

	/*
	 * If we were started as result of loading a module, close all of the
	 * user space pages.  We don't need them, and if we didn't close them
	 * they would be locked into memory.
	 */
	exit_mm(current);
	/*
	 * We don't want to have TIF_FREEZE set if the system-wide hibernation
	 * or suspend transition begins right now.
	 */
	current->flags |= PF_NOFREEZE;

	set_special_pids(1, 1);
	proc_clear_tty(current);

	/* Block and flush all signals */
	sigfillset(&blocked);
	sigprocmask(SIG_BLOCK, &blocked, NULL);
	flush_signals(current);

	/* Become as one with the init task */
	exit_fs(current);	/* current->fs->count--; */
	fs = init_task.fs;
	current->fs = fs;
	atomic_inc(&fs->count);

	if (current->nsproxy != init_task.nsproxy) {
		get_nsproxy(init_task.nsproxy);
		switch_task_namespaces(current, init_task.nsproxy);
	}

	exit_files(current);
	current->files = init_task.files;
	atomic_inc(&current->files->count);

	reparent_to_kthreadd();
}

EXPORT_SYMBOL(daemonize);
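
/*
 * Illustrative sketch (not compiled here): the typical shape of a legacy
 * kernel thread built on daemonize() and allow_signal(), as the comments
 * above describe.  The thread function and its start-up line are
 * hypothetical; new code would use the kthread API instead.
 */
#if 0
static int my_loop_thread(void *unused)
{
	daemonize("my_loop_thread");	/* shed user-space resources */
	allow_signal(SIGKILL);		/* daemonize() blocked everything */

	while (!signal_pending(current)) {
		/* ... periodic work ... */
		schedule_timeout_interruptible(HZ);
	}
	return 0;			/* SIGKILL is our stop request */
}
/* started elsewhere with: kernel_thread(my_loop_thread, NULL, CLONE_KERNEL); */
#endif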
static void close_files(struct files_struct *files)
{
	int i, j;
	struct fdtable *fdt;

	j = 0;

	/*
	 * It is safe to dereference the fd table without RCU or
	 * ->file_lock because this is the last reference to the
	 * files structure.
	 */
	fdt = files_fdtable(files);
	for (;;) {
		unsigned long set;
		i = j * __NFDBITS;
		if (i >= fdt->max_fds)
			break;
		set = fdt->open_fds->fds_bits[j++];
		while (set) {
			if (set & 1) {
				struct file *file = xchg(&fdt->fd[i], NULL);
				if (file) {
					filp_close(file, files);
					cond_resched();
				}
			}
			i++;
			set >>= 1;
		}
	}
}

struct files_struct *get_files_struct(struct task_struct *task)
{
	struct files_struct *files;

	task_lock(task);
	files = task->files;
	if (files)
		atomic_inc(&files->count);
	task_unlock(task);

	return files;
}

void fastcall put_files_struct(struct files_struct *files)
{
	struct fdtable *fdt;

	if (atomic_dec_and_test(&files->count)) {
		close_files(files);
		/*
		 * Free the fd and fdset arrays if we expanded them.
		 * If the fdtable was embedded, pass files for freeing
		 * at the end of the RCU grace period. Otherwise,
		 * you can free files immediately.
		 */
		fdt = files_fdtable(files);
		if (fdt != &files->fdtab)
			kmem_cache_free(files_cachep, files);
		free_fdtable(fdt);
	}
}

EXPORT_SYMBOL(put_files_struct);

void reset_files_struct(struct task_struct *tsk, struct files_struct *files)
{
	struct files_struct *old;

	old = tsk->files;
	task_lock(tsk);
	tsk->files = files;
	task_unlock(tsk);
	put_files_struct(old);
}

EXPORT_SYMBOL(reset_files_struct);

static inline void __exit_files(struct task_struct *tsk)
{
	struct files_struct *files = tsk->files;

	if (files) {
		task_lock(tsk);
		tsk->files = NULL;
		task_unlock(tsk);
		put_files_struct(files);
	}
}

void exit_files(struct task_struct *tsk)
{
	__exit_files(tsk);
}

static inline void __put_fs_struct(struct fs_struct *fs)
{
	/* No need to hold fs->lock if we are killing it */
	if (atomic_dec_and_test(&fs->count)) {
		dput(fs->root);
		mntput(fs->rootmnt);
		dput(fs->pwd);
		mntput(fs->pwdmnt);
		if (fs->altroot) {
			dput(fs->altroot);
			mntput(fs->altrootmnt);
		}
		kmem_cache_free(fs_cachep, fs);
	}
}

void put_fs_struct(struct fs_struct *fs)
{
	__put_fs_struct(fs);
}

static inline void __exit_fs(struct task_struct *tsk)
{
	struct fs_struct *fs = tsk->fs;

	if (fs) {
		task_lock(tsk);
		tsk->fs = NULL;
		task_unlock(tsk);
		__put_fs_struct(fs);
	}
}

void exit_fs(struct task_struct *tsk)
{
	__exit_fs(tsk);
}

EXPORT_SYMBOL_GPL(exit_fs);
/*
 * Turn us into a lazy TLB process if we
 * aren't already..
 */
static void exit_mm(struct task_struct *tsk)
{
	struct mm_struct *mm = tsk->mm;

	mm_release(tsk, mm);
	if (!mm)
		return;
	/*
	 * Serialize with any possible pending coredump.
	 * We must hold mmap_sem around checking core_waiters
	 * and clearing tsk->mm.  The core-inducing thread
	 * will increment core_waiters for each thread in the
	 * group with ->mm != NULL.
	 */
	down_read(&mm->mmap_sem);
	if (mm->core_waiters) {
		up_read(&mm->mmap_sem);
		down_write(&mm->mmap_sem);
		if (!--mm->core_waiters)
			complete(mm->core_startup_done);
		up_write(&mm->mmap_sem);

		wait_for_completion(&mm->core_done);
		down_read(&mm->mmap_sem);
	}
	atomic_inc(&mm->mm_count);
	BUG_ON(mm != tsk->active_mm);
	/* more a memory barrier than a real lock */
	task_lock(tsk);
	tsk->mm = NULL;
	up_read(&mm->mmap_sem);
	enter_lazy_tlb(mm, current);
	/* We don't want this task to be frozen prematurely */
	clear_freeze_flag(tsk);
	task_unlock(tsk);
	mmput(mm);
}
static void
reparent_thread(struct task_struct *p, struct task_struct *father, int traced)
{
	if (p->pdeath_signal)
		/* We already hold the tasklist_lock here.  */
		group_send_sig_info(p->pdeath_signal, SEND_SIG_NOINFO, p);

	/* Move the child from its dying parent to the new one.  */
	if (unlikely(traced)) {
		/* Preserve ptrace links if someone else is tracing this child.  */
		list_del_init(&p->ptrace_list);
		if (p->parent != p->real_parent)
			list_add(&p->ptrace_list, &p->real_parent->ptrace_children);
	} else {
		/* If this child is being traced, then we're the one tracing it
		 * anyway, so let go of it.
		 */
		p->ptrace = 0;
		remove_parent(p);
		p->parent = p->real_parent;
		add_parent(p);

		if (p->state == TASK_TRACED) {
			/*
			 * If it was at a trace stop, turn it into
			 * a normal stop since it's no longer being
			 * traced.
			 */
			ptrace_untrace(p);
		}
	}

	/* If this is a threaded reparent there is no need to
	 * notify anyone anything has happened.
	 */
	if (p->real_parent->group_leader == father->group_leader)
		return;

	/* We don't want people slaying init.  */
	if (p->exit_signal != -1)
		p->exit_signal = SIGCHLD;

	/* If we'd notified the old parent about this child's death,
	 * also notify the new parent.
	 */
	if (!traced && p->exit_state == EXIT_ZOMBIE &&
	    p->exit_signal != -1 && thread_group_empty(p))
		do_notify_parent(p, p->exit_signal);

	/*
	 * process group orphan check
	 * Case ii: Our child is in a different pgrp
	 * than we are, and it was the only connection
	 * outside, so the child pgrp is now orphaned.
	 */
	if ((task_pgrp(p) != task_pgrp(father)) &&
	    (task_session(p) == task_session(father))) {
		struct pid *pgrp = task_pgrp(p);

		if (will_become_orphaned_pgrp(pgrp, NULL) &&
		    has_stopped_jobs(pgrp)) {
			__kill_pgrp_info(SIGHUP, SEND_SIG_PRIV, pgrp);
			__kill_pgrp_info(SIGCONT, SEND_SIG_PRIV, pgrp);
		}
	}
}
/*
 * When we die, we re-parent all our children.
 * Try to give them to another thread in our thread
 * group, and if no such member exists, give it to
 * the child reaper process (ie "init") in our pid
 * space.
 */
static void
forget_original_parent(struct task_struct *father, struct list_head *to_release)
{
	struct task_struct *p, *n, *reaper = father;

	do {
		reaper = next_thread(reaper);
		if (reaper == father) {
			reaper = task_child_reaper(father);
			break;
		}
	} while (reaper->exit_state);

	/*
	 * There are only two places where our children can be:
	 *
	 * - in our child list
	 * - in our ptraced child list
	 *
	 * Search them and reparent children.
	 */
	list_for_each_entry_safe(p, n, &father->children, sibling) {
		int ptrace;

		ptrace = p->ptrace;

		/* if father isn't the real parent, then ptrace must be enabled */
		BUG_ON(father != p->real_parent && !ptrace);

		if (father == p->real_parent) {
			/* we are the real parent, so hand the child to the reaper */
			p->real_parent = reaper;
			reparent_thread(p, father, 0);
		} else {
			/* reparent ptraced task to its real parent */
			__ptrace_unlink(p);
			if (p->exit_state == EXIT_ZOMBIE && p->exit_signal != -1 &&
			    thread_group_empty(p))
				do_notify_parent(p, p->exit_signal);
		}

		/*
		 * if the ptraced child is a zombie with exit_signal == -1
		 * we must collect it before we exit, or it will remain
		 * zombie forever, since we prevented it from reaping itself
		 * while it was being traced by us, so that we could see it
		 * in wait4.
		 */
		if (unlikely(ptrace && p->exit_state == EXIT_ZOMBIE && p->exit_signal == -1))
			list_add(&p->ptrace_list, to_release);
	}
	list_for_each_entry_safe(p, n, &father->ptrace_children, ptrace_list) {
		p->real_parent = reaper;
		reparent_thread(p, father, 1);
	}
}
/*
 * Send signals to all our closest relatives so that they know
 * to properly mourn us..
 */
static void exit_notify(struct task_struct *tsk)
{
	int state;
	struct task_struct *t;
	struct list_head ptrace_dead, *_p, *_n;
	struct pid *pgrp;

	if (signal_pending(tsk) && !(tsk->signal->flags & SIGNAL_GROUP_EXIT)
	    && !thread_group_empty(tsk)) {
		/*
		 * This occurs when there was a race between our exit
		 * syscall and a group signal choosing us as the one to
		 * wake up.  It could be that we are the only thread
		 * alerted to check for pending signals, but another thread
		 * should be woken now to take the signal since we will not.
		 * Now we'll wake all the threads in the group just to make
		 * sure someone gets all the pending signals.
		 */
		spin_lock_irq(&tsk->sighand->siglock);
		for (t = next_thread(tsk); t != tsk; t = next_thread(t))
			if (!signal_pending(t) && !(t->flags & PF_EXITING))
				recalc_sigpending_and_wake(t);
		spin_unlock_irq(&tsk->sighand->siglock);
	}

	write_lock_irq(&tasklist_lock);

	/*
	 * This does two things:
	 *
	 * A.  Make init inherit all the child processes
	 * B.  Check to see if any process groups have become orphaned
	 *	as a result of our exiting, and if they have any stopped
	 *	jobs, send them a SIGHUP and then a SIGCONT.  (POSIX 3.2.2.2)
	 */
	INIT_LIST_HEAD(&ptrace_dead);
	forget_original_parent(tsk, &ptrace_dead);
	BUG_ON(!list_empty(&tsk->children));
	BUG_ON(!list_empty(&tsk->ptrace_children));

	/*
	 * Check to see if any process groups have become orphaned
	 * as a result of our exiting, and if they have any stopped
	 * jobs, send them a SIGHUP and then a SIGCONT.  (POSIX 3.2.2.2)
	 *
	 * Case i: Our father is in a different pgrp than we are
	 * and we were the only connection outside, so our pgrp
	 * is about to become orphaned.
	 */
	t = tsk->real_parent;

	pgrp = task_pgrp(tsk);
	if ((task_pgrp(t) != pgrp) &&
	    (task_session(t) == task_session(tsk)) &&
	    will_become_orphaned_pgrp(pgrp, tsk) &&
	    has_stopped_jobs(pgrp)) {
		__kill_pgrp_info(SIGHUP, SEND_SIG_PRIV, pgrp);
		__kill_pgrp_info(SIGCONT, SEND_SIG_PRIV, pgrp);
	}

	/* Let father know we died
	 *
	 * Thread signals are configurable, but you aren't going to use
	 * that to send signals to arbitrary processes.
	 * That stops right now.
	 *
	 * If the parent exec id doesn't match the exec id we saved
	 * when we started then we know the parent has changed security
	 * domain.
	 *
	 * If our self_exec id doesn't match our parent_exec_id then
	 * we have changed execution domain as these two values started
	 * the same after a fork.
	 */
	if (tsk->exit_signal != SIGCHLD && tsk->exit_signal != -1 &&
	    (tsk->parent_exec_id != t->self_exec_id ||
	     tsk->self_exec_id != tsk->parent_exec_id)
	    && !capable(CAP_KILL))
		tsk->exit_signal = SIGCHLD;

	/* If something other than our normal parent is ptracing us, then
	 * send it a SIGCHLD instead of honoring exit_signal.  exit_signal
	 * only has special meaning to our real parent.
	 */
	if (tsk->exit_signal != -1 && thread_group_empty(tsk)) {
		int signal = tsk->parent == tsk->real_parent ? tsk->exit_signal : SIGCHLD;
		do_notify_parent(tsk, signal);
	} else if (tsk->ptrace) {
		do_notify_parent(tsk, SIGCHLD);
	}

	state = EXIT_ZOMBIE;
	if (tsk->exit_signal == -1 && likely(!tsk->ptrace))
		state = EXIT_DEAD;
	tsk->exit_state = state;

	if (thread_group_leader(tsk) &&
	    tsk->signal->notify_count < 0 &&
	    tsk->signal->group_exit_task)
		wake_up_process(tsk->signal->group_exit_task);

	write_unlock_irq(&tasklist_lock);

	list_for_each_safe(_p, _n, &ptrace_dead) {
		list_del_init(_p);
		t = list_entry(_p, struct task_struct, ptrace_list);
		release_task(t);
	}

	/* If the process is dead, release it - nobody will wait for it */
	if (state == EXIT_DEAD)
		release_task(tsk);
}
#ifdef CONFIG_DEBUG_STACK_USAGE
static void check_stack_usage(void)
{
	static DEFINE_SPINLOCK(low_water_lock);
	static int lowest_to_date = THREAD_SIZE;
	unsigned long *n = end_of_stack(current);
	unsigned long free;

	while (*n == 0)
		n++;
	free = (unsigned long)n - (unsigned long)end_of_stack(current);

	if (free >= lowest_to_date)
		return;

	spin_lock(&low_water_lock);
	if (free < lowest_to_date) {
		printk(KERN_WARNING "%s used greatest stack depth: %lu bytes "
				"left\n",
				current->comm, free);
		lowest_to_date = free;
	}
	spin_unlock(&low_water_lock);
}
#else
static inline void check_stack_usage(void) {}
#endif

static inline void exit_child_reaper(struct task_struct *tsk)
{
	if (likely(tsk->group_leader != task_child_reaper(tsk)))
		return;

	panic("Attempted to kill init!");
}
fastcall NORET_TYPE void do_exit(long code)
{
	struct task_struct *tsk = current;
	int group_dead;

	profile_task_exit(tsk);

	WARN_ON(atomic_read(&tsk->fs_excl));

	if (unlikely(in_interrupt()))
		panic("Aiee, killing interrupt handler!");
	if (unlikely(!tsk->pid))
		panic("Attempted to kill the idle task!");

	if (unlikely(current->ptrace & PT_TRACE_EXIT)) {
		current->ptrace_message = code;
		ptrace_notify((PTRACE_EVENT_EXIT << 8) | SIGTRAP);
	}

	/*
	 * We're taking recursive faults here in do_exit. Safest is to just
	 * leave this task alone and wait for reboot.
	 */
	if (unlikely(tsk->flags & PF_EXITING)) {
		printk(KERN_ALERT
			"Fixing recursive fault but reboot is needed!\n");
		/*
		 * We can do this unlocked here. The futex code uses
		 * this flag just to verify whether the pi state
		 * cleanup has been done or not. In the worst case it
		 * loops once more. We pretend that the cleanup was
		 * done as there is no way to return. Either the
		 * OWNER_DIED bit is set by now or we push the blocked
		 * task into the wait-forever nirvana as well.
		 */
		tsk->flags |= PF_EXITPIDONE;
		if (tsk->io_context)
			exit_io_context();
		set_current_state(TASK_UNINTERRUPTIBLE);
		schedule();
	}

	tsk->flags |= PF_EXITING;
	/*
	 * tsk->flags are checked in the futex code to protect against
	 * an exiting task cleaning up the robust pi futexes.
	 */
	smp_mb();
	spin_unlock_wait(&tsk->pi_lock);

	if (unlikely(in_atomic()))
		printk(KERN_INFO "note: %s[%d] exited with preempt_count %d\n",
				current->comm, current->pid,
				preempt_count());

	acct_update_integrals(tsk);
	if (tsk->mm) {
		update_hiwater_rss(tsk->mm);
		update_hiwater_vm(tsk->mm);
	}
	group_dead = atomic_dec_and_test(&tsk->signal->live);
	if (group_dead) {
		exit_child_reaper(tsk);
		hrtimer_cancel(&tsk->signal->real_timer);
		exit_itimers(tsk->signal);
	}
	acct_collect(code, group_dead);
#ifdef CONFIG_FUTEX
	if (unlikely(tsk->robust_list))
		exit_robust_list(tsk);
#ifdef CONFIG_COMPAT
	if (unlikely(tsk->compat_robust_list))
		compat_exit_robust_list(tsk);
#endif
#endif
	if (group_dead)
		tty_audit_exit();
	if (unlikely(tsk->audit_context))
		audit_free(tsk);

	tsk->exit_code = code;
	taskstats_exit(tsk, group_dead);

	exit_mm(tsk);

	if (group_dead)
		acct_process();
	exit_sem(tsk);
	__exit_files(tsk);
	__exit_fs(tsk);
	check_stack_usage();
	exit_thread();
	cgroup_exit(tsk, 1);
	exit_keys(tsk);

	if (group_dead && tsk->signal->leader)
		disassociate_ctty(1);

	module_put(task_thread_info(tsk)->exec_domain->module);
	if (tsk->binfmt)
		module_put(tsk->binfmt->module);

	proc_exit_connector(tsk);
	exit_task_namespaces(tsk);
	exit_notify(tsk);
#ifdef CONFIG_NUMA
	mpol_free(tsk->mempolicy);
	tsk->mempolicy = NULL;
#endif
#ifdef CONFIG_FUTEX
	/*
	 * This must happen late, after the PID is not
	 * hashed anymore:
	 */
	if (unlikely(!list_empty(&tsk->pi_state_list)))
		exit_pi_state_list(tsk);
	if (unlikely(current->pi_state_cache))
		kfree(current->pi_state_cache);
#endif
	/*
	 * Make sure we are holding no locks:
	 */
	debug_check_no_locks_held(tsk);
	/*
	 * We can do this unlocked here. The futex code uses this flag
	 * just to verify whether the pi state cleanup has been done
	 * or not. In the worst case it loops once more.
	 */
	tsk->flags |= PF_EXITPIDONE;

	if (tsk->io_context)
		exit_io_context();

	if (tsk->splice_pipe)
		__free_pipe_info(tsk->splice_pipe);

	preempt_disable();
	/* causes final put_task_struct in finish_task_switch(). */
	tsk->state = TASK_DEAD;

	schedule();
	BUG();
	/* Avoid "noreturn function does return".  */
	for (;;)
		cpu_relax();	/* For when BUG is null */
}

EXPORT_SYMBOL_GPL(do_exit);
NORET_TYPE void complete_and_exit(struct completion *comp, long code)
{
	if (comp)
		complete(comp);

	do_exit(code);
}

EXPORT_SYMBOL(complete_and_exit);

asmlinkage long sys_exit(int error_code)
{
	do_exit((error_code & 0xff) << 8);
}
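
/*
 * Illustrative userspace sketch (not compiled here): the encoding above,
 * (error_code & 0xff) << 8, is exactly what WIFEXITED()/WEXITSTATUS()
 * undo on the wait side, so _exit(42) is reported back as 42.
 */
#if 0
#include <stdio.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
	int status;
	pid_t pid = fork();

	if (pid == 0)
		_exit(42);		/* low byte lands in status bits 8-15 */
	waitpid(pid, &status, 0);
	if (WIFEXITED(status))		/* true: (status & 0x7f) == 0 */
		printf("exit status %d\n", WEXITSTATUS(status));	/* 42 */
	return 0;
}
#endif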
/*
 * Take down every thread in the group.  This is called by fatal signals
 * as well as by sys_exit_group (below).
 */
NORET_TYPE void
do_group_exit(int exit_code)
{
	BUG_ON(exit_code & 0x80); /* core dumps don't get here */

	if (current->signal->flags & SIGNAL_GROUP_EXIT)
		exit_code = current->signal->group_exit_code;
	else if (!thread_group_empty(current)) {
		struct signal_struct *const sig = current->signal;
		struct sighand_struct *const sighand = current->sighand;

		spin_lock_irq(&sighand->siglock);
		if (sig->flags & SIGNAL_GROUP_EXIT)
			/* Another thread got here before we took the lock.  */
			exit_code = sig->group_exit_code;
		else {
			sig->group_exit_code = exit_code;
			zap_other_threads(current);
		}
		spin_unlock_irq(&sighand->siglock);
	}

	do_exit(exit_code);
	/* NOTREACHED */
}

/*
 * this kills every thread in the thread group. Note that any externally
 * wait4()-ing process will get the correct exit code - even if this
 * thread is not the thread group leader.
 */
asmlinkage void sys_exit_group(int error_code)
{
	do_group_exit((error_code & 0xff) << 8);
}

static int eligible_child(pid_t pid, int options, struct task_struct *p)
{
	int err;

	if (pid > 0) {
		if (p->pid != pid)
			return 0;
	} else if (!pid) {
		if (task_pgrp_nr(p) != task_pgrp_nr(current))
			return 0;
	} else if (pid != -1) {
		if (task_pgrp_nr(p) != -pid)
			return 0;
	}

	/*
	 * Do not consider detached threads that are
	 * not ptraced:
	 */
	if (p->exit_signal == -1 && !p->ptrace)
		return 0;

	/* Wait for all children (clone and not) if __WALL is set;
	 * otherwise, wait for clone children *only* if __WCLONE is
	 * set; otherwise, wait for non-clone children *only*.  (Note:
	 * A "clone" child here is one that reports to its parent
	 * using a signal other than SIGCHLD.) */
	if (((p->exit_signal != SIGCHLD) ^ ((options & __WCLONE) != 0))
	    && !(options & __WALL))
		return 0;

	/*
	 * Do not consider thread group leaders that are
	 * in a non-empty thread group:
	 */
	if (delay_group_leader(p))
		return 2;

	err = security_task_wait(p);
	if (err)
		return err;

	return 1;
}
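
/*
 * Illustrative userspace sketch (not compiled here): a "clone" child in
 * the sense checked above is one whose termination signal - the low byte
 * of the clone() flags - is not SIGCHLD.  A plain wait4() skips such
 * children; __WCLONE (or __WALL) is needed to see them.  Assumes glibc
 * exposes __WCLONE; the stack size and child function are hypothetical.
 */
#if 0
#define _GNU_SOURCE
#include <sched.h>
#include <signal.h>
#include <sys/resource.h>
#include <sys/wait.h>

static char child_stack[16384];

static int child_fn(void *unused)
{
	return 0;
}

int main(void)
{
	/* Terminates with SIGUSR1 instead of SIGCHLD: a "clone" child. */
	pid_t pid = clone(child_fn, child_stack + sizeof(child_stack),
			  SIGUSR1, NULL);

	/* wait4(pid, NULL, 0, NULL) would see nothing here. */
	wait4(pid, NULL, __WCLONE, NULL);
	return 0;
}
#endif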
static int wait_noreap_copyout(struct task_struct *p, pid_t pid, uid_t uid,
			       int why, int status,
			       struct siginfo __user *infop,
			       struct rusage __user *rusagep)
{
	int retval = rusagep ? getrusage(p, RUSAGE_BOTH, rusagep) : 0;

	put_task_struct(p);
	if (!retval)
		retval = put_user(SIGCHLD, &infop->si_signo);
	if (!retval)
		retval = put_user(0, &infop->si_errno);
	if (!retval)
		retval = put_user((short)why, &infop->si_code);
	if (!retval)
		retval = put_user(pid, &infop->si_pid);
	if (!retval)
		retval = put_user(uid, &infop->si_uid);
	if (!retval)
		retval = put_user(status, &infop->si_status);
	if (!retval)
		retval = pid;
	return retval;
}

/*
 * Handle sys_wait4 work for one task in state EXIT_ZOMBIE.  We hold
 * read_lock(&tasklist_lock) on entry.  If we return zero, we still hold
 * the lock and this task is uninteresting.  If we return nonzero, we have
 * released the lock and the system call should return.
 */
static int wait_task_zombie(struct task_struct *p, int noreap,
			    struct siginfo __user *infop,
			    int __user *stat_addr, struct rusage __user *ru)
{
	unsigned long state;
	int retval, status, traced;

	if (unlikely(noreap)) {
		pid_t pid = p->pid;
		uid_t uid = p->uid;
		int exit_code = p->exit_code;
		int why, status;

		if (unlikely(p->exit_state != EXIT_ZOMBIE))
			return 0;
		if (unlikely(p->exit_signal == -1 && p->ptrace == 0))
			return 0;
		get_task_struct(p);
		read_unlock(&tasklist_lock);
		if ((exit_code & 0x7f) == 0) {
			why = CLD_EXITED;
			status = exit_code >> 8;
		} else {
			why = (exit_code & 0x80) ? CLD_DUMPED : CLD_KILLED;
			status = exit_code & 0x7f;
		}
		return wait_noreap_copyout(p, pid, uid, why,
					   status, infop, ru);
	}

	/*
	 * Try to move the task's state to DEAD
	 * only one thread is allowed to do this:
	 */
	state = xchg(&p->exit_state, EXIT_DEAD);
	if (state != EXIT_ZOMBIE) {
		BUG_ON(state != EXIT_DEAD);
		return 0;
	}

	/* traced means p->ptrace, but not vice versa */
	traced = (p->real_parent != p->parent);

	if (likely(!traced)) {
		struct signal_struct *psig;
		struct signal_struct *sig;

		/*
		 * The resource counters for the group leader are in its
		 * own task_struct.  Those for dead threads in the group
		 * are in its signal_struct, as are those for the child
		 * processes it has previously reaped.  All these
		 * accumulate in the parent's signal_struct c* fields.
		 *
		 * We don't bother to take a lock here to protect these
		 * p->signal fields, because they are only touched by
		 * __exit_signal, which runs with tasklist_lock
		 * write-locked anyway, and so is excluded here.  We do
		 * need to protect the access to p->parent->signal fields,
		 * as other threads in the parent group can be right
		 * here reaping other children at the same time.
		 */
		spin_lock_irq(&p->parent->sighand->siglock);
		psig = p->parent->signal;
		sig = p->signal;
		psig->cutime =
			cputime_add(psig->cutime,
			cputime_add(p->utime,
			cputime_add(sig->utime,
				    sig->cutime)));
		psig->cstime =
			cputime_add(psig->cstime,
			cputime_add(p->stime,
			cputime_add(sig->stime,
				    sig->cstime)));
		psig->cgtime =
			cputime_add(psig->cgtime,
			cputime_add(p->gtime,
			cputime_add(sig->gtime,
				    sig->cgtime)));
		psig->cmin_flt +=
			p->min_flt + sig->min_flt + sig->cmin_flt;
		psig->cmaj_flt +=
			p->maj_flt + sig->maj_flt + sig->cmaj_flt;
		psig->cnvcsw +=
			p->nvcsw + sig->nvcsw + sig->cnvcsw;
		psig->cnivcsw +=
			p->nivcsw + sig->nivcsw + sig->cnivcsw;
		psig->cinblock +=
			task_io_get_inblock(p) +
			sig->inblock + sig->cinblock;
		psig->coublock +=
			task_io_get_oublock(p) +
			sig->oublock + sig->coublock;
		spin_unlock_irq(&p->parent->sighand->siglock);
	}

	/*
	 * Now we are sure this task is interesting, and no other
	 * thread can reap it because we set its state to EXIT_DEAD.
	 */
	read_unlock(&tasklist_lock);

	retval = ru ? getrusage(p, RUSAGE_BOTH, ru) : 0;
	status = (p->signal->flags & SIGNAL_GROUP_EXIT)
		? p->signal->group_exit_code : p->exit_code;
	if (!retval && stat_addr)
		retval = put_user(status, stat_addr);
	if (!retval && infop)
		retval = put_user(SIGCHLD, &infop->si_signo);
	if (!retval && infop)
		retval = put_user(0, &infop->si_errno);
	if (!retval && infop) {
		int why;

		if ((status & 0x7f) == 0) {
			why = CLD_EXITED;
			status >>= 8;
		} else {
			why = (status & 0x80) ? CLD_DUMPED : CLD_KILLED;
			status &= 0x7f;
		}
		retval = put_user((short)why, &infop->si_code);
		if (!retval)
			retval = put_user(status, &infop->si_status);
	}
	if (!retval && infop)
		retval = put_user(p->pid, &infop->si_pid);
	if (!retval && infop)
		retval = put_user(p->uid, &infop->si_uid);
	if (!retval)
		retval = p->pid;

	if (traced) {
		write_lock_irq(&tasklist_lock);
		/* We dropped tasklist, ptracer could die and untrace */
		ptrace_unlink(p);
		/*
		 * If this is not a detached task, notify the parent.
		 * If it's still not detached after that, don't release
		 * it now.
		 */
		if (p->exit_signal != -1) {
			do_notify_parent(p, p->exit_signal);
			if (p->exit_signal != -1) {
				p->exit_state = EXIT_ZOMBIE;
				p = NULL;
			}
		}
		write_unlock_irq(&tasklist_lock);
	}
	if (p != NULL)
		release_task(p);

	return retval;
}
/*
 * Handle sys_wait4 work for one task in state TASK_STOPPED.  We hold
 * read_lock(&tasklist_lock) on entry.  If we return zero, we still hold
 * the lock and this task is uninteresting.  If we return nonzero, we have
 * released the lock and the system call should return.
 */
static int wait_task_stopped(struct task_struct *p, int delayed_group_leader,
			     int noreap, struct siginfo __user *infop,
			     int __user *stat_addr, struct rusage __user *ru)
{
	int retval, exit_code;

	if (!p->exit_code)
		return 0;
	if (delayed_group_leader && !(p->ptrace & PT_PTRACED) &&
	    p->signal->group_stop_count > 0)
		/*
		 * A group stop is in progress and this is the group leader.
		 * We won't report until all threads have stopped.
		 */
		return 0;

	/*
	 * Now we are pretty sure this task is interesting.
	 * Make sure it doesn't get reaped out from under us while we
	 * give up the lock and then examine it below.  We don't want to
	 * keep holding onto the tasklist_lock while we call getrusage and
	 * possibly take page faults for user memory.
	 */
	get_task_struct(p);
	read_unlock(&tasklist_lock);

	if (unlikely(noreap)) {
		pid_t pid = p->pid;
		uid_t uid = p->uid;
		int why = (p->ptrace & PT_PTRACED) ? CLD_TRAPPED : CLD_STOPPED;

		exit_code = p->exit_code;
		if (unlikely(!exit_code) ||
		    unlikely(p->state & TASK_TRACED))
			goto bail_ref;
		return wait_noreap_copyout(p, pid, uid,
					   why, (exit_code << 8) | 0x7f,
					   infop, ru);
	}

	write_lock_irq(&tasklist_lock);

	/*
	 * This uses xchg to be atomic with the thread resuming and setting
	 * it.  It must also be done with the write lock held to prevent a
	 * race with the EXIT_ZOMBIE case.
	 */
	exit_code = xchg(&p->exit_code, 0);
	if (unlikely(p->exit_state)) {
		/*
		 * The task resumed and then died.  Let the next iteration
		 * catch it in EXIT_ZOMBIE.  Note that exit_code might
		 * already be zero here if it resumed and did _exit(0).
		 * The task itself is dead and won't touch exit_code again;
		 * other processors in this function are locked out.
		 */
		p->exit_code = exit_code;
		exit_code = 0;
	}
	if (unlikely(exit_code == 0)) {
		/*
		 * Another thread in this function got to it first, or it
		 * resumed, or it resumed and then died.
		 */
		write_unlock_irq(&tasklist_lock);
bail_ref:
		put_task_struct(p);
		/*
		 * We are returning to the wait loop without having successfully
		 * removed the process and having released the lock.  We cannot
		 * continue, since the "p" task pointer is potentially stale.
		 *
		 * Return -EAGAIN, and do_wait() will restart the loop from the
		 * beginning.  Do _not_ re-acquire the lock.
		 */
		return -EAGAIN;
	}

	/* move to end of parent's list to avoid starvation */
	remove_parent(p);
	add_parent(p);

	write_unlock_irq(&tasklist_lock);

	retval = ru ? getrusage(p, RUSAGE_BOTH, ru) : 0;
	if (!retval && stat_addr)
		retval = put_user((exit_code << 8) | 0x7f, stat_addr);
	if (!retval && infop)
		retval = put_user(SIGCHLD, &infop->si_signo);
	if (!retval && infop)
		retval = put_user(0, &infop->si_errno);
	if (!retval && infop)
		retval = put_user((short)((p->ptrace & PT_PTRACED)
					  ? CLD_TRAPPED : CLD_STOPPED),
				  &infop->si_code);
	if (!retval && infop)
		retval = put_user(exit_code, &infop->si_status);
	if (!retval && infop)
		retval = put_user(p->pid, &infop->si_pid);
	if (!retval && infop)
		retval = put_user(p->uid, &infop->si_uid);
	if (!retval)
		retval = p->pid;
	put_task_struct(p);

	BUG_ON(!retval);
	return retval;
}
/*
 * Handle do_wait work for one task in a live, non-stopped state.
 * read_lock(&tasklist_lock) on entry.  If we return zero, we still hold
 * the lock and this task is uninteresting.  If we return nonzero, we have
 * released the lock and the system call should return.
 */
static int wait_task_continued(struct task_struct *p, int noreap,
			       struct siginfo __user *infop,
			       int __user *stat_addr, struct rusage __user *ru)
{
	int retval;
	pid_t pid;
	uid_t uid;

	if (!(p->signal->flags & SIGNAL_STOP_CONTINUED))
		return 0;

	spin_lock_irq(&p->sighand->siglock);
	/* Re-check with the lock held.  */
	if (!(p->signal->flags & SIGNAL_STOP_CONTINUED)) {
		spin_unlock_irq(&p->sighand->siglock);
		return 0;
	}
	if (!noreap)
		p->signal->flags &= ~SIGNAL_STOP_CONTINUED;
	spin_unlock_irq(&p->sighand->siglock);

	pid = p->pid;
	uid = p->uid;
	get_task_struct(p);
	read_unlock(&tasklist_lock);

	if (!infop) {
		retval = ru ? getrusage(p, RUSAGE_BOTH, ru) : 0;
		put_task_struct(p);
		if (!retval && stat_addr)
			retval = put_user(0xffff, stat_addr);
		if (!retval)
			retval = p->pid;
	} else {
		retval = wait_noreap_copyout(p, pid, uid,
					     CLD_CONTINUED, SIGCONT,
					     infop, ru);
		BUG_ON(retval == 0);
	}

	return retval;
}

static inline int my_ptrace_child(struct task_struct *p)
{
	if (!(p->ptrace & PT_PTRACED))
		return 0;
	if (!(p->ptrace & PT_ATTACHED))
		return 1;
	/*
	 * This child was PTRACE_ATTACH'd.  We should be seeing it only if
	 * we are the attacher.  If we are the real parent, this is a race
	 * inside ptrace_attach: the attacher is still waiting for the
	 * tasklist_lock (which we hold) to switch the parent links, but
	 * has already set the flags in p->ptrace.
	 */
	return (p->parent != p->real_parent);
}
static long do_wait(pid_t pid, int options, struct siginfo __user *infop,
		    int __user *stat_addr, struct rusage __user *ru)
{
	DECLARE_WAITQUEUE(wait, current);
	struct task_struct *tsk;
	int flag, retval;
	int allowed, denied;

	add_wait_queue(&current->signal->wait_chldexit, &wait);
repeat:
	/*
	 * We will set this flag if we see any child that might later
	 * match our criteria, even if we are not able to reap it yet.
	 */
	flag = 0;
	allowed = denied = 0;
	current->state = TASK_INTERRUPTIBLE;
	read_lock(&tasklist_lock);
	tsk = current;
	do {
		struct task_struct *p;
		int ret;

		list_for_each_entry(p, &tsk->children, sibling) {
			ret = eligible_child(pid, options, p);
			if (!ret)
				continue;

			if (unlikely(ret < 0)) {
				denied = ret;
				continue;
			}
			allowed = 1;

			switch (p->state) {
			case TASK_TRACED:
				/*
				 * When we hit the race with PTRACE_ATTACH,
				 * we will not report this child.  But the
				 * race means it has not yet been moved to
				 * our ptrace_children list, so we need to
				 * set the flag here to avoid a spurious ECHILD
				 * when the race happens with the only child.
				 */
				flag = 1;
				if (!my_ptrace_child(p))
					continue;
				/*FALLTHROUGH*/
			case TASK_STOPPED:
				/*
				 * It's stopped now, so it might later
				 * continue, exit, or stop again.
				 */
				flag = 1;
				if (!(options & WUNTRACED) &&
				    !my_ptrace_child(p))
					continue;
				retval = wait_task_stopped(p, ret == 2,
							   (options & WNOWAIT),
							   infop,
							   stat_addr, ru);
				if (retval == -EAGAIN)
					goto repeat;
				if (retval != 0) /* He released the lock.  */
					goto end;
				break;
			default:
			// case EXIT_DEAD:
				if (p->exit_state == EXIT_DEAD)
					continue;
			// case EXIT_ZOMBIE:
				if (p->exit_state == EXIT_ZOMBIE) {
					/*
					 * Eligible but we cannot release
					 * it yet:
					 */
					if (ret == 2)
						goto check_continued;
					if (!likely(options & WEXITED))
						continue;
					retval = wait_task_zombie(
						p, (options & WNOWAIT),
						infop, stat_addr, ru);
					/* He released the lock.  */
					if (retval != 0)
						goto end;
					break;
				}
check_continued:
				/*
				 * It's running now, so it might later
				 * exit, stop, or stop and then continue.
				 */
				flag = 1;
				if (!unlikely(options & WCONTINUED))
					continue;
				retval = wait_task_continued(
					p, (options & WNOWAIT),
					infop, stat_addr, ru);
				if (retval != 0) /* He released the lock.  */
					goto end;
				break;
			}
		}
		if (!flag) {
			list_for_each_entry(p, &tsk->ptrace_children,
					    ptrace_list) {
				if (!eligible_child(pid, options, p))
					continue;
				flag = 1;
				break;
			}
		}
		if (options & __WNOTHREAD)
			break;
		tsk = next_thread(tsk);
		BUG_ON(tsk->signal != current->signal);
	} while (tsk != current);
	read_unlock(&tasklist_lock);
	if (flag) {
		retval = 0;
		if (options & WNOHANG)
			goto end;
		retval = -ERESTARTSYS;
		if (signal_pending(current))
			goto end;
		schedule();
		goto repeat;
	}
	retval = -ECHILD;
	if (unlikely(denied) && !allowed)
		retval = denied;
end:
	current->state = TASK_RUNNING;
	remove_wait_queue(&current->signal->wait_chldexit, &wait);
	if (infop) {
		if (retval > 0)
			retval = 0;
		else {
			/*
			 * For a WNOHANG return, clear out all the fields
			 * we would set so the user can easily tell the
			 * difference.
			 */
			if (!retval)
				retval = put_user(0, &infop->si_signo);
			if (!retval)
				retval = put_user(0, &infop->si_errno);
			if (!retval)
				retval = put_user(0, &infop->si_code);
			if (!retval)
				retval = put_user(0, &infop->si_pid);
			if (!retval)
				retval = put_user(0, &infop->si_uid);
			if (!retval)
				retval = put_user(0, &infop->si_status);
		}
	}
	return retval;
}
asmlinkage long sys_waitid(int which, pid_t pid,
			   struct siginfo __user *infop, int options,
			   struct rusage __user *ru)
{
	long ret;

	if (options & ~(WNOHANG|WNOWAIT|WEXITED|WSTOPPED|WCONTINUED))
		return -EINVAL;
	if (!(options & (WEXITED|WSTOPPED|WCONTINUED)))
		return -EINVAL;

	switch (which) {
	case P_ALL:
		pid = -1;
		break;
	case P_PID:
		if (pid <= 0)
			return -EINVAL;
		break;
	case P_PGID:
		if (pid <= 0)
			return -EINVAL;
		pid = -pid;
		break;
	default:
		return -EINVAL;
	}

	ret = do_wait(pid, options, infop, NULL, ru);

	/* avoid REGPARM breakage on x86: */
	prevent_tail_call(ret);
	return ret;
}
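
/*
 * Illustrative userspace sketch (not compiled here): WNOWAIT makes
 * sys_waitid() report a child through wait_noreap_copyout() without
 * reaping it, so the same exit status can be collected again later.
 */
#if 0
#include <stdio.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
	siginfo_t info;
	pid_t pid = fork();

	if (pid == 0)
		_exit(7);
	/* Peek: the child remains a zombie after this call. */
	waitid(P_PID, pid, &info, WEXITED | WNOWAIT);
	printf("peeked at status %d\n", info.si_status);	/* 7 */
	/* Now reap it for real. */
	waitid(P_PID, pid, &info, WEXITED);
	return 0;
}
#endif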
asmlinkage long sys_wait4(pid_t pid, int __user *stat_addr,
			  int options, struct rusage __user *ru)
{
	long ret;

	if (options & ~(WNOHANG|WUNTRACED|WCONTINUED|
			__WNOTHREAD|__WCLONE|__WALL))
		return -EINVAL;
	ret = do_wait(pid, options | WEXITED, NULL, stat_addr, ru);

	/* avoid REGPARM breakage on x86: */
	prevent_tail_call(ret);
	return ret;
}
#ifdef __ARCH_WANT_SYS_WAITPID

/*
 * sys_waitpid() remains for compatibility. waitpid() should be
 * implemented by calling sys_wait4() from libc.a.
 */
asmlinkage long sys_waitpid(pid_t pid, int __user *stat_addr, int options)
{
	return sys_wait4(pid, stat_addr, options, NULL);
}

#endif
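
/*
 * Illustrative sketch (not compiled here): the libc-side wrapper the
 * comment above calls for, assuming a wait4() syscall wrapper is
 * available.
 */
#if 0
#include <sys/resource.h>
#include <sys/wait.h>

pid_t waitpid(pid_t pid, int *status, int options)
{
	return wait4(pid, status, options, (struct rusage *)0);
}
#endif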