/*
 *  linux/kernel/exit.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/capability.h>
#include <linux/completion.h>
#include <linux/personality.h>
#include <linux/tty.h>
#include <linux/mnt_namespace.h>
#include <linux/iocontext.h>
#include <linux/key.h>
#include <linux/security.h>
#include <linux/cpu.h>
#include <linux/acct.h>
#include <linux/tsacct_kern.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/binfmts.h>
#include <linux/nsproxy.h>
#include <linux/pid_namespace.h>
#include <linux/ptrace.h>
#include <linux/profile.h>
#include <linux/mount.h>
#include <linux/proc_fs.h>
#include <linux/kthread.h>
#include <linux/mempolicy.h>
#include <linux/taskstats_kern.h>
#include <linux/delayacct.h>
#include <linux/freezer.h>
#include <linux/cgroup.h>
#include <linux/syscalls.h>
#include <linux/signal.h>
#include <linux/posix-timers.h>
#include <linux/cn_proc.h>
#include <linux/mutex.h>
#include <linux/futex.h>
#include <linux/compat.h>
#include <linux/pipe_fs_i.h>
#include <linux/audit.h> /* for audit_free() */
#include <linux/resource.h>
#include <linux/blkdev.h>
#include <linux/task_io_accounting_ops.h>

#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm/pgtable.h>
#include <asm/mmu_context.h>
static void exit_mm(struct task_struct * tsk);

static inline int task_detached(struct task_struct *p)
{
	return p->exit_signal == -1;
}

static void __unhash_process(struct task_struct *p)
{
	nr_threads--;
	detach_pid(p, PIDTYPE_PID);
	if (thread_group_leader(p)) {
		detach_pid(p, PIDTYPE_PGID);
		detach_pid(p, PIDTYPE_SID);

		list_del_rcu(&p->tasks);
		__get_cpu_var(process_counts)--;
	}
	list_del_rcu(&p->thread_group);
	remove_parent(p);
}

/*
 * This function expects the tasklist_lock write-locked.
 */
static void __exit_signal(struct task_struct *tsk)
{
	struct signal_struct *sig = tsk->signal;
	struct sighand_struct *sighand;

	BUG_ON(!sig);
	BUG_ON(!atomic_read(&sig->count));

	rcu_read_lock();
	sighand = rcu_dereference(tsk->sighand);
	spin_lock(&sighand->siglock);

	posix_cpu_timers_exit(tsk);
	if (atomic_dec_and_test(&sig->count))
		posix_cpu_timers_exit_group(tsk);
	else {
		/*
		 * If there is any task waiting for the group exit
		 * then notify it:
		 */
		if (sig->group_exit_task && atomic_read(&sig->count) == sig->notify_count)
			wake_up_process(sig->group_exit_task);

		if (tsk == sig->curr_target)
			sig->curr_target = next_thread(tsk);
		/*
		 * Accumulate here the counters for all threads but the
		 * group leader as they die, so they can be added into
		 * the process-wide totals when those are taken.
		 * The group leader stays around as a zombie as long
		 * as there are other threads.  When it gets reaped,
		 * the exit.c code will add its counts into these totals.
		 * We won't ever get here for the group leader, since it
		 * will have been the last reference on the signal_struct.
		 */
		sig->utime = cputime_add(sig->utime, tsk->utime);
		sig->stime = cputime_add(sig->stime, tsk->stime);
		sig->gtime = cputime_add(sig->gtime, tsk->gtime);
		sig->min_flt += tsk->min_flt;
		sig->maj_flt += tsk->maj_flt;
		sig->nvcsw += tsk->nvcsw;
		sig->nivcsw += tsk->nivcsw;
		sig->inblock += task_io_get_inblock(tsk);
		sig->oublock += task_io_get_oublock(tsk);
		sig->sum_sched_runtime += tsk->se.sum_exec_runtime;
		sig = NULL; /* Marker for below. */
	}

	__unhash_process(tsk);

	/*
	 * Do this under ->siglock, we can race with another thread
	 * doing sigqueue_free() if we have SIGQUEUE_PREALLOC signals.
	 */
	flush_sigqueue(&tsk->pending);

	tsk->signal = NULL;
	tsk->sighand = NULL;
	spin_unlock(&sighand->siglock);
	rcu_read_unlock();

	__cleanup_sighand(sighand);
	clear_tsk_thread_flag(tsk, TIF_SIGPENDING);
	if (sig) {
		flush_sigqueue(&sig->shared_pending);
		taskstats_tgid_free(sig);
		__cleanup_signal(sig);
	}
}
static void delayed_put_task_struct(struct rcu_head *rhp)
{
	put_task_struct(container_of(rhp, struct task_struct, rcu));
}

void release_task(struct task_struct * p)
{
	struct task_struct *leader;
	int zap_leader;
repeat:
	atomic_dec(&p->user->processes);
	proc_flush_task(p);
	write_lock_irq(&tasklist_lock);
	ptrace_unlink(p);
	BUG_ON(!list_empty(&p->ptrace_list) || !list_empty(&p->ptrace_children));
	__exit_signal(p);

	/*
	 * If we are the last non-leader member of the thread
	 * group, and the leader is zombie, then notify the
	 * group leader's parent process. (if it wants notification.)
	 */
	zap_leader = 0;
	leader = p->group_leader;
	if (leader != p && thread_group_empty(leader) && leader->exit_state == EXIT_ZOMBIE) {
		BUG_ON(task_detached(leader));
		do_notify_parent(leader, leader->exit_signal);
		/*
		 * If we were the last child thread and the leader has
		 * exited already, and the leader's parent ignores SIGCHLD,
		 * then we are the one who should release the leader.
		 *
		 * do_notify_parent() will have marked it self-reaping in
		 * that case.
		 */
		zap_leader = task_detached(leader);
	}

	write_unlock_irq(&tasklist_lock);
	release_thread(p);
	call_rcu(&p->rcu, delayed_put_task_struct);

	p = leader;
	if (unlikely(zap_leader))
		goto repeat;
}
/*
 * This checks not only the pgrp, but falls back on the pid if no
 * satisfactory pgrp is found. I dunno - gdb doesn't work correctly
 * without this...
 *
 * The caller must hold rcu lock or the tasklist lock.
 */
struct pid *session_of_pgrp(struct pid *pgrp)
{
	struct task_struct *p;
	struct pid *sid = NULL;

	p = pid_task(pgrp, PIDTYPE_PGID);
	if (p == NULL)
		p = pid_task(pgrp, PIDTYPE_PID);
	if (p != NULL)
		sid = task_session(p);

	return sid;
}

/*
 * Determine if a process group is "orphaned", according to the POSIX
 * definition in 2.2.2.52.  Orphaned process groups are not to be affected
 * by terminal-generated stop signals.  Newly orphaned process groups are
 * to receive a SIGHUP and a SIGCONT.
 *
 * "I ask you, have you ever known what it is to be an orphan?"
 */
static int will_become_orphaned_pgrp(struct pid *pgrp, struct task_struct *ignored_task)
{
	struct task_struct *p;

	do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
		if ((p == ignored_task) ||
		    (p->exit_state && thread_group_empty(p)) ||
		    is_global_init(p->real_parent))
			continue;

		if (task_pgrp(p->real_parent) != pgrp &&
		    task_session(p->real_parent) == task_session(p))
			return 0;
	} while_each_pid_task(pgrp, PIDTYPE_PGID, p);

	return 1;
}

int is_current_pgrp_orphaned(void)
{
	int retval;

	read_lock(&tasklist_lock);
	retval = will_become_orphaned_pgrp(task_pgrp(current), NULL);
	read_unlock(&tasklist_lock);

	return retval;
}

static int has_stopped_jobs(struct pid *pgrp)
{
	int retval = 0;
	struct task_struct *p;

	do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
		if (!task_is_stopped(p))
			continue;
		retval = 1;
		break;
	} while_each_pid_task(pgrp, PIDTYPE_PGID, p);

	return retval;
}
/*
 * Check to see if any process groups have become orphaned as
 * a result of our exiting, and if they have any stopped jobs,
 * send them a SIGHUP and then a SIGCONT.  (POSIX 3.2.2.2)
 */
static void
kill_orphaned_pgrp(struct task_struct *tsk, struct task_struct *parent)
{
	struct pid *pgrp = task_pgrp(tsk);
	struct task_struct *ignored_task = tsk;

	if (!parent)
		/* exit: our father is in a different pgrp than
		 * we are and we were the only connection outside.
		 */
		parent = tsk->real_parent;
	else
		/* reparent: our child is in a different pgrp than
		 * we are, and it was the only connection outside.
		 */
		ignored_task = NULL;

	if (task_pgrp(parent) != pgrp &&
	    task_session(parent) == task_session(tsk) &&
	    will_become_orphaned_pgrp(pgrp, ignored_task) &&
	    has_stopped_jobs(pgrp)) {
		__kill_pgrp_info(SIGHUP, SEND_SIG_PRIV, pgrp);
		__kill_pgrp_info(SIGCONT, SEND_SIG_PRIV, pgrp);
	}
}

/**
 * reparent_to_kthreadd - Reparent the calling kernel thread to kthreadd
 *
 * If a kernel thread is launched as a result of a system call, or if
 * it ever exits, it should generally reparent itself to kthreadd so it
 * isn't in the way of other processes and is correctly cleaned up on exit.
 *
 * The various task state such as scheduling policy and priority may have
 * been inherited from a user process, so we reset them to sane values here.
 *
 * NOTE that reparent_to_kthreadd() gives the caller full capabilities.
 */
static void reparent_to_kthreadd(void)
{
	write_lock_irq(&tasklist_lock);

	ptrace_unlink(current);
	/* Reparent to init */
	remove_parent(current);
	current->real_parent = current->parent = kthreadd_task;
	add_parent(current);

	/* Set the exit signal to SIGCHLD so we signal init on exit */
	current->exit_signal = SIGCHLD;

	if (task_nice(current) < 0)
		set_user_nice(current, 0);
	/* cpus_allowed? */
	/* rt_priority? */
	/* signals? */
	security_task_reparent_to_init(current);
	memcpy(current->signal->rlim, init_task.signal->rlim,
	       sizeof(current->signal->rlim));
	atomic_inc(&(INIT_USER->__count));
	write_unlock_irq(&tasklist_lock);
	switch_uid(INIT_USER);
}
void __set_special_pids(struct pid *pid)
{
	struct task_struct *curr = current->group_leader;
	pid_t nr = pid_nr(pid);

	if (task_session(curr) != pid) {
		change_pid(curr, PIDTYPE_SID, pid);
		set_task_session(curr, nr);
	}
	if (task_pgrp(curr) != pid) {
		change_pid(curr, PIDTYPE_PGID, pid);
		set_task_pgrp(curr, nr);
	}
}

static void set_special_pids(struct pid *pid)
{
	write_lock_irq(&tasklist_lock);
	__set_special_pids(pid);
	write_unlock_irq(&tasklist_lock);
}

/*
 * Let kernel threads use this to say that they
 * allow a certain signal (since daemonize() will
 * have disabled all of them by default).
 */
int allow_signal(int sig)
{
	if (!valid_signal(sig) || sig < 1)
		return -EINVAL;

	spin_lock_irq(&current->sighand->siglock);
	sigdelset(&current->blocked, sig);
	if (!current->mm) {
		/* Kernel threads handle their own signals.
		   Let the signal code know it'll be handled, so
		   that they don't get converted to SIGKILL or
		   just silently dropped */
		current->sighand->action[(sig)-1].sa.sa_handler = (void __user *)2;
	}
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);
	return 0;
}

EXPORT_SYMBOL(allow_signal);
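
/*
 * Usage sketch (illustrative only, not part of this file): a daemonized
 * kernel thread re-enables just the signals it wants to react to, then
 * polls signal_pending() in its main loop.  do_some_work() is a
 * hypothetical placeholder.
 *
 *	allow_signal(SIGKILL);
 *	while (!signal_pending(current))
 *		do_some_work();
 */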
int disallow_signal(int sig)
{
	if (!valid_signal(sig) || sig < 1)
		return -EINVAL;

	spin_lock_irq(&current->sighand->siglock);
	current->sighand->action[(sig)-1].sa.sa_handler = SIG_IGN;
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);
	return 0;
}

EXPORT_SYMBOL(disallow_signal);

/*
 * Put all the gunge required to become a kernel thread without
 * attached user resources in one place where it belongs.
 */
void daemonize(const char *name, ...)
{
	va_list args;
	struct fs_struct *fs;
	sigset_t blocked;

	va_start(args, name);
	vsnprintf(current->comm, sizeof(current->comm), name, args);
	va_end(args);

	/*
	 * If we were started as a result of loading a module, close all of
	 * the user space pages.  We don't need them, and if we didn't close
	 * them they would be locked into memory.
	 */
	exit_mm(current);
	/*
	 * We don't want to have TIF_FREEZE set if the system-wide hibernation
	 * or suspend transition begins right now.
	 */
	current->flags |= PF_NOFREEZE;

	if (current->nsproxy != &init_nsproxy) {
		get_nsproxy(&init_nsproxy);
		switch_task_namespaces(current, &init_nsproxy);
	}
	set_special_pids(&init_struct_pid);
	proc_clear_tty(current);

	/* Block and flush all signals */
	sigfillset(&blocked);
	sigprocmask(SIG_BLOCK, &blocked, NULL);
	flush_signals(current);

	/* Become as one with the init task */
	exit_fs(current);	/* current->fs->count--; */
	fs = init_task.fs;
	current->fs = fs;
	atomic_inc(&fs->count);

	exit_files(current);
	current->files = init_task.files;
	atomic_inc(&current->files->count);

	reparent_to_kthreadd();
}

EXPORT_SYMBOL(daemonize);
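
/*
 * Usage sketch (illustrative only): a thread started with kernel_thread()
 * typically calls daemonize() first to shed inherited user resources and
 * give itself a name.  "myfs_flushd" and flush_loop() are hypothetical.
 *
 *	static int myfs_flushd(void *data)
 *	{
 *		daemonize("myfs_flushd");
 *		allow_signal(SIGKILL);
 *		flush_loop(data);
 *		return 0;
 *	}
 */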
static void close_files(struct files_struct * files)
{
	int i, j;
	struct fdtable *fdt;

	j = 0;

	/*
	 * It is safe to dereference the fd table without RCU or
	 * ->file_lock because this is the last reference to the
	 * files structure.
	 */
	fdt = files_fdtable(files);
	for (;;) {
		unsigned long set;
		i = j * __NFDBITS;
		if (i >= fdt->max_fds)
			break;
		set = fdt->open_fds->fds_bits[j++];
		while (set) {
			if (set & 1) {
				struct file * file = xchg(&fdt->fd[i], NULL);
				if (file) {
					filp_close(file, files);
					cond_resched();
				}
			}
			i++;
			set >>= 1;
		}
	}
}

struct files_struct *get_files_struct(struct task_struct *task)
{
	struct files_struct *files;

	task_lock(task);
	files = task->files;
	if (files)
		atomic_inc(&files->count);
	task_unlock(task);

	return files;
}

void put_files_struct(struct files_struct *files)
{
	struct fdtable *fdt;

	if (atomic_dec_and_test(&files->count)) {
		close_files(files);
		/*
		 * Free the fd and fdset arrays if we expanded them.
		 * If the fdtable was embedded, pass files for freeing
		 * at the end of the RCU grace period. Otherwise,
		 * you can free files immediately.
		 */
		fdt = files_fdtable(files);
		if (fdt != &files->fdtab)
			kmem_cache_free(files_cachep, files);
		free_fdtable(fdt);
	}
}
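
/*
 * Reference-count sketch (illustrative only): code that needs another
 * task's file table takes a counted reference and drops it when done;
 * inspect_fdtable() is a hypothetical helper.
 *
 *	struct files_struct *files = get_files_struct(task);
 *	if (files) {
 *		inspect_fdtable(files);
 *		put_files_struct(files);
 *	}
 */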
void reset_files_struct(struct files_struct *files)
{
	struct task_struct *tsk = current;
	struct files_struct *old;

	old = tsk->files;
	task_lock(tsk);
	tsk->files = files;
	task_unlock(tsk);
	put_files_struct(old);
}

void exit_files(struct task_struct *tsk)
{
	struct files_struct * files = tsk->files;

	if (files) {
		task_lock(tsk);
		tsk->files = NULL;
		task_unlock(tsk);
		put_files_struct(files);
	}
}

void put_fs_struct(struct fs_struct *fs)
{
	/* No need to hold fs->lock if we are killing it */
	if (atomic_dec_and_test(&fs->count)) {
		path_put(&fs->root);
		path_put(&fs->pwd);
		if (fs->altroot.dentry)
			path_put(&fs->altroot);
		kmem_cache_free(fs_cachep, fs);
	}
}

void exit_fs(struct task_struct *tsk)
{
	struct fs_struct * fs = tsk->fs;

	if (fs) {
		task_lock(tsk);
		tsk->fs = NULL;
		task_unlock(tsk);
		put_fs_struct(fs);
	}
}

EXPORT_SYMBOL_GPL(exit_fs);
#ifdef CONFIG_MM_OWNER
/*
 * Task p is exiting and it owned mm, let's find a new owner for it
 */
static inline int
mm_need_new_owner(struct mm_struct *mm, struct task_struct *p)
{
	/*
	 * If there are other users of the mm and the owner (us) is exiting
	 * we need to find a new owner to take on the responsibility.
	 */
	if (!mm)
		return 0;
	if (atomic_read(&mm->mm_users) <= 1)
		return 0;
	if (mm->owner != p)
		return 0;
	return 1;
}

void mm_update_next_owner(struct mm_struct *mm)
{
	struct task_struct *c, *g, *p = current;

retry:
	if (!mm_need_new_owner(mm, p))
		return;

	read_lock(&tasklist_lock);
	/*
	 * Search in the children
	 */
	list_for_each_entry(c, &p->children, sibling) {
		if (c->mm == mm)
			goto assign_new_owner;
	}

	/*
	 * Search in the siblings
	 */
	list_for_each_entry(c, &p->parent->children, sibling) {
		if (c->mm == mm)
			goto assign_new_owner;
	}

	/*
	 * Search through everything else. We should not get
	 * here often
	 */
	do_each_thread(g, c) {
		if (c->mm == mm)
			goto assign_new_owner;
	} while_each_thread(g, c);
	read_unlock(&tasklist_lock);
	return;

assign_new_owner:
	BUG_ON(c == p);
	get_task_struct(c);
	/*
	 * The task_lock protects c->mm from changing.
	 * We always want mm->owner->mm == mm
	 */
	task_lock(c);
	/*
	 * Delay read_unlock() till we have the task_lock()
	 * to ensure that c does not slip away underneath us
	 */
	read_unlock(&tasklist_lock);
	if (c->mm != mm) {
		task_unlock(c);
		put_task_struct(c);
		goto retry;
	}
	cgroup_mm_owner_callbacks(mm->owner, c);
	mm->owner = c;
	task_unlock(c);
	put_task_struct(c);
}
#endif /* CONFIG_MM_OWNER */
/*
 * Turn us into a lazy TLB process if we
 * aren't already..
 */
static void exit_mm(struct task_struct * tsk)
{
	struct mm_struct *mm = tsk->mm;

	mm_release(tsk, mm);
	if (!mm)
		return;
	/*
	 * Serialize with any possible pending coredump.
	 * We must hold mmap_sem around checking core_waiters
	 * and clearing tsk->mm.  The core-inducing thread
	 * will increment core_waiters for each thread in the
	 * group with ->mm != NULL.
	 */
	down_read(&mm->mmap_sem);
	if (mm->core_waiters) {
		up_read(&mm->mmap_sem);
		down_write(&mm->mmap_sem);
		if (!--mm->core_waiters)
			complete(mm->core_startup_done);
		up_write(&mm->mmap_sem);

		wait_for_completion(&mm->core_done);
		down_read(&mm->mmap_sem);
	}
	atomic_inc(&mm->mm_count);
	BUG_ON(mm != tsk->active_mm);
	/* more a memory barrier than a real lock */
	task_lock(tsk);
	tsk->mm = NULL;
	up_read(&mm->mmap_sem);
	enter_lazy_tlb(mm, current);
	/* We don't want this task to be frozen prematurely */
	clear_freeze_flag(tsk);
	task_unlock(tsk);
	mm_update_next_owner(mm);
	mmput(mm);
}
static void
reparent_thread(struct task_struct *p, struct task_struct *father, int traced)
{
	if (p->pdeath_signal)
		/* We already hold the tasklist_lock here.  */
		group_send_sig_info(p->pdeath_signal, SEND_SIG_NOINFO, p);

	/* Move the child from its dying parent to the new one.  */
	if (unlikely(traced)) {
		/* Preserve ptrace links if someone else is tracing this child.  */
		list_del_init(&p->ptrace_list);
		if (ptrace_reparented(p))
			list_add(&p->ptrace_list, &p->real_parent->ptrace_children);
	} else {
		/* If this child is being traced, then we're the one tracing it
		 * anyway, so let go of it.
		 */
		p->ptrace = 0;
		remove_parent(p);
		p->parent = p->real_parent;
		add_parent(p);

		if (task_is_traced(p)) {
			/*
			 * If it was at a trace stop, turn it into
			 * a normal stop since it's no longer being
			 * traced.
			 */
			ptrace_untrace(p);
		}
	}

	/* If this is a threaded reparent there is no need to
	 * notify anyone anything has happened.
	 */
	if (same_thread_group(p->real_parent, father))
		return;

	/* We don't want people slaying init.  */
	if (!task_detached(p))
		p->exit_signal = SIGCHLD;

	/* If we'd notified the old parent about this child's death,
	 * also notify the new parent.
	 */
	if (!traced && p->exit_state == EXIT_ZOMBIE &&
	    !task_detached(p) && thread_group_empty(p))
		do_notify_parent(p, p->exit_signal);

	kill_orphaned_pgrp(p, father);
}
/*
 * When we die, we re-parent all our children.
 * Try to give them to another thread in our thread
 * group, and if no such member exists, give it to
 * the child reaper process (ie "init") in our pid
 * space.
 */
static void forget_original_parent(struct task_struct *father)
{
	struct task_struct *p, *n, *reaper = father;
	struct list_head ptrace_dead;

	INIT_LIST_HEAD(&ptrace_dead);

	write_lock_irq(&tasklist_lock);

	do {
		reaper = next_thread(reaper);
		if (reaper == father) {
			reaper = task_child_reaper(father);
			break;
		}
	} while (reaper->flags & PF_EXITING);

	/*
	 * There are only two places where our children can be:
	 *
	 * - in our child list
	 * - in our ptraced child list
	 *
	 * Search them and reparent children.
	 */
	list_for_each_entry_safe(p, n, &father->children, sibling) {
		int ptrace;

		ptrace = p->ptrace;

		/* if father isn't the real parent, then ptrace must be enabled */
		BUG_ON(father != p->real_parent && !ptrace);

		if (father == p->real_parent) {
			/* reparent with a reaper, the real father is us */
			p->real_parent = reaper;
			reparent_thread(p, father, 0);
		} else {
			/* reparent ptraced task to its real parent */
			__ptrace_unlink(p);
			if (p->exit_state == EXIT_ZOMBIE && !task_detached(p) &&
			    thread_group_empty(p))
				do_notify_parent(p, p->exit_signal);
		}

		/*
		 * if the ptraced child is a detached zombie we must collect
		 * it before we exit, or it will remain zombie forever since
		 * we prevented it from reaping itself while it was being
		 * traced by us, so that we could see it in wait4.
		 */
		if (unlikely(ptrace && p->exit_state == EXIT_ZOMBIE && task_detached(p)))
			list_add(&p->ptrace_list, &ptrace_dead);
	}

	list_for_each_entry_safe(p, n, &father->ptrace_children, ptrace_list) {
		p->real_parent = reaper;
		reparent_thread(p, father, 1);
	}

	write_unlock_irq(&tasklist_lock);
	BUG_ON(!list_empty(&father->children));
	BUG_ON(!list_empty(&father->ptrace_children));

	list_for_each_entry_safe(p, n, &ptrace_dead, ptrace_list) {
		list_del_init(&p->ptrace_list);
		release_task(p);
	}
}
/*
 * Send signals to all our closest relatives so that they know
 * to properly mourn us..
 */
static void exit_notify(struct task_struct *tsk, int group_dead)
{
	int state;

	/*
	 * This does two things:
	 *
	 * A.  Make init inherit all the child processes
	 * B.  Check to see if any process groups have become orphaned
	 *	as a result of our exiting, and if they have any stopped
	 *	jobs, send them a SIGHUP and then a SIGCONT.  (POSIX 3.2.2.2)
	 */
	forget_original_parent(tsk);
	exit_task_namespaces(tsk);

	write_lock_irq(&tasklist_lock);
	if (group_dead)
		kill_orphaned_pgrp(tsk->group_leader, NULL);

	/* Let father know we died
	 *
	 * Thread signals are configurable, but you aren't going to use
	 * that to send signals to arbitrary processes.
	 * That stops right now.
	 *
	 * If the parent exec id doesn't match the exec id we saved
	 * when we started then we know the parent has changed security
	 * domain.
	 *
	 * If our self_exec id doesn't match our parent_exec_id then
	 * we have changed execution domain as these two values started
	 * the same after a fork.
	 */
	if (tsk->exit_signal != SIGCHLD && !task_detached(tsk) &&
	    (tsk->parent_exec_id != tsk->real_parent->self_exec_id ||
	     tsk->self_exec_id != tsk->parent_exec_id) &&
	    !capable(CAP_KILL))
		tsk->exit_signal = SIGCHLD;

	/* If something other than our normal parent is ptracing us, then
	 * send it a SIGCHLD instead of honoring exit_signal.  exit_signal
	 * only has special meaning to our real parent.
	 */
	if (!task_detached(tsk) && thread_group_empty(tsk)) {
		int signal = ptrace_reparented(tsk) ?
				SIGCHLD : tsk->exit_signal;
		do_notify_parent(tsk, signal);
	} else if (tsk->ptrace) {
		do_notify_parent(tsk, SIGCHLD);
	}

	state = EXIT_ZOMBIE;
	if (task_detached(tsk) && likely(!tsk->ptrace))
		state = EXIT_DEAD;
	tsk->exit_state = state;

	/* mt-exec, de_thread() is waiting for us */
	if (thread_group_leader(tsk) &&
	    tsk->signal->notify_count < 0 &&
	    tsk->signal->group_exit_task)
		wake_up_process(tsk->signal->group_exit_task);

	write_unlock_irq(&tasklist_lock);

	/* If the process is dead, release it - nobody will wait for it */
	if (state == EXIT_DEAD)
		release_task(tsk);
}
#ifdef CONFIG_DEBUG_STACK_USAGE
static void check_stack_usage(void)
{
	static DEFINE_SPINLOCK(low_water_lock);
	static int lowest_to_date = THREAD_SIZE;
	unsigned long *n = end_of_stack(current);
	unsigned long free;

	while (*n == 0)
		n++;
	free = (unsigned long)n - (unsigned long)end_of_stack(current);

	if (free >= lowest_to_date)
		return;

	spin_lock(&low_water_lock);
	if (free < lowest_to_date) {
		printk(KERN_WARNING "%s used greatest stack depth: %lu bytes "
				"left\n",
				current->comm, free);
		lowest_to_date = free;
	}
	spin_unlock(&low_water_lock);
}
#else
static inline void check_stack_usage(void) {}
#endif

static inline void exit_child_reaper(struct task_struct *tsk)
{
	if (likely(tsk->group_leader != task_child_reaper(tsk)))
		return;

	if (tsk->nsproxy->pid_ns == &init_pid_ns)
		panic("Attempted to kill init!");

	/*
	 * @tsk is the last thread in the 'cgroup-init' and is exiting.
	 * Terminate all remaining processes in the namespace and reap them
	 * before exiting @tsk.
	 *
	 * Note that @tsk (last thread of cgroup-init) may not necessarily
	 * be the child-reaper (i.e main thread of cgroup-init) of the
	 * namespace i.e the child_reaper may have already exited.
	 *
	 * Even after a child_reaper exits, we let it inherit orphaned children,
	 * because, pid_ns->child_reaper remains valid as long as there is
	 * at least one living sub-thread in the cgroup init.
	 * This living sub-thread of the cgroup-init will be notified when
	 * a child inherited by the 'child-reaper' exits (do_notify_parent()
	 * uses __group_send_sig_info()). Further, when reaping child processes,
	 * do_wait() iterates over children of all living sub threads.
	 * i.e even though 'child_reaper' thread is listed as the parent of the
	 * orphaned children, any living sub-thread in the cgroup-init can
	 * perform the role of the child_reaper.
	 */
	zap_pid_ns_processes(tsk->nsproxy->pid_ns);
}
NORET_TYPE void do_exit(long code)
{
	struct task_struct *tsk = current;
	int group_dead;

	profile_task_exit(tsk);

	WARN_ON(atomic_read(&tsk->fs_excl));

	if (unlikely(in_interrupt()))
		panic("Aiee, killing interrupt handler!");
	if (unlikely(!tsk->pid))
		panic("Attempted to kill the idle task!");

	if (unlikely(current->ptrace & PT_TRACE_EXIT)) {
		current->ptrace_message = code;
		ptrace_notify((PTRACE_EVENT_EXIT << 8) | SIGTRAP);
	}

	/*
	 * We're taking recursive faults here in do_exit. Safest is to just
	 * leave this task alone and wait for reboot.
	 */
	if (unlikely(tsk->flags & PF_EXITING)) {
		printk(KERN_ALERT
			"Fixing recursive fault but reboot is needed!\n");
		/*
		 * We can do this unlocked here. The futex code uses
		 * this flag just to verify whether the pi state
		 * cleanup has been done or not. In the worst case it
		 * loops once more. We pretend that the cleanup was
		 * done as there is no way to return. Either the
		 * OWNER_DIED bit is set by now or we push the blocked
		 * task into the wait-forever nirvana as well.
		 */
		tsk->flags |= PF_EXITPIDONE;
		if (tsk->io_context)
			exit_io_context();
		set_current_state(TASK_UNINTERRUPTIBLE);
		schedule();
	}

	exit_signals(tsk);  /* sets PF_EXITING */

	/*
	 * tsk->flags are checked in the futex code to protect against
	 * an exiting task cleaning up the robust pi futexes.
	 */
	smp_mb();
	spin_unlock_wait(&tsk->pi_lock);

	if (unlikely(in_atomic()))
		printk(KERN_INFO "note: %s[%d] exited with preempt_count %d\n",
				current->comm, task_pid_nr(current),
				preempt_count());

	acct_update_integrals(tsk);
	if (tsk->mm) {
		update_hiwater_rss(tsk->mm);
		update_hiwater_vm(tsk->mm);
	}
	group_dead = atomic_dec_and_test(&tsk->signal->live);
	if (group_dead) {
		exit_child_reaper(tsk);
		hrtimer_cancel(&tsk->signal->real_timer);
		exit_itimers(tsk->signal);
	}
	acct_collect(code, group_dead);
#ifdef CONFIG_FUTEX
	if (unlikely(tsk->robust_list))
		exit_robust_list(tsk);
#ifdef CONFIG_COMPAT
	if (unlikely(tsk->compat_robust_list))
		compat_exit_robust_list(tsk);
#endif
#endif
	if (group_dead)
		tty_audit_exit();
	if (unlikely(tsk->audit_context))
		audit_free(tsk);

	tsk->exit_code = code;
	taskstats_exit(tsk, group_dead);

	exit_mm(tsk);

	if (group_dead)
		acct_process();
	exit_sem(tsk);
	exit_files(tsk);
	exit_fs(tsk);
	check_stack_usage();
	exit_thread();
	cgroup_exit(tsk, 1);
	exit_keys(tsk);

	if (group_dead && tsk->signal->leader)
		disassociate_ctty(1);

	module_put(task_thread_info(tsk)->exec_domain->module);
	if (tsk->binfmt)
		module_put(tsk->binfmt->module);

	proc_exit_connector(tsk);
	exit_notify(tsk, group_dead);
#ifdef CONFIG_NUMA
	mpol_put(tsk->mempolicy);
	tsk->mempolicy = NULL;
#endif
#ifdef CONFIG_FUTEX
	/*
	 * This must happen late, after the PID is not
	 * hashed anymore:
	 */
	if (unlikely(!list_empty(&tsk->pi_state_list)))
		exit_pi_state_list(tsk);
	if (unlikely(current->pi_state_cache))
		kfree(current->pi_state_cache);
#endif
	/*
	 * Make sure we are holding no locks:
	 */
	debug_check_no_locks_held(tsk);
	/*
	 * We can do this unlocked here. The futex code uses this flag
	 * just to verify whether the pi state cleanup has been done
	 * or not. In the worst case it loops once more.
	 */
	tsk->flags |= PF_EXITPIDONE;
	if (tsk->io_context)
		exit_io_context();

	if (tsk->splice_pipe)
		__free_pipe_info(tsk->splice_pipe);

	preempt_disable();
	/* causes final put_task_struct in finish_task_switch(). */
	tsk->state = TASK_DEAD;

	schedule();
	BUG();
	/* Avoid "noreturn function does return".  */
	for (;;)
		cpu_relax();	/* For when BUG is null */
}
EXPORT_SYMBOL_GPL(do_exit);

NORET_TYPE void complete_and_exit(struct completion *comp, long code)
{
	if (comp)
		complete(comp);

	do_exit(code);
}

EXPORT_SYMBOL(complete_and_exit);
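
/*
 * Usage sketch (illustrative only): a module's kernel thread signals its
 * unloader and exits in one step, so the module text cannot be freed while
 * the thread is still running inside it.  "exit_done" is assumed to be a
 * completion the module's cleanup path waits on.
 *
 *	complete_and_exit(&exit_done, 0);
 */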
asmlinkage long sys_exit(int error_code)
{
	do_exit((error_code&0xff)<<8);
}

/*
 * Take down every thread in the group.  This is called by fatal signals
 * as well as by sys_exit_group (below).
 */
NORET_TYPE void
do_group_exit(int exit_code)
{
	struct signal_struct *sig = current->signal;

	BUG_ON(exit_code & 0x80); /* core dumps don't get here */

	if (signal_group_exit(sig))
		exit_code = sig->group_exit_code;
	else if (!thread_group_empty(current)) {
		struct sighand_struct *const sighand = current->sighand;
		spin_lock_irq(&sighand->siglock);
		if (signal_group_exit(sig))
			/* Another thread got here before we took the lock.  */
			exit_code = sig->group_exit_code;
		else {
			sig->group_exit_code = exit_code;
			sig->flags = SIGNAL_GROUP_EXIT;
			zap_other_threads(current);
		}
		spin_unlock_irq(&sighand->siglock);
	}

	do_exit(exit_code);
	/* NOTREACHED */
}

/*
 * this kills every thread in the thread group. Note that any externally
 * wait4()-ing process will get the correct exit code - even if this
 * thread is not the thread group leader.
 */
asmlinkage void sys_exit_group(int error_code)
{
	do_group_exit((error_code & 0xff) << 8);
}
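
/*
 * Userspace view (illustrative only): glibc's exit() ends up here, so the
 * whole thread group dies even when a non-leader thread calls it:
 *
 *	syscall(SYS_exit_group, 1);	terminates every thread
 *	syscall(SYS_exit, 1);		terminates the calling thread only
 */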
static struct pid *task_pid_type(struct task_struct *task, enum pid_type type)
{
	struct pid *pid = NULL;
	if (type == PIDTYPE_PID)
		pid = task->pids[type].pid;
	else if (type < PIDTYPE_MAX)
		pid = task->group_leader->pids[type].pid;
	return pid;
}

static int eligible_child(enum pid_type type, struct pid *pid, int options,
			  struct task_struct *p)
{
	int err;

	if (type < PIDTYPE_MAX) {
		if (task_pid_type(p, type) != pid)
			return 0;
	}

	/*
	 * Do not consider detached threads that are
	 * not ptraced:
	 */
	if (task_detached(p) && !p->ptrace)
		return 0;

	/* Wait for all children (clone and not) if __WALL is set;
	 * otherwise, wait for clone children *only* if __WCLONE is
	 * set; otherwise, wait for non-clone children *only*.  (Note:
	 * A "clone" child here is one that reports to its parent
	 * using a signal other than SIGCHLD.) */
	if (((p->exit_signal != SIGCHLD) ^ ((options & __WCLONE) != 0))
	    && !(options & __WALL))
		return 0;

	err = security_task_wait(p);
	if (likely(!err))
		return 1;

	if (type != PIDTYPE_PID)
		return 0;
	/* This child was explicitly requested, abort */
	read_unlock(&tasklist_lock);
	return err;
}

static int wait_noreap_copyout(struct task_struct *p, pid_t pid, uid_t uid,
			       int why, int status,
			       struct siginfo __user *infop,
			       struct rusage __user *rusagep)
{
	int retval = rusagep ? getrusage(p, RUSAGE_BOTH, rusagep) : 0;

	put_task_struct(p);
	if (!retval)
		retval = put_user(SIGCHLD, &infop->si_signo);
	if (!retval)
		retval = put_user(0, &infop->si_errno);
	if (!retval)
		retval = put_user((short)why, &infop->si_code);
	if (!retval)
		retval = put_user(pid, &infop->si_pid);
	if (!retval)
		retval = put_user(uid, &infop->si_uid);
	if (!retval)
		retval = put_user(status, &infop->si_status);
	if (!retval)
		retval = pid;
	return retval;
}
/*
 * Handle sys_wait4 work for one task in state EXIT_ZOMBIE.  We hold
 * read_lock(&tasklist_lock) on entry.  If we return zero, we still hold
 * the lock and this task is uninteresting.  If we return nonzero, we have
 * released the lock and the system call should return.
 */
static int wait_task_zombie(struct task_struct *p, int options,
			    struct siginfo __user *infop,
			    int __user *stat_addr, struct rusage __user *ru)
{
	unsigned long state;
	int retval, status, traced;
	pid_t pid = task_pid_vnr(p);

	if (!likely(options & WEXITED))
		return 0;

	if (unlikely(options & WNOWAIT)) {
		uid_t uid = p->uid;
		int exit_code = p->exit_code;
		int why, status;

		get_task_struct(p);
		read_unlock(&tasklist_lock);
		if ((exit_code & 0x7f) == 0) {
			why = CLD_EXITED;
			status = exit_code >> 8;
		} else {
			why = (exit_code & 0x80) ? CLD_DUMPED : CLD_KILLED;
			status = exit_code & 0x7f;
		}
		return wait_noreap_copyout(p, pid, uid, why,
					   status, infop, ru);
	}

	/*
	 * Try to move the task's state to DEAD
	 * only one thread is allowed to do this:
	 */
	state = xchg(&p->exit_state, EXIT_DEAD);
	if (state != EXIT_ZOMBIE) {
		BUG_ON(state != EXIT_DEAD);
		return 0;
	}

	traced = ptrace_reparented(p);

	if (likely(!traced)) {
		struct signal_struct *psig;
		struct signal_struct *sig;

		/*
		 * The resource counters for the group leader are in its
		 * own task_struct.  Those for dead threads in the group
		 * are in its signal_struct, as are those for the child
		 * processes it has previously reaped.  All these
		 * accumulate in the parent's signal_struct c* fields.
		 *
		 * We don't bother to take a lock here to protect these
		 * p->signal fields, because they are only touched by
		 * __exit_signal, which runs with tasklist_lock
		 * write-locked anyway, and so is excluded here.  We do
		 * need to protect the access to p->parent->signal fields,
		 * as other threads in the parent group can be right
		 * here reaping other children at the same time.
		 */
		spin_lock_irq(&p->parent->sighand->siglock);
		psig = p->parent->signal;
		sig = p->signal;
		psig->cutime =
			cputime_add(psig->cutime,
			cputime_add(p->utime,
			cputime_add(sig->utime,
				    sig->cutime)));
		psig->cstime =
			cputime_add(psig->cstime,
			cputime_add(p->stime,
			cputime_add(sig->stime,
				    sig->cstime)));
		psig->cgtime =
			cputime_add(psig->cgtime,
			cputime_add(p->gtime,
			cputime_add(sig->gtime,
				    sig->cgtime)));
		psig->cmin_flt +=
			p->min_flt + sig->min_flt + sig->cmin_flt;
		psig->cmaj_flt +=
			p->maj_flt + sig->maj_flt + sig->cmaj_flt;
		psig->cnvcsw +=
			p->nvcsw + sig->nvcsw + sig->cnvcsw;
		psig->cnivcsw +=
			p->nivcsw + sig->nivcsw + sig->cnivcsw;
		psig->cinblock +=
			task_io_get_inblock(p) +
			sig->inblock + sig->cinblock;
		psig->coublock +=
			task_io_get_oublock(p) +
			sig->oublock + sig->coublock;
		spin_unlock_irq(&p->parent->sighand->siglock);
	}

	/*
	 * Now we are sure this task is interesting, and no other
	 * thread can reap it because we set its state to EXIT_DEAD.
	 */
	read_unlock(&tasklist_lock);

	retval = ru ? getrusage(p, RUSAGE_BOTH, ru) : 0;
	status = (p->signal->flags & SIGNAL_GROUP_EXIT)
		? p->signal->group_exit_code : p->exit_code;
	if (!retval && stat_addr)
		retval = put_user(status, stat_addr);
	if (!retval && infop)
		retval = put_user(SIGCHLD, &infop->si_signo);
	if (!retval && infop)
		retval = put_user(0, &infop->si_errno);
	if (!retval && infop) {
		int why;

		if ((status & 0x7f) == 0) {
			why = CLD_EXITED;
			status >>= 8;
		} else {
			why = (status & 0x80) ? CLD_DUMPED : CLD_KILLED;
			status &= 0x7f;
		}
		retval = put_user((short)why, &infop->si_code);
		if (!retval)
			retval = put_user(status, &infop->si_status);
	}
	if (!retval && infop)
		retval = put_user(pid, &infop->si_pid);
	if (!retval && infop)
		retval = put_user(p->uid, &infop->si_uid);
	if (!retval)
		retval = pid;

	if (traced) {
		write_lock_irq(&tasklist_lock);
		/* We dropped tasklist, ptracer could die and untrace */
		ptrace_unlink(p);
		/*
		 * If this is not a detached task, notify the parent.
		 * If it's still not detached after that, don't release
		 * it now.
		 */
		if (!task_detached(p)) {
			do_notify_parent(p, p->exit_signal);
			if (!task_detached(p)) {
				p->exit_state = EXIT_ZOMBIE;
				p = NULL;
			}
		}
		write_unlock_irq(&tasklist_lock);
	}
	if (p != NULL)
		release_task(p);

	return retval;
}
/*
 * Handle sys_wait4 work for one task in state TASK_STOPPED.  We hold
 * read_lock(&tasklist_lock) on entry.  If we return zero, we still hold
 * the lock and this task is uninteresting.  If we return nonzero, we have
 * released the lock and the system call should return.
 */
static int wait_task_stopped(struct task_struct *p,
			     int options, struct siginfo __user *infop,
			     int __user *stat_addr, struct rusage __user *ru)
{
	int retval, exit_code, why;
	uid_t uid = 0; /* unneeded, required by compiler */
	pid_t pid;

	if (!(p->ptrace & PT_PTRACED) && !(options & WUNTRACED))
		return 0;

	exit_code = 0;
	spin_lock_irq(&p->sighand->siglock);

	if (unlikely(!task_is_stopped_or_traced(p)))
		goto unlock_sig;

	if (!(p->ptrace & PT_PTRACED) && p->signal->group_stop_count > 0)
		/*
		 * A group stop is in progress and this is the group leader.
		 * We won't report until all threads have stopped.
		 */
		goto unlock_sig;

	exit_code = p->exit_code;
	if (!exit_code)
		goto unlock_sig;

	if (!unlikely(options & WNOWAIT))
		p->exit_code = 0;

	uid = p->uid;
unlock_sig:
	spin_unlock_irq(&p->sighand->siglock);
	if (!exit_code)
		return 0;

	/*
	 * Now we are pretty sure this task is interesting.
	 * Make sure it doesn't get reaped out from under us while we
	 * give up the lock and then examine it below.  We don't want to
	 * keep holding onto the tasklist_lock while we call getrusage and
	 * possibly take page faults for user memory.
	 */
	get_task_struct(p);
	pid = task_pid_vnr(p);
	why = (p->ptrace & PT_PTRACED) ? CLD_TRAPPED : CLD_STOPPED;
	read_unlock(&tasklist_lock);

	if (unlikely(options & WNOWAIT))
		return wait_noreap_copyout(p, pid, uid,
					   why, exit_code,
					   infop, ru);

	retval = ru ? getrusage(p, RUSAGE_BOTH, ru) : 0;
	if (!retval && stat_addr)
		retval = put_user((exit_code << 8) | 0x7f, stat_addr);
	if (!retval && infop)
		retval = put_user(SIGCHLD, &infop->si_signo);
	if (!retval && infop)
		retval = put_user(0, &infop->si_errno);
	if (!retval && infop)
		retval = put_user((short)why, &infop->si_code);
	if (!retval && infop)
		retval = put_user(exit_code, &infop->si_status);
	if (!retval && infop)
		retval = put_user(pid, &infop->si_pid);
	if (!retval && infop)
		retval = put_user(uid, &infop->si_uid);
	if (!retval)
		retval = pid;
	put_task_struct(p);

	BUG_ON(!retval);
	return retval;
}
/*
 * Handle do_wait work for one task in a live, non-stopped state.
 * read_lock(&tasklist_lock) on entry.  If we return zero, we still hold
 * the lock and this task is uninteresting.  If we return nonzero, we have
 * released the lock and the system call should return.
 */
static int wait_task_continued(struct task_struct *p, int options,
			       struct siginfo __user *infop,
			       int __user *stat_addr, struct rusage __user *ru)
{
	int retval;
	pid_t pid;
	uid_t uid;

	if (!unlikely(options & WCONTINUED))
		return 0;

	if (!(p->signal->flags & SIGNAL_STOP_CONTINUED))
		return 0;

	spin_lock_irq(&p->sighand->siglock);
	/* Re-check with the lock held.  */
	if (!(p->signal->flags & SIGNAL_STOP_CONTINUED)) {
		spin_unlock_irq(&p->sighand->siglock);
		return 0;
	}
	if (!unlikely(options & WNOWAIT))
		p->signal->flags &= ~SIGNAL_STOP_CONTINUED;
	spin_unlock_irq(&p->sighand->siglock);

	pid = task_pid_vnr(p);
	uid = p->uid;
	get_task_struct(p);
	read_unlock(&tasklist_lock);

	if (!infop) {
		retval = ru ? getrusage(p, RUSAGE_BOTH, ru) : 0;
		put_task_struct(p);
		if (!retval && stat_addr)
			retval = put_user(0xffff, stat_addr);
		if (!retval)
			retval = pid;
	} else {
		retval = wait_noreap_copyout(p, pid, uid,
					     CLD_CONTINUED, SIGCONT,
					     infop, ru);
		BUG_ON(retval == 0);
	}

	return retval;
}
/*
 * Consider @p for a wait by @parent.
 *
 * -ECHILD should be in *@notask_error before the first call.
 * Returns nonzero for a final return, when we have unlocked tasklist_lock.
 * Returns zero if the search for a child should continue;
 * then *@notask_error is 0 if @p is an eligible child, or still -ECHILD.
 */
static int wait_consider_task(struct task_struct *parent,
			      struct task_struct *p, int *notask_error,
			      enum pid_type type, struct pid *pid, int options,
			      struct siginfo __user *infop,
			      int __user *stat_addr, struct rusage __user *ru)
{
	int ret = eligible_child(type, pid, options, p);
	if (ret <= 0)
		return ret;

	if (p->exit_state == EXIT_DEAD)
		return 0;

	/*
	 * We don't reap group leaders with subthreads.
	 */
	if (p->exit_state == EXIT_ZOMBIE && !delay_group_leader(p))
		return wait_task_zombie(p, options, infop, stat_addr, ru);

	/*
	 * It's stopped or running now, so it might
	 * later continue, exit, or stop again.
	 */
	*notask_error = 0;

	if (task_is_stopped_or_traced(p))
		return wait_task_stopped(p, options, infop, stat_addr, ru);

	return wait_task_continued(p, options, infop, stat_addr, ru);
}
/*
 * Do the work of do_wait() for one thread in the group, @tsk.
 *
 * -ECHILD should be in *@notask_error before the first call.
 * Returns nonzero for a final return, when we have unlocked tasklist_lock.
 * Returns zero if the search for a child should continue; then
 * *@notask_error is 0 if there were any eligible children, or still -ECHILD.
 */
static int do_wait_thread(struct task_struct *tsk, int *notask_error,
			  enum pid_type type, struct pid *pid, int options,
			  struct siginfo __user *infop, int __user *stat_addr,
			  struct rusage __user *ru)
{
	struct task_struct *p;

	list_for_each_entry(p, &tsk->children, sibling) {
		int ret = wait_consider_task(tsk, p, notask_error,
					     type, pid, options,
					     infop, stat_addr, ru);
		if (ret)
			return ret;
	}

	return 0;
}

static int ptrace_do_wait(struct task_struct *tsk, int *notask_error,
			  enum pid_type type, struct pid *pid, int options,
			  struct siginfo __user *infop, int __user *stat_addr,
			  struct rusage __user *ru)
{
	struct task_struct *p;

	/*
	 * If we never saw an eligible child, check for children stolen by
	 * ptrace.  We don't leave -ECHILD in *@notask_error if there are any,
	 * because we will eventually be allowed to wait for them again.
	 */
	if (!*notask_error)
		return 0;

	list_for_each_entry(p, &tsk->ptrace_children, ptrace_list) {
		int ret = eligible_child(type, pid, options, p);
		if (unlikely(ret < 0))
			return ret;
		if (ret) {
			*notask_error = 0;
			return 0;
		}
	}

	return 0;
}
static long do_wait(enum pid_type type, struct pid *pid, int options,
		    struct siginfo __user *infop, int __user *stat_addr,
		    struct rusage __user *ru)
{
	DECLARE_WAITQUEUE(wait, current);
	struct task_struct *tsk;
	int retval;

	add_wait_queue(&current->signal->wait_chldexit, &wait);
repeat:
	/*
	 * If there is nothing that can match our criteria just get out.
	 * We will clear @retval to zero if we see any child that might later
	 * match our criteria, even if we are not able to reap it yet.
	 */
	retval = -ECHILD;
	if ((type < PIDTYPE_MAX) && (!pid || hlist_empty(&pid->tasks[type])))
		goto end;

	current->state = TASK_INTERRUPTIBLE;
	read_lock(&tasklist_lock);
	tsk = current;
	do {
		int tsk_result = do_wait_thread(tsk, &retval,
						type, pid, options,
						infop, stat_addr, ru);
		if (!tsk_result)
			tsk_result = ptrace_do_wait(tsk, &retval,
						    type, pid, options,
						    infop, stat_addr, ru);
		if (tsk_result) {
			/*
			 * tasklist_lock is unlocked and we have a final result.
			 */
			retval = tsk_result;
			goto end;
		}

		if (options & __WNOTHREAD)
			break;
		tsk = next_thread(tsk);
		BUG_ON(tsk->signal != current->signal);
	} while (tsk != current);
	read_unlock(&tasklist_lock);

	if (!retval && !(options & WNOHANG)) {
		retval = -ERESTARTSYS;
		if (!signal_pending(current)) {
			schedule();
			goto repeat;
		}
	}

end:
	current->state = TASK_RUNNING;
	remove_wait_queue(&current->signal->wait_chldexit, &wait);
	if (infop) {
		if (retval > 0)
			retval = 0;
		else {
			/*
			 * For a WNOHANG return, clear out all the fields
			 * we would set so the user can easily tell the
			 * difference.
			 */
			if (!retval)
				retval = put_user(0, &infop->si_signo);
			if (!retval)
				retval = put_user(0, &infop->si_errno);
			if (!retval)
				retval = put_user(0, &infop->si_code);
			if (!retval)
				retval = put_user(0, &infop->si_pid);
			if (!retval)
				retval = put_user(0, &infop->si_uid);
			if (!retval)
				retval = put_user(0, &infop->si_status);
		}
	}
	return retval;
}
asmlinkage long sys_waitid(int which, pid_t upid,
			   struct siginfo __user *infop, int options,
			   struct rusage __user *ru)
{
	struct pid *pid = NULL;
	enum pid_type type;
	long ret;

	if (options & ~(WNOHANG|WNOWAIT|WEXITED|WSTOPPED|WCONTINUED))
		return -EINVAL;
	if (!(options & (WEXITED|WSTOPPED|WCONTINUED)))
		return -EINVAL;

	switch (which) {
	case P_ALL:
		type = PIDTYPE_MAX;
		break;
	case P_PID:
		type = PIDTYPE_PID;
		if (upid <= 0)
			return -EINVAL;
		break;
	case P_PGID:
		type = PIDTYPE_PGID;
		if (upid <= 0)
			return -EINVAL;
		break;
	default:
		return -EINVAL;
	}

	if (type < PIDTYPE_MAX)
		pid = find_get_pid(upid);
	ret = do_wait(type, pid, options, infop, NULL, ru);
	put_pid(pid);

	/* avoid REGPARM breakage on x86: */
	asmlinkage_protect(5, ret, which, upid, infop, options, ru);
	return ret;
}
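
/*
 * Userspace view (illustrative only): waitid() can peek at a child
 * without reaping it by passing WNOWAIT, which maps to the WNOWAIT
 * handling in wait_task_zombie() above.
 *
 *	siginfo_t info;
 *	waitid(P_PID, child_pid, &info, WEXITED | WNOWAIT);
 *	(info.si_pid/si_status are filled in; the child stays waitable)
 */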
asmlinkage long sys_wait4(pid_t upid, int __user *stat_addr,
			  int options, struct rusage __user *ru)
{
	struct pid *pid = NULL;
	enum pid_type type;
	long ret;

	if (options & ~(WNOHANG|WUNTRACED|WCONTINUED|
			__WNOTHREAD|__WCLONE|__WALL))
		return -EINVAL;

	if (upid == -1)
		type = PIDTYPE_MAX;
	else if (upid < 0) {
		type = PIDTYPE_PGID;
		pid = find_get_pid(-upid);
	} else if (upid == 0) {
		type = PIDTYPE_PGID;
		pid = get_pid(task_pgrp(current));
	} else /* upid > 0 */ {
		type = PIDTYPE_PID;
		pid = find_get_pid(upid);
	}

	ret = do_wait(type, pid, options | WEXITED, NULL, stat_addr, ru);
	put_pid(pid);

	/* avoid REGPARM breakage on x86: */
	asmlinkage_protect(4, ret, upid, stat_addr, options, ru);
	return ret;
}
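
/*
 * Userspace view (illustrative only): the classic reaping loop built on
 * this call.  waitpid(-1, ...) arrives here with upid == -1, i.e. the
 * PIDTYPE_MAX ("any child") case above; handle_child() is a hypothetical
 * handler.
 *
 *	int status;
 *	pid_t pid;
 *	while ((pid = waitpid(-1, &status, WNOHANG)) > 0)
 *		handle_child(pid, status);
 */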
#ifdef __ARCH_WANT_SYS_WAITPID

/*
 * sys_waitpid() remains for compatibility. waitpid() should be
 * implemented by calling sys_wait4() from libc.a.
 */
asmlinkage long sys_waitpid(pid_t pid, int __user *stat_addr, int options)
{
	return sys_wait4(pid, stat_addr, options, NULL);
}

#endif