exit.c

/*
 *  linux/kernel/exit.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/capability.h>
#include <linux/completion.h>
#include <linux/personality.h>
#include <linux/tty.h>
#include <linux/iocontext.h>
#include <linux/key.h>
#include <linux/security.h>
#include <linux/cpu.h>
#include <linux/acct.h>
#include <linux/tsacct_kern.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/binfmts.h>
#include <linux/nsproxy.h>
#include <linux/pid_namespace.h>
#include <linux/ptrace.h>
#include <linux/profile.h>
#include <linux/mount.h>
#include <linux/proc_fs.h>
#include <linux/kthread.h>
#include <linux/mempolicy.h>
#include <linux/taskstats_kern.h>
#include <linux/delayacct.h>
#include <linux/freezer.h>
#include <linux/cgroup.h>
#include <linux/syscalls.h>
#include <linux/signal.h>
#include <linux/posix-timers.h>
#include <linux/cn_proc.h>
#include <linux/mutex.h>
#include <linux/futex.h>
#include <linux/pipe_fs_i.h>
#include <linux/audit.h> /* for audit_free() */
#include <linux/resource.h>
#include <linux/blkdev.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/tracehook.h>
#include <linux/fs_struct.h>
#include <linux/init_task.h>
#include <linux/perf_event.h>
#include <trace/events/sched.h>
#include <linux/hw_breakpoint.h>
#include <linux/oom.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm/pgtable.h>
#include <asm/mmu_context.h>

static void exit_mm(struct task_struct * tsk);
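
/*
 * Drop the dying task from the pid hashes and the thread/task lists.
 * Called from __exit_signal() with tasklist_lock write-locked.
 */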
static void __unhash_process(struct task_struct *p, bool group_dead)
{
        nr_threads--;
        detach_pid(p, PIDTYPE_PID);
        if (group_dead) {
                detach_pid(p, PIDTYPE_PGID);
                detach_pid(p, PIDTYPE_SID);

                list_del_rcu(&p->tasks);
                list_del_init(&p->sibling);
                __this_cpu_dec(process_counts);
        }
        list_del_rcu(&p->thread_group);
}
/*
 * This function expects the tasklist_lock write-locked.
 */
static void __exit_signal(struct task_struct *tsk)
{
        struct signal_struct *sig = tsk->signal;
        bool group_dead = thread_group_leader(tsk);
        struct sighand_struct *sighand;
        struct tty_struct *uninitialized_var(tty);

        sighand = rcu_dereference_check(tsk->sighand,
                                        lockdep_tasklist_lock_is_held());
        spin_lock(&sighand->siglock);

        posix_cpu_timers_exit(tsk);
        if (group_dead) {
                posix_cpu_timers_exit_group(tsk);
                tty = sig->tty;
                sig->tty = NULL;
        } else {
                /*
                 * This can only happen if the caller is de_thread().
                 * FIXME: this is a temporary hack; we should teach
                 * posix-cpu-timers to handle this case correctly.
                 */
                if (unlikely(has_group_leader_pid(tsk)))
                        posix_cpu_timers_exit_group(tsk);

                /*
                 * If there is any task waiting for the group exit
                 * then notify it:
                 */
                if (sig->notify_count > 0 && !--sig->notify_count)
                        wake_up_process(sig->group_exit_task);

                if (tsk == sig->curr_target)
                        sig->curr_target = next_thread(tsk);
                /*
                 * Accumulate here the counters for all threads but the
                 * group leader as they die, so they can be added into
                 * the process-wide totals when those are taken.
                 * The group leader stays around as a zombie as long
                 * as there are other threads.  When it gets reaped,
                 * the exit.c code will add its counts into these totals.
                 * We won't ever get here for the group leader, since it
                 * will have been the last reference on the signal_struct.
                 */
                sig->utime = cputime_add(sig->utime, tsk->utime);
                sig->stime = cputime_add(sig->stime, tsk->stime);
                sig->gtime = cputime_add(sig->gtime, tsk->gtime);
                sig->min_flt += tsk->min_flt;
                sig->maj_flt += tsk->maj_flt;
                sig->nvcsw += tsk->nvcsw;
                sig->nivcsw += tsk->nivcsw;
                sig->inblock += task_io_get_inblock(tsk);
                sig->oublock += task_io_get_oublock(tsk);
                task_io_accounting_add(&sig->ioac, &tsk->ioac);
                sig->sum_sched_runtime += tsk->se.sum_exec_runtime;
        }

        sig->nr_threads--;
        __unhash_process(tsk, group_dead);

        /*
         * Do this under ->siglock, we can race with another thread
         * doing sigqueue_free() if we have SIGQUEUE_PREALLOC signals.
         */
        flush_sigqueue(&tsk->pending);
        tsk->sighand = NULL;
        spin_unlock(&sighand->siglock);

        __cleanup_sighand(sighand);
        clear_tsk_thread_flag(tsk, TIF_SIGPENDING);
        if (group_dead) {
                flush_sigqueue(&sig->shared_pending);
                tty_kref_put(tty);
        }
}
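
/*
 * RCU callback: the final reference on the task_struct is dropped only
 * after a grace period, so concurrent RCU readers can still safely look
 * at the task (see the call_rcu() in release_task() below).
 */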
static void delayed_put_task_struct(struct rcu_head *rhp)
{
        struct task_struct *tsk = container_of(rhp, struct task_struct, rcu);

        perf_event_delayed_put(tsk);
        trace_sched_process_free(tsk);
        put_task_struct(tsk);
}
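
/*
 * Detach the dead task from all bookkeeping (credentials, /proc entries,
 * signal handling, pid hashes) and schedule the final put via RCU.  If
 * this reaps the last non-leader thread of a group whose zombie leader
 * is self-reaping, loop once more to reap the leader as well.
 */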
void release_task(struct task_struct * p)
{
        struct task_struct *leader;
        int zap_leader;
repeat:
        tracehook_prepare_release_task(p);
        /* don't need to get the RCU readlock here - the process is dead and
         * can't be modifying its own credentials. But shut RCU-lockdep up */
        rcu_read_lock();
        atomic_dec(&__task_cred(p)->user->processes);
        rcu_read_unlock();

        proc_flush_task(p);

        write_lock_irq(&tasklist_lock);
        tracehook_finish_release_task(p);
        __exit_signal(p);

        /*
         * If we are the last non-leader member of the thread
         * group, and the leader is zombie, then notify the
         * group leader's parent process. (if it wants notification.)
         */
        zap_leader = 0;
        leader = p->group_leader;
        if (leader != p && thread_group_empty(leader) &&
            leader->exit_state == EXIT_ZOMBIE) {
                BUG_ON(task_detached(leader));
                do_notify_parent(leader, leader->exit_signal);
                /*
                 * If we were the last child thread and the leader has
                 * exited already, and the leader's parent ignores SIGCHLD,
                 * then we are the one who should release the leader.
                 *
                 * do_notify_parent() will have marked it self-reaping in
                 * that case.
                 */
                zap_leader = task_detached(leader);

                /*
                 * This maintains the invariant that release_task()
                 * only runs on a task in EXIT_DEAD, just for sanity.
                 */
                if (zap_leader)
                        leader->exit_state = EXIT_DEAD;
        }

        write_unlock_irq(&tasklist_lock);
        release_thread(p);
        call_rcu(&p->rcu, delayed_put_task_struct);

        p = leader;
        if (unlikely(zap_leader))
                goto repeat;
}
/*
 * This checks not only the pgrp, but falls back on the pid if no
 * satisfactory pgrp is found. I dunno - gdb doesn't work correctly
 * without this...
 *
 * The caller must hold rcu lock or the tasklist lock.
 */
struct pid *session_of_pgrp(struct pid *pgrp)
{
        struct task_struct *p;
        struct pid *sid = NULL;

        p = pid_task(pgrp, PIDTYPE_PGID);
        if (p == NULL)
                p = pid_task(pgrp, PIDTYPE_PID);
        if (p != NULL)
                sid = task_session(p);

        return sid;
}
/*
 * Determine if a process group is "orphaned", according to the POSIX
 * definition in 2.2.2.52.  Orphaned process groups are not to be affected
 * by terminal-generated stop signals.  Newly orphaned process groups are
 * to receive a SIGHUP and a SIGCONT.
 *
 * "I ask you, have you ever known what it is to be an orphan?"
 */
static int will_become_orphaned_pgrp(struct pid *pgrp, struct task_struct *ignored_task)
{
        struct task_struct *p;

        do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
                if ((p == ignored_task) ||
                    (p->exit_state && thread_group_empty(p)) ||
                    is_global_init(p->real_parent))
                        continue;

                if (task_pgrp(p->real_parent) != pgrp &&
                    task_session(p->real_parent) == task_session(p))
                        return 0;
        } while_each_pid_task(pgrp, PIDTYPE_PGID, p);

        return 1;
}

int is_current_pgrp_orphaned(void)
{
        int retval;

        read_lock(&tasklist_lock);
        retval = will_become_orphaned_pgrp(task_pgrp(current), NULL);
        read_unlock(&tasklist_lock);

        return retval;
}
static int has_stopped_jobs(struct pid *pgrp)
{
        int retval = 0;
        struct task_struct *p;

        do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
                if (!task_is_stopped(p))
                        continue;
                retval = 1;
                break;
        } while_each_pid_task(pgrp, PIDTYPE_PGID, p);

        return retval;
}

/*
 * Check to see if any process groups have become orphaned as
 * a result of our exiting, and if they have any stopped jobs,
 * send them a SIGHUP and then a SIGCONT.  (POSIX 3.2.2.2)
 */
static void
kill_orphaned_pgrp(struct task_struct *tsk, struct task_struct *parent)
{
        struct pid *pgrp = task_pgrp(tsk);
        struct task_struct *ignored_task = tsk;

        if (!parent)
                /* exit: our father is in a different pgrp than
                 * we are and we were the only connection outside.
                 */
                parent = tsk->real_parent;
        else
                /* reparent: our child is in a different pgrp than
                 * we are, and it was the only connection outside.
                 */
                ignored_task = NULL;

        if (task_pgrp(parent) != pgrp &&
            task_session(parent) == task_session(tsk) &&
            will_become_orphaned_pgrp(pgrp, ignored_task) &&
            has_stopped_jobs(pgrp)) {
                __kill_pgrp_info(SIGHUP, SEND_SIG_PRIV, pgrp);
                __kill_pgrp_info(SIGCONT, SEND_SIG_PRIV, pgrp);
        }
}
/**
 * reparent_to_kthreadd - Reparent the calling kernel thread to kthreadd
 *
 * If a kernel thread is launched as a result of a system call, or if
 * it ever exits, it should generally reparent itself to kthreadd so it
 * isn't in the way of other processes and is correctly cleaned up on exit.
 *
 * The various task state such as scheduling policy and priority may have
 * been inherited from a user process, so we reset them to sane values here.
 *
 * NOTE that reparent_to_kthreadd() gives the caller full capabilities.
 */
static void reparent_to_kthreadd(void)
{
        write_lock_irq(&tasklist_lock);

        ptrace_unlink(current);
        /* Reparent to kthreadd */
        current->real_parent = current->parent = kthreadd_task;
        list_move_tail(&current->sibling, &current->real_parent->children);

        /* Set the exit signal to SIGCHLD so we signal the new parent on exit */
        current->exit_signal = SIGCHLD;

        if (task_nice(current) < 0)
                set_user_nice(current, 0);
        /* cpus_allowed? */
        /* rt_priority? */
        /* signals? */
        memcpy(current->signal->rlim, init_task.signal->rlim,
               sizeof(current->signal->rlim));

        atomic_inc(&init_cred.usage);
        commit_creds(&init_cred);
        write_unlock_irq(&tasklist_lock);
}
void __set_special_pids(struct pid *pid)
{
        struct task_struct *curr = current->group_leader;

        if (task_session(curr) != pid)
                change_pid(curr, PIDTYPE_SID, pid);

        if (task_pgrp(curr) != pid)
                change_pid(curr, PIDTYPE_PGID, pid);
}

static void set_special_pids(struct pid *pid)
{
        write_lock_irq(&tasklist_lock);
        __set_special_pids(pid);
        write_unlock_irq(&tasklist_lock);
}
/*
 * Let kernel threads use this to say that they allow a certain signal.
 * Must not be used if kthread was cloned with CLONE_SIGHAND.
 */
int allow_signal(int sig)
{
        if (!valid_signal(sig) || sig < 1)
                return -EINVAL;

        spin_lock_irq(&current->sighand->siglock);
        /* This is only needed for daemonize()'ed kthreads */
        sigdelset(&current->blocked, sig);
        /*
         * Kernel threads handle their own signals. Let the signal code
         * know it'll be handled, so that they don't get converted to
         * SIGKILL or just silently dropped.
         */
        current->sighand->action[(sig)-1].sa.sa_handler = (void __user *)2;
        recalc_sigpending();
        spin_unlock_irq(&current->sighand->siglock);
        return 0;
}

EXPORT_SYMBOL(allow_signal);

int disallow_signal(int sig)
{
        if (!valid_signal(sig) || sig < 1)
                return -EINVAL;

        spin_lock_irq(&current->sighand->siglock);
        current->sighand->action[(sig)-1].sa.sa_handler = SIG_IGN;
        recalc_sigpending();
        spin_unlock_irq(&current->sighand->siglock);
        return 0;
}

EXPORT_SYMBOL(disallow_signal);
/*
 * Put all the gunge required to become a kernel thread without
 * attached user resources in one place where it belongs.
 */
void daemonize(const char *name, ...)
{
        va_list args;
        sigset_t blocked;

        va_start(args, name);
        vsnprintf(current->comm, sizeof(current->comm), name, args);
        va_end(args);

        /*
         * If we were started as result of loading a module, close all of the
         * user space pages.  We don't need them, and if we didn't close them
         * they would be locked into memory.
         */
        exit_mm(current);
        /*
         * We don't want to have TIF_FREEZE set if the system-wide hibernation
         * or suspend transition begins right now.
         */
        current->flags |= (PF_NOFREEZE | PF_KTHREAD);

        if (current->nsproxy != &init_nsproxy) {
                get_nsproxy(&init_nsproxy);
                switch_task_namespaces(current, &init_nsproxy);
        }
        set_special_pids(&init_struct_pid);
        proc_clear_tty(current);

        /* Block and flush all signals */
        sigfillset(&blocked);
        sigprocmask(SIG_BLOCK, &blocked, NULL);
        flush_signals(current);

        /* Become as one with the init task */
        daemonize_fs_struct();
        exit_files(current);
        current->files = init_task.files;
        atomic_inc(&current->files->count);

        reparent_to_kthreadd();
}

EXPORT_SYMBOL(daemonize);
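
/*
 * Walk the open-fd bitmap and close every file still installed in the
 * table.  Only reached once ->count has dropped to zero (see
 * put_files_struct() below), so no locking against concurrent table
 * updates is needed.
 */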
static void close_files(struct files_struct * files)
{
        int i, j;
        struct fdtable *fdt;

        j = 0;

        /*
         * It is safe to dereference the fd table without RCU or
         * ->file_lock because this is the last reference to the
         * files structure.  But use RCU to shut RCU-lockdep up.
         */
        rcu_read_lock();
        fdt = files_fdtable(files);
        rcu_read_unlock();
        for (;;) {
                unsigned long set;
                i = j * __NFDBITS;
                if (i >= fdt->max_fds)
                        break;
                set = fdt->open_fds->fds_bits[j++];
                while (set) {
                        if (set & 1) {
                                struct file * file = xchg(&fdt->fd[i], NULL);
                                if (file) {
                                        filp_close(file, files);
                                        cond_resched();
                                }
                        }
                        i++;
                        set >>= 1;
                }
        }
}
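
/*
 * get_files_struct()/put_files_struct() manage the reference count on a
 * task's file table; the final put closes all remaining files and frees
 * the table.
 */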
struct files_struct *get_files_struct(struct task_struct *task)
{
        struct files_struct *files;

        task_lock(task);
        files = task->files;
        if (files)
                atomic_inc(&files->count);
        task_unlock(task);

        return files;
}

void put_files_struct(struct files_struct *files)
{
        struct fdtable *fdt;

        if (atomic_dec_and_test(&files->count)) {
                close_files(files);
                /*
                 * Free the fd and fdset arrays if we expanded them.
                 * If the fdtable was embedded, pass files for freeing
                 * at the end of the RCU grace period. Otherwise,
                 * you can free files immediately.
                 */
                rcu_read_lock();
                fdt = files_fdtable(files);
                if (fdt != &files->fdtab)
                        kmem_cache_free(files_cachep, files);
                free_fdtable(fdt);
                rcu_read_unlock();
        }
}

void reset_files_struct(struct files_struct *files)
{
        struct task_struct *tsk = current;
        struct files_struct *old;

        old = tsk->files;
        task_lock(tsk);
        tsk->files = files;
        task_unlock(tsk);
        put_files_struct(old);
}

void exit_files(struct task_struct *tsk)
{
        struct files_struct * files = tsk->files;

        if (files) {
                task_lock(tsk);
                tsk->files = NULL;
                task_unlock(tsk);
                put_files_struct(files);
        }
}
#ifdef CONFIG_MM_OWNER
/*
 * Task p is exiting and it owned mm, let's find a new owner for it
 */
static inline int
mm_need_new_owner(struct mm_struct *mm, struct task_struct *p)
{
        /*
         * If there are other users of the mm and the owner (us) is exiting
         * we need to find a new owner to take on the responsibility.
         */
        if (atomic_read(&mm->mm_users) <= 1)
                return 0;
        if (mm->owner != p)
                return 0;
        return 1;
}
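
/*
 * Pick a new mm->owner: prefer a child, then a sibling, and only then
 * fall back to scanning every task in the system.
 */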
void mm_update_next_owner(struct mm_struct *mm)
{
        struct task_struct *c, *g, *p = current;

retry:
        if (!mm_need_new_owner(mm, p))
                return;

        read_lock(&tasklist_lock);
        /*
         * Search in the children
         */
        list_for_each_entry(c, &p->children, sibling) {
                if (c->mm == mm)
                        goto assign_new_owner;
        }

        /*
         * Search in the siblings
         */
        list_for_each_entry(c, &p->real_parent->children, sibling) {
                if (c->mm == mm)
                        goto assign_new_owner;
        }

        /*
         * Search through everything else. We should not get
         * here often
         */
        do_each_thread(g, c) {
                if (c->mm == mm)
                        goto assign_new_owner;
        } while_each_thread(g, c);
        read_unlock(&tasklist_lock);
        /*
         * We found no owner yet mm_users > 1: this implies that we are
         * most likely racing with swapoff (try_to_unuse()) or /proc or
         * ptrace or page migration (get_task_mm()).  Mark owner as NULL.
         */
        mm->owner = NULL;
        return;

assign_new_owner:
        BUG_ON(c == p);
        get_task_struct(c);
        /*
         * The task_lock protects c->mm from changing.
         * We always want mm->owner->mm == mm
         */
        task_lock(c);
        /*
         * Delay read_unlock() till we have the task_lock()
         * to ensure that c does not slip away underneath us
         */
        read_unlock(&tasklist_lock);
        if (c->mm != mm) {
                task_unlock(c);
                put_task_struct(c);
                goto retry;
        }
        mm->owner = c;
        task_unlock(c);
        put_task_struct(c);
}
#endif /* CONFIG_MM_OWNER */
/*
 * Turn us into a lazy TLB process if we
 * aren't already..
 */
static void exit_mm(struct task_struct * tsk)
{
        struct mm_struct *mm = tsk->mm;
        struct core_state *core_state;

        mm_release(tsk, mm);
        if (!mm)
                return;
        /*
         * Serialize with any possible pending coredump.
         * We must hold mmap_sem around checking core_state
         * and clearing tsk->mm.  The core-inducing thread
         * will increment ->nr_threads for each thread in the
         * group with ->mm != NULL.
         */
        down_read(&mm->mmap_sem);
        core_state = mm->core_state;
        if (core_state) {
                struct core_thread self;
                up_read(&mm->mmap_sem);

                self.task = tsk;
                self.next = xchg(&core_state->dumper.next, &self);
                /*
                 * Implies mb(), the result of xchg() must be visible
                 * to core_state->dumper.
                 */
                if (atomic_dec_and_test(&core_state->nr_threads))
                        complete(&core_state->startup);

                for (;;) {
                        set_task_state(tsk, TASK_UNINTERRUPTIBLE);
                        if (!self.task) /* see coredump_finish() */
                                break;
                        schedule();
                }
                __set_task_state(tsk, TASK_RUNNING);
                down_read(&mm->mmap_sem);
        }
        atomic_inc(&mm->mm_count);
        BUG_ON(mm != tsk->active_mm);
        /* more a memory barrier than a real lock */
        task_lock(tsk);
        tsk->mm = NULL;
        up_read(&mm->mmap_sem);
        enter_lazy_tlb(mm, current);
        /* We don't want this task to be frozen prematurely */
        clear_freeze_flag(tsk);
        if (tsk->signal->oom_score_adj == OOM_SCORE_ADJ_MIN)
                atomic_dec(&mm->oom_disable_count);
        task_unlock(tsk);
        mm_update_next_owner(mm);
        mmput(mm);
}
/*
 * When we die, we re-parent all our children.
 * Try to give them to another thread in our thread
 * group, and if no such member exists, give it to
 * the child reaper process (ie "init") in our pid
 * space.
 */
static struct task_struct *find_new_reaper(struct task_struct *father)
        __releases(&tasklist_lock)
        __acquires(&tasklist_lock)
{
        struct pid_namespace *pid_ns = task_active_pid_ns(father);
        struct task_struct *thread;

        thread = father;
        while_each_thread(father, thread) {
                if (thread->flags & PF_EXITING)
                        continue;
                if (unlikely(pid_ns->child_reaper == father))
                        pid_ns->child_reaper = thread;
                return thread;
        }

        if (unlikely(pid_ns->child_reaper == father)) {
                write_unlock_irq(&tasklist_lock);
                if (unlikely(pid_ns == &init_pid_ns))
                        panic("Attempted to kill init!");

                zap_pid_ns_processes(pid_ns);
                write_lock_irq(&tasklist_lock);
                /*
                 * We cannot clear ->child_reaper or leave it alone.
                 * There may be stealth EXIT_DEAD tasks on ->children,
                 * forget_original_parent() must move them somewhere.
                 */
                pid_ns->child_reaper = init_pid_ns.child_reaper;
        }

        return pid_ns->child_reaper;
}
/*
 * Any that need to be release_task'd are put on the @dead list.
 */
static void reparent_leader(struct task_struct *father, struct task_struct *p,
                                struct list_head *dead)
{
        list_move_tail(&p->sibling, &p->real_parent->children);

        if (task_detached(p))
                return;
        /*
         * If this is a threaded reparent there is no need to
         * notify anyone anything has happened.
         */
        if (same_thread_group(p->real_parent, father))
                return;

        /* We don't want people slaying init. */
        p->exit_signal = SIGCHLD;

        /* If it has exited notify the new parent about this child's death. */
        if (!task_ptrace(p) &&
            p->exit_state == EXIT_ZOMBIE && thread_group_empty(p)) {
                do_notify_parent(p, p->exit_signal);
                if (task_detached(p)) {
                        p->exit_state = EXIT_DEAD;
                        list_move_tail(&p->sibling, dead);
                }
        }

        kill_orphaned_pgrp(p, father);
}
static void forget_original_parent(struct task_struct *father)
{
        struct task_struct *p, *n, *reaper;
        LIST_HEAD(dead_children);

        write_lock_irq(&tasklist_lock);
        /*
         * Note that exit_ptrace() and find_new_reaper() might
         * drop tasklist_lock and reacquire it.
         */
        exit_ptrace(father);
        reaper = find_new_reaper(father);

        list_for_each_entry_safe(p, n, &father->children, sibling) {
                struct task_struct *t = p;
                do {
                        t->real_parent = reaper;
                        if (t->parent == father) {
                                BUG_ON(task_ptrace(t));
                                t->parent = t->real_parent;
                        }
                        if (t->pdeath_signal)
                                group_send_sig_info(t->pdeath_signal,
                                                    SEND_SIG_NOINFO, t);
                } while_each_thread(p, t);
                reparent_leader(father, p, &dead_children);
        }
        write_unlock_irq(&tasklist_lock);

        BUG_ON(!list_empty(&father->children));

        list_for_each_entry_safe(p, n, &dead_children, sibling) {
                list_del_init(&p->sibling);
                release_task(p);
        }
}
/*
 * Send signals to all our closest relatives so that they know
 * to properly mourn us..
 */
static void exit_notify(struct task_struct *tsk, int group_dead)
{
        int signal;
        void *cookie;

        /*
         * This does two things:
         *
         * A.  Make init inherit all the child processes
         * B.  Check to see if any process groups have become orphaned
         *     as a result of our exiting, and if they have any stopped
         *     jobs, send them a SIGHUP and then a SIGCONT.  (POSIX 3.2.2.2)
         */
        forget_original_parent(tsk);
        exit_task_namespaces(tsk);

        write_lock_irq(&tasklist_lock);
        if (group_dead)
                kill_orphaned_pgrp(tsk->group_leader, NULL);

        /* Let father know we died
         *
         * Thread signals are configurable, but you aren't going to use
         * that to send signals to arbitrary processes.
         * That stops right now.
         *
         * If the parent exec id doesn't match the exec id we saved
         * when we started then we know the parent has changed security
         * domain.
         *
         * If our self_exec id doesn't match our parent_exec_id then
         * we have changed execution domain as these two values started
         * the same after a fork.
         */
        if (tsk->exit_signal != SIGCHLD && !task_detached(tsk) &&
            (tsk->parent_exec_id != tsk->real_parent->self_exec_id ||
             tsk->self_exec_id != tsk->parent_exec_id))
                tsk->exit_signal = SIGCHLD;

        signal = tracehook_notify_death(tsk, &cookie, group_dead);
        if (signal >= 0)
                signal = do_notify_parent(tsk, signal);

        tsk->exit_state = signal == DEATH_REAP ? EXIT_DEAD : EXIT_ZOMBIE;

        /* mt-exec, de_thread() is waiting for group leader */
        if (unlikely(tsk->signal->notify_count < 0))
                wake_up_process(tsk->signal->group_exit_task);
        write_unlock_irq(&tasklist_lock);

        tracehook_report_death(tsk, signal, cookie, group_dead);

        /* If the process is dead, release it - nobody will wait for it */
        if (signal == DEATH_REAP)
                release_task(tsk);
}
#ifdef CONFIG_DEBUG_STACK_USAGE
static void check_stack_usage(void)
{
        static DEFINE_SPINLOCK(low_water_lock);
        static int lowest_to_date = THREAD_SIZE;
        unsigned long free;

        free = stack_not_used(current);

        if (free >= lowest_to_date)
                return;

        spin_lock(&low_water_lock);
        if (free < lowest_to_date) {
                printk(KERN_WARNING "%s used greatest stack depth: %lu bytes "
                                "left\n",
                                current->comm, free);
                lowest_to_date = free;
        }
        spin_unlock(&low_water_lock);
}
#else
static inline void check_stack_usage(void) {}
#endif
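
/*
 * The final exit path for a single task: tear down every resource the
 * task owns (mm, files, fs, namespaces, timers, ...), notify the parent
 * via exit_notify(), and schedule away for the last time.  Never
 * returns; the final put_task_struct happens in finish_task_switch().
 */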
NORET_TYPE void do_exit(long code)
{
        struct task_struct *tsk = current;
        int group_dead;

        profile_task_exit(tsk);

        WARN_ON(atomic_read(&tsk->fs_excl));
        WARN_ON(blk_needs_flush_plug(tsk));

        if (unlikely(in_interrupt()))
                panic("Aiee, killing interrupt handler!");
        if (unlikely(!tsk->pid))
                panic("Attempted to kill the idle task!");

        /*
         * If do_exit is called because this process oopsed, it's possible
         * that get_fs() was left as KERNEL_DS, so reset it to USER_DS before
         * continuing. Amongst other possible reasons, this is to prevent
         * mm_release()->clear_child_tid() from writing to a user-controlled
         * kernel address.
         */
        set_fs(USER_DS);

        tracehook_report_exit(&code);

        validate_creds_for_do_exit(tsk);

        /*
         * We're taking recursive faults here in do_exit. Safest is to just
         * leave this task alone and wait for reboot.
         */
        if (unlikely(tsk->flags & PF_EXITING)) {
                printk(KERN_ALERT
                        "Fixing recursive fault but reboot is needed!\n");
                /*
                 * We can do this unlocked here. The futex code uses
                 * this flag just to verify whether the pi state
                 * cleanup has been done or not. In the worst case it
                 * loops once more. We pretend that the cleanup was
                 * done as there is no way to return. Either the
                 * OWNER_DIED bit is set by now or we push the blocked
                 * task into the wait-forever nirvana as well.
                 */
                tsk->flags |= PF_EXITPIDONE;
                set_current_state(TASK_UNINTERRUPTIBLE);
                schedule();
        }
        exit_irq_thread();

        exit_signals(tsk);  /* sets PF_EXITING */

        /*
         * tsk->flags are checked in the futex code to protect against
         * an exiting task cleaning up the robust pi futexes.
         */
        smp_mb();
        raw_spin_unlock_wait(&tsk->pi_lock);

        if (unlikely(in_atomic()))
                printk(KERN_INFO "note: %s[%d] exited with preempt_count %d\n",
                                current->comm, task_pid_nr(current),
                                preempt_count());

        acct_update_integrals(tsk);
        /* sync mm's RSS info before statistics gathering */
        if (tsk->mm)
                sync_mm_rss(tsk, tsk->mm);
        group_dead = atomic_dec_and_test(&tsk->signal->live);
        if (group_dead) {
                hrtimer_cancel(&tsk->signal->real_timer);
                exit_itimers(tsk->signal);
                if (tsk->mm)
                        setmax_mm_hiwater_rss(&tsk->signal->maxrss, tsk->mm);
        }
        acct_collect(code, group_dead);
        if (group_dead)
                tty_audit_exit();
        if (unlikely(tsk->audit_context))
                audit_free(tsk);

        tsk->exit_code = code;
        taskstats_exit(tsk, group_dead);

        exit_mm(tsk);

        if (group_dead)
                acct_process();
        trace_sched_process_exit(tsk);

        exit_sem(tsk);
        exit_files(tsk);
        exit_fs(tsk);
        check_stack_usage();
        exit_thread();

        /*
         * Flush inherited counters to the parent - before the parent
         * gets woken up by child-exit notifications.
         *
         * because of cgroup mode, must be called before cgroup_exit()
         */
        perf_event_exit_task(tsk);

        cgroup_exit(tsk, 1);

        if (group_dead)
                disassociate_ctty(1);

        module_put(task_thread_info(tsk)->exec_domain->module);

        proc_exit_connector(tsk);

        /*
         * FIXME: do that only when needed, using sched_exit tracepoint
         */
        ptrace_put_breakpoints(tsk);

        exit_notify(tsk, group_dead);
#ifdef CONFIG_NUMA
        task_lock(tsk);
        mpol_put(tsk->mempolicy);
        tsk->mempolicy = NULL;
        task_unlock(tsk);
#endif
#ifdef CONFIG_FUTEX
        if (unlikely(current->pi_state_cache))
                kfree(current->pi_state_cache);
#endif
        /*
         * Make sure we are holding no locks:
         */
        debug_check_no_locks_held(tsk);
        /*
         * We can do this unlocked here. The futex code uses this flag
         * just to verify whether the pi state cleanup has been done
         * or not. In the worst case it loops once more.
         */
        tsk->flags |= PF_EXITPIDONE;

        if (tsk->io_context)
                exit_io_context(tsk);

        if (tsk->splice_pipe)
                __free_pipe_info(tsk->splice_pipe);

        validate_creds_for_do_exit(tsk);

        preempt_disable();
        exit_rcu();
        /* causes final put_task_struct in finish_task_switch(). */
        tsk->state = TASK_DEAD;
        schedule();
        BUG();
        /* Avoid "noreturn function does return".  */
        for (;;)
                cpu_relax();    /* For when BUG is null */
}

EXPORT_SYMBOL_GPL(do_exit);
NORET_TYPE void complete_and_exit(struct completion *comp, long code)
{
        if (comp)
                complete(comp);

        do_exit(code);
}

EXPORT_SYMBOL(complete_and_exit);
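
/*
 * sys_exit() packs the low byte of the user-supplied status into bits
 * 15..8 of the wait status word, which is what WEXITSTATUS() extracts.
 * For example, exit(3) yields do_exit(0x0300): WIFEXITED() sees the low
 * seven bits clear and WEXITSTATUS() recovers 3.
 */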
SYSCALL_DEFINE1(exit, int, error_code)
{
        do_exit((error_code & 0xff) << 8);
}
/*
 * Take down every thread in the group.  This is called by fatal signals
 * as well as by sys_exit_group (below).
 */
NORET_TYPE void
do_group_exit(int exit_code)
{
        struct signal_struct *sig = current->signal;

        BUG_ON(exit_code & 0x80); /* core dumps don't get here */

        if (signal_group_exit(sig))
                exit_code = sig->group_exit_code;
        else if (!thread_group_empty(current)) {
                struct sighand_struct *const sighand = current->sighand;
                spin_lock_irq(&sighand->siglock);
                if (signal_group_exit(sig))
                        /* Another thread got here before we took the lock.  */
                        exit_code = sig->group_exit_code;
                else {
                        sig->group_exit_code = exit_code;
                        sig->flags = SIGNAL_GROUP_EXIT;
                        zap_other_threads(current);
                }
                spin_unlock_irq(&sighand->siglock);
        }

        do_exit(exit_code);
        /* NOTREACHED */
}
/*
 * This kills every thread in the thread group. Note that any externally
 * wait4()-ing process will get the correct exit code - even if this
 * thread is not the thread group leader.
 */
SYSCALL_DEFINE1(exit_group, int, error_code)
{
        do_group_exit((error_code & 0xff) << 8);
        /* NOTREACHED */
        return 0;
}
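
/*
 * Bundle of parameters for one wait operation: which pids to look at
 * (wo_type/wo_pid), the caller's flags, the user-space destination
 * pointers, plus the wait-queue entry used while sleeping and the error
 * to report when no eligible child is found.
 */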
struct wait_opts {
        enum pid_type           wo_type;
        int                     wo_flags;
        struct pid              *wo_pid;

        struct siginfo __user   *wo_info;
        int __user              *wo_stat;
        struct rusage __user    *wo_rusage;

        wait_queue_t            child_wait;
        int                     notask_error;
};

static inline
struct pid *task_pid_type(struct task_struct *task, enum pid_type type)
{
        if (type != PIDTYPE_PID)
                task = task->group_leader;
        return task->pids[type].pid;
}
static int eligible_pid(struct wait_opts *wo, struct task_struct *p)
{
        return  wo->wo_type == PIDTYPE_MAX ||
                task_pid_type(p, wo->wo_type) == wo->wo_pid;
}

static int eligible_child(struct wait_opts *wo, struct task_struct *p)
{
        if (!eligible_pid(wo, p))
                return 0;
        /* Wait for all children (clone and not) if __WALL is set;
         * otherwise, wait for clone children *only* if __WCLONE is
         * set; otherwise, wait for non-clone children *only*.  (Note:
         * A "clone" child here is one that reports to its parent
         * using a signal other than SIGCHLD.) */
        if (((p->exit_signal != SIGCHLD) ^ !!(wo->wo_flags & __WCLONE))
            && !(wo->wo_flags & __WALL))
                return 0;

        return 1;
}
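
/*
 * Copy the result of a WNOWAIT-style wait out to user space without
 * reaping the child: fill in rusage if requested, then the siginfo
 * fields, and return the pid (or the first put_user() error).
 */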
static int wait_noreap_copyout(struct wait_opts *wo, struct task_struct *p,
                                pid_t pid, uid_t uid, int why, int status)
{
        struct siginfo __user *infop;
        int retval = wo->wo_rusage
                ? getrusage(p, RUSAGE_BOTH, wo->wo_rusage) : 0;

        put_task_struct(p);
        infop = wo->wo_info;
        if (infop) {
                if (!retval)
                        retval = put_user(SIGCHLD, &infop->si_signo);
                if (!retval)
                        retval = put_user(0, &infop->si_errno);
                if (!retval)
                        retval = put_user((short)why, &infop->si_code);
                if (!retval)
                        retval = put_user(pid, &infop->si_pid);
                if (!retval)
                        retval = put_user(uid, &infop->si_uid);
                if (!retval)
                        retval = put_user(status, &infop->si_status);
        }
        if (!retval)
                retval = pid;
        return retval;
}
/*
 * Handle sys_wait4 work for one task in state EXIT_ZOMBIE.  We hold
 * read_lock(&tasklist_lock) on entry.  If we return zero, we still hold
 * the lock and this task is uninteresting.  If we return nonzero, we have
 * released the lock and the system call should return.
 */
static int wait_task_zombie(struct wait_opts *wo, struct task_struct *p)
{
        unsigned long state;
        int retval, status, traced;
        pid_t pid = task_pid_vnr(p);
        uid_t uid = __task_cred(p)->uid;
        struct siginfo __user *infop;

        if (!likely(wo->wo_flags & WEXITED))
                return 0;

        if (unlikely(wo->wo_flags & WNOWAIT)) {
                int exit_code = p->exit_code;
                int why;

                get_task_struct(p);
                read_unlock(&tasklist_lock);
                if ((exit_code & 0x7f) == 0) {
                        why = CLD_EXITED;
                        status = exit_code >> 8;
                } else {
                        why = (exit_code & 0x80) ? CLD_DUMPED : CLD_KILLED;
                        status = exit_code & 0x7f;
                }
                return wait_noreap_copyout(wo, p, pid, uid, why, status);
        }

        /*
         * Try to move the task's state to DEAD
         * only one thread is allowed to do this:
         */
        state = xchg(&p->exit_state, EXIT_DEAD);
        if (state != EXIT_ZOMBIE) {
                BUG_ON(state != EXIT_DEAD);
                return 0;
        }

        traced = ptrace_reparented(p);
        /*
         * It can be ptraced but not reparented, check
         * !task_detached() to filter out sub-threads.
         */
        if (likely(!traced) && likely(!task_detached(p))) {
                struct signal_struct *psig;
                struct signal_struct *sig;
                unsigned long maxrss;
                cputime_t tgutime, tgstime;

                /*
                 * The resource counters for the group leader are in its
                 * own task_struct.  Those for dead threads in the group
                 * are in its signal_struct, as are those for the child
                 * processes it has previously reaped.  All these
                 * accumulate in the parent's signal_struct c* fields.
                 *
                 * We don't bother to take a lock here to protect these
                 * p->signal fields, because they are only touched by
                 * __exit_signal, which runs with tasklist_lock
                 * write-locked anyway, and so is excluded here.  We do
                 * need to protect the access to parent->signal fields,
                 * as other threads in the parent group can be right
                 * here reaping other children at the same time.
                 *
                 * We use thread_group_times() to get times for the thread
                 * group, which consolidates times for all threads in the
                 * group including the group leader.
                 */
                thread_group_times(p, &tgutime, &tgstime);
                spin_lock_irq(&p->real_parent->sighand->siglock);
                psig = p->real_parent->signal;
                sig = p->signal;
                psig->cutime =
                        cputime_add(psig->cutime,
                        cputime_add(tgutime,
                                    sig->cutime));
                psig->cstime =
                        cputime_add(psig->cstime,
                        cputime_add(tgstime,
                                    sig->cstime));
                psig->cgtime =
                        cputime_add(psig->cgtime,
                        cputime_add(p->gtime,
                        cputime_add(sig->gtime,
                                    sig->cgtime)));
                psig->cmin_flt +=
                        p->min_flt + sig->min_flt + sig->cmin_flt;
                psig->cmaj_flt +=
                        p->maj_flt + sig->maj_flt + sig->cmaj_flt;
                psig->cnvcsw +=
                        p->nvcsw + sig->nvcsw + sig->cnvcsw;
                psig->cnivcsw +=
                        p->nivcsw + sig->nivcsw + sig->cnivcsw;
                psig->cinblock +=
                        task_io_get_inblock(p) +
                        sig->inblock + sig->cinblock;
                psig->coublock +=
                        task_io_get_oublock(p) +
                        sig->oublock + sig->coublock;
                maxrss = max(sig->maxrss, sig->cmaxrss);
                if (psig->cmaxrss < maxrss)
                        psig->cmaxrss = maxrss;
                task_io_accounting_add(&psig->ioac, &p->ioac);
                task_io_accounting_add(&psig->ioac, &sig->ioac);
                spin_unlock_irq(&p->real_parent->sighand->siglock);
        }

        /*
         * Now we are sure this task is interesting, and no other
         * thread can reap it because we set its state to EXIT_DEAD.
         */
        read_unlock(&tasklist_lock);

        retval = wo->wo_rusage
                ? getrusage(p, RUSAGE_BOTH, wo->wo_rusage) : 0;
        status = (p->signal->flags & SIGNAL_GROUP_EXIT)
                ? p->signal->group_exit_code : p->exit_code;
        if (!retval && wo->wo_stat)
                retval = put_user(status, wo->wo_stat);

        infop = wo->wo_info;
        if (!retval && infop)
                retval = put_user(SIGCHLD, &infop->si_signo);
        if (!retval && infop)
                retval = put_user(0, &infop->si_errno);
        if (!retval && infop) {
                int why;

                if ((status & 0x7f) == 0) {
                        why = CLD_EXITED;
                        status >>= 8;
                } else {
                        why = (status & 0x80) ? CLD_DUMPED : CLD_KILLED;
                        status &= 0x7f;
                }
                retval = put_user((short)why, &infop->si_code);
                if (!retval)
                        retval = put_user(status, &infop->si_status);
        }
        if (!retval && infop)
                retval = put_user(pid, &infop->si_pid);
        if (!retval && infop)
                retval = put_user(uid, &infop->si_uid);
        if (!retval)
                retval = pid;

        if (traced) {
                write_lock_irq(&tasklist_lock);
                /* We dropped tasklist, ptracer could die and untrace */
                ptrace_unlink(p);
                /*
                 * If this is not a detached task, notify the parent.
                 * If it's still not detached after that, don't release
                 * it now.
                 */
                if (!task_detached(p)) {
                        do_notify_parent(p, p->exit_signal);
                        if (!task_detached(p)) {
                                p->exit_state = EXIT_ZOMBIE;
                                p = NULL;
                        }
                }
                write_unlock_irq(&tasklist_lock);
        }
        if (p != NULL)
                release_task(p);

        return retval;
}
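
/*
 * Return a pointer to the relevant stop code for @p: the per-task
 * exit_code for a ptrace wait, the group_exit_code for a normal
 * WSTOPPED wait, or NULL if @p is not in a reportable stopped state.
 */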
static int *task_stopped_code(struct task_struct *p, bool ptrace)
{
        if (ptrace) {
                if (task_is_stopped_or_traced(p))
                        return &p->exit_code;
        } else {
                if (p->signal->flags & SIGNAL_STOP_STOPPED)
                        return &p->signal->group_exit_code;
        }
        return NULL;
}
/**
 * wait_task_stopped - Wait for %TASK_STOPPED or %TASK_TRACED
 * @wo: wait options
 * @ptrace: is the wait for ptrace
 * @p: task to wait for
 *
 * Handle sys_wait4() work for %p in state %TASK_STOPPED or %TASK_TRACED.
 *
 * CONTEXT:
 * read_lock(&tasklist_lock), which is released if return value is
 * non-zero.  Also, grabs and releases @p->sighand->siglock.
 *
 * RETURNS:
 * 0 if wait condition didn't exist and search for other wait conditions
 * should continue.  Non-zero return, -errno on failure and @p's pid on
 * success, implies that tasklist_lock is released and wait condition
 * search should terminate.
 */
static int wait_task_stopped(struct wait_opts *wo,
                                int ptrace, struct task_struct *p)
{
        struct siginfo __user *infop;
        int retval, exit_code, *p_code, why;
        uid_t uid = 0; /* unneeded, required by compiler */
        pid_t pid;

        /*
         * Traditionally we see ptrace'd stopped tasks regardless of options.
         */
        if (!ptrace && !(wo->wo_flags & WUNTRACED))
                return 0;

        if (!task_stopped_code(p, ptrace))
                return 0;

        exit_code = 0;
        spin_lock_irq(&p->sighand->siglock);

        p_code = task_stopped_code(p, ptrace);
        if (unlikely(!p_code))
                goto unlock_sig;

        exit_code = *p_code;
        if (!exit_code)
                goto unlock_sig;

        if (!unlikely(wo->wo_flags & WNOWAIT))
                *p_code = 0;

        uid = task_uid(p);
unlock_sig:
        spin_unlock_irq(&p->sighand->siglock);
        if (!exit_code)
                return 0;

        /*
         * Now we are pretty sure this task is interesting.
         * Make sure it doesn't get reaped out from under us while we
         * give up the lock and then examine it below.  We don't want to
         * keep holding onto the tasklist_lock while we call getrusage and
         * possibly take page faults for user memory.
         */
        get_task_struct(p);
        pid = task_pid_vnr(p);
        why = ptrace ? CLD_TRAPPED : CLD_STOPPED;
        read_unlock(&tasklist_lock);

        if (unlikely(wo->wo_flags & WNOWAIT))
                return wait_noreap_copyout(wo, p, pid, uid, why, exit_code);

        retval = wo->wo_rusage
                ? getrusage(p, RUSAGE_BOTH, wo->wo_rusage) : 0;
        if (!retval && wo->wo_stat)
                retval = put_user((exit_code << 8) | 0x7f, wo->wo_stat);

        infop = wo->wo_info;
        if (!retval && infop)
                retval = put_user(SIGCHLD, &infop->si_signo);
        if (!retval && infop)
                retval = put_user(0, &infop->si_errno);
        if (!retval && infop)
                retval = put_user((short)why, &infop->si_code);
        if (!retval && infop)
                retval = put_user(exit_code, &infop->si_status);
        if (!retval && infop)
                retval = put_user(pid, &infop->si_pid);
        if (!retval && infop)
                retval = put_user(uid, &infop->si_uid);
        if (!retval)
                retval = pid;
        put_task_struct(p);

        BUG_ON(!retval);
        return retval;
}
/*
 * Handle do_wait work for one task in a live, non-stopped state.
 * read_lock(&tasklist_lock) on entry.  If we return zero, we still hold
 * the lock and this task is uninteresting.  If we return nonzero, we have
 * released the lock and the system call should return.
 */
static int wait_task_continued(struct wait_opts *wo, struct task_struct *p)
{
        int retval;
        pid_t pid;
        uid_t uid;

        if (!unlikely(wo->wo_flags & WCONTINUED))
                return 0;

        if (!(p->signal->flags & SIGNAL_STOP_CONTINUED))
                return 0;

        spin_lock_irq(&p->sighand->siglock);
        /* Re-check with the lock held.  */
        if (!(p->signal->flags & SIGNAL_STOP_CONTINUED)) {
                spin_unlock_irq(&p->sighand->siglock);
                return 0;
        }
        if (!unlikely(wo->wo_flags & WNOWAIT))
                p->signal->flags &= ~SIGNAL_STOP_CONTINUED;
        uid = task_uid(p);
        spin_unlock_irq(&p->sighand->siglock);

        pid = task_pid_vnr(p);
        get_task_struct(p);
        read_unlock(&tasklist_lock);

        if (!wo->wo_info) {
                retval = wo->wo_rusage
                        ? getrusage(p, RUSAGE_BOTH, wo->wo_rusage) : 0;
                put_task_struct(p);
                if (!retval && wo->wo_stat)
                        retval = put_user(0xffff, wo->wo_stat);
                if (!retval)
                        retval = pid;
        } else {
                retval = wait_noreap_copyout(wo, p, pid, uid,
                                             CLD_CONTINUED, SIGCONT);
                BUG_ON(retval == 0);
        }

        return retval;
}
/*
 * Consider @p for a wait by @parent.
 *
 * -ECHILD should be in ->notask_error before the first call.
 * Returns nonzero for a final return, when we have unlocked tasklist_lock.
 * Returns zero if the search for a child should continue;
 * then ->notask_error is 0 if @p is an eligible child,
 * or another error from security_task_wait(), or still -ECHILD.
 */
static int wait_consider_task(struct wait_opts *wo, int ptrace,
                                struct task_struct *p)
{
        int ret = eligible_child(wo, p);
        if (!ret)
                return ret;

        ret = security_task_wait(p);
        if (unlikely(ret < 0)) {
                /*
                 * If we have not yet seen any eligible child,
                 * then let this error code replace -ECHILD.
                 * A permission error will give the user a clue
                 * to look for security policy problems, rather
                 * than for mysterious wait bugs.
                 */
                if (wo->notask_error)
                        wo->notask_error = ret;
                return 0;
        }

        /* dead body doesn't have much to contribute */
        if (p->exit_state == EXIT_DEAD)
                return 0;

        /* slay zombie? */
        if (p->exit_state == EXIT_ZOMBIE) {
                /*
                 * A zombie ptracee is only visible to its ptracer.
                 * Notification and reaping will be cascaded to the real
                 * parent when the ptracer detaches.
                 */
                if (likely(!ptrace) && unlikely(task_ptrace(p))) {
                        /* it will become visible, clear notask_error */
                        wo->notask_error = 0;
                        return 0;
                }

                /* we don't reap group leaders with subthreads */
                if (!delay_group_leader(p))
                        return wait_task_zombie(wo, p);

                /*
                 * Allow access to stopped/continued state via zombie by
                 * falling through.  Clearing of notask_error is complex.
                 *
                 * When !@ptrace:
                 *
                 * If WEXITED is set, notask_error should naturally be
                 * cleared.  If not, subset of WSTOPPED|WCONTINUED is set,
                 * so, if there are live subthreads, there are events to
                 * wait for.  If all subthreads are dead, it's still safe
                 * to clear - this function will be called again in a
                 * finite amount of time once all the subthreads are
                 * released and will then return without clearing.
                 *
                 * When @ptrace:
                 *
                 * Stopped state is per-task and thus can't change once the
                 * target task dies.  Only continued and exited can happen.
                 * Clear notask_error if WCONTINUED | WEXITED.
                 */
                if (likely(!ptrace) || (wo->wo_flags & (WCONTINUED | WEXITED)))
                        wo->notask_error = 0;
        } else {
                /*
                 * If @p is ptraced by a task in its real parent's group,
                 * hide group stop/continued state when looking at @p as
                 * the real parent; otherwise, a single stop can be
                 * reported twice as group and ptrace stops.
                 *
                 * If a ptracer wants to distinguish the two events for its
                 * own children, it should create a separate process which
                 * takes the role of real parent.
                 */
                if (likely(!ptrace) && task_ptrace(p) &&
                    same_thread_group(p->parent, p->real_parent))
                        return 0;

                /*
                 * @p is alive and it's gonna stop, continue or exit, so
                 * there always is something to wait for.
                 */
                wo->notask_error = 0;
        }

        /*
         * Wait for stopped.  Depending on @ptrace, different stopped state
         * is used and the two don't interact with each other.
         */
        ret = wait_task_stopped(wo, ptrace, p);
        if (ret)
                return ret;

        /*
         * Wait for continued.  There's only one continued state and the
         * ptracer can consume it which can confuse the real parent.  Don't
         * use WCONTINUED from ptracer.  You don't need or want it.
         */
        return wait_task_continued(wo, p);
}
/*
 * Do the work of do_wait() for one thread in the group, @tsk.
 *
 * -ECHILD should be in ->notask_error before the first call.
 * Returns nonzero for a final return, when we have unlocked tasklist_lock.
 * Returns zero if the search for a child should continue; then
 * ->notask_error is 0 if there were any eligible children,
 * or another error from security_task_wait(), or still -ECHILD.
 */
static int do_wait_thread(struct wait_opts *wo, struct task_struct *tsk)
{
        struct task_struct *p;

        list_for_each_entry(p, &tsk->children, sibling) {
                int ret = wait_consider_task(wo, 0, p);
                if (ret)
                        return ret;
        }

        return 0;
}

static int ptrace_do_wait(struct wait_opts *wo, struct task_struct *tsk)
{
        struct task_struct *p;

        list_for_each_entry(p, &tsk->ptraced, ptrace_entry) {
                int ret = wait_consider_task(wo, 1, p);
                if (ret)
                        return ret;
        }

        return 0;
}
static int child_wait_callback(wait_queue_t *wait, unsigned mode,
                                int sync, void *key)
{
        struct wait_opts *wo = container_of(wait, struct wait_opts,
                                                child_wait);
        struct task_struct *p = key;

        if (!eligible_pid(wo, p))
                return 0;

        if ((wo->wo_flags & __WNOTHREAD) && wait->private != p->parent)
                return 0;

        return default_wake_function(wait, mode, sync, key);
}

void __wake_up_parent(struct task_struct *p, struct task_struct *parent)
{
        __wake_up_sync_key(&parent->signal->wait_chldexit,
                                TASK_INTERRUPTIBLE, 1, p);
}
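
/*
 * Core wait loop: register on the parent's wait_chldexit queue, scan
 * the children (and ptracees) of every thread in the group, and sleep
 * in TASK_INTERRUPTIBLE until a child event or a signal arrives.
 */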
static long do_wait(struct wait_opts *wo)
{
        struct task_struct *tsk;
        int retval;

        trace_sched_process_wait(wo->wo_pid);

        init_waitqueue_func_entry(&wo->child_wait, child_wait_callback);
        wo->child_wait.private = current;
        add_wait_queue(&current->signal->wait_chldexit, &wo->child_wait);
repeat:
        /*
         * If there is nothing that can match our criteria, just get out.
         * We will clear ->notask_error to zero if we see any child that
         * might later match our criteria, even if we are not able to reap
         * it yet.
         */
        wo->notask_error = -ECHILD;
        if ((wo->wo_type < PIDTYPE_MAX) &&
           (!wo->wo_pid || hlist_empty(&wo->wo_pid->tasks[wo->wo_type])))
                goto notask;

        set_current_state(TASK_INTERRUPTIBLE);
        read_lock(&tasklist_lock);
        tsk = current;
        do {
                retval = do_wait_thread(wo, tsk);
                if (retval)
                        goto end;

                retval = ptrace_do_wait(wo, tsk);
                if (retval)
                        goto end;

                if (wo->wo_flags & __WNOTHREAD)
                        break;
        } while_each_thread(current, tsk);
        read_unlock(&tasklist_lock);

notask:
        retval = wo->notask_error;
        if (!retval && !(wo->wo_flags & WNOHANG)) {
                retval = -ERESTARTSYS;
                if (!signal_pending(current)) {
                        schedule();
                        goto repeat;
                }
        }
end:
        __set_current_state(TASK_RUNNING);
        remove_wait_queue(&current->signal->wait_chldexit, &wo->child_wait);
        return retval;
}
SYSCALL_DEFINE5(waitid, int, which, pid_t, upid, struct siginfo __user *,
                infop, int, options, struct rusage __user *, ru)
{
        struct wait_opts wo;
        struct pid *pid = NULL;
        enum pid_type type;
        long ret;

        if (options & ~(WNOHANG|WNOWAIT|WEXITED|WSTOPPED|WCONTINUED))
                return -EINVAL;
        if (!(options & (WEXITED|WSTOPPED|WCONTINUED)))
                return -EINVAL;

        switch (which) {
        case P_ALL:
                type = PIDTYPE_MAX;
                break;
        case P_PID:
                type = PIDTYPE_PID;
                if (upid <= 0)
                        return -EINVAL;
                break;
        case P_PGID:
                type = PIDTYPE_PGID;
                if (upid <= 0)
                        return -EINVAL;
                break;
        default:
                return -EINVAL;
        }

        if (type < PIDTYPE_MAX)
                pid = find_get_pid(upid);

        wo.wo_type      = type;
        wo.wo_pid       = pid;
        wo.wo_flags     = options;
        wo.wo_info      = infop;
        wo.wo_stat      = NULL;
        wo.wo_rusage    = ru;
        ret = do_wait(&wo);

        if (ret > 0) {
                ret = 0;
        } else if (infop) {
                /*
                 * For a WNOHANG return, clear out all the fields
                 * we would set so the user can easily tell the
                 * difference.
                 */
                if (!ret)
                        ret = put_user(0, &infop->si_signo);
                if (!ret)
                        ret = put_user(0, &infop->si_errno);
                if (!ret)
                        ret = put_user(0, &infop->si_code);
                if (!ret)
                        ret = put_user(0, &infop->si_pid);
                if (!ret)
                        ret = put_user(0, &infop->si_uid);
                if (!ret)
                        ret = put_user(0, &infop->si_status);
        }

        put_pid(pid);

        /* avoid REGPARM breakage on x86: */
        asmlinkage_protect(5, ret, which, upid, infop, options, ru);
        return ret;
}
SYSCALL_DEFINE4(wait4, pid_t, upid, int __user *, stat_addr,
                int, options, struct rusage __user *, ru)
{
        struct wait_opts wo;
        struct pid *pid = NULL;
        enum pid_type type;
        long ret;

        if (options & ~(WNOHANG|WUNTRACED|WCONTINUED|
                        __WNOTHREAD|__WCLONE|__WALL))
                return -EINVAL;

        if (upid == -1)
                type = PIDTYPE_MAX;
        else if (upid < 0) {
                type = PIDTYPE_PGID;
                pid = find_get_pid(-upid);
        } else if (upid == 0) {
                type = PIDTYPE_PGID;
                pid = get_task_pid(current, PIDTYPE_PGID);
        } else /* upid > 0 */ {
                type = PIDTYPE_PID;
                pid = find_get_pid(upid);
        }

        wo.wo_type      = type;
        wo.wo_pid       = pid;
        wo.wo_flags     = options | WEXITED;
        wo.wo_info      = NULL;
        wo.wo_stat      = stat_addr;
        wo.wo_rusage    = ru;
        ret = do_wait(&wo);
        put_pid(pid);

        /* avoid REGPARM breakage on x86: */
        asmlinkage_protect(4, ret, upid, stat_addr, options, ru);
        return ret;
}
#ifdef __ARCH_WANT_SYS_WAITPID

/*
 * sys_waitpid() remains for compatibility. waitpid() should be
 * implemented by calling sys_wait4() from libc.a.
 */
SYSCALL_DEFINE3(waitpid, pid_t, pid, int __user *, stat_addr, int, options)
{
        return sys_wait4(pid, stat_addr, options, NULL);
}

#endif