
/*
 *  linux/mm/oom_kill.c
 *
 *  Copyright (C)  1998,2000  Rik van Riel
 *	Thanks go out to Claus Fischer for some serious inspiration and
 *	for goading me into coding this file...
 *
 *  The routines in this file are used to kill a process when
 *  we're seriously out of memory. This gets called from __alloc_pages()
 *  in mm/page_alloc.c when we really run out of memory.
 *
 *  Since we won't call these routines often (on a well-configured
 *  machine) this file will double as a 'coding guide' and a signpost
 *  for newbie kernel hackers. It features several pointers to major
 *  kernel subsystems and hints as to where to find out what things do.
 */
#include <linux/oom.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/swap.h>
#include <linux/timex.h>
#include <linux/jiffies.h>
#include <linux/cpuset.h>
#include <linux/module.h>
#include <linux/notifier.h>
#include <linux/memcontrol.h>
#include <linux/security.h>

int sysctl_panic_on_oom;
int sysctl_oom_kill_allocating_task;
int sysctl_oom_dump_tasks;
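/*
 * The three tunables above are exposed under /proc/sys/vm/ as
 * panic_on_oom, oom_kill_allocating_task and oom_dump_tasks (the sysctl
 * table entries live in kernel/sysctl.c).
 */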
static DEFINE_SPINLOCK(zone_scan_mutex);
/* #define DEBUG */
/**
 * badness - calculate a numeric value for how bad this task has been
 * @p: task struct of the task we are scoring
 * @uptime: current uptime in seconds
 *
 * The formula used is relatively simple and documented inline in the
 * function. The main rationale is that we want to select a good task
 * to kill when we run out of memory.
 *
 * Good in this context means that:
 * 1) we lose the minimum amount of work done
 * 2) we recover a large amount of memory
 * 3) we don't kill anything innocent of eating tons of memory
 * 4) we want to kill the minimum number of processes (one)
 * 5) we try to kill the process the user expects us to kill; this
 *    algorithm has been meticulously tuned to meet the principle
 *    of least surprise ... (be careful when you change it)
 */
unsigned long badness(struct task_struct *p, unsigned long uptime)
{
	unsigned long points, cpu_time, run_time, s;
	struct mm_struct *mm;
	struct task_struct *child;

	task_lock(p);
	mm = p->mm;
	if (!mm) {
		task_unlock(p);
		return 0;
	}

	/*
	 * The memory size of the process is the basis for the badness.
	 */
	points = mm->total_vm;

	/*
	 * After this unlock we can no longer dereference local variable `mm'.
	 */
	task_unlock(p);

	/*
	 * swapoff can easily use up all memory, so kill those first.
	 */
	if (p->flags & PF_SWAPOFF)
		return ULONG_MAX;

	/*
	 * Processes which fork a lot of child processes are likely
	 * a good choice. We add half the vmsize of the children if they
	 * have their own mm. This prevents forking servers from flooding
	 * the machine with an endless amount of children. In case a single
	 * child is eating the vast majority of memory, adding only half
	 * to the parent makes that child our kill candidate of choice.
	 */
	list_for_each_entry(child, &p->children, sibling) {
		task_lock(child);
		if (child->mm != mm && child->mm)
			points += child->mm->total_vm/2 + 1;
		task_unlock(child);
	}

	/*
	 * CPU time is in tens of seconds and run time is in thousands
	 * of seconds. There is no particular reason for this other than
	 * that it turned out to work very well in practice.
	 */
	cpu_time = (cputime_to_jiffies(p->utime) + cputime_to_jiffies(p->stime))
		>> (SHIFT_HZ + 3);

	if (uptime >= p->start_time.tv_sec)
		run_time = (uptime - p->start_time.tv_sec) >> 10;
	else
		run_time = 0;

	s = int_sqrt(cpu_time);
	if (s)
		points /= s;
	s = int_sqrt(int_sqrt(run_time));
	if (s)
		points /= s;

	/*
	 * Niced processes are most likely less important, so double
	 * their badness points.
	 */
	if (task_nice(p) > 0)
		points *= 2;

	/*
	 * Superuser processes are usually more important, so we make it
	 * less likely that we kill those.
	 */
	if (has_capability(p, CAP_SYS_ADMIN) ||
	    has_capability(p, CAP_SYS_RESOURCE))
		points /= 4;

	/*
	 * We don't want to kill a process with direct hardware access.
	 * Not only could that mess up the hardware, but usually users
	 * tend to only have this flag set on applications they think
	 * of as important.
	 */
	if (has_capability(p, CAP_SYS_RAWIO))
		points /= 4;

	/*
	 * If p's nodes don't overlap ours, it may still help to kill p
	 * because p may have allocated or otherwise mapped memory on
	 * this node before. However it will be less likely.
	 */
	if (!cpuset_mems_allowed_intersects(current, p))
		points /= 8;

	/*
	 * Adjust the score by oomkilladj.
	 */
	if (p->oomkilladj) {
		if (p->oomkilladj > 0) {
			if (!points)
				points = 1;
			points <<= p->oomkilladj;
		} else
			points >>= -(p->oomkilladj);
	}
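	/*
	 * Example of the adjustment above (illustrative numbers):
	 * oomkilladj = 4 multiplies the score by 16 (points <<= 4) and
	 * oomkilladj = -4 divides it by 16. Tasks with oomkilladj ==
	 * OOM_DISABLE are never scored at all: select_bad_process()
	 * skips them before calling badness().
	 */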
#ifdef DEBUG
	printk(KERN_DEBUG "OOMkill: task %d (%s) got %lu points\n",
	       p->pid, p->comm, points);
#endif
	return points;
}
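/*
 * Worked example of the scoring above (hypothetical numbers, assuming
 * HZ = 100 and hence SHIFT_HZ = 7): a task with total_vm = 100000 pages
 * that has used 400 s of CPU time (40000 jiffies >> 10 = 39,
 * int_sqrt(39) = 6) and has been running for ~35 days
 * (3000000 s >> 10 = 2929, int_sqrt(int_sqrt(2929)) = 7) scores
 * 100000 / 6 / 7 = ~2380 points, while a freshly started task of the
 * same size keeps all 100000 points. Long-running, CPU-hungry tasks are
 * therefore progressively harder to kill.
 */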
/*
 * Determine the type of allocation constraint.
 */
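/*
 * A sketch of the logic below: every zone the allocation could use that
 * is also allowed by current's cpuset gets its node cleared from the set
 * of nodes with memory. Hitting a zone the cpuset forbids means the
 * cpuset is the binding constraint; if every zone was allowed but some
 * memory nodes were never reached, the zonelist itself was restricted,
 * which on NUMA points to a memory policy (mbind/set_mempolicy).
 */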
static inline enum oom_constraint constrained_alloc(struct zonelist *zonelist,
						    gfp_t gfp_mask)
{
#ifdef CONFIG_NUMA
	struct zone *zone;
	struct zoneref *z;
	enum zone_type high_zoneidx = gfp_zone(gfp_mask);
	nodemask_t nodes = node_states[N_HIGH_MEMORY];

	for_each_zone_zonelist(zone, z, zonelist, high_zoneidx)
		if (cpuset_zone_allowed_softwall(zone, gfp_mask))
			node_clear(zone_to_nid(zone), nodes);
		else
			return CONSTRAINT_CPUSET;

	if (!nodes_empty(nodes))
		return CONSTRAINT_MEMORY_POLICY;
#endif

	return CONSTRAINT_NONE;
}
/*
 * Simple selection loop. We choose the process with the highest
 * number of 'points'. We expect the caller will lock the tasklist.
 *
 * (not docbooked, we don't want this one cluttering up the manual)
 */
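/*
 * Return convention, as relied on by the callers in this file: NULL
 * means no killable task was found; ERR_PTR(-1UL) means the scan should
 * be aborted because some task is already on its way out.
 */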
static struct task_struct *select_bad_process(unsigned long *ppoints,
					      struct mem_cgroup *mem)
{
	struct task_struct *g, *p;
	struct task_struct *chosen = NULL;
	struct timespec uptime;

	*ppoints = 0;
	do_posix_clock_monotonic_gettime(&uptime);
	do_each_thread(g, p) {
		unsigned long points;

		/*
		 * skip kernel threads and tasks which have already released
		 * their mm.
		 */
		if (!p->mm)
			continue;
		/* skip the init task */
		if (is_global_init(p))
			continue;
		if (mem && !task_in_mem_cgroup(p, mem))
			continue;

		/*
		 * This task already has access to memory reserves and is
		 * being killed. Don't allow any other task access to the
		 * memory reserve.
		 *
		 * Note: this may have a chance of deadlock if it gets
		 * blocked waiting for another task which itself is waiting
		 * for memory. Is there a better alternative?
		 */
		if (test_tsk_thread_flag(p, TIF_MEMDIE))
			return ERR_PTR(-1UL);

		/*
		 * This is in the process of releasing memory so wait for it
		 * to finish before killing some other task by mistake.
		 *
		 * However, if p is the current task, we allow the 'kill' to
		 * go ahead if it is exiting: this will simply set TIF_MEMDIE,
		 * which will allow it to gain access to memory reserves in
		 * the process of exiting and releasing its resources.
		 * Otherwise we could get an easy OOM deadlock.
		 */
		if (p->flags & PF_EXITING) {
			if (p != current)
				return ERR_PTR(-1UL);

			chosen = p;
			*ppoints = ULONG_MAX;
		}

		if (p->oomkilladj == OOM_DISABLE)
			continue;

		points = badness(p, uptime.tv_sec);
		if (points > *ppoints || !chosen) {
			chosen = p;
			*ppoints = points;
		}
	} while_each_thread(g, p);

	return chosen;
}
/**
 * dump_tasks - dump current memory state of all system tasks
 * @mem: target memory controller
 *
 * Dumps the current memory state of all system tasks, excluding kernel
 * threads. State information includes the task's pid, uid, tgid, vm size,
 * rss, cpu, oom_adj score, and name.
 *
 * If @mem is non-NULL, only tasks that are members of that mem_cgroup
 * are shown.
 *
 * Call with tasklist_lock read-locked.
 */
static void dump_tasks(const struct mem_cgroup *mem)
{
	struct task_struct *g, *p;

	printk(KERN_INFO "[ pid ]   uid  tgid total_vm      rss cpu oom_adj "
	       "name\n");
	do_each_thread(g, p) {
		/*
		 * total_vm and rss sizes do not exist for tasks with a
		 * detached mm so there's no need to report them.
		 */
		if (!p->mm)
			continue;
		if (mem && !task_in_mem_cgroup(p, mem))
			continue;
		if (!thread_group_leader(p))
			continue;

		task_lock(p);
		printk(KERN_INFO "[%5d] %5d %5d %8lu %8lu %3d %3d %s\n",
		       p->pid, p->uid, p->tgid, p->mm->total_vm,
		       get_mm_rss(p->mm), (int)task_cpu(p), p->oomkilladj,
		       p->comm);
		task_unlock(p);
	} while_each_thread(g, p);
}
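/*
 * Example of dump_tasks() output (hypothetical values, alignment
 * approximate):
 *
 *	[ pid ]   uid  tgid total_vm      rss cpu oom_adj name
 *	[  842]     0   842     1523      312   0       0 syslogd
 *	[ 1234]  1000  1234    52012     8133   1       0 firefox
 */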
/*
 * Send SIGKILL to the selected process irrespective of CAP_SYS_RAWIO,
 * though it's unlikely that we select a process with CAP_SYS_RAWIO set
 * (badness() quarters the score of such tasks).
 */
static void __oom_kill_task(struct task_struct *p, int verbose)
{
	if (is_global_init(p)) {
		WARN_ON(1);
		printk(KERN_WARNING "tried to kill init!\n");
		return;
	}

	if (!p->mm) {
		WARN_ON(1);
		printk(KERN_WARNING "tried to kill an mm-less task!\n");
		return;
	}

	if (verbose)
		printk(KERN_ERR "Killed process %d (%s)\n",
		       task_pid_nr(p), p->comm);

	/*
	 * We give our sacrificial lamb high priority and access to
	 * all the memory it needs. That way it should be able to
	 * exit() and clear out its resources quickly...
	 */
	p->rt.time_slice = HZ;
	set_tsk_thread_flag(p, TIF_MEMDIE);

	force_sig(SIGKILL, p);
}
static int oom_kill_task(struct task_struct *p)
{
	struct mm_struct *mm;
	struct task_struct *g, *q;

	mm = p->mm;

	/* WARNING: mm may not be dereferenced since we did not obtain its
	 * value from get_task_mm(p). This is OK since all we need to do is
	 * compare mm to q->mm below.
	 *
	 * Furthermore, even if mm contains a non-NULL value, p->mm may
	 * change to NULL at any time since we do not hold task_lock(p).
	 * However, this is of no concern to us.
	 */
	if (mm == NULL)
		return 1;

	/*
	 * Don't kill the process if any threads are set to OOM_DISABLE
	 */
	do_each_thread(g, q) {
		if (q->mm == mm && q->oomkilladj == OOM_DISABLE)
			return 1;
	} while_each_thread(g, q);

	__oom_kill_task(p, 1);

	/*
	 * kill all processes that share the ->mm (i.e. all threads),
	 * but are in a different thread group. Don't let them have access
	 * to memory reserves though, otherwise we might deplete all memory.
	 */
	do_each_thread(g, q) {
		if (q->mm == mm && !same_thread_group(q, p))
			force_sig(SIGKILL, q);
	} while_each_thread(g, q);

	return 0;
}
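/*
 * oom_kill_process() below prefers to kill a child of the selected task
 * first, provided the child has its own mm: a forking server then loses
 * a single worker instead of the parent that holds the listening state.
 * Only when no such child can be killed does the parent itself die.
 */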
static int oom_kill_process(struct task_struct *p, gfp_t gfp_mask, int order,
			    unsigned long points, struct mem_cgroup *mem,
			    const char *message)
{
	struct task_struct *c;

	if (printk_ratelimit()) {
		printk(KERN_WARNING "%s invoked oom-killer: "
			"gfp_mask=0x%x, order=%d, oomkilladj=%d\n",
			current->comm, gfp_mask, order, current->oomkilladj);
		dump_stack();
		show_mem();
		if (sysctl_oom_dump_tasks)
			dump_tasks(mem);
	}

	/*
	 * If the task is already exiting, don't alarm the sysadmin or kill
	 * its children or threads, just set TIF_MEMDIE so it can die quickly
	 */
	if (p->flags & PF_EXITING) {
		__oom_kill_task(p, 0);
		return 0;
	}

	printk(KERN_ERR "%s: kill process %d (%s) score %lu or a child\n",
	       message, task_pid_nr(p), p->comm, points);

	/* Try to kill a child first */
	list_for_each_entry(c, &p->children, sibling) {
		if (c->mm == p->mm)
			continue;
		if (!oom_kill_task(c))
			return 0;
	}
	return oom_kill_task(p);
}
#ifdef CONFIG_CGROUP_MEM_RES_CTLR
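/*
 * Entry point for memory-cgroup OOMs, called from mm/memcontrol.c when a
 * cgroup hits its limit; the victim scan is restricted to tasks in @mem
 * via task_in_mem_cgroup() in select_bad_process().
 */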
void mem_cgroup_out_of_memory(struct mem_cgroup *mem, gfp_t gfp_mask)
{
	unsigned long points = 0;
	struct task_struct *p;

	cgroup_lock();
	read_lock(&tasklist_lock);
retry:
	p = select_bad_process(&points, mem);
	if (PTR_ERR(p) == -1UL)
		goto out;

	if (!p)
		p = current;

	if (oom_kill_process(p, gfp_mask, 0, points, mem,
				"Memory cgroup out of memory"))
		goto retry;
out:
	read_unlock(&tasklist_lock);
	cgroup_unlock();
}
#endif
static BLOCKING_NOTIFIER_HEAD(oom_notify_list);

int register_oom_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&oom_notify_list, nb);
}
EXPORT_SYMBOL_GPL(register_oom_notifier);

int unregister_oom_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_unregister(&oom_notify_list, nb);
}
EXPORT_SYMBOL_GPL(unregister_oom_notifier);
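/*
 * A notifier on this chain gets a chance to free memory before a victim
 * is chosen; out_of_memory() passes a pointer to an unsigned long that
 * the callback should increment by the number of pages it freed. A
 * minimal sketch (my_oom_shrink and drain_my_cache are hypothetical):
 *
 *	static int my_oom_shrink(struct notifier_block *nb,
 *				 unsigned long unused, void *parm)
 *	{
 *		unsigned long *freed = parm;
 *
 *		*freed += drain_my_cache();	// hypothetical helper
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block my_oom_nb = {
 *		.notifier_call = my_oom_shrink,
 *	};
 *
 *	register_oom_notifier(&my_oom_nb);
 */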
/*
 * Try to acquire the OOM killer lock for the zones in zonelist. Returns
 * zero if a parallel OOM killing is already taking place that includes a
 * zone in the zonelist. Otherwise, locks all zones in the zonelist and
 * returns 1.
 */
int try_set_zone_oom(struct zonelist *zonelist, gfp_t gfp_mask)
{
	struct zoneref *z;
	struct zone *zone;
	int ret = 1;

	spin_lock(&zone_scan_mutex);
	for_each_zone_zonelist(zone, z, zonelist, gfp_zone(gfp_mask)) {
		if (zone_is_oom_locked(zone)) {
			ret = 0;
			goto out;
		}
	}

	for_each_zone_zonelist(zone, z, zonelist, gfp_zone(gfp_mask)) {
		/*
		 * Lock each zone in the zonelist under zone_scan_mutex so a
		 * parallel invocation of try_set_zone_oom() doesn't succeed
		 * when it shouldn't.
		 */
		zone_set_flag(zone, ZONE_OOM_LOCKED);
	}

out:
	spin_unlock(&zone_scan_mutex);
	return ret;
}
/*
 * Clears the ZONE_OOM_LOCKED flag for all zones in the zonelist so that
 * failed allocation attempts with zonelists containing them may now
 * recall the OOM killer, if necessary.
 */
void clear_zonelist_oom(struct zonelist *zonelist, gfp_t gfp_mask)
{
	struct zoneref *z;
	struct zone *zone;

	spin_lock(&zone_scan_mutex);
	for_each_zone_zonelist(zone, z, zonelist, gfp_zone(gfp_mask)) {
		zone_clear_flag(zone, ZONE_OOM_LOCKED);
	}
	spin_unlock(&zone_scan_mutex);
}
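/*
 * Expected caller pattern, roughly as in __alloc_pages() in
 * mm/page_alloc.c (a sketch, not the verbatim code there):
 *
 *	if (try_set_zone_oom(zonelist, gfp_mask)) {
 *		out_of_memory(zonelist, gfp_mask, order);
 *		clear_zonelist_oom(zonelist, gfp_mask);
 *	}
 *	// else another OOM kill covering these zones is in flight,
 *	// so back off and retry the allocation instead.
 */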
/**
 * out_of_memory - kill the "best" process when we run out of memory
 * @zonelist: zonelist pointer
 * @gfp_mask: memory allocation flags
 * @order: amount of memory being requested as a power of 2
 *
 * If we run out of memory, we have the choice between either
 * killing a random task (bad), letting the system crash (worse)
 * or trying to be smart about which process to kill. Note that we
 * don't have to be perfect here, we just have to be good.
 */
void out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask, int order)
{
	struct task_struct *p;
	unsigned long points = 0;
	unsigned long freed = 0;
	enum oom_constraint constraint;

	blocking_notifier_call_chain(&oom_notify_list, 0, &freed);
	if (freed > 0)
		/* Got some memory back in the last second. */
		return;

	if (sysctl_panic_on_oom == 2)
		panic("out of memory. Compulsory panic_on_oom is selected.\n");
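	/*
	 * panic_on_oom semantics: 2 panics unconditionally (above), 1
	 * panics only for unconstrained (CONSTRAINT_NONE) OOMs in the
	 * switch below, and 0 never panics and lets a victim be picked.
	 */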
	/*
	 * Check if there were limitations on the allocation (only relevant for
	 * NUMA) that may require different handling.
	 */
	constraint = constrained_alloc(zonelist, gfp_mask);
	read_lock(&tasklist_lock);

	switch (constraint) {
	case CONSTRAINT_MEMORY_POLICY:
		oom_kill_process(current, gfp_mask, order, points, NULL,
				"No available memory (MPOL_BIND)");
		break;

	case CONSTRAINT_NONE:
		if (sysctl_panic_on_oom)
			panic("out of memory. panic_on_oom is selected\n");
		/* Fall-through */
	case CONSTRAINT_CPUSET:
		if (sysctl_oom_kill_allocating_task) {
			oom_kill_process(current, gfp_mask, order, points, NULL,
					"Out of memory (oom_kill_allocating_task)");
			break;
		}
retry:
		/*
		 * Rambo mode: Shoot down a process and hope it solves whatever
		 * issues we may have.
		 */
		p = select_bad_process(&points, NULL);

		if (PTR_ERR(p) == -1UL)
			goto out;

		/* Found nothing?!?! Either we hang forever, or we panic. */
		if (!p) {
			read_unlock(&tasklist_lock);
			panic("Out of memory and no killable processes...\n");
		}

		if (oom_kill_process(p, gfp_mask, order, points, NULL,
				     "Out of memory"))
			goto retry;

		break;
	}

out:
	read_unlock(&tasklist_lock);

	/*
	 * Give "p" a good chance of killing itself before we
	 * retry to allocate memory unless "p" is current.
	 */
	if (!test_thread_flag(TIF_MEMDIE))
		schedule_timeout_uninterruptible(1);
}