@@ -2207,9 +2207,12 @@ static bool pfmemalloc_watermark_ok(pg_data_t *pgdat)
  * Throttle direct reclaimers if backing storage is backed by the network
  * and the PFMEMALLOC reserve for the preferred node is getting dangerously
  * depleted. kswapd will continue to make progress and wake the processes
- * when the low watermark is reached
+ * when the low watermark is reached.
+ *
+ * Returns true if a fatal signal was delivered during throttling. If this
+ * happens, the page allocator should not consider triggering the OOM killer.
  */
-static void throttle_direct_reclaim(gfp_t gfp_mask, struct zonelist *zonelist,
+static bool throttle_direct_reclaim(gfp_t gfp_mask, struct zonelist *zonelist,
 					nodemask_t *nodemask)
 {
 	struct zone *zone;
@@ -2224,13 +2227,20 @@ static void throttle_direct_reclaim(gfp_t gfp_mask, struct zonelist *zonelist,
 	 * processes to block on log_wait_commit().
 	 */
 	if (current->flags & PF_KTHREAD)
-		return;
+		goto out;
+
+	/*
+	 * If a fatal signal is pending, this process should not throttle.
+	 * It should return quickly so it can exit and free its memory
+	 */
+	if (fatal_signal_pending(current))
+		goto out;
 
 	/* Check if the pfmemalloc reserves are ok */
 	first_zones_zonelist(zonelist, high_zoneidx, NULL, &zone);
 	pgdat = zone->zone_pgdat;
 	if (pfmemalloc_watermark_ok(pgdat))
-		return;
+		goto out;
 
 	/* Account for the throttling */
 	count_vm_event(PGSCAN_DIRECT_THROTTLE);
@@ -2246,12 +2256,20 @@ static void throttle_direct_reclaim(gfp_t gfp_mask, struct zonelist *zonelist,
 	if (!(gfp_mask & __GFP_FS)) {
 		wait_event_interruptible_timeout(pgdat->pfmemalloc_wait,
 			pfmemalloc_watermark_ok(pgdat), HZ);
-		return;
+
+		goto check_pending;
 	}
 
 	/* Throttle until kswapd wakes the process */
 	wait_event_killable(zone->zone_pgdat->pfmemalloc_wait,
 		pfmemalloc_watermark_ok(pgdat));
+
+check_pending:
+	if (fatal_signal_pending(current))
+		return true;
+
+out:
+	return false;
 }
 
 unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
@@ -2273,13 +2291,12 @@ unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
 		.gfp_mask = sc.gfp_mask,
 	};
 
-	throttle_direct_reclaim(gfp_mask, zonelist, nodemask);
-
 	/*
-	 * Do not enter reclaim if fatal signal is pending. 1 is returned so
-	 * that the page allocator does not consider triggering OOM
+	 * Do not enter reclaim if fatal signal was delivered while throttled.
+	 * 1 is returned so that the page allocator does not OOM kill at this
+	 * point.
 	 */
-	if (fatal_signal_pending(current))
+	if (throttle_direct_reclaim(gfp_mask, zonelist, nodemask))
 		return 1;
 
 	trace_mm_vmscan_direct_reclaim_begin(order,