@@ -108,10 +108,12 @@ enum mem_cgroup_events_index {
 enum mem_cgroup_events_target {
 	MEM_CGROUP_TARGET_THRESH,
 	MEM_CGROUP_TARGET_SOFTLIMIT,
+	MEM_CGROUP_TARGET_NUMAINFO,
 	MEM_CGROUP_NTARGETS,
 };
 #define THRESHOLDS_EVENTS_TARGET (128)
 #define SOFTLIMIT_EVENTS_TARGET (1024)
+#define NUMAINFO_EVENTS_TARGET (1024)
 
 struct mem_cgroup_stat_cpu {
 	long count[MEM_CGROUP_STAT_NSTATS];
@@ -237,7 +239,8 @@ struct mem_cgroup {
 	int last_scanned_node;
 #if MAX_NUMNODES > 1
 	nodemask_t	scan_nodes;
-	unsigned long	next_scan_node_update;
+	atomic_t	numainfo_events;
+	atomic_t	numainfo_updating;
 #endif
 	/*
 	 * Should the accounting and control be hierarchical, per subtree?
@@ -680,6 +683,9 @@ static void __mem_cgroup_target_update(struct mem_cgroup *mem, int target)
 	case MEM_CGROUP_TARGET_SOFTLIMIT:
 		next = val + SOFTLIMIT_EVENTS_TARGET;
 		break;
+	case MEM_CGROUP_TARGET_NUMAINFO:
+		next = val + NUMAINFO_EVENTS_TARGET;
+		break;
 	default:
 		return;
 	}
@@ -698,11 +704,19 @@ static void memcg_check_events(struct mem_cgroup *mem, struct page *page)
 		mem_cgroup_threshold(mem);
 		__mem_cgroup_target_update(mem, MEM_CGROUP_TARGET_THRESH);
 		if (unlikely(__memcg_event_check(mem,
-			MEM_CGROUP_TARGET_SOFTLIMIT))){
+			MEM_CGROUP_TARGET_SOFTLIMIT))) {
 			mem_cgroup_update_tree(mem, page);
 			__mem_cgroup_target_update(mem,
-				MEM_CGROUP_TARGET_SOFTLIMIT);
+						   MEM_CGROUP_TARGET_SOFTLIMIT);
+		}
+#if MAX_NUMNODES > 1
+		if (unlikely(__memcg_event_check(mem,
+			MEM_CGROUP_TARGET_NUMAINFO))) {
+			atomic_inc(&mem->numainfo_events);
+			__mem_cgroup_target_update(mem,
+				MEM_CGROUP_TARGET_NUMAINFO);
 		}
+#endif
 	}
 }
 
@@ -1582,11 +1596,15 @@ static bool test_mem_cgroup_node_reclaimable(struct mem_cgroup *mem,
 static void mem_cgroup_may_update_nodemask(struct mem_cgroup *mem)
 {
 	int nid;
-
-	if (time_after(mem->next_scan_node_update, jiffies))
+	/*
+	 * numainfo_events > 0 means there was at least NUMAINFO_EVENTS_TARGET
+	 * pagein/pageout changes since the last update.
+	 */
+	if (!atomic_read(&mem->numainfo_events))
+		return;
+	if (atomic_inc_return(&mem->numainfo_updating) > 1)
 		return;
 
-	mem->next_scan_node_update = jiffies + 10*HZ;
 	/* make a nodemask where this memcg uses memory from */
 	mem->scan_nodes = node_states[N_HIGH_MEMORY];
 
@@ -1595,6 +1613,9 @@ static void mem_cgroup_may_update_nodemask(struct mem_cgroup *mem)
 		if (!test_mem_cgroup_node_reclaimable(mem, nid, false))
 			node_clear(nid, mem->scan_nodes);
 	}
+
+	atomic_set(&mem->numainfo_events, 0);
+	atomic_set(&mem->numainfo_updating, 0);
 }
 
 /*
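
A note on the update scheme above: mem_cgroup_may_update_nodemask() replaces the old jiffies-based throttle with an event counter plus an "updating" guard, so the nodemask is rebuilt only after enough page events and by only one caller at a time, without taking a lock. The sketch below is a minimal user-space analogue of that pattern (not kernel code), assuming C11 <stdatomic.h>; the names events, updating, and rebuild_nodemask are illustrative only. atomic_fetch_add() returns the old value, so the "> 0" test here plays the role of the kernel's atomic_inc_return() "> 1" test.

#include <stdatomic.h>
#include <stdio.h>

static atomic_int events;	/* bumped once enough page events accumulated */
static atomic_int updating;	/* guards the expensive rebuild */

static void rebuild_nodemask(void)
{
	/* stand-in for rescanning mem->scan_nodes */
	printf("rebuilding nodemask\n");
}

static void may_update_nodemask(void)
{
	/* nothing happened since the last rebuild: bail out cheaply */
	if (!atomic_load(&events))
		return;
	/* only the first caller to raise the counter does the work */
	if (atomic_fetch_add(&updating, 1) > 0)
		return;

	rebuild_nodemask();

	/* reopen the gate for the next batch of events */
	atomic_store(&events, 0);
	atomic_store(&updating, 0);
}

int main(void)
{
	atomic_fetch_add(&events, 1);	/* as if NUMAINFO_EVENTS_TARGET was hit */
	may_update_nodemask();		/* performs the rebuild */
	may_update_nodemask();		/* no-op: events was cleared */
	return 0;
}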