@@ -417,6 +417,7 @@ void sock_update_memcg(struct sock *sk)
 {
 	if (mem_cgroup_sockets_enabled) {
 		struct mem_cgroup *memcg;
+		struct cg_proto *cg_proto;
 
 		BUG_ON(!sk->sk_prot->proto_cgroup);
 
@@ -436,9 +437,10 @@ void sock_update_memcg(struct sock *sk)
 
 		rcu_read_lock();
 		memcg = mem_cgroup_from_task(current);
-		if (!mem_cgroup_is_root(memcg)) {
+		cg_proto = sk->sk_prot->proto_cgroup(memcg);
+		if (!mem_cgroup_is_root(memcg) && memcg_proto_active(cg_proto)) {
 			mem_cgroup_get(memcg);
-			sk->sk_cgrp = sk->sk_prot->proto_cgroup(memcg);
+			sk->sk_cgrp = cg_proto;
 		}
 		rcu_read_unlock();
 	}
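
For context: the rewritten branch above only attaches sk->sk_cgrp when memcg_proto_active(cg_proto) holds, and disarm_sock_keys() below keys off memcg_proto_activated(). Neither helper is defined in this patch's hunks; a plausible sketch of both, assuming struct cg_proto carries an unsigned long flags word (ACTIVE gates new-socket assignment; ACTIVATED records that the static key was ever raised, so it must be dropped exactly once at destroy time):

enum cg_proto_flags {
	/* Currently active: new sockets should be assigned to this cgroup */
	MEMCG_SOCK_ACTIVE,
	/* Was ever activated: the static key must be decremented on destroy */
	MEMCG_SOCK_ACTIVATED,
};

static inline bool memcg_proto_active(struct cg_proto *cg_proto)
{
	return test_bit(MEMCG_SOCK_ACTIVE, &cg_proto->flags);
}

static inline bool memcg_proto_activated(struct cg_proto *cg_proto)
{
	return test_bit(MEMCG_SOCK_ACTIVATED, &cg_proto->flags);
}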
@@ -467,6 +469,19 @@ EXPORT_SYMBOL(tcp_proto_cgroup);
 #endif /* CONFIG_INET */
 #endif /* CONFIG_CGROUP_MEM_RES_CTLR_KMEM */
 
+#if defined(CONFIG_INET) && defined(CONFIG_CGROUP_MEM_RES_CTLR_KMEM)
+static void disarm_sock_keys(struct mem_cgroup *memcg)
+{
+	if (!memcg_proto_activated(&memcg->tcp_mem.cg_proto))
+		return;
+	static_key_slow_dec(&memcg_socket_limit_enabled);
+}
+#else
+static void disarm_sock_keys(struct mem_cgroup *memcg)
+{
+}
+#endif
+
 static void drain_all_stock_async(struct mem_cgroup *memcg);
 
 static struct mem_cgroup_per_zone *
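
disarm_sock_keys() performs a single static_key_slow_dec(), so the arming side must guarantee at most one matching static_key_slow_inc() per cgroup lifetime. A hedged sketch of that pairing (the real code sits in the tcp_memcontrol.c limit-setting path, not in this hunk): test_and_set_bit() on ACTIVATED makes the increment a one-shot, while ACTIVE can be set and cleared freely as the limit changes.

	/* Arm once per cgroup lifetime; disarm_sock_keys() undoes this once. */
	if (!test_and_set_bit(MEMCG_SOCK_ACTIVATED, &cg_proto->flags))
		static_key_slow_inc(&memcg_socket_limit_enabled);
	/* ACTIVE may toggle many times; it never touches the static key. */
	set_bit(MEMCG_SOCK_ACTIVE, &cg_proto->flags);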
@@ -4712,6 +4727,18 @@ static void free_work(struct work_struct *work)
 	int size = sizeof(struct mem_cgroup);
 
 	memcg = container_of(work, struct mem_cgroup, work_freeing);
+	/*
+	 * We need to make sure that (at least for now), the jump label
+	 * destruction code runs outside of the cgroup lock. This is because
+	 * get_online_cpus(), which is called from the static_branch update,
+	 * can't be called inside the cgroup_lock. cpusets are the ones
+	 * enforcing this dependency, so if they ever change, we might as well.
+	 *
+	 * schedule_work() will guarantee this happens. Be careful if you need
+	 * to move this code around, and make sure it is outside
+	 * the cgroup_lock.
+	 */
+	disarm_sock_keys(memcg);
 	if (size < PAGE_SIZE)
 		kfree(memcg);
 	else
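
The comment above depends on free_work() always running from a workqueue. For reference, a minimal sketch of the deferral chain that makes this true (field and function names assumed from the container_of() usage in this hunk and the memcontrol.c of this era): the RCU callback never frees the memcg directly, it only queues the work item.

static void free_rcu(struct rcu_head *rcu_head)
{
	struct mem_cgroup *memcg;

	memcg = container_of(rcu_head, struct mem_cgroup, rcu_freeing);
	/*
	 * Defer to process context: free_work() may sleep in
	 * static_key_slow_dec() -> get_online_cpus(), and it must run
	 * outside the cgroup_lock.
	 */
	INIT_WORK(&memcg->work_freeing, free_work);
	schedule_work(&memcg->work_freeing);
}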