@@ -2956,7 +2956,7 @@ static struct kmem_cache *memcg_params_to_cache(struct memcg_cache_params *p)
 
 	VM_BUG_ON(p->is_root_cache);
 	cachep = p->root_cache;
-	return cachep->memcg_params->memcg_caches[memcg_cache_id(p->memcg)];
+	return cache_from_memcg_idx(cachep, memcg_cache_id(p->memcg));
 }
 
 #ifdef CONFIG_SLABINFO
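For reference: every converted site was open-coding the same dereference. A minimal sketch of the cache_from_memcg_idx() helper being adopted, assuming the mm/slab.h definition of this era of the tree (the NULL check on memcg_params is the main thing the raw array accesses lacked):

	static inline struct kmem_cache *
	cache_from_memcg_idx(struct kmem_cache *s, int idx)
	{
		/* Root caches that never grew per-memcg copies have no params array. */
		if (!s->memcg_params)
			return NULL;
		return s->memcg_params->memcg_caches[idx];
	}

Callers such as memcg_params_to_cache() above can then stop reaching through memcg_params by hand.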
@@ -3393,7 +3393,7 @@ static struct kmem_cache *memcg_create_kmem_cache(struct mem_cgroup *memcg,
 	idx = memcg_cache_id(memcg);
 
 	mutex_lock(&memcg_cache_mutex);
-	new_cachep = cachep->memcg_params->memcg_caches[idx];
+	new_cachep = cache_from_memcg_idx(cachep, idx);
 	if (new_cachep) {
 		css_put(&memcg->css);
 		goto out;
@@ -3439,8 +3439,8 @@ void kmem_cache_destroy_memcg_children(struct kmem_cache *s)
 	 * we'll take the set_limit_mutex to protect ourselves against this.
 	 */
 	mutex_lock(&set_limit_mutex);
-	for (i = 0; i < memcg_limited_groups_array_size; i++) {
-		c = s->memcg_params->memcg_caches[i];
+	for_each_memcg_cache_index(i) {
+		c = cache_from_memcg_idx(s, i);
 		if (!c)
 			continue;
 
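The destruction loop additionally switches to the iteration helper. A sketch of for_each_memcg_cache_index(), assuming the include/linux/memcontrol.h macro of this era; it only hides the memcg_limited_groups_array_size bound that the open-coded loop spelled out:

	#define for_each_memcg_cache_index(_idx)	\
		for ((_idx) = 0; (_idx) < memcg_limited_groups_array_size; (_idx)++)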
@@ -3573,8 +3573,8 @@ struct kmem_cache *__memcg_kmem_get_cache(struct kmem_cache *cachep,
 	 * code updating memcg_caches will issue a write barrier to match this.
 	 */
 	read_barrier_depends();
-	if (likely(cachep->memcg_params->memcg_caches[idx])) {
-		cachep = cachep->memcg_params->memcg_caches[idx];
+	if (likely(cache_from_memcg_idx(cachep, idx))) {
+		cachep = cache_from_memcg_idx(cachep, idx);
 		goto out;
 	}
 
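Note the fast path calls cache_from_memcg_idx() twice, mirroring the two raw array reads it replaces: this is a mechanical conversion, not a behavioral change. The read_barrier_depends() above the converted lines still matters, because the helper loads through memcg_params->memcg_caches[]. A sketch of the barrier pairing the in-code comment describes (the writer-side placement is an assumption drawn from that comment, not code in this patch):

	/* Writer (cache creation), sketched: make the new cache's fields
	 * globally visible before publishing the pointer readers will find.
	 */
	smp_wmb();
	root_cache->memcg_params->memcg_caches[idx] = new_cachep;

	/* Reader (this fast path): order the dependent load so a non-NULL
	 * pointer is dereferenced only after the data it points to is visible.
	 */
	read_barrier_depends();
	cachep = cache_from_memcg_idx(cachep, idx);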