@@ -1405,7 +1405,7 @@ static void __free_slab(struct kmem_cache *s, struct page *page)
 	reset_page_mapcount(page);
 	if (current->reclaim_state)
 		current->reclaim_state->reclaimed_slab += pages;
-	__free_pages(page, order);
+	__free_memcg_kmem_pages(page, order);
 }

 #define need_reserve_slab_rcu
@@ -2323,6 +2323,7 @@ static __always_inline void *slab_alloc_node(struct kmem_cache *s,
 	if (slab_pre_alloc_hook(s, gfpflags))
 		return NULL;

+	s = memcg_kmem_get_cache(s, gfpflags);
 redo:

 	/*
@@ -3284,7 +3285,7 @@ static void *kmalloc_large_node(size_t size, gfp_t flags, int node)
 	struct page *page;
 	void *ptr = NULL;

-	flags |= __GFP_COMP | __GFP_NOTRACK;
+	flags |= __GFP_COMP | __GFP_NOTRACK | __GFP_KMEMCG;
 	page = alloc_pages_node(node, flags, get_order(size));
 	if (page)
 		ptr = page_address(page);
@@ -3390,7 +3391,7 @@ void kfree(const void *x)
 	if (unlikely(!PageSlab(page))) {
 		BUG_ON(!PageCompound(page));
 		kmemleak_free(x);
-		__free_pages(page, compound_order(page));
+		__free_memcg_kmem_pages(page, compound_order(page));
 		return;
 	}
 	slab_free(page->slab_cache, page, object, _RET_IP_);