|
@@ -2612,6 +2612,7 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
|
|
|
int migratetype = allocflags_to_migratetype(gfp_mask);
|
|
|
unsigned int cpuset_mems_cookie;
|
|
|
int alloc_flags = ALLOC_WMARK_LOW|ALLOC_CPUSET;
|
|
|
+ struct mem_cgroup *memcg = NULL;
|
|
|
|
|
|
gfp_mask &= gfp_allowed_mask;
|
|
|
|
|
@@ -2630,6 +2631,13 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
|
|
|
if (unlikely(!zonelist->_zonerefs->zone))
|
|
|
return NULL;
|
|
|
|
|
|
+ /*
|
|
|
+ * Will only have any effect when __GFP_KMEMCG is set. This is
|
|
|
+ * verified in the (always inline) callee
|
|
|
+ */
|
|
|
+ if (!memcg_kmem_newpage_charge(gfp_mask, &memcg, order))
|
|
|
+ return NULL;
|
|
|
+
|
|
|
retry_cpuset:
|
|
|
cpuset_mems_cookie = get_mems_allowed();
|
|
|
|
|
@@ -2665,6 +2673,8 @@ out:
|
|
|
if (unlikely(!put_mems_allowed(cpuset_mems_cookie) && !page))
|
|
|
goto retry_cpuset;
|
|
|
|
|
|
+ memcg_kmem_commit_charge(page, memcg, order);
|
|
|
+
|
|
|
return page;
|
|
|
}
|
|
|
EXPORT_SYMBOL(__alloc_pages_nodemask);
|
|
@@ -2717,6 +2727,31 @@ void free_pages(unsigned long addr, unsigned int order)
|
|
|
|
|
|
EXPORT_SYMBOL(free_pages);
|
|
|
|
|
|
+/*
|
|
|
+ * __free_memcg_kmem_pages and free_memcg_kmem_pages will free
|
|
|
+ * pages allocated with __GFP_KMEMCG.
|
|
|
+ *
|
|
|
+ * Those pages are accounted to a particular memcg, embedded in the
|
|
|
+ * corresponding page_cgroup. To avoid adding a hit in the allocator to search
|
|
|
+ * for that information only to find out that it is NULL for users who have no
|
|
|
+ * interest in that whatsoever, we provide these functions.
|
|
|
+ *
|
|
|
+ * The caller knows better which flags it relies on.
|
|
|
+ */
|
|
|
/*
 * Release a compound of 2^@order pages that was allocated with
 * __GFP_KMEMCG, dropping the memcg charge recorded in the page's
 * page_cgroup before handing the pages back to the buddy allocator.
 */
void __free_memcg_kmem_pages(struct page *page, unsigned int order)
{
	/*
	 * The uncharge must precede the actual free: it consults state
	 * attached to @page, which is only valid while we still own it.
	 */
	memcg_kmem_uncharge_pages(page, order);
	__free_pages(page, order);
}
|
|
|
+
|
|
|
/*
 * Address-based variant of __free_memcg_kmem_pages(): accepts the
 * kernel virtual address of the allocation instead of its struct page.
 * A zero address is a no-op, mirroring free_pages().
 */
void free_memcg_kmem_pages(unsigned long addr, unsigned int order)
{
	if (addr == 0)
		return;

	VM_BUG_ON(!virt_addr_valid((void *)addr));
	__free_memcg_kmem_pages(virt_to_page((void *)addr), order);
}
|
|
|
+
|
|
|
static void *make_alloc_exact(unsigned long addr, unsigned order, size_t size)
|
|
|
{
|
|
|
if (addr) {
|