Przeglądaj źródła

[PATCH] zoned vm counters: conversion of nr_pagetables to per zone counter

Conversion of nr_page_table_pages to a per zone counter

[akpm@osdl.org: bugfix]
Signed-off-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Christoph Lameter, 19 years ago
parent
commit
df849a1529

+ 1 - 1
arch/arm/mm/mm-armv.c

@@ -227,7 +227,7 @@ void free_pgd_slow(pgd_t *pgd)
 
 	pte = pmd_page(*pmd);
 	pmd_clear(pmd);
-	dec_page_state(nr_page_table_pages);
+	dec_zone_page_state(virt_to_page((unsigned long *)pgd), NR_PAGETABLE);
 	pte_lock_deinit(pte);
 	pte_free(pte);
 	pmd_free(pmd);

+ 2 - 1
arch/i386/mm/pgtable.c

@@ -63,7 +63,8 @@ void show_mem(void)
 	printk(KERN_INFO "%lu pages writeback\n", ps.nr_writeback);
 	printk(KERN_INFO "%lu pages mapped\n", global_page_state(NR_FILE_MAPPED));
 	printk(KERN_INFO "%lu pages slab\n", global_page_state(NR_SLAB));
-	printk(KERN_INFO "%lu pages pagetables\n", ps.nr_page_table_pages);
+	printk(KERN_INFO "%lu pages pagetables\n",
+					global_page_state(NR_PAGETABLE));
 }
 
 /*

+ 1 - 1
arch/um/kernel/skas/mmu.c

@@ -152,7 +152,7 @@ void destroy_context_skas(struct mm_struct *mm)
 		free_page(mmu->id.stack);
 		pte_lock_deinit(virt_to_page(mmu->last_page_table));
 		pte_free_kernel((pte_t *) mmu->last_page_table);
-                dec_page_state(nr_page_table_pages);
+		dec_zone_page_state(virt_to_page(mmu->last_page_table), NR_PAGETABLE);
 #ifdef CONFIG_3_LEVEL_PGTABLES
 		pmd_free((pmd_t *) mmu->last_pmd);
 #endif

+ 2 - 0
drivers/base/node.c

@@ -70,6 +70,7 @@ static ssize_t node_read_meminfo(struct sys_device * dev, char * buf)
 		       "Node %d FilePages:    %8lu kB\n"
 		       "Node %d Mapped:       %8lu kB\n"
 		       "Node %d AnonPages:    %8lu kB\n"
+		       "Node %d PageTables:   %8lu kB\n"
 		       "Node %d Slab:         %8lu kB\n",
 		       nid, K(i.totalram),
 		       nid, K(i.freeram),
@@ -85,6 +86,7 @@ static ssize_t node_read_meminfo(struct sys_device * dev, char * buf)
 		       nid, K(node_page_state(nid, NR_FILE_PAGES)),
 		       nid, K(node_page_state(nid, NR_FILE_MAPPED)),
 		       nid, K(node_page_state(nid, NR_ANON_PAGES)),
+		       nid, K(node_page_state(nid, NR_PAGETABLE)),
 		       nid, K(node_page_state(nid, NR_SLAB)));
 	n += hugetlb_report_node_meminfo(nid, buf + n);
 	return n;

+ 2 - 2
fs/proc/proc_misc.c

@@ -171,9 +171,9 @@ static int meminfo_read_proc(char *page, char **start, off_t off,
 		"AnonPages:    %8lu kB\n"
 		"Mapped:       %8lu kB\n"
 		"Slab:         %8lu kB\n"
+		"PageTables:   %8lu kB\n"
 		"CommitLimit:  %8lu kB\n"
 		"Committed_AS: %8lu kB\n"
-		"PageTables:   %8lu kB\n"
 		"VmallocTotal: %8lu kB\n"
 		"VmallocUsed:  %8lu kB\n"
 		"VmallocChunk: %8lu kB\n",
@@ -195,9 +195,9 @@ static int meminfo_read_proc(char *page, char **start, off_t off,
 		K(global_page_state(NR_ANON_PAGES)),
 		K(global_page_state(NR_FILE_MAPPED)),
 		K(global_page_state(NR_SLAB)),
+		K(global_page_state(NR_PAGETABLE)),
 		K(allowed),
 		K(committed),
-		K(ps.nr_page_table_pages),
 		(unsigned long)VMALLOC_TOTAL >> 10,
 		vmi.used >> 10,
 		vmi.largest_chunk >> 10

+ 1 - 0
include/linux/mmzone.h

@@ -52,6 +52,7 @@ enum zone_stat_item {
 			   only modified from process context */
 	NR_FILE_PAGES,
 	NR_SLAB,	/* Pages used by slab allocator */
+	NR_PAGETABLE,	/* used for pagetables */
 	NR_VM_ZONE_STAT_ITEMS };
 
 struct per_cpu_pages {

+ 1 - 2
include/linux/vmstat.h

@@ -25,8 +25,7 @@ struct page_state {
 	unsigned long nr_dirty;		/* Dirty writeable pages */
 	unsigned long nr_writeback;	/* Pages under writeback */
 	unsigned long nr_unstable;	/* NFS unstable pages */
-	unsigned long nr_page_table_pages;/* Pages used for pagetables */
-#define GET_PAGE_STATE_LAST nr_page_table_pages
+#define GET_PAGE_STATE_LAST nr_unstable
 
 	/*
 	 * The below are zeroed by get_page_state().  Use get_full_page_state()

+ 2 - 2
mm/memory.c

@@ -126,7 +126,7 @@ static void free_pte_range(struct mmu_gather *tlb, pmd_t *pmd)
 	pmd_clear(pmd);
 	pte_lock_deinit(page);
 	pte_free_tlb(tlb, page);
-	dec_page_state(nr_page_table_pages);
+	dec_zone_page_state(page, NR_PAGETABLE);
 	tlb->mm->nr_ptes--;
 }
 
@@ -311,7 +311,7 @@ int __pte_alloc(struct mm_struct *mm, pmd_t *pmd, unsigned long address)
 		pte_free(new);
 	} else {
 		mm->nr_ptes++;
-		inc_page_state(nr_page_table_pages);
+		inc_zone_page_state(new, NR_PAGETABLE);
 		pmd_populate(mm, pmd, new);
 	}
 	spin_unlock(&mm->page_table_lock);

+ 1 - 1
mm/page_alloc.c

@@ -1320,7 +1320,7 @@ void show_free_areas(void)
 		nr_free_pages(),
 		global_page_state(NR_SLAB),
 		global_page_state(NR_FILE_MAPPED),
-		ps.nr_page_table_pages);
+		global_page_state(NR_PAGETABLE));
 
 	for_each_zone(zone) {
 		int i;

+ 1 - 1
mm/vmstat.c

@@ -399,12 +399,12 @@ static char *vmstat_text[] = {
 	"nr_mapped",
 	"nr_file_pages",
 	"nr_slab",
+	"nr_page_table_pages",
 
 	/* Page state */
 	"nr_dirty",
 	"nr_writeback",
 	"nr_unstable",
-	"nr_page_table_pages",
 
 	"pgpgin",
 	"pgpgout",