Revert "x86_64: Quicklist support for x86_64"

This reverts commit 34feb2c83beb3bdf13535a36770f7e50b47ef299.

Suresh Siddha points out that this one breaks the fundamental
requirement that you cannot free page table pages before the TLB caches
are flushed.  The quicklists do not give the same kinds of guarantees
that the mmu_gather structure does, at least not in NUMA configurations.

Requested-by: Suresh Siddha <suresh.b.siddha@intel.com>
Acked-by: Andi Kleen <ak@suse.de>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Christoph Lameter <clameter@sgi.com>
Cc: Asit Mallick <asit.k.mallick@intel.com>
Cc: Tony Luck <tony.luck@intel.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
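
To illustrate Suresh's point, here is a minimal standalone sketch of the
ordering that the mmu_gather machinery enforces and a quicklist does not.
Every name below is a hypothetical stand-in, not the kernel's actual API;
the only point is that the TLB flush must complete before any page table
page goes back to the allocator.

/*
 * Page table pages torn down during an unmap are batched, the TLB is
 * flushed, and only then are the pages freed.  Freeing them earlier
 * (which a quicklist may do by recycling the page immediately) lets
 * another CPU's hardware page walk read a page that has been reused.
 * A real implementation also flushes and drains when the batch fills.
 */
struct gather_sketch {
	void *pages[64];		/* page table pages pending free */
	unsigned int nr;
};

/* queue a page table page instead of freeing it on the spot */
static void gather_remove_page(struct gather_sketch *tlb, void *page)
{
	tlb->pages[tlb->nr++] = page;
}

/* end of the unmap: flush first, free second */
static void gather_finish(struct gather_sketch *tlb,
			  void (*flush_tlb)(void),
			  void (*free_page)(void *))
{
	flush_tlb();			/* step 1: no CPU can reach the pages */
	while (tlb->nr)			/* step 2: only now is freeing safe */
		free_page(tlb->pages[--tlb->nr]);
}
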
Linus Torvalds, 18 years ago
Parent commit: da8f153e51
5 changed files with 26 additions and 59 deletions
  1. arch/x86_64/Kconfig           +0  -8
  2. arch/x86_64/kernel/process.c  +0  -1
  3. arch/x86_64/kernel/smp.c      +1  -1
  4. include/asm-x86_64/pgalloc.h  +24 -49
  5. include/asm-x86_64/pgtable.h  +1  -0

+ 0 - 8
arch/x86_64/Kconfig

@@ -60,14 +60,6 @@ config ZONE_DMA
 	bool
 	default y
 
-config QUICKLIST
-	bool
-	default y
-
-config NR_QUICK
-	int
-	default 2
-
 config ISA
 	bool
 

+ 0 - 1
arch/x86_64/kernel/process.c

@@ -208,7 +208,6 @@ void cpu_idle (void)
 			if (__get_cpu_var(cpu_idle_state))
 				__get_cpu_var(cpu_idle_state) = 0;
 
-			check_pgt_cache();
 			rmb();
 			idle = pm_idle;
 			if (!idle)

+ 1 - 1
arch/x86_64/kernel/smp.c

@@ -241,7 +241,7 @@ void flush_tlb_mm (struct mm_struct * mm)
 	}
 	if (!cpus_empty(cpu_mask))
 		flush_tlb_others(cpu_mask, mm, FLUSH_ALL);
-	check_pgt_cache();
+
 	preempt_enable();
 }
 EXPORT_SYMBOL(flush_tlb_mm);

+ 24 - 49
include/asm-x86_64/pgalloc.h

@@ -4,10 +4,6 @@
 #include <asm/pda.h>
 #include <linux/threads.h>
 #include <linux/mm.h>
-#include <linux/quicklist.h>
-
-#define QUICK_PGD 0	/* We preserve special mappings over free */
-#define QUICK_PT 1	/* Other page table pages that are zero on free */
 
 #define pmd_populate_kernel(mm, pmd, pte) \
 		set_pmd(pmd, __pmd(_PAGE_TABLE | __pa(pte)))
@@ -24,23 +20,23 @@ static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, struct page *p
 static inline void pmd_free(pmd_t *pmd)
 {
 	BUG_ON((unsigned long)pmd & (PAGE_SIZE-1));
-	quicklist_free(QUICK_PT, NULL, pmd);
+	free_page((unsigned long)pmd);
 }
 
 static inline pmd_t *pmd_alloc_one (struct mm_struct *mm, unsigned long addr)
 {
-	return (pmd_t *)quicklist_alloc(QUICK_PT, GFP_KERNEL|__GFP_REPEAT, NULL);
+	return (pmd_t *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
 }
 
 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
 {
-	return (pud_t *)quicklist_alloc(QUICK_PT, GFP_KERNEL|__GFP_REPEAT, NULL);
+	return (pud_t *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
 }
 
 static inline void pud_free (pud_t *pud)
 {
 	BUG_ON((unsigned long)pud & (PAGE_SIZE-1));
-	quicklist_free(QUICK_PT, NULL, pud);
+	free_page((unsigned long)pud);
 }
 
 static inline void pgd_list_add(pgd_t *pgd)
@@ -61,57 +57,41 @@ static inline void pgd_list_del(pgd_t *pgd)
 	spin_unlock(&pgd_lock);
 }
 
-static inline void pgd_ctor(void *x)
+static inline pgd_t *pgd_alloc(struct mm_struct *mm)
 {
 	unsigned boundary;
-	pgd_t *pgd = x;
-	struct page *page = virt_to_page(pgd);
-
+	pgd_t *pgd = (pgd_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT);
+	if (!pgd)
+		return NULL;
+	pgd_list_add(pgd);
 	/*
 	 * Copy kernel pointers in from init.
+	 * Could keep a freelist or slab cache of those because the kernel
+	 * part never changes.
 	 */
 	boundary = pgd_index(__PAGE_OFFSET);
+	memset(pgd, 0, boundary * sizeof(pgd_t));
 	memcpy(pgd + boundary,
-		init_level4_pgt + boundary,
-		(PTRS_PER_PGD - boundary) * sizeof(pgd_t));
-
-	spin_lock(&pgd_lock);
-	list_add(&page->lru, &pgd_list);
-	spin_unlock(&pgd_lock);
-}
-
-static inline void pgd_dtor(void *x)
-{
-	pgd_t *pgd = x;
-	struct page *page = virt_to_page(pgd);
-
-        spin_lock(&pgd_lock);
-	list_del(&page->lru);
-	spin_unlock(&pgd_lock);
-}
-
-static inline pgd_t *pgd_alloc(struct mm_struct *mm)
-{
-	pgd_t *pgd = (pgd_t *)quicklist_alloc(QUICK_PGD,
-		GFP_KERNEL|__GFP_REPEAT, pgd_ctor);
+	       init_level4_pgt + boundary,
+	       (PTRS_PER_PGD - boundary) * sizeof(pgd_t));
 	return pgd;
 }
 
 static inline void pgd_free(pgd_t *pgd)
 {
 	BUG_ON((unsigned long)pgd & (PAGE_SIZE-1));
-	quicklist_free(QUICK_PGD, pgd_dtor, pgd);
+	pgd_list_del(pgd);
+	free_page((unsigned long)pgd);
 }
 
 static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
 {
-	return (pte_t *)quicklist_alloc(QUICK_PT, GFP_KERNEL|__GFP_REPEAT, NULL);
+	return (pte_t *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
 }
 
 static inline struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
 {
-	void *p = (void *)quicklist_alloc(QUICK_PT, GFP_KERNEL|__GFP_REPEAT, NULL);
-
+	void *p = (void *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
 	if (!p)
 		return NULL;
 	return virt_to_page(p);
@@ -123,22 +103,17 @@ static inline struct page *pte_alloc_one(struct mm_struct *mm, unsigned long add
 static inline void pte_free_kernel(pte_t *pte)
 {
 	BUG_ON((unsigned long)pte & (PAGE_SIZE-1));
-	quicklist_free(QUICK_PT, NULL, pte);
+	free_page((unsigned long)pte); 
 }
 
 static inline void pte_free(struct page *pte)
 {
-	quicklist_free_page(QUICK_PT, NULL, pte);
-}
+	__free_page(pte);
+} 
 
-#define __pte_free_tlb(tlb,pte) quicklist_free_page(QUICK_PT, NULL,(pte))
+#define __pte_free_tlb(tlb,pte) tlb_remove_page((tlb),(pte))
 
-#define __pmd_free_tlb(tlb,x)   quicklist_free(QUICK_PT, NULL, (x))
-#define __pud_free_tlb(tlb,x)   quicklist_free(QUICK_PT, NULL, (x))
+#define __pmd_free_tlb(tlb,x)   tlb_remove_page((tlb),virt_to_page(x))
+#define __pud_free_tlb(tlb,x)   tlb_remove_page((tlb),virt_to_page(x))
 
-static inline void check_pgt_cache(void)
-{
-	quicklist_trim(QUICK_PGD, pgd_dtor, 25, 16);
-	quicklist_trim(QUICK_PT, NULL, 25, 16);
-}
 #endif /* _X86_64_PGALLOC_H */
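
The core of the revert is the three macros at the end of the hunk above:
__pte_free_tlb(), __pmd_free_tlb() and __pud_free_tlb() once again hand
the page to tlb_remove_page(), which batches it in the mmu_gather so it
is freed only after the TLB flush in tlb_finish_mmu().  Roughly how the
generic unmap path drives this in a 2.6.22-era tree (condensed from the
unmap_region() pattern in mm/mmap.c; treat it as a sketch, not the exact
code):

	struct mmu_gather *tlb;
	unsigned long nr_accounted = 0;

	tlb = tlb_gather_mmu(mm, 0);		/* start batching frees */
	unmap_vmas(&tlb, vma, start, end, &nr_accounted, NULL);
	free_pgtables(&tlb, vma, FIRST_USER_ADDRESS, 0);
						/* -> __pte_free_tlb() etc. */
	tlb_finish_mmu(tlb, start, end);	/* TLB flush, then the frees */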

+ 1 - 0
include/asm-x86_64/pgtable.h

@@ -411,6 +411,7 @@ pte_t *lookup_address(unsigned long addr);
 #define HAVE_ARCH_UNMAPPED_AREA
 
 #define pgtable_cache_init()   do { } while (0)
+#define check_pgt_cache()      do { } while (0)
 
 #define PAGE_AGP    PAGE_KERNEL_NOCACHE
 #define HAVE_PAGE_AGP 1
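
A closing note on the pgtable.h hunk: check_pgt_cache() stays defined as
a no-op so that any remaining callers compile unchanged now that x86_64
has no page table cache to trim.  The do { } while (0) form is the usual
idiom for an empty statement-like macro: unlike an empty expansion it
still requires a terminating semicolon and parses as exactly one
statement, so a hypothetical caller such as

	if (need_resched())
		check_pgt_cache();
	else
		schedule();

behaves the same whether the macro body is empty or not.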