Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/penberg/slab-2.6

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/penberg/slab-2.6: (27 commits)
  SLUB: Fix memory hotplug with !NUMA
  slub: Move functions to reduce #ifdefs
  slub: Enable sysfs support for !CONFIG_SLUB_DEBUG
  SLUB: Optimize slab_free() debug check
  slub: Move NUMA-related functions under CONFIG_NUMA
  slub: Add lock release annotation
  slub: Fix signedness warnings
  slub: extract common code to remove objects from partial list without locking
  SLUB: Pass active and inactive redzone flags instead of boolean to debug functions
  slub: reduce differences between SMP and NUMA
  Revert "Slub: UP bandaid"
  percpu: clear memory allocated with the km allocator
  percpu: use percpu allocator on UP too
  percpu: reduce PCPU_MIN_UNIT_SIZE to 32k
  vmalloc: pcpu_get/free_vm_areas() aren't needed on UP
  SLUB: Fix merged slab cache names
  Slub: UP bandaid
  slub: fix SLUB_RESILIENCY_TEST for dynamic kmalloc caches
  slub: Fix up missing kmalloc_cache -> kmem_cache_node case for memoryhotplug
  slub: Add dummy functions for the !SLUB_DEBUG case
  ...
Linus Torvalds, 14 years ago
commit 76c39e4fef
4 changed files with 267 additions and 243 deletions

  1. include/linux/slub_def.h    +4   -10
  2. lib/Kconfig.debug           +1   -1
  3. mm/slob.c                   +3   -1
  4. mm/slub.c                 +259   -231

+ 4 - 10
include/linux/slub_def.h

@@ -87,7 +87,7 @@ struct kmem_cache {
 	unsigned long min_partial;
 	const char *name;	/* Name (only for display!) */
 	struct list_head list;	/* List of slab caches */
-#ifdef CONFIG_SLUB_DEBUG
+#ifdef CONFIG_SYSFS
 	struct kobject kobj;	/* For sysfs */
 #endif
 
@@ -96,11 +96,8 @@ struct kmem_cache {
 	 * Defragmentation by allocating from a remote node.
 	 */
 	int remote_node_defrag_ratio;
-	struct kmem_cache_node *node[MAX_NUMNODES];
-#else
-	/* Avoid an extra cache line for UP */
-	struct kmem_cache_node local_node;
 #endif
+	struct kmem_cache_node *node[MAX_NUMNODES];
 };
 
 /*
@@ -139,19 +136,16 @@ struct kmem_cache {
 
 #ifdef CONFIG_ZONE_DMA
 #define SLUB_DMA __GFP_DMA
-/* Reserve extra caches for potential DMA use */
-#define KMALLOC_CACHES (2 * SLUB_PAGE_SHIFT)
 #else
 /* Disable DMA functionality */
 #define SLUB_DMA (__force gfp_t)0
-#define KMALLOC_CACHES SLUB_PAGE_SHIFT
 #endif
 
 /*
  * We keep the general caches in an array of slab caches that are used for
  * 2^x bytes of allocations.
  */
-extern struct kmem_cache kmalloc_caches[KMALLOC_CACHES];
+extern struct kmem_cache *kmalloc_caches[SLUB_PAGE_SHIFT];
 
 /*
  * Sorry that the following has to be that ugly but some versions of GCC
@@ -216,7 +210,7 @@ static __always_inline struct kmem_cache *kmalloc_slab(size_t size)
 	if (index == 0)
 		return NULL;
 
-	return &kmalloc_caches[index];
+	return kmalloc_caches[index];
 }
 
 void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
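
The struct hunks above make the per-node array unconditional: the UP-only local_node member is gone, and every configuration now reaches its kmem_cache_node through node[]. A minimal sketch of what that buys; get_node() is the accessor in mm/slub.c after this series, while the surrounding definitions here are simplified stand-ins, not the real slub_def.h:

	/* Simplified stand-ins; the real definitions live in slub_def.h. */
	#define MAX_NUMNODES 4

	struct kmem_cache_node {
		unsigned long nr_partial;
		/* ... */
	};

	struct kmem_cache {
		/* ... */
		struct kmem_cache_node *node[MAX_NUMNODES]; /* now unconditional */
	};

	/*
	 * Pre-patch, !NUMA builds embedded a local_node and needed an
	 * #ifdef to return &s->local_node; post-patch the same indexed
	 * lookup serves UP, SMP and NUMA alike.
	 */
	static inline struct kmem_cache_node *get_node(struct kmem_cache *s, int node)
	{
		return s->node[node];
	}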

+ 1 - 1
lib/Kconfig.debug

@@ -353,7 +353,7 @@ config SLUB_DEBUG_ON
 config SLUB_STATS
 	default n
 	bool "Enable SLUB performance statistics"
-	depends on SLUB && SLUB_DEBUG && SYSFS
+	depends on SLUB && SYSFS
 	help
 	  SLUB statistics are useful to debug SLUB's allocation behavior in
 	  order to find ways to optimize the allocator. This should never be
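
Relaxing the dependency means SLUB statistics no longer drag in CONFIG_SLUB_DEBUG; sysfs alone suffices now that the kobject sits under CONFIG_SYSFS in slub_def.h. A hypothetical .config fragment that becomes valid after this change (illustrative, not taken from the commit):

	# Post-patch: SLUB_STATS needs only SLUB and SYSFS.
	CONFIG_SLUB=y
	CONFIG_SYSFS=y
	# CONFIG_SLUB_DEBUG is not set
	CONFIG_SLUB_STATS=y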

+ 3 - 1
mm/slob.c

@@ -500,7 +500,9 @@ void *__kmalloc_node(size_t size, gfp_t gfp, int node)
 	} else {
 		unsigned int order = get_order(size);
 
-		ret = slob_new_pages(gfp | __GFP_COMP, get_order(size), node);
+		if (likely(order))
+			gfp |= __GFP_COMP;
+		ret = slob_new_pages(gfp, order, node);
 		if (ret) {
 			struct page *page;
 			page = virt_to_page(ret);
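
The slob fix sets __GFP_COMP only when the page-allocator fallback actually spans multiple pages: a compound page only makes sense for order > 0, and tagging an order-0 page was wasted work. A small userspace model of the changed branch; the flag value and slob_new_pages() are stand-ins for the kernel's, purely for illustration:

	#include <stdio.h>
	#include <stddef.h>

	#define __GFP_COMP 0x4000u	/* stand-in value, not the kernel's */

	/* Stand-in for slob's page-allocator fallback. */
	static void *slob_new_pages(unsigned int gfp, unsigned int order, int node)
	{
		(void)node;	/* node hint unused in this sketch */
		printf("order=%u, gfp %s __GFP_COMP\n", order,
		       (gfp & __GFP_COMP) ? "includes" : "omits");
		return NULL;	/* real allocation elided */
	}

	static void kmalloc_large_sketch(size_t size, unsigned int gfp, int node)
	{
		/* Models get_order(size) for 4 KiB pages. */
		unsigned int order = size > 4096 ? 1 : 0;

		/* Post-patch: only multi-page allocations are made compound. */
		if (order)
			gfp |= __GFP_COMP;
		slob_new_pages(gfp, order, node);
	}

	int main(void)
	{
		kmalloc_large_sketch(8192, 0, -1);	/* order 1: compound */
		kmalloc_large_sketch(2048, 0, -1);	/* order 0: plain page */
		return 0;
	}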

File diff suppressed because it is too large
+ 259 - 231
mm/slub.c

