
Merge branch 'slab-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/christoph/vm

* 'slab-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/christoph/vm:
  slab: fix cache_cache bootstrap in kmem_cache_init()
  count_partial() is not used if !SLUB_DEBUG and !CONFIG_SLABINFO
Linus Torvalds 17 years ago
parent
commit
5254149f6c
2 changed files with 4 additions and 2 deletions
  1. mm/slab.c  (+2 -2)
  2. mm/slub.c  (+2 -0)

+ 2 - 2
mm/slab.c

@@ -1481,7 +1481,7 @@ void __init kmem_cache_init(void)
 	list_add(&cache_cache.next, &cache_chain);
 	cache_cache.colour_off = cache_line_size();
 	cache_cache.array[smp_processor_id()] = &initarray_cache.cache;
-	cache_cache.nodelists[node] = &initkmem_list3[CACHE_CACHE];
+	cache_cache.nodelists[node] = &initkmem_list3[CACHE_CACHE + node];
 
 	/*
 	 * struct kmem_cache size depends on nr_node_ids, which
@@ -1602,7 +1602,7 @@ void __init kmem_cache_init(void)
 		int nid;
 
 		for_each_online_node(nid) {
-			init_list(&cache_cache, &initkmem_list3[CACHE_CACHE], nid);
+			init_list(&cache_cache, &initkmem_list3[CACHE_CACHE + nid], nid);
 
 			init_list(malloc_sizes[INDEX_AC].cs_cachep,
 				  &initkmem_list3[SIZE_AC + nid], nid);
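
For context on the mm/slab.c change: the bootstrap fix gives cache_cache its own kmem_list3 slot per node instead of one shared slot, which is why initkmem_list3 is now indexed with CACHE_CACHE + node. The standalone sketch below illustrates that per-node indexing; it is not kernel code, and the constants and the kmem_list3_stub type are assumptions modeled loosely on mm/slab.c of that era.

/*
 * Illustrative sketch of per-node bootstrap indexing; constants and the
 * struct are assumptions, not the kernel's definitions.
 */
#include <stdio.h>

#define MAX_NUMNODES   4                    /* assumed small NUMA config */
#define CACHE_CACHE    0                    /* cache_cache slots start here */
#define SIZE_AC        MAX_NUMNODES         /* next per-node block */
#define SIZE_L3        (2 * MAX_NUMNODES)   /* last per-node block */
#define NUM_INIT_LISTS (3 * MAX_NUMNODES)

struct kmem_list3_stub { int owner_node; }; /* stand-in for struct kmem_list3 */
static struct kmem_list3_stub initkmem_list3[NUM_INIT_LISTS];

int main(void)
{
	for (int node = 0; node < MAX_NUMNODES; node++) {
		/* Without "+ node" every node would share slot CACHE_CACHE;
		 * with it, each node gets a private bootstrap list3. */
		initkmem_list3[CACHE_CACHE + node].owner_node = node;
		printf("node %d -> initkmem_list3[%d]\n",
		       node, CACHE_CACHE + node);
	}
	return 0;
}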

+ 2 - 0
mm/slub.c

@@ -2685,6 +2685,7 @@ void kfree(const void *x)
 }
 EXPORT_SYMBOL(kfree);
 
+#if defined(SLUB_DEBUG) || defined(CONFIG_SLABINFO)
 static unsigned long count_partial(struct kmem_cache_node *n)
 {
 	unsigned long flags;
@@ -2697,6 +2698,7 @@ static unsigned long count_partial(struct kmem_cache_node *n)
 	spin_unlock_irqrestore(&n->list_lock, flags);
 	return x;
 }
+#endif
 
 /*
  * kmem_cache_shrink removes empty slabs from the partial lists and sorts
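
For context on the mm/slub.c change: wrapping count_partial() in the preprocessor guard keeps the static function out of builds where neither SLUB debugging nor /proc/slabinfo support calls it, which avoids a "defined but not used" compiler warning. A minimal standalone sketch of the same guard pattern follows; the helper and macro names are illustrative assumptions, not kernel code.

#include <stdio.h>

/* Uncomment one of these to compile the helper in, mirroring how the
 * kernel guard reacts to its config options (names here are assumed). */
/* #define SLUB_DEBUG 1 */
/* #define CONFIG_SLABINFO 1 */

#if defined(SLUB_DEBUG) || defined(CONFIG_SLABINFO)
/* Only compiled when a caller exists, so no "defined but not used" warning. */
static unsigned long count_items(const unsigned long *items, int n)
{
	unsigned long x = 0;
	for (int i = 0; i < n; i++)
		x += items[i];
	return x;
}
#endif

int main(void)
{
	unsigned long items[] = { 1, 2, 3 };
#if defined(SLUB_DEBUG) || defined(CONFIG_SLABINFO)
	printf("total = %lu\n", count_items(items, 3));
#else
	(void)items;                  /* helper compiled out in this config */
	puts("count helper compiled out");
#endif
	return 0;
}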