@@ -481,11 +481,13 @@ EXPORT_SYMBOL(slab_buffer_size);
 #endif
 
 /*
- * Do not go above this order unless 0 objects fit into the slab.
+ * Do not go above this order unless 0 objects fit into the slab or
+ * overridden on the command line.
  */
-#define	BREAK_GFP_ORDER_HI	1
-#define	BREAK_GFP_ORDER_LO	0
-static int slab_break_gfp_order = BREAK_GFP_ORDER_LO;
+#define	SLAB_MAX_ORDER_HI	1
+#define	SLAB_MAX_ORDER_LO	0
+static int slab_max_order = SLAB_MAX_ORDER_LO;
+static bool slab_max_order_set __initdata;
 
 /*
  * Functions for storing/retrieving the cachep and or slab from the page
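The hunk above renames slab_break_gfp_order to slab_max_order, making its role explicit: the highest page order the allocator will normally use for a slab. The new slab_max_order_set flag records whether the user picked a value at boot. For scale, a rough stand-alone illustration of what each order means, assuming 4 KB base pages (this demo is not part of the patch):

#include <stdio.h>

/* Not part of the patch: bytes covered by each allocation order,
 * assuming 4 KB base pages (PAGE_SIZE = 4096). SLAB_MAX_ORDER_LO (0)
 * caps a slab at one page; SLAB_MAX_ORDER_HI (1) allows two. */
int main(void)
{
	const long page_size = 4096;	/* assumed PAGE_SIZE */
	int order;

	for (order = 0; order <= 3; order++)
		printf("order %d -> %ld KB per slab\n",
		       order, (page_size << order) / 1024);
	return 0;
}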
@@ -854,6 +856,17 @@ static int __init noaliencache_setup(char *s)
 }
 __setup("noaliencache", noaliencache_setup);
 
+static int __init slab_max_order_setup(char *str)
+{
+	get_option(&str, &slab_max_order);
+	slab_max_order = slab_max_order < 0 ? 0 :
+				min(slab_max_order, MAX_ORDER - 1);
+	slab_max_order_set = true;
+
+	return 1;
+}
+__setup("slab_max_order=", slab_max_order_setup);
+
 #ifdef CONFIG_NUMA
 /*
  * Special reaping functions for NUMA systems called from cache_reap().
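The new __setup handler parses slab_max_order= from the kernel command line via get_option() and clamps the result to the valid range of 0 through MAX_ORDER - 1. A minimal userspace re-creation of just that clamping arithmetic, assuming MAX_ORDER is 11 (a common default, not stated in the patch):

#include <stdio.h>

#define MAX_ORDER 11			/* assumed value, not from the patch */

/* Mirror of the clamp in slab_max_order_setup(): negative requests
 * fall back to 0, oversized requests are capped at MAX_ORDER - 1. */
static int clamp_order(int requested)
{
	if (requested < 0)
		return 0;
	return requested < MAX_ORDER - 1 ? requested : MAX_ORDER - 1;
}

int main(void)
{
	int inputs[] = { -1, 0, 2, 99 };
	int i;

	for (i = 0; i < 4; i++)
		printf("slab_max_order=%d -> %d\n",
		       inputs[i], clamp_order(inputs[i]));
	return 0;
}

With the handler registered, booting with slab_max_order=2 would permit slabs of up to four contiguous pages even on small-memory machines.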
@@ -1502,10 +1515,11 @@ void __init kmem_cache_init(void)
 
 	/*
 	 * Fragmentation resistance on low memory - only use bigger
-	 * page orders on machines with more than 32MB of memory.
+	 * page orders on machines with more than 32MB of memory if
+	 * not overridden on the command line.
 	 */
-	if (totalram_pages > (32 << 20) >> PAGE_SHIFT)
-		slab_break_gfp_order = BREAK_GFP_ORDER_HI;
+	if (!slab_max_order_set && totalram_pages > (32 << 20) >> PAGE_SHIFT)
+		slab_max_order = SLAB_MAX_ORDER_HI;
 
 	/* Bootstrap is tricky, because several objects are allocated
 	 * from caches that do not exist yet:
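The threshold arithmetic is easy to misread: (32 << 20) is 32 MB expressed in bytes, and shifting right by PAGE_SHIFT converts it to a page count comparable with totalram_pages. A worked example assuming PAGE_SHIFT = 12 (4 KB pages):

#include <stdio.h>

/* Worked example of the threshold above, assuming PAGE_SHIFT = 12
 * (4 KB pages); these values are not from the patch itself. */
int main(void)
{
	const int page_shift = 12;
	long threshold = (32L << 20) >> page_shift;	/* 32 MB in pages */

	printf("totalram_pages must exceed %ld pages\n", threshold);	/* 8192 */
	return 0;
}

The added !slab_max_order_set guard demotes the 32 MB heuristic to a default; an explicit slab_max_order= on the command line now always wins.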
@@ -1932,8 +1946,8 @@ static void check_poison_obj(struct kmem_cache *cachep, void *objp)
 		/* Print header */
 		if (lines == 0) {
 			printk(KERN_ERR
-				"Slab corruption: %s start=%p, len=%d\n",
-				cachep->name, realobj, size);
+				"Slab corruption (%s): %s start=%p, len=%d\n",
+				print_tainted(), cachep->name, realobj, size);
 			print_objinfo(cachep, objp, 0);
 		}
 		/* Hexdump the affected line */
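print_tainted() is the existing kernel helper that returns a short taint summary such as "Not tainted" or "Tainted: P", so a corruption report immediately shows whether, say, a proprietary module was loaded when the poison check fired. An illustrative stand-alone rendering of the new format string (the cache name and address below are made up):

#include <stdio.h>

/* Illustrative only: how the extra "%s" slots the taint summary into
 * the report. tainted() stands in for the kernel's print_tainted(). */
static const char *tainted(void)
{
	return "Not tainted";
}

int main(void)
{
	int size = 64;
	void *realobj = (void *)0x1000;	/* placeholder address */

	printf("Slab corruption (%s): %s start=%p, len=%d\n",
	       tainted(), "size-64", realobj, size);
	return 0;
}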
@@ -2117,7 +2131,7 @@ static size_t calculate_slab_order(struct kmem_cache *cachep,
 		 * Large number of objects is good, but very large slabs are
 		 * currently bad for the gfp()s.
 		 */
-		if (gfporder >= slab_break_gfp_order)
+		if (gfporder >= slab_max_order)
 			break;
 
 		/*
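calculate_slab_order() walks gfporder upward looking for a good fit, and the renamed check stops that walk at slab_max_order, which the user may now have raised. A rough sketch of the trade-off being balanced, using hypothetical 600-byte objects on 4 KB pages and ignoring per-slab management overhead (this is not the kernel's exact algorithm):

#include <stdio.h>

/* Rough sketch of the fit/waste trade-off at each order: larger
 * orders pack more objects and waste fewer bytes per slab, but need
 * larger contiguous allocations. Object size is hypothetical. */
int main(void)
{
	const long page_size = 4096;
	const long obj_size = 600;	/* hypothetical object size */
	int order;

	for (order = 0; order <= 2; order++) {
		long slab_bytes = page_size << order;
		printf("order %d: %ld objects, %ld bytes left over\n",
		       order, slab_bytes / obj_size, slab_bytes % obj_size);
	}
	return 0;
}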
@@ -3042,8 +3056,9 @@ static void check_slabp(struct kmem_cache *cachep, struct slab *slabp)
 	if (entries != cachep->num - slabp->inuse) {
 bad:
 		printk(KERN_ERR "slab: Internal list corruption detected in "
-			"cache '%s'(%d), slabp %p(%d). Hexdump:\n",
-			cachep->name, cachep->num, slabp, slabp->inuse);
+			"cache '%s'(%d), slabp %p(%d). Tainted(%s). Hexdump:\n",
+			cachep->name, cachep->num, slabp, slabp->inuse,
+			print_tainted());
 		print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 16, 1, slabp,
 			sizeof(*slabp) + cachep->num * sizeof(kmem_bufctl_t),
 			1);
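The hexdump length passed to print_hex_dump() covers the slab descriptor itself plus the kmem_bufctl_t free-list array that immediately follows it, one entry per object in the slab. A layout sketch with assumed sizes (struct slab_demo stands in for the kernel's struct slab, which has more fields):

#include <stdio.h>

/* Layout arithmetic behind the hexdump length, with assumed sizes:
 * kmem_bufctl_t was an unsigned int in this era, and the descriptor
 * fields below are a simplified subset of the real struct slab. */
typedef unsigned int kmem_bufctl_t;

struct slab_demo {
	void *s_mem;		/* address of the first object */
	unsigned int inuse;	/* objects currently allocated */
	kmem_bufctl_t free;	/* head of the free-object list */
};

int main(void)
{
	unsigned int num = 32;	/* hypothetical objects per slab */
	size_t len = sizeof(struct slab_demo) + num * sizeof(kmem_bufctl_t);

	printf("hexdump covers %zu bytes\n", len);
	return 0;
}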