@@ -269,7 +269,11 @@ static inline struct kmem_cache_node *get_node(struct kmem_cache *s, int node)
 
 static inline struct kmem_cache_cpu *get_cpu_slab(struct kmem_cache *s, int cpu)
 {
-	return &s->cpu_slab[cpu];
+#ifdef CONFIG_SMP
+	return s->cpu_slab[cpu];
+#else
+	return &s->cpu_slab;
+#endif
 }
 
 static inline int check_valid_pointer(struct kmem_cache *s,
@@ -1858,16 +1862,6 @@ static void init_kmem_cache_cpu(struct kmem_cache *s,
 	c->node = 0;
 }
 
-static inline int alloc_kmem_cache_cpus(struct kmem_cache *s, gfp_t flags)
-{
-	int cpu;
-
-	for_each_possible_cpu(cpu)
-		init_kmem_cache_cpu(s, get_cpu_slab(s, cpu));
-
-	return 1;
-}
-
 static void init_kmem_cache_node(struct kmem_cache_node *n)
 {
 	n->nr_partial = 0;
@@ -1879,6 +1873,131 @@ static void init_kmem_cache_node(struct kmem_cache_node *n)
 #endif
 }
 
+#ifdef CONFIG_SMP
+/*
+ * Per cpu array for per cpu structures.
+ *
+ * The per cpu array places all kmem_cache_cpu structures from one processor
+ * close together meaning that it becomes possible that multiple per cpu
+ * structures are contained in one cacheline. This may be particularly
+ * beneficial for the kmalloc caches.
+ *
+ * A desktop system typically has around 60-80 slabs. With 100 here we are
+ * likely able to get per cpu structures for all caches from the array defined
+ * here. We must be able to cover all kmalloc caches during bootstrap.
+ *
+ * If the per cpu array is exhausted then fall back to kmalloc
+ * of individual cachelines. No sharing is possible then.
+ */
+#define NR_KMEM_CACHE_CPU 100
+
+static DEFINE_PER_CPU(struct kmem_cache_cpu,
+				kmem_cache_cpu)[NR_KMEM_CACHE_CPU];
+
+static DEFINE_PER_CPU(struct kmem_cache_cpu *, kmem_cache_cpu_free);
+static cpumask_t kmem_cach_cpu_free_init_once = CPU_MASK_NONE;
+
+static struct kmem_cache_cpu *alloc_kmem_cache_cpu(struct kmem_cache *s,
+							int cpu, gfp_t flags)
+{
+	struct kmem_cache_cpu *c = per_cpu(kmem_cache_cpu_free, cpu);
+
+	if (c)
+		per_cpu(kmem_cache_cpu_free, cpu) =
+				(void *)c->freelist;
+	else {
+		/* Table overflow: So allocate ourselves */
+		c = kmalloc_node(
+			ALIGN(sizeof(struct kmem_cache_cpu), cache_line_size()),
+			flags, cpu_to_node(cpu));
+		if (!c)
+			return NULL;
+	}
+
+	init_kmem_cache_cpu(s, c);
+	return c;
+}
+
+static void free_kmem_cache_cpu(struct kmem_cache_cpu *c, int cpu)
+{
+	if (c < per_cpu(kmem_cache_cpu, cpu) ||
+			c > per_cpu(kmem_cache_cpu, cpu) + NR_KMEM_CACHE_CPU) {
+		kfree(c);
+		return;
+	}
+	c->freelist = (void *)per_cpu(kmem_cache_cpu_free, cpu);
+	per_cpu(kmem_cache_cpu_free, cpu) = c;
+}
+
+static void free_kmem_cache_cpus(struct kmem_cache *s)
+{
+	int cpu;
+
+	for_each_online_cpu(cpu) {
+		struct kmem_cache_cpu *c = get_cpu_slab(s, cpu);
+
+		if (c) {
+			s->cpu_slab[cpu] = NULL;
+			free_kmem_cache_cpu(c, cpu);
+		}
+	}
+}
+
+static int alloc_kmem_cache_cpus(struct kmem_cache *s, gfp_t flags)
+{
+	int cpu;
+
+	for_each_online_cpu(cpu) {
+		struct kmem_cache_cpu *c = get_cpu_slab(s, cpu);
+
+		if (c)
+			continue;
+
+		c = alloc_kmem_cache_cpu(s, cpu, flags);
+		if (!c) {
+			free_kmem_cache_cpus(s);
+			return 0;
+		}
+		s->cpu_slab[cpu] = c;
+	}
+	return 1;
+}
+
+/*
+ * Initialize the per cpu array.
+ */
+static void init_alloc_cpu_cpu(int cpu)
+{
+	int i;
+
+	if (cpu_isset(cpu, kmem_cach_cpu_free_init_once))
+		return;
+
+	for (i = NR_KMEM_CACHE_CPU - 1; i >= 0; i--)
+		free_kmem_cache_cpu(&per_cpu(kmem_cache_cpu, cpu)[i], cpu);
+
+	cpu_set(cpu, kmem_cach_cpu_free_init_once);
+}
+
+static void __init init_alloc_cpu(void)
+{
+	int cpu;
+
+	for_each_online_cpu(cpu)
+		init_alloc_cpu_cpu(cpu);
+}
+
+#else
+static inline void free_kmem_cache_cpus(struct kmem_cache *s) {}
+static inline void init_alloc_cpu(void) {}
+
+static inline int alloc_kmem_cache_cpus(struct kmem_cache *s, gfp_t flags)
+{
+	init_kmem_cache_cpu(s, &s->cpu_slab);
+	return 1;
+}
+#endif
+
 #ifdef CONFIG_NUMA
 /*
  * No kmalloc_node yet so do it by hand. We know that this is the first
@@ -1886,7 +2005,8 @@ static void init_kmem_cache_node(struct kmem_cache_node *n)
  * possible.
  *
  * Note that this function only works on the kmalloc_node_cache
- * when allocating for the kmalloc_node_cache.
+ * when allocating for the kmalloc_node_cache. This is used for bootstrapping
+ * memory on a fresh node that has no slab structures yet.
  */
 static struct kmem_cache_node *early_kmem_cache_node_alloc(gfp_t gfpflags,
 							   int node)
@@ -2115,6 +2235,7 @@ static int kmem_cache_open(struct kmem_cache *s, gfp_t gfpflags,
 
 	if (alloc_kmem_cache_cpus(s, gfpflags & ~SLUB_DMA))
 		return 1;
+	free_kmem_cache_nodes(s);
 error:
 	if (flags & SLAB_PANIC)
 		panic("Cannot create slab %s size=%lu realsize=%u "
@@ -2197,6 +2318,7 @@ static inline int kmem_cache_close(struct kmem_cache *s)
 	flush_all(s);
 
 	/* Attempt to free all objects */
+	free_kmem_cache_cpus(s);
 	for_each_node_state(node, N_NORMAL_MEMORY) {
 		struct kmem_cache_node *n = get_node(s, node);
 
@@ -2584,6 +2706,8 @@ void __init kmem_cache_init(void)
 	int i;
 	int caches = 0;
 
+	init_alloc_cpu();
+
 #ifdef CONFIG_NUMA
 	/*
 	 * Must first have the slab cache available for the allocations of the
@@ -2644,10 +2768,12 @@ void __init kmem_cache_init(void)
 
 #ifdef CONFIG_SMP
 	register_cpu_notifier(&slab_notifier);
+	kmem_size = offsetof(struct kmem_cache, cpu_slab) +
+				nr_cpu_ids * sizeof(struct kmem_cache_cpu *);
+#else
+	kmem_size = sizeof(struct kmem_cache);
 #endif
 
-	kmem_size = offsetof(struct kmem_cache, cpu_slab) +
-				nr_cpu_ids * sizeof(struct kmem_cache_cpu);
 
 	printk(KERN_INFO "SLUB: Genslabs=%d, HWalign=%d, Order=%d-%d, MinObjects=%d,"
 		" CPUs=%d, Nodes=%d\n",
@@ -2774,15 +2900,29 @@ static int __cpuinit slab_cpuup_callback(struct notifier_block *nfb,
 	unsigned long flags;
 
 	switch (action) {
+	case CPU_UP_PREPARE:
+	case CPU_UP_PREPARE_FROZEN:
+		init_alloc_cpu_cpu(cpu);
+		down_read(&slub_lock);
+		list_for_each_entry(s, &slab_caches, list)
+			s->cpu_slab[cpu] = alloc_kmem_cache_cpu(s, cpu,
+							GFP_KERNEL);
+		up_read(&slub_lock);
+		break;
+
 	case CPU_UP_CANCELED:
 	case CPU_UP_CANCELED_FROZEN:
 	case CPU_DEAD:
 	case CPU_DEAD_FROZEN:
 		down_read(&slub_lock);
 		list_for_each_entry(s, &slab_caches, list) {
+			struct kmem_cache_cpu *c = get_cpu_slab(s, cpu);
+
 			local_irq_save(flags);
 			__flush_cpu_slab(s, cpu);
 			local_irq_restore(flags);
+			free_kmem_cache_cpu(c, cpu);
+			s->cpu_slab[cpu] = NULL;
 		}
 		up_read(&slub_lock);
 		break;
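
The comment the patch adds above NR_KMEM_CACHE_CPU describes the scheme in prose: each CPU statically reserves a small array of kmem_cache_cpu structures, the array is threaded onto a per-cpu freelist (reusing each entry's freelist field as the link, see init_alloc_cpu_cpu() and kmem_cache_cpu_free), entries are popped off as caches are created, and a cacheline-aligned kmalloc_node() is used once the array is exhausted. The stand-alone user-space C sketch below only illustrates that reserve-array-plus-freelist pattern; it is not part of the patch, and the names (cpu_struct, NR_RESERVED, alloc_entry, free_entry) and the malloc() fallback are illustrative assumptions.

#include <stdio.h>
#include <stdlib.h>

/* Stand-in for struct kmem_cache_cpu: while an entry sits on the free list,
 * its first field is reused as the link pointer, as the patch does with
 * c->freelist. */
struct cpu_struct {
	void *freelist;			/* next free entry while on the free list */
	int node;
};

#define NR_RESERVED 4			/* stands in for NR_KMEM_CACHE_CPU (100) */

static struct cpu_struct reserved[NR_RESERVED];	/* statically reserved array */
static struct cpu_struct *free_head;			/* like kmem_cache_cpu_free */

/* Thread every reserved entry onto the freelist, as init_alloc_cpu_cpu() does. */
static void init_reserved(void)
{
	int i;

	for (i = NR_RESERVED - 1; i >= 0; i--) {
		reserved[i].freelist = free_head;
		free_head = &reserved[i];
	}
}

/* Pop from the freelist; fall back to the general-purpose allocator when the
 * reserved array is exhausted, as alloc_kmem_cache_cpu() falls back to
 * kmalloc_node(). */
static struct cpu_struct *alloc_entry(void)
{
	struct cpu_struct *c = free_head;

	if (c)
		free_head = c->freelist;
	else
		c = malloc(sizeof(*c));	/* table overflow: allocate separately */
	return c;
}

/* Push reserved entries back onto the freelist and free anything that was
 * allocated separately, using an address-range check in the spirit of
 * free_kmem_cache_cpu(). */
static void free_entry(struct cpu_struct *c)
{
	if (c < reserved || c >= reserved + NR_RESERVED) {
		free(c);
		return;
	}
	c->freelist = free_head;
	free_head = c;
}

int main(void)
{
	struct cpu_struct *a, *b;

	init_reserved();
	a = alloc_entry();		/* served from the reserved array */
	b = alloc_entry();
	printf("a from reserved array: %d\n",
	       a >= reserved && a < reserved + NR_RESERVED);
	free_entry(b);
	free_entry(a);
	return 0;
}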