@@ -309,6 +309,13 @@ struct kmem_list3 __initdata initkmem_list3[NUM_INIT_LISTS];
 #define	SIZE_AC 1
 #define	SIZE_L3 (1 + MAX_NUMNODES)
 
+static int drain_freelist(struct kmem_cache *cache,
+			struct kmem_list3 *l3, int tofree);
+static void free_block(struct kmem_cache *cachep, void **objpp, int len,
+			int node);
+static void enable_cpucache(struct kmem_cache *cachep);
+static void cache_reap(void *unused);
+
 /*
  * This function must be completely optimized away if a constant is passed to
  * it. Mostly the same as what is in linux/slab.h except it returns an index.
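These prototypes move to the top of the file because drain_freelist() is now called from code (the CPU-hotplug path, __cache_shrink) that appears before its definition, and C requires a visible prototype at the call site. A minimal standalone illustration of that pattern, with invented names:

#include <stdio.h>

static int drain(int tofree);	/* prototype lets earlier code call it */

static void caller(void)
{
	printf("%d\n", drain(3));
}

static int drain(int tofree)
{
	return tofree;	/* stub body; defined after its first user */
}

int main(void)
{
	caller();	/* prints 3 */
	return 0;
}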
@@ -456,7 +463,7 @@ struct kmem_cache {
 #define	STATS_DEC_ACTIVE(x)	((x)->num_active--)
 #define	STATS_INC_ALLOCED(x)	((x)->num_allocations++)
 #define	STATS_INC_GROWN(x)	((x)->grown++)
-#define	STATS_INC_REAPED(x)	((x)->reaped++)
+#define	STATS_ADD_REAPED(x,y)	((x)->reaped += (y))
 #define	STATS_SET_HIGH(x)						\
 	do {								\
 		if ((x)->num_active > (x)->high_mark)			\
@@ -480,7 +487,7 @@ struct kmem_cache {
 #define	STATS_DEC_ACTIVE(x)	do { } while (0)
 #define	STATS_INC_ALLOCED(x)	do { } while (0)
 #define	STATS_INC_GROWN(x)	do { } while (0)
-#define	STATS_INC_REAPED(x)	do { } while (0)
+#define	STATS_ADD_REAPED(x,y)	do { } while (0)
 #define	STATS_SET_HIGH(x)	do { } while (0)
 #define	STATS_INC_ERR(x)	do { } while (0)
 #define	STATS_INC_NODEALLOCS(x)	do { } while (0)
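With slab freeing now batched in drain_freelist(), the reap path no longer bumps a counter once per freed slab; it adds the count drain_freelist() returns, hence STATS_INC_REAPED(x) becoming STATS_ADD_REAPED(x,y) in both the stats-on and stats-off variants. A minimal userspace model of the on/off macro pair (the STATS switch and the struct are stand-ins, not the kernel's):

#include <stdio.h>

#define STATS 1	/* stand-in for the kernel's DEBUG/STATS gating */

struct fake_cache {
	unsigned long reaped;	/* slabs reclaimed by reaping */
};

#if STATS
#define STATS_ADD_REAPED(x, y)	((x)->reaped += (y))
#else
#define STATS_ADD_REAPED(x, y)	do { } while (0)
#endif

int main(void)
{
	struct fake_cache c = { 0 };

	STATS_ADD_REAPED(&c, 3);	/* one reap pass freed 3 slabs */
	STATS_ADD_REAPED(&c, 2);	/* the next pass freed 2 */
	printf("reaped = %lu\n", c.reaped);	/* 5 with STATS=1, 0 with STATS=0 */
	return 0;
}

The do { } while (0) form keeps the disabled variant a single statement, so it remains safe in an unbraced if/else.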
@@ -700,12 +707,6 @@ int slab_is_available(void)
 
 static DEFINE_PER_CPU(struct work_struct, reap_work);
 
-static void free_block(struct kmem_cache *cachep, void **objpp, int len,
-			int node);
-static void enable_cpucache(struct kmem_cache *cachep);
-static void cache_reap(void *unused);
-static int __node_shrink(struct kmem_cache *cachep, int node);
-
 static inline struct array_cache *cpu_cache_get(struct kmem_cache *cachep)
 {
 	return cachep->array[smp_processor_id()];
@@ -1241,10 +1242,7 @@ free_array_cache:
 			l3 = cachep->nodelists[node];
 			if (!l3)
 				continue;
-			spin_lock_irq(&l3->list_lock);
-			/* free slabs belonging to this node */
-			__node_shrink(cachep, node);
-			spin_unlock_irq(&l3->list_lock);
+			drain_freelist(cachep, l3, l3->free_objects);
 		}
 		mutex_unlock(&cache_chain_mutex);
 		break;
@@ -2248,32 +2246,45 @@ static void drain_cpu_caches(struct kmem_cache *cachep)
 	}
 }
 
-static int __node_shrink(struct kmem_cache *cachep, int node)
+/*
+ * Remove slabs from the list of free slabs.
+ * Specify the number of slabs to drain in tofree.
+ *
+ * Returns the actual number of slabs released.
+ */
+static int drain_freelist(struct kmem_cache *cache,
+			struct kmem_list3 *l3, int tofree)
 {
+	struct list_head *p;
+	int nr_freed;
 	struct slab *slabp;
-	struct kmem_list3 *l3 = cachep->nodelists[node];
-	int ret;
 
-	for (;;) {
-		struct list_head *p;
+	nr_freed = 0;
+	while (nr_freed < tofree && !list_empty(&l3->slabs_free)) {
 
+		spin_lock_irq(&l3->list_lock);
 		p = l3->slabs_free.prev;
-		if (p == &l3->slabs_free)
-			break;
+		if (p == &l3->slabs_free) {
+			spin_unlock_irq(&l3->list_lock);
+			goto out;
+		}
 
-		slabp = list_entry(l3->slabs_free.prev, struct slab, list);
+		slabp = list_entry(p, struct slab, list);
 #if DEBUG
 		BUG_ON(slabp->inuse);
 #endif
 		list_del(&slabp->list);
-
-		l3->free_objects -= cachep->num;
+		/*
+		 * Safe to drop the lock. The slab is no longer linked
+		 * to the cache.
+		 */
+		l3->free_objects -= cache->num;
 		spin_unlock_irq(&l3->list_lock);
-		slab_destroy(cachep, slabp);
-		spin_lock_irq(&l3->list_lock);
+		slab_destroy(cache, slabp);
+		nr_freed++;
 	}
-	ret = !list_empty(&l3->slabs_full) || !list_empty(&l3->slabs_partial);
-	return ret;
+out:
+	return nr_freed;
 }
 
 static int __cache_shrink(struct kmem_cache *cachep)
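drain_freelist() above unlinks slabs from the tail of slabs_free one at a time under list_lock, then drops the lock before calling slab_destroy(), since an unlinked slab can no longer be reached by other CPUs; unlike the old __node_shrink() it also stops after tofree slabs and reports how many it actually freed. A compilable userspace sketch of the same pattern, with pthread/malloc standing in for the kernel primitives (every name here is invented for illustration):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct slab {
	struct slab *prev, *next;	/* doubly linked free-list node */
};

struct freelist {
	struct slab head;		/* sentinel: list empty when it points at itself */
	pthread_mutex_t lock;
	int free_objects;		/* objects held by free slabs */
	int num;			/* objects per slab */
};

static int drain_freelist(struct freelist *l, int tofree)
{
	int nr_freed = 0;

	while (nr_freed < tofree && l->head.prev != &l->head) {
		struct slab *slabp;

		pthread_mutex_lock(&l->lock);
		slabp = l->head.prev;		/* take from the tail */
		if (slabp == &l->head) {	/* emptied since the unlocked check */
			pthread_mutex_unlock(&l->lock);
			break;
		}
		/* unlink; the slab is now unreachable from the list */
		slabp->prev->next = slabp->next;
		slabp->next->prev = slabp->prev;
		l->free_objects -= l->num;
		pthread_mutex_unlock(&l->lock);

		free(slabp);			/* slab_destroy() stand-in, runs unlocked */
		nr_freed++;
	}
	return nr_freed;
}

int main(void)
{
	struct freelist l = { { &l.head, &l.head },
			      PTHREAD_MUTEX_INITIALIZER, 0, 8 };

	for (int i = 0; i < 5; i++) {	/* queue five free slabs */
		struct slab *s = malloc(sizeof(*s));
		s->prev = &l.head;
		s->next = l.head.next;
		l.head.next->prev = s;
		l.head.next = s;
		l.free_objects += l.num;
	}
	printf("freed %d slabs\n", drain_freelist(&l, 3));	/* freed 3 slabs */
	return 0;
}

Destroying outside the lock matters because slab_destroy() can take further locks and free pages; as the patch's own comment notes, an unlinked slab is safe to handle unlocked.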
@@ -2286,11 +2297,13 @@ static int __cache_shrink(struct kmem_cache *cachep)
 	check_irq_on();
 	for_each_online_node(i) {
 		l3 = cachep->nodelists[i];
-		if (l3) {
-			spin_lock_irq(&l3->list_lock);
-			ret += __node_shrink(cachep, i);
-			spin_unlock_irq(&l3->list_lock);
-		}
+		if (!l3)
+			continue;
+
+		drain_freelist(cachep, l3, l3->free_objects);
+
+		ret += !list_empty(&l3->slabs_full) ||
+			!list_empty(&l3->slabs_partial);
 	}
 	return (ret ? 1 : 0);
 }
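__cache_shrink() now computes the "still in use" answer that __node_shrink() used to return: passing l3->free_objects as tofree drains every free slab (each slab holds num >= 1 objects, so the object count can never undershoot the slab count), after which only remaining full or partial slabs keep the cache busy. A toy model of that post-drain test (counts stand in for the three slab lists; all names are invented):

#include <stdio.h>

struct node {
	int nr_full, nr_partial, nr_free;	/* slabs on each list */
};

/* only full/partial slabs pin the cache once the free list is drained */
static int still_in_use(const struct node *n)
{
	return n->nr_full > 0 || n->nr_partial > 0;
}

int main(void)
{
	struct node a = { 0, 0, 7 };	/* only free slabs: fully shrinkable */
	struct node b = { 1, 0, 0 };	/* a full slab: cannot shrink away */

	a.nr_free = 0;			/* pretend drain_freelist() emptied it */
	printf("%d %d\n", still_in_use(&a), still_in_use(&b));	/* 0 1 */
	return 0;
}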
@@ -3694,10 +3707,6 @@ static void cache_reap(void *unused)
 	}
 
 	list_for_each_entry(searchp, &cache_chain, next) {
-		struct list_head *p;
-		int tofree;
-		struct slab *slabp;
-
 		check_irq_on();
 
 		/*
@@ -3722,41 +3731,15 @@ static void cache_reap(void *unused)
 
 		drain_array(searchp, l3, l3->shared, 0, node);
 
-		if (l3->free_touched) {
+		if (l3->free_touched)
 			l3->free_touched = 0;
-			goto next;
-		}
-
-		tofree = (l3->free_limit + 5 * searchp->num - 1) /
-			(5 * searchp->num);
-		do {
-			/*
-			 * Do not lock if there are no free blocks.
-			 */
-			if (list_empty(&l3->slabs_free))
-				break;
-
-			spin_lock_irq(&l3->list_lock);
-			p = l3->slabs_free.next;
-			if (p == &(l3->slabs_free)) {
-				spin_unlock_irq(&l3->list_lock);
-				break;
-			}
-
-			slabp = list_entry(p, struct slab, list);
-			BUG_ON(slabp->inuse);
-			list_del(&slabp->list);
-			STATS_INC_REAPED(searchp);
-
-			/*
-			 * Safe to drop the lock. The slab is no longer linked
-			 * to the cache. searchp cannot disappear, we hold
-			 * cache_chain_lock
-			 */
-			l3->free_objects -= searchp->num;
-			spin_unlock_irq(&l3->list_lock);
-			slab_destroy(searchp, slabp);
-		} while (--tofree > 0);
+		else {
+			int freed;
+
+			freed = drain_freelist(searchp, l3, (l3->free_limit +
+				5 * searchp->num - 1) / (5 * searchp->num));
+			STATS_ADD_REAPED(searchp, freed);
+		}
 next:
 		cond_resched();
 	}
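The tofree argument cache_reap() now hands to drain_freelist() is a ceiling division: (l3->free_limit + 5 * searchp->num - 1) / (5 * searchp->num) rounds free_limit / (5 * num) up to a whole slab, so a pass over an untouched free list trims roughly a fifth of the node's free limit and always makes progress whenever free_limit > 0. A quick check with made-up numbers:

#include <stdio.h>

int main(void)
{
	int free_limit = 120;	/* example: max free objects kept per node */
	int num = 8;		/* example: objects per slab */

	/* the expression cache_reap() passes to drain_freelist() */
	int tofree = (free_limit + 5 * num - 1) / (5 * num);

	printf("tofree = %d slabs\n", tofree);	/* ceil(120 / 40) = 3 */
	return 0;
}

If free_touched was set, the pass only clears the flag and defers draining to a later run, which leaves recently used free lists alone.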