@@ -315,7 +315,7 @@ static int drain_freelist(struct kmem_cache *cache,
 			struct kmem_list3 *l3, int tofree);
 static void free_block(struct kmem_cache *cachep, void **objpp, int len,
 			int node);
-static int enable_cpucache(struct kmem_cache *cachep);
+static int enable_cpucache(struct kmem_cache *cachep, gfp_t gfp);
 static void cache_reap(struct work_struct *unused);
 
 /*
@@ -958,12 +958,12 @@ static void __cpuinit start_cpu_timer(int cpu)
 }
 
 static struct array_cache *alloc_arraycache(int node, int entries,
-					    int batchcount)
+					    int batchcount, gfp_t gfp)
 {
 	int memsize = sizeof(void *) * entries + sizeof(struct array_cache);
 	struct array_cache *nc = NULL;
 
-	nc = kmalloc_node(memsize, GFP_KERNEL, node);
+	nc = kmalloc_node(memsize, gfp, node);
 	if (nc) {
 		nc->avail = 0;
 		nc->limit = entries;
@@ -1003,7 +1003,7 @@ static int transfer_objects(struct array_cache *to,
 #define drain_alien_cache(cachep, alien) do { } while (0)
 #define reap_alien(cachep, l3) do { } while (0)
 
-static inline struct array_cache **alloc_alien_cache(int node, int limit)
+static inline struct array_cache **alloc_alien_cache(int node, int limit, gfp_t gfp)
 {
 	return (struct array_cache **)BAD_ALIEN_MAGIC;
 }
@@ -1034,7 +1034,7 @@ static inline void *____cache_alloc_node(struct kmem_cache *cachep,
 static void *____cache_alloc_node(struct kmem_cache *, gfp_t, int);
 static void *alternate_node_alloc(struct kmem_cache *, gfp_t);
 
-static struct array_cache **alloc_alien_cache(int node, int limit)
+static struct array_cache **alloc_alien_cache(int node, int limit, gfp_t gfp)
 {
 	struct array_cache **ac_ptr;
 	int memsize = sizeof(void *) * nr_node_ids;
@@ -1042,14 +1042,14 @@ static struct array_cache **alloc_alien_cache(int node, int limit)
 
 	if (limit > 1)
 		limit = 12;
-	ac_ptr = kmalloc_node(memsize, GFP_KERNEL, node);
+	ac_ptr = kmalloc_node(memsize, gfp, node);
 	if (ac_ptr) {
 		for_each_node(i) {
 			if (i == node || !node_online(i)) {
 				ac_ptr[i] = NULL;
 				continue;
 			}
-			ac_ptr[i] = alloc_arraycache(node, limit, 0xbaadf00d);
+			ac_ptr[i] = alloc_arraycache(node, limit, 0xbaadf00d, gfp);
 			if (!ac_ptr[i]) {
 				for (i--; i >= 0; i--)
 					kfree(ac_ptr[i]);
@@ -1282,20 +1282,20 @@ static int __cpuinit cpuup_prepare(long cpu)
 		struct array_cache **alien = NULL;
 
 		nc = alloc_arraycache(node, cachep->limit,
-					cachep->batchcount);
+					cachep->batchcount, GFP_KERNEL);
 		if (!nc)
 			goto bad;
 		if (cachep->shared) {
 			shared = alloc_arraycache(node,
 				cachep->shared * cachep->batchcount,
-				0xbaadf00d);
+				0xbaadf00d, GFP_KERNEL);
 			if (!shared) {
 				kfree(nc);
 				goto bad;
 			}
 		}
 		if (use_alien_caches) {
-			alien = alloc_alien_cache(node, cachep->limit);
+			alien = alloc_alien_cache(node, cachep->limit, GFP_KERNEL);
 			if (!alien) {
 				kfree(shared);
 				kfree(nc);
@@ -1399,10 +1399,9 @@ static void init_list(struct kmem_cache *cachep, struct kmem_list3 *list,
 {
 	struct kmem_list3 *ptr;
 
-	ptr = kmalloc_node(sizeof(struct kmem_list3), GFP_KERNEL, nodeid);
+	ptr = kmalloc_node(sizeof(struct kmem_list3), GFP_NOWAIT, nodeid);
 	BUG_ON(!ptr);
 
-	local_irq_disable();
 	memcpy(ptr, list, sizeof(struct kmem_list3));
 	/*
 	 * Do not assume that spinlocks can be initialized via memcpy:
@@ -1411,7 +1410,6 @@ static void init_list(struct kmem_cache *cachep, struct kmem_list3 *list,
 
 	MAKE_ALL_LISTS(cachep, ptr, nodeid);
 	cachep->nodelists[nodeid] = ptr;
-	local_irq_enable();
 }
 
 /*
@@ -1575,9 +1573,8 @@ void __init kmem_cache_init(void)
 	{
 		struct array_cache *ptr;
 
-		ptr = kmalloc(sizeof(struct arraycache_init), GFP_KERNEL);
+		ptr = kmalloc(sizeof(struct arraycache_init), GFP_NOWAIT);
 
-		local_irq_disable();
 		BUG_ON(cpu_cache_get(&cache_cache) != &initarray_cache.cache);
 		memcpy(ptr, cpu_cache_get(&cache_cache),
 		       sizeof(struct arraycache_init));
@@ -1587,11 +1584,9 @@ void __init kmem_cache_init(void)
 		spin_lock_init(&ptr->lock);
 
 		cache_cache.array[smp_processor_id()] = ptr;
-		local_irq_enable();
 
-		ptr = kmalloc(sizeof(struct arraycache_init), GFP_KERNEL);
+		ptr = kmalloc(sizeof(struct arraycache_init), GFP_NOWAIT);
 
-		local_irq_disable();
 		BUG_ON(cpu_cache_get(malloc_sizes[INDEX_AC].cs_cachep)
 		       != &initarray_generic.cache);
 		memcpy(ptr, cpu_cache_get(malloc_sizes[INDEX_AC].cs_cachep),
@@ -1603,7 +1598,6 @@ void __init kmem_cache_init(void)
 
 		malloc_sizes[INDEX_AC].cs_cachep->array[smp_processor_id()] =
 		    ptr;
-		local_irq_enable();
 	}
 	/* 5) Replace the bootstrap kmem_list3's */
 	{
@@ -1627,7 +1621,7 @@ void __init kmem_cache_init(void)
 		struct kmem_cache *cachep;
 		mutex_lock(&cache_chain_mutex);
 		list_for_each_entry(cachep, &cache_chain, next)
-			if (enable_cpucache(cachep))
+			if (enable_cpucache(cachep, GFP_NOWAIT))
 				BUG();
 		mutex_unlock(&cache_chain_mutex);
 	}
@@ -2064,10 +2058,10 @@ static size_t calculate_slab_order(struct kmem_cache *cachep,
 	return left_over;
 }
 
-static int __init_refok setup_cpu_cache(struct kmem_cache *cachep)
+static int __init_refok setup_cpu_cache(struct kmem_cache *cachep, gfp_t gfp)
 {
 	if (g_cpucache_up == FULL)
-		return enable_cpucache(cachep);
+		return enable_cpucache(cachep, gfp);
 
 	if (g_cpucache_up == NONE) {
 		/*
@@ -2089,7 +2083,7 @@ static int __init_refok setup_cpu_cache(struct kmem_cache *cachep)
 		g_cpucache_up = PARTIAL_AC;
 	} else {
 		cachep->array[smp_processor_id()] =
-			kmalloc(sizeof(struct arraycache_init), GFP_KERNEL);
+			kmalloc(sizeof(struct arraycache_init), gfp);
 
 		if (g_cpucache_up == PARTIAL_AC) {
 			set_up_list3s(cachep, SIZE_L3);
@@ -2153,6 +2147,7 @@ kmem_cache_create (const char *name, size_t size, size_t align,
 {
 	size_t left_over, slab_size, ralign;
 	struct kmem_cache *cachep = NULL, *pc;
+	gfp_t gfp;
 
 	/*
 	 * Sanity checks... these are all serious usage bugs.
@@ -2168,8 +2163,10 @@ kmem_cache_create (const char *name, size_t size, size_t align,
 	 * We use cache_chain_mutex to ensure a consistent view of
 	 * cpu_online_mask as well.  Please see cpuup_callback
 	 */
-	get_online_cpus();
-	mutex_lock(&cache_chain_mutex);
+	if (slab_is_available()) {
+		get_online_cpus();
+		mutex_lock(&cache_chain_mutex);
+	}
 
 	list_for_each_entry(pc, &cache_chain, next) {
 		char tmp;
@@ -2278,8 +2275,13 @@ kmem_cache_create (const char *name, size_t size, size_t align,
 	 */
 	align = ralign;
 
+	if (slab_is_available())
+		gfp = GFP_KERNEL;
+	else
+		gfp = GFP_NOWAIT;
+
 	/* Get cache's description obj. */
-	cachep = kmem_cache_zalloc(&cache_cache, GFP_KERNEL);
+	cachep = kmem_cache_zalloc(&cache_cache, gfp);
 	if (!cachep)
 		goto oops;
 
@@ -2382,7 +2384,7 @@ kmem_cache_create (const char *name, size_t size, size_t align,
 	cachep->ctor = ctor;
 	cachep->name = name;
 
-	if (setup_cpu_cache(cachep)) {
+	if (setup_cpu_cache(cachep, gfp)) {
 		__kmem_cache_destroy(cachep);
 		cachep = NULL;
 		goto oops;
@@ -2394,8 +2396,10 @@ oops:
 	if (!cachep && (flags & SLAB_PANIC))
 		panic("kmem_cache_create(): failed to create slab `%s'\n",
 		      name);
-	mutex_unlock(&cache_chain_mutex);
-	put_online_cpus();
+	if (slab_is_available()) {
+		mutex_unlock(&cache_chain_mutex);
+		put_online_cpus();
+	}
 	return cachep;
 }
 EXPORT_SYMBOL(kmem_cache_create);
@@ -3802,7 +3806,7 @@ EXPORT_SYMBOL_GPL(kmem_cache_name);
 /*
  * This initializes kmem_list3 or resizes various caches for all nodes.
  */
-static int alloc_kmemlist(struct kmem_cache *cachep)
+static int alloc_kmemlist(struct kmem_cache *cachep, gfp_t gfp)
 {
 	int node;
 	struct kmem_list3 *l3;
@@ -3812,7 +3816,7 @@ static int alloc_kmemlist(struct kmem_cache *cachep)
 	for_each_online_node(node) {
 
 		if (use_alien_caches) {
-			new_alien = alloc_alien_cache(node, cachep->limit);
+			new_alien = alloc_alien_cache(node, cachep->limit, gfp);
 			if (!new_alien)
 				goto fail;
 		}
@@ -3821,7 +3825,7 @@ static int alloc_kmemlist(struct kmem_cache *cachep)
 		if (cachep->shared) {
 			new_shared = alloc_arraycache(node,
 				cachep->shared*cachep->batchcount,
-					0xbaadf00d);
+					0xbaadf00d, gfp);
 			if (!new_shared) {
 				free_alien_cache(new_alien);
 				goto fail;
@@ -3850,7 +3854,7 @@ static int alloc_kmemlist(struct kmem_cache *cachep)
 			free_alien_cache(new_alien);
 			continue;
 		}
-		l3 = kmalloc_node(sizeof(struct kmem_list3), GFP_KERNEL, node);
+		l3 = kmalloc_node(sizeof(struct kmem_list3), gfp, node);
 		if (!l3) {
 			free_alien_cache(new_alien);
 			kfree(new_shared);
@@ -3906,18 +3910,18 @@ static void do_ccupdate_local(void *info)
 
 /* Always called with the cache_chain_mutex held */
 static int do_tune_cpucache(struct kmem_cache *cachep, int limit,
-				int batchcount, int shared)
+				int batchcount, int shared, gfp_t gfp)
 {
 	struct ccupdate_struct *new;
 	int i;
 
-	new = kzalloc(sizeof(*new), GFP_KERNEL);
+	new = kzalloc(sizeof(*new), gfp);
 	if (!new)
 		return -ENOMEM;
 
 	for_each_online_cpu(i) {
 		new->new[i] = alloc_arraycache(cpu_to_node(i), limit,
-						batchcount);
+						batchcount, gfp);
 		if (!new->new[i]) {
 			for (i--; i >= 0; i--)
 				kfree(new->new[i]);
@@ -3944,11 +3948,11 @@ static int do_tune_cpucache(struct kmem_cache *cachep, int limit,
 		kfree(ccold);
 	}
 	kfree(new);
-	return alloc_kmemlist(cachep);
+	return alloc_kmemlist(cachep, gfp);
 }
 
 /* Called with cache_chain_mutex held always */
-static int enable_cpucache(struct kmem_cache *cachep)
+static int enable_cpucache(struct kmem_cache *cachep, gfp_t gfp)
 {
 	int err;
 	int limit, shared;
@@ -3994,7 +3998,7 @@ static int enable_cpucache(struct kmem_cache *cachep)
 	if (limit > 32)
 		limit = 32;
 #endif
-	err = do_tune_cpucache(cachep, limit, (limit + 1) / 2, shared);
+	err = do_tune_cpucache(cachep, limit, (limit + 1) / 2, shared, gfp);
 	if (err)
 		printk(KERN_ERR "enable_cpucache failed for %s, error %d.\n",
 		       cachep->name, -err);
@@ -4300,7 +4304,8 @@ ssize_t slabinfo_write(struct file *file, const char __user * buffer,
 			res = 0;
 		} else {
 			res = do_tune_cpucache(cachep, limit,
-					       batchcount, shared);
+					       batchcount, shared,
+					       GFP_KERNEL);
 		}
 		break;
 	}