@@ -68,6 +68,7 @@ static int fill_pool(void)
 {
 	gfp_t gfp = GFP_ATOMIC | __GFP_NORETRY | __GFP_NOWARN;
 	struct debug_obj *new;
+	unsigned long flags;
 
 	if (likely(obj_pool_free >= ODEBUG_POOL_MIN_LEVEL))
 		return obj_pool_free;
@@ -81,10 +82,10 @@ static int fill_pool(void)
 		if (!new)
 			return obj_pool_free;
 
-		spin_lock(&pool_lock);
+		spin_lock_irqsave(&pool_lock, flags);
 		hlist_add_head(&new->node, &obj_pool);
 		obj_pool_free++;
-		spin_unlock(&pool_lock);
+		spin_unlock_irqrestore(&pool_lock, flags);
 	}
 	return obj_pool_free;
 }
@@ -110,16 +111,13 @@ static struct debug_obj *lookup_object(void *addr, struct debug_bucket *b)
 }
 
 /*
- * Allocate a new object. If the pool is empty and no refill possible,
- * switch off the debugger.
+ * Allocate a new object. If the pool is empty, switch off the debugger.
  */
 static struct debug_obj *
 alloc_object(void *addr, struct debug_bucket *b, struct debug_obj_descr *descr)
 {
 	struct debug_obj *obj = NULL;
-	int retry = 0;
 
-repeat:
 	spin_lock(&pool_lock);
 	if (obj_pool.first) {
 		obj = hlist_entry(obj_pool.first, typeof(*obj), node);
@@ -141,9 +139,6 @@ repeat:
 	}
 	spin_unlock(&pool_lock);
 
-	if (fill_pool() && !obj && !retry++)
-		goto repeat;
-
 	return obj;
 }
 
@@ -261,6 +256,8 @@ __debug_object_init(void *addr, struct debug_obj_descr *descr, int onstack)
 	struct debug_obj *obj;
 	unsigned long flags;
 
+	fill_pool();
+
 	db = get_bucket((unsigned long) addr);
 
 	spin_lock_irqsave(&db->lock, flags);
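
---

The hunks above make two related changes: pool_lock is now taken with the
irq-saving lock variants, presumably so fill_pool() is correct regardless of
the caller's interrupt state, and the pool refill is done once at the top of
__debug_object_init(), before the bucket lock is acquired, instead of through
the retry loop that used to live in alloc_object(). A minimal sketch of the
irqsave pattern involved (hypothetical example, not part of the patch):

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_lock);

/*
 * Hypothetical illustration: spin_lock_irqsave() stashes the current
 * interrupt state in 'flags' and disables interrupts, and
 * spin_unlock_irqrestore() puts that state back. The function thus
 * never re-enables interrupts behind the back of a caller that already
 * had them disabled, which a plain spin_lock()/spin_unlock() pair on a
 * lock also taken with interrupts off cannot guarantee.
 */
static void example_update(void)
{
	unsigned long flags;

	spin_lock_irqsave(&example_lock, flags);
	/* ... modify data shared with interrupt context ... */
	spin_unlock_irqrestore(&example_lock, flags);
}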