@@ -30,7 +30,7 @@ struct debug_bucket {
 
 static struct debug_bucket obj_hash[ODEBUG_HASH_SIZE];
 
-static struct debug_obj obj_static_pool[ODEBUG_POOL_SIZE];
+static struct debug_obj obj_static_pool[ODEBUG_POOL_SIZE] __initdata;
 
 static DEFINE_SPINLOCK(pool_lock);
 
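The pool can become __initdata because, after this change, debug_objects_mem_init() replaces every static object with a kmem_cache-backed one, so nothing references obj_static_pool once init memory is freed. A minimal sketch of the annotation, using hypothetical names (bootstrap_table, use_bootstrap_table):

#include <linux/init.h>

/* Placed in .init.data; the kernel discards it after boot. */
static int bootstrap_table[32] __initdata;

static int __init use_bootstrap_table(void)
{
	/* Only __init code may touch __initdata objects. */
	return bootstrap_table[0];
}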
@@ -50,12 +50,23 @@ static int debug_objects_enabled __read_mostly
 
 static struct debug_obj_descr *descr_test __read_mostly;
 
+static void free_obj_work(struct work_struct *work);
+static DECLARE_WORK(debug_obj_work, free_obj_work);
+
 static int __init enable_object_debug(char *str)
 {
 	debug_objects_enabled = 1;
 	return 0;
 }
+
+static int __init disable_object_debug(char *str)
+{
+	debug_objects_enabled = 0;
+	return 0;
+}
+
 early_param("debug_objects", enable_object_debug);
+early_param("no_debug_objects", disable_object_debug);
 
 static const char *obj_states[ODEBUG_STATE_MAX] = {
 	[ODEBUG_STATE_NONE] = "none",
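early_param() handlers run while the boot command line is parsed, well before normal initcalls, which is why both handlers are __init and do nothing but flip a flag. A sketch of the same enable/disable pair for a hypothetical my_feature flag:

#include <linux/init.h>

static int my_feature_enabled __read_mostly;

static int __init enable_my_feature(char *str)
{
	my_feature_enabled = 1;
	return 0;
}
early_param("my_feature", enable_my_feature);

static int __init disable_my_feature(char *str)
{
	my_feature_enabled = 0;
	return 0;
}
early_param("no_my_feature", disable_my_feature);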
@@ -146,25 +157,51 @@ alloc_object(void *addr, struct debug_bucket *b, struct debug_obj_descr *descr)
 }
 
 /*
- * Put the object back into the pool or give it back to kmem_cache:
+ * workqueue function to free objects.
  */
-static void free_object(struct debug_obj *obj)
+static void free_obj_work(struct work_struct *work)
 {
-	unsigned long idx = (unsigned long)(obj - obj_static_pool);
+	struct debug_obj *obj;
 	unsigned long flags;
 
-	if (obj_pool_free < ODEBUG_POOL_SIZE || idx < ODEBUG_POOL_SIZE) {
-		spin_lock_irqsave(&pool_lock, flags);
-		hlist_add_head(&obj->node, &obj_pool);
-		obj_pool_free++;
-		obj_pool_used--;
-		spin_unlock_irqrestore(&pool_lock, flags);
-	} else {
-		spin_lock_irqsave(&pool_lock, flags);
-		obj_pool_used--;
+	spin_lock_irqsave(&pool_lock, flags);
+	while (obj_pool_free > ODEBUG_POOL_SIZE) {
+		obj = hlist_entry(obj_pool.first, typeof(*obj), node);
+		hlist_del(&obj->node);
+		obj_pool_free--;
+		/*
+		 * We release pool_lock across kmem_cache_free() to
+		 * avoid contention on pool_lock.
+		 */
 		spin_unlock_irqrestore(&pool_lock, flags);
 		kmem_cache_free(obj_cache, obj);
+		spin_lock_irqsave(&pool_lock, flags);
 	}
+	spin_unlock_irqrestore(&pool_lock, flags);
+}
+
+/*
+ * Put the object back into the pool and schedule work to free objects
+ * if necessary.
+ */
+static void free_object(struct debug_obj *obj)
+{
+	unsigned long flags;
+	int sched = 0;
+
+	spin_lock_irqsave(&pool_lock, flags);
+	/*
+	 * schedule work when the pool is filled and the cache is
+	 * initialized:
+	 */
+	if (obj_pool_free > ODEBUG_POOL_SIZE && obj_cache)
+		sched = !work_pending(&debug_obj_work);
+	hlist_add_head(&obj->node, &obj_pool);
+	obj_pool_free++;
+	obj_pool_used--;
+	spin_unlock_irqrestore(&pool_lock, flags);
+	if (sched)
+		schedule_work(&debug_obj_work);
 }
 
 /*
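free_object() must stay usable from atomic context, so it never calls kmem_cache_free() directly; it queues debug_obj_work instead (schedule_work() is safe in atomic context), and the worker drops pool_lock around each kmem_cache_free() to keep lock hold times short. A condensed sketch of that producer/worker pattern, with assumed names (struct item, put_item, reclaim_fn, POOL_MAX):

#include <linux/workqueue.h>
#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/slab.h>

#define POOL_MAX 64

struct item { struct list_head node; };

static LIST_HEAD(pool);
static DEFINE_SPINLOCK(lock);
static int pool_cnt;

static void reclaim_fn(struct work_struct *work)
{
	struct item *it;
	unsigned long flags;

	spin_lock_irqsave(&lock, flags);
	while (pool_cnt > POOL_MAX) {
		it = list_first_entry(&pool, struct item, node);
		list_del(&it->node);
		pool_cnt--;
		/* drop the lock around kfree() to bound hold time */
		spin_unlock_irqrestore(&lock, flags);
		kfree(it);
		spin_lock_irqsave(&lock, flags);
	}
	spin_unlock_irqrestore(&lock, flags);
}
static DECLARE_WORK(reclaim_work, reclaim_fn);

/* callable from atomic context: only queues the work item */
static void put_item(struct item *it)
{
	unsigned long flags;
	int sched;

	spin_lock_irqsave(&lock, flags);
	list_add(&it->node, &pool);
	sched = ++pool_cnt > POOL_MAX && !work_pending(&reclaim_work);
	spin_unlock_irqrestore(&lock, flags);
	if (sched)
		schedule_work(&reclaim_work);
}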
@@ -875,6 +912,63 @@ void __init debug_objects_early_init(void)
 		hlist_add_head(&obj_static_pool[i].node, &obj_pool);
 }
 
+/*
+ * Convert the statically allocated objects to dynamic ones:
+ */
+static int __init debug_objects_replace_static_objects(void)
+{
+	struct debug_bucket *db = obj_hash;
+	struct hlist_node *node, *tmp;
+	struct debug_obj *obj, *new;
+	HLIST_HEAD(objects);
+	int i, cnt = 0;
+
+	for (i = 0; i < ODEBUG_POOL_SIZE; i++) {
+		obj = kmem_cache_zalloc(obj_cache, GFP_KERNEL);
+		if (!obj)
+			goto free;
+		hlist_add_head(&obj->node, &objects);
+	}
+
+	/*
+	 * When debug_objects_mem_init() is called we know that only
+	 * one CPU is up, so disabling interrupts is enough
+	 * protection. This avoids the lockdep hell of lock ordering.
+	 */
+	local_irq_disable();
+
+	/* Remove the statically allocated objects from the pool */
+	hlist_for_each_entry_safe(obj, node, tmp, &obj_pool, node)
+		hlist_del(&obj->node);
+	/* Move the allocated objects to the pool */
+	hlist_move_list(&objects, &obj_pool);
+
+	/* Replace the active object references */
+	for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) {
+		hlist_move_list(&db->list, &objects);
+
+		hlist_for_each_entry(obj, node, &objects, node) {
+			new = hlist_entry(obj_pool.first, typeof(*obj), node);
+			hlist_del(&new->node);
+			/* copy object data */
+			*new = *obj;
+			hlist_add_head(&new->node, &db->list);
+			cnt++;
+		}
+	}
+
+	printk(KERN_DEBUG "ODEBUG: %d of %d active objects replaced\n", cnt,
+	       obj_pool_used);
+	local_irq_enable();
+	return 0;
+free:
+	hlist_for_each_entry_safe(obj, node, tmp, &objects, node) {
+		hlist_del(&obj->node);
+		kmem_cache_free(obj_cache, obj);
+	}
+	return -ENOMEM;
+}
+
 /*
  * Called after the kmem_caches are functional to setup a dedicated
  * cache pool, which has the SLAB_DEBUG_OBJECTS flag set. This flag
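The replacement leans on hlist_move_list(), which transplants an entire list onto a new head in O(1) and leaves the old head empty; each hash bucket is drained into a scratch list and rebuilt one dynamic object at a time. A minimal illustration of the primitive, with hypothetical heads (src, dst):

#include <linux/list.h>
#include <linux/bug.h>

static HLIST_HEAD(src);
static HLIST_HEAD(dst);

static void transplant(void)
{
	/* dst takes over all entries of src; src ends up empty */
	hlist_move_list(&src, &dst);
	WARN_ON(!hlist_empty(&src));
}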
@@ -890,8 +984,11 @@ void __init debug_objects_mem_init(void)
 				      sizeof (struct debug_obj), 0,
 				      SLAB_DEBUG_OBJECTS, NULL);
 
-	if (!obj_cache)
+	if (!obj_cache || debug_objects_replace_static_objects()) {
 		debug_objects_enabled = 0;
-	else
+		if (obj_cache)
+			kmem_cache_destroy(obj_cache);
+		printk(KERN_WARNING "ODEBUG: out of memory.\n");
+	} else
 		debug_objects_selftest();
 }
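On failure the facility is switched off wholesale rather than left half-working: if either the cache creation or the static-object replacement fails, debug_objects_enabled is cleared and any half-created cache is destroyed. The same fail-safe idiom in isolation, with hypothetical names (my_cache, my_populate_pool, my_feature_init):

#include <linux/slab.h>
#include <linux/init.h>

static struct kmem_cache *my_cache;
static int my_feature_enabled = 1;

static int __init my_populate_pool(void)
{
	/* would preallocate objects; returns -ENOMEM on failure */
	return 0;
}

static int __init my_feature_init(void)
{
	my_cache = kmem_cache_create("my_objs", 64, 0, 0, NULL);
	if (!my_cache || my_populate_pool()) {
		my_feature_enabled = 0;
		if (my_cache)
			kmem_cache_destroy(my_cache);
		pr_warn("my_feature: out of memory, disabled\n");
		return -ENOMEM;
	}
	return 0;
}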