Browse Source

Btrfs: Add leak debugging for extent_buffer and extent_state

This also fixes one leak around the super block when failing to mount the
FS.

Signed-off-by: Chris Mason <chris.mason@oracle.com>
Chris Mason 17 years ago
parent
commit
2d2ae54797
3 changed files with 30 additions and 3 deletions
  1. 2 0
      fs/btrfs/disk-io.c
  2. 26 2
      fs/btrfs/extent_io.c
  3. 2 1
      fs/btrfs/extent_io.h

+ 2 - 0
fs/btrfs/disk-io.c

@@ -915,12 +915,14 @@ fail_tree_root:
 	free_extent_buffer(tree_root->node);
 fail_sb_buffer:
 	free_extent_buffer(fs_info->sb_buffer);
+	extent_io_tree_empty_lru(&BTRFS_I(fs_info->btree_inode)->io_tree);
 fail_iput:
 	iput(fs_info->btree_inode);
 fail:
 	close_all_devices(fs_info);
 	kfree(extent_root);
 	kfree(tree_root);
+	bdi_destroy(&fs_info->bdi);
 	kfree(fs_info);
 	return ERR_PTR(err);
 }

+ 26 - 2
fs/btrfs/extent_io.c

@@ -26,6 +26,7 @@ static struct kmem_cache *extent_buffer_cache;
 
 static LIST_HEAD(buffers);
 static LIST_HEAD(states);
+static spinlock_t leak_lock = SPIN_LOCK_UNLOCKED;
 
 #define BUFFER_LRU_MAX 64
 
@@ -64,15 +65,22 @@ free_state_cache:
 void extent_io_exit(void)
 {
 	struct extent_state *state;
+	struct extent_buffer *eb;
 
 	while (!list_empty(&states)) {
-		state = list_entry(states.next, struct extent_state, list);
+		state = list_entry(states.next, struct extent_state, leak_list);
 		printk("state leak: start %Lu end %Lu state %lu in tree %p refs %d\n", state->start, state->end, state->state, state->tree, atomic_read(&state->refs));
-		list_del(&state->list);
+		list_del(&state->leak_list);
 		kmem_cache_free(extent_state_cache, state);
 
 	}
 
+	while (!list_empty(&buffers)) {
+		eb = list_entry(buffers.next, struct extent_buffer, leak_list);
+		printk("buffer leak start %Lu len %lu refs %d\n", eb->start, eb->len, atomic_read(&eb->refs));
+		list_del(&eb->leak_list);
+		kmem_cache_free(extent_buffer_cache, eb);
+	}
 	if (extent_state_cache)
 		kmem_cache_destroy(extent_state_cache);
 	if (extent_buffer_cache)
@@ -109,6 +117,7 @@ EXPORT_SYMBOL(extent_io_tree_empty_lru);
 struct extent_state *alloc_extent_state(gfp_t mask)
 {
 	struct extent_state *state;
+	unsigned long flags;
 
 	state = kmem_cache_alloc(extent_state_cache, mask);
 	if (!state || IS_ERR(state))
@@ -116,6 +125,9 @@ struct extent_state *alloc_extent_state(gfp_t mask)
 	state->state = 0;
 	state->private = 0;
 	state->tree = NULL;
+	spin_lock_irqsave(&leak_lock, flags);
+	list_add(&state->leak_list, &states);
+	spin_unlock_irqrestore(&leak_lock, flags);
 
 	atomic_set(&state->refs, 1);
 	init_waitqueue_head(&state->wq);
@@ -128,7 +140,11 @@ void free_extent_state(struct extent_state *state)
 	if (!state)
 		return;
 	if (atomic_dec_and_test(&state->refs)) {
+		unsigned long flags;
 		WARN_ON(state->tree);
+		spin_lock_irqsave(&leak_lock, flags);
+		list_del(&state->leak_list);
+		spin_unlock_irqrestore(&leak_lock, flags);
 		kmem_cache_free(extent_state_cache, state);
 	}
 }
@@ -2582,6 +2598,7 @@ static struct extent_buffer *__alloc_extent_buffer(struct extent_io_tree *tree,
 						   gfp_t mask)
 {
 	struct extent_buffer *eb = NULL;
+	unsigned long flags;
 
 	spin_lock(&tree->lru_lock);
 	eb = find_lru(tree, start, len);
@@ -2594,6 +2611,9 @@ static struct extent_buffer *__alloc_extent_buffer(struct extent_io_tree *tree,
 	INIT_LIST_HEAD(&eb->lru);
 	eb->start = start;
 	eb->len = len;
+	spin_lock_irqsave(&leak_lock, flags);
+	list_add(&eb->leak_list, &buffers);
+	spin_unlock_irqrestore(&leak_lock, flags);
 	atomic_set(&eb->refs, 1);
 
 	return eb;
@@ -2601,6 +2621,10 @@ static struct extent_buffer *__alloc_extent_buffer(struct extent_io_tree *tree,
 
 static void __free_extent_buffer(struct extent_buffer *eb)
 {
+	unsigned long flags;
+	spin_lock_irqsave(&leak_lock, flags);
+	list_del(&eb->leak_list);
+	spin_unlock_irqrestore(&leak_lock, flags);
 	kmem_cache_free(extent_buffer_cache, eb);
 }
 

+ 2 - 1
fs/btrfs/extent_io.h

@@ -66,7 +66,7 @@ struct extent_state {
 	/* for use by the FS */
 	u64 private;
 
-	struct list_head list;
+	struct list_head leak_list;
 };
 
 
 struct extent_buffer {
 struct extent_buffer {
@@ -80,6 +80,7 @@ struct extent_buffer {
 	struct list_head lru;
 	atomic_t refs;
 	int flags;
+	struct list_head leak_list;
};
 
 struct extent_map_tree;