@@ -24,12 +24,62 @@
 
 static struct kmem_cache *extent_state_cache;
 static struct kmem_cache *extent_buffer_cache;
 
+#ifdef CONFIG_BTRFS_DEBUG
 static LIST_HEAD(buffers);
 static LIST_HEAD(states);
 
-#define LEAK_DEBUG 0
-#if LEAK_DEBUG
 static DEFINE_SPINLOCK(leak_lock);
+
+static inline
+void btrfs_leak_debug_add(struct list_head *new, struct list_head *head)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&leak_lock, flags);
+	list_add(new, head);
+	spin_unlock_irqrestore(&leak_lock, flags);
+}
+
+static inline
+void btrfs_leak_debug_del(struct list_head *entry)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&leak_lock, flags);
+	list_del(entry);
+	spin_unlock_irqrestore(&leak_lock, flags);
+}
+
+static inline
+void btrfs_leak_debug_check(void)
+{
+	struct extent_state *state;
+	struct extent_buffer *eb;
+
+	while (!list_empty(&states)) {
+		state = list_entry(states.next, struct extent_state, leak_list);
+		printk(KERN_ERR "btrfs state leak: start %llu end %llu "
+		       "state %lu in tree %p refs %d\n",
+		       (unsigned long long)state->start,
+		       (unsigned long long)state->end,
+		       state->state, state->tree, atomic_read(&state->refs));
+		list_del(&state->leak_list);
+		kmem_cache_free(extent_state_cache, state);
+	}
+
+	while (!list_empty(&buffers)) {
+		eb = list_entry(buffers.next, struct extent_buffer, leak_list);
+		printk(KERN_ERR "btrfs buffer leak start %llu len %lu "
+		       "refs %d\n", (unsigned long long)eb->start,
+		       eb->len, atomic_read(&eb->refs));
+		list_del(&eb->leak_list);
+		kmem_cache_free(extent_buffer_cache, eb);
+	}
+}
+#else
+#define btrfs_leak_debug_add(new, head)	do {} while (0)
+#define btrfs_leak_debug_del(entry)	do {} while (0)
+#define btrfs_leak_debug_check()	do {} while (0)
 #endif
 
 #define BUFFER_LRU_MAX 64
@@ -84,29 +134,7 @@ free_state_cache:
 
 void extent_io_exit(void)
 {
-	struct extent_state *state;
-	struct extent_buffer *eb;
-
-	while (!list_empty(&states)) {
-		state = list_entry(states.next, struct extent_state, leak_list);
-		printk(KERN_ERR "btrfs state leak: start %llu end %llu "
-		       "state %lu in tree %p refs %d\n",
-		       (unsigned long long)state->start,
-		       (unsigned long long)state->end,
-		       state->state, state->tree, atomic_read(&state->refs));
-		list_del(&state->leak_list);
-		kmem_cache_free(extent_state_cache, state);
-
-	}
-
-	while (!list_empty(&buffers)) {
-		eb = list_entry(buffers.next, struct extent_buffer, leak_list);
-		printk(KERN_ERR "btrfs buffer leak start %llu len %lu "
-		       "refs %d\n", (unsigned long long)eb->start,
-		       eb->len, atomic_read(&eb->refs));
-		list_del(&eb->leak_list);
-		kmem_cache_free(extent_buffer_cache, eb);
-	}
+	btrfs_leak_debug_check();
 
 	/*
 	 * Make sure all delayed rcu free are flushed before we
@@ -134,9 +162,6 @@ void extent_io_tree_init(struct extent_io_tree *tree,
 static struct extent_state *alloc_extent_state(gfp_t mask)
 {
 	struct extent_state *state;
-#if LEAK_DEBUG
-	unsigned long flags;
-#endif
 
 	state = kmem_cache_alloc(extent_state_cache, mask);
 	if (!state)
@@ -144,11 +169,7 @@ static struct extent_state *alloc_extent_state(gfp_t mask)
 	state->state = 0;
 	state->private = 0;
 	state->tree = NULL;
-#if LEAK_DEBUG
-	spin_lock_irqsave(&leak_lock, flags);
-	list_add(&state->leak_list, &states);
-	spin_unlock_irqrestore(&leak_lock, flags);
-#endif
+	btrfs_leak_debug_add(&state->leak_list, &states);
	atomic_set(&state->refs, 1);
 	init_waitqueue_head(&state->wq);
 	trace_alloc_extent_state(state, mask, _RET_IP_);
@@ -160,15 +181,8 @@ void free_extent_state(struct extent_state *state)
 	if (!state)
 		return;
 	if (atomic_dec_and_test(&state->refs)) {
-#if LEAK_DEBUG
-		unsigned long flags;
-#endif
 		WARN_ON(state->tree);
-#if LEAK_DEBUG
-		spin_lock_irqsave(&leak_lock, flags);
-		list_del(&state->leak_list);
-		spin_unlock_irqrestore(&leak_lock, flags);
-#endif
+		btrfs_leak_debug_del(&state->leak_list);
 		trace_free_extent_state(state, _RET_IP_);
 		kmem_cache_free(extent_state_cache, state);
 	}
@@ -4065,12 +4079,7 @@ out:
 
 static void __free_extent_buffer(struct extent_buffer *eb)
 {
-#if LEAK_DEBUG
-	unsigned long flags;
-	spin_lock_irqsave(&leak_lock, flags);
-	list_del(&eb->leak_list);
-	spin_unlock_irqrestore(&leak_lock, flags);
-#endif
+	btrfs_leak_debug_del(&eb->leak_list);
 	kmem_cache_free(extent_buffer_cache, eb);
 }
 
@@ -4080,9 +4089,6 @@ static struct extent_buffer *__alloc_extent_buffer(struct extent_io_tree *tree,
 					  gfp_t mask)
 {
 	struct extent_buffer *eb = NULL;
-#if LEAK_DEBUG
-	unsigned long flags;
-#endif
 
 	eb = kmem_cache_zalloc(extent_buffer_cache, mask);
 	if (eb == NULL)
@@ -4102,11 +4108,8 @@ static struct extent_buffer *__alloc_extent_buffer(struct extent_io_tree *tree,
 	init_waitqueue_head(&eb->write_lock_wq);
 	init_waitqueue_head(&eb->read_lock_wq);
 
-#if LEAK_DEBUG
-	spin_lock_irqsave(&leak_lock, flags);
-	list_add(&eb->leak_list, &buffers);
-	spin_unlock_irqrestore(&leak_lock, flags);
-#endif
+	btrfs_leak_debug_add(&eb->leak_list, &buffers);
+
 	spin_lock_init(&eb->refs_lock);
 	atomic_set(&eb->refs, 1);
 	atomic_set(&eb->io_pages, 0);
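
The pattern this patch settles on (real inline helpers when CONFIG_BTRFS_DEBUG is set, do {} while (0) no-op macros otherwise) keeps every call site free of #ifdef clutter while letting the compiler drop the bookkeeping entirely from non-debug builds. Below is a minimal, self-contained sketch of the same idiom in userspace C; the names (DEBUG_LEAK, leak_add, and so on) are illustrative only, not kernel code. Build with -DDEBUG_LEAK to enable the checks.

#include <stdio.h>

#ifdef DEBUG_LEAK
static int live_objects;

/* Real helpers: track how many objects are currently live. */
static inline void leak_add(void)
{
	live_objects++;
}

static inline void leak_del(void)
{
	live_objects--;
}

static inline void leak_check(void)
{
	if (live_objects)
		fprintf(stderr, "leak: %d object(s) still live\n",
			live_objects);
}
#else
/* With debugging off, every call compiles away to nothing. */
#define leak_add()	do {} while (0)
#define leak_del()	do {} while (0)
#define leak_check()	do {} while (0)
#endif

int main(void)
{
	leak_add();	/* object allocated */
	leak_del();	/* object freed */
	leak_check();	/* silent: the accounting is balanced */
	return 0;
}

The do {} while (0) form is what makes the disabled variants safe: each call still expands to exactly one statement, so code like "if (err) leak_check();" parses and behaves identically whether or not the config option is set.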