@@ -4486,6 +4486,23 @@ static void mark_extent_buffer_accessed(struct extent_buffer *eb)
 	}
 }
 
+struct extent_buffer *find_extent_buffer(struct extent_io_tree *tree,
+					 u64 start)
+{
+	struct extent_buffer *eb;
+
+	rcu_read_lock();
+	eb = radix_tree_lookup(&tree->buffer, start >> PAGE_CACHE_SHIFT);
+	if (eb && atomic_inc_not_zero(&eb->refs)) {
+		rcu_read_unlock();
+		mark_extent_buffer_accessed(eb);
+		return eb;
+	}
+	rcu_read_unlock();
+
+	return NULL;
+}
+
 struct extent_buffer *alloc_extent_buffer(struct extent_io_tree *tree,
 					  u64 start, unsigned long len)
 {
@@ -4499,14 +4516,10 @@ struct extent_buffer *alloc_extent_buffer(struct extent_io_tree *tree,
 	int uptodate = 1;
 	int ret;
 
-	rcu_read_lock();
-	eb = radix_tree_lookup(&tree->buffer, start >> PAGE_CACHE_SHIFT);
-	if (eb && atomic_inc_not_zero(&eb->refs)) {
-		rcu_read_unlock();
-		mark_extent_buffer_accessed(eb);
+
+	eb = find_extent_buffer(tree, start);
+	if (eb)
 		return eb;
-	}
-	rcu_read_unlock();
 
 	eb = __alloc_extent_buffer(tree, start, len, GFP_NOFS);
 	if (!eb)
@@ -4565,24 +4578,17 @@ again:
 
 	spin_lock(&tree->buffer_lock);
 	ret = radix_tree_insert(&tree->buffer, start >> PAGE_CACHE_SHIFT, eb);
+	spin_unlock(&tree->buffer_lock);
+	radix_tree_preload_end();
 	if (ret == -EEXIST) {
-		exists = radix_tree_lookup(&tree->buffer,
-					   start >> PAGE_CACHE_SHIFT);
-		if (!atomic_inc_not_zero(&exists->refs)) {
-			spin_unlock(&tree->buffer_lock);
-			radix_tree_preload_end();
-			exists = NULL;
+		exists = find_extent_buffer(tree, start);
+		if (exists)
+			goto free_eb;
+		else
 			goto again;
-		}
-		spin_unlock(&tree->buffer_lock);
-		radix_tree_preload_end();
-		mark_extent_buffer_accessed(exists);
-		goto free_eb;
 	}
 	/* add one reference for the tree */
 	check_buffer_tree_ref(eb);
-	spin_unlock(&tree->buffer_lock);
-	radix_tree_preload_end();
 
 	/*
 	 * there is a race where release page may have
@@ -4613,23 +4619,6 @@ free_eb:
 	return exists;
 }
 
-struct extent_buffer *find_extent_buffer(struct extent_io_tree *tree,
-					  u64 start, unsigned long len)
-{
-	struct extent_buffer *eb;
-
-	rcu_read_lock();
-	eb = radix_tree_lookup(&tree->buffer, start >> PAGE_CACHE_SHIFT);
-	if (eb && atomic_inc_not_zero(&eb->refs)) {
-		rcu_read_unlock();
-		mark_extent_buffer_accessed(eb);
-		return eb;
-	}
-	rcu_read_unlock();
-
-	return NULL;
-}
-
 static inline void btrfs_release_extent_buffer_rcu(struct rcu_head *head)
 {
 	struct extent_buffer *eb =