@@ -104,7 +104,7 @@ void extent_io_tree_init(struct extent_io_tree *tree,
 			  struct address_space *mapping, gfp_t mask)
 {
 	tree->state = RB_ROOT;
-	tree->buffer = RB_ROOT;
+	INIT_RADIX_TREE(&tree->buffer, GFP_ATOMIC);
 	tree->ops = NULL;
 	tree->dirty_bytes = 0;
 	spin_lock_init(&tree->lock);
@@ -235,50 +235,6 @@ static inline struct rb_node *tree_search(struct extent_io_tree *tree,
 	return ret;
 }
 
-static struct extent_buffer *buffer_tree_insert(struct extent_io_tree *tree,
-					   u64 offset, struct rb_node *node)
-{
-	struct rb_root *root = &tree->buffer;
-	struct rb_node **p = &root->rb_node;
-	struct rb_node *parent = NULL;
-	struct extent_buffer *eb;
-
-	while (*p) {
-		parent = *p;
-		eb = rb_entry(parent, struct extent_buffer, rb_node);
-
-		if (offset < eb->start)
-			p = &(*p)->rb_left;
-		else if (offset > eb->start)
-			p = &(*p)->rb_right;
-		else
-			return eb;
-	}
-
-	rb_link_node(node, parent, p);
-	rb_insert_color(node, root);
-	return NULL;
-}
-
-static struct extent_buffer *buffer_search(struct extent_io_tree *tree,
-					   u64 offset)
-{
-	struct rb_root *root = &tree->buffer;
-	struct rb_node *n = root->rb_node;
-	struct extent_buffer *eb;
-
-	while (n) {
-		eb = rb_entry(n, struct extent_buffer, rb_node);
-		if (offset < eb->start)
-			n = n->rb_left;
-		else if (offset > eb->start)
-			n = n->rb_right;
-		else
-			return eb;
-	}
-	return NULL;
-}
-
 static void merge_cb(struct extent_io_tree *tree, struct extent_state *new,
 		     struct extent_state *other)
 {
@@ -3082,6 +3038,7 @@ static struct extent_buffer *__alloc_extent_buffer(struct extent_io_tree *tree,
 	eb->len = len;
 	spin_lock_init(&eb->lock);
 	init_waitqueue_head(&eb->lock_wq);
+	INIT_RCU_HEAD(&eb->rcu_head);
 
 #if LEAK_DEBUG
 	spin_lock_irqsave(&leak_lock, flags);
@@ -3150,16 +3107,16 @@ struct extent_buffer *alloc_extent_buffer(struct extent_io_tree *tree,
 	struct page *p;
 	struct address_space *mapping = tree->mapping;
 	int uptodate = 1;
+	int ret;
 
-	spin_lock(&tree->buffer_lock);
-	eb = buffer_search(tree, start);
-	if (eb) {
-		atomic_inc(&eb->refs);
-		spin_unlock(&tree->buffer_lock);
+	rcu_read_lock();
+	eb = radix_tree_lookup(&tree->buffer, start >> PAGE_CACHE_SHIFT);
+	if (eb && atomic_inc_not_zero(&eb->refs)) {
+		rcu_read_unlock();
 		mark_page_accessed(eb->first_page);
 		return eb;
 	}
-	spin_unlock(&tree->buffer_lock);
+	rcu_read_unlock();
 
 	eb = __alloc_extent_buffer(tree, start, len, mask);
 	if (!eb)
@@ -3198,17 +3155,25 @@ struct extent_buffer *alloc_extent_buffer(struct extent_io_tree *tree,
 	if (uptodate)
 		set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
 
+	ret = radix_tree_preload(GFP_NOFS & ~__GFP_HIGHMEM);
+	if (ret)
+		goto free_eb;
+
 	spin_lock(&tree->buffer_lock);
-	exists = buffer_tree_insert(tree, start, &eb->rb_node);
-	if (exists) {
+	ret = radix_tree_insert(&tree->buffer, start >> PAGE_CACHE_SHIFT, eb);
+	if (ret == -EEXIST) {
+		exists = radix_tree_lookup(&tree->buffer,
+						start >> PAGE_CACHE_SHIFT);
 		/* add one reference for the caller */
 		atomic_inc(&exists->refs);
 		spin_unlock(&tree->buffer_lock);
+		radix_tree_preload_end();
 		goto free_eb;
 	}
 	/* add one reference for the tree */
 	atomic_inc(&eb->refs);
 	spin_unlock(&tree->buffer_lock);
+	radix_tree_preload_end();
 	return eb;
 
 free_eb:
@@ -3224,16 +3189,16 @@ struct extent_buffer *find_extent_buffer(struct extent_io_tree *tree,
 {
 	struct extent_buffer *eb;
 
-	spin_lock(&tree->buffer_lock);
-	eb = buffer_search(tree, start);
-	if (eb)
-		atomic_inc(&eb->refs);
-	spin_unlock(&tree->buffer_lock);
-
-	if (eb)
+	rcu_read_lock();
+	eb = radix_tree_lookup(&tree->buffer, start >> PAGE_CACHE_SHIFT);
+	if (eb && atomic_inc_not_zero(&eb->refs)) {
+		rcu_read_unlock();
 		mark_page_accessed(eb->first_page);
+		return eb;
+	}
+	rcu_read_unlock();
 
-	return eb;
+	return NULL;
 }
 
 void free_extent_buffer(struct extent_buffer *eb)
@@ -3863,6 +3828,14 @@ void memmove_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
 	}
 }
 
+static inline void btrfs_release_extent_buffer_rcu(struct rcu_head *head)
+{
+	struct extent_buffer *eb =
+			container_of(head, struct extent_buffer, rcu_head);
+
+	btrfs_release_extent_buffer(eb);
+}
+
 int try_release_extent_buffer(struct extent_io_tree *tree, struct page *page)
 {
 	u64 start = page_offset(page);
@@ -3870,23 +3843,30 @@ int try_release_extent_buffer(struct extent_io_tree *tree, struct page *page)
 	int ret = 1;
 
 	spin_lock(&tree->buffer_lock);
-	eb = buffer_search(tree, start);
+	eb = radix_tree_lookup(&tree->buffer, start >> PAGE_CACHE_SHIFT);
 	if (!eb)
 		goto out;
 
-	if (atomic_read(&eb->refs) > 1) {
+	if (test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags)) {
 		ret = 0;
 		goto out;
 	}
-	if (test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags)) {
+
+	/*
+	 * set @eb->refs to 0 if it is already 1, and then release the @eb.
+	 * Or go back.
+	 */
+	if (atomic_cmpxchg(&eb->refs, 1, 0) != 1) {
 		ret = 0;
 		goto out;
 	}
 
-	rb_erase(&eb->rb_node, &tree->buffer);
-	/* at this point we can safely release the extent buffer */
-	btrfs_release_extent_buffer(eb);
+	radix_tree_delete(&tree->buffer, start >> PAGE_CACHE_SHIFT);
 out:
 	spin_unlock(&tree->buffer_lock);
+
+	/* at this point we can safely release the extent buffer */
+	if (atomic_read(&eb->refs) == 0)
+		call_rcu(&eb->rcu_head, btrfs_release_extent_buffer_rcu);
 	return ret;
 }