@@ -254,14 +254,14 @@ static void merge_cb(struct extent_io_tree *tree, struct extent_state *new,
  *
  * This should be called with the tree lock held.
  */
-static int merge_state(struct extent_io_tree *tree,
-		       struct extent_state *state)
+static void merge_state(struct extent_io_tree *tree,
+			struct extent_state *state)
 {
 	struct extent_state *other;
 	struct rb_node *other_node;
 
 	if (state->state & (EXTENT_IOBITS | EXTENT_BOUNDARY))
-		return 0;
+		return;
 
 	other_node = rb_prev(&state->rb_node);
 	if (other_node) {
@@ -287,19 +287,13 @@ static int merge_state(struct extent_io_tree *tree,
 			free_extent_state(other);
 		}
 	}
-
-	return 0;
 }
 
-static int set_state_cb(struct extent_io_tree *tree,
+static void set_state_cb(struct extent_io_tree *tree,
 			struct extent_state *state, int *bits)
 {
-	if (tree->ops && tree->ops->set_bit_hook) {
-		return tree->ops->set_bit_hook(tree->mapping->host,
-					       state, bits);
-	}
-
-	return 0;
+	if (tree->ops && tree->ops->set_bit_hook)
+		tree->ops->set_bit_hook(tree->mapping->host, state, bits);
 }
 
 static void clear_state_cb(struct extent_io_tree *tree,
@@ -309,6 +303,9 @@ static void clear_state_cb(struct extent_io_tree *tree,
 		tree->ops->clear_bit_hook(tree->mapping->host, state, bits);
 }
 
+static void set_state_bits(struct extent_io_tree *tree,
+			   struct extent_state *state, int *bits);
+
 /*
  * insert an extent_state struct into the tree. 'bits' are set on the
  * struct before it is inserted.
@@ -324,8 +321,6 @@ static int insert_state(struct extent_io_tree *tree,
 			int *bits)
 {
 	struct rb_node *node;
-	int bits_to_set = *bits & ~EXTENT_CTLBITS;
-	int ret;
 
 	if (end < start) {
 		printk(KERN_ERR "btrfs end < start %llu %llu\n",
@@ -335,13 +330,9 @@ static int insert_state(struct extent_io_tree *tree,
 	}
 	state->start = start;
 	state->end = end;
-	ret = set_state_cb(tree, state, bits);
-	if (ret)
-		return ret;
 
-	if (bits_to_set & EXTENT_DIRTY)
-		tree->dirty_bytes += end - start + 1;
-	state->state |= bits_to_set;
+	set_state_bits(tree, state, bits);
+
 	node = tree_insert(&tree->state, end, &state->rb_node);
 	if (node) {
 		struct extent_state *found;
@@ -357,13 +348,11 @@ static int insert_state(struct extent_io_tree *tree,
 	return 0;
 }
 
-static int split_cb(struct extent_io_tree *tree, struct extent_state *orig,
+static void split_cb(struct extent_io_tree *tree, struct extent_state *orig,
 		    u64 split)
 {
 	if (tree->ops && tree->ops->split_extent_hook)
-		return tree->ops->split_extent_hook(tree->mapping->host,
-						    orig, split);
-	return 0;
+		tree->ops->split_extent_hook(tree->mapping->host, orig, split);
 }
 
 /*
@@ -659,34 +648,25 @@ again:
 		if (start > end)
 			break;
 
-		if (need_resched()) {
-			spin_unlock(&tree->lock);
-			cond_resched();
-			spin_lock(&tree->lock);
-		}
+		cond_resched_lock(&tree->lock);
 	}
 out:
 	spin_unlock(&tree->lock);
 	return 0;
 }
 
-static int set_state_bits(struct extent_io_tree *tree,
+static void set_state_bits(struct extent_io_tree *tree,
 			  struct extent_state *state,
 			  int *bits)
 {
-	int ret;
 	int bits_to_set = *bits & ~EXTENT_CTLBITS;
 
-	ret = set_state_cb(tree, state, bits);
-	if (ret)
-		return ret;
+	set_state_cb(tree, state, bits);
 	if ((bits_to_set & EXTENT_DIRTY) && !(state->state & EXTENT_DIRTY)) {
 		u64 range = state->end - state->start + 1;
 		tree->dirty_bytes += range;
 	}
 	state->state |= bits_to_set;
-
-	return 0;
 }
 
 static void cache_state(struct extent_state *state,
@@ -779,9 +759,7 @@ hit_next:
 			goto out;
 		}
 
-		err = set_state_bits(tree, state, &bits);
-		if (err)
-			goto out;
+		set_state_bits(tree, state, &bits);
 
 		cache_state(state, cached_state);
 		merge_state(tree, state);
@@ -830,9 +808,7 @@ hit_next:
 		if (err)
 			goto out;
 		if (state->end <= end) {
-			err = set_state_bits(tree, state, &bits);
-			if (err)
-				goto out;
+			set_state_bits(tree, state, &bits);
 			cache_state(state, cached_state);
 			merge_state(tree, state);
 			if (last_end == (u64)-1)
@@ -893,11 +869,7 @@ hit_next:
 		err = split_state(tree, state, prealloc, end + 1);
 		BUG_ON(err == -EEXIST);
 
-		err = set_state_bits(tree, prealloc, &bits);
-		if (err) {
-			prealloc = NULL;
-			goto out;
-		}
+		set_state_bits(tree, prealloc, &bits);
 		cache_state(prealloc, cached_state);
 		merge_state(tree, prealloc);
 		prealloc = NULL;
@@ -1059,46 +1031,6 @@ static int set_range_writeback(struct extent_io_tree *tree, u64 start, u64 end)
 	return 0;
 }
 
-/*
- * find the first offset in the io tree with 'bits' set. zero is
- * returned if we find something, and *start_ret and *end_ret are
- * set to reflect the state struct that was found.
- *
- * If nothing was found, 1 is returned, < 0 on error
- */
-int find_first_extent_bit(struct extent_io_tree *tree, u64 start,
-			  u64 *start_ret, u64 *end_ret, int bits)
-{
-	struct rb_node *node;
-	struct extent_state *state;
-	int ret = 1;
-
-	spin_lock(&tree->lock);
-	/*
-	 * this search will find all the extents that end after
-	 * our range starts.
-	 */
-	node = tree_search(tree, start);
-	if (!node)
-		goto out;
-
-	while (1) {
-		state = rb_entry(node, struct extent_state, rb_node);
-		if (state->end >= start && (state->state & bits)) {
-			*start_ret = state->start;
-			*end_ret = state->end;
-			ret = 0;
-			break;
-		}
-		node = rb_next(node);
-		if (!node)
-			break;
-	}
-out:
-	spin_unlock(&tree->lock);
-	return ret;
-}
-
 /* find the first state struct with 'bits' set after 'start', and
  * return it. tree->lock must be held. NULL will returned if
  * nothing was found after 'start'
@@ -1130,6 +1062,30 @@ out:
 	return NULL;
 }
 
+/*
+ * find the first offset in the io tree with 'bits' set. zero is
+ * returned if we find something, and *start_ret and *end_ret are
+ * set to reflect the state struct that was found.
+ *
+ * If nothing was found, 1 is returned, < 0 on error
+ */
+int find_first_extent_bit(struct extent_io_tree *tree, u64 start,
+			  u64 *start_ret, u64 *end_ret, int bits)
+{
+	struct extent_state *state;
+	int ret = 1;
+
+	spin_lock(&tree->lock);
+	state = find_first_extent_bit_state(tree, start, bits);
+	if (state) {
+		*start_ret = state->start;
+		*end_ret = state->end;
+		ret = 0;
+	}
+	spin_unlock(&tree->lock);
+	return ret;
+}
+
 /*
  * find a contiguous range of bytes in the file marked as delalloc, not
  * more than 'max_bytes'. start and end are used to return the range,
@@ -2546,7 +2502,6 @@ int extent_write_full_page(struct extent_io_tree *tree, struct page *page,
 			  struct writeback_control *wbc)
 {
 	int ret;
-	struct address_space *mapping = page->mapping;
 	struct extent_page_data epd = {
 		.bio = NULL,
 		.tree = tree,
@@ -2554,17 +2509,9 @@ int extent_write_full_page(struct extent_io_tree *tree, struct page *page,
 		.extent_locked = 0,
 		.sync_io = wbc->sync_mode == WB_SYNC_ALL,
 	};
-	struct writeback_control wbc_writepages = {
-		.sync_mode	= wbc->sync_mode,
-		.nr_to_write	= 64,
-		.range_start	= page_offset(page) + PAGE_CACHE_SIZE,
-		.range_end	= (loff_t)-1,
-	};
 
 	ret = __extent_writepage(page, wbc, &epd);
 
-	extent_write_cache_pages(tree, mapping, &wbc_writepages,
-				 __extent_writepage, &epd, flush_write_bio);
 	flush_epd_write_bio(&epd);
 	return ret;
 }