Bläddra i källkod

Btrfs: Add BH_Defrag to mark buffers that are in need of defragging

This allows the tree walking code to defrag only the newly allocated
buffers, it seems to be a good balance between perfect defragging and the
performance hit of repeatedly reallocating blocks.

Signed-off-by: Chris Mason <chris.mason@oracle.com>
Chris Mason 17 år sedan
förälder
incheckning
f2183bde1a
4 ändrade filer med 18 tillägg och 11 borttagningar
  1. 12 10
      fs/btrfs/ctree.c
  2. 2 0
      fs/btrfs/disk-io.h
  3. 2 0
      fs/btrfs/extent-tree.c
  4. 2 1
      fs/btrfs/tree-defrag.c

+ 12 - 10
fs/btrfs/ctree.c

@@ -175,6 +175,7 @@ int btrfs_realloc_node(struct btrfs_trans_handle *trans,
 	int end_slot;
 	int i;
 	int err = 0;
+	int parent_level;
 
 	if (trans->transaction != root->fs_info->running_transaction) {
 		printk(KERN_CRIT "trans %Lu running %Lu\n", trans->transid,
@@ -188,6 +189,7 @@ int btrfs_realloc_node(struct btrfs_trans_handle *trans,
 	}
 	parent_node = btrfs_buffer_node(parent);
 	parent_nritems = btrfs_header_nritems(&parent_node->header);
+	parent_level = btrfs_header_level(&parent_node->header);
 
 	start_slot = 0;
 	end_slot = parent_nritems;
@@ -215,13 +217,16 @@ int btrfs_realloc_node(struct btrfs_trans_handle *trans,
 
 		cur_bh = btrfs_find_tree_block(root, blocknr);
 		if (!cur_bh || !buffer_uptodate(cur_bh) ||
-		    buffer_locked(cur_bh)) {
+		    buffer_locked(cur_bh) || !buffer_defrag(cur_bh)) {
 			if (cache_only) {
				brelse(cur_bh);
 				continue;
 			}
-			brelse(cur_bh);
-			cur_bh = read_tree_block(root, blocknr);
+			if (!cur_bh || !buffer_uptodate(cur_bh) ||
+			    buffer_locked(cur_bh)) {
+				brelse(cur_bh);
+				cur_bh = read_tree_block(root, blocknr);
+			}
 		}
 		if (search_start == 0)
 			search_start = last_block & ~((u64)65535);
@@ -232,6 +237,9 @@ int btrfs_realloc_node(struct btrfs_trans_handle *trans,
 		if (err)
 			break;
 		search_start = bh_blocknr(tmp_bh);
+		*last_ret = search_start;
+		if (parent_level == 1)
+			clear_buffer_defrag(tmp_bh);
 		brelse(tmp_bh);
 	}
 	return err;
@@ -811,16 +819,10 @@ static void reada_for_search(struct btrfs_root *root, struct btrfs_path *path,
 			clear_radix_bit(&found, blocknr);
 			if (nread > 32)
 				continue;
-			if (direction > 0 && cluster_start <= blocknr &&
-			    cluster_start + 8 > blocknr) {
-				cluster_start = blocknr;
+			if (close_blocks(cluster_start, blocknr)) {
 				readahead_tree_block(root, blocknr);
 				nread++;
-			} else if (direction < 0 && cluster_start >= blocknr &&
-				   blocknr + 8 > cluster_start) {
 				cluster_start = blocknr;
-				readahead_tree_block(root, blocknr);
-				nread++;
 			}
 		}
 	}

+ 2 - 0
fs/btrfs/disk-io.h

@@ -25,8 +25,10 @@
 
 enum btrfs_bh_state_bits {
 	BH_Checked = BH_PrivateStart,
+	BH_Defrag,
 };
 BUFFER_FNS(Checked, checked);
+BUFFER_FNS(Defrag, defrag);
 
 static inline struct btrfs_node *btrfs_buffer_node(struct buffer_head *bh)
 {

+ 2 - 0
fs/btrfs/extent-tree.c

@@ -1015,6 +1015,7 @@ check_failed:
 	ins->objectid = search_start;
 	ins->offset = 0;
 	start_found = 0;
+	path->reada = 1;
 
 	ret = btrfs_search_slot(trans, root, ins, path, 0, 0);
 	if (ret < 0)
@@ -1264,6 +1265,7 @@ struct buffer_head *btrfs_alloc_free_block(struct btrfs_trans_handle *trans,
 	WARN_ON(buffer_dirty(buf));
 	set_buffer_uptodate(buf);
 	set_buffer_checked(buf);
+	set_buffer_defrag(buf);
 	set_radix_bit(&trans->transaction->dirty_pages, buf->b_page->index);
 	return buf;
 }

+ 2 - 1
fs/btrfs/tree-defrag.c

@@ -86,7 +86,7 @@ static int defrag_walk_down(struct btrfs_trans_handle *trans,
 		if (cache_only) {
 			next = btrfs_find_tree_block(root, blocknr);
 			if (!next || !buffer_uptodate(next) ||
-			   buffer_locked(next)) {
+			   buffer_locked(next) || !buffer_defrag(next)) {
 				brelse(next);
 				path->slots[*level]++;
 				continue;
@@ -142,6 +142,7 @@ static int defrag_walk_up(struct btrfs_trans_handle *trans,
 			root->defrag_level = i;
 			return 0;
 		} else {
+			clear_buffer_defrag(path->nodes[*level]);
 			btrfs_block_release(root, path->nodes[*level]);
 			path->nodes[*level] = NULL;
 			*level = i + 1;