@@ -216,11 +216,11 @@ out:
 }
 
 /*
- * must be called with the defrag_inodes lock held
+ * pick the defraggable inode that we want; if it doesn't exist, we will
+ * get the next one.
  */
-struct inode_defrag *btrfs_find_defrag_inode(struct btrfs_fs_info *info,
-					     u64 root, u64 ino,
-					     struct rb_node **next)
+static struct inode_defrag *
+btrfs_pick_defrag_inode(struct btrfs_fs_info *fs_info, u64 root, u64 ino)
 {
 	struct inode_defrag *entry = NULL;
 	struct inode_defrag tmp;
@@ -231,7 +231,8 @@ struct inode_defrag *btrfs_find_defrag_inode(struct btrfs_fs_info *info,
 	tmp.ino = ino;
 	tmp.root = root;
 
-	p = info->defrag_inodes.rb_node;
+	spin_lock(&fs_info->defrag_inodes_lock);
+	p = fs_info->defrag_inodes.rb_node;
 	while (p) {
 		parent = p;
 		entry = rb_entry(parent, struct inode_defrag, rb_node);
@@ -242,52 +243,128 @@ struct inode_defrag *btrfs_find_defrag_inode(struct btrfs_fs_info *info,
 		else if (ret > 0)
 			p = parent->rb_right;
 		else
-			return entry;
+			goto out;
 	}
 
-	if (next) {
-		while (parent && __compare_inode_defrag(&tmp, entry) > 0) {
-			parent = rb_next(parent);
+	if (parent && __compare_inode_defrag(&tmp, entry) > 0) {
+		parent = rb_next(parent);
+		if (parent)
 			entry = rb_entry(parent, struct inode_defrag, rb_node);
-		}
-		*next = parent;
+		else
+			entry = NULL;
 	}
-	return NULL;
+out:
+	if (entry)
+		rb_erase(parent, &fs_info->defrag_inodes);
+	spin_unlock(&fs_info->defrag_inodes_lock);
+	return entry;
 }
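The new helper folds the lookup, the fallback to the next entry, and the removal from the tree into a single critical section, so callers no longer juggle the lock or a *next cursor themselves. As a rough illustration of the equal-or-next selection, here is a userspace sketch (not part of the patch; a sorted array stands in for the rbtree and all names are made up):

/*
 * Userspace sketch of the equal-or-next lookup done by
 * btrfs_pick_defrag_inode(). A sorted array stands in for the rbtree;
 * in the kernel the walk is O(log n) and the winning entry is erased
 * under fs_info->defrag_inodes_lock before it is returned.
 */
#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

struct defrag_key {
	uint64_t root;
	uint64_t ino;
};

static int key_cmp(const struct defrag_key *a, const struct defrag_key *b)
{
	if (a->root != b->root)
		return a->root < b->root ? -1 : 1;
	if (a->ino != b->ino)
		return a->ino < b->ino ? -1 : 1;
	return 0;
}

/* Return the exact match, or the next entry in (root, ino) order. */
static const struct defrag_key *pick(const struct defrag_key *entries,
				     size_t n, struct defrag_key want)
{
	for (size_t i = 0; i < n; i++)
		if (key_cmp(&entries[i], &want) >= 0)
			return &entries[i];
	return NULL;	/* ran off the end; the caller may rewind to 0 */
}

int main(void)
{
	const struct defrag_key tree[] = { { 5, 100 }, { 5, 300 }, { 7, 42 } };
	struct defrag_key want = { 5, 200 };
	const struct defrag_key *e = pick(tree, 3, want);

	if (e)
		printf("picked root %llu ino %llu\n",
		       (unsigned long long)e->root, (unsigned long long)e->ino);
	return 0;
}

For the target (5, 200) this picks root 5 ino 300, which is exactly the fallback the rb_next() path above provides.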
 
-/*
- * run through the list of inodes in the FS that need
- * defragging
- */
-int btrfs_run_defrag_inodes(struct btrfs_fs_info *fs_info)
+void btrfs_cleanup_defrag_inodes(struct btrfs_fs_info *fs_info)
 {
 	struct inode_defrag *defrag;
+	struct rb_node *node;
+
+	spin_lock(&fs_info->defrag_inodes_lock);
+	node = rb_first(&fs_info->defrag_inodes);
+	while (node) {
+		rb_erase(node, &fs_info->defrag_inodes);
+		defrag = rb_entry(node, struct inode_defrag, rb_node);
+		kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
+
+		if (need_resched()) {
+			spin_unlock(&fs_info->defrag_inodes_lock);
+			cond_resched();
+			spin_lock(&fs_info->defrag_inodes_lock);
+		}
+
+		node = rb_first(&fs_info->defrag_inodes);
+	}
+	spin_unlock(&fs_info->defrag_inodes_lock);
+}
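btrfs_cleanup_defrag_inodes() drains the whole tree under the spinlock but backs off through cond_resched() whenever the scheduler wants the CPU, so a large queue can be discarded at unmount without a long lock hold. A userspace sketch of the same drop-the-lock-to-yield pattern (illustration only, assumed names; a singly linked list and a pthread spinlock stand in for the rbtree and the kernel lock):

#include <pthread.h>
#include <sched.h>
#include <stdlib.h>

struct entry {
	struct entry *next;
};

static pthread_spinlock_t queue_lock;
static struct entry *queue_head;

static void cleanup_queue(void)
{
	struct entry *e;
	int drained = 0;

	pthread_spin_lock(&queue_lock);
	while ((e = queue_head) != NULL) {
		queue_head = e->next;		/* plays the role of rb_erase() */
		free(e);

		if (++drained % 64 == 0) {	/* stand-in for need_resched() */
			pthread_spin_unlock(&queue_lock);
			sched_yield();		/* stand-in for cond_resched() */
			pthread_spin_lock(&queue_lock);
		}
	}
	pthread_spin_unlock(&queue_lock);
}

int main(void)
{
	pthread_spin_init(&queue_lock, PTHREAD_PROCESS_PRIVATE);
	for (int i = 0; i < 200; i++) {		/* queue some dummy entries */
		struct entry *e = malloc(sizeof(*e));
		if (!e)
			break;
		e->next = queue_head;
		queue_head = e;
	}
	cleanup_queue();
	pthread_spin_destroy(&queue_lock);
	return 0;
}

Note that the head is re-read after the lock is re-taken, just as the kernel code calls rb_first() again, since new entries may have appeared while the lock was dropped.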
+
+#define BTRFS_DEFRAG_BATCH	1024
+
+static int __btrfs_run_defrag_inode(struct btrfs_fs_info *fs_info,
+				    struct inode_defrag *defrag)
+{
 	struct btrfs_root *inode_root;
 	struct inode *inode;
-	struct rb_node *n;
 	struct btrfs_key key;
 	struct btrfs_ioctl_defrag_range_args range;
-	u64 first_ino = 0;
-	u64 root_objectid = 0;
 	int num_defrag;
-	int defrag_batch = 1024;
 
+	/* get the inode */
+	key.objectid = defrag->root;
+	btrfs_set_key_type(&key, BTRFS_ROOT_ITEM_KEY);
+	key.offset = (u64)-1;
+	inode_root = btrfs_read_fs_root_no_name(fs_info, &key);
+	if (IS_ERR(inode_root)) {
+		kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
+		return PTR_ERR(inode_root);
+	}
+
+	key.objectid = defrag->ino;
+	btrfs_set_key_type(&key, BTRFS_INODE_ITEM_KEY);
+	key.offset = 0;
+	inode = btrfs_iget(fs_info->sb, &key, inode_root, NULL);
+	if (IS_ERR(inode)) {
+		kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
+		return PTR_ERR(inode);
+	}
+
+	/* do a chunk of defrag */
+	clear_bit(BTRFS_INODE_IN_DEFRAG, &BTRFS_I(inode)->runtime_flags);
 	memset(&range, 0, sizeof(range));
 	range.len = (u64)-1;
+	range.start = defrag->last_offset;
+	num_defrag = btrfs_defrag_file(inode, NULL, &range, defrag->transid,
+				       BTRFS_DEFRAG_BATCH);
+	/*
+	 * if we filled the whole defrag batch, there
+	 * must be more work to do. Queue this defrag
+	 * again
+	 */
+	if (num_defrag == BTRFS_DEFRAG_BATCH) {
+		defrag->last_offset = range.start;
+		btrfs_requeue_inode_defrag(inode, defrag);
+	} else if (defrag->last_offset && !defrag->cycled) {
+		/*
+		 * we didn't fill our defrag batch, but
+		 * we didn't start at zero. Make sure we loop
+		 * around to the start of the file.
+		 */
+		defrag->last_offset = 0;
+		defrag->cycled = 1;
+		btrfs_requeue_inode_defrag(inode, defrag);
+	} else {
+		kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
+	}
+
+	iput(inode);
+	return 0;
+}
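The requeue policy: a pass that fills the whole batch means there is more work, so the record goes back on the queue at the new offset; a short pass that started mid-file wraps to offset 0 exactly once (the cycled flag); anything else means the inode is done and the record is freed. A tiny simulation of that state machine (made-up numbers, not kernel code; the real range.start is advanced by btrfs_defrag_file() itself, which the `+= n` below only approximates):

#include <stdio.h>

#define BATCH 1024

struct defrag_state {
	long last_offset;	/* where the next pass starts */
	int cycled;		/* have we wrapped to offset 0 yet? */
};

/* Pretend pass: returns how many extents got defragged. */
static int defrag_pass(long start, long total)
{
	long left = total - start;

	if (left <= 0)
		return 0;
	return left >= BATCH ? BATCH : (int)left;
}

int main(void)
{
	struct defrag_state s = { .last_offset = 1500, .cycled = 0 };
	long total = 3000;	/* extents in the file */

	for (;;) {
		int n = defrag_pass(s.last_offset, total);

		printf("pass at %ld defragged %d extents\n", s.last_offset, n);
		if (n == BATCH) {
			s.last_offset += n;	/* batch filled: requeue */
		} else if (s.last_offset && !s.cycled) {
			s.last_offset = 0;	/* wrap around, once */
			s.cycled = 1;
		} else {
			break;			/* done: free the record */
		}
	}
	return 0;
}

Starting at 1500 of 3000, this runs one full batch, wraps to 0, and finishes after covering the head of the file, mirroring the cycled handling above.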
+
+/*
+ * run through the list of inodes in the FS that need
+ * defragging
+ */
+int btrfs_run_defrag_inodes(struct btrfs_fs_info *fs_info)
+{
+	struct inode_defrag *defrag;
+	u64 first_ino = 0;
+	u64 root_objectid = 0;
 
 	atomic_inc(&fs_info->defrag_running);
-	spin_lock(&fs_info->defrag_inodes_lock);
 	while(1) {
-		n = NULL;
+		if (!__need_auto_defrag(fs_info->tree_root))
+			break;
 
 		/* find an inode to defrag */
-		defrag = btrfs_find_defrag_inode(fs_info, root_objectid,
-						 first_ino, &n);
+		defrag = btrfs_pick_defrag_inode(fs_info, root_objectid,
+						 first_ino);
 		if (!defrag) {
-			if (n) {
-				defrag = rb_entry(n, struct inode_defrag,
-						  rb_node);
-			} else if (root_objectid || first_ino) {
+			if (root_objectid || first_ino) {
 				root_objectid = 0;
 				first_ino = 0;
 				continue;
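Note the cursor rewind just above: when the pick comes back empty but (root_objectid, first_ino) is non-zero, the loop resets the cursor and retries once so entries queued behind the cursor are not missed; only an empty pick at cursor zero ends the walk. A self-contained sketch of that behavior (illustration only, assumed names; a flat array with a done flag stands in for the shrinking rbtree):

#include <stdio.h>
#include <stdint.h>

struct dkey { uint64_t root, ino; int done; };

/* A fixed queue standing in for fs_info->defrag_inodes. */
static struct dkey queue[] = {
	{ 5, 100, 0 }, { 5, 300, 0 }, { 7, 42, 0 },
};

/* First not-yet-done entry at or after (root, ino), like the pick helper. */
static struct dkey *pick(uint64_t root, uint64_t ino)
{
	for (size_t i = 0; i < sizeof(queue) / sizeof(queue[0]); i++) {
		struct dkey *e = &queue[i];

		if (e->done)
			continue;
		if (e->root > root || (e->root == root && e->ino >= ino))
			return e;
	}
	return NULL;
}

int main(void)
{
	uint64_t root_objectid = 6, first_ino = 0;	/* cursor starts mid-queue */

	for (;;) {
		struct dkey *d = pick(root_objectid, first_ino);

		if (!d) {
			if (root_objectid || first_ino) {
				root_objectid = 0;	/* ran off the end: */
				first_ino = 0;		/* rewind once */
				continue;
			}
			break;	/* queue really is empty */
		}
		first_ino = d->ino + 1;
		root_objectid = d->root;
		d->done = 1;	/* stands in for __btrfs_run_defrag_inode() */
		printf("defragged root %llu ino %llu\n",
		       (unsigned long long)d->root, (unsigned long long)d->ino);
	}
	return 0;
}

Starting the cursor at root 6 processes (7, 42) first, then rewinds and catches (5, 100) and (5, 300) before terminating.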
@@ -296,71 +373,11 @@ int btrfs_run_defrag_inodes(struct btrfs_fs_info *fs_info)
 			}
 		}
 
-		/* remove it from the rbtree */
 		first_ino = defrag->ino + 1;
 		root_objectid = defrag->root;
-		rb_erase(&defrag->rb_node, &fs_info->defrag_inodes);
-
-		if (btrfs_fs_closing(fs_info))
-			goto next_free;
-
-		spin_unlock(&fs_info->defrag_inodes_lock);
-
-		/* get the inode */
-		key.objectid = defrag->root;
-		btrfs_set_key_type(&key, BTRFS_ROOT_ITEM_KEY);
-		key.offset = (u64)-1;
-		inode_root = btrfs_read_fs_root_no_name(fs_info, &key);
-		if (IS_ERR(inode_root))
-			goto next;
-
-		key.objectid = defrag->ino;
-		btrfs_set_key_type(&key, BTRFS_INODE_ITEM_KEY);
-		key.offset = 0;
-
-		inode = btrfs_iget(fs_info->sb, &key, inode_root, NULL);
-		if (IS_ERR(inode))
-			goto next;
-
-		/* do a chunk of defrag */
-		clear_bit(BTRFS_INODE_IN_DEFRAG, &BTRFS_I(inode)->runtime_flags);
-		range.start = defrag->last_offset;
-		num_defrag = btrfs_defrag_file(inode, NULL, &range, defrag->transid,
-					       defrag_batch);
-		/*
-		 * if we filled the whole defrag batch, there
-		 * must be more work to do. Queue this defrag
-		 * again
-		 */
-		if (num_defrag == defrag_batch) {
-			defrag->last_offset = range.start;
-			btrfs_requeue_inode_defrag(inode, defrag);
-			/*
-			 * we don't want to kfree defrag, we added it back to
-			 * the rbtree
-			 */
-			defrag = NULL;
-		} else if (defrag->last_offset && !defrag->cycled) {
-			/*
-			 * we didn't fill our defrag batch, but
-			 * we didn't start at zero. Make sure we loop
-			 * around to the start of the file.
-			 */
-			defrag->last_offset = 0;
-			defrag->cycled = 1;
-			btrfs_requeue_inode_defrag(inode, defrag);
-			defrag = NULL;
-		}
-
-		iput(inode);
-next:
-		spin_lock(&fs_info->defrag_inodes_lock);
-next_free:
-		if (defrag)
-			kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
+		__btrfs_run_defrag_inode(fs_info, defrag);
 	}
-	spin_unlock(&fs_info->defrag_inodes_lock);
-
 	atomic_dec(&fs_info->defrag_running);
 
 	/*