@@ -319,9 +319,11 @@ static void io_ctl_drop_pages(struct io_ctl *io_ctl)
 	io_ctl_unmap_page(io_ctl);
 
 	for (i = 0; i < io_ctl->num_pages; i++) {
-		ClearPageChecked(io_ctl->pages[i]);
-		unlock_page(io_ctl->pages[i]);
-		page_cache_release(io_ctl->pages[i]);
+		if (io_ctl->pages[i]) {
+			ClearPageChecked(io_ctl->pages[i]);
+			unlock_page(io_ctl->pages[i]);
+			page_cache_release(io_ctl->pages[i]);
+		}
 	}
 }
 
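The hunk above makes the teardown loop skip NULL slots in io_ctl->pages[]. That matters when the setup path grabs pages one by one and fails partway through: the remaining slots were never filled, and calling unlock_page()/page_cache_release() on NULL would crash. A minimal userspace sketch of the same pattern, with hypothetical names standing in for the page-cache calls:

#include <stdlib.h>

#define NR_PAGES 8

static void *pages[NR_PAGES];	/* slots past a failed grab stay NULL */

static void release_page(void *p)	/* stand-in for the real release call */
{
	free(p);
}

static void drop_pages(void)
{
	int i;

	for (i = 0; i < NR_PAGES; i++) {
		if (!pages[i])
			continue;	/* setup never got this far */
		release_page(pages[i]);
		pages[i] = NULL;
	}
}

int main(void)
{
	pages[0] = malloc(16);	/* simulate a partial setup */
	drop_pages();		/* must not touch the NULL slots */
	return 0;
}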
@@ -635,7 +637,10 @@ int __load_free_space_cache(struct btrfs_root *root, struct inode *inode,
 	if (!num_entries)
 		return 0;
 
-	io_ctl_init(&io_ctl, inode, root);
+	ret = io_ctl_init(&io_ctl, inode, root);
+	if (ret)
+		return ret;
+
 	ret = readahead_cache(inode);
 	if (ret)
 		goto out;
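With this change __load_free_space_cache() stops ignoring io_ctl_init()'s return value; the write-out path below gains the same check. Presumably the init step allocates memory for the page array and can fail with -ENOMEM. A sketch of that caller convention under that assumption (struct io_state and io_init() are made up for illustration):

#include <errno.h>
#include <stdlib.h>

struct io_state {
	void **pages;
	int num_pages;
};

/* Fallible init: nothing else in *io is valid if this fails. */
static int io_init(struct io_state *io, int num_pages)
{
	io->pages = calloc(num_pages, sizeof(*io->pages));
	if (!io->pages)
		return -ENOMEM;
	io->num_pages = num_pages;
	return 0;
}

int main(void)
{
	struct io_state io;
	int ret;

	ret = io_init(&io, 8);
	if (ret)
		return 1;	/* bail out early, as the patched callers do */
	free(io.pages);
	return 0;
}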
@@ -838,7 +843,7 @@ int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode,
 	struct io_ctl io_ctl;
 	struct list_head bitmap_list;
 	struct btrfs_key key;
-	u64 start, end, len;
+	u64 start, extent_start, extent_end, len;
 	int entries = 0;
 	int bitmaps = 0;
 	int ret;
@@ -849,7 +854,9 @@ int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode,
 	if (!i_size_read(inode))
 		return -1;
 
-	io_ctl_init(&io_ctl, inode, root);
+	ret = io_ctl_init(&io_ctl, inode, root);
+	if (ret)
+		return -1;
 
 	/* Get the cluster for this block_group if it exists */
 	if (block_group && !list_empty(&block_group->cluster_list))
@@ -857,25 +864,12 @@ int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode,
                                      struct btrfs_free_cluster,
                                      block_group_list);
 
-	/*
-	 * We shouldn't have switched the pinned extents yet so this is the
-	 * right one
-	 */
-	unpin = root->fs_info->pinned_extents;
-
 	/* Lock all pages first so we can lock the extent safely. */
 	io_ctl_prepare_pages(&io_ctl, inode, 0);
 
 	lock_extent_bits(&BTRFS_I(inode)->io_tree, 0, i_size_read(inode) - 1,
 			 0, &cached_state, GFP_NOFS);
 
-	/*
-	 * When searching for pinned extents, we need to start at our start
-	 * offset.
-	 */
-	if (block_group)
-		start = block_group->key.objectid;
-
 	node = rb_first(&ctl->free_space_offset);
 	if (!node && cluster) {
 		node = rb_first(&cluster->root);
@@ -918,9 +912,20 @@ int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode,
 	 * We want to add any pinned extents to our free space cache
 	 * so we don't leak the space
 	 */
+
+	/*
+	 * We shouldn't have switched the pinned extents yet so this is the
+	 * right one
+	 */
+	unpin = root->fs_info->pinned_extents;
+
+	if (block_group)
+		start = block_group->key.objectid;
+
 	while (block_group && (start < block_group->key.objectid +
 	       block_group->key.offset)) {
-		ret = find_first_extent_bit(unpin, start, &start, &end,
+		ret = find_first_extent_bit(unpin, start,
+					    &extent_start, &extent_end,
 					    EXTENT_DIRTY);
 		if (ret) {
 			ret = 0;
@@ -928,20 +933,21 @@ int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode,
 		}
 
 		/* This pinned extent is out of our range */
-		if (start >= block_group->key.objectid +
+		if (extent_start >= block_group->key.objectid +
 		    block_group->key.offset)
 			break;
 
-		len = block_group->key.objectid +
-			block_group->key.offset - start;
-		len = min(len, end + 1 - start);
+		extent_start = max(extent_start, start);
+		extent_end = min(block_group->key.objectid +
+				 block_group->key.offset, extent_end + 1);
+		len = extent_end - extent_start;
 
 		entries++;
-		ret = io_ctl_add_entry(&io_ctl, start, len, NULL);
+		ret = io_ctl_add_entry(&io_ctl, extent_start, len, NULL);
 		if (ret)
 			goto out_nospc;
 
-		start = end + 1;
+		start = extent_end;
 	}
 
 	/* Write out the bitmaps */
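The arithmetic above is the substance of the fix: find_first_extent_bit() reports an inclusive [extent_start, extent_end] range that can begin before the search cursor or run past the block group, and the old code recorded the unclamped start. The new code clamps the range to the intersection first, converting the inclusive end to an exclusive one with extent_end + 1. A standalone check with made-up numbers (block group [100, 200), pinned extent [90, 150]):

#include <stdint.h>
#include <stdio.h>

static uint64_t max64(uint64_t a, uint64_t b) { return a > b ? a : b; }
static uint64_t min64(uint64_t a, uint64_t b) { return a < b ? a : b; }

int main(void)
{
	uint64_t bg_start = 100, bg_len = 100;		/* block group [100, 200) */
	uint64_t start = bg_start;			/* search cursor */
	uint64_t extent_start = 90, extent_end = 150;	/* inclusive hit */

	extent_start = max64(extent_start, start);		/* 100 */
	extent_end = min64(bg_start + bg_len, extent_end + 1);	/* 151 */

	/* the old code would have recorded start=90, len=61 here,
	 * i.e. 10 bytes that belong to the previous block group */
	printf("entry: start=%llu len=%llu\n",
	       (unsigned long long)extent_start,
	       (unsigned long long)(extent_end - extent_start)); /* 100, 51 */
	return 0;
}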
@@ -2574,17 +2580,57 @@ void btrfs_init_free_cluster(struct btrfs_free_cluster *cluster)
 	cluster->block_group = NULL;
 }
 
-int btrfs_trim_block_group(struct btrfs_block_group_cache *block_group,
-			   u64 *trimmed, u64 start, u64 end, u64 minlen)
+static int do_trimming(struct btrfs_block_group_cache *block_group,
+		       u64 *total_trimmed, u64 start, u64 bytes,
+		       u64 reserved_start, u64 reserved_bytes)
 {
-	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
-	struct btrfs_free_space *entry = NULL;
+	struct btrfs_space_info *space_info = block_group->space_info;
 	struct btrfs_fs_info *fs_info = block_group->fs_info;
-	u64 bytes = 0;
-	u64 actually_trimmed;
-	int ret = 0;
+	int ret;
+	int update = 0;
+	u64 trimmed = 0;
 
-	*trimmed = 0;
+	spin_lock(&space_info->lock);
+	spin_lock(&block_group->lock);
+	if (!block_group->ro) {
+		block_group->reserved += reserved_bytes;
+		space_info->bytes_reserved += reserved_bytes;
+		update = 1;
+	}
+	spin_unlock(&block_group->lock);
+	spin_unlock(&space_info->lock);
+
+	ret = btrfs_error_discard_extent(fs_info->extent_root,
+					 start, bytes, &trimmed);
+	if (!ret)
+		*total_trimmed += trimmed;
+
+	btrfs_add_free_space(block_group, reserved_start, reserved_bytes);
+
+	if (update) {
+		spin_lock(&space_info->lock);
+		spin_lock(&block_group->lock);
+		if (block_group->ro)
+			space_info->bytes_readonly += reserved_bytes;
+		block_group->reserved -= reserved_bytes;
+		space_info->bytes_reserved -= reserved_bytes;
+		spin_unlock(&space_info->lock);
+		spin_unlock(&block_group->lock);
+	}
+
+	return ret;
+}
+
+static int trim_no_bitmap(struct btrfs_block_group_cache *block_group,
+			  u64 *total_trimmed, u64 start, u64 end, u64 minlen)
+{
+	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
+	struct btrfs_free_space *entry;
+	struct rb_node *node;
+	int ret = 0;
+	u64 extent_start;
+	u64 extent_bytes;
+	u64 bytes;
 
 	while (start < end) {
 		spin_lock(&ctl->tree_lock);
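do_trimming() above is the common tail shared by both trim loops that follow: it parks the range in the reserved counters so the allocator will not hand it out while the discard is in flight, calls btrfs_error_discard_extent(), then returns the space through btrfs_add_free_space(). A single-threaded toy model of that lifecycle (the real code brackets each counter update with the space_info and block_group spinlocks, and skips the reservation for read-only groups):

#include <stdint.h>
#include <stdio.h>

struct group {
	uint64_t free;		/* bytes the allocator may hand out */
	uint64_t reserved;	/* bytes parked while a discard runs */
};

/* Stand-in for btrfs_error_discard_extent(): reports how much the
 * device actually trimmed. */
static int discard(uint64_t start, uint64_t bytes, uint64_t *trimmed)
{
	(void)start;
	*trimmed = bytes;
	return 0;
}

static int trim_range(struct group *g, uint64_t *total_trimmed,
		      uint64_t start, uint64_t bytes)
{
	uint64_t trimmed = 0;
	int ret;

	g->reserved += bytes;	/* keep the allocator off this range */
	ret = discard(start, bytes, &trimmed);
	if (!ret)
		*total_trimmed += trimmed;
	g->free += bytes;	/* give the space back */
	g->reserved -= bytes;
	return ret;
}

int main(void)
{
	struct group g = { 0, 0 };
	uint64_t total = 0;

	trim_range(&g, &total, 1 << 20, 64 << 10);
	printf("trimmed %llu bytes\n", (unsigned long long)total);
	return 0;
}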
@@ -2595,81 +2641,118 @@ int btrfs_trim_block_group(struct btrfs_block_group_cache *block_group,
 		}
 
 		entry = tree_search_offset(ctl, start, 0, 1);
-		if (!entry)
-			entry = tree_search_offset(ctl,
-						   offset_to_bitmap(ctl, start),
-						   1, 1);
-
-		if (!entry || entry->offset >= end) {
+		if (!entry) {
 			spin_unlock(&ctl->tree_lock);
 			break;
 		}
 
-		if (entry->bitmap) {
-			ret = search_bitmap(ctl, entry, &start, &bytes);
-			if (!ret) {
-				if (start >= end) {
-					spin_unlock(&ctl->tree_lock);
-					break;
-				}
-				bytes = min(bytes, end - start);
-				bitmap_clear_bits(ctl, entry, start, bytes);
-				if (entry->bytes == 0)
-					free_bitmap(ctl, entry);
-			} else {
-				start = entry->offset + BITS_PER_BITMAP *
-					block_group->sectorsize;
+		/* skip bitmaps */
+		while (entry->bitmap) {
+			node = rb_next(&entry->offset_index);
+			if (!node) {
 				spin_unlock(&ctl->tree_lock);
-				ret = 0;
-				continue;
+				goto out;
 			}
-		} else {
-			start = entry->offset;
-			bytes = min(entry->bytes, end - start);
-			unlink_free_space(ctl, entry);
-			kmem_cache_free(btrfs_free_space_cachep, entry);
+			entry = rb_entry(node, struct btrfs_free_space,
+					 offset_index);
+		}
+
+		if (entry->offset >= end) {
+			spin_unlock(&ctl->tree_lock);
+			break;
 		}
 
+		extent_start = entry->offset;
+		extent_bytes = entry->bytes;
+		start = max(start, extent_start);
+		bytes = min(extent_start + extent_bytes, end) - start;
+		if (bytes < minlen) {
+			spin_unlock(&ctl->tree_lock);
+			goto next;
+		}
+
+		unlink_free_space(ctl, entry);
+		kmem_cache_free(btrfs_free_space_cachep, entry);
+
 		spin_unlock(&ctl->tree_lock);
 
-		if (bytes >= minlen) {
-			struct btrfs_space_info *space_info;
-			int update = 0;
-
-			space_info = block_group->space_info;
-			spin_lock(&space_info->lock);
-			spin_lock(&block_group->lock);
-			if (!block_group->ro) {
-				block_group->reserved += bytes;
-				space_info->bytes_reserved += bytes;
-				update = 1;
-			}
-			spin_unlock(&block_group->lock);
-			spin_unlock(&space_info->lock);
-
-			ret = btrfs_error_discard_extent(fs_info->extent_root,
-							 start,
-							 bytes,
-							 &actually_trimmed);
-
-			btrfs_add_free_space(block_group, start, bytes);
-			if (update) {
-				spin_lock(&space_info->lock);
-				spin_lock(&block_group->lock);
-				if (block_group->ro)
-					space_info->bytes_readonly += bytes;
-				block_group->reserved -= bytes;
-				space_info->bytes_reserved -= bytes;
-				spin_unlock(&space_info->lock);
-				spin_unlock(&block_group->lock);
-			}
+		ret = do_trimming(block_group, total_trimmed, start, bytes,
+				  extent_start, extent_bytes);
+		if (ret)
+			break;
+next:
+		start += bytes;
 
-			if (ret)
-				break;
-			*trimmed += actually_trimmed;
+		if (fatal_signal_pending(current)) {
+			ret = -ERESTARTSYS;
+			break;
+		}
+
+		cond_resched();
+	}
+out:
+	return ret;
+}
+
+static int trim_bitmaps(struct btrfs_block_group_cache *block_group,
+			u64 *total_trimmed, u64 start, u64 end, u64 minlen)
+{
+	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
+	struct btrfs_free_space *entry;
+	int ret = 0;
+	int ret2;
+	u64 bytes;
+	u64 offset = offset_to_bitmap(ctl, start);
+
+	while (offset < end) {
+		bool next_bitmap = false;
+
+		spin_lock(&ctl->tree_lock);
+
+		if (ctl->free_space < minlen) {
+			spin_unlock(&ctl->tree_lock);
+			break;
+		}
+
+		entry = tree_search_offset(ctl, offset, 1, 0);
+		if (!entry) {
+			spin_unlock(&ctl->tree_lock);
+			next_bitmap = true;
+			goto next;
+		}
+
+		bytes = minlen;
+		ret2 = search_bitmap(ctl, entry, &start, &bytes);
+		if (ret2 || start >= end) {
+			spin_unlock(&ctl->tree_lock);
+			next_bitmap = true;
+			goto next;
+		}
+
+		bytes = min(bytes, end - start);
+		if (bytes < minlen) {
+			spin_unlock(&ctl->tree_lock);
+			goto next;
+		}
+
+		bitmap_clear_bits(ctl, entry, start, bytes);
+		if (entry->bytes == 0)
+			free_bitmap(ctl, entry);
+
+		spin_unlock(&ctl->tree_lock);
+
+		ret = do_trimming(block_group, total_trimmed, start, bytes,
+				  start, bytes);
+		if (ret)
+			break;
+next:
+		if (next_bitmap) {
+			offset += BITS_PER_BITMAP * ctl->unit;
+		} else {
+			start += bytes;
+			if (start >= offset + BITS_PER_BITMAP * ctl->unit)
+				offset += BITS_PER_BITMAP * ctl->unit;
 		}
-		start += bytes;
-		bytes = 0;
 
 		if (fatal_signal_pending(current)) {
 			ret = -ERESTARTSYS;
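The next: bookkeeping above steps through bitmap windows: each bitmap entry covers BITS_PER_BITMAP * ctl->unit bytes, so on a miss the loop jumps a whole window, while after a successful trim it advances by the trimmed length and only moves offset once start crosses the window boundary. Illustrative arithmetic with assumed values (4 KiB unit, one page of bits per bitmap, so a 128 MiB window):

#include <stdint.h>
#include <stdio.h>

#define BITS_PER_BITMAP	(4096 * 8)	/* assumed: one page of bits */

int main(void)
{
	uint64_t unit = 4096;				/* bytes per bit */
	uint64_t window = (uint64_t)BITS_PER_BITMAP * unit; /* 128 MiB */
	uint64_t offset = 0;				/* current bitmap base */
	uint64_t start = 0;
	uint64_t bytes = window - 4096;			/* what we just trimmed */

	start += bytes;
	if (start >= offset + window)
		offset += window;	/* not taken: 4 KiB still left here */
	printf("start=%llu offset=%llu\n",
	       (unsigned long long)start, (unsigned long long)offset);

	start += 4096;			/* trim the tail of the window */
	if (start >= offset + window)
		offset += window;	/* now step to the next bitmap */
	printf("start=%llu offset=%llu\n",
	       (unsigned long long)start, (unsigned long long)offset);
	return 0;
}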
@@ -2682,6 +2765,22 @@ int btrfs_trim_block_group(struct btrfs_block_group_cache *block_group,
 	return ret;
 }
 
+int btrfs_trim_block_group(struct btrfs_block_group_cache *block_group,
+			   u64 *trimmed, u64 start, u64 end, u64 minlen)
+{
+	int ret;
+
+	*trimmed = 0;
+
+	ret = trim_no_bitmap(block_group, trimmed, start, end, minlen);
+	if (ret)
+		return ret;
+
+	ret = trim_bitmaps(block_group, trimmed, start, end, minlen);
+
+	return ret;
+}
+
 /*
  * Find the left-most item in the cache tree, and then return the
  * smallest inode number in the item.