@@ -1918,15 +1918,29 @@ void btrfs_delalloc_free_space(struct btrfs_root *root, struct inode *inode,
 	spin_unlock(&info->lock);
 }
 
+static void force_metadata_allocation(struct btrfs_fs_info *info)
+{
+	struct list_head *head = &info->space_info;
+	struct btrfs_space_info *found;
+
+	rcu_read_lock();
+	list_for_each_entry_rcu(found, head, list) {
+		if (found->flags & BTRFS_BLOCK_GROUP_METADATA)
+			found->force_alloc = 1;
+	}
+	rcu_read_unlock();
+}
+
 static int do_chunk_alloc(struct btrfs_trans_handle *trans,
 			  struct btrfs_root *extent_root, u64 alloc_bytes,
 			  u64 flags, int force)
 {
 	struct btrfs_space_info *space_info;
+	struct btrfs_fs_info *fs_info = extent_root->fs_info;
 	u64 thresh;
 	int ret = 0;
 
-	mutex_lock(&extent_root->fs_info->chunk_mutex);
+	mutex_lock(&fs_info->chunk_mutex);
 
 	flags = btrfs_reduce_alloc_profile(extent_root, flags);
 
@@ -1958,6 +1972,18 @@ static int do_chunk_alloc(struct btrfs_trans_handle *trans,
 	}
 	spin_unlock(&space_info->lock);
 
+	/*
+	 * if we're doing a data chunk, go ahead and make sure that
+	 * we keep a reasonable number of metadata chunks allocated in the
+	 * FS as well.
+	 */
+	if (flags & BTRFS_BLOCK_GROUP_DATA) {
+		fs_info->data_chunk_allocations++;
+		if (!(fs_info->data_chunk_allocations %
+		      fs_info->metadata_ratio))
+			force_metadata_allocation(fs_info);
+	}
+
 	ret = btrfs_alloc_chunk(trans, extent_root, flags);
 	if (ret)
 		space_info->full = 1;
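
For context, the throttle added in the second hunk is just a modulo counter: every metadata_ratio-th data chunk allocation sets force_alloc on the metadata space_info, so the next metadata pass through do_chunk_alloc() treats the allocation as forced instead of applying the usual threshold check. The standalone sketch below models that pattern in userspace. The names (space_pool, alloc_data_chunk) and the ratio of 8 are invented for the example, and the extra guard against a zero ratio is only there to keep the toy safe; the kernel side assumes metadata_ratio has been given a nonzero value elsewhere (not shown in this hunk).

/*
 * Standalone illustration only -- not btrfs code.  The patch keeps the
 * counter and ratio in btrfs_fs_info and the force flag in
 * btrfs_space_info; here everything lives in one toy struct.
 */
#include <stdbool.h>
#include <stdio.h>

struct space_pool {
	unsigned long data_chunk_allocations;	/* counts data chunk allocations */
	unsigned int metadata_ratio;		/* force metadata every Nth data chunk */
	bool force_metadata_alloc;		/* analogous to space_info->force_alloc */
};

/* Mirror of the modulo check added to do_chunk_alloc() for data chunks. */
static void alloc_data_chunk(struct space_pool *pool)
{
	pool->data_chunk_allocations++;
	if (pool->metadata_ratio &&
	    !(pool->data_chunk_allocations % pool->metadata_ratio))
		pool->force_metadata_alloc = true;
}

int main(void)
{
	struct space_pool pool = { .metadata_ratio = 8 };
	unsigned int i;

	for (i = 0; i < 20; i++) {
		alloc_data_chunk(&pool);
		if (pool.force_metadata_alloc) {
			printf("after data chunk %lu: force a metadata chunk\n",
			       pool.data_chunk_allocations);
			pool.force_metadata_alloc = false;
		}
	}
	return 0;
}

Running the sketch reports a forced metadata allocation after the 8th and 16th data chunks, which is the cadence the patch enforces with the real fs_info->metadata_ratio.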