@@ -3621,10 +3621,9 @@ int btrfs_check_data_free_space(struct inode *inode, u64 bytes)
 	/* make sure bytes are sectorsize aligned */
 	bytes = ALIGN(bytes, root->sectorsize);
 
-	if (root == root->fs_info->tree_root ||
-	    BTRFS_I(inode)->location.objectid == BTRFS_FREE_INO_OBJECTID) {
-		alloc_chunk = 0;
+	if (btrfs_is_free_space_inode(inode)) {
 		committed = 1;
+		ASSERT(current->journal_info);
 	}
 
 	data_sinfo = fs_info->data_sinfo;
@@ -3652,6 +3651,16 @@ again:
 			spin_unlock(&data_sinfo->lock);
 alloc:
 			alloc_target = btrfs_get_alloc_profile(root, 1);
+			/*
+			 * It is a bit ugly that we don't call the nolock join
+			 * transaction for the free space inode case here, but
+			 * it is safe: we only do the data space reservation
+			 * for the free space cache in the transaction context.
+			 * In that case the common join transaction just
+			 * increases the use count of the current transaction
+			 * handle and does not try to acquire the trans_lock
+			 * of the fs.
+			 */
 			trans = btrfs_join_transaction(root);
 			if (IS_ERR(trans))
 				return PTR_ERR(trans);
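
The new comment relies on how btrfs_join_transaction() behaves for a task that already owns a transaction handle: the join path short-circuits on current->journal_info and only bumps the use count of that handle, so it does not need to take fs_info->trans_lock or wait for a commit. Below is a minimal sketch of that nested-join fast path, paraphrased rather than quoted from fs/btrfs/transaction.c; nested_join_sketch() is a made-up helper name used only for illustration, and the surrounding setup and locking details are omitted.

	/*
	 * Sketch of the nested-join fast path assumed by the comment above
	 * (kernel/btrfs context assumed, not a standalone function).
	 * If this task already carries a handle in current->journal_info,
	 * reuse it and bump its use count; only a task without a handle
	 * goes through the normal join path, which does take trans_lock.
	 */
	static struct btrfs_trans_handle *nested_join_sketch(struct btrfs_root *root)
	{
		struct btrfs_trans_handle *h = current->journal_info;

		if (h) {
			h->use_count++;	/* nested join, no locks taken */
			return h;
		}

		return btrfs_join_transaction(root);	/* normal join path */
	}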