@@ -1399,6 +1399,48 @@ static void cleanup_transaction(struct btrfs_trans_handle *trans,
 	kmem_cache_free(btrfs_trans_handle_cachep, trans);
 }
 
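+/*
+ * Flush the pending work that has to be finished before the transaction
+ * can commit: start and wait for delalloc and ordered extents when
+ * FLUSHONCOMMIT is set or a snapshot is pending, run the delayed items,
+ * account the delayed-ref qgroup changes and run the ordered operations.
+ */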
+static int btrfs_flush_all_pending_stuffs(struct btrfs_trans_handle *trans,
+					  struct btrfs_root *root)
+{
+	int flush_on_commit = btrfs_test_opt(root, FLUSHONCOMMIT);
+	int snap_pending = 0;
+	int ret;
+
+	if (!flush_on_commit) {
+		spin_lock(&root->fs_info->trans_lock);
+		if (!list_empty(&trans->transaction->pending_snapshots))
+			snap_pending = 1;
+		spin_unlock(&root->fs_info->trans_lock);
+	}
+
+	if (flush_on_commit || snap_pending) {
+		btrfs_start_delalloc_inodes(root, 1);
+		btrfs_wait_ordered_extents(root, 1);
+	}
+
+	ret = btrfs_run_delayed_items(trans, root);
+	if (ret)
+		return ret;
+
+	/*
+	 * running the delayed items may have added new refs. account
+	 * them now so that they hinder processing of more delayed refs
+	 * as little as possible.
+	 */
+	btrfs_delayed_refs_qgroup_accounting(trans, root->fs_info);
+
+	/*
+	 * renames don't use btrfs_join_transaction, so, once we
+	 * set the transaction to blocked above, we aren't going
+	 * to get any new ordered operations. We can safely run
+	 * it here and know for sure that nothing new will be added
+	 * to the list
+	 */
+	btrfs_run_ordered_operations(root, 1);
+
+	return 0;
+}
+
 /*
  * btrfs_transaction state sequence:
  *    in_commit = 0, blocked = 0 (initial)
@@ -1416,7 +1458,6 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
 	int ret;
 	int should_grow = 0;
 	unsigned long now = get_seconds();
-	int flush_on_commit = btrfs_test_opt(root, FLUSHONCOMMIT);
 
 	ret = btrfs_run_ordered_operations(root, 0);
 	if (ret) {
@@ -1495,47 +1536,14 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
 		should_grow = 1;
 
 	do {
-		int snap_pending = 0;
-
 		joined = cur_trans->num_joined;
-		if (!list_empty(&trans->transaction->pending_snapshots))
-			snap_pending = 1;
 
 		WARN_ON(cur_trans != trans->transaction);
 
-		if (flush_on_commit || snap_pending) {
-			ret = btrfs_start_delalloc_inodes(root, 1);
-			if (ret) {
-				btrfs_abort_transaction(trans, root, ret);
-				goto cleanup_transaction;
-			}
-			btrfs_wait_ordered_extents(root, 1);
-		}
-
-		ret = btrfs_run_delayed_items(trans, root);
+		ret = btrfs_flush_all_pending_stuffs(trans, root);
 		if (ret)
 			goto cleanup_transaction;
 
-		/*
-		 * running the delayed items may have added new refs. account
-		 * them now so that they hinder processing of more delayed refs
-		 * as little as possible.
-		 */
-		btrfs_delayed_refs_qgroup_accounting(trans, root->fs_info);
-
-		/*
-		 * rename don't use btrfs_join_transaction, so, once we
-		 * set the transaction to blocked above, we aren't going
-		 * to get any new ordered operations. We can safely run
-		 * it here and no for sure that nothing new will be added
-		 * to the list
-		 */
-		ret = btrfs_run_ordered_operations(root, 1);
-		if (ret) {
-			btrfs_abort_transaction(trans, root, ret);
-			goto cleanup_transaction;
-		}
-
 		prepare_to_wait(&cur_trans->writer_wait, &wait,
 				TASK_UNINTERRUPTIBLE);
 
@@ -1548,6 +1556,10 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
 	} while (atomic_read(&cur_trans->num_writers) > 1 ||
 		 (should_grow && cur_trans->num_joined != joined));
 
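+	/*
+	 * writers that joined while we were waiting may have queued more
+	 * delalloc, delayed items or ordered operations, so flush the
+	 * pending work once more before we block out any further joins.
+	 */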
+	ret = btrfs_flush_all_pending_stuffs(trans, root);
+	if (ret)
+		goto cleanup_transaction;
+
 	/*
 	 * Ok now we need to make sure to block out any other joins while we
 	 * commit the transaction. We could have started a join before setting
|