@@ -2347,12 +2347,10 @@ next:
 	return count;
 }
 
-
 static void wait_for_more_refs(struct btrfs_delayed_ref_root *delayed_refs,
-			       unsigned long num_refs)
+			       unsigned long num_refs,
+			       struct list_head *first_seq)
 {
-	struct list_head *first_seq = delayed_refs->seq_head.next;
-
 	spin_unlock(&delayed_refs->lock);
 	pr_debug("waiting for more refs (num %ld, first %p)\n",
 		 num_refs, first_seq);
@@ -2381,6 +2379,7 @@ int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
 	struct btrfs_delayed_ref_root *delayed_refs;
 	struct btrfs_delayed_ref_node *ref;
 	struct list_head cluster;
+	struct list_head *first_seq = NULL;
 	int ret;
 	u64 delayed_start;
 	int run_all = count == (unsigned long)-1;
@@ -2436,8 +2435,10 @@ again:
 		 */
 		consider_waiting = 1;
 		num_refs = delayed_refs->num_entries;
+		first_seq = root->fs_info->tree_mod_seq_list.next;
 	} else {
-		wait_for_more_refs(delayed_refs, num_refs);
+		wait_for_more_refs(delayed_refs,
+				   num_refs, first_seq);
 		/*
 		 * after waiting, things have changed. we
 		 * dropped the lock and someone else might have
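
The shape of the change: wait_for_more_refs() used to re-read the seq list head
(delayed_refs->seq_head.next) itself, after the point where it drops the lock.
Now the caller snapshots the list head (root->fs_info->tree_mod_seq_list.next)
in the same locked section where it samples num_entries, and hands that
snapshot to the wait routine. Below is a minimal userspace sketch of that
pattern, with pthreads standing in for the kernel spinlock and wait queue; the
types and names (struct seq_list, struct refs_root, producer) are hypothetical
stand-ins for illustration, not the btrfs code.

/*
 * Sketch: snapshot a count and a list-head pointer in one critical
 * section, then wait until either changes. Nothing is re-read between
 * taking the snapshot and starting the wait. Build with -lpthread.
 */
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

struct seq_list {
	struct seq_list *next;
};

struct refs_root {
	pthread_mutex_t lock;
	pthread_cond_t wait;
	struct seq_list *seq_head;	/* first blocking seq element, if any */
	unsigned long num_entries;	/* analogue of delayed_refs->num_entries */
};

/*
 * Analogue of the patched wait_for_more_refs(): the caller's snapshot is
 * passed in, instead of being re-read here after the lock was dropped.
 */
static void wait_for_more_refs(struct refs_root *root, unsigned long num_refs,
			       struct seq_list *first_seq)
{
	pthread_mutex_lock(&root->lock);
	while (root->num_entries == num_refs && root->seq_head == first_seq)
		pthread_cond_wait(&root->wait, &root->lock);
	pthread_mutex_unlock(&root->lock);
	printf("woke up: refs or seq list changed\n");
}

static struct refs_root root = {
	.lock = PTHREAD_MUTEX_INITIALIZER,
	.wait = PTHREAD_COND_INITIALIZER,
	.num_entries = 3,
};

static void *producer(void *arg)
{
	(void)arg;
	sleep(1);
	pthread_mutex_lock(&root.lock);
	root.num_entries++;		/* more "delayed refs" arrived */
	pthread_cond_broadcast(&root.wait);
	pthread_mutex_unlock(&root.lock);
	return NULL;
}

int main(void)
{
	pthread_t t;
	unsigned long num_refs;
	struct seq_list *first_seq;

	pthread_create(&t, NULL, producer, NULL);

	/*
	 * Mirrors the btrfs_run_delayed_refs() hunk: the count and the
	 * list-head snapshot are taken together, under the lock, and
	 * handed to the wait routine as a pair.
	 */
	pthread_mutex_lock(&root.lock);
	num_refs = root.num_entries;
	first_seq = root.seq_head;
	pthread_mutex_unlock(&root.lock);

	wait_for_more_refs(&root, num_refs, first_seq);
	pthread_join(t, NULL);
	return 0;
}

The point of passing the pair: if the wait routine re-read the list head on
its own, the head could already have moved between the caller's sampling of
num_refs and the start of the wait, so the two values would describe
different moments in time and the wait condition could be stale.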