@@ -677,7 +677,7 @@ void synchronize_rcu(void)
 EXPORT_SYMBOL_GPL(synchronize_rcu);
 
 static DECLARE_WAIT_QUEUE_HEAD(sync_rcu_preempt_exp_wq);
-static long sync_rcu_preempt_exp_count;
+static unsigned long sync_rcu_preempt_exp_count;
 static DEFINE_MUTEX(sync_rcu_preempt_exp_mutex);
 
 /*
@@ -792,7 +792,7 @@ void synchronize_rcu_expedited(void)
 	unsigned long flags;
 	struct rcu_node *rnp;
 	struct rcu_state *rsp = &rcu_preempt_state;
-	long snap;
+	unsigned long snap;
 	int trycount = 0;
 
 	smp_mb(); /* Caller's modifications seen first by other CPUs. */
@@ -811,10 +811,10 @@ void synchronize_rcu_expedited(void)
 			synchronize_rcu();
 			return;
 		}
-		if ((ACCESS_ONCE(sync_rcu_preempt_exp_count) - snap) > 0)
+		if (ULONG_CMP_LT(snap, ACCESS_ONCE(sync_rcu_preempt_exp_count)))
 			goto mb_ret; /* Others did our work for us. */
 	}
-	if ((ACCESS_ONCE(sync_rcu_preempt_exp_count) - snap) > 0)
+	if (ULONG_CMP_LT(snap, ACCESS_ONCE(sync_rcu_preempt_exp_count)))
 		goto unlock_mb_ret; /* Others did our work for us. */
 
 	/* force all RCU readers onto ->blkd_tasks lists. */
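
As a wrap-safe replacement for the signed "(count - snap) > 0" test, ULONG_CMP_LT() compares the snapshot against the counter in modular unsigned-long arithmetic. The standalone sketch below is illustrative only: the macro body is assumed to mirror the definition that sits next to ULONG_CMP_GE() in the RCU headers, and the test harness around it is not part of the patch.

/*
 * Standalone sketch, not kernel code: the macro body is assumed to match
 * the kernel's ULONG_CMP_LT(); the harness is purely illustrative.
 */
#include <assert.h>
#include <limits.h>

/* True if a is "before" b in modular unsigned-long arithmetic. */
#define ULONG_CMP_LT(a, b) (ULONG_MAX / 2 < (a) - (b))

int main(void)
{
	unsigned long snap, count;

	/* Ordinary case: the counter has advanced past the snapshot. */
	snap = 100; count = 101;
	assert(ULONG_CMP_LT(snap, count));

	/*
	 * Wrap case: the counter wrapped past 0 after the snapshot was
	 * taken.  A signed (count - snap) > 0 test relies on signed
	 * overflow near the wrap point, which is undefined behavior in C;
	 * the unsigned modular compare is well defined and stays correct.
	 */
	snap = ULONG_MAX; count = 2;
	assert(ULONG_CMP_LT(snap, count));

	/* Counter unchanged: no expedited grace period has completed. */
	snap = 42; count = 42;
	assert(!ULONG_CMP_LT(snap, count));

	return 0;
}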