@@ -142,7 +142,20 @@ static void xen_lock_spinning(struct arch_spinlock *lock, __ticket_t want)
 	 * partially setup state.
 	 */
 	local_irq_save(flags);
-
+	/*
+	 * We don't really care if we're overwriting some other
+	 * (lock,want) pair, as that would mean that we're currently
+	 * in an interrupt context, and the outer context had
+	 * interrupts enabled. That has already kicked the VCPU out
+	 * of xen_poll_irq(), so it will just return spuriously and
+	 * retry with newly setup (lock,want).
+	 *
+	 * The ordering protocol on this is that the "lock" pointer
+	 * may only be set non-NULL if the "want" ticket is correct.
+	 * If we're updating "want", we must first clear "lock".
+	 */
+	w->lock = NULL;
+	smp_wmb();
 	w->want = want;
 	smp_wmb();
 	w->lock = lock;
@@ -157,24 +170,43 @@ static void xen_lock_spinning(struct arch_spinlock *lock, __ticket_t want)
 	/* Only check lock once pending cleared */
 	barrier();
 
-	/* Mark entry to slowpath before doing the pickup test to make
-	   sure we don't deadlock with an unlocker. */
+	/*
+	 * Mark entry to slowpath before doing the pickup test to make
+	 * sure we don't deadlock with an unlocker.
+	 */
 	__ticket_enter_slowpath(lock);
 
-	/* check again make sure it didn't become free while
-	   we weren't looking */
+	/*
+	 * check again make sure it didn't become free while
+	 * we weren't looking
+	 */
 	if (ACCESS_ONCE(lock->tickets.head) == want) {
 		add_stats(TAKEN_SLOW_PICKUP, 1);
 		goto out;
 	}
+
+	/* Allow interrupts while blocked */
+	local_irq_restore(flags);
+
+	/*
+	 * If an interrupt happens here, it will leave the wakeup irq
+	 * pending, which will cause xen_poll_irq() to return
+	 * immediately.
+	 */
+
 	/* Block until irq becomes pending (or perhaps a spurious wakeup) */
 	xen_poll_irq(irq);
 	add_stats(TAKEN_SLOW_SPURIOUS, !xen_test_irq_pending(irq));
+
+	local_irq_save(flags);
+
 	kstat_incr_irqs_this_cpu(irq, irq_to_desc(irq));
 out:
 	cpumask_clear_cpu(cpu, &waiting_cpus);
 	w->lock = NULL;
+
 	local_irq_restore(flags);
+
 	spin_time_accum_blocked(start);
 }
 PV_CALLEE_SAVE_REGS_THUNK(xen_lock_spinning);
@@ -188,7 +220,9 @@ static void xen_unlock_kick(struct arch_spinlock *lock, __ticket_t next)
 	for_each_cpu(cpu, &waiting_cpus) {
 		const struct xen_lock_waiting *w = &per_cpu(lock_waiting, cpu);
 
-		if (w->lock == lock && w->want == next) {
+		/* Make sure we read lock before want */
+		if (ACCESS_ONCE(w->lock) == lock &&
+		    ACCESS_ONCE(w->want) == next) {
 			add_stats(RELEASED_SLOW_KICKED, 1);
 			xen_send_IPI_one(cpu, XEN_SPIN_UNLOCK_VECTOR);
 			break;
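
The new comments describe a small publication protocol: the waiter may only leave w->lock non-NULL while w->want holds the matching ticket, so it clears the pointer, updates the ticket, and only then republishes the pointer, with write barriers in between; the kicker in turn reads the pointer before the ticket. Below is a minimal standalone sketch of that protocol for experimenting outside the kernel. It is not part of the patch: the struct and function names are invented, and C11 relaxed accesses plus explicit fences stand in for the kernel's ACCESS_ONCE() and smp_wmb().

/*
 * Illustrative sketch only: models the (lock, want) ordering protocol
 * used by xen_lock_spinning()/xen_unlock_kick() with C11 atomics.
 */
#include <stdatomic.h>
#include <stdio.h>

struct waiter {
	void *_Atomic lock;	/* may be non-NULL only while want is valid */
	atomic_uint want;	/* ticket this CPU is waiting for */
};

/* Waiter side: clear lock, publish want, then republish lock.
 * The release fences play the role of smp_wmb(). */
static void publish(struct waiter *w, void *lock, unsigned int want)
{
	atomic_store_explicit(&w->lock, NULL, memory_order_relaxed);
	atomic_thread_fence(memory_order_release);
	atomic_store_explicit(&w->want, want, memory_order_relaxed);
	atomic_thread_fence(memory_order_release);
	atomic_store_explicit(&w->lock, lock, memory_order_relaxed);
}

/* Kicker side: read lock before want, as the patch's comment requires.
 * The acquire fence orders the two reads in this C11 model. */
static int should_kick(struct waiter *w, void *lock, unsigned int next)
{
	void *l = atomic_load_explicit(&w->lock, memory_order_relaxed);

	atomic_thread_fence(memory_order_acquire);
	return l == lock &&
	       atomic_load_explicit(&w->want, memory_order_relaxed) == next;
}

int main(void)
{
	static struct waiter w;
	int dummy_lock;

	publish(&w, &dummy_lock, 42);
	printf("kick waiter: %d\n", should_kick(&w, &dummy_lock, 42));
	return 0;
}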