@@ -187,7 +187,6 @@ static noinline int xen_spin_lock_slow(struct raw_spinlock *lock, bool irq_enabl
 	struct xen_spinlock *prev;
 	int irq = __get_cpu_var(lock_kicker_irq);
 	int ret;
-	unsigned long flags;
 	u64 start;
 
 	/* If kicker interrupts not initialized yet, just spin */
@@ -199,16 +198,12 @@ static noinline int xen_spin_lock_slow(struct raw_spinlock *lock, bool irq_enabl
 	/* announce we're spinning */
 	prev = spinning_lock(xl);
 
-	flags = __raw_local_save_flags();
-	if (irq_enable) {
-		ADD_STATS(taken_slow_irqenable, 1);
-		raw_local_irq_enable();
-	}
-
 	ADD_STATS(taken_slow, 1);
 	ADD_STATS(taken_slow_nested, prev != NULL);
 
 	do {
+		unsigned long flags;
+
 		/* clear pending */
 		xen_clear_irq_pending(irq);
 
@@ -228,6 +223,12 @@ static noinline int xen_spin_lock_slow(struct raw_spinlock *lock, bool irq_enabl
 			goto out;
 		}
 
+		flags = __raw_local_save_flags();
+		if (irq_enable) {
+			ADD_STATS(taken_slow_irqenable, 1);
+			raw_local_irq_enable();
+		}
+
 		/*
 		 * Block until irq becomes pending. If we're
 		 * interrupted at this point (after the trylock but
@@ -238,13 +239,15 @@ static noinline int xen_spin_lock_slow(struct raw_spinlock *lock, bool irq_enabl
 		 * pending.
 		 */
 		xen_poll_irq(irq);
+
+		raw_local_irq_restore(flags);
+
 		ADD_STATS(taken_slow_spurious, !xen_test_irq_pending(irq));
 	} while (!xen_test_irq_pending(irq)); /* check for spurious wakeups */
 
 	kstat_incr_irqs_this_cpu(irq, irq_to_desc(irq));
 
 out:
-	raw_local_irq_restore(flags);
 	unspinning_lock(xl, prev);
 	spin_time_accum_blocked(start);
 
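
The effect of the patch is easier to see with the hunks reassembled: the flags save and the conditional interrupt enable move from the top of the slow path into the poll loop, immediately around the blocking xen_poll_irq() call, and the matching restore happens right after the poll returns instead of once at the out: label. Interrupts are therefore enabled only while the vcpu is actually blocked, not across the whole slow path. Below is a minimal sketch of the post-patch control flow, pieced together from the hunks above; code elided between hunks (the prologue checks and the trylock retry) is abbreviated to comments, and the xl local is assumed to be the usual cast of lock, so treat this as illustrative rather than a compilable excerpt of arch/x86/xen/spinlock.c.

static noinline int xen_spin_lock_slow(struct raw_spinlock *lock, bool irq_enable)
{
	struct xen_spinlock *xl = (struct xen_spinlock *)lock;	/* assumed cast */
	struct xen_spinlock *prev;
	int irq = __get_cpu_var(lock_kicker_irq);
	int ret;
	u64 start;

	/* ... if the kicker irq is not initialized yet, just spin ... */

	/* announce we're spinning */
	prev = spinning_lock(xl);

	do {
		/* flags is now loop-local: saved and restored each iteration */
		unsigned long flags;

		/* clear pending */
		xen_clear_irq_pending(irq);

		/* ... retry the trylock; on success, goto out ... */

		flags = __raw_local_save_flags();
		if (irq_enable)
			raw_local_irq_enable();

		/* interrupts are enabled only across this blocking poll */
		xen_poll_irq(irq);

		/* back to the caller's interrupt state before re-checking */
		raw_local_irq_restore(flags);
	} while (!xen_test_irq_pending(irq)); /* check for spurious wakeups */

out:
	unspinning_lock(xl, prev);
	spin_time_accum_blocked(start);
	return ret;
}

The design point the diff encodes: because flags is saved and restored on every iteration, the interrupts-enabled window is exactly the xen_poll_irq() block, which is why the unconditional raw_local_irq_restore() on the out: path can be deleted.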