@@ -81,7 +81,6 @@ static inline void spin_time_accum_blocked(u64 start)
 	spinlock_stats.time_blocked += delta;
 }
 #else /* !CONFIG_XEN_DEBUG_FS */
-#define TIMEOUT (1 << 10)
 static inline void add_stats(enum xen_contention_stat var, u32 val)
 {
 }
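
Note on the hunk above: the dropped TIMEOUT was the spin budget of the old
slow path, in which a waiter busy-waited up to that many probes before
blocking on its per-CPU event channel. With the ticketlock rewrite the budget
presumably comes from the generic SPIN_THRESHOLD in the arch spinlock
headers, so the local define is dead. A rough userspace sketch of the
spin-then-block pattern (block_until_kicked() is a hypothetical stand-in for
the blocking poll, not a kernel API):

/* C11, userspace illustration only, not kernel code. */
#include <stdatomic.h>
#include <stdio.h>
#include <sched.h>

#define TIMEOUT (1 << 10)		/* spin budget before blocking */

static atomic_flag lock_word = ATOMIC_FLAG_INIT;

static void block_until_kicked(void)
{
	sched_yield();			/* stand-in for the blocking hypercall */
}

static void spin_then_block_lock(void)
{
	for (;;) {
		int spins;

		for (spins = 0; spins < TIMEOUT; spins++)
			if (!atomic_flag_test_and_set_explicit(&lock_word,
							       memory_order_acquire))
				return;	/* got the lock while spinning */
		block_until_kicked();	/* budget spent: stop burning CPU */
	}
}

int main(void)
{
	spin_then_block_lock();
	puts("lock acquired");
	atomic_flag_clear_explicit(&lock_word, memory_order_release);
	return 0;
}
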
@@ -96,23 +95,6 @@ static inline void spin_time_accum_blocked(u64 start)
 }
 #endif /* CONFIG_XEN_DEBUG_FS */
 
-/*
- * Size struct xen_spinlock so it's the same as arch_spinlock_t.
- */
-#if NR_CPUS < 256
-typedef u8 xen_spinners_t;
-# define inc_spinners(xl) \
-	asm(LOCK_PREFIX " incb %0" : "+m" ((xl)->spinners) : : "memory");
-# define dec_spinners(xl) \
-	asm(LOCK_PREFIX " decb %0" : "+m" ((xl)->spinners) : : "memory");
-#else
-typedef u16 xen_spinners_t;
-# define inc_spinners(xl) \
-	asm(LOCK_PREFIX " incw %0" : "+m" ((xl)->spinners) : : "memory");
-# define dec_spinners(xl) \
-	asm(LOCK_PREFIX " decw %0" : "+m" ((xl)->spinners) : : "memory");
-#endif
-
 struct xen_lock_waiting {
 	struct arch_spinlock *lock;
 	__ticket_t want;
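
Note on the deletion above: per its own comment, the block existed only to
keep the old struct xen_spinlock the same size as arch_spinlock_t, picking a
u8 or u16 spinner counter depending on NR_CPUS and bumping it with a
LOCK-prefixed inc/dec. The ticketlock code works on arch_spinlock_t directly,
so the shadow layout goes away. A standalone x86 illustration of the sizing
trick (all names here are hypothetical, not the kernel's):

/* Build with gcc on x86/x86-64; userspace illustration only. */
#include <stdint.h>
#include <stdio.h>

#define MY_NR_CPUS 64			/* stand-in for NR_CPUS */

#if MY_NR_CPUS < 256
typedef uint8_t spinners_t;		/* < 256 waiters fit in one byte */
#define INC_INSN "incb"
#else
typedef uint16_t spinners_t;
#define INC_INSN "incw"
#endif

struct tiny_lock {
	spinners_t spinners;		/* CPUs currently spinning here */
};

static inline void inc_spinners(struct tiny_lock *xl)
{
	/* "lock" makes the RMW atomic; the "memory" clobber is a barrier */
	asm volatile("lock " INC_INSN " %0" : "+m" (xl->spinners) : : "memory");
}

int main(void)
{
	struct tiny_lock l = { 0 };

	inc_spinners(&l);
	printf("sizeof(lock) = %zu, spinners = %u\n",
	       sizeof(l), (unsigned)l.spinners);
	return 0;
}
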
@@ -123,6 +105,7 @@ static DEFINE_PER_CPU(char *, irq_name);
 static DEFINE_PER_CPU(struct xen_lock_waiting, lock_waiting);
 static cpumask_t waiting_cpus;
 
+static bool xen_pvspin = true;
 static void xen_lock_spinning(struct arch_spinlock *lock, __ticket_t want)
 {
 	int irq = __this_cpu_read(lock_kicker_irq);
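
Note on the hunk above: xen_pvspin becomes a plain runtime flag, and each
blocked waiter records which lock and which ticket it wants in its per-CPU
lock_waiting slot while marking itself in waiting_cpus. The point of that
record is that the unlock slow path can kick exactly the CPU holding the next
ticket instead of waking every waiter. A minimal userspace sketch of that
lookup (illustrative names, not the kernel's):

/* Userspace illustration of the "find the CPU to kick" scan. */
#include <stdint.h>
#include <stdio.h>

#define NCPUS 4
typedef uint16_t ticket_t;

struct lock_waiting {
	void *lock;			/* lock this CPU is blocked on */
	ticket_t want;			/* ticket it is waiting for */
};

static struct lock_waiting waiting[NCPUS];	/* stand-in for per-CPU data */

static int find_cpu_to_kick(void *lock, ticket_t next)
{
	int cpu;

	for (cpu = 0; cpu < NCPUS; cpu++)
		if (waiting[cpu].lock == lock && waiting[cpu].want == next)
			return cpu;	/* owner of the next ticket */
	return -1;			/* nobody is blocked on this lock */
}

int main(void)
{
	int dummy_lock;

	waiting[2].lock = &dummy_lock;
	waiting[2].want = 7;
	printf("kick CPU %d\n", find_cpu_to_kick(&dummy_lock, 7));
	return 0;
}
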
@@ -241,16 +224,12 @@ void xen_init_lock_cpu(int cpu)
 	int irq;
 	char *name;
 
+	if (!xen_pvspin)
+		return;
+
 	WARN(per_cpu(lock_kicker_irq, cpu) >= 0, "spinlock on CPU%d exists on IRQ%d!\n",
 	     cpu, per_cpu(lock_kicker_irq, cpu));
 
-	/*
-	 * See git commit f10cd522c5fbfec9ae3cc01967868c9c2401ed23
-	 * (xen: disable PV spinlocks on HVM)
-	 */
-	if (xen_hvm_domain())
-		return;
-
 	name = kasprintf(GFP_KERNEL, "spinlock%d", cpu);
 	irq = bind_ipi_to_irqhandler(XEN_SPIN_UNLOCK_VECTOR,
 				     cpu,
@@ -270,11 +249,7 @@ void xen_init_lock_cpu(int cpu)
 
 void xen_uninit_lock_cpu(int cpu)
 {
-	/*
-	 * See git commit f10cd522c5fbfec9ae3cc01967868c9c2401ed23
-	 * (xen: disable PV spinlocks on HVM)
-	 */
-	if (xen_hvm_domain())
+	if (!xen_pvspin)
 		return;
 
 	unbind_from_irqhandler(per_cpu(lock_kicker_irq, cpu), NULL);
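
Note on the two hunks above: the early return in xen_init_lock_cpu() sits
before the kasprintf() and bind_ipi_to_irqhandler() calls, so a guest with PV
spinlocks disabled never allocates the IRQ name or binds the per-CPU kicker
IPI; the teardown path keys off the same xen_pvspin flag, keeping the two
symmetric so unbind_from_irqhandler() can never run for an IRQ that was never
bound.
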
@@ -283,16 +258,9 @@ void xen_uninit_lock_cpu(int cpu)
 	per_cpu(irq_name, cpu) = NULL;
 }
 
-static bool xen_pvspin __initdata = true;
 
 void __init xen_init_spinlocks(void)
 {
-	/*
-	 * See git commit f10cd522c5fbfec9ae3cc01967868c9c2401ed23
-	 * (xen: disable PV spinlocks on HVM)
-	 */
-	if (xen_hvm_domain())
-		return;
 
 	if (!xen_pvspin) {
 		printk(KERN_DEBUG "xen: PV spinlocks disabled\n");
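
Note on the hunk above: xen_pvspin loses its __initdata annotation (and moves
up in the file) because it is no longer read only during boot; the CPU
hotplug paths xen_init_lock_cpu() and xen_uninit_lock_cpu() test it long
after .init.data has been freed, so the flag must live in ordinary .data. It
is presumably still cleared by the existing xen_nopvspin command-line
handling elsewhere in this file.
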
@@ -323,6 +291,9 @@ static int __init xen_spinlock_debugfs(void)
 	if (d_xen == NULL)
 		return -ENOMEM;
 
+	if (!xen_pvspin)
+		return 0;
+
 	d_spin_debug = debugfs_create_dir("spinlocks", d_xen);
 
 	debugfs_create_u8("zero_stats", 0644, d_spin_debug, &zero_stats);
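
Note on the final hunk: the new bail-out sits after the d_xen NULL check but
before the "spinlocks" directory is created, so a guest running without PV
spinlocks simply gets no spinlock nodes under the Xen debugfs root; returning
0 rather than an error keeps the initcall from being reported as a failure.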