@@ -15,6 +15,7 @@
  * This does not handle HOTPLUG_CPU yet.
  */
 #include <linux/sched.h>
+#include <linux/kernel_stat.h>
 #include <linux/err.h>
 #include <linux/smp.h>
 
@@ -35,6 +36,8 @@
 #include "xen-ops.h"
 #include "mmu.h"
 
+static void __cpuinit xen_init_lock_cpu(int cpu);
+
 cpumask_t xen_cpu_initialized_map;
 
 static DEFINE_PER_CPU(int, resched_irq);
@@ -179,6 +182,8 @@ static void __init xen_smp_prepare_cpus(unsigned int max_cpus)
 {
 	unsigned cpu;
 
+	xen_init_lock_cpu(0);
+
 	smp_store_cpu_info(0);
 	cpu_data(0).x86_max_cores = 1;
 	set_cpu_sibling_map(0);
@@ -301,6 +306,7 @@ static int __cpuinit xen_cpu_up(unsigned int cpu)
 	clear_tsk_thread_flag(idle, TIF_FORK);
 #endif
 	xen_setup_timer(cpu);
+	xen_init_lock_cpu(cpu);
 
 	per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
 
@@ -413,6 +419,170 @@ static irqreturn_t xen_call_function_single_interrupt(int irq, void *dev_id)
 	return IRQ_HANDLED;
 }
 
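+/*
+ * Overlaid on the generic lock storage (note the casts from
+ * struct raw_spinlock * below): the first byte is the lock itself,
+ * the next two bytes count the cpus waiting in the slow path.
+ */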
+struct xen_spinlock {
+	unsigned char lock;		/* 0 -> free; 1 -> locked */
+	unsigned short spinners;	/* count of waiting cpus */
+};
+
+static int xen_spin_is_locked(struct raw_spinlock *lock)
+{
+	struct xen_spinlock *xl = (struct xen_spinlock *)lock;
+
+	return xl->lock != 0;
+}
+
+static int xen_spin_is_contended(struct raw_spinlock *lock)
+{
+	struct xen_spinlock *xl = (struct xen_spinlock *)lock;
+
+	/* Not strictly true; this is only the count of contended
+	   lock-takers entering the slow path. */
+	return xl->spinners != 0;
+}
+
+static int xen_spin_trylock(struct raw_spinlock *lock)
+{
+	struct xen_spinlock *xl = (struct xen_spinlock *)lock;
+	u8 old = 1;
+
+	asm("xchgb %b0,%1"
+	    : "+q" (old), "+m" (xl->lock) : : "memory");
+
+	return old == 0;
+}
+
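+/*
+ * Per-cpu state: the event channel irq used to kick a waiter out of
+ * xen_poll_irq(), and the lock each cpu is currently spinning on, so
+ * the unlocker can pick a cpu to kick.
+ */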
+static DEFINE_PER_CPU(int, lock_kicker_irq) = -1;
+static DEFINE_PER_CPU(struct xen_spinlock *, lock_spinners);
+
+static inline void spinning_lock(struct xen_spinlock *xl)
+{
+	__get_cpu_var(lock_spinners) = xl;
+	wmb();			/* set lock of interest before count */
+	asm(LOCK_PREFIX " incw %0"
+	    : "+m" (xl->spinners) : : "memory");
+}
+
+static inline void unspinning_lock(struct xen_spinlock *xl)
+{
+	asm(LOCK_PREFIX " decw %0"
+	    : "+m" (xl->spinners) : : "memory");
+	wmb();			/* decrement count before clearing lock */
+	__get_cpu_var(lock_spinners) = NULL;
+}
+
+static noinline int xen_spin_lock_slow(struct raw_spinlock *lock)
+{
+	struct xen_spinlock *xl = (struct xen_spinlock *)lock;
+	int irq = __get_cpu_var(lock_kicker_irq);
+	int ret;
+
+	/* If kicker interrupts not initialized yet, just spin */
+	if (irq == -1)
+		return 0;
+
+	/* announce we're spinning */
+	spinning_lock(xl);
+
+	/* clear pending */
+	xen_clear_irq_pending(irq);
+
+	/* check again make sure it didn't become free while
+	   we weren't looking */
+	ret = xen_spin_trylock(lock);
+	if (ret)
+		goto out;
+
+	/* block until irq becomes pending */
+	xen_poll_irq(irq);
+	kstat_this_cpu.irqs[irq]++;
+
+out:
+	unspinning_lock(xl);
+	return ret;
+}
+
+static void xen_spin_lock(struct raw_spinlock *lock)
+{
+	struct xen_spinlock *xl = (struct xen_spinlock *)lock;
+	int timeout;
+	u8 oldval;
+
+	do {
+		timeout = 1 << 10;
+
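+		/*
+		 * Fast path: grab the lock with xchg; if it was taken,
+		 * spin-wait for up to 2^10 iterations for it to be
+		 * released before retrying.  Loop back around unless we
+		 * took the lock here (oldval == 0) or the slow path
+		 * managed to acquire it.
+		 */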
+		asm("1: xchgb %1,%0\n"
+		    "   testb %1,%1\n"
+		    "   jz 3f\n"
+		    "2: rep;nop\n"
+		    "   cmpb $0,%0\n"
+		    "   je 1b\n"
+		    "   dec %2\n"
+		    "   jnz 2b\n"
+		    "3:\n"
+		    : "+m" (xl->lock), "=q" (oldval), "+r" (timeout)
+		    : "1" (1)
+		    : "memory");
+
+	} while (unlikely(oldval != 0 && !xen_spin_lock_slow(lock)));
+}
+
+static noinline void xen_spin_unlock_slow(struct xen_spinlock *xl)
+{
+	int cpu;
+
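+	/* Kick the first cpu found spinning on this lock; it will
+	   return from xen_poll_irq() and retry the fast path. */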
+	for_each_online_cpu(cpu) {
+		/* XXX should mix up next cpu selection */
+		if (per_cpu(lock_spinners, cpu) == xl) {
+			xen_send_IPI_one(cpu, XEN_SPIN_UNLOCK_VECTOR);
+			break;
+		}
+	}
+}
+
+static void xen_spin_unlock(struct raw_spinlock *lock)
+{
+	struct xen_spinlock *xl = (struct xen_spinlock *)lock;
+
+	smp_wmb();		/* make sure no writes get moved after unlock */
+	xl->lock = 0;		/* release lock */
+
+	/* make sure unlock happens before kick */
+	barrier();
+
+	if (unlikely(xl->spinners))
+		xen_spin_unlock_slow(xl);
+}
+
+static __cpuinit void xen_init_lock_cpu(int cpu)
+{
+	int irq;
+	const char *name;
+
+	name = kasprintf(GFP_KERNEL, "spinlock%d", cpu);
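+	/* The kick irq is only ever polled via xen_poll_irq(), never
+	   actually delivered (it is disabled below), so any handler
+	   serves as a placeholder here. */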
+	irq = bind_ipi_to_irqhandler(XEN_SPIN_UNLOCK_VECTOR,
+				     cpu,
+				     xen_reschedule_interrupt,
+				     IRQF_DISABLED|IRQF_PERCPU|IRQF_NOBALANCING,
+				     name,
+				     NULL);
+
+	if (irq >= 0) {
+		disable_irq(irq); /* make sure it's never delivered */
+		per_cpu(lock_kicker_irq, cpu) = irq;
+	}
+
+	printk("cpu %d spinlock event irq %d\n", cpu, irq);
+}
+
+static void __init xen_init_spinlocks(void)
+{
+	pv_lock_ops.spin_is_locked = xen_spin_is_locked;
+	pv_lock_ops.spin_is_contended = xen_spin_is_contended;
+	pv_lock_ops.spin_lock = xen_spin_lock;
+	pv_lock_ops.spin_trylock = xen_spin_trylock;
+	pv_lock_ops.spin_unlock = xen_spin_unlock;
+}
+
 static const struct smp_ops xen_smp_ops __initdata = {
 	.smp_prepare_boot_cpu = xen_smp_prepare_boot_cpu,
 	.smp_prepare_cpus = xen_smp_prepare_cpus,
@@ -430,5 +600,5 @@ void __init xen_smp_init(void)
 {
 	smp_ops = xen_smp_ops;
 	xen_fill_possible_map();
-	paravirt_use_bytelocks();
+	xen_init_spinlocks();
 }