spinlock.c

/*
 * Split spinlock implementation out into its own file, so it can be
 * compiled in a FTRACE-compatible way.
 */
#include <linux/kernel_stat.h>
#include <linux/spinlock.h>

#include <asm/paravirt.h>

#include <xen/interface/xen.h>
#include <xen/events.h>

#include "xen-ops.h"

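/*
 * A paravirtualized spinlock: the "lock" byte overlays the raw_spinlock
 * slot (hence the casts below), and "spinners" counts CPUs that have
 * entered the slow (blocking) path waiting for this lock.
 */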
struct xen_spinlock {
	unsigned char lock;		/* 0 -> free; 1 -> locked */
	unsigned short spinners;	/* count of waiting cpus */
};

static int xen_spin_is_locked(struct raw_spinlock *lock)
{
	struct xen_spinlock *xl = (struct xen_spinlock *)lock;

	return xl->lock != 0;
}

static int xen_spin_is_contended(struct raw_spinlock *lock)
{
	struct xen_spinlock *xl = (struct xen_spinlock *)lock;

	/* Not strictly true; this is only the count of contended
	   lock-takers entering the slow path. */
	return xl->spinners != 0;
}

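/*
 * Try to take the lock with a single atomic byte exchange: store 1 into
 * the lock byte and look at what was there before.  If the old value was
 * 0 the lock was free and we now own it; if it was already 1 someone
 * else holds it and the store changes nothing.
 */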
static int xen_spin_trylock(struct raw_spinlock *lock)
{
	struct xen_spinlock *xl = (struct xen_spinlock *)lock;
	u8 old = 1;

	asm("xchgb %b0,%1"
	    : "+q" (old), "+m" (xl->lock) : : "memory");

	return old == 0;
}

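/*
 * Per-CPU state for the slow path: the Xen event-channel irq used to
 * kick a blocked CPU, and the lock (if any) this CPU is currently
 * spinning on, so the unlocker can find someone to wake.
 */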
static DEFINE_PER_CPU(int, lock_kicker_irq) = -1;
static DEFINE_PER_CPU(struct xen_spinlock *, lock_spinners);

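/*
 * spinning_lock()/unspinning_lock() bracket the blocking wait: they
 * publish which lock this CPU is waiting on and keep the per-lock
 * spinner count up to date, so xen_spin_unlock_slow() knows whether
 * (and whom) to kick.  The write ordering follows the comments below:
 * the lock pointer is set before the count is raised, and the count is
 * dropped before the pointer is cleared.
 */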
static inline void spinning_lock(struct xen_spinlock *xl)
{
	__get_cpu_var(lock_spinners) = xl;
	wmb();			/* set lock of interest before count */
	asm(LOCK_PREFIX " incw %0"
	    : "+m" (xl->spinners) : : "memory");
}

static inline void unspinning_lock(struct xen_spinlock *xl)
{
	asm(LOCK_PREFIX " decw %0"
	    : "+m" (xl->spinners) : : "memory");
	wmb();			/* decrement count before clearing lock */
	__get_cpu_var(lock_spinners) = NULL;
}

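/*
 * Slow path: instead of burning cycles, block in the hypervisor until
 * the lock holder kicks us.  Register as a spinner, clear any stale
 * pending state on this CPU's kicker irq, recheck the lock once (it may
 * have been released while we were setting up), and then poll the event
 * channel, which blocks this VCPU until the irq becomes pending.
 * Returns nonzero if the lock was acquired here, zero if the caller
 * should go back to spinning.
 */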
static noinline int xen_spin_lock_slow(struct raw_spinlock *lock)
{
	struct xen_spinlock *xl = (struct xen_spinlock *)lock;
	int irq = __get_cpu_var(lock_kicker_irq);
	int ret;

	/* If kicker interrupts not initialized yet, just spin */
	if (irq == -1)
		return 0;

	/* announce we're spinning */
	spinning_lock(xl);

	/* clear pending */
	xen_clear_irq_pending(irq);

	/* check again make sure it didn't become free while
	   we weren't looking */
	ret = xen_spin_trylock(lock);
	if (ret)
		goto out;

	/* block until irq becomes pending */
	xen_poll_irq(irq);
	kstat_this_cpu.irqs[irq]++;

out:
	unspinning_lock(xl);
	return ret;
}

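/*
 * Fast path: spin on the lock byte for a bounded number of iterations
 * (2^10) before giving up and blocking in the slow path.  The inline
 * asm tries an atomic xchg (label 1); if the lock was held it relaxes
 * with rep;nop while watching for the byte to go to zero (label 2),
 * decrementing the timeout on each pass, and exits (label 3) either
 * with the lock taken (oldval == 0) or with the timeout expired.  In
 * the latter case xen_spin_lock_slow() is tried, and the whole sequence
 * repeats until the lock is acquired.
 */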
static void xen_spin_lock(struct raw_spinlock *lock)
{
	struct xen_spinlock *xl = (struct xen_spinlock *)lock;
	int timeout;
	u8 oldval;

	do {
		timeout = 1 << 10;

		asm("1: xchgb %1,%0\n"
		    "   testb %1,%1\n"
		    "   jz 3f\n"
		    "2: rep;nop\n"
		    "   cmpb $0,%0\n"
		    "   je 1b\n"
		    "   dec %2\n"
		    "   jnz 2b\n"
		    "3:\n"
		    : "+m" (xl->lock), "=q" (oldval), "+r" (timeout)
		    : "1" (1)
		    : "memory");
	} while (unlikely(oldval != 0 && !xen_spin_lock_slow(lock)));
}

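/*
 * Kick one CPU that is blocked waiting on this lock: scan the online
 * CPUs for one whose lock_spinners entry matches and send it the
 * spinlock-unlock IPI, which makes its xen_poll_irq() return.
 */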
static noinline void xen_spin_unlock_slow(struct xen_spinlock *xl)
{
	int cpu;

	for_each_online_cpu(cpu) {
		/* XXX should mix up next cpu selection */
		if (per_cpu(lock_spinners, cpu) == xl) {
			xen_send_IPI_one(cpu, XEN_SPIN_UNLOCK_VECTOR);
			break;
		}
	}
}

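/*
 * Release the lock with a plain store (only the owner writes a held
 * lock byte), then kick a waiter if the spinner count says anyone is
 * blocked in the slow path.  As the comments below note, smp_wmb()
 * keeps the critical section's stores before the release, and the
 * compiler barrier keeps the spinners check after the releasing store.
 */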
static void xen_spin_unlock(struct raw_spinlock *lock)
{
	struct xen_spinlock *xl = (struct xen_spinlock *)lock;

	smp_wmb();		/* make sure no writes get moved after unlock */
	xl->lock = 0;		/* release lock */

	/* make sure unlock happens before kick */
	barrier();

	if (unlikely(xl->spinners))
		xen_spin_unlock_slow(xl);
}

static irqreturn_t dummy_handler(int irq, void *dev_id)
{
	BUG();
	return IRQ_HANDLED;
}

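/*
 * Per-CPU setup: bind the spinlock-kick IPI vector to an event-channel
 * irq for this CPU.  The irq is never delivered as a normal interrupt
 * (hence the BUG()ing dummy_handler and the disable_irq()); it exists
 * only so the slow path can clear/poll it and be woken by
 * xen_send_IPI_one().
 */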
void __cpuinit xen_init_lock_cpu(int cpu)
{
	int irq;
	const char *name;

	name = kasprintf(GFP_KERNEL, "spinlock%d", cpu);
	irq = bind_ipi_to_irqhandler(XEN_SPIN_UNLOCK_VECTOR,
				     cpu,
				     dummy_handler,
				     IRQF_DISABLED|IRQF_PERCPU|IRQF_NOBALANCING,
				     name,
				     NULL);

	if (irq >= 0) {
		disable_irq(irq); /* make sure it's never delivered */
		per_cpu(lock_kicker_irq, cpu) = irq;
	}

	printk("cpu %d spinlock event irq %d\n", cpu, irq);
}

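/*
 * Point the paravirt spinlock hooks at the Xen implementations above,
 * replacing the native spinlock operations for this guest.
 */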
void __init xen_init_spinlocks(void)
{
	pv_lock_ops.spin_is_locked = xen_spin_is_locked;
	pv_lock_ops.spin_is_contended = xen_spin_is_contended;
	pv_lock_ops.spin_lock = xen_spin_lock;
	pv_lock_ops.spin_trylock = xen_spin_trylock;
	pv_lock_ops.spin_unlock = xen_spin_unlock;
}