/*
 * Out of line spinlock code.
 *
 * Copyright IBM Corp. 2004, 2006
 * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
 */
#include <linux/types.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <asm/io.h>

int spin_retry = 1000;

/**
 * spin_retry= parameter, parsed from the kernel command line; sets the
 * retry count used by the lock slow paths below.
 */
static int __init spin_retry_setup(char *str)
{
        spin_retry = simple_strtoul(str, &str, 0);
        return 1;
}
__setup("spin_retry=", spin_retry_setup);

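/*
 * Slow path of arch_spin_lock(). The owner_cpu field holds the bitwise
 * complement of the owning CPU number, so 0 always means "unlocked".
 * While the lock is free or the current owner is actually running,
 * spin up to spin_retry times trying to grab the lock with a
 * compare-and-swap. If that fails, yield in favour of the owning CPU
 * and retry; on LPAR, skip the yield and keep spinning.
 */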
void arch_spin_lock_wait(arch_spinlock_t *lp)
{
        int count = spin_retry;
        unsigned int cpu = ~smp_processor_id();
        unsigned int owner;

        while (1) {
                owner = lp->owner_cpu;
                if (!owner || smp_vcpu_scheduled(~owner)) {
                        for (count = spin_retry; count > 0; count--) {
                                if (arch_spin_is_locked(lp))
                                        continue;
                                if (_raw_compare_and_swap(&lp->owner_cpu, 0,
                                                          cpu) == 0)
                                        return;
                        }
                        if (MACHINE_IS_LPAR)
                                continue;
                }
                owner = lp->owner_cpu;
                if (owner)
                        smp_yield_cpu(~owner);
                if (_raw_compare_and_swap(&lp->owner_cpu, 0, cpu) == 0)
                        return;
        }
}
EXPORT_SYMBOL(arch_spin_lock_wait);

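/*
 * Variant of arch_spin_lock_wait() for spin_lock_irqsave(): spin with
 * interrupts re-enabled (restored from the caller's saved @flags) and
 * disable them again around each compare-and-swap attempt, so the lock
 * is always taken with interrupts off.
 */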
void arch_spin_lock_wait_flags(arch_spinlock_t *lp, unsigned long flags)
{
        int count = spin_retry;
        unsigned int cpu = ~smp_processor_id();
        unsigned int owner;

        local_irq_restore(flags);
        while (1) {
                owner = lp->owner_cpu;
                if (!owner || smp_vcpu_scheduled(~owner)) {
                        for (count = spin_retry; count > 0; count--) {
                                if (arch_spin_is_locked(lp))
                                        continue;
                                local_irq_disable();
                                if (_raw_compare_and_swap(&lp->owner_cpu, 0,
                                                          cpu) == 0)
                                        return;
                                local_irq_restore(flags);
                        }
                        if (MACHINE_IS_LPAR)
                                continue;
                }
                owner = lp->owner_cpu;
                if (owner)
                        smp_yield_cpu(~owner);
                local_irq_disable();
                if (_raw_compare_and_swap(&lp->owner_cpu, 0, cpu) == 0)
                        return;
                local_irq_restore(flags);
        }
}
EXPORT_SYMBOL(arch_spin_lock_wait_flags);

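/*
 * Retry a failed trylock up to spin_retry times. Returns 1 if the lock
 * was acquired, 0 otherwise.
 */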
int arch_spin_trylock_retry(arch_spinlock_t *lp)
{
        unsigned int cpu = ~smp_processor_id();
        int count;

        for (count = spin_retry; count > 0; count--) {
                if (arch_spin_is_locked(lp))
                        continue;
                if (_raw_compare_and_swap(&lp->owner_cpu, 0, cpu) == 0)
                        return 1;
        }
        return 0;
}
EXPORT_SYMBOL(arch_spin_trylock_retry);

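/*
 * Called while busy-waiting on a contended lock. If the machine runs
 * under a hypervisor (z/VM or KVM), or the owning virtual CPU is not
 * currently scheduled, donate the time slice to the lock owner.
 */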
void arch_spin_relax(arch_spinlock_t *lock)
{
        unsigned int cpu = lock->owner_cpu;

        if (cpu != 0) {
                if (MACHINE_IS_VM || MACHINE_IS_KVM ||
                    !smp_vcpu_scheduled(~cpu))
                        smp_yield_cpu(~cpu);
        }
}
EXPORT_SYMBOL(arch_spin_relax);

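/*
 * Slow path of read_lock(). The rwlock word keeps the reader count in
 * the lower 31 bits; the most significant bit is set while a writer
 * holds the lock. Spin until no writer is active, then increment the
 * reader count with a compare-and-swap, yielding the CPU after every
 * spin_retry iterations.
 */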
void _raw_read_lock_wait(arch_rwlock_t *rw)
{
        unsigned int old;
        int count = spin_retry;

        while (1) {
                if (count-- <= 0) {
                        smp_yield();
                        count = spin_retry;
                }
                if (!arch_read_can_lock(rw))
                        continue;
                old = rw->lock & 0x7fffffffU;
                if (_raw_compare_and_swap(&rw->lock, old, old + 1) == old)
                        return;
        }
}
EXPORT_SYMBOL(_raw_read_lock_wait);

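/*
 * Variant of _raw_read_lock_wait() for read_lock_irqsave(): interrupts
 * stay enabled (restored from @flags) while spinning and are disabled
 * again before the compare-and-swap that takes the lock.
 */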
void _raw_read_lock_wait_flags(arch_rwlock_t *rw, unsigned long flags)
{
        unsigned int old;
        int count = spin_retry;

        local_irq_restore(flags);
        while (1) {
                if (count-- <= 0) {
                        smp_yield();
                        count = spin_retry;
                }
                if (!arch_read_can_lock(rw))
                        continue;
                old = rw->lock & 0x7fffffffU;
                local_irq_disable();
                if (_raw_compare_and_swap(&rw->lock, old, old + 1) == old)
                        return;
        }
}
EXPORT_SYMBOL(_raw_read_lock_wait_flags);

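/*
 * Retry a failed read_trylock() up to spin_retry times. Returns 1 on
 * success, 0 if a writer kept the lock throughout.
 */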
int _raw_read_trylock_retry(arch_rwlock_t *rw)
{
        unsigned int old;
        int count = spin_retry;

        while (count-- > 0) {
                if (!arch_read_can_lock(rw))
                        continue;
                old = rw->lock & 0x7fffffffU;
                if (_raw_compare_and_swap(&rw->lock, old, old + 1) == old)
                        return 1;
        }
        return 0;
}
EXPORT_SYMBOL(_raw_read_trylock_retry);

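/*
 * Slow path of write_lock(): wait until neither readers nor a writer
 * hold the lock (lock word == 0), then set the writer bit 0x80000000
 * with a compare-and-swap.
 */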
void _raw_write_lock_wait(arch_rwlock_t *rw)
{
        int count = spin_retry;

        while (1) {
                if (count-- <= 0) {
                        smp_yield();
                        count = spin_retry;
                }
                if (!arch_write_can_lock(rw))
                        continue;
                if (_raw_compare_and_swap(&rw->lock, 0, 0x80000000) == 0)
                        return;
        }
}
EXPORT_SYMBOL(_raw_write_lock_wait);

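/*
 * Variant of _raw_write_lock_wait() for write_lock_irqsave(), spinning
 * with interrupts enabled and re-disabling them before the
 * compare-and-swap that sets the writer bit.
 */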
void _raw_write_lock_wait_flags(arch_rwlock_t *rw, unsigned long flags)
{
        int count = spin_retry;

        local_irq_restore(flags);
        while (1) {
                if (count-- <= 0) {
                        smp_yield();
                        count = spin_retry;
                }
                if (!arch_write_can_lock(rw))
                        continue;
                local_irq_disable();
                if (_raw_compare_and_swap(&rw->lock, 0, 0x80000000) == 0)
                        return;
        }
}
EXPORT_SYMBOL(_raw_write_lock_wait_flags);

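/*
 * Retry a failed write_trylock() up to spin_retry times. Returns 1 on
 * success, 0 if the lock stayed contended.
 */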
int _raw_write_trylock_retry(arch_rwlock_t *rw)
{
        int count = spin_retry;

        while (count-- > 0) {
                if (!arch_write_can_lock(rw))
                        continue;
                if (_raw_compare_and_swap(&rw->lock, 0, 0x80000000) == 0)
                        return 1;
        }
        return 0;
}
EXPORT_SYMBOL(_raw_write_trylock_retry);