locks.c

/*
 * Locks for smp ppc
 *
 * Written by Cort Dougan (cort@cs.nmt.edu)
 */

#include <linux/config.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/module.h>
#include <asm/ppc_asm.h>
#include <asm/smp.h>

#ifdef CONFIG_DEBUG_SPINLOCK

#undef INIT_STUCK
#define INIT_STUCK 200000000 /*0xffffffff*/

/*
 * Try to acquire a spinlock.
 * Only does the stwcx. if the load returned 0 - the Programming
 * Environments Manual suggests not doing unnecessary stwcx.'s
 * since they may inhibit forward progress by other CPUs in getting
 * a lock.
 */
static inline unsigned long __spin_trylock(volatile unsigned long *lock)
{
	unsigned long ret;

	__asm__ __volatile__ ("\n\
1:	lwarx	%0,0,%1\n\
	cmpwi	0,%0,0\n\
	bne	2f\n"
	PPC405_ERR77(0,%1)
"	stwcx.	%2,0,%1\n\
	bne-	1b\n\
	isync\n\
2:"
	: "=&r"(ret)
	: "r"(lock), "r"(1)
	: "cr0", "memory");

	return ret;
}
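
/*
 * Spin until __spin_trylock() succeeds.  If the lock stays held for
 * INIT_STUCK iterations, print a diagnostic giving the holder's CPU
 * and PC, then keep waiting.  On success, record the new owner's CPU
 * and return address in the lock for later debugging.
 */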
void _raw_spin_lock(spinlock_t *lock)
{
	int cpu = smp_processor_id();
	unsigned int stuck = INIT_STUCK;

	while (__spin_trylock(&lock->lock)) {
		while ((unsigned volatile long)lock->lock != 0) {
			if (!--stuck) {
				printk("_spin_lock(%p) CPU#%d NIP %p"
				       " holder: cpu %ld pc %08lX\n",
				       lock, cpu, __builtin_return_address(0),
				       lock->owner_cpu, lock->owner_pc);
				stuck = INIT_STUCK;
				/* steal the lock */
				/*xchg_u32((void *)&lock->lock,0);*/
			}
		}
	}
	lock->owner_pc = (unsigned long)__builtin_return_address(0);
	lock->owner_cpu = cpu;
}
EXPORT_SYMBOL(_raw_spin_lock);
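
/*
 * One-shot acquire: returns 1 and records the new owner if the lock
 * was free, 0 if it is already held.
 */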
int _raw_spin_trylock(spinlock_t *lock)
{
	if (__spin_trylock(&lock->lock))
		return 0;
	lock->owner_cpu = smp_processor_id();
	lock->owner_pc = (unsigned long)__builtin_return_address(0);
	return 1;
}
EXPORT_SYMBOL(_raw_spin_trylock);
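
/*
 * Release the lock.  Warn if it was not locked, or if a CPU other
 * than the recorded owner is doing the unlock.  The owner fields are
 * cleared first, then a wmb() orders those stores (and the critical
 * section's stores) before the lock word itself is cleared.
 */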
void _raw_spin_unlock(spinlock_t *lp)
{
	if (!lp->lock)
		printk("_spin_unlock(%p): no lock cpu %d curr PC %p %s/%d\n",
		       lp, smp_processor_id(), __builtin_return_address(0),
		       current->comm, current->pid);
	if (lp->owner_cpu != smp_processor_id())
		printk("_spin_unlock(%p): cpu %d trying clear of cpu %d pc %lx val %lx\n",
		       lp, smp_processor_id(), (int)lp->owner_cpu,
		       lp->owner_pc, lp->lock);
	lp->owner_pc = lp->owner_cpu = 0;
	wmb();
	lp->lock = 0;
}
EXPORT_SYMBOL(_raw_spin_unlock);

/*
 * For rwlocks, zero is unlocked, -1 is write-locked,
 * positive is read-locked.
 */
static __inline__ int __read_trylock(rwlock_t *rw)
{
	signed int tmp;

	__asm__ __volatile__(
"2:	lwarx	%0,0,%1		# __read_trylock\n\
	addic.	%0,%0,1\n\
	ble-	1f\n"
	PPC405_ERR77(0,%1)
"	stwcx.	%0,0,%1\n\
	bne-	2b\n\
	isync\n\
1:"
	: "=&r"(tmp)
	: "r"(&rw->lock)
	: "cr0", "memory");

	return tmp;
}
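
/*
 * Returns 1 if the read count was incremented (no writer held the
 * lock), 0 otherwise.
 */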
int _raw_read_trylock(rwlock_t *rw)
{
	return __read_trylock(rw) > 0;
}
EXPORT_SYMBOL(_raw_read_trylock);
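
/*
 * Spin until a read lock is obtained.  If read_can_lock() stays false
 * for INIT_STUCK iterations, print a diagnostic and keep spinning.
 */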
void _raw_read_lock(rwlock_t *rw)
{
	unsigned int stuck;

	while (__read_trylock(rw) <= 0) {
		stuck = INIT_STUCK;
		while (!read_can_lock(rw)) {
			if (--stuck == 0) {
				printk("_read_lock(%p) CPU#%d lock %d\n",
				       rw, raw_smp_processor_id(), rw->lock);
				stuck = INIT_STUCK;
			}
		}
	}
}
EXPORT_SYMBOL(_raw_read_lock);
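
/*
 * Drop a read lock by atomically decrementing the count, warning if
 * the lock word was already zero (not read-locked).
 */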
void _raw_read_unlock(rwlock_t *rw)
{
	if (rw->lock == 0)
		printk("_read_unlock(): %s/%d (nip %08lX) lock %d\n",
		       current->comm, current->pid, current->thread.regs->nip,
		       rw->lock);
	wmb();
	atomic_dec((atomic_t *) &(rw)->lock);
}
EXPORT_SYMBOL(_raw_read_unlock);
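
/*
 * Acquire the write lock by atomically changing the lock word from 0
 * to -1, spinning (with the same stuck-counter diagnostic) until the
 * cmpxchg succeeds.
 */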
void _raw_write_lock(rwlock_t *rw)
{
	unsigned int stuck;

	while (cmpxchg(&rw->lock, 0, -1) != 0) {
		stuck = INIT_STUCK;
		while (!write_can_lock(rw)) {
			if (--stuck == 0) {
				printk("write_lock(%p) CPU#%d lock %d\n",
				       rw, raw_smp_processor_id(), rw->lock);
				stuck = INIT_STUCK;
			}
		}
	}
	wmb();
}
EXPORT_SYMBOL(_raw_write_lock);
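
/*
 * One-shot write acquire: returns 1 if the lock word was changed from
 * 0 to -1, 0 if the lock was already held.
 */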
int _raw_write_trylock(rwlock_t *rw)
{
	if (cmpxchg(&rw->lock, 0, -1) != 0)
		return 0;
	wmb();
	return 1;
}
EXPORT_SYMBOL(_raw_write_trylock);
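
/*
 * Release the write lock, warning if the lock word does not show a
 * write lock (i.e. is not negative).
 */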
void _raw_write_unlock(rwlock_t *rw)
{
	if (rw->lock >= 0)
		printk("_write_unlock(): %s/%d (nip %08lX) lock %d\n",
		       current->comm, current->pid, current->thread.regs->nip,
		       rw->lock);
	wmb();
	rw->lock = 0;
}
EXPORT_SYMBOL(_raw_write_unlock);

#endif /* CONFIG_DEBUG_SPINLOCK */