/*
 * MIPS-specific semaphore code.
 *
 * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
 * Copyright (C) 2004 Ralf Baechle <ralf@linux-mips.org>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * April 2001 - Reworked by Paul Mackerras <paulus@samba.org>
 * to eliminate the SMP races in the old version between the updates
 * of `count' and `waking'.  Now we use negative `count' values to
 * indicate that some process(es) are waiting for the semaphore.
 */
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <asm/atomic.h>
#include <asm/cpu-features.h>
#include <asm/errno.h>
#include <asm/semaphore.h>
#include <asm/war.h>
  25. /*
  26. * Atomically update sem->count.
  27. * This does the equivalent of the following:
  28. *
  29. * old_count = sem->count;
  30. * tmp = MAX(old_count, 0) + incr;
  31. * sem->count = tmp;
  32. * return old_count;
  33. *
  34. * On machines without lld/scd we need a spinlock to make the manipulation of
  35. * sem->count and sem->waking atomic. Scalability isn't an issue because
  36. * this lock is used on UP only so it's just an empty variable.
  37. */
  38. static inline int __sem_update_count(struct semaphore *sem, int incr)
  39. {
  40. int old_count, tmp;
  41. if (cpu_has_llsc && R10000_LLSC_WAR) {
  42. __asm__ __volatile__(
  43. " .set mips3 \n"
  44. "1: ll %0, %2 # __sem_update_count \n"
  45. " sra %1, %0, 31 \n"
  46. " not %1 \n"
  47. " and %1, %0, %1 \n"
  48. " addu %1, %1, %3 \n"
  49. " sc %1, %2 \n"
  50. " beqzl %1, 1b \n"
  51. " .set mips0 \n"
  52. : "=&r" (old_count), "=&r" (tmp), "=m" (sem->count)
  53. : "r" (incr), "m" (sem->count));
  54. } else if (cpu_has_llsc) {
  55. __asm__ __volatile__(
  56. " .set mips3 \n"
  57. "1: ll %0, %2 # __sem_update_count \n"
  58. " sra %1, %0, 31 \n"
  59. " not %1 \n"
  60. " and %1, %0, %1 \n"
  61. " addu %1, %1, %3 \n"
  62. " sc %1, %2 \n"
  63. " beqz %1, 1b \n"
  64. " .set mips0 \n"
  65. : "=&r" (old_count), "=&r" (tmp), "=m" (sem->count)
  66. : "r" (incr), "m" (sem->count));
  67. } else {
  68. static DEFINE_SPINLOCK(semaphore_lock);
  69. unsigned long flags;
  70. spin_lock_irqsave(&semaphore_lock, flags);
  71. old_count = atomic_read(&sem->count);
  72. tmp = max_t(int, old_count, 0) + incr;
  73. atomic_set(&sem->count, tmp);
  74. spin_unlock_irqrestore(&semaphore_lock, flags);
  75. }
  76. return old_count;
  77. }
  78. void __up(struct semaphore *sem)
  79. {
  80. /*
  81. * Note that we incremented count in up() before we came here,
  82. * but that was ineffective since the result was <= 0, and
  83. * any negative value of count is equivalent to 0.
  84. * This ends up setting count to 1, unless count is now > 0
  85. * (i.e. because some other cpu has called up() in the meantime),
  86. * in which case we just increment count.
  87. */
  88. __sem_update_count(sem, 1);
  89. wake_up(&sem->wait);
  90. }
  91. EXPORT_SYMBOL(__up);
  92. /*
  93. * Note that when we come in to __down or __down_interruptible,
  94. * we have already decremented count, but that decrement was
  95. * ineffective since the result was < 0, and any negative value
  96. * of count is equivalent to 0.
  97. * Thus it is only when we decrement count from some value > 0
  98. * that we have actually got the semaphore.
  99. */
  100. void __sched __down(struct semaphore *sem)
  101. {
  102. struct task_struct *tsk = current;
  103. DECLARE_WAITQUEUE(wait, tsk);
  104. __set_task_state(tsk, TASK_UNINTERRUPTIBLE);
  105. add_wait_queue_exclusive(&sem->wait, &wait);
  106. /*
  107. * Try to get the semaphore. If the count is > 0, then we've
  108. * got the semaphore; we decrement count and exit the loop.
  109. * If the count is 0 or negative, we set it to -1, indicating
  110. * that we are asleep, and then sleep.
  111. */
  112. while (__sem_update_count(sem, -1) <= 0) {
  113. schedule();
  114. set_task_state(tsk, TASK_UNINTERRUPTIBLE);
  115. }
  116. remove_wait_queue(&sem->wait, &wait);
  117. __set_task_state(tsk, TASK_RUNNING);
  118. /*
  119. * If there are any more sleepers, wake one of them up so
  120. * that it can either get the semaphore, or set count to -1
  121. * indicating that there are still processes sleeping.
  122. */
  123. wake_up(&sem->wait);
  124. }
  125. EXPORT_SYMBOL(__down);
  126. int __sched __down_interruptible(struct semaphore * sem)
  127. {
  128. int retval = 0;
  129. struct task_struct *tsk = current;
  130. DECLARE_WAITQUEUE(wait, tsk);
  131. __set_task_state(tsk, TASK_INTERRUPTIBLE);
  132. add_wait_queue_exclusive(&sem->wait, &wait);
  133. while (__sem_update_count(sem, -1) <= 0) {
  134. if (signal_pending(current)) {
  135. /*
  136. * A signal is pending - give up trying.
  137. * Set sem->count to 0 if it is negative,
  138. * since we are no longer sleeping.
  139. */
  140. __sem_update_count(sem, 0);
  141. retval = -EINTR;
  142. break;
  143. }
  144. schedule();
  145. set_task_state(tsk, TASK_INTERRUPTIBLE);
  146. }
  147. remove_wait_queue(&sem->wait, &wait);
  148. __set_task_state(tsk, TASK_RUNNING);
  149. wake_up(&sem->wait);
  150. return retval;
  151. }
  152. EXPORT_SYMBOL(__down_interruptible);