/* $Id: semaphore.c,v 1.9 2001/11/18 00:12:56 davem Exp $
 * semaphore.c: Sparc64 semaphore implementation.
 *
 * This is basically the PPC semaphore scheme ported to use
 * the sparc64 atomic instructions, so see the PPC code for
 * credits.
 */
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/init.h>
/*
 * Atomically update sem->count.
 * This does the equivalent of the following:
 *
 *	old_count = sem->count;
 *	tmp = MAX(old_count, 0) + incr;
 *	sem->count = tmp;
 *	return old_count;
 *
 * i.e. a negative count is clamped to zero before 'incr' is applied,
 * and the caller learns the pre-update value.  Implemented as a CAS
 * retry loop so the whole read-modify-write is atomic.
 */
static __inline__ int __sem_update_count(struct semaphore *sem, int incr)
{
	int old_count, tmp;

	__asm__ __volatile__("\n"
"	! __sem_update_count old_count(%0) tmp(%1) incr(%4) &sem->count(%3)\n"
"1:	ldsw	[%3], %0\n"		/* old_count = sem->count */
"	mov	%0, %1\n"		/* tmp = old_count */
"	cmp	%0, 0\n"
"	movl	%%icc, 0, %1\n"		/* tmp = MAX(old_count, 0) */
"	add	%1, %4, %1\n"		/* tmp += incr */
"	cas	[%3], %0, %1\n"		/* try to install tmp; %1 gets old mem value */
"	cmp	%0, %1\n"
"	bne,pn	%%icc, 1b\n"		/* raced with another update: retry */
"	 membar	#StoreLoad | #StoreStore\n"	/* delay slot: order the update */
	: "=&r" (old_count), "=&r" (tmp), "=m" (sem->count)
	: "r" (&sem->count), "r" (incr), "m" (sem->count)
	: "cc");

	return old_count;
}
/*
 * Slow path of up(): the count was negative, so there may be sleepers.
 * Bump the count (clamping any negative value to zero first, per
 * __sem_update_count) and wake one exclusive waiter.  The count must be
 * updated before the wakeup so the woken task sees the new value.
 */
static void __up(struct semaphore *sem)
{
	__sem_update_count(sem, 1);
	wake_up(&sem->wait);
}
/*
 * Release the semaphore.  Fast path is a lock-free increment; if the
 * old count was negative there are sleepers, so branch to the
 * out-of-line slow path (in .subsection 2) which calls __up().
 */
void up(struct semaphore *sem)
{
	/* This atomically does:
	 *	old_val = sem->count;
	 *	new_val = sem->count + 1;
	 *	sem->count = new_val;
	 *	if (old_val < 0)
	 *		__up(sem);
	 *
	 * The (old_val < 0) test is equivalent to
	 * the more straightforward (new_val <= 0),
	 * but it is easier to test the former because
	 * of how the CAS instruction works.
	 */

	__asm__ __volatile__("\n"
"	! up sem(%0)\n"
"	membar	#StoreLoad | #LoadLoad\n"	/* order prior accesses before release */
"1:	lduw	[%0], %%g1\n"			/* %g1 = old count */
"	add	%%g1, 1, %%g7\n"		/* %g7 = old count + 1 */
"	cas	[%0], %%g1, %%g7\n"		/* on success %g7 = old count */
"	cmp	%%g1, %%g7\n"
"	bne,pn	%%icc, 1b\n"			/* CAS lost the race: retry */
"	 addcc	%%g7, 1, %%g0\n"		/* delay slot: set cc on old+1 (= new) */
"	ble,pn	%%icc, 3f\n"			/* new <= 0, i.e. old < 0: slow path */
"	 membar	#StoreLoad | #StoreStore\n"
"2:\n"
"	.subsection 2\n"			/* out-of-line slow path */
"3:	mov	%0, %%g1\n"			/* save sem across the register window */
"	save	%%sp, -160, %%sp\n"
"	call	%1\n"				/* call __up(sem) */
"	 mov	%%g1, %%o0\n"			/* delay slot: argument setup */
"	ba,pt	%%xcc, 2b\n"
"	 restore\n"
"	.previous\n"
	: : "r" (sem), "i" (__up)
	: "g1", "g2", "g3", "g7", "memory", "cc");
}
/*
 * Slow path of down(): the caller's decrement saw a non-positive count,
 * so sleep until the semaphore is released.  The task state must be set
 * to TASK_UNINTERRUPTIBLE *before* testing the count, so a wakeup that
 * races with the test is not lost.
 */
static void __sched __down(struct semaphore * sem)
{
	struct task_struct *tsk = current;
	DECLARE_WAITQUEUE(wait, tsk);

	tsk->state = TASK_UNINTERRUPTIBLE;
	add_wait_queue_exclusive(&sem->wait, &wait);

	/* __sem_update_count(sem, -1) clamps a negative count to zero,
	 * decrements, and returns the old value; we may proceed only
	 * when the old count was positive.
	 */
	while (__sem_update_count(sem, -1) <= 0) {
		schedule();
		/* Re-arm before re-testing the count (see note above). */
		tsk->state = TASK_UNINTERRUPTIBLE;
	}
	remove_wait_queue(&sem->wait, &wait);
	tsk->state = TASK_RUNNING;

	/* Because the count is clamped at zero, one up() may need to
	 * ripple to further sleepers; pass the wakeup along (this
	 * mirrors the PPC scheme this file is ported from).
	 */
	wake_up(&sem->wait);
}
/*
 * Acquire the semaphore, sleeping uninterruptibly if it is not
 * available.  Fast path is a lock-free decrement; if the old count was
 * below 1 the out-of-line slow path calls __down() to sleep.
 */
void __sched down(struct semaphore *sem)
{
	might_sleep();
	/* This atomically does:
	 *	old_val = sem->count;
	 *	new_val = sem->count - 1;
	 *	sem->count = new_val;
	 *	if (old_val < 1)
	 *		__down(sem);
	 *
	 * The (old_val < 1) test is equivalent to
	 * the more straightforward (new_val < 0),
	 * but it is easier to test the former because
	 * of how the CAS instruction works.
	 */

	__asm__ __volatile__("\n"
"	! down sem(%0)\n"
"1:	lduw	[%0], %%g1\n"			/* %g1 = old count */
"	sub	%%g1, 1, %%g7\n"		/* %g7 = old count - 1 */
"	cas	[%0], %%g1, %%g7\n"		/* on success %g7 = old count */
"	cmp	%%g1, %%g7\n"
"	bne,pn	%%icc, 1b\n"			/* CAS lost the race: retry */
"	 cmp	%%g7, 1\n"			/* delay slot: test old count */
"	bl,pn	%%icc, 3f\n"			/* old < 1: contended, slow path */
"	 membar	#StoreLoad | #StoreStore\n"	/* delay slot: acquire ordering */
"2:\n"
"	.subsection 2\n"			/* out-of-line slow path */
"3:	mov	%0, %%g1\n"			/* save sem across the register window */
"	save	%%sp, -160, %%sp\n"
"	call	%1\n"				/* call __down(sem) */
"	 mov	%%g1, %%o0\n"			/* delay slot: argument setup */
"	ba,pt	%%xcc, 2b\n"
"	 restore\n"
"	.previous\n"
	: : "r" (sem), "i" (__down)
	: "g1", "g2", "g3", "g7", "memory", "cc");
}
/*
 * Try to acquire the semaphore without sleeping.
 * Returns 0 if the semaphore was acquired, 1 if it was not
 * (note: the inverse of down_interruptible()'s convention).
 */
int down_trylock(struct semaphore *sem)
{
	int ret;

	/* This atomically does:
	 *	old_val = sem->count;
	 *	new_val = sem->count - 1;
	 *	if (old_val < 1) {
	 *		ret = 1;
	 *	} else {
	 *		sem->count = new_val;
	 *		ret = 0;
	 *	}
	 *
	 * The (old_val < 1) test is equivalent to
	 * the more straightforward (new_val < 0),
	 * but it is easier to test the former because
	 * of how the CAS instruction works.
	 */

	__asm__ __volatile__("\n"
"	! down_trylock sem(%1) ret(%0)\n"
"1:	lduw	[%1], %%g1\n"			/* %g1 = old count */
"	sub	%%g1, 1, %%g7\n"		/* %g7 = old count - 1 */
"	cmp	%%g1, 1\n"
"	bl,pn	%%icc, 2f\n"			/* old < 1: fail without the CAS */
"	 mov	1, %0\n"			/* delay slot: ret = 1 (failure) */
"	cas	[%1], %%g1, %%g7\n"		/* attempt the decrement */
"	cmp	%%g1, %%g7\n"
"	bne,pn	%%icc, 1b\n"			/* CAS lost the race: retry */
"	 mov	0, %0\n"			/* delay slot: ret = 0 (acquired) */
"	membar	#StoreLoad | #StoreStore\n"	/* acquire ordering on success */
"2:\n"
	: "=&r" (ret)
	: "r" (sem)
	: "g1", "g7", "memory", "cc");

	return ret;
}
/*
 * Slow path of down_interruptible(): like __down(), but the sleep is
 * TASK_INTERRUPTIBLE and a pending signal aborts the wait.
 * Returns 0 on acquisition, -EINTR if interrupted by a signal.
 */
static int __sched __down_interruptible(struct semaphore * sem)
{
	int retval = 0;
	struct task_struct *tsk = current;
	DECLARE_WAITQUEUE(wait, tsk);

	/* State must be set before the count test so a racing wakeup
	 * is not lost (see __down()).
	 */
	tsk->state = TASK_INTERRUPTIBLE;
	add_wait_queue_exclusive(&sem->wait, &wait);

	while (__sem_update_count(sem, -1) <= 0) {
		if (signal_pending(current)) {
			/* Undo our decrement: __sem_update_count(sem, 0)
			 * clamps the (negative) count back toward zero,
			 * since we are no longer a waiter.
			 */
			__sem_update_count(sem, 0);
			retval = -EINTR;
			break;
		}
		schedule();
		tsk->state = TASK_INTERRUPTIBLE;	/* re-arm before re-test */
	}
	tsk->state = TASK_RUNNING;
	remove_wait_queue(&sem->wait, &wait);

	/* Ripple the wakeup to further sleepers, as in __down(). */
	wake_up(&sem->wait);
	return retval;
}
/*
 * Acquire the semaphore, sleeping interruptibly if it is not available.
 * Returns 0 on success, -EINTR if the sleep was interrupted by a
 * signal.  Fast path is a lock-free decrement; the out-of-line slow
 * path calls __down_interruptible(), whose return value (in %o0, tied
 * to ret via the "0" constraint) becomes ours.
 */
int __sched down_interruptible(struct semaphore *sem)
{
	int ret = 0;
	
	might_sleep();
	/* This atomically does:
	 *	old_val = sem->count;
	 *	new_val = sem->count - 1;
	 *	sem->count = new_val;
	 *	if (old_val < 1)
	 *		ret = __down_interruptible(sem);
	 *
	 * The (old_val < 1) test is equivalent to
	 * the more straightforward (new_val < 0),
	 * but it is easier to test the former because
	 * of how the CAS instruction works.
	 */

	__asm__ __volatile__("\n"
"	! down_interruptible sem(%2) ret(%0)\n"
"1:	lduw	[%2], %%g1\n"			/* %g1 = old count */
"	sub	%%g1, 1, %%g7\n"		/* %g7 = old count - 1 */
"	cas	[%2], %%g1, %%g7\n"		/* on success %g7 = old count */
"	cmp	%%g1, %%g7\n"
"	bne,pn	%%icc, 1b\n"			/* CAS lost the race: retry */
"	 cmp	%%g7, 1\n"			/* delay slot: test old count */
"	bl,pn	%%icc, 3f\n"			/* old < 1: contended, slow path */
"	 membar	#StoreLoad | #StoreStore\n"	/* delay slot: acquire ordering */
"2:\n"
"	.subsection 2\n"			/* out-of-line slow path */
"3:	mov	%2, %%g1\n"			/* save sem across the register window */
"	save	%%sp, -160, %%sp\n"
"	call	%3\n"				/* ret = __down_interruptible(sem) */
"	 mov	%%g1, %%o0\n"			/* delay slot: argument setup */
"	ba,pt	%%xcc, 2b\n"
"	 restore\n"
"	.previous\n"
	: "=r" (ret)
	: "0" (ret), "r" (sem), "i" (__down_interruptible)
	: "g1", "g2", "g3", "g7", "memory", "cc");

	return ret;
}