/* semaphore.c: Sparc64 semaphore implementation.
 *
 * This is basically the PPC semaphore scheme ported to use
 * the sparc64 atomic instructions, so see the PPC code for
 * credits.
 */

#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/init.h>
/*
 * Atomically update sem->count.
 * This does the equivalent of the following:
 *
 *	old_count = sem->count;
 *	tmp = MAX(old_count, 0) + incr;
 *	sem->count = tmp;
 *	return old_count;
 *
 * Clamping the old count at zero means a negative count (tasks
 * sleeping) collapses to 0 before incr is applied; callers rely on
 * the returned pre-update value to decide whether they got the
 * semaphore.  Implemented as a CAS retry loop.
 */
static inline int __sem_update_count(struct semaphore *sem, int incr)
{
	int old_count, tmp;

	__asm__ __volatile__("\n"
" ! __sem_update_count old_count(%0) tmp(%1) incr(%4) &sem->count(%3)\n"
"1: ldsw [%3], %0\n"				/* old_count = sem->count (sign-extended) */
" mov %0, %1\n"
" cmp %0, 0\n"
" movl %%icc, 0, %1\n"				/* tmp = MAX(old_count, 0) */
" add %1, %4, %1\n"				/* tmp += incr */
" cas [%3], %0, %1\n"				/* if mem still == old_count, store tmp; %1 <- old mem value */
" cmp %0, %1\n"
" membar #StoreLoad | #StoreStore\n"
" bne,pn %%icc, 1b\n"				/* CAS lost a race: reload and retry */
" nop\n"
	: "=&r" (old_count), "=&r" (tmp), "=m" (sem->count)
	: "r" (&sem->count), "r" (incr), "m" (sem->count)
	: "cc");

	return old_count;
}
  39. static void __up(struct semaphore *sem)
  40. {
  41. __sem_update_count(sem, 1);
  42. wake_up(&sem->wait);
  43. }
/*
 * up() - release the semaphore.
 *
 * Fast path is a lock-free increment of sem->count; the slow path
 * (placed out of line in .subsection 2) calls __up() to wake a
 * waiter when the new count shows someone was sleeping.
 */
void up(struct semaphore *sem)
{
	/* This atomically does:
	 *	old_val = sem->count;
	 *	new_val = sem->count + 1;
	 *	sem->count = new_val;
	 *	if (old_val < 0)
	 *		__up(sem);
	 *
	 * The (old_val < 0) test is equivalent to
	 * the more straightforward (new_val <= 0),
	 * but it is easier to test the former because
	 * of how the CAS instruction works.
	 */
	__asm__ __volatile__("\n"
" ! up sem(%0)\n"
" membar #StoreLoad | #LoadLoad\n"		/* order prior accesses before the release */
"1: lduw [%0], %%g1\n"				/* %g1 = old_val */
" add %%g1, 1, %%g7\n"				/* %g7 = old_val + 1 */
" cas [%0], %%g1, %%g7\n"			/* install if unchanged; %g7 <- old mem value */
" cmp %%g1, %%g7\n"
" bne,pn %%icc, 1b\n"				/* CAS lost a race: retry */
" addcc %%g7, 1, %%g0\n"			/* delay slot: set icc from new_val */
" membar #StoreLoad | #StoreStore\n"
" ble,pn %%icc, 3f\n"				/* new_val <= 0: a sleeper exists */
" nop\n"
"2:\n"
" .subsection 2\n"
"3: mov %0, %%g1\n"				/* stash sem across the window switch */
" save %%sp, -160, %%sp\n"
" call %1\n"					/* __up(sem) */
" mov %%g1, %%o0\n"				/* delay slot: pass argument */
" ba,pt %%xcc, 2b\n"
" restore\n"					/* __up returns void, nothing to propagate */
" .previous\n"
	: : "r" (sem), "i" (__up)
	: "g1", "g2", "g3", "g7", "memory", "cc");
}
  82. static void __sched __down(struct semaphore * sem)
  83. {
  84. struct task_struct *tsk = current;
  85. DECLARE_WAITQUEUE(wait, tsk);
  86. tsk->state = TASK_UNINTERRUPTIBLE;
  87. add_wait_queue_exclusive(&sem->wait, &wait);
  88. while (__sem_update_count(sem, -1) <= 0) {
  89. schedule();
  90. tsk->state = TASK_UNINTERRUPTIBLE;
  91. }
  92. remove_wait_queue(&sem->wait, &wait);
  93. tsk->state = TASK_RUNNING;
  94. wake_up(&sem->wait);
  95. }
/*
 * down() - acquire the semaphore, sleeping until it is available.
 * Calls schedule() on contention, so it may only be used from
 * contexts that can sleep (hence might_sleep()).
 */
void __sched down(struct semaphore *sem)
{
	might_sleep();
	/* This atomically does:
	 *	old_val = sem->count;
	 *	new_val = sem->count - 1;
	 *	sem->count = new_val;
	 *	if (old_val < 1)
	 *		__down(sem);
	 *
	 * The (old_val < 1) test is equivalent to
	 * the more straightforward (new_val < 0),
	 * but it is easier to test the former because
	 * of how the CAS instruction works.
	 */
	__asm__ __volatile__("\n"
" ! down sem(%0)\n"
"1: lduw [%0], %%g1\n"				/* %g1 = old_val */
" sub %%g1, 1, %%g7\n"				/* %g7 = old_val - 1 */
" cas [%0], %%g1, %%g7\n"			/* install if unchanged; %g7 <- old mem value */
" cmp %%g1, %%g7\n"
" bne,pn %%icc, 1b\n"				/* CAS lost a race: retry */
" cmp %%g7, 1\n"				/* delay slot: test old_val */
" membar #StoreLoad | #StoreStore\n"
" bl,pn %%icc, 3f\n"				/* old_val < 1: contended, go sleep */
" nop\n"
"2:\n"
" .subsection 2\n"
"3: mov %0, %%g1\n"				/* stash sem across the window switch */
" save %%sp, -160, %%sp\n"
" call %1\n"					/* __down(sem) */
" mov %%g1, %%o0\n"				/* delay slot: pass argument */
" ba,pt %%xcc, 2b\n"
" restore\n"					/* __down returns void, nothing to propagate */
" .previous\n"
	: : "r" (sem), "i" (__down)
	: "g1", "g2", "g3", "g7", "memory", "cc");
}
/*
 * down_trylock() - try to acquire the semaphore without sleeping.
 * Returns 0 if the semaphore was acquired, 1 if it was busy
 * (note the inverted sense relative to down_interruptible()).
 */
int down_trylock(struct semaphore *sem)
{
	int ret;

	/* This atomically does:
	 *	old_val = sem->count;
	 *	new_val = sem->count - 1;
	 *	if (old_val < 1) {
	 *		ret = 1;
	 *	} else {
	 *		sem->count = new_val;
	 *		ret = 0;
	 *	}
	 *
	 * The (old_val < 1) test is equivalent to
	 * the more straightforward (new_val < 0),
	 * but it is easier to test the former because
	 * of how the CAS instruction works.
	 */
	__asm__ __volatile__("\n"
" ! down_trylock sem(%1) ret(%0)\n"
"1: lduw [%1], %%g1\n"				/* %g1 = old_val */
" sub %%g1, 1, %%g7\n"				/* %g7 = old_val - 1 */
" cmp %%g1, 1\n"
" bl,pn %%icc, 2f\n"				/* old_val < 1: busy, don't touch memory */
" mov 1, %0\n"					/* delay slot: ret = 1 (failure) */
" cas [%1], %%g1, %%g7\n"			/* install if unchanged; %g7 <- old mem value */
" cmp %%g1, %%g7\n"
" bne,pn %%icc, 1b\n"				/* CAS lost a race: retry from the load */
" mov 0, %0\n"					/* delay slot: ret = 0 (acquired) */
" membar #StoreLoad | #StoreStore\n"
"2:\n"
	: "=&r" (ret)
	: "r" (sem)
	: "g1", "g7", "memory", "cc");

	return ret;
}
  170. static int __sched __down_interruptible(struct semaphore * sem)
  171. {
  172. int retval = 0;
  173. struct task_struct *tsk = current;
  174. DECLARE_WAITQUEUE(wait, tsk);
  175. tsk->state = TASK_INTERRUPTIBLE;
  176. add_wait_queue_exclusive(&sem->wait, &wait);
  177. while (__sem_update_count(sem, -1) <= 0) {
  178. if (signal_pending(current)) {
  179. __sem_update_count(sem, 0);
  180. retval = -EINTR;
  181. break;
  182. }
  183. schedule();
  184. tsk->state = TASK_INTERRUPTIBLE;
  185. }
  186. tsk->state = TASK_RUNNING;
  187. remove_wait_queue(&sem->wait, &wait);
  188. wake_up(&sem->wait);
  189. return retval;
  190. }
  191. int __sched down_interruptible(struct semaphore *sem)
  192. {
  193. int ret = 0;
  194. might_sleep();
  195. /* This atomically does:
  196. * old_val = sem->count;
  197. * new_val = sem->count - 1;
  198. * sem->count = new_val;
  199. * if (old_val < 1)
  200. * ret = __down_interruptible(sem);
  201. *
  202. * The (old_val < 1) test is equivalent to
  203. * the more straightforward (new_val < 0),
  204. * but it is easier to test the former because
  205. * of how the CAS instruction works.
  206. */
  207. __asm__ __volatile__("\n"
  208. " ! down_interruptible sem(%2) ret(%0)\n"
  209. "1: lduw [%2], %%g1\n"
  210. " sub %%g1, 1, %%g7\n"
  211. " cas [%2], %%g1, %%g7\n"
  212. " cmp %%g1, %%g7\n"
  213. " bne,pn %%icc, 1b\n"
  214. " cmp %%g7, 1\n"
  215. " membar #StoreLoad | #StoreStore\n"
  216. " bl,pn %%icc, 3f\n"
  217. " nop\n"
  218. "2:\n"
  219. " .subsection 2\n"
  220. "3: mov %2, %%g1\n"
  221. " save %%sp, -160, %%sp\n"
  222. " call %3\n"
  223. " mov %%g1, %%o0\n"
  224. " ba,pt %%xcc, 2b\n"
  225. " restore\n"
  226. " .previous\n"
  227. : "=r" (ret)
  228. : "0" (ret), "r" (sem), "i" (__down_interruptible)
  229. : "g1", "g2", "g3", "g7", "memory", "cc");
  230. return ret;
  231. }