semaphore.h

#ifndef _SPARC_SEMAPHORE_H
#define _SPARC_SEMAPHORE_H

/* Dinky, good for nothing, just barely irq safe, Sparc semaphores. */

#ifdef __KERNEL__

#include <asm/atomic.h>
#include <linux/wait.h>
#include <linux/rwsem.h>

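/*
 * The count lives in an atomic24_t: on sparc32 SMP the top byte of the
 * word is used as the spinlock protecting the counter, leaving 24
 * usable bits, hence the name.
 */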
struct semaphore {
        atomic24_t count;
        int sleepers;
        wait_queue_head_t wait;
};

#define __SEMAPHORE_INITIALIZER(name, n) \
{ \
        .count = ATOMIC24_INIT(n), \
        .sleepers = 0, \
        .wait = __WAIT_QUEUE_HEAD_INITIALIZER((name).wait) \
}

#define __MUTEX_INITIALIZER(name) \
        __SEMAPHORE_INITIALIZER(name, 1)

#define __DECLARE_SEMAPHORE_GENERIC(name, count) \
        struct semaphore name = __SEMAPHORE_INITIALIZER(name, count)

#define DECLARE_MUTEX(name)		__DECLARE_SEMAPHORE_GENERIC(name, 1)
#define DECLARE_MUTEX_LOCKED(name)	__DECLARE_SEMAPHORE_GENERIC(name, 0)

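/*
 * Typical usage (illustrative sketch; "foo_sem" is a made-up name):
 *
 *	static DECLARE_MUTEX(foo_sem);
 *
 *	down(&foo_sem);
 *	... critical section ...
 *	up(&foo_sem);
 *
 * DECLARE_MUTEX_LOCKED starts the count at 0 instead of 1, for hand-off
 * patterns where another thread's up() releases the first down().
 */
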
static inline void sema_init(struct semaphore *sem, int val)
{
        atomic24_set(&sem->count, val);
        sem->sleepers = 0;
        init_waitqueue_head(&sem->wait);
}

static inline void init_MUTEX(struct semaphore *sem)
{
        sema_init(sem, 1);
}

static inline void init_MUTEX_LOCKED(struct semaphore *sem)
{
        sema_init(sem, 0);
}

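/*
 * Contended-path entry points, implemented out of line in C
 * (in arch/sparc/kernel/semaphore.c at the time this header shipped).
 */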
extern void __down(struct semaphore *sem);
extern int __down_interruptible(struct semaphore *sem);
extern int __down_trylock(struct semaphore *sem);
extern void __up(struct semaphore *sem);

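/*
 * The ___atomic24_* helpers (arch/sparc/lib/atomic.S) use a private
 * calling convention: %g1 carries the address of the counter, %g2 the
 * value to add or subtract and, on return, the new counter value; %g3,
 * %g4 and %g7 are clobbered.  The helper returns with "jmpl %o7" and
 * restores %o7 from %g4, so each caller below saves %o7 into %g4 and
 * biases %o7 by 8 in the call's delay slot to make that return land
 * just past the delay slot.
 */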
static inline void down(struct semaphore *sem)
{
        register volatile int *ptr asm("g1");	/* %g1: &sem->count (first member, so == sem) */
        register int increment asm("g2");	/* %g2: delta in, new count out */

        might_sleep();

        ptr = &(sem->count.counter);
        increment = 1;

        __asm__ __volatile__(
        "mov %%o7, %%g4\n\t"		/* save return address for the helper */
        "call ___atomic24_sub\n\t"
        " add %%o7, 8, %%o7\n\t"	/* delay slot: bias %o7 so the helper returns here */
        "tst %%g2\n\t"			/* did the count go negative? */
        "bl 2f\n\t"			/* yes: contended, take the slow path */
        " nop\n"
        "1:\n\t"
        ".subsection 2\n"		/* slow path, assembled out of line */
        "2:\n\t"
        "save %%sp, -64, %%sp\n\t"	/* minimal sparc32 frame for the C call */
        "mov %%g1, %%l1\n\t"		/* preserve %g1/%g5 across __down() */
        "mov %%g5, %%l5\n\t"
        "call %3\n\t"			/* __down(sem) */
        " mov %%g1, %%o0\n\t"
        "mov %%l1, %%g1\n\t"
        "ba 1b\n\t"			/* back to the fast path... */
        " restore %%l5, %%g0, %%g5\n\t"	/* ...restoring %g5 as the window pops */
        ".previous\n"
        : "=&r" (increment)
        : "0" (increment), "r" (ptr), "i" (__down)
        : "g3", "g4", "g7", "memory", "cc");
}

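/*
 * Like down(), but the fast path clears %g2 in the branch delay slot so
 * the uncontended case returns 0.  On contention the slow path returns
 * __down_interruptible()'s result (0, or -EINTR if a signal interrupted
 * the sleep): "restore %o0, %g0, %g2" copies %o0 into the global %g2
 * while popping the register window.
 */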
static inline int down_interruptible(struct semaphore *sem)
{
        register volatile int *ptr asm("g1");
        register int increment asm("g2");

        might_sleep();

        ptr = &(sem->count.counter);
        increment = 1;

        __asm__ __volatile__(
        "mov %%o7, %%g4\n\t"
        "call ___atomic24_sub\n\t"
        " add %%o7, 8, %%o7\n\t"
        "tst %%g2\n\t"
        "bl 2f\n\t"
        " clr %%g2\n"
        "1:\n\t"
        ".subsection 2\n"
        "2:\n\t"
        "save %%sp, -64, %%sp\n\t"
        "mov %%g1, %%l1\n\t"
        "mov %%g5, %%l5\n\t"
        "call %3\n\t"
        " mov %%g1, %%o0\n\t"
        "mov %%l1, %%g1\n\t"
        "mov %%l5, %%g5\n\t"
        "ba 1b\n\t"
        " restore %%o0, %%g0, %%g2\n\t"
        ".previous\n"
        : "=&r" (increment)
        : "0" (increment), "r" (ptr), "i" (__down_interruptible)
        : "g3", "g4", "g7", "memory", "cc");

        return increment;
}

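/*
 * Trylock never sleeps, hence no might_sleep().  It speculatively
 * decrements the count; if the result went negative, __down_trylock()
 * sorts out the contended case and its non-zero return reports failure.
 * Returns 0 when the semaphore was acquired.
 */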
static inline int down_trylock(struct semaphore *sem)
{
        register volatile int *ptr asm("g1");
        register int increment asm("g2");

        ptr = &(sem->count.counter);
        increment = 1;

        __asm__ __volatile__(
        "mov %%o7, %%g4\n\t"
        "call ___atomic24_sub\n\t"
        " add %%o7, 8, %%o7\n\t"
        "tst %%g2\n\t"
        "bl 2f\n\t"
        " clr %%g2\n"
        "1:\n\t"
        ".subsection 2\n"
        "2:\n\t"
        "save %%sp, -64, %%sp\n\t"
        "mov %%g1, %%l1\n\t"
        "mov %%g5, %%l5\n\t"
        "call %3\n\t"
        " mov %%g1, %%o0\n\t"
        "mov %%l1, %%g1\n\t"
        "mov %%l5, %%g5\n\t"
        "ba 1b\n\t"
        " restore %%o0, %%g0, %%g2\n\t"
        ".previous\n"
        : "=&r" (increment)
        : "0" (increment), "r" (ptr), "i" (__down_trylock)
        : "g3", "g4", "g7", "memory", "cc");

        return increment;
}

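/*
 * up() adds the count back.  If the result is still <= 0 there may be
 * sleepers, so the slow path calls __up() to wake one of them.
 */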
static inline void up(struct semaphore *sem)
{
        register volatile int *ptr asm("g1");
        register int increment asm("g2");

        ptr = &(sem->count.counter);
        increment = 1;

        __asm__ __volatile__(
        "mov %%o7, %%g4\n\t"
        "call ___atomic24_add\n\t"
        " add %%o7, 8, %%o7\n\t"
        "tst %%g2\n\t"
        "ble 2f\n\t"
        " nop\n"
        "1:\n\t"
        ".subsection 2\n"
        "2:\n\t"
        "save %%sp, -64, %%sp\n\t"
        "mov %%g1, %%l1\n\t"
        "mov %%g5, %%l5\n\t"
        "call %3\n\t"
        " mov %%g1, %%o0\n\t"
        "mov %%l1, %%g1\n\t"
        "ba 1b\n\t"
        " restore %%l5, %%g0, %%g5\n\t"
        ".previous\n"
        : "=&r" (increment)
        : "0" (increment), "r" (ptr), "i" (__up)
        : "g3", "g4", "g7", "memory", "cc");
}

#endif /* __KERNEL__ */

#endif /* !(_SPARC_SEMAPHORE_H) */