semaphore.h

#ifndef _H8300_SEMAPHORE_H
#define _H8300_SEMAPHORE_H

#define RW_LOCK_BIAS 0x01000000

#ifndef __ASSEMBLY__

#include <linux/linkage.h>
#include <linux/wait.h>
#include <linux/spinlock.h>
#include <linux/rwsem.h>

#include <asm/system.h>
#include <asm/atomic.h>

/*
 * Interrupt-safe semaphores..
 *
 * (C) Copyright 1996 Linus Torvalds
 *
 * H8/300 version by Yoshinori Sato
 */

struct semaphore {
        atomic_t count;
        int sleepers;
        wait_queue_head_t wait;
};

#define __SEMAPHORE_INITIALIZER(name, n)                                \
{                                                                       \
        .count          = ATOMIC_INIT(n),                               \
        .sleepers       = 0,                                            \
        .wait           = __WAIT_QUEUE_HEAD_INITIALIZER((name).wait)    \
}

#define __DECLARE_SEMAPHORE_GENERIC(name,count) \
        struct semaphore name = __SEMAPHORE_INITIALIZER(name,count)

#define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name,1)
#define DECLARE_MUTEX_LOCKED(name) __DECLARE_SEMAPHORE_GENERIC(name,0)

static inline void sema_init (struct semaphore *sem, int val)
{
        *sem = (struct semaphore)__SEMAPHORE_INITIALIZER(*sem, val);
}

static inline void init_MUTEX (struct semaphore *sem)
{
        sema_init(sem, 1);
}

static inline void init_MUTEX_LOCKED (struct semaphore *sem)
{
        sema_init(sem, 0);
}
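
/*
 * Illustrative usage sketch, not part of the original header: a driver
 * that wants a sleeping mutex around shared state would typically write
 * something like the following (my_lock and my_driver_op are
 * hypothetical names).
 *
 *      static DECLARE_MUTEX(my_lock);
 *
 *      static void my_driver_op(void)
 *      {
 *              down(&my_lock);         may sleep; process context only
 *              ... touch shared state ...
 *              up(&my_lock);
 *      }
 */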

asmlinkage void __down_failed(void /* special register calling convention */);
asmlinkage int  __down_failed_interruptible(void /* params in registers */);
asmlinkage int  __down_failed_trylock(void /* params in registers */);
asmlinkage void __up_wakeup(void /* special register calling convention */);

asmlinkage void __down(struct semaphore * sem);
asmlinkage int  __down_interruptible(struct semaphore * sem);
asmlinkage int  __down_trylock(struct semaphore * sem);
asmlinkage void __up(struct semaphore * sem);

extern spinlock_t semaphore_wake_lock;

/*
 * This is ugly, but we want the default case to fall through.
 * "down_failed" is a special asm handler that calls the C
 * routine that actually waits. See arch/m68k/lib/semaphore.S
 */
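
/*
 * What the asm fast path below amounts to, in rough C (a sketch only,
 * assuming the usual atomic_dec_return() semantics; the real code does
 * the decrement with interrupts masked and passes sem in er0 to the
 * out-of-line handler):
 *
 *      if (atomic_dec_return(&sem->count) < 0)
 *              __down(sem);            contended: sleep until up()
 */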
static inline void down(struct semaphore * sem)
{
        register atomic_t *count asm("er0");

        might_sleep();

        count = &(sem->count);
        __asm__ __volatile__(
                "stc ccr,r3l\n\t"
                "orc #0x80,ccr\n\t"
                "mov.l %2, er1\n\t"
                "dec.l #1,er1\n\t"
                "mov.l er1,%0\n\t"
                "bpl 1f\n\t"
                "ldc r3l,ccr\n\t"
                "mov.l %1,er0\n\t"
                "jsr @___down\n\t"
                "bra 2f\n"
                "1:\n\t"
                "ldc r3l,ccr\n"
                "2:"
                : "=m"(*count)
                : "g"(sem), "m"(*count)
                : "cc", "er1", "er2", "er3");
}

static inline int down_interruptible(struct semaphore * sem)
{
        register atomic_t *count asm("er0");

        might_sleep();

        count = &(sem->count);
        __asm__ __volatile__(
                "stc ccr,r1l\n\t"
                "orc #0x80,ccr\n\t"
                "mov.l %3, er2\n\t"
                "dec.l #1,er2\n\t"
                "mov.l er2,%1\n\t"
                "bpl 1f\n\t"
                "ldc r1l,ccr\n\t"
                "mov.l %2,er0\n\t"
                "jsr @___down_interruptible\n\t"
                "bra 2f\n"
                "1:\n\t"
                "ldc r1l,ccr\n\t"
                "sub.l %0,%0\n\t"
                "2:\n\t"
                : "=r" (count), "=m" (*count)
                : "g"(sem), "m"(*count)
                : "cc", "er1", "er2", "er3");
        return (int)count;
}
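
/*
 * Typical caller pattern for the interruptible variant (a sketch, not
 * part of the original header): a nonzero return means the sleep was
 * broken by a signal and the caller should back out without holding
 * the semaphore.
 *
 *      if (down_interruptible(&my_lock))
 *              return -ERESTARTSYS;
 *      ... critical section ...
 *      up(&my_lock);
 */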
static inline int down_trylock(struct semaphore * sem)
{
        register atomic_t *count asm("er0");

        count = &(sem->count);
        __asm__ __volatile__(
                "stc ccr,r3l\n\t"
                "orc #0x80,ccr\n\t"
                "mov.l %3,er2\n\t"
                "dec.l #1,er2\n\t"
                "mov.l er2,%0\n\t"
                "bpl 1f\n\t"
                "ldc r3l,ccr\n\t"
                "jmp @3f\n\t"
                LOCK_SECTION_START(".align 2\n\t")
                "3:\n\t"
                "mov.l %2,er0\n\t"
                "jsr @___down_trylock\n\t"
                "jmp @2f\n\t"
                LOCK_SECTION_END
                "1:\n\t"
                "ldc r3l,ccr\n\t"
                "sub.l %1,%1\n"
                "2:"
                : "=m" (*count), "=r"(count)
                : "g"(sem), "m"(*count)
                : "cc", "er1", "er2", "er3");
        return (int)count;
}
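
/*
 * Usage sketch (not part of the original header): down_trylock() never
 * sleeps; it returns 0 if the semaphore was acquired and nonzero if it
 * was already held.
 *
 *      if (down_trylock(&my_lock))
 *              return -EBUSY;          could not get it, do not block
 *      ... critical section ...
 *      up(&my_lock);
 */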

/*
 * Note! This is subtle. We jump to wake people up only if
 * the semaphore was negative (== somebody was waiting on it).
 * The default case (no contention) will result in NO
 * jumps for both down() and up().
 */
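
/*
 * In rough C, the asm below amounts to the following (a sketch only,
 * assuming the usual atomic_inc_return() semantics; the real code does
 * the increment with interrupts masked):
 *
 *      if (atomic_inc_return(&sem->count) <= 0)
 *              __up(sem);              someone was waiting: wake them
 */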
static inline void up(struct semaphore * sem)
{
        register atomic_t *count asm("er0");

        count = &(sem->count);
        __asm__ __volatile__(
                "stc ccr,r3l\n\t"
                "orc #0x80,ccr\n\t"
                "mov.l %2,er1\n\t"
                "inc.l #1,er1\n\t"
                "mov.l er1,%0\n\t"
                "ldc r3l,ccr\n\t"
                "sub.l er2,er2\n\t"
                "cmp.l er2,er1\n\t"
                "bgt 1f\n\t"
                "mov.l %1,er0\n\t"
                "jsr @___up\n"
                "1:"
                : "=m"(*count)
                : "g"(sem), "m"(*count)
                : "cc", "er1", "er2", "er3");
}

#endif /* __ASSEMBLY__ */

#endif