#ifndef _H8300_SEMAPHORE_H
#define _H8300_SEMAPHORE_H

#define RW_LOCK_BIAS 0x01000000

#ifndef __ASSEMBLY__

#include <linux/linkage.h>
#include <linux/wait.h>
#include <linux/spinlock.h>
#include <linux/rwsem.h>

#include <asm/system.h>
#include <asm/atomic.h>

/*
 * Interrupt-safe semaphores..
 *
 * (C) Copyright 1996 Linus Torvalds
 *
 * H8/300 version by Yoshinori Sato
 */
struct semaphore {
	atomic_t count;			/* available units; goes <= 0 when tasks have to wait */
	int sleepers;
	wait_queue_head_t wait;		/* tasks sleeping in the __down() slow path */
};

#define __SEMAPHORE_INITIALIZER(name, n)				\
{									\
	.count		= ATOMIC_INIT(n),				\
	.sleepers	= 0,						\
	.wait		= __WAIT_QUEUE_HEAD_INITIALIZER((name).wait)	\
}

#define __DECLARE_SEMAPHORE_GENERIC(name,count) \
	struct semaphore name = __SEMAPHORE_INITIALIZER(name,count)

#define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name,1)

static inline void sema_init (struct semaphore *sem, int val)
{
	*sem = (struct semaphore)__SEMAPHORE_INITIALIZER(*sem, val);
}

static inline void init_MUTEX (struct semaphore *sem)
{
	sema_init(sem, 1);
}

static inline void init_MUTEX_LOCKED (struct semaphore *sem)
{
	sema_init(sem, 0);
}
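
/*
 * Usage sketch (illustrative only, not part of the original header; the
 * lock name and error handling below are hypothetical):
 *
 *	static DECLARE_MUTEX(hypothetical_lock);
 *
 *	int hypothetical_write(void)
 *	{
 *		if (down_interruptible(&hypothetical_lock))
 *			return -ERESTARTSYS;	// interrupted by a signal
 *		// ... touch the shared state ...
 *		up(&hypothetical_lock);
 *		return 0;
 *	}
 */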

asmlinkage void __down_failed(void /* special register calling convention */);
asmlinkage int  __down_failed_interruptible(void /* params in registers */);
asmlinkage int  __down_failed_trylock(void /* params in registers */);
asmlinkage void __up_wakeup(void /* special register calling convention */);

asmlinkage void __down(struct semaphore * sem);
asmlinkage int  __down_interruptible(struct semaphore * sem);
asmlinkage int  __down_trylock(struct semaphore * sem);
asmlinkage void __up(struct semaphore * sem);

extern spinlock_t semaphore_wake_lock;

/*
 * This is ugly, but we want the default case to fall through.
 * "down_failed" is a special asm handler that calls the C
 * routine that actually waits. See arch/m68k/lib/semaphore.S
 */
static inline void down(struct semaphore * sem)
{
	register atomic_t *count asm("er0");

	might_sleep();

	count = &(sem->count);
	__asm__ __volatile__(
		"stc ccr,r3l\n\t"	/* save CCR (interrupt mask) */
		"orc #0x80,ccr\n\t"	/* disable interrupts */
		"mov.l %2, er1\n\t"	/* er1 = sem->count */
		"dec.l #1,er1\n\t"
		"mov.l er1,%0\n\t"	/* sem->count = er1 */
		"bpl 1f\n\t"		/* still >= 0: uncontended, done */
		"ldc r3l,ccr\n\t"	/* restore interrupts */
		"mov.l %1,er0\n\t"
		"jsr @___down\n\t"	/* contended: sleep in __down() */
		"bra 2f\n"
		"1:\n\t"
		"ldc r3l,ccr\n"		/* restore interrupts */
		"2:"
		: "=m"(*count)
		: "g"(sem),"m"(*count)
		: "cc", "er1", "er2", "er3");
}
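
/*
 * C-level equivalent of the fast path above (illustrative sketch only;
 * the real code must do the decrement with interrupts masked, which is
 * why it is written in asm):
 *
 *	if (atomic_dec_return(&sem->count) < 0)
 *		__down(sem);	// slow path: sleep until an up()
 */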

static inline int down_interruptible(struct semaphore * sem)
{
	register atomic_t *count asm("er0");

	might_sleep();

	count = &(sem->count);
	__asm__ __volatile__(
		"stc ccr,r1l\n\t"
		"orc #0x80,ccr\n\t"
		"mov.l %3, er2\n\t"
		"dec.l #1,er2\n\t"
		"mov.l er2,%1\n\t"
		"bpl 1f\n\t"
		"ldc r1l,ccr\n\t"
		"mov.l %2,er0\n\t"
		"jsr @___down_interruptible\n\t"
		"bra 2f\n"
		"1:\n\t"
		"ldc r1l,ccr\n\t"
		"sub.l %0,%0\n\t"
		"2:\n\t"
		: "=r" (count),"=m" (*count)
		: "g"(sem),"m"(*count)
		: "cc", "er1", "er2", "er3");
	return (int)count;
}
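
/*
 * Same pattern as down(), but the slow path can be interrupted by a
 * signal, in which case a nonzero value (-EINTR) is returned.  Roughly
 * (illustrative only):
 *
 *	if (atomic_dec_return(&sem->count) < 0)
 *		return __down_interruptible(sem);
 *	return 0;
 */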

static inline int down_trylock(struct semaphore * sem)
{
	register atomic_t *count asm("er0");

	count = &(sem->count);
	__asm__ __volatile__(
		"stc ccr,r3l\n\t"
		"orc #0x80,ccr\n\t"
		"mov.l %3,er2\n\t"
		"dec.l #1,er2\n\t"
		"mov.l er2,%0\n\t"
		"bpl 1f\n\t"
		"ldc r3l,ccr\n\t"
		"jmp @3f\n\t"
		LOCK_SECTION_START(".align 2\n\t")
		"3:\n\t"
		"mov.l %2,er0\n\t"
		"jsr @___down_trylock\n\t"
		"jmp @2f\n\t"
		LOCK_SECTION_END
		"1:\n\t"
		"ldc r3l,ccr\n\t"
		"sub.l %1,%1\n"
		"2:"
		: "=m" (*count),"=r"(count)
		: "g"(sem),"m"(*count)
		: "cc", "er1","er2", "er3");
	return (int)count;
}
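
/*
 * down_trylock() never sleeps: it returns 0 if the semaphore was
 * acquired and nonzero if taking it would have blocked.  A hypothetical
 * caller that must not sleep could use it like this (illustrative only):
 *
 *	if (down_trylock(&hypothetical_lock))
 *		return -EBUSY;	// busy, try again later
 *	// ... critical section ...
 *	up(&hypothetical_lock);
 */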

/*
 * Note! This is subtle. We jump to wake people up only if
 * the semaphore was negative (== somebody was waiting on it).
 * The default case (no contention) will result in NO
 * jumps for both down() and up().
 */
static inline void up(struct semaphore * sem)
{
	register atomic_t *count asm("er0");

	count = &(sem->count);
	__asm__ __volatile__(
		"stc ccr,r3l\n\t"	/* save CCR (interrupt mask) */
		"orc #0x80,ccr\n\t"	/* disable interrupts */
		"mov.l %2,er1\n\t"	/* er1 = sem->count */
		"inc.l #1,er1\n\t"
		"mov.l er1,%0\n\t"	/* sem->count = er1 */
		"ldc r3l,ccr\n\t"	/* restore interrupts */
		"sub.l er2,er2\n\t"
		"cmp.l er2,er1\n\t"
		"bgt 1f\n\t"		/* new count > 0: nobody waiting */
		"mov.l %1,er0\n\t"
		"jsr @___up\n"		/* wake up a waiter */
		"1:"
		: "=m"(*count)
		: "g"(sem),"m"(*count)
		: "cc", "er1", "er2", "er3");
}
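
/*
 * Roughly equivalent C for the sequence above (illustrative only):
 *
 *	if (atomic_inc_return(&sem->count) <= 0)
 *		__up(sem);	// the count was negative: wake a sleeper
 */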

#endif /* __ASSEMBLY__ */
#endif