/* linux/include/asm-m32r/semaphore.h */
  1. #ifndef _ASM_M32R_SEMAPHORE_H
  2. #define _ASM_M32R_SEMAPHORE_H
  3. #include <linux/linkage.h>
  4. #ifdef __KERNEL__
  5. /*
  6. * SMP- and interrupt-safe semaphores..
  7. *
  8. * Copyright (C) 1996 Linus Torvalds
  9. * Copyright (C) 2004 Hirokazu Takata <takata at linux-m32r.org>
  10. */
  11. #include <linux/config.h>
  12. #include <linux/wait.h>
  13. #include <linux/rwsem.h>
  14. #include <asm/assembler.h>
  15. #include <asm/system.h>
  16. #include <asm/atomic.h>
/*
 * Counting semaphore.
 *
 * count:    the semaphore value; a negative value after a decrement
 *           means the caller must block (see down() below).
 * sleepers: bookkeeping used by the out-of-line contended-path
 *           helpers (__down and friends) to rebalance `count`.
 * wait:     queue of tasks blocked on this semaphore.
 *
 * NOTE(review): field order is relied upon by the assembly helpers —
 * do not reorder without checking the asm side.
 */
struct semaphore {
	atomic_t count;
	int sleepers;
	wait_queue_head_t wait;
};
/*
 * Static initializer for a semaphore with an initial count of `n`.
 * `name` is the variable being initialized (needed so the embedded
 * wait-queue head can reference itself).
 */
#define __SEMAPHORE_INITIALIZER(name, n) \
{ \
	.count = ATOMIC_INIT(n), \
	.sleepers = 0, \
	.wait = __WAIT_QUEUE_HEAD_INITIALIZER((name).wait) \
}

/* A mutex is simply a semaphore whose count starts at 1. */
#define __MUTEX_INITIALIZER(name) \
	__SEMAPHORE_INITIALIZER(name,1)

/* Define and statically initialize a semaphore variable. */
#define __DECLARE_SEMAPHORE_GENERIC(name,count) \
	struct semaphore name = __SEMAPHORE_INITIALIZER(name,count)

/* Unlocked mutex (count 1) and pre-locked mutex (count 0). */
#define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name,1)
#define DECLARE_MUTEX_LOCKED(name) __DECLARE_SEMAPHORE_GENERIC(name,0)
  34. static inline void sema_init (struct semaphore *sem, int val)
  35. {
  36. /*
  37. * *sem = (struct semaphore)__SEMAPHORE_INITIALIZER((*sem),val);
  38. *
  39. * i'd rather use the more flexible initialization above, but sadly
  40. * GCC 2.7.2.3 emits a bogus warning. EGCS doesnt. Oh well.
  41. */
  42. atomic_set(&sem->count, val);
  43. sem->sleepers = 0;
  44. init_waitqueue_head(&sem->wait);
  45. }
  46. static inline void init_MUTEX (struct semaphore *sem)
  47. {
  48. sema_init(sem, 1);
  49. }
  50. static inline void init_MUTEX_LOCKED (struct semaphore *sem)
  51. {
  52. sema_init(sem, 0);
  53. }
/*
 * Out-of-line slow-path helpers, defined elsewhere in the m32r port.
 * The *_failed variants use a special register calling convention
 * (parameters in registers, as noted below) and are entered from
 * assembly; the __down/__up variants take the semaphore normally and
 * handle the actual sleeping/waking via sem->wait and sem->sleepers.
 */
asmlinkage void __down_failed(void /* special register calling convention */);
asmlinkage int __down_failed_interruptible(void /* params in registers */);
asmlinkage int __down_failed_trylock(void /* params in registers */);
asmlinkage void __up_wakeup(void /* special register calling convention */);

asmlinkage void __down(struct semaphore * sem);
asmlinkage int __down_interruptible(struct semaphore * sem);
asmlinkage int __down_trylock(struct semaphore * sem);
asmlinkage void __up(struct semaphore * sem);
/*
 * Atomically decrement the semaphore's count. If it goes negative,
 * block the calling thread in the TASK_UNINTERRUPTIBLE state.
 *
 * The decrement itself is done with interrupts disabled inside a
 * LOCK/UNLOCK pair so it is atomic with respect to both interrupts
 * and other CPUs; only the (unlikely) contended case takes the
 * out-of-line __down() path.
 */
static inline void down(struct semaphore * sem)
{
	unsigned long flags;
	long count;

	might_sleep();	/* may block: must not be called from atomic context */
	local_irq_save(flags);
	__asm__ __volatile__ (
		"# down \n\t"
		DCACHE_CLEAR("%0", "r4", "%1")
		M32R_LOCK" %0, @%1; \n\t"
		"addi %0, #-1; \n\t"
		M32R_UNLOCK" %0, @%1; \n\t"
		: "=&r" (count)
		: "r" (&sem->count)
		: "memory"
#ifdef CONFIG_CHIP_M32700_TS1
		/* DCACHE_CLEAR uses r4 as a scratch register on TS1 silicon */
		, "r4"
#endif	/* CONFIG_CHIP_M32700_TS1 */
	);
	local_irq_restore(flags);

	/* count is the post-decrement value: negative => contended. */
	if (unlikely(count < 0))
		__down(sem);
}
/*
 * Interruptible try to acquire a semaphore. If we obtained
 * it, return zero. If we were interrupted, returns -EINTR.
 *
 * Same atomic decrement as down(); the contended case defers to
 * __down_interruptible(), whose result (0 or -EINTR — see the
 * helper's contract) is returned to the caller.
 */
static inline int down_interruptible(struct semaphore * sem)
{
	unsigned long flags;
	long count;
	int result = 0;

	might_sleep();	/* may block: must not be called from atomic context */
	local_irq_save(flags);
	__asm__ __volatile__ (
		"# down_interruptible \n\t"
		DCACHE_CLEAR("%0", "r4", "%1")
		M32R_LOCK" %0, @%1; \n\t"
		"addi %0, #-1; \n\t"
		M32R_UNLOCK" %0, @%1; \n\t"
		: "=&r" (count)
		: "r" (&sem->count)
		: "memory"
#ifdef CONFIG_CHIP_M32700_TS1
		/* DCACHE_CLEAR uses r4 as a scratch register on TS1 silicon */
		, "r4"
#endif	/* CONFIG_CHIP_M32700_TS1 */
	);
	local_irq_restore(flags);

	/* Uncontended fast path keeps result == 0 (acquired). */
	if (unlikely(count < 0))
		result = __down_interruptible(sem);

	return result;
}
/*
 * Non-blockingly attempt to down() a semaphore.
 * Returns zero if we acquired it.
 *
 * Note the count is decremented unconditionally; if it went negative
 * the semaphore was not actually available and __down_trylock() is
 * called to undo the damage and report failure. No might_sleep():
 * this path never blocks and is safe in atomic context.
 */
static inline int down_trylock(struct semaphore * sem)
{
	unsigned long flags;
	long count;
	int result = 0;

	local_irq_save(flags);
	__asm__ __volatile__ (
		"# down_trylock \n\t"
		DCACHE_CLEAR("%0", "r4", "%1")
		M32R_LOCK" %0, @%1; \n\t"
		"addi %0, #-1; \n\t"
		M32R_UNLOCK" %0, @%1; \n\t"
		: "=&r" (count)
		: "r" (&sem->count)
		: "memory"
#ifdef CONFIG_CHIP_M32700_TS1
		/* DCACHE_CLEAR uses r4 as a scratch register on TS1 silicon */
		, "r4"
#endif	/* CONFIG_CHIP_M32700_TS1 */
	);
	local_irq_restore(flags);

	/* Negative result => we failed to get it; fix up and report. */
	if (unlikely(count < 0))
		result = __down_trylock(sem);

	return result;
}
/*
 * Note! This is subtle. We jump to wake people up only if
 * the semaphore was negative (== somebody was waiting on it).
 * The default case (no contention) will result in NO
 * jumps for both down() and up().
 */
static inline void up(struct semaphore * sem)
{
	unsigned long flags;
	long count;

	local_irq_save(flags);
	__asm__ __volatile__ (
		"# up \n\t"
		DCACHE_CLEAR("%0", "r4", "%1")
		M32R_LOCK" %0, @%1; \n\t"
		"addi %0, #1; \n\t"
		M32R_UNLOCK" %0, @%1; \n\t"
		: "=&r" (count)
		: "r" (&sem->count)
		: "memory"
#ifdef CONFIG_CHIP_M32700_TS1
		/* DCACHE_CLEAR uses r4 as a scratch register on TS1 silicon */
		, "r4"
#endif	/* CONFIG_CHIP_M32700_TS1 */
	);
	local_irq_restore(flags);

	/* count is post-increment: <= 0 means waiters exist, wake one. */
	if (unlikely(count <= 0))
		__up(sem);
}
  174. #endif /* __KERNEL__ */
  175. #endif /* _ASM_M32R_SEMAPHORE_H */