#ifndef _X86_64_SEMAPHORE_H
#define _X86_64_SEMAPHORE_H

#include <linux/linkage.h>

#ifdef __KERNEL__

/*
 * SMP- and interrupt-safe semaphores.
 *
 * (C) Copyright 1996 Linus Torvalds
 *
 * Modified 1996-12-23 by Dave Grothe <dave@gcom.com> to fix bugs in
 *                     the original code and to make semaphore waits
 *                     interruptible so that processes waiting on
 *                     semaphores can be killed.
 * Modified 1999-02-14 by Andrea Arcangeli, split the sched.c helper
 *                     functions in asm/semaphore-helper.h while fixing a
 *                     potential and subtle race discovered by Ulrich Schmid
 *                     in down_interruptible(). Since I started to play here I
 *                     also implemented the `trylock' semaphore operation.
 *          1999-07-02 Artur Skawina <skawina@geocities.com>
 *                     Optimized "0(ecx)" -> "(ecx)" (the assembler does not
 *                     do this). Changed calling sequences from push/jmp to
 *                     traditional call/ret.
 * Modified 2001-01-01 Andreas Franck <afranck@gmx.de>
 *                     Some hacks to ensure compatibility with recent
 *                     GCC snapshots, to avoid stack corruption when compiling
 *                     with -fomit-frame-pointer. It is not clear whether this
 *                     will be fixed in GCC, as our previous implementation
 *                     was a bit dubious.
 *
 * If you would like to see an analysis of this implementation, please
 * ftp to gcom.com and download the file
 * /pub/linux/src/semaphore/semaphore-2.0.24.tar.gz.
 */

#include <asm/system.h>
#include <asm/atomic.h>
#include <asm/rwlock.h>
#include <linux/wait.h>
#include <linux/rwsem.h>
#include <linux/stringify.h>
struct semaphore {
	atomic_t count;
	int sleepers;
	wait_queue_head_t wait;
};

#define __SEMAPHORE_INITIALIZER(name, n)				\
{									\
	.count		= ATOMIC_INIT(n),				\
	.sleepers	= 0,						\
	.wait		= __WAIT_QUEUE_HEAD_INITIALIZER((name).wait)	\
}

#define __MUTEX_INITIALIZER(name) \
	__SEMAPHORE_INITIALIZER(name,1)

#define __DECLARE_SEMAPHORE_GENERIC(name,count) \
	struct semaphore name = __SEMAPHORE_INITIALIZER(name,count)

#define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name,1)
#define DECLARE_MUTEX_LOCKED(name) __DECLARE_SEMAPHORE_GENERIC(name,0)
static inline void sema_init (struct semaphore *sem, int val)
{
/*
 *	*sem = (struct semaphore)__SEMAPHORE_INITIALIZER((*sem),val);
 *
 * I'd rather use the more flexible initialization above, but sadly
 * GCC 2.7.2.3 emits a bogus warning. EGCS doesn't. Oh well.
 */
	atomic_set(&sem->count, val);
	sem->sleepers = 0;
	init_waitqueue_head(&sem->wait);
}

static inline void init_MUTEX (struct semaphore *sem)
{
	sema_init(sem, 1);
}

static inline void init_MUTEX_LOCKED (struct semaphore *sem)
{
	sema_init(sem, 0);
}
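
/*
 * A minimal usage sketch for the initializers above; the foo_* names are
 * hypothetical. A semaphore can be set up statically at compile time or
 * dynamically at run time (init_MUTEX(&sem) is equivalent to
 * sema_init(&sem, 1)).
 *
 *	static DECLARE_MUTEX(foo_mutex);	- static, count == 1
 *	static DECLARE_MUTEX_LOCKED(foo_gate);	- static, count == 0
 *
 *	static struct semaphore foo_pool;
 *
 *	static int foo_setup(void)
 *	{
 *		sema_init(&foo_pool, 4);	- dynamic, up to 4 holders
 *		return 0;
 *	}
 */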
asmlinkage void __down_failed(void /* special register calling convention */);
asmlinkage int  __down_failed_interruptible(void /* params in registers */);
asmlinkage int  __down_failed_trylock(void /* params in registers */);
asmlinkage void __up_wakeup(void /* special register calling convention */);

asmlinkage void __down(struct semaphore * sem);
asmlinkage int  __down_interruptible(struct semaphore * sem);
asmlinkage int  __down_trylock(struct semaphore * sem);
asmlinkage void __up(struct semaphore * sem);

/*
 * This is ugly, but we want the default case to fall through.
 * "__down_failed" is a special asm handler that calls the C
 * routine that actually waits. See arch/x86_64/kernel/semaphore.c
 */
static inline void down(struct semaphore * sem)
{
	might_sleep();

	__asm__ __volatile__(
		"# atomic down operation\n\t"
		LOCK "decl %0\n\t"	/* --sem->count */
		"js 2f\n"
		"1:\n"
		LOCK_SECTION_START("")
		"2:\tcall __down_failed\n\t"
		"jmp 1b\n"
		LOCK_SECTION_END
		:"=m" (sem->count)
		:"D" (sem)
		:"memory");
}
/*
 * Interruptible attempt to acquire a semaphore.  If we obtain
 * it, return zero.  If we are interrupted, return -EINTR.
 */
static inline int down_interruptible(struct semaphore * sem)
{
	int result;

	might_sleep();

	__asm__ __volatile__(
		"# atomic interruptible down operation\n\t"
		LOCK "decl %1\n\t"	/* --sem->count */
		"js 2f\n\t"
		"xorl %0,%0\n"
		"1:\n"
		LOCK_SECTION_START("")
		"2:\tcall __down_failed_interruptible\n\t"
		"jmp 1b\n"
		LOCK_SECTION_END
		:"=a" (result), "=m" (sem->count)
		:"D" (sem)
		:"memory");
	return result;
}
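
/*
 * A minimal sketch of the expected calling pattern; foo_sem and
 * foo_do_request() are hypothetical names used only for illustration.
 * The caller must check the return value, because a signal can abort
 * the sleep before the semaphore is acquired.
 *
 *	static int foo_do_request(void)
 *	{
 *		if (down_interruptible(&foo_sem))
 *			return -EINTR;		- interrupted, nothing held
 *		foo_touch_shared_state();	- semaphore held here
 *		up(&foo_sem);
 *		return 0;
 *	}
 */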
/*
 * Non-blocking attempt to down() a semaphore.
 * Returns zero if we acquired it.
 */
static inline int down_trylock(struct semaphore * sem)
{
	int result;

	__asm__ __volatile__(
		"# atomic trylock down operation\n\t"
		LOCK "decl %1\n\t"	/* --sem->count */
		"js 2f\n\t"
		"xorl %0,%0\n"
		"1:\n"
		LOCK_SECTION_START("")
		"2:\tcall __down_failed_trylock\n\t"
		"jmp 1b\n"
		LOCK_SECTION_END
		:"=a" (result), "=m" (sem->count)
		:"D" (sem)
		:"memory","cc");
	return result;
}
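
/*
 * A minimal sketch of non-blocking use; the foo_* names are hypothetical.
 * down_trylock() returns zero when the semaphore was acquired and non-zero
 * when it was already held, so the caller can avoid sleeping entirely.
 *
 *	if (down_trylock(&foo_sem))
 *		return -EBUSY;		- contended, give up without sleeping
 *	foo_touch_shared_state();	- semaphore held here
 *	up(&foo_sem);
 */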
/*
 * Note! This is subtle. We jump to wake people up only if
 * the semaphore was negative (== somebody was waiting on it).
 * The default case (no contention) will result in NO
 * jumps for both down() and up().
 */
static inline void up(struct semaphore * sem)
{
	__asm__ __volatile__(
		"# atomic up operation\n\t"
		LOCK "incl %0\n\t"	/* ++sem->count */
		"jle 2f\n"
		"1:\n"
		LOCK_SECTION_START("")
		"2:\tcall __up_wakeup\n\t"
		"jmp 1b\n"
		LOCK_SECTION_END
		:"=m" (sem->count)
		:"D" (sem)
		:"memory");
}
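
/*
 * A sketch of the common blocking pattern built from the primitives above;
 * the foo_* names are illustrative only. down() may sleep and therefore
 * must only be used from process context, and every successful down()
 * must be balanced by exactly one up().
 *
 *	static DECLARE_MUTEX(foo_sem);
 *
 *	static void foo_do_work(void)
 *	{
 *		down(&foo_sem);		- sleeps until the semaphore is free
 *		foo_touch_shared_state();
 *		up(&foo_sem);		- wakes one waiter, if any
 *	}
 */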
#endif /* __KERNEL__ */
#endif