/* include/asm-alpha/semaphore.h */
  1. #ifndef _ALPHA_SEMAPHORE_H
  2. #define _ALPHA_SEMAPHORE_H
  3. /*
  4. * SMP- and interrupt-safe semaphores..
  5. *
  6. * (C) Copyright 1996 Linus Torvalds
  7. * (C) Copyright 1996, 2000 Richard Henderson
  8. */
  9. #include <asm/current.h>
  10. #include <asm/system.h>
  11. #include <asm/atomic.h>
  12. #include <linux/compiler.h>
  13. #include <linux/wait.h>
  14. #include <linux/rwsem.h>
  15. struct semaphore {
  16. atomic_t count;
  17. wait_queue_head_t wait;
  18. };
  19. #define __SEMAPHORE_INITIALIZER(name, n) \
  20. { \
  21. .count = ATOMIC_INIT(n), \
  22. .wait = __WAIT_QUEUE_HEAD_INITIALIZER((name).wait), \
  23. }
  24. #define __DECLARE_SEMAPHORE_GENERIC(name,count) \
  25. struct semaphore name = __SEMAPHORE_INITIALIZER(name,count)
  26. #define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name,1)
  27. static inline void sema_init(struct semaphore *sem, int val)
  28. {
  29. /*
  30. * Logically,
  31. * *sem = (struct semaphore)__SEMAPHORE_INITIALIZER((*sem),val);
  32. * except that gcc produces better initializing by parts yet.
  33. */
  34. atomic_set(&sem->count, val);
  35. init_waitqueue_head(&sem->wait);
  36. }
  37. static inline void init_MUTEX (struct semaphore *sem)
  38. {
  39. sema_init(sem, 1);
  40. }
  41. static inline void init_MUTEX_LOCKED (struct semaphore *sem)
  42. {
  43. sema_init(sem, 0);
  44. }
  45. extern void down(struct semaphore *);
  46. extern void __down_failed(struct semaphore *);
  47. extern int down_interruptible(struct semaphore *);
  48. extern int __down_failed_interruptible(struct semaphore *);
  49. extern int down_trylock(struct semaphore *);
  50. extern void up(struct semaphore *);
  51. extern void __up_wakeup(struct semaphore *);
  52. /*
  53. * Hidden out of line code is fun, but extremely messy. Rely on newer
  54. * compilers to do a respectable job with this. The contention cases
  55. * are handled out of line in arch/alpha/kernel/semaphore.c.
  56. */
  57. static inline void __down(struct semaphore *sem)
  58. {
  59. long count;
  60. might_sleep();
  61. count = atomic_dec_return(&sem->count);
  62. if (unlikely(count < 0))
  63. __down_failed(sem);
  64. }
  65. static inline int __down_interruptible(struct semaphore *sem)
  66. {
  67. long count;
  68. might_sleep();
  69. count = atomic_dec_return(&sem->count);
  70. if (unlikely(count < 0))
  71. return __down_failed_interruptible(sem);
  72. return 0;
  73. }
  74. /*
  75. * down_trylock returns 0 on success, 1 if we failed to get the lock.
  76. */
  77. static inline int __down_trylock(struct semaphore *sem)
  78. {
  79. long ret;
  80. /* "Equivalent" C:
  81. do {
  82. ret = ldl_l;
  83. --ret;
  84. if (ret < 0)
  85. break;
  86. ret = stl_c = ret;
  87. } while (ret == 0);
  88. */
  89. __asm__ __volatile__(
  90. "1: ldl_l %0,%1\n"
  91. " subl %0,1,%0\n"
  92. " blt %0,2f\n"
  93. " stl_c %0,%1\n"
  94. " beq %0,3f\n"
  95. " mb\n"
  96. "2:\n"
  97. ".subsection 2\n"
  98. "3: br 1b\n"
  99. ".previous"
  100. : "=&r" (ret), "=m" (sem->count)
  101. : "m" (sem->count));
  102. return ret < 0;
  103. }
  104. static inline void __up(struct semaphore *sem)
  105. {
  106. if (unlikely(atomic_inc_return(&sem->count) <= 0))
  107. __up_wakeup(sem);
  108. }
  109. #if !defined(CONFIG_DEBUG_SEMAPHORE)
  110. extern inline void down(struct semaphore *sem)
  111. {
  112. __down(sem);
  113. }
  114. extern inline int down_interruptible(struct semaphore *sem)
  115. {
  116. return __down_interruptible(sem);
  117. }
  118. extern inline int down_trylock(struct semaphore *sem)
  119. {
  120. return __down_trylock(sem);
  121. }
  122. extern inline void up(struct semaphore *sem)
  123. {
  124. __up(sem);
  125. }
  126. #endif
  127. #endif