semaphore.h

#ifndef _PPC64_SEMAPHORE_H
#define _PPC64_SEMAPHORE_H

/*
 * Remove spinlock-based RW semaphores; RW semaphore definitions are
 * now in rwsem.h and we use the generic lib/rwsem.c implementation.
 * Rework semaphores to use atomic_dec_if_positive.
 * -- Paul Mackerras (paulus@samba.org)
 */

#ifdef __KERNEL__

#include <asm/atomic.h>
#include <asm/system.h>
#include <linux/wait.h>
#include <linux/rwsem.h>

struct semaphore {
        /*
         * Note that any negative value of count is equivalent to 0,
         * but additionally indicates that some process(es) might be
         * sleeping on `wait'.
         */
        atomic_t count;
        wait_queue_head_t wait;
};

#define __SEMAPHORE_INITIALIZER(name, n) \
{ \
        .count = ATOMIC_INIT(n), \
        .wait = __WAIT_QUEUE_HEAD_INITIALIZER((name).wait) \
}

#define __MUTEX_INITIALIZER(name) \
        __SEMAPHORE_INITIALIZER(name, 1)

#define __DECLARE_SEMAPHORE_GENERIC(name, count) \
        struct semaphore name = __SEMAPHORE_INITIALIZER(name,count)

#define DECLARE_MUTEX(name)             __DECLARE_SEMAPHORE_GENERIC(name, 1)
#define DECLARE_MUTEX_LOCKED(name)      __DECLARE_SEMAPHORE_GENERIC(name, 0)

static inline void sema_init (struct semaphore *sem, int val)
{
        atomic_set(&sem->count, val);
        init_waitqueue_head(&sem->wait);
}

static inline void init_MUTEX (struct semaphore *sem)
{
        sema_init(sem, 1);
}

static inline void init_MUTEX_LOCKED (struct semaphore *sem)
{
        sema_init(sem, 0);
}

/* Slow-path functions, implemented out of line. */
extern void __down(struct semaphore * sem);
extern int __down_interruptible(struct semaphore * sem);
extern void __up(struct semaphore * sem);

static inline void down(struct semaphore * sem)
{
        might_sleep();

        /*
         * Try to get the semaphore, take the slow path if we fail.
         */
        if (unlikely(atomic_dec_return(&sem->count) < 0))
                __down(sem);
}

static inline int down_interruptible(struct semaphore * sem)
{
        int ret = 0;

        might_sleep();

        /*
         * As down(), but the wait can be interrupted by a signal, in
         * which case __down_interruptible() returns -EINTR.
         */
        if (unlikely(atomic_dec_return(&sem->count) < 0))
                ret = __down_interruptible(sem);
        return ret;
}

static inline int down_trylock(struct semaphore * sem)
{
        /*
         * atomic_dec_if_positive() only decrements count if the result
         * stays non-negative, so this returns non-zero (failure)
         * without sleeping when the semaphore is unavailable.
         */
        return atomic_dec_if_positive(&sem->count) < 0;
}

static inline void up(struct semaphore * sem)
{
        /*
         * A result <= 0 means some task may be sleeping on `wait', so
         * take the slow path to wake it up.
         */
        if (unlikely(atomic_inc_return(&sem->count) <= 0))
                __up(sem);
}

#endif /* __KERNEL__ */
#endif /* !(_PPC64_SEMAPHORE_H) */
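
For context, here is a minimal usage sketch of this semaphore API as kernel code of that era would typically call it; the identifiers my_driver_lock, my_resource_count, my_sketch_init, my_update_state, my_claim_resource and my_poll_resource are hypothetical and not part of this header. It also illustrates the count semantics noted in the struct comment: a DECLARE_MUTEX starts at 1, the first down() drops it to 0 on the fast path, a second contender drives it to -1 and sleeps in __down(), and the owner's up() brings it back to 0 (still <= 0), so __up() wakes the sleeper.

/* Illustrative sketch only -- hypothetical driver code, not part of this header. */
#include <linux/errno.h>
#include <asm/semaphore.h>

static DECLARE_MUTEX(my_driver_lock);           /* binary semaphore, count = 1 */
static struct semaphore my_resource_count;      /* counting semaphore */

static int my_sketch_init(void)
{
        sema_init(&my_resource_count, 4);       /* allow up to 4 concurrent users */
        return 0;
}

static void my_update_state(void)
{
        down(&my_driver_lock);                  /* may sleep, not interruptible */
        /* ... critical section ... */
        up(&my_driver_lock);
}

static int my_claim_resource(void)
{
        if (down_interruptible(&my_resource_count))
                return -EINTR;                  /* a signal interrupted the wait */
        /* ... use one of the four resource slots ... */
        up(&my_resource_count);
        return 0;
}

static int my_poll_resource(void)
{
        if (down_trylock(&my_resource_count))
                return -EBUSY;                  /* unavailable; we did not sleep */
        /* ... */
        up(&my_resource_count);
        return 0;
}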