semaphore.h

#ifndef _ASM_IA64_SEMAPHORE_H
#define _ASM_IA64_SEMAPHORE_H

/*
 * Copyright (C) 1998-2000 Hewlett-Packard Co
 * Copyright (C) 1998-2000 David Mosberger-Tang <davidm@hpl.hp.com>
 */

#include <linux/wait.h>
#include <linux/rwsem.h>

#include <asm/atomic.h>

struct semaphore {
        atomic_t count;
        int sleepers;
        wait_queue_head_t wait;
};

#define __SEMAPHORE_INITIALIZER(name, n)                        \
{                                                               \
        .count = ATOMIC_INIT(n),                                \
        .sleepers = 0,                                          \
        .wait = __WAIT_QUEUE_HEAD_INITIALIZER((name).wait)      \
}

#define __MUTEX_INITIALIZER(name)       __SEMAPHORE_INITIALIZER(name,1)

#define __DECLARE_SEMAPHORE_GENERIC(name,count)                 \
        struct semaphore name = __SEMAPHORE_INITIALIZER(name, count)

#define DECLARE_MUTEX(name)             __DECLARE_SEMAPHORE_GENERIC(name, 1)
#define DECLARE_MUTEX_LOCKED(name)      __DECLARE_SEMAPHORE_GENERIC(name, 0)

static inline void
sema_init (struct semaphore *sem, int val)
{
        *sem = (struct semaphore) __SEMAPHORE_INITIALIZER(*sem, val);
}

static inline void
init_MUTEX (struct semaphore *sem)
{
        sema_init(sem, 1);
}

static inline void
init_MUTEX_LOCKED (struct semaphore *sem)
{
        sema_init(sem, 0);
}

extern void __down (struct semaphore * sem);
extern int __down_interruptible (struct semaphore * sem);
extern int __down_trylock (struct semaphore * sem);
extern void __up (struct semaphore * sem);

/*
 * Atomically decrement the semaphore's count.  If it goes negative,
 * block the calling thread in the TASK_UNINTERRUPTIBLE state.
 */
static inline void
down (struct semaphore *sem)
{
        might_sleep();
        if (atomic_dec_return(&sem->count) < 0)
                __down(sem);
}

/*
 * Atomically decrement the semaphore's count.  If it goes negative,
 * block the calling thread in the TASK_INTERRUPTIBLE state.
 */
static inline int
down_interruptible (struct semaphore * sem)
{
        int ret = 0;

        might_sleep();
        if (atomic_dec_return(&sem->count) < 0)
                ret = __down_interruptible(sem);
        return ret;
}

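/*
 * Atomically decrement the semaphore's count if it can be taken
 * without blocking.  Returns 0 if the semaphore was acquired,
 * non-zero otherwise; never sleeps.
 */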
static inline int
down_trylock (struct semaphore *sem)
{
        int ret = 0;

        if (atomic_dec_return(&sem->count) < 0)
                ret = __down_trylock(sem);
        return ret;
}

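/*
 * Atomically increment the semaphore's count.  If there are sleepers
 * (i.e., the count was negative), wake one of them up.
 */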
static inline void
up (struct semaphore * sem)
{
        if (atomic_inc_return(&sem->count) <= 0)
                __up(sem);
}

#endif /* _ASM_IA64_SEMAPHORE_H */
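
Usage sketch (illustrative, not part of semaphore.h): the fragment below shows how the primitives declared above are typically called from kernel code. The semaphore names my_dev_lock and my_dev_slots and the my_dev_* functions are hypothetical, invented for this example; only the semaphore API itself comes from the header.

#include <linux/errno.h>
#include <asm/semaphore.h>      /* the header shown above */

/* Hypothetical lock protecting shared driver state; starts unlocked. */
static DECLARE_MUTEX(my_dev_lock);

/* Hypothetical counting semaphore, initialized dynamically. */
static struct semaphore my_dev_slots;

static void my_dev_setup(void)
{
        sema_init(&my_dev_slots, 4);    /* allow up to four concurrent holders */
}

/* Sleep (uninterruptibly) until the lock becomes available. */
static void my_dev_update(void)
{
        down(&my_dev_lock);
        /* ... critical section ... */
        up(&my_dev_lock);
}

/* Same, but a pending signal aborts the wait; typical for syscall paths. */
static int my_dev_update_intr(void)
{
        if (down_interruptible(&my_dev_lock))
                return -EINTR;          /* interrupted before acquiring the lock */
        /* ... critical section ... */
        up(&my_dev_lock);
        return 0;
}

/* Non-blocking attempt, for contexts that must not sleep. */
static int my_dev_poke(void)
{
        if (down_trylock(&my_dev_lock))
                return -EBUSY;          /* lock is currently held; give up */
        /* ... critical section ... */
        up(&my_dev_lock);
        return 0;
}

Note that down() and down_interruptible() call might_sleep() and may block, so they must not be used from interrupt or other atomic context; down_trylock() is the only acquisition path here that never sleeps.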