semaphore.h

#ifndef _ASM_IA64_SEMAPHORE_H
#define _ASM_IA64_SEMAPHORE_H

/*
 * Copyright (C) 1998-2000 Hewlett-Packard Co
 * Copyright (C) 1998-2000 David Mosberger-Tang <davidm@hpl.hp.com>
 */

#include <linux/wait.h>
#include <linux/rwsem.h>

#include <asm/atomic.h>

struct semaphore {
        atomic_t count;
        int sleepers;
        wait_queue_head_t wait;
};

#define __SEMAPHORE_INITIALIZER(name, n)                                \
{                                                                       \
        .count          = ATOMIC_INIT(n),                               \
        .sleepers       = 0,                                            \
        .wait           = __WAIT_QUEUE_HEAD_INITIALIZER((name).wait)    \
}

#define __DECLARE_SEMAPHORE_GENERIC(name,count)                         \
        struct semaphore name = __SEMAPHORE_INITIALIZER(name, count)

#define DECLARE_MUTEX(name)             __DECLARE_SEMAPHORE_GENERIC(name, 1)
#define DECLARE_MUTEX_LOCKED(name)      __DECLARE_SEMAPHORE_GENERIC(name, 0)
static inline void
sema_init (struct semaphore *sem, int val)
{
        *sem = (struct semaphore) __SEMAPHORE_INITIALIZER(*sem, val);
}

static inline void
init_MUTEX (struct semaphore *sem)
{
        sema_init(sem, 1);
}

static inline void
init_MUTEX_LOCKED (struct semaphore *sem)
{
        sema_init(sem, 0);
}
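/*
 * Illustrative sketch (not part of the original header): a semaphore can be
 * set up either statically at compile time or dynamically at run time.  The
 * identifiers my_static_lock, struct my_dev and dev->lock are hypothetical.
 *
 *      static DECLARE_MUTEX(my_static_lock);           // static, count = 1
 *
 *      static int my_dev_setup (struct my_dev *dev)
 *      {
 *              sema_init(&dev->lock, 1);               // dynamic, count = 1
 *              return 0;
 *      }
 */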
extern void __down (struct semaphore * sem);
extern int  __down_interruptible (struct semaphore * sem);
extern int  __down_trylock (struct semaphore * sem);
extern void __up (struct semaphore * sem);
/*
 * Atomically decrement the semaphore's count.  If it goes negative,
 * block the calling thread in the TASK_UNINTERRUPTIBLE state.
 */
static inline void
down (struct semaphore *sem)
{
        might_sleep();
        if (ia64_fetchadd(-1, &sem->count.counter, acq) < 1)
                __down(sem);
}
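/*
 * Illustrative usage sketch (not part of the original header): protecting a
 * critical section with the hypothetical my_static_lock from the sketch
 * above.  down() sleeps uninterruptibly, so it is not suitable where the
 * caller must remain responsive to signals.
 *
 *      down(&my_static_lock);          // may sleep in TASK_UNINTERRUPTIBLE
 *      // ... touch shared state ...
 *      up(&my_static_lock);            // release; wakes one sleeper, if any
 */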
/*
 * Atomically decrement the semaphore's count.  If it goes negative,
 * block the calling thread in the TASK_INTERRUPTIBLE state.
 */
static inline int
down_interruptible (struct semaphore * sem)
{
        int ret = 0;

        might_sleep();
        if (ia64_fetchadd(-1, &sem->count.counter, acq) < 1)
                ret = __down_interruptible(sem);
        return ret;
}
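/*
 * Illustrative usage sketch (not part of the original header): callers of
 * down_interruptible() must check the return value, since a pending signal
 * can abort the wait before the semaphore is acquired.
 *
 *      if (down_interruptible(&my_static_lock))
 *              return -ERESTARTSYS;    // interrupted; the lock was NOT taken
 *      // ... critical section ...
 *      up(&my_static_lock);
 */

/*
 * Try to acquire the semaphore without blocking.  Returns 0 if the count
 * was successfully decremented, non-zero if the semaphore was not acquired.
 */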
static inline int
down_trylock (struct semaphore *sem)
{
        int ret = 0;

        if (ia64_fetchadd(-1, &sem->count.counter, acq) < 1)
                ret = __down_trylock(sem);
        return ret;
}
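/*
 * Atomically increment the semaphore's count.  If there are blocked
 * waiters (the count was negative), wake one of them up.
 */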
static inline void
up (struct semaphore * sem)
{
        if (ia64_fetchadd(1, &sem->count.counter, rel) <= -1)
                __up(sem);
}

#endif /* _ASM_IA64_SEMAPHORE_H */