/* semaphore.h — MN10300 semaphore implementation */
  1. /* MN10300 Semaphores
  2. *
  3. * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
  4. * Written by David Howells (dhowells@redhat.com)
  5. *
  6. * This program is free software; you can redistribute it and/or
  7. * modify it under the terms of the GNU General Public Licence
  8. * as published by the Free Software Foundation; either version
  9. * 2 of the Licence, or (at your option) any later version.
  10. */
  11. #ifndef _ASM_SEMAPHORE_H
  12. #define _ASM_SEMAPHORE_H
  13. #ifndef __ASSEMBLY__
  14. #include <linux/linkage.h>
  15. #include <linux/wait.h>
  16. #include <linux/spinlock.h>
  17. #include <linux/rwsem.h>
  18. #define SEMAPHORE_DEBUG 0
  19. /*
  20. * the semaphore definition
  21. * - if count is >0 then there are tokens available on the semaphore for down
  22. * to collect
  23. * - if count is <=0 then there are no spare tokens, and anyone that wants one
  24. * must wait
  25. * - if wait_list is not empty, then there are processes waiting for the
  26. * semaphore
  27. */
/*
 * Semaphore object.
 * - count > 0: tokens available for down() to take immediately
 * - count <= 0: no tokens; would-be holders queue on wait_list
 */
struct semaphore {
	atomic_t		count;		/* it's not really atomic, it's
						 * just that certain modules
						 * expect to be able to access
						 * it directly */
	spinlock_t		wait_lock;	/* protects count and wait_list */
	struct list_head	wait_list;	/* tasks blocked in __down*() */
#if SEMAPHORE_DEBUG
	unsigned		__magic;	/* debug-build sanity cookie */
#endif
};
#if SEMAPHORE_DEBUG
/* Leading comma continues the initializer list; positionally fills __magic. */
# define __SEM_DEBUG_INIT(name) , (long)&(name).__magic
#else
# define __SEM_DEBUG_INIT(name)
#endif

/* Static initializer for a semaphore holding init_count tokens. */
#define __SEMAPHORE_INITIALIZER(name, init_count)			\
{									\
	.count		= ATOMIC_INIT(init_count),			\
	.wait_lock	= __SPIN_LOCK_UNLOCKED((name).wait_lock),	\
	.wait_list	= LIST_HEAD_INIT((name).wait_list)		\
	__SEM_DEBUG_INIT(name)						\
}
/* Define and statically initialize a semaphore with the given token count. */
#define __DECLARE_SEMAPHORE_GENERIC(name,count) \
	struct semaphore name = __SEMAPHORE_INITIALIZER(name, count)

#define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name, 1)	/* starts unlocked */
#define DECLARE_MUTEX_LOCKED(name) __DECLARE_SEMAPHORE_GENERIC(name, 0)	/* starts locked */
/*
 * Runtime (re)initialization of a semaphore to hold val tokens.
 * Must not be called on a semaphore that is in use — the struct
 * assignment clobbers wait_lock and wait_list wholesale.
 */
static inline void sema_init(struct semaphore *sem, int val)
{
	*sem = (struct semaphore) __SEMAPHORE_INITIALIZER(*sem, val);
}
/* Initialize a semaphore for use as an unlocked mutex (one token). */
static inline void init_MUTEX(struct semaphore *sem)
{
	sema_init(sem, 1);
}
/* Initialize a semaphore for use as a locked mutex (zero tokens). */
static inline void init_MUTEX_LOCKED(struct semaphore *sem)
{
	sema_init(sem, 0);
}
/*
 * Slow-path implementations (defined out of line).  Both __down variants
 * are entered with sem->wait_lock held and the caller's saved IRQ flags;
 * they release the lock themselves before/around sleeping.
 */
extern void __down(struct semaphore *sem, unsigned long flags);
extern int __down_interruptible(struct semaphore *sem, unsigned long flags);
extern void __up(struct semaphore *sem);	/* called with wait_lock held */
  70. static inline void down(struct semaphore *sem)
  71. {
  72. unsigned long flags;
  73. int count;
  74. #if SEMAPHORE_DEBUG
  75. CHECK_MAGIC(sem->__magic);
  76. #endif
  77. spin_lock_irqsave(&sem->wait_lock, flags);
  78. count = atomic_read(&sem->count);
  79. if (likely(count > 0)) {
  80. atomic_set(&sem->count, count - 1);
  81. spin_unlock_irqrestore(&sem->wait_lock, flags);
  82. } else {
  83. __down(sem, flags);
  84. }
  85. }
  86. static inline int down_interruptible(struct semaphore *sem)
  87. {
  88. unsigned long flags;
  89. int count, ret = 0;
  90. #if SEMAPHORE_DEBUG
  91. CHECK_MAGIC(sem->__magic);
  92. #endif
  93. spin_lock_irqsave(&sem->wait_lock, flags);
  94. count = atomic_read(&sem->count);
  95. if (likely(count > 0)) {
  96. atomic_set(&sem->count, count - 1);
  97. spin_unlock_irqrestore(&sem->wait_lock, flags);
  98. } else {
  99. ret = __down_interruptible(sem, flags);
  100. }
  101. return ret;
  102. }
  103. /*
  104. * non-blockingly attempt to down() a semaphore.
  105. * - returns zero if we acquired it
  106. */
  107. static inline int down_trylock(struct semaphore *sem)
  108. {
  109. unsigned long flags;
  110. int count, success = 0;
  111. #if SEMAPHORE_DEBUG
  112. CHECK_MAGIC(sem->__magic);
  113. #endif
  114. spin_lock_irqsave(&sem->wait_lock, flags);
  115. count = atomic_read(&sem->count);
  116. if (likely(count > 0)) {
  117. atomic_set(&sem->count, count - 1);
  118. success = 1;
  119. }
  120. spin_unlock_irqrestore(&sem->wait_lock, flags);
  121. return !success;
  122. }
  123. static inline void up(struct semaphore *sem)
  124. {
  125. unsigned long flags;
  126. #if SEMAPHORE_DEBUG
  127. CHECK_MAGIC(sem->__magic);
  128. #endif
  129. spin_lock_irqsave(&sem->wait_lock, flags);
  130. if (!list_empty(&sem->wait_list))
  131. __up(sem);
  132. else
  133. atomic_set(&sem->count, atomic_read(&sem->count) + 1);
  134. spin_unlock_irqrestore(&sem->wait_lock, flags);
  135. }
/*
 * Snapshot the current token count (may be stale by return time;
 * taken without wait_lock).
 */
static inline int sem_getcount(struct semaphore *sem)
{
	return atomic_read(&sem->count);
}
  140. #endif /* __ASSEMBLY__ */
  141. #endif