/*
 * include/asm-xtensa/rwsem.h
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Largely copied from include/asm-ppc/rwsem.h
 *
 * Copyright (C) 2001 - 2005 Tensilica Inc.
 */

#ifndef _XTENSA_RWSEM_H
#define _XTENSA_RWSEM_H

#include <linux/list.h>
#include <linux/spinlock.h>
#include <asm/atomic.h>
#include <asm/system.h>

/*
 * the semaphore definition
 */
struct rw_semaphore {
	signed long		count;
#define RWSEM_UNLOCKED_VALUE		0x00000000
#define RWSEM_ACTIVE_BIAS		0x00000001
#define RWSEM_ACTIVE_MASK		0x0000ffff
#define RWSEM_WAITING_BIAS		(-0x00010000)
#define RWSEM_ACTIVE_READ_BIAS		RWSEM_ACTIVE_BIAS
#define RWSEM_ACTIVE_WRITE_BIAS		(RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
	spinlock_t		wait_lock;
	struct list_head	wait_list;
};
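
/*
 * How the count is encoded (this follows from the bias values above):
 * the low 16 bits count active holders, and the count as a whole goes
 * negative whenever a writer holds the lock or tasks are queued.
 * Worked examples (32-bit two's complement):
 *
 *   0x00000000  unlocked (RWSEM_UNLOCKED_VALUE)
 *   0x00000001  one active reader
 *   0x00000003  three active readers
 *   0xffff0001  one active writer, no waiters
 *               (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
 *   0xffff0002  writer active and a reader just failed the fast path;
 *               the slow path converts the reader's bias to a waiting bias
 */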

#define __RWSEM_INITIALIZER(name) \
	{ RWSEM_UNLOCKED_VALUE, SPIN_LOCK_UNLOCKED, \
	  LIST_HEAD_INIT((name).wait_list) }

#define DECLARE_RWSEM(name)		\
	struct rw_semaphore name = __RWSEM_INITIALIZER(name)
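
/*
 * Slow paths, implemented out of line in lib/rwsem.c: the two
 * down_*_failed entry points queue the caller on wait_list and sleep;
 * rwsem_wake and rwsem_downgrade_wake hand the lock to queued waiters.
 */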
extern struct rw_semaphore *rwsem_down_read_failed(struct rw_semaphore *sem);
extern struct rw_semaphore *rwsem_down_write_failed(struct rw_semaphore *sem);
extern struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem);
extern struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem);

static inline void init_rwsem(struct rw_semaphore *sem)
{
	sem->count = RWSEM_UNLOCKED_VALUE;
	spin_lock_init(&sem->wait_lock);
	INIT_LIST_HEAD(&sem->wait_list);
}

/*
 * lock for reading
 */
static inline void __down_read(struct rw_semaphore *sem)
{
	/* fast path: a positive result means no writer is active or queued */
	if (atomic_add_return(1, (atomic_t *)(&sem->count)) > 0)
		smp_wmb();
	else
		rwsem_down_read_failed(sem);
}

static inline int __down_read_trylock(struct rw_semaphore *sem)
{
	int tmp;

	/* retry the cmpxchg as long as no writer is active or queued */
	while ((tmp = sem->count) >= 0) {
		if (tmp == cmpxchg(&sem->count, tmp,
				   tmp + RWSEM_ACTIVE_READ_BIAS)) {
			smp_wmb();
			return 1;
		}
	}
	return 0;
}

/*
 * lock for writing
 */
static inline void __down_write(struct rw_semaphore *sem)
{
	int tmp;

	tmp = atomic_add_return(RWSEM_ACTIVE_WRITE_BIAS,
				(atomic_t *)(&sem->count));
	/* the count was RWSEM_UNLOCKED_VALUE iff we now own it outright */
	if (tmp == RWSEM_ACTIVE_WRITE_BIAS)
		smp_wmb();
	else
		rwsem_down_write_failed(sem);
}

static inline int __down_write_trylock(struct rw_semaphore *sem)
{
	int tmp;

	tmp = cmpxchg(&sem->count, RWSEM_UNLOCKED_VALUE,
		      RWSEM_ACTIVE_WRITE_BIAS);
	smp_wmb();
	return tmp == RWSEM_UNLOCKED_VALUE;
}

/*
 * unlock after reading
 */
static inline void __up_read(struct rw_semaphore *sem)
{
	int tmp;

	smp_wmb();
	tmp = atomic_sub_return(1, (atomic_t *)(&sem->count));
	/* we were the last active holder and someone is queued: wake them */
	if (tmp < -1 && (tmp & RWSEM_ACTIVE_MASK) == 0)
		rwsem_wake(sem);
}

/*
 * unlock after writing
 */
static inline void __up_write(struct rw_semaphore *sem)
{
	smp_wmb();
	/* a negative result means waiting biases remain, so wake the queue */
	if (atomic_sub_return(RWSEM_ACTIVE_WRITE_BIAS,
			      (atomic_t *)(&sem->count)) < 0)
		rwsem_wake(sem);
}

/*
 * implement atomic add functionality
 */
static inline void rwsem_atomic_add(int delta, struct rw_semaphore *sem)
{
	atomic_add(delta, (atomic_t *)(&sem->count));
}

/*
 * downgrade write lock to read lock
 */
static inline void __downgrade_write(struct rw_semaphore *sem)
{
	int tmp;

	smp_wmb();
	/* cancelling RWSEM_WAITING_BIAS turns the write bias into a read bias */
	tmp = atomic_add_return(-RWSEM_WAITING_BIAS, (atomic_t *)(&sem->count));
	if (tmp < 0)
		rwsem_downgrade_wake(sem);
}

/*
 * implement exchange and add functionality
 */
static inline int rwsem_atomic_update(int delta, struct rw_semaphore *sem)
{
	smp_mb();
	return atomic_add_return(delta, (atomic_t *)(&sem->count));
}

static inline int rwsem_is_locked(struct rw_semaphore *sem)
{
	return (sem->count != 0);
}
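
/*
 * Illustrative only: none of the __*() primitives above are called
 * directly.  The generic wrappers in <linux/rwsem.h> invoke them,
 * roughly as in this (hypothetical) driver fragment:
 *
 *	static DECLARE_RWSEM(cfg_sem);
 *
 *	down_read(&cfg_sem);		read shared state, many readers OK
 *	up_read(&cfg_sem);
 *
 *	down_write(&cfg_sem);		exclusive access for an update
 *	downgrade_write(&cfg_sem);	keep the lock, but as a reader
 *	up_read(&cfg_sem);
 */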

#endif	/* _XTENSA_RWSEM_H */