/* rwsem.h */
  1. /*
  2. * include/asm-xtensa/rwsem.h
  3. *
  4. * This file is subject to the terms and conditions of the GNU General Public
  5. * License. See the file "COPYING" in the main directory of this archive
  6. * for more details.
  7. *
  8. * Largely copied from include/asm-ppc/rwsem.h
  9. *
  10. * Copyright (C) 2001 - 2005 Tensilica Inc.
  11. */
  12. #ifndef _XTENSA_RWSEM_H
  13. #define _XTENSA_RWSEM_H
  14. #ifndef _LINUX_RWSEM_H
  15. #error "Please don't include <asm/rwsem.h> directly, use <linux/rwsem.h> instead."
  16. #endif
/*
 * Layout of sem->count: the low 16 bits (RWSEM_ACTIVE_MASK) hold the
 * number of active lock holders; each queued waiter subtracts
 * RWSEM_WAITING_BIAS, so the count is negative whenever the wait list
 * is non-empty.  A write lock holds both an active bias and a waiting
 * bias, making RWSEM_ACTIVE_WRITE_BIAS the exact count of an
 * uncontended write-locked semaphore.
 */
#define RWSEM_UNLOCKED_VALUE 0x00000000
#define RWSEM_ACTIVE_BIAS 0x00000001
#define RWSEM_ACTIVE_MASK 0x0000ffff
#define RWSEM_WAITING_BIAS (-0x00010000)
#define RWSEM_ACTIVE_READ_BIAS RWSEM_ACTIVE_BIAS
#define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
/*
 * Contention slow paths, defined outside this header; the inline fast
 * paths below call them when the atomic count manipulation indicates
 * the lock could not be taken (or a waiter needs waking).
 */
extern struct rw_semaphore *rwsem_down_read_failed(struct rw_semaphore *sem);
extern struct rw_semaphore *rwsem_down_write_failed(struct rw_semaphore *sem);
extern struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem);
extern struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem);
  27. /*
  28. * lock for reading
  29. */
  30. static inline void __down_read(struct rw_semaphore *sem)
  31. {
  32. if (atomic_add_return(1,(atomic_t *)(&sem->count)) > 0)
  33. smp_wmb();
  34. else
  35. rwsem_down_read_failed(sem);
  36. }
  37. static inline int __down_read_trylock(struct rw_semaphore *sem)
  38. {
  39. int tmp;
  40. while ((tmp = sem->count) >= 0) {
  41. if (tmp == cmpxchg(&sem->count, tmp,
  42. tmp + RWSEM_ACTIVE_READ_BIAS)) {
  43. smp_wmb();
  44. return 1;
  45. }
  46. }
  47. return 0;
  48. }
  49. /*
  50. * lock for writing
  51. */
  52. static inline void __down_write(struct rw_semaphore *sem)
  53. {
  54. int tmp;
  55. tmp = atomic_add_return(RWSEM_ACTIVE_WRITE_BIAS,
  56. (atomic_t *)(&sem->count));
  57. if (tmp == RWSEM_ACTIVE_WRITE_BIAS)
  58. smp_wmb();
  59. else
  60. rwsem_down_write_failed(sem);
  61. }
  62. static inline int __down_write_trylock(struct rw_semaphore *sem)
  63. {
  64. int tmp;
  65. tmp = cmpxchg(&sem->count, RWSEM_UNLOCKED_VALUE,
  66. RWSEM_ACTIVE_WRITE_BIAS);
  67. smp_wmb();
  68. return tmp == RWSEM_UNLOCKED_VALUE;
  69. }
  70. /*
  71. * unlock after reading
  72. */
  73. static inline void __up_read(struct rw_semaphore *sem)
  74. {
  75. int tmp;
  76. smp_wmb();
  77. tmp = atomic_sub_return(1,(atomic_t *)(&sem->count));
  78. if (tmp < -1 && (tmp & RWSEM_ACTIVE_MASK) == 0)
  79. rwsem_wake(sem);
  80. }
  81. /*
  82. * unlock after writing
  83. */
  84. static inline void __up_write(struct rw_semaphore *sem)
  85. {
  86. smp_wmb();
  87. if (atomic_sub_return(RWSEM_ACTIVE_WRITE_BIAS,
  88. (atomic_t *)(&sem->count)) < 0)
  89. rwsem_wake(sem);
  90. }
/*
 * implement atomic add functionality
 *
 * Unconditionally add @delta to sem->count.  No return value and no
 * memory barrier here, unlike rwsem_atomic_update() below.
 */
static inline void rwsem_atomic_add(int delta, struct rw_semaphore *sem)
{
	atomic_add(delta, (atomic_t *)(&sem->count));
}
  98. /*
  99. * downgrade write lock to read lock
  100. */
  101. static inline void __downgrade_write(struct rw_semaphore *sem)
  102. {
  103. int tmp;
  104. smp_wmb();
  105. tmp = atomic_add_return(-RWSEM_WAITING_BIAS, (atomic_t *)(&sem->count));
  106. if (tmp < 0)
  107. rwsem_downgrade_wake(sem);
  108. }
/*
 * implement exchange and add functionality
 *
 * Add @delta to sem->count and return the resulting value, with a full
 * memory barrier issued before the update.
 */
static inline int rwsem_atomic_update(int delta, struct rw_semaphore *sem)
{
	smp_mb();
	return atomic_add_return(delta, (atomic_t *)(&sem->count));
}
  117. #endif /* _XTENSA_RWSEM_H */