#ifndef __LINUX_SEQLOCK_H
#define __LINUX_SEQLOCK_H
/*
 * Reader/writer consistent mechanism without starving writers. This type of
 * lock is for data where the reader wants a consistent set of information
 * and is willing to retry if the information changes. There are two types
 * of readers:
 * 1. Sequence readers which never block a writer but they may have to retry
 *    if a writer is in progress, by detecting a change in the sequence number.
 *    Writers do not wait for a sequence reader.
 * 2. Locking readers which will wait if a writer or another locking reader
 *    is in progress. A locking reader in progress will also block a writer
 *    from going forward. Unlike the regular rwlock, the read lock here is
 *    exclusive so that only one locking reader can get it.
 *
 * This is not as cache friendly as brlock. Also, this may not work well
 * for data that contains pointers, because any writer could
 * invalidate a pointer that a reader was following.
 *
 * Expected non-blocking reader usage:
 *      do {
 *              seq = read_seqbegin(&foo);
 *              ...
 *      } while (read_seqretry(&foo, seq));
 *
 * On non-SMP the spin locks disappear but the writer still needs
 * to increment the sequence variables because an interrupt routine could
 * change the state of the data.
 *
 * Based on x86_64 vsyscall gettimeofday
 * by Keith Owens and Andrea Arcangeli
 */

#include <linux/spinlock.h>
#include <linux/preempt.h>
#include <asm/processor.h>

/*
 * Version using sequence counter only.
 * This can be used when code has its own mutex protecting the
 * updating, starting before the write_seqcount_begin() and ending
 * after the write_seqcount_end().
 */
typedef struct seqcount {
        unsigned sequence;
} seqcount_t;

#define SEQCNT_ZERO { 0 }
#define seqcount_init(x)        do { *(x) = (seqcount_t) SEQCNT_ZERO; } while (0)
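
/*
 * Illustrative sketch: a seqcount_t whose writers are serialized by the
 * caller's own lock, as required above. The names foo_seq, foo_lock,
 * foo_x and foo_y are hypothetical.
 *
 *      static seqcount_t foo_seq = SEQCNT_ZERO;
 *      static DEFINE_SPINLOCK(foo_lock);       (serializes the writers)
 *      static unsigned long foo_x, foo_y;
 */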

/**
 * __read_seqcount_begin - begin a seq-read critical section (without barrier)
 * @s: pointer to seqcount_t
 * Returns: count to be passed to read_seqcount_retry
 *
 * __read_seqcount_begin is like read_seqcount_begin, but has no smp_rmb()
 * barrier. Callers should ensure that smp_rmb() or equivalent ordering is
 * provided before actually loading any of the variables that are to be
 * protected in this critical section.
 *
 * Use carefully, only in critical code, and comment how the barrier is
 * provided.
 */
static inline unsigned __read_seqcount_begin(const seqcount_t *s)
{
        unsigned ret;

repeat:
        ret = ACCESS_ONCE(s->sequence);
        if (unlikely(ret & 1)) {
                cpu_relax();
                goto repeat;
        }
        return ret;
}
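
/*
 * Sketch of how a caller of the barrier-free variant might document where
 * the required ordering comes from (foo_seq, foo_x and foo_y are
 * hypothetical):
 *
 *      seq = __read_seqcount_begin(&foo_seq);
 *      smp_rmb();      (pairs with the smp_wmb() in write_seqcount_begin)
 *      x = foo_x;
 *      y = foo_y;
 */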

/**
 * read_seqcount_begin - begin a seq-read critical section
 * @s: pointer to seqcount_t
 * Returns: count to be passed to read_seqcount_retry
 *
 * read_seqcount_begin opens a read critical section of the given seqcount.
 * The validity of the critical section is tested by calling
 * read_seqcount_retry().
 */
static inline unsigned read_seqcount_begin(const seqcount_t *s)
{
        unsigned ret = __read_seqcount_begin(s);
        smp_rmb();
        return ret;
}

/**
 * raw_seqcount_begin - begin a seq-read critical section
 * @s: pointer to seqcount_t
 * Returns: count to be passed to read_seqcount_retry
 *
 * raw_seqcount_begin opens a read critical section of the given seqcount.
 * The validity of the critical section is tested by calling
 * read_seqcount_retry().
 *
 * Unlike read_seqcount_begin(), this function will not wait for the count
 * to stabilize. If a writer is active when we begin, we will fail the
 * read_seqcount_retry() instead of stabilizing at the beginning of the
 * critical section.
 */
static inline unsigned raw_seqcount_begin(const seqcount_t *s)
{
        unsigned ret = ACCESS_ONCE(s->sequence);
        smp_rmb();
        return ret & ~1;
}
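
/*
 * Sketch of how raw_seqcount_begin() might be used: the reader takes a
 * snapshot even while a writer is active and simply throws the work away
 * when read_seqcount_retry() reports a change. The names foo_seq, foo_x,
 * foo_y and slow_path are hypothetical.
 *
 *      seq = raw_seqcount_begin(&foo_seq);
 *      x = foo_x;
 *      y = foo_y;
 *      if (read_seqcount_retry(&foo_seq, seq))
 *              goto slow_path;         (fall back, e.g. to a locking reader)
 */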

/**
 * __read_seqcount_retry - end a seq-read critical section (without barrier)
 * @s: pointer to seqcount_t
 * @start: count, from read_seqcount_begin
 * Returns: 1 if retry is required, else 0
 *
 * __read_seqcount_retry is like read_seqcount_retry, but has no smp_rmb()
 * barrier. Callers should ensure that smp_rmb() or equivalent ordering is
 * provided before actually loading any of the variables that are to be
 * protected in this critical section.
 *
 * Use carefully, only in critical code, and comment how the barrier is
 * provided.
 */
static inline int __read_seqcount_retry(const seqcount_t *s, unsigned start)
{
        return unlikely(s->sequence != start);
}

/**
 * read_seqcount_retry - end a seq-read critical section
 * @s: pointer to seqcount_t
 * @start: count, from read_seqcount_begin
 * Returns: 1 if retry is required, else 0
 *
 * read_seqcount_retry closes a read critical section of the given seqcount.
 * If the critical section was invalid, it must be ignored (and typically
 * retried).
 */
static inline int read_seqcount_retry(const seqcount_t *s, unsigned start)
{
        smp_rmb();
        return __read_seqcount_retry(s, start);
}
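
/*
 * A typical lockless reader built from the begin/retry pair above (a sketch;
 * foo_seq, foo_x and foo_y are hypothetical):
 *
 *      unsigned seq;
 *
 *      do {
 *              seq = read_seqcount_begin(&foo_seq);
 *              x = foo_x;
 *              y = foo_y;
 *      } while (read_seqcount_retry(&foo_seq, seq));
 */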

/*
 * Sequence counter only version assumes that callers are using their
 * own mutexing.
 */
static inline void write_seqcount_begin(seqcount_t *s)
{
        s->sequence++;
        smp_wmb();
}

static inline void write_seqcount_end(seqcount_t *s)
{
        smp_wmb();
        s->sequence++;
}
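
/*
 * Matching writer sketch, with the caller-provided serialization mentioned
 * above (foo_lock, foo_seq, foo_x and foo_y are hypothetical):
 *
 *      spin_lock(&foo_lock);
 *      write_seqcount_begin(&foo_seq);
 *      foo_x = new_x;
 *      foo_y = new_y;
 *      write_seqcount_end(&foo_seq);
 *      spin_unlock(&foo_lock);
 */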

/**
 * write_seqcount_barrier - invalidate in-progress read-side seq operations
 * @s: pointer to seqcount_t
 *
 * After write_seqcount_barrier, no read-side seq operations will complete
 * successfully and see data older than this.
 */
static inline void write_seqcount_barrier(seqcount_t *s)
{
        smp_wmb();
        s->sequence += 2;
}

typedef struct {
        struct seqcount seqcount;
        spinlock_t lock;
} seqlock_t;

/*
 * These macros triggered gcc-3.x compile-time problems. We think these are
 * OK now. Be cautious.
 */
#define __SEQLOCK_UNLOCKED(lockname)                    \
        {                                               \
                .seqcount = SEQCNT_ZERO,                \
                .lock = __SPIN_LOCK_UNLOCKED(lockname)  \
        }

#define seqlock_init(x)                                 \
        do {                                            \
                seqcount_init(&(x)->seqcount);          \
                spin_lock_init(&(x)->lock);             \
        } while (0)

#define DEFINE_SEQLOCK(x) \
                seqlock_t x = __SEQLOCK_UNLOCKED(x)
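
/*
 * Declaration sketch: a seqlock_t can be defined statically or initialized
 * at run time (foo_seqlock and bar are hypothetical names):
 *
 *      static DEFINE_SEQLOCK(foo_seqlock);
 *
 * or, for a lock embedded in another object:
 *
 *      seqlock_init(&bar->seqlock);
 */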

/*
 * Read side functions for starting and finalizing a read side section.
 */
static inline unsigned read_seqbegin(const seqlock_t *sl)
{
        return read_seqcount_begin(&sl->seqcount);
}

static inline unsigned read_seqretry(const seqlock_t *sl, unsigned start)
{
        return read_seqcount_retry(&sl->seqcount, start);
}
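
/*
 * This is the reader pattern sketched at the top of the file, fleshed out
 * (foo_seqlock, foo_x and foo_y are hypothetical):
 *
 *      unsigned seq;
 *
 *      do {
 *              seq = read_seqbegin(&foo_seqlock);
 *              x = foo_x;
 *              y = foo_y;
 *      } while (read_seqretry(&foo_seqlock, seq));
 */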

/*
 * Lock out other writers and update the count.
 * Acts like a normal spin_lock/unlock.
 * Don't need preempt_disable() because that is in the spin_lock already.
 */
static inline void write_seqlock(seqlock_t *sl)
{
        spin_lock(&sl->lock);
        write_seqcount_begin(&sl->seqcount);
}

static inline void write_sequnlock(seqlock_t *sl)
{
        write_seqcount_end(&sl->seqcount);
        spin_unlock(&sl->lock);
}
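
/*
 * Writer sketch using the combined lock: no separate serialization is needed
 * because the embedded spinlock provides it (names are hypothetical):
 *
 *      write_seqlock(&foo_seqlock);
 *      foo_x = new_x;
 *      foo_y = new_y;
 *      write_sequnlock(&foo_seqlock);
 */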

static inline void write_seqlock_bh(seqlock_t *sl)
{
        spin_lock_bh(&sl->lock);
        write_seqcount_begin(&sl->seqcount);
}

static inline void write_sequnlock_bh(seqlock_t *sl)
{
        write_seqcount_end(&sl->seqcount);
        spin_unlock_bh(&sl->lock);
}

static inline void write_seqlock_irq(seqlock_t *sl)
{
        spin_lock_irq(&sl->lock);
        write_seqcount_begin(&sl->seqcount);
}

static inline void write_sequnlock_irq(seqlock_t *sl)
{
        write_seqcount_end(&sl->seqcount);
        spin_unlock_irq(&sl->lock);
}

static inline unsigned long __write_seqlock_irqsave(seqlock_t *sl)
{
        unsigned long flags;

        spin_lock_irqsave(&sl->lock, flags);
        write_seqcount_begin(&sl->seqcount);
        return flags;
}

#define write_seqlock_irqsave(lock, flags)                              \
        do { flags = __write_seqlock_irqsave(lock); } while (0)

static inline void
write_sequnlock_irqrestore(seqlock_t *sl, unsigned long flags)
{
        write_seqcount_end(&sl->seqcount);
        spin_unlock_irqrestore(&sl->lock, flags);
}
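
/*
 * Sketch of the irqsave variant, for writers that may run with interrupts
 * otherwise enabled (names are hypothetical):
 *
 *      unsigned long flags;
 *
 *      write_seqlock_irqsave(&foo_seqlock, flags);
 *      foo_x = new_x;
 *      write_sequnlock_irqrestore(&foo_seqlock, flags);
 */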

/*
 * A locking reader exclusively locks out other writers and locking readers,
 * but doesn't update the sequence number. Acts like a normal spin_lock/unlock.
 * Don't need preempt_disable() because that is in the spin_lock already.
 */
static inline void read_seqlock_excl(seqlock_t *sl)
{
        spin_lock(&sl->lock);
}

static inline void read_sequnlock_excl(seqlock_t *sl)
{
        spin_unlock(&sl->lock);
}

static inline void read_seqlock_excl_bh(seqlock_t *sl)
{
        spin_lock_bh(&sl->lock);
}

static inline void read_sequnlock_excl_bh(seqlock_t *sl)
{
        spin_unlock_bh(&sl->lock);
}

static inline void read_seqlock_excl_irq(seqlock_t *sl)
{
        spin_lock_irq(&sl->lock);
}

static inline void read_sequnlock_excl_irq(seqlock_t *sl)
{
        spin_unlock_irq(&sl->lock);
}

static inline unsigned long __read_seqlock_excl_irqsave(seqlock_t *sl)
{
        unsigned long flags;

        spin_lock_irqsave(&sl->lock, flags);
        return flags;
}

#define read_seqlock_excl_irqsave(lock, flags)                          \
        do { flags = __read_seqlock_excl_irqsave(lock); } while (0)

static inline void
read_sequnlock_excl_irqrestore(seqlock_t *sl, unsigned long flags)
{
        spin_unlock_irqrestore(&sl->lock, flags);
}
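
/*
 * Locking reader sketch: unlike the retry loop, this reader simply excludes
 * writers (and other locking readers) for the duration (names hypothetical):
 *
 *      read_seqlock_excl(&foo_seqlock);
 *      x = foo_x;
 *      y = foo_y;
 *      read_sequnlock_excl(&foo_seqlock);
 */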

#endif /* __LINUX_SEQLOCK_H */