#ifndef __LINUX_SEQLOCK_H
#define __LINUX_SEQLOCK_H
/*
 * Reader/writer consistent mechanism without starving writers. This type of
 * lock is for data where the reader wants a consistent set of information
 * and is willing to retry if the information changes. There are two types
 * of readers:
 * 1. Sequence readers which never block a writer but may have to retry,
 *    by detecting a change in the sequence number, if a writer is in
 *    progress. Writers do not wait for a sequence reader.
 * 2. Locking readers which will wait if a writer or another locking reader
 *    is in progress. A locking reader in progress will also block a writer
 *    from going forward. Unlike the regular rwlock, the read lock here is
 *    exclusive so that only one locking reader can get it.
 *
 * This is not as cache friendly as brlock. Also, this may not work well
 * for data that contains pointers, because any writer could
 * invalidate a pointer that a reader was following.
 *
 * Expected non-blocking reader usage:
 *      do {
 *              seq = read_seqbegin(&foo);
 *              ...
 *      } while (read_seqretry(&foo, seq));
 *
 * On non-SMP the spin locks disappear but the writer still needs
 * to increment the sequence variables because an interrupt routine could
 * change the state of the data.
 *
 * Based on x86_64 vsyscall gettimeofday
 * by Keith Owens and Andrea Arcangeli
 */

#include <linux/spinlock.h>
#include <linux/preempt.h>
#include <linux/lockdep.h>
#include <asm/processor.h>

/*
 * Version using sequence counter only.
 * This can be used when code has its own mutex protecting the
 * updating starting before the write_seqcount_begin() and ending
 * after the write_seqcount_end().
 */
typedef struct seqcount {
        unsigned sequence;
#ifdef CONFIG_DEBUG_LOCK_ALLOC
        struct lockdep_map dep_map;
#endif
} seqcount_t;

static inline void __seqcount_init(seqcount_t *s, const char *name,
                                   struct lock_class_key *key)
{
        /*
         * Make sure we are not reinitializing a held lock:
         */
        lockdep_init_map(&s->dep_map, name, key, 0);
        s->sequence = 0;
}

#ifdef CONFIG_DEBUG_LOCK_ALLOC
# define SEQCOUNT_DEP_MAP_INIT(lockname) \
                .dep_map = { .name = #lockname }

# define seqcount_init(s)                               \
        do {                                            \
                static struct lock_class_key __key;     \
                __seqcount_init((s), #s, &__key);       \
        } while (0)

static inline void seqcount_lockdep_reader_access(const seqcount_t *s)
{
        seqcount_t *l = (seqcount_t *)s;
        unsigned long flags;

        local_irq_save(flags);
        seqcount_acquire_read(&l->dep_map, 0, 0, _RET_IP_);
        seqcount_release(&l->dep_map, 1, _RET_IP_);
        local_irq_restore(flags);
}

#else
# define SEQCOUNT_DEP_MAP_INIT(lockname)
# define seqcount_init(s) __seqcount_init(s, NULL, NULL)
# define seqcount_lockdep_reader_access(x)
#endif

#define SEQCNT_ZERO(lockname) { .sequence = 0, SEQCOUNT_DEP_MAP_INIT(lockname)}
/**
 * __read_seqcount_begin - begin a seq-read critical section (without barrier)
 * @s: pointer to seqcount_t
 * Returns: count to be passed to read_seqcount_retry
 *
 * __read_seqcount_begin is like read_seqcount_begin, but has no smp_rmb()
 * barrier. Callers should ensure that smp_rmb() or equivalent ordering is
 * provided before actually loading any of the variables that are to be
 * protected in this critical section.
 *
 * Use carefully, only in critical code, and comment how the barrier is
 * provided.
 */
static inline unsigned __read_seqcount_begin(const seqcount_t *s)
{
        unsigned ret;

repeat:
        ret = ACCESS_ONCE(s->sequence);
        if (unlikely(ret & 1)) {
                cpu_relax();
                goto repeat;
        }
        return ret;
}

/**
 * read_seqcount_begin_no_lockdep - start seq-read critical section w/o lockdep
 * @s: pointer to seqcount_t
 * Returns: count to be passed to read_seqcount_retry
 *
 * read_seqcount_begin_no_lockdep opens a read critical section of the given
 * seqcount, but without any lockdep checking. Validity of the critical
 * section is tested by calling read_seqcount_retry().
 */
static inline unsigned read_seqcount_begin_no_lockdep(const seqcount_t *s)
{
        unsigned ret = __read_seqcount_begin(s);

        smp_rmb();
        return ret;
}

/**
 * read_seqcount_begin - begin a seq-read critical section
 * @s: pointer to seqcount_t
 * Returns: count to be passed to read_seqcount_retry
 *
 * read_seqcount_begin opens a read critical section of the given seqcount.
 * Validity of the critical section is tested by calling read_seqcount_retry().
 */
static inline unsigned read_seqcount_begin(const seqcount_t *s)
{
        seqcount_lockdep_reader_access(s);
        return read_seqcount_begin_no_lockdep(s);
}

/**
 * raw_seqcount_begin - begin a seq-read critical section
 * @s: pointer to seqcount_t
 * Returns: count to be passed to read_seqcount_retry
 *
 * raw_seqcount_begin opens a read critical section of the given seqcount.
 * Validity of the critical section is tested by calling read_seqcount_retry().
 *
 * Unlike read_seqcount_begin(), this function will not wait for the count
 * to stabilize. If a writer is active when we begin, we will fail the
 * read_seqcount_retry() instead of stabilizing at the beginning of the
 * critical section.
 */
static inline unsigned raw_seqcount_begin(const seqcount_t *s)
{
        unsigned ret = ACCESS_ONCE(s->sequence);

        seqcount_lockdep_reader_access(s);
        smp_rmb();
        return ret & ~1;
}
/**
 * __read_seqcount_retry - end a seq-read critical section (without barrier)
 * @s: pointer to seqcount_t
 * @start: count, from read_seqcount_begin
 * Returns: 1 if retry is required, else 0
 *
 * __read_seqcount_retry is like read_seqcount_retry, but has no smp_rmb()
 * barrier. Callers should ensure that smp_rmb() or equivalent ordering is
 * provided before actually loading any of the variables that are to be
 * protected in this critical section.
 *
 * Use carefully, only in critical code, and comment how the barrier is
 * provided.
 */
static inline int __read_seqcount_retry(const seqcount_t *s, unsigned start)
{
        return unlikely(s->sequence != start);
}

/**
 * read_seqcount_retry - end a seq-read critical section
 * @s: pointer to seqcount_t
 * @start: count, from read_seqcount_begin
 * Returns: 1 if retry is required, else 0
 *
 * read_seqcount_retry closes a read critical section of the given seqcount.
 * If the critical section was invalid, it must be ignored (and typically
 * retried).
 */
static inline int read_seqcount_retry(const seqcount_t *s, unsigned start)
{
        smp_rmb();
        return __read_seqcount_retry(s, start);
}

/*
 * Sequence counter only version assumes that callers are using their
 * own mutexing.
 */
static inline void write_seqcount_begin_nested(seqcount_t *s, int subclass)
{
        s->sequence++;
        smp_wmb();
        seqcount_acquire(&s->dep_map, subclass, 0, _RET_IP_);
}

static inline void write_seqcount_begin(seqcount_t *s)
{
        write_seqcount_begin_nested(s, 0);
}

static inline void write_seqcount_end(seqcount_t *s)
{
        seqcount_release(&s->dep_map, 1, _RET_IP_);
        smp_wmb();
        s->sequence++;
}
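
/*
 * A hypothetical usage sketch (the names xyz_lock, xyz_seq, xyz_data and
 * struct xyz are illustrative only): the writer supplies its own mutexing
 * around write_seqcount_begin()/write_seqcount_end(), while readers use
 * read_seqcount_begin()/read_seqcount_retry() and retry on interference.
 *
 *      static DEFINE_SPINLOCK(xyz_lock);
 *      static seqcount_t xyz_seq = SEQCNT_ZERO(xyz_seq);
 *      static struct xyz xyz_data;
 *
 *      void xyz_update(const struct xyz *val)
 *      {
 *              spin_lock(&xyz_lock);           // caller-provided mutexing
 *              write_seqcount_begin(&xyz_seq);
 *              xyz_data = *val;
 *              write_seqcount_end(&xyz_seq);
 *              spin_unlock(&xyz_lock);
 *      }
 *
 *      struct xyz xyz_read(void)
 *      {
 *              struct xyz ret;
 *              unsigned seq;
 *
 *              do {
 *                      seq = read_seqcount_begin(&xyz_seq);
 *                      ret = xyz_data;
 *              } while (read_seqcount_retry(&xyz_seq, seq));
 *              return ret;
 *      }
 */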
/**
 * write_seqcount_barrier - invalidate in-progress read-side seq operations
 * @s: pointer to seqcount_t
 *
 * After write_seqcount_barrier, no read-side seq operations will complete
 * successfully and see data older than this.
 */
static inline void write_seqcount_barrier(seqcount_t *s)
{
        smp_wmb();
        s->sequence += 2;
}
typedef struct {
        struct seqcount seqcount;
        spinlock_t lock;
} seqlock_t;

/*
 * These macros triggered gcc-3.x compile-time problems. We think these are
 * OK now. Be cautious.
 */
#define __SEQLOCK_UNLOCKED(lockname)                    \
        {                                               \
                .seqcount = SEQCNT_ZERO(lockname),      \
                .lock = __SPIN_LOCK_UNLOCKED(lockname)  \
        }

#define seqlock_init(x)                                 \
        do {                                            \
                seqcount_init(&(x)->seqcount);          \
                spin_lock_init(&(x)->lock);             \
        } while (0)

#define DEFINE_SEQLOCK(x) \
                seqlock_t x = __SEQLOCK_UNLOCKED(x)

/*
 * Read side functions for starting and finalizing a read side section.
 */
static inline unsigned read_seqbegin(const seqlock_t *sl)
{
        return read_seqcount_begin(&sl->seqcount);
}

static inline unsigned read_seqretry(const seqlock_t *sl, unsigned start)
{
        return read_seqcount_retry(&sl->seqcount, start);
}

/*
 * Lock out other writers and update the count.
 * Acts like a normal spin_lock/unlock.
 * Don't need preempt_disable() because that is in the spin_lock already.
 */
static inline void write_seqlock(seqlock_t *sl)
{
        spin_lock(&sl->lock);
        write_seqcount_begin(&sl->seqcount);
}

static inline void write_sequnlock(seqlock_t *sl)
{
        write_seqcount_end(&sl->seqcount);
        spin_unlock(&sl->lock);
}
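
/*
 * Hypothetical writer-side sketch for the seqlock_t variant (foo, foo_data
 * and new_value are illustrative); the matching reader side is the
 * read_seqbegin()/read_seqretry() loop shown at the top of this file.
 *
 *      static DEFINE_SEQLOCK(foo);
 *
 *      write_seqlock(&foo);
 *      foo_data = new_value;           // update the protected data
 *      write_sequnlock(&foo);
 */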
static inline void write_seqlock_bh(seqlock_t *sl)
{
        spin_lock_bh(&sl->lock);
        write_seqcount_begin(&sl->seqcount);
}

static inline void write_sequnlock_bh(seqlock_t *sl)
{
        write_seqcount_end(&sl->seqcount);
        spin_unlock_bh(&sl->lock);
}

static inline void write_seqlock_irq(seqlock_t *sl)
{
        spin_lock_irq(&sl->lock);
        write_seqcount_begin(&sl->seqcount);
}

static inline void write_sequnlock_irq(seqlock_t *sl)
{
        write_seqcount_end(&sl->seqcount);
        spin_unlock_irq(&sl->lock);
}

static inline unsigned long __write_seqlock_irqsave(seqlock_t *sl)
{
        unsigned long flags;

        spin_lock_irqsave(&sl->lock, flags);
        write_seqcount_begin(&sl->seqcount);
        return flags;
}

#define write_seqlock_irqsave(lock, flags)                      \
        do { flags = __write_seqlock_irqsave(lock); } while (0)

static inline void
write_sequnlock_irqrestore(seqlock_t *sl, unsigned long flags)
{
        write_seqcount_end(&sl->seqcount);
        spin_unlock_irqrestore(&sl->lock, flags);
}

/*
 * A locking reader exclusively locks out other writers and locking readers,
 * but doesn't update the sequence number. Acts like a normal spin_lock/unlock.
 * Don't need preempt_disable() because that is in the spin_lock already.
 */
static inline void read_seqlock_excl(seqlock_t *sl)
{
        spin_lock(&sl->lock);
}

static inline void read_sequnlock_excl(seqlock_t *sl)
{
        spin_unlock(&sl->lock);
}
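
/*
 * Hypothetical locking-reader sketch (foo, foo_data and use() are
 * illustrative): the reader takes the lock exclusively, so it never needs
 * to retry, but it blocks writers and other locking readers while it runs.
 *
 *      read_seqlock_excl(&foo);
 *      use(&foo_data);                 // data is stable, no retry loop
 *      read_sequnlock_excl(&foo);
 */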
/**
 * read_seqbegin_or_lock - begin a sequence number check or locking block
 * @lock: sequence lock
 * @seq : sequence number to be checked
 *
 * First try it once optimistically without taking the lock. If that fails,
 * take the lock. The sequence number is also used as a marker for deciding
 * whether to be a lockless reader (even) or a locking reader (odd).
 * N.B. seq must be initialized to an even number to begin with.
 */
static inline void read_seqbegin_or_lock(seqlock_t *lock, int *seq)
{
        if (!(*seq & 1))        /* Even */
                *seq = read_seqbegin(lock);
        else                    /* Odd */
                read_seqlock_excl(lock);
}

static inline int need_seqretry(seqlock_t *lock, int seq)
{
        return !(seq & 1) && read_seqretry(lock, seq);
}

static inline void done_seqretry(seqlock_t *lock, int seq)
{
        if (seq & 1)
                read_sequnlock_excl(lock);
}
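
/*
 * Hypothetical sketch of the "lockless first, then locked" reader pattern
 * built from the three helpers above (foo, foo_data and use() are
 * illustrative). seq starts even, so the first pass is lockless; if that
 * pass needs a retry, the next pass runs with the lock held (odd seq).
 *
 *      int seq, nextseq = 0;
 *
 *      do {
 *              seq = nextseq;
 *              read_seqbegin_or_lock(&foo, &seq);
 *              use(&foo_data);
 *              nextseq = 1;            // a retry, if any, takes the lock
 *      } while (need_seqretry(&foo, seq));
 *      done_seqretry(&foo, seq);
 */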
static inline void read_seqlock_excl_bh(seqlock_t *sl)
{
        spin_lock_bh(&sl->lock);
}

static inline void read_sequnlock_excl_bh(seqlock_t *sl)
{
        spin_unlock_bh(&sl->lock);
}

static inline void read_seqlock_excl_irq(seqlock_t *sl)
{
        spin_lock_irq(&sl->lock);
}

static inline void read_sequnlock_excl_irq(seqlock_t *sl)
{
        spin_unlock_irq(&sl->lock);
}

static inline unsigned long __read_seqlock_excl_irqsave(seqlock_t *sl)
{
        unsigned long flags;

        spin_lock_irqsave(&sl->lock, flags);
        return flags;
}

#define read_seqlock_excl_irqsave(lock, flags)                  \
        do { flags = __read_seqlock_excl_irqsave(lock); } while (0)

static inline void
read_sequnlock_excl_irqrestore(seqlock_t *sl, unsigned long flags)
{
        spin_unlock_irqrestore(&sl->lock, flags);
}

#endif /* __LINUX_SEQLOCK_H */