@@ -3,15 +3,21 @@
/*
* Reader/writer consistent mechanism without starving writers. This type of
* lock for data where the reader wants a consistent set of information
- * and is willing to retry if the information changes. Readers never
- * block but they may have to retry if a writer is in
- * progress. Writers do not wait for readers.
+ * and is willing to retry if the information changes. There are two types
+ * of readers:
+ * 1. Sequence readers which never block a writer but may have to retry
+ * if a writer is in progress, detected by a change in the sequence number.
+ * Writers do not wait for a sequence reader.
+ * 2. Locking readers which will wait if a writer or another locking reader
+ * is in progress. A locking reader in progress will also block a writer
+ * from going forward. Unlike the regular rwlock, the read lock here is
+ * exclusive so that only one locking reader can get it.
*
- * This is not as cache friendly as brlock. Also, this will not work
+ * This is not as cache friendly as brlock. Also, this may not work well
* for data that contains pointers, because any writer could
* invalidate a pointer that a reader was following.
*
- * Expected reader usage:
+ * Expected non-blocking reader usage:
* do {
* seq = read_seqbegin(&foo);
* ...
@@ -268,4 +274,56 @@ write_sequnlock_irqrestore(seqlock_t *sl, unsigned long flags)
spin_unlock_irqrestore(&sl->lock, flags);
}
+/*
+ * A locking reader exclusively locks out other writers and locking readers,
+ * but doesn't update the sequence number. Acts like a normal spin_lock/unlock.
+ * Don't need preempt_disable() because that is in the spin_lock already.
+ */
+static inline void read_seqlock_excl(seqlock_t *sl)
+{
+ spin_lock(&sl->lock);
+}
+
+static inline void read_sequnlock_excl(seqlock_t *sl)
+{
+ spin_unlock(&sl->lock);
+}
+
+static inline void read_seqlock_excl_bh(seqlock_t *sl)
+{
+ spin_lock_bh(&sl->lock);
+}
+
+static inline void read_sequnlock_excl_bh(seqlock_t *sl)
+{
+ spin_unlock_bh(&sl->lock);
+}
+
+static inline void read_seqlock_excl_irq(seqlock_t *sl)
+{
+ spin_lock_irq(&sl->lock);
+}
+
+static inline void read_sequnlock_excl_irq(seqlock_t *sl)
+{
+ spin_unlock_irq(&sl->lock);
+}
+
+static inline unsigned long __read_seqlock_excl_irqsave(seqlock_t *sl)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&sl->lock, flags);
+ return flags;
+}
+
+#define read_seqlock_excl_irqsave(lock, flags) \
+ do { flags = __read_seqlock_excl_irqsave(lock); } while (0)
+
+static inline void
+read_sequnlock_excl_irqrestore(seqlock_t *sl, unsigned long flags)
+{
+ spin_unlock_irqrestore(&sl->lock, flags);
+}
+
#endif /* __LINUX_SEQLOCK_H */
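
Illustrative usage, not part of the patch: the non-blocking reader pattern in the
comment above pairs read_seqbegin() with read_seqretry() in a retry loop, while a
writer serializes updates with write_seqlock()/write_sequnlock(). The names
foo_lock, foo_a, foo_b and the two functions below are hypothetical, sketched only
to show the calling convention.

#include <linux/seqlock.h>

static DEFINE_SEQLOCK(foo_lock);	/* hypothetical seqlock */
static unsigned long foo_a, foo_b;	/* data it protects */

/* Sequence reader: never blocks the writer, retries if an update raced with it. */
static unsigned long foo_read_sum(void)
{
	unsigned int seq;
	unsigned long a, b;

	do {
		seq = read_seqbegin(&foo_lock);
		a = foo_a;
		b = foo_b;
	} while (read_seqretry(&foo_lock, seq));

	return a + b;
}

/* Writer: bumps the sequence count around the update so readers can detect it. */
static void foo_write(unsigned long a, unsigned long b)
{
	write_seqlock(&foo_lock);
	foo_a = a;
	foo_b = b;
	write_sequnlock(&foo_lock);
}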
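A locking reader, by contrast, takes the underlying spinlock through the new
read_seqlock_excl()/read_sequnlock_excl() pair, so it needs no retry loop, at the
cost of excluding the writer and any other locking reader for the duration. A
hedged sketch reusing the hypothetical foo_lock/foo_a/foo_b above; the _bh, _irq
and _irqsave variants added by the patch follow the same pattern when the data is
also touched from softirq or hardirq context.

/*
 * Locking reader: takes a consistent snapshot without retrying, but blocks
 * (and is blocked by) the writer and other locking readers.
 */
static unsigned long foo_read_sum_excl(void)
{
	unsigned long sum;

	read_seqlock_excl(&foo_lock);
	sum = foo_a + foo_b;
	read_sequnlock_excl(&foo_lock);

	return sum;
}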