@@ -46,7 +46,7 @@ static inline size_t buffer_start(struct persistent_ram_zone *prz)
 }
 
 /* increase and wrap the start pointer, returning the old value */
-static inline size_t buffer_start_add(struct persistent_ram_zone *prz, size_t a)
+static size_t buffer_start_add_atomic(struct persistent_ram_zone *prz, size_t a)
 {
 	int old;
 	int new;
@@ -62,7 +62,7 @@ static inline size_t buffer_start_add(struct persistent_ram_zone *prz, size_t a)
 }
 
 /* increase the size counter until it hits the max size */
-static inline void buffer_size_add(struct persistent_ram_zone *prz, size_t a)
+static void buffer_size_add_atomic(struct persistent_ram_zone *prz, size_t a)
 {
 	size_t old;
 	size_t new;
@@ -78,6 +78,53 @@ static inline void buffer_size_add(struct persistent_ram_zone *prz, size_t a)
 	} while (atomic_cmpxchg(&prz->buffer->size, old, new) != old);
 }
 
+static DEFINE_RAW_SPINLOCK(buffer_lock);
+
+/* increase and wrap the start pointer, returning the old value */
+static size_t buffer_start_add_locked(struct persistent_ram_zone *prz, size_t a)
+{
+	int old;
+	int new;
+	unsigned long flags;
+
+	raw_spin_lock_irqsave(&buffer_lock, flags);
+
+	old = atomic_read(&prz->buffer->start);
+	new = old + a;
+	while (unlikely(new > prz->buffer_size))
+		new -= prz->buffer_size;
+	atomic_set(&prz->buffer->start, new);
+
+	raw_spin_unlock_irqrestore(&buffer_lock, flags);
+
+	return old;
+}
+
+/* increase the size counter until it hits the max size */
+static void buffer_size_add_locked(struct persistent_ram_zone *prz, size_t a)
+{
+	size_t old;
+	size_t new;
+	unsigned long flags;
+
+	raw_spin_lock_irqsave(&buffer_lock, flags);
+
+	old = atomic_read(&prz->buffer->size);
+	if (old == prz->buffer_size)
+		goto exit;
+
+	new = old + a;
+	if (new > prz->buffer_size)
+		new = prz->buffer_size;
+	atomic_set(&prz->buffer->size, new);
+
+exit:
+	raw_spin_unlock_irqrestore(&buffer_lock, flags);
+}
+
+static size_t (*buffer_start_add)(struct persistent_ram_zone *, size_t) = buffer_start_add_atomic;
+static void (*buffer_size_add)(struct persistent_ram_zone *, size_t) = buffer_size_add_atomic;
+
 static void notrace persistent_ram_encode_rs8(struct persistent_ram_zone *prz,
 	uint8_t *data, size_t len, uint8_t *ecc)
 {
@@ -372,6 +419,9 @@ static void *persistent_ram_iomap(phys_addr_t start, size_t size)
 		return NULL;
 	}
 
+	buffer_start_add = buffer_start_add_locked;
+	buffer_size_add = buffer_size_add_locked;
+
 	return ioremap(start, size);
 }
 
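For readers less familiar with the pattern, the hunks above replace direct calls with function pointers so that regions mapped via ioremap() never see atomic read-modify-write accesses, which may not be supported for device memory on all architectures. Below is a minimal userspace sketch of that dispatch idea, not the kernel code: every name in it (demo_buffer, start_add_atomic, start_add_locked, demo_lock) is invented for illustration, C11 atomics stand in for atomic_cmpxchg(), and a pthread mutex stands in for the raw spinlock.

/*
 * Userspace sketch of the same dispatch pattern (illustrative only, not
 * kernel API): default to a lock-free compare-and-swap updater, and flip
 * the function pointer to a mutex-protected updater when the backing
 * memory cannot tolerate atomic read-modify-write accesses.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stddef.h>
#include <stdio.h>

struct demo_buffer {
	atomic_size_t start;	/* write position, wraps at size */
	size_t size;		/* total buffer size */
};

static pthread_mutex_t demo_lock = PTHREAD_MUTEX_INITIALIZER;

/* CAS loop, mirroring buffer_start_add_atomic() */
static size_t start_add_atomic(struct demo_buffer *b, size_t a)
{
	size_t old, new;

	do {
		old = atomic_load(&b->start);
		new = old + a;
		while (new > b->size)
			new -= b->size;
	} while (!atomic_compare_exchange_weak(&b->start, &old, new));

	return old;
}

/* plain load/store under a lock, mirroring buffer_start_add_locked() */
static size_t start_add_locked(struct demo_buffer *b, size_t a)
{
	size_t old, new;

	pthread_mutex_lock(&demo_lock);
	old = atomic_load(&b->start);
	new = old + a;
	while (new > b->size)
		new -= b->size;
	atomic_store(&b->start, new);
	pthread_mutex_unlock(&demo_lock);

	return old;
}

/* dispatch pointer, defaulting to the atomic variant */
static size_t (*start_add)(struct demo_buffer *, size_t) = start_add_atomic;

int main(void)
{
	struct demo_buffer b = { .start = 0, .size = 16 };

	/* pretend the region turned out to be ioremapped: switch variants */
	start_add = start_add_locked;

	printf("old start: %zu\n", start_add(&b, 20));	/* prints 0 */
	printf("new start: %zu\n", atomic_load(&b.start));	/* prints 4 */
	return 0;
}

The value of the indirection is that callers never have to check what kind of memory backs the buffer; the mapping path flips the pointers once, exactly as the persistent_ram_iomap() hunk does above.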