@@ -42,25 +42,6 @@ static struct zram *zram_devices;
 /* Module params (documentation at end) */
 static unsigned int num_devices = 1;
 
-static void zram_stat64_add(struct zram *zram, u64 *v, u64 inc)
-{
-	spin_lock(&zram->stat64_lock);
-	*v = *v + inc;
-	spin_unlock(&zram->stat64_lock);
-}
-
-static void zram_stat64_sub(struct zram *zram, u64 *v, u64 dec)
-{
-	spin_lock(&zram->stat64_lock);
-	*v = *v - dec;
-	spin_unlock(&zram->stat64_lock);
-}
-
-static void zram_stat64_inc(struct zram *zram, u64 *v)
-{
-	zram_stat64_add(zram, v, 1);
-}
-
 static int zram_test_flag(struct zram_meta *meta, u32 index,
 			enum zram_pageflags flag)
 {
@@ -120,8 +101,7 @@ static void zram_free_page(struct zram *zram, size_t index)
 	if (size <= PAGE_SIZE / 2)
 		zram->stats.good_compress--;
 
-	zram_stat64_sub(zram, &zram->stats.compr_size,
-			meta->table[index].size);
+	atomic64_sub(meta->table[index].size, &zram->stats.compr_size);
 	zram->stats.pages_stored--;
 
 	meta->table[index].handle = 0;
@@ -172,7 +152,7 @@ static int zram_decompress_page(struct zram *zram, char *mem, u32 index)
 	/* Should NEVER happen. Return bio error if it does. */
 	if (unlikely(ret != LZO_E_OK)) {
 		pr_err("Decompression failed! err=%d, page=%u\n", ret, index);
-		zram_stat64_inc(zram, &zram->stats.failed_reads);
+		atomic64_inc(&zram->stats.failed_reads);
 		return ret;
 	}
 
@@ -326,7 +306,7 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
 	meta->table[index].size = clen;
 
 	/* Update stats */
-	zram_stat64_add(zram, &zram->stats.compr_size, clen);
+	atomic64_add(clen, &zram->stats.compr_size);
 	zram->stats.pages_stored++;
 	if (clen <= PAGE_SIZE / 2)
 		zram->stats.good_compress++;
@@ -336,7 +316,7 @@ out:
 		kfree(uncmem);
 
 	if (ret)
-		zram_stat64_inc(zram, &zram->stats.failed_writes);
+		atomic64_inc(&zram->stats.failed_writes);
 	return ret;
 }
 
@@ -373,10 +353,10 @@ static void __zram_make_request(struct zram *zram, struct bio *bio, int rw)
 
 	switch (rw) {
 	case READ:
-		zram_stat64_inc(zram, &zram->stats.num_reads);
+		atomic64_inc(&zram->stats.num_reads);
 		break;
 	case WRITE:
-		zram_stat64_inc(zram, &zram->stats.num_writes);
+		atomic64_inc(&zram->stats.num_writes);
 		break;
 	}
 
@@ -456,7 +436,7 @@ static void zram_make_request(struct request_queue *queue, struct bio *bio)
 		goto error;
 
 	if (!valid_io_request(zram, bio)) {
-		zram_stat64_inc(zram, &zram->stats.invalid_io);
+		atomic64_inc(&zram->stats.invalid_io);
 		goto error;
 	}
 
@@ -595,7 +575,7 @@ static void zram_slot_free_notify(struct block_device *bdev,
 	down_write(&zram->lock);
 	zram_free_page(zram, index);
 	up_write(&zram->lock);
-	zram_stat64_inc(zram, &zram->stats.notify_free);
+	atomic64_inc(&zram->stats.notify_free);
 }
 
 static const struct block_device_operations zram_devops = {
@@ -609,7 +589,6 @@ static int create_device(struct zram *zram, int device_id)
 
 	init_rwsem(&zram->lock);
 	init_rwsem(&zram->init_lock);
-	spin_lock_init(&zram->stat64_lock);
 
 	zram->queue = blk_alloc_queue(GFP_KERNEL);
 	if (!zram->queue) {
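
Note on the pattern above: the patch drops a dedicated spinlock (stat64_lock) and the open-coded add/sub/inc helpers in favor of the kernel's atomic64_*() operations from <linux/atomic.h>; the corresponding zram->stats counters presumably change from u64 to atomic64_t in the header, which is not part of this excerpt. For readers outside the kernel tree, here is a small userspace sketch of the same before/after idea, using a pthread mutex and C11 <stdatomic.h>. The names (locked_stat, atomic_stat_add, and so on) are illustrative only and do not appear in the zram sources.

/* Build with: cc -std=gnu11 -pthread stat_demo.c -o stat_demo */
#include <pthread.h>
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

/*
 * "Before": every update of a plain 64-bit counter takes a lock,
 * mirroring what zram_stat64_add() did with stat64_lock.
 */
struct locked_stat {
	pthread_mutex_t lock;
	uint64_t value;
};

static void locked_stat_add(struct locked_stat *s, uint64_t inc)
{
	pthread_mutex_lock(&s->lock);
	s->value += inc;
	pthread_mutex_unlock(&s->lock);
}

/*
 * "After": the counter itself is atomic, so no separate lock is needed;
 * this plays the role of atomic64_add()/atomic64_inc() in the patch.
 */
typedef _Atomic uint64_t atomic_stat_t;

static void atomic_stat_add(atomic_stat_t *s, uint64_t inc)
{
	atomic_fetch_add_explicit(s, inc, memory_order_relaxed);
}

int main(void)
{
	struct locked_stat before = { PTHREAD_MUTEX_INITIALIZER, 0 };
	atomic_stat_t after = 0;

	locked_stat_add(&before, 4096);
	atomic_stat_add(&after, 4096);

	printf("locked: %llu, atomic: %llu\n",
	       (unsigned long long)before.value,
	       (unsigned long long)atomic_load(&after));
	return 0;
}

Once the counters are atomic there is no longer any state for stat64_lock to protect, which is why the final hunk can also remove the spin_lock_init() call from create_device().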