@@ -359,8 +359,6 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
 	    zram_test_flag(zram, index, ZRAM_ZERO))
 		zram_free_page(zram, index);
 
-	mutex_lock(&zram->lock);
-
 	user_mem = kmap_atomic(page, KM_USER0);
 
 	if (is_partial_io(bvec))
@@ -371,7 +369,6 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
 
 	if (page_zero_filled(uncmem)) {
 		kunmap_atomic(user_mem, KM_USER0);
-		mutex_unlock(&zram->lock);
 		if (is_partial_io(bvec))
 			kfree(uncmem);
 		zram_stat_inc(&zram->stats.pages_zero);
@@ -388,7 +385,6 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
 		kfree(uncmem);
 
 	if (unlikely(ret != LZO_E_OK)) {
-		mutex_unlock(&zram->lock);
 		pr_err("Compression failed! err=%d\n", ret);
 		goto out;
 	}
@@ -402,7 +398,6 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
 		clen = PAGE_SIZE;
 		page_store = alloc_page(GFP_NOIO | __GFP_HIGHMEM);
 		if (unlikely(!page_store)) {
-			mutex_unlock(&zram->lock);
 			pr_info("Error allocating memory for "
 				"incompressible page: %u\n", index);
 			ret = -ENOMEM;
@@ -420,7 +415,6 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
 	if (xv_malloc(zram->mem_pool, clen + sizeof(*zheader),
 		      &zram->table[index].page, &store_offset,
 		      GFP_NOIO | __GFP_HIGHMEM)) {
-		mutex_unlock(&zram->lock);
 		pr_info("Error allocating memory for compressed "
 			"page: %u, size=%zu\n", index, clen);
 		ret = -ENOMEM;
@@ -454,8 +448,6 @@ memstore:
 	if (clen <= PAGE_SIZE / 2)
 		zram_stat_inc(&zram->stats.good_compress);
 
-	mutex_unlock(&zram->lock);
-
 	return 0;
 
 out:
@@ -467,10 +459,19 @@ out:
 static int zram_bvec_rw(struct zram *zram, struct bio_vec *bvec, u32 index,
 			int offset, struct bio *bio, int rw)
 {
-	if (rw == READ)
-		return zram_bvec_read(zram, bvec, index, offset, bio);
+	int ret;
 
-	return zram_bvec_write(zram, bvec, index, offset);
+	if (rw == READ) {
+		down_read(&zram->lock);
+		ret = zram_bvec_read(zram, bvec, index, offset, bio);
+		up_read(&zram->lock);
+	} else {
+		down_write(&zram->lock);
+		ret = zram_bvec_write(zram, bvec, index, offset);
+		up_write(&zram->lock);
+	}
+
+	return ret;
 }
 
 static void update_position(u32 *index, int *offset, struct bio_vec *bvec)
@@ -701,7 +702,7 @@ static int create_device(struct zram *zram, int device_id)
 {
 	int ret = 0;
 
-	mutex_init(&zram->lock);
+	init_rwsem(&zram->lock);
 	mutex_init(&zram->init_lock);
 	spin_lock_init(&zram->stat64_lock);
 
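
For illustration only, not part of the patch: with the hunks above applied, the per-bvec entry point reads as below. This assumes zram->lock is changed to a struct rw_semaphore in zram_drv.h (that change is outside this excerpt); the effect is that concurrent readers may decompress in parallel, while a write still excludes everything else.

	/*
	 * Sketch of zram_bvec_rw() after the conversion: the lock that used to
	 * be taken inside zram_bvec_write() now wraps both paths here.
	 */
	static int zram_bvec_rw(struct zram *zram, struct bio_vec *bvec, u32 index,
				int offset, struct bio *bio, int rw)
	{
		int ret;

		if (rw == READ) {
			/* shared: multiple readers may hold the semaphore at once */
			down_read(&zram->lock);
			ret = zram_bvec_read(zram, bvec, index, offset, bio);
			up_read(&zram->lock);
		} else {
			/* exclusive: writers are still serialized, as with the old mutex */
			down_write(&zram->lock);
			ret = zram_bvec_write(zram, bvec, index, offset);
			up_write(&zram->lock);
		}

		return ret;
	}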