@@ -265,7 +265,7 @@ out_cleanup:
 static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
 			   int offset)
 {
-	int ret;
+	int ret = 0;
 	size_t clen;
 	unsigned long handle;
 	struct page *page;
@@ -286,10 +286,8 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
 			goto out;
 		}
 		ret = zram_decompress_page(zram, uncmem, index);
-		if (ret) {
-			kfree(uncmem);
+		if (ret)
 			goto out;
-		}
 	}
 
 	/*
@@ -302,16 +300,18 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
 
 	user_mem = kmap_atomic(page);
 
-	if (is_partial_io(bvec))
+	if (is_partial_io(bvec)) {
 		memcpy(uncmem + offset, user_mem + bvec->bv_offset,
 		       bvec->bv_len);
-	else
+		kunmap_atomic(user_mem);
+		user_mem = NULL;
+	} else {
 		uncmem = user_mem;
+	}
 
 	if (page_zero_filled(uncmem)) {
-		kunmap_atomic(user_mem);
-		if (is_partial_io(bvec))
-			kfree(uncmem);
+		if (!is_partial_io(bvec))
+			kunmap_atomic(user_mem);
 		zram_stat_inc(&zram->stats.pages_zero);
 		zram_set_flag(zram, index, ZRAM_ZERO);
 		ret = 0;
@@ -321,9 +321,11 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
 	ret = lzo1x_1_compress(uncmem, PAGE_SIZE, src, &clen,
 			       zram->compress_workmem);
 
-	kunmap_atomic(user_mem);
-	if (is_partial_io(bvec))
-		kfree(uncmem);
+	if (!is_partial_io(bvec)) {
+		kunmap_atomic(user_mem);
+		user_mem = NULL;
+		uncmem = NULL;
+	}
 
 	if (unlikely(ret != LZO_E_OK)) {
 		pr_err("Compression failed! err=%d\n", ret);
@@ -332,8 +334,10 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
 
 	if (unlikely(clen > max_zpage_size)) {
 		zram_stat_inc(&zram->stats.bad_compress);
-		src = uncmem;
 		clen = PAGE_SIZE;
+		src = NULL;
+		if (is_partial_io(bvec))
+			src = uncmem;
 	}
 
 	handle = zs_malloc(zram->mem_pool, clen);
@@ -345,7 +349,11 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
 	}
 	cmem = zs_map_object(zram->mem_pool, handle, ZS_MM_WO);
 
+	if ((clen == PAGE_SIZE) && !is_partial_io(bvec))
+		src = kmap_atomic(page);
 	memcpy(cmem, src, clen);
+	if ((clen == PAGE_SIZE) && !is_partial_io(bvec))
+		kunmap_atomic(src);
 
 	zs_unmap_object(zram->mem_pool, handle);
 
@@ -358,9 +366,10 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
 	if (clen <= PAGE_SIZE / 2)
 		zram_stat_inc(&zram->stats.good_compress);
 
-	return 0;
-
 out:
+	if (is_partial_io(bvec))
+		kfree(uncmem);
+
 	if (ret)
 		zram_stat64_inc(zram, &zram->stats.failed_writes);
 	return ret;
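
For readers following the resource handling: the net effect of the hunks above is that each kmap_atomic()/kunmap_atomic() pair stays confined to the branch that created it, and the bounce buffer allocated for partial I/O is freed in exactly one place, the out: label. Below is a minimal userspace sketch of that convention; it is not from the kernel tree, and names such as write_page() and the bounce/partial handling are illustrative assumptions only.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define PAGE_SIZE 4096

/* Sketch: every failure path jumps to a single "out:" label, which
 * frees the staging buffer only when one was allocated (the partial
 * I/O case), mirroring the patch's centralized
 * "if (is_partial_io(bvec)) kfree(uncmem);" cleanup. */
static int write_page(const char *data, size_t offset, size_t len)
{
	int ret = 0;		/* initialized up front, as in the patch */
	int partial = (offset != 0 || len != PAGE_SIZE);
	char *bounce = NULL;

	if (partial) {
		/* Partial write: stage the update in a private buffer.
		 * (The real driver first decompresses the old page
		 * contents into this buffer.) */
		bounce = malloc(PAGE_SIZE);
		if (!bounce) {
			ret = -1;
			goto out;	/* single exit; nothing leaks */
		}
		memcpy(bounce + offset, data, len);
	}

	/* ... compress and store either `bounce` or the caller's page ... */

out:
	/* One cleanup site: free the buffer only for partial I/O. */
	if (partial)
		free(bounce);
	if (ret)
		fprintf(stderr, "write failed\n");
	return ret;
}

int main(void)
{
	char chunk[] = "partial update";
	return write_page(chunk, 128, sizeof(chunk));
}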