@@ -183,62 +183,25 @@ static inline int is_partial_io(struct bio_vec *bvec)
 	return bvec->bv_len != PAGE_SIZE;
 }
 
-static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
-			  u32 index, int offset, struct bio *bio)
+static int zram_decompress_page(struct zram *zram, char *mem, u32 index)
 {
-	int ret;
-	size_t clen;
-	struct page *page;
-	unsigned char *user_mem, *cmem, *uncmem = NULL;
-
-	page = bvec->bv_page;
-
-	if (zram_test_flag(zram, index, ZRAM_ZERO)) {
-		handle_zero_page(bvec);
-		return 0;
-	}
+	int ret = LZO_E_OK;
+	size_t clen = PAGE_SIZE;
+	unsigned char *cmem;
+	unsigned long handle = zram->table[index].handle;
 
-	/* Requested page is not present in compressed area */
-	if (unlikely(!zram->table[index].handle)) {
-		pr_debug("Read before write: sector=%lu, size=%u",
-			 (ulong)(bio->bi_sector), bio->bi_size);
-		handle_zero_page(bvec);
+	if (!handle || zram_test_flag(zram, index, ZRAM_ZERO)) {
+		memset(mem, 0, PAGE_SIZE);
 		return 0;
 	}
 
-	if (is_partial_io(bvec)) {
-		/* Use a temporary buffer to decompress the page */
-		uncmem = kmalloc(PAGE_SIZE, GFP_KERNEL);
-		if (!uncmem) {
-			pr_info("Error allocating temp memory!\n");
-			return -ENOMEM;
-		}
-	}
-
-	user_mem = kmap_atomic(page);
-	if (!is_partial_io(bvec))
-		uncmem = user_mem;
-	clen = PAGE_SIZE;
-
-	cmem = zs_map_object(zram->mem_pool, zram->table[index].handle,
-			     ZS_MM_RO);
-
-	if (zram->table[index].size == PAGE_SIZE) {
-		memcpy(uncmem, cmem, PAGE_SIZE);
-		ret = LZO_E_OK;
-	} else {
+	cmem = zs_map_object(zram->mem_pool, handle, ZS_MM_RO);
+	if (zram->table[index].size == PAGE_SIZE)
+		memcpy(mem, cmem, PAGE_SIZE);
+	else
 		ret = lzo1x_decompress_safe(cmem, zram->table[index].size,
-					    uncmem, &clen);
-	}
-
-	if (is_partial_io(bvec)) {
-		memcpy(user_mem + bvec->bv_offset, uncmem + offset,
-		       bvec->bv_len);
-		kfree(uncmem);
-	}
-
-	zs_unmap_object(zram->mem_pool, zram->table[index].handle);
-	kunmap_atomic(user_mem);
+					    mem, &clen);
+	zs_unmap_object(zram->mem_pool, handle);
 
 	/* Should NEVER happen. Return bio error if it does. */
 	if (unlikely(ret != LZO_E_OK)) {
@@ -247,36 +210,56 @@ static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
 		return ret;
 	}
 
-	flush_dcache_page(page);
-
 	return 0;
 }
 
-static int zram_read_before_write(struct zram *zram, char *mem, u32 index)
+static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
+			  u32 index, int offset, struct bio *bio)
 {
 	int ret;
-	size_t clen = PAGE_SIZE;
-	unsigned char *cmem;
-	unsigned long handle = zram->table[index].handle;
+	struct page *page;
+	unsigned char *user_mem, *uncmem = NULL;
 
-	if (zram_test_flag(zram, index, ZRAM_ZERO) || !handle) {
-		memset(mem, 0, PAGE_SIZE);
+	page = bvec->bv_page;
+
+	if (unlikely(!zram->table[index].handle) ||
+	    zram_test_flag(zram, index, ZRAM_ZERO)) {
+		handle_zero_page(bvec);
 		return 0;
 	}
 
-	cmem = zs_map_object(zram->mem_pool, handle, ZS_MM_RO);
-	ret = lzo1x_decompress_safe(cmem, zram->table[index].size,
-				    mem, &clen);
-	zs_unmap_object(zram->mem_pool, handle);
+	user_mem = kmap_atomic(page);
+	if (is_partial_io(bvec))
+		/* Use a temporary buffer to decompress the page */
+		uncmem = kmalloc(PAGE_SIZE, GFP_KERNEL);
+	else
+		uncmem = user_mem;
+
+	if (!uncmem) {
+		pr_info("Unable to allocate temp memory\n");
+		ret = -ENOMEM;
+		goto out_cleanup;
+	}
+
+	ret = zram_decompress_page(zram, uncmem, index);
 	/* Should NEVER happen. Return bio error if it does. */
 	if (unlikely(ret != LZO_E_OK)) {
 		pr_err("Decompression failed! err=%d, page=%u\n", ret, index);
 		zram_stat64_inc(zram, &zram->stats.failed_reads);
-		return ret;
+		goto out_cleanup;
 	}
 
-	return 0;
+	if (is_partial_io(bvec))
+		memcpy(user_mem + bvec->bv_offset, uncmem + offset,
+		       bvec->bv_len);
+
+	flush_dcache_page(page);
+	ret = 0;
+out_cleanup:
+	kunmap_atomic(user_mem);
+	if (is_partial_io(bvec))
+		kfree(uncmem);
+	return ret;
 }
 
 static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
@@ -302,7 +285,7 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
 		ret = -ENOMEM;
 		goto out;
 	}
-	ret = zram_read_before_write(zram, uncmem, index);
+	ret = zram_decompress_page(zram, uncmem, index);
 	if (ret) {
 		kfree(uncmem);
 		goto out;
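
The shape of the refactor: zram_decompress_page() always yields a full PAGE_SIZE of data (zero-filled for unwritten or ZRAM_ZERO slots, copied for incompressible pages, LZO-decompressed otherwise), and both zram_bvec_read() and the partial-write path in zram_bvec_write() now delegate to it. As a minimal userspace sketch of the same shared-helper pattern: struct slot, SLOT_SIZE, slot_decompress() and slot_read_partial() below are hypothetical stand-ins for zram's table/handle machinery, PAGE_SIZE, zram_decompress_page() and zram_bvec_read(); the compression step is elided, so this illustrates only the control flow, not the kernel code itself.

	#include <errno.h>
	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>

	#define SLOT_SIZE 4096

	struct slot {
		unsigned char *data;	/* NULL: slot was never written */
	};

	/* Always produce a full SLOT_SIZE page; zero-fill unwritten slots. */
	static int slot_decompress(struct slot *s, unsigned char *mem)
	{
		if (!s->data) {
			memset(mem, 0, SLOT_SIZE);
			return 0;
		}
		/* a real implementation would decompress into mem here */
		memcpy(mem, s->data, SLOT_SIZE);
		return 0;
	}

	/* Partial read: decompress into scratch, then copy out the piece. */
	static int slot_read_partial(struct slot *s, unsigned char *dst,
				     size_t offset, size_t len)
	{
		unsigned char *scratch = malloc(SLOT_SIZE);
		int ret;

		if (!scratch)
			return -ENOMEM;
		ret = slot_decompress(s, scratch);
		if (!ret)
			memcpy(dst, scratch + offset, len);
		free(scratch);
		return ret;
	}

	int main(void)
	{
		struct slot s = { .data = NULL };
		unsigned char buf[16];

		if (!slot_read_partial(&s, buf, 512, sizeof(buf)))
			printf("unwritten slot reads back as zero: %d\n", buf[0]);
		return 0;
	}

The same helper is what lets the write path reconstitute the old page before merging a partial write, which is the point of the factoring: one place decides what an unread slot "contains", instead of two slightly different copies of that logic.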