@@ -161,9 +161,9 @@ static void zram_free_page(struct zram *zram, size_t index)
 		goto out;
 	}
 
-	obj = kmap_atomic(page, KM_USER0) + offset;
+	obj = kmap_atomic(page) + offset;
 	clen = xv_get_object_size(obj) - sizeof(struct zobj_header);
-	kunmap_atomic(obj, KM_USER0);
+	kunmap_atomic(obj);
 
 	xv_free(zram->mem_pool, page, offset);
 	if (clen <= PAGE_SIZE / 2)
@@ -182,9 +182,9 @@ static void handle_zero_page(struct bio_vec *bvec)
 	struct page *page = bvec->bv_page;
 	void *user_mem;
 
-	user_mem = kmap_atomic(page, KM_USER0);
+	user_mem = kmap_atomic(page);
 	memset(user_mem + bvec->bv_offset, 0, bvec->bv_len);
-	kunmap_atomic(user_mem, KM_USER0);
+	kunmap_atomic(user_mem);
 
 	flush_dcache_page(page);
 }
@@ -195,12 +195,12 @@ static void handle_uncompressed_page(struct zram *zram, struct bio_vec *bvec,
 	struct page *page = bvec->bv_page;
 	unsigned char *user_mem, *cmem;
 
-	user_mem = kmap_atomic(page, KM_USER0);
-	cmem = kmap_atomic(zram->table[index].page, KM_USER1);
+	user_mem = kmap_atomic(page);
+	cmem = kmap_atomic(zram->table[index].page);
 
 	memcpy(user_mem + bvec->bv_offset, cmem + offset, bvec->bv_len);
-	kunmap_atomic(cmem, KM_USER1);
-	kunmap_atomic(user_mem, KM_USER0);
+	kunmap_atomic(cmem);
+	kunmap_atomic(user_mem);
 
 	flush_dcache_page(page);
 }
@@ -249,12 +249,12 @@ static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
 		}
 	}
 
-	user_mem = kmap_atomic(page, KM_USER0);
+	user_mem = kmap_atomic(page);
 	if (!is_partial_io(bvec))
 		uncmem = user_mem;
 	clen = PAGE_SIZE;
 
-	cmem = kmap_atomic(zram->table[index].page, KM_USER1) +
+	cmem = kmap_atomic(zram->table[index].page) +
 		zram->table[index].offset;
 
 	ret = lzo1x_decompress_safe(cmem + sizeof(*zheader),
@@ -267,8 +267,8 @@ static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
 		kfree(uncmem);
 	}
 
-	kunmap_atomic(cmem, KM_USER1);
-	kunmap_atomic(user_mem, KM_USER0);
+	kunmap_atomic(cmem);
+	kunmap_atomic(user_mem);
 
 	/* Should NEVER happen. Return bio error if it does. */
 	if (unlikely(ret != LZO_E_OK)) {
@@ -295,20 +295,20 @@ static int zram_read_before_write(struct zram *zram, char *mem, u32 index)
 		return 0;
 	}
 
-	cmem = kmap_atomic(zram->table[index].page, KM_USER0) +
+	cmem = kmap_atomic(zram->table[index].page) +
 		zram->table[index].offset;
 
 	/* Page is stored uncompressed since it's incompressible */
 	if (unlikely(zram_test_flag(zram, index, ZRAM_UNCOMPRESSED))) {
 		memcpy(mem, cmem, PAGE_SIZE);
-		kunmap_atomic(cmem, KM_USER0);
+		kunmap_atomic(cmem);
 		return 0;
 	}
 
 	ret = lzo1x_decompress_safe(cmem + sizeof(*zheader),
 			xv_get_object_size(cmem) - sizeof(*zheader),
 			mem, &clen);
-	kunmap_atomic(cmem, KM_USER0);
+	kunmap_atomic(cmem);
 
 	/* Should NEVER happen. Return bio error if it does. */
 	if (unlikely(ret != LZO_E_OK)) {
@@ -359,7 +359,7 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
 	    zram_test_flag(zram, index, ZRAM_ZERO))
 		zram_free_page(zram, index);
 
-	user_mem = kmap_atomic(page, KM_USER0);
+	user_mem = kmap_atomic(page);
 
 	if (is_partial_io(bvec))
 		memcpy(uncmem + offset, user_mem + bvec->bv_offset,
@@ -368,7 +368,7 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
 		uncmem = user_mem;
 
 	if (page_zero_filled(uncmem)) {
-		kunmap_atomic(user_mem, KM_USER0);
+		kunmap_atomic(user_mem);
 		if (is_partial_io(bvec))
 			kfree(uncmem);
 		zram_stat_inc(&zram->stats.pages_zero);
@@ -380,7 +380,7 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
 	ret = lzo1x_1_compress(uncmem, PAGE_SIZE, src, &clen,
 			       zram->compress_workmem);
 
-	kunmap_atomic(user_mem, KM_USER0);
+	kunmap_atomic(user_mem);
 	if (is_partial_io(bvec))
 		kfree(uncmem);
 
@@ -408,7 +408,7 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
 		zram_set_flag(zram, index, ZRAM_UNCOMPRESSED);
 		zram_stat_inc(&zram->stats.pages_expand);
 		zram->table[index].page = page_store;
-		src = kmap_atomic(page, KM_USER0);
+		src = kmap_atomic(page);
 		goto memstore;
 	}
 
@@ -424,7 +424,7 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
 memstore:
 	zram->table[index].offset = store_offset;
 
-	cmem = kmap_atomic(zram->table[index].page, KM_USER1) +
+	cmem = kmap_atomic(zram->table[index].page) +
 		zram->table[index].offset;
 
 #if 0
@@ -438,9 +438,9 @@ memstore:
 
 	memcpy(cmem, src, clen);
 
-	kunmap_atomic(cmem, KM_USER1);
+	kunmap_atomic(cmem);
 	if (unlikely(zram_test_flag(zram, index, ZRAM_UNCOMPRESSED)))
-		kunmap_atomic(src, KM_USER0);
+		kunmap_atomic(src);
 
 	/* Update stats */
 	zram_stat64_add(zram, &zram->stats.compr_size, clen);
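
Note on ordering: the KM_type-less kmap_atomic() nests mappings on a small per-CPU stack instead of fixed per-type slots, so kunmap_atomic() must release them in reverse order of acquisition. The hunks above already satisfy this (cmem is always mapped after user_mem and unmapped before it). A minimal sketch of the pattern, with copy_page_atomic(), dst_page, and src_page as hypothetical names used purely for illustration:

	#include <linux/highmem.h>
	#include <linux/string.h>

	/* Illustration only, not part of this patch: copy one page into
	 * another under the stack-based atomic kmap API. */
	static void copy_page_atomic(struct page *dst_page, struct page *src_page)
	{
		void *dst = kmap_atomic(dst_page);	/* pushed first */
		void *src = kmap_atomic(src_page);	/* pushed second */

		memcpy(dst, src, PAGE_SIZE);

		kunmap_atomic(src);	/* pop in reverse order: last mapped, */
		kunmap_atomic(dst);	/* first unmapped */
	}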