@@ -211,11 +211,6 @@ static void zram_read(struct zram *zram, struct bio *bio)
 	u32 index;
 	struct bio_vec *bvec;
 
-	if (unlikely(!zram->init_done)) {
-		bio_endio(bio, -ENXIO);
-		return;
-	}
-
 	zram_stat64_inc(zram, &zram->stats.num_reads);
 	index = bio->bi_sector >> SECTORS_PER_PAGE_SHIFT;
 
@@ -286,20 +281,15 @@ out:
 
 static void zram_write(struct zram *zram, struct bio *bio)
 {
-	int i, ret;
+	int i;
 	u32 index;
 	struct bio_vec *bvec;
 
-	if (unlikely(!zram->init_done)) {
-		ret = zram_init_device(zram);
-		if (ret)
-			goto out;
-	}
-
 	zram_stat64_inc(zram, &zram->stats.num_writes);
 	index = bio->bi_sector >> SECTORS_PER_PAGE_SHIFT;
 
 	bio_for_each_segment(bvec, bio, i) {
+		int ret;
 		u32 offset;
 		size_t clen;
 		struct zobj_header *zheader;
@@ -445,6 +435,11 @@ static int zram_make_request(struct request_queue *queue, struct bio *bio)
 		return 0;
 	}
 
+	if (unlikely(!zram->init_done) && zram_init_device(zram)) {
+		bio_io_error(bio);
+		return 0;
+	}
+
 	switch (bio_data_dir(bio)) {
 	case READ:
 		zram_read(zram, bio);
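
For reference, a condensed sketch (not part of the patch) of how the request
entry point reads once the init_done check is hoisted out of zram_read() and
zram_write(); the queuedata lookup and the WRITE arm lie outside the hunks
above and are assumed from the surrounding driver code:

	static int zram_make_request(struct request_queue *queue, struct bio *bio)
	{
		struct zram *zram = queue->queuedata;	/* assumed setup */

		/* ... bio validity checks elided (they return 0 above) ... */

		/*
		 * Lazily initialize the device on first I/O.  Previously only
		 * the write path did this; a read arriving before any write
		 * was failed with -ENXIO instead of triggering initialization.
		 */
		if (unlikely(!zram->init_done) && zram_init_device(zram)) {
			bio_io_error(bio);	/* init failed: fail the bio */
			return 0;
		}

		switch (bio_data_dir(bio)) {
		case READ:
			zram_read(zram, bio);
			break;
		case WRITE:	/* assumed, beyond the hunk above */
			zram_write(zram, bio);
			break;
		}

		return 0;
	}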