@@ -440,6 +440,14 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
 		goto out;
 	}
 
+	/*
+	 * zram_slot_free_notify could have missed a free, so
+	 * double check here.
+	 */
+	if (unlikely(meta->table[index].handle ||
+			zram_test_flag(meta, index, ZRAM_ZERO)))
+		zram_free_page(zram, index);
+
 	ret = lzo1x_1_compress(uncmem, PAGE_SIZE, src, &clen,
 			       meta->compress_workmem);
 
@@ -505,6 +513,20 @@ out:
 	return ret;
 }
 
+static void handle_pending_slot_free(struct zram *zram)
+{
+	struct zram_slot_free *free_rq;
+
+	spin_lock(&zram->slot_free_lock);
+	while (zram->slot_free_rq) {
+		free_rq = zram->slot_free_rq;
+		zram->slot_free_rq = free_rq->next;
+		zram_free_page(zram, free_rq->index);
+		kfree(free_rq);
+	}
+	spin_unlock(&zram->slot_free_lock);
+}
+
 static int zram_bvec_rw(struct zram *zram, struct bio_vec *bvec, u32 index,
 			int offset, struct bio *bio, int rw)
 {
@@ -512,10 +534,12 @@ static int zram_bvec_rw(struct zram *zram, struct bio_vec *bvec, u32 index,
 
 	if (rw == READ) {
 		down_read(&zram->lock);
+		handle_pending_slot_free(zram);
 		ret = zram_bvec_read(zram, bvec, index, offset, bio);
 		up_read(&zram->lock);
 	} else {
 		down_write(&zram->lock);
+		handle_pending_slot_free(zram);
 		ret = zram_bvec_write(zram, bvec, index, offset);
 		up_write(&zram->lock);
 	}
@@ -528,6 +552,8 @@ static void zram_reset_device(struct zram *zram, bool reset_capacity)
 	size_t index;
 	struct zram_meta *meta;
 
+	flush_work(&zram->free_work);
+
 	down_write(&zram->init_lock);
 	if (!zram->init_done) {
 		up_write(&zram->init_lock);
@@ -722,16 +748,40 @@ error:
 	bio_io_error(bio);
 }
 
+static void zram_slot_free(struct work_struct *work)
+{
+	struct zram *zram;
+
+	zram = container_of(work, struct zram, free_work);
+	down_write(&zram->lock);
+	handle_pending_slot_free(zram);
+	up_write(&zram->lock);
+}
+
+static void add_slot_free(struct zram *zram, struct zram_slot_free *free_rq)
+{
+	spin_lock(&zram->slot_free_lock);
+	free_rq->next = zram->slot_free_rq;
+	zram->slot_free_rq = free_rq;
+	spin_unlock(&zram->slot_free_lock);
+}
+
 static void zram_slot_free_notify(struct block_device *bdev,
 				unsigned long index)
 {
 	struct zram *zram;
+	struct zram_slot_free *free_rq;
 
 	zram = bdev->bd_disk->private_data;
-	down_write(&zram->lock);
-	zram_free_page(zram, index);
-	up_write(&zram->lock);
 	atomic64_inc(&zram->stats.notify_free);
+
+	free_rq = kmalloc(sizeof(struct zram_slot_free), GFP_ATOMIC);
+	if (!free_rq)
+		return;
+
+	free_rq->index = index;
+	add_slot_free(zram, free_rq);
+	schedule_work(&zram->free_work);
 }
 
 static const struct block_device_operations zram_devops = {
@@ -778,6 +828,10 @@ static int create_device(struct zram *zram, int device_id)
 	init_rwsem(&zram->lock);
 	init_rwsem(&zram->init_lock);
 
+	INIT_WORK(&zram->free_work, zram_slot_free);
+	spin_lock_init(&zram->slot_free_lock);
+	zram->slot_free_rq = NULL;
+
 	zram->queue = blk_alloc_queue(GFP_KERNEL);
 	if (!zram->queue) {
 		pr_err("Error allocating disk queue for device %d\n",
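
The hunks above reference declarations that fall outside this section: the
struct zram_slot_free list node and three new struct zram members
(free_work, slot_free_rq, slot_free_lock). A minimal sketch of the
companion header change, inferred from the usages in this diff rather than
taken from it, would look like:

struct zram_slot_free {
	unsigned long index;			/* slot to free later */
	struct zram_slot_free *next;		/* singly linked pending list */
};

struct zram {
	struct rw_semaphore lock;		/* existing member, per init_rwsem above */
	struct work_struct free_work;		/* runs zram_slot_free */
	struct zram_slot_free *slot_free_rq;	/* head of pending-free list */
	spinlock_t slot_free_lock;		/* protects slot_free_rq */
	/* ... remaining members unchanged ... */
};

The GFP_ATOMIC allocation and the deferral to a workqueue suggest that
zram_slot_free_notify can be invoked from a context where sleeping on
zram->lock is not allowed, which is presumably why the free is queued here
and performed later under the rw-semaphore.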