@@ -38,6 +38,8 @@ struct io {
 	struct dm_io_client *client;
 	io_notify_fn callback;
 	void *context;
+	void *vma_invalidate_address;
+	unsigned long vma_invalidate_size;
 } __attribute__((aligned(DM_IO_MAX_REGIONS)));
 
 static struct kmem_cache *_dm_io_cache;
@@ -116,6 +118,10 @@ static void dec_count(struct io *io, unsigned int region, int error)
 		set_bit(region, &io->error_bits);
 
 	if (atomic_dec_and_test(&io->count)) {
+		if (io->vma_invalidate_size)
+			invalidate_kernel_vmap_range(io->vma_invalidate_address,
+						     io->vma_invalidate_size);
+
 		if (io->sleeper)
 			wake_up_process(io->sleeper);
 
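The invalidation sits in dec_count(), i.e. in the completion path after the last outstanding region finishes: for a read, the device has just written the underlying pages behind the CPU's back, so any cache lines still live for the vmap alias are stale and must be discarded before the submitter is woken (sync case) or its callback runs (async case).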
@@ -159,6 +165,9 @@ struct dpages {
 	unsigned context_u;
 	void *context_ptr;
+
+	void *vma_invalidate_address;
+	unsigned long vma_invalidate_size;
 };
 
 /*
@@ -377,6 +386,9 @@ static int sync_io(struct dm_io_client *client, unsigned int num_regions,
 	io->sleeper = current;
 	io->client = client;
 
+	io->vma_invalidate_address = dp->vma_invalidate_address;
+	io->vma_invalidate_size = dp->vma_invalidate_size;
+
 	dispatch_io(rw, num_regions, where, dp, io, 1);
 
 	while (1) {
@@ -415,13 +427,21 @@ static int async_io(struct dm_io_client *client, unsigned int num_regions,
 	io->callback = fn;
 	io->context = context;
 
+	io->vma_invalidate_address = dp->vma_invalidate_address;
+	io->vma_invalidate_size = dp->vma_invalidate_size;
+
 	dispatch_io(rw, num_regions, where, dp, io, 0);
 	return 0;
 }
 
-static int dp_init(struct dm_io_request *io_req, struct dpages *dp)
+static int dp_init(struct dm_io_request *io_req, struct dpages *dp,
+		   unsigned long size)
 {
 	/* Set up dpages based on memory type */
+
+	dp->vma_invalidate_address = NULL;
+	dp->vma_invalidate_size = 0;
+
 	switch (io_req->mem.type) {
 	case DM_IO_PAGE_LIST:
 		list_dp_init(dp, io_req->mem.ptr.pl, io_req->mem.offset);
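dp_init() zeroes the two new fields before the switch because the struct dpages lives on dm_io()'s stack: sync_io() and async_io() copy them into the struct io unconditionally, and dec_count() keys off a non-zero vma_invalidate_size, so the page-list, bvec and kmem cases must not leak stack garbage into that test.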
@@ -432,6 +452,11 @@ static int dp_init(struct dm_io_request *io_req, struct dpages *dp)
 		break;
 
 	case DM_IO_VMA:
+		flush_kernel_vmap_range(io_req->mem.ptr.vma, size);
+		if ((io_req->bi_rw & RW_MASK) == READ) {
+			dp->vma_invalidate_address = io_req->mem.ptr.vma;
+			dp->vma_invalidate_size = size;
+		}
 		vm_dp_init(dp, io_req->mem.ptr.vma);
 		break;
 
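flush_kernel_vmap_range() is called for both directions, since dirty lines in the vmap alias must reach memory before the device touches the pages; the matching invalidate is only scheduled (via the dp fields) for reads, where the device modifies memory behind the cache. On architectures without cache aliasing both helpers are no-ops, so the common case costs nothing.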
@@ -460,7 +485,7 @@ int dm_io(struct dm_io_request *io_req, unsigned num_regions,
 	int r;
 	struct dpages dp;
 
-	r = dp_init(io_req, &dp);
+	r = dp_init(io_req, &dp, (unsigned long)where->count << SECTOR_SHIFT);
 	if (r)
 		return r;
 
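To make the flow concrete, here is a minimal caller sketch, hypothetical and not part of the patch, that exercises exactly this path: a synchronous read into vmalloc'd memory via DM_IO_VMA. With the patch applied, dp_init() flushes the vmap range before dispatch_io() and dec_count() invalidates it on completion, which is what makes this safe on machines with virtually indexed caches.

/* Hypothetical example, not part of the patch: read nr_sectors
 * starting at 'sector' from 'bdev' into a vmalloc'd buffer. */
#include <linux/device-mapper.h>	/* SECTOR_SHIFT */
#include <linux/dm-io.h>
#include <linux/vmalloc.h>

static int example_vma_read(struct dm_io_client *client,
			    struct block_device *bdev,
			    sector_t sector, sector_t nr_sectors)
{
	struct dm_io_region where = {
		.bdev   = bdev,
		.sector = sector,
		.count  = nr_sectors,
	};
	struct dm_io_request io_req = {
		.bi_rw     = READ,
		.mem.type  = DM_IO_VMA,
		.client    = client,
		.notify.fn = NULL,	/* NULL callback => synchronous I/O */
	};
	void *buf = vmalloc((unsigned long)nr_sectors << SECTOR_SHIFT);
	int r;

	if (!buf)
		return -ENOMEM;
	io_req.mem.ptr.vma = buf;

	/* dp_init() flushes the vmap range before dispatch and, because
	 * this is a READ, arranges for dec_count() to invalidate it
	 * again when the I/O completes. */
	r = dm_io(&io_req, 1, &where, NULL);

	/* ... consume buf ... */
	vfree(buf);
	return r;
}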