@@ -2566,63 +2566,57 @@ cifs_uncached_readv_complete(struct work_struct *work)
 {
 	struct cifs_readdata *rdata = container_of(work,
 						struct cifs_readdata, work);
-	unsigned int i;
-
-	/* if the result is non-zero then the pages weren't kmapped */
-	if (rdata->result == 0) {
-		for (i = 0; i < rdata->nr_pages; i++)
-			kunmap(rdata->pages[i]);
-	}
 
 	complete(&rdata->done);
 	kref_put(&rdata->refcount, cifs_uncached_readdata_release);
 }
 
 static int
-cifs_uncached_read_marshal_iov(struct cifs_readdata *rdata,
-			       unsigned int remaining)
+cifs_uncached_read_into_pages(struct TCP_Server_Info *server,
+			      struct cifs_readdata *rdata, unsigned int len)
 {
-	int len = 0;
+	int total_read = 0, result = 0;
 	unsigned int i;
 	unsigned int nr_pages = rdata->nr_pages;
+	struct kvec iov;
 
-	rdata->nr_iov = 1;
+	rdata->tailsz = PAGE_SIZE;
 	for (i = 0; i < nr_pages; i++) {
 		struct page *page = rdata->pages[i];
 
-		if (remaining >= PAGE_SIZE) {
+		if (len >= PAGE_SIZE) {
 			/* enough data to fill the page */
-			rdata->iov[rdata->nr_iov].iov_base = kmap(page);
-			rdata->iov[rdata->nr_iov].iov_len = PAGE_SIZE;
-			cFYI(1, "%u: idx=%lu iov_base=%p iov_len=%zu",
-				rdata->nr_iov, page->index,
-				rdata->iov[rdata->nr_iov].iov_base,
-				rdata->iov[rdata->nr_iov].iov_len);
-			++rdata->nr_iov;
-			len += PAGE_SIZE;
-			remaining -= PAGE_SIZE;
-		} else if (remaining > 0) {
+			iov.iov_base = kmap(page);
+			iov.iov_len = PAGE_SIZE;
+			cFYI(1, "%u: iov_base=%p iov_len=%zu",
+				i, iov.iov_base, iov.iov_len);
+			len -= PAGE_SIZE;
+		} else if (len > 0) {
 			/* enough for partial page, fill and zero the rest */
-			rdata->iov[rdata->nr_iov].iov_base = kmap(page);
-			rdata->iov[rdata->nr_iov].iov_len = remaining;
-			cFYI(1, "%u: idx=%lu iov_base=%p iov_len=%zu",
-				rdata->nr_iov, page->index,
-				rdata->iov[rdata->nr_iov].iov_base,
-				rdata->iov[rdata->nr_iov].iov_len);
-			memset(rdata->iov[rdata->nr_iov].iov_base + remaining,
-				'\0', PAGE_SIZE - remaining);
-			++rdata->nr_iov;
-			len += remaining;
-			remaining = 0;
+			iov.iov_base = kmap(page);
+			iov.iov_len = len;
+			cFYI(1, "%u: iov_base=%p iov_len=%zu",
+				i, iov.iov_base, iov.iov_len);
+			memset(iov.iov_base + len, '\0', PAGE_SIZE - len);
+			rdata->tailsz = len;
+			len = 0;
 		} else {
 			/* no need to hold page hostage */
 			rdata->pages[i] = NULL;
 			rdata->nr_pages--;
 			put_page(page);
+			continue;
 		}
+
+		result = cifs_readv_from_socket(server, &iov, 1, iov.iov_len);
+		kunmap(page);
+		if (result < 0)
+			break;
+
+		total_read += result;
 	}
 
-	return len;
+	return total_read > 0 ? total_read : result;
 }
 
 static ssize_t
@@ -2685,7 +2679,8 @@ cifs_iovec_read(struct file *file, const struct iovec *iov,
 		rdata->offset = offset;
 		rdata->bytes = cur_len;
 		rdata->pid = pid;
-		rdata->marshal_iov = cifs_uncached_read_marshal_iov;
+		rdata->pagesz = PAGE_SIZE;
+		rdata->read_into_pages = cifs_uncached_read_into_pages;
 
 		rc = cifs_retry_async_readv(rdata);
 error:
@@ -2935,7 +2930,6 @@ cifs_readv_complete(struct work_struct *work)
 		lru_cache_add_file(page);
 
 		if (rdata->result == 0) {
-			kunmap(page);
 			flush_dcache_page(page);
 			SetPageUptodate(page);
 		}
@@ -2952,47 +2946,42 @@ cifs_readv_complete(struct work_struct *work)
 }
 
 static int
-cifs_readpages_marshal_iov(struct cifs_readdata *rdata, unsigned int remaining)
+cifs_readpages_read_into_pages(struct TCP_Server_Info *server,
+			       struct cifs_readdata *rdata, unsigned int len)
 {
-	int len = 0;
+	int total_read = 0, result = 0;
 	unsigned int i;
 	u64 eof;
 	pgoff_t eof_index;
 	unsigned int nr_pages = rdata->nr_pages;
+	struct kvec iov;
 
 	/* determine the eof that the server (probably) has */
 	eof = CIFS_I(rdata->mapping->host)->server_eof;
 	eof_index = eof ? (eof - 1) >> PAGE_CACHE_SHIFT : 0;
 	cFYI(1, "eof=%llu eof_index=%lu", eof, eof_index);
 
-	rdata->nr_iov = 1;
+	rdata->tailsz = PAGE_CACHE_SIZE;
 	for (i = 0; i < nr_pages; i++) {
 		struct page *page = rdata->pages[i];
 
-		if (remaining >= PAGE_CACHE_SIZE) {
+		if (len >= PAGE_CACHE_SIZE) {
 			/* enough data to fill the page */
-			rdata->iov[rdata->nr_iov].iov_base = kmap(page);
-			rdata->iov[rdata->nr_iov].iov_len = PAGE_CACHE_SIZE;
+			iov.iov_base = kmap(page);
+			iov.iov_len = PAGE_CACHE_SIZE;
 			cFYI(1, "%u: idx=%lu iov_base=%p iov_len=%zu",
-				rdata->nr_iov, page->index,
-				rdata->iov[rdata->nr_iov].iov_base,
-				rdata->iov[rdata->nr_iov].iov_len);
-			++rdata->nr_iov;
-			len += PAGE_CACHE_SIZE;
-			remaining -= PAGE_CACHE_SIZE;
-		} else if (remaining > 0) {
+				i, page->index, iov.iov_base, iov.iov_len);
+			len -= PAGE_CACHE_SIZE;
+		} else if (len > 0) {
 			/* enough for partial page, fill and zero the rest */
-			rdata->iov[rdata->nr_iov].iov_base = kmap(page);
-			rdata->iov[rdata->nr_iov].iov_len = remaining;
+			iov.iov_base = kmap(page);
+			iov.iov_len = len;
 			cFYI(1, "%u: idx=%lu iov_base=%p iov_len=%zu",
-				rdata->nr_iov, page->index,
-				rdata->iov[rdata->nr_iov].iov_base,
-				rdata->iov[rdata->nr_iov].iov_len);
-			memset(rdata->iov[rdata->nr_iov].iov_base + remaining,
-				'\0', PAGE_CACHE_SIZE - remaining);
-			++rdata->nr_iov;
-			len += remaining;
-			remaining = 0;
+				i, page->index, iov.iov_base, iov.iov_len);
+			memset(iov.iov_base + len,
+				'\0', PAGE_CACHE_SIZE - len);
+			rdata->tailsz = len;
+			len = 0;
 		} else if (page->index > eof_index) {
 			/*
 			 * The VFS will not try to do readahead past the
@@ -3010,6 +2999,7 @@ cifs_readpages_marshal_iov(struct cifs_readdata *rdata, unsigned int remaining)
 			page_cache_release(page);
 			rdata->pages[i] = NULL;
 			rdata->nr_pages--;
+			continue;
 		} else {
 			/* no need to hold page hostage */
 			lru_cache_add_file(page);
@@ -3017,10 +3007,18 @@ cifs_readpages_marshal_iov(struct cifs_readdata *rdata, unsigned int remaining)
 			page_cache_release(page);
 			rdata->pages[i] = NULL;
 			rdata->nr_pages--;
+			continue;
 		}
+
+		result = cifs_readv_from_socket(server, &iov, 1, iov.iov_len);
+		kunmap(page);
+		if (result < 0)
+			break;
+
+		total_read += result;
 	}
 
-	return len;
+	return total_read > 0 ? total_read : result;
 }
 
 static int cifs_readpages(struct file *file, struct address_space *mapping,
@@ -3144,7 +3142,8 @@ static int cifs_readpages(struct file *file, struct address_space *mapping,
 		rdata->offset = offset;
 		rdata->bytes = bytes;
 		rdata->pid = pid;
-		rdata->marshal_iov = cifs_readpages_marshal_iov;
+		rdata->pagesz = PAGE_CACHE_SIZE;
+		rdata->read_into_pages = cifs_readpages_read_into_pages;
 
 		list_for_each_entry_safe(page, tpage, &tmplist, lru) {
 			list_del(&page->lru);