@@ -79,7 +79,7 @@ void nfs_readdata_release(void *data)
 static
 int nfs_return_empty_page(struct page *page)
 {
-	memclear_highpage_flush(page, 0, PAGE_CACHE_SIZE);
+	zero_user_page(page, 0, PAGE_CACHE_SIZE, KM_USER0);
 	SetPageUptodate(page);
 	unlock_page(page);
 	return 0;
@@ -103,10 +103,10 @@ static void nfs_readpage_truncate_uninitialised_page(struct nfs_read_data *data)
 	pglen = PAGE_CACHE_SIZE - base;
 	for (;;) {
 		if (remainder <= pglen) {
-			memclear_highpage_flush(*pages, base, remainder);
+			zero_user_page(*pages, base, remainder, KM_USER0);
 			break;
 		}
-		memclear_highpage_flush(*pages, base, pglen);
+		zero_user_page(*pages, base, pglen, KM_USER0);
 		pages++;
 		remainder -= pglen;
 		pglen = PAGE_CACHE_SIZE;
@@ -130,7 +130,7 @@ static int nfs_readpage_async(struct nfs_open_context *ctx, struct inode *inode,
 		return PTR_ERR(new);
 	}
 	if (len < PAGE_CACHE_SIZE)
-		memclear_highpage_flush(page, len, PAGE_CACHE_SIZE - len);
+		zero_user_page(page, len, PAGE_CACHE_SIZE - len, KM_USER0);

 	nfs_list_add_request(new, &one_request);
 	if (NFS_SERVER(inode)->rsize < PAGE_CACHE_SIZE)
@@ -532,7 +532,7 @@ readpage_async_filler(void *data, struct page *page)
 		return PTR_ERR(new);
 	}
 	if (len < PAGE_CACHE_SIZE)
-		memclear_highpage_flush(page, len, PAGE_CACHE_SIZE - len);
+		zero_user_page(page, len, PAGE_CACHE_SIZE - len, KM_USER0);
 	nfs_pageio_add_request(desc->pgio, new);
 	return 0;
 }
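
Note for reviewers unfamiliar with the new helper: zero_user_page() wraps the
kmap/memset/flush sequence that memclear_highpage_flush() performed, with the
kmap slot (KM_USER0 here) passed explicitly by the caller. The sketch below
shows the intended semantics; treat it as an illustration rather than the
exact include/linux/highmem.h implementation.

	static inline void zero_user_page(struct page *page, unsigned offset,
					  unsigned size, enum km_type km_type)
	{
		/* map the (possibly highmem) page into the given atomic slot */
		void *kaddr = kmap_atomic(page, km_type);

		/* clear the requested byte range and keep the dcache coherent */
		memset(kaddr + offset, 0, size);
		flush_dcache_page(page);
		kunmap_atomic(kaddr, km_type);
	}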