@@ -139,15 +139,28 @@ static void nfs_context_set_write_error(struct nfs_open_context *ctx, int error)
 	set_bit(NFS_CONTEXT_ERROR_WRITE, &ctx->flags);
 }
 
-static struct nfs_page *nfs_page_find_request_locked(struct page *page)
+static struct nfs_page *
+nfs_page_find_request_locked(struct nfs_inode *nfsi, struct page *page)
 {
 	struct nfs_page *req = NULL;
 
-	if (PagePrivate(page)) {
+	if (PagePrivate(page))
 		req = (struct nfs_page *)page_private(page);
-		if (req != NULL)
-			kref_get(&req->wb_kref);
+	else if (unlikely(PageSwapCache(page))) {
+		struct nfs_page *freq, *t;
+
+		/* Linearly search the commit list for the correct req */
+		list_for_each_entry_safe(freq, t, &nfsi->commit_info.list, wb_list) {
+			if (freq->wb_page == page) {
+				req = freq;
+				break;
+			}
+		}
 	}
+
+	if (req)
+		kref_get(&req->wb_kref);
+
 	return req;
 }
 
@@ -157,7 +170,7 @@ static struct nfs_page *nfs_page_find_request(struct page *page)
 	struct nfs_page *req = NULL;
 
 	spin_lock(&inode->i_lock);
-	req = nfs_page_find_request_locked(page);
+	req = nfs_page_find_request_locked(NFS_I(inode), page);
 	spin_unlock(&inode->i_lock);
 	return req;
 }
@@ -258,7 +271,7 @@ static struct nfs_page *nfs_find_and_lock_request(struct page *page, bool nonblo
 
 	spin_lock(&inode->i_lock);
 	for (;;) {
-		req = nfs_page_find_request_locked(page);
+		req = nfs_page_find_request_locked(NFS_I(inode), page);
 		if (req == NULL)
 			break;
 		if (nfs_lock_request(req))
@@ -413,9 +426,15 @@ static void nfs_inode_add_request(struct inode *inode, struct nfs_page *req)
 	spin_lock(&inode->i_lock);
 	if (!nfsi->npages && NFS_PROTO(inode)->have_delegation(inode, FMODE_WRITE))
 		inode->i_version++;
-	set_bit(PG_MAPPED, &req->wb_flags);
-	SetPagePrivate(req->wb_page);
-	set_page_private(req->wb_page, (unsigned long)req);
+	/*
+	 * Swap-space should not get truncated. Hence no need to plug the race
+	 * with invalidate/truncate.
+	 */
+	if (likely(!PageSwapCache(req->wb_page))) {
+		set_bit(PG_MAPPED, &req->wb_flags);
+		SetPagePrivate(req->wb_page);
+		set_page_private(req->wb_page, (unsigned long)req);
+	}
 	nfsi->npages++;
 	kref_get(&req->wb_kref);
 	spin_unlock(&inode->i_lock);
@@ -432,9 +451,11 @@ static void nfs_inode_remove_request(struct nfs_page *req)
 	BUG_ON (!NFS_WBACK_BUSY(req));
 
 	spin_lock(&inode->i_lock);
-	set_page_private(req->wb_page, 0);
-	ClearPagePrivate(req->wb_page);
-	clear_bit(PG_MAPPED, &req->wb_flags);
+	if (likely(!PageSwapCache(req->wb_page))) {
+		set_page_private(req->wb_page, 0);
+		ClearPagePrivate(req->wb_page);
+		clear_bit(PG_MAPPED, &req->wb_flags);
+	}
 	nfsi->npages--;
 	spin_unlock(&inode->i_lock);
 	nfs_release_request(req);
@@ -730,7 +751,7 @@ static struct nfs_page *nfs_try_to_update_request(struct inode *inode,
 	spin_lock(&inode->i_lock);
 
 	for (;;) {
-		req = nfs_page_find_request_locked(page);
+		req = nfs_page_find_request_locked(NFS_I(inode), page);
 		if (req == NULL)
 			goto out_unlock;
 
@@ -1744,7 +1765,7 @@ int nfs_wb_page_cancel(struct inode *inode, struct page *page)
  */
 int nfs_wb_page(struct inode *inode, struct page *page)
 {
-	loff_t range_start = page_offset(page);
+	loff_t range_start = page_file_offset(page);
 	loff_t range_end = range_start + (loff_t)(PAGE_CACHE_SIZE - 1);
 	struct writeback_control wbc = {
 		.sync_mode = WB_SYNC_ALL,