@@ -502,44 +502,6 @@ int nfs_reschedule_unstable_write(struct nfs_page *req)
 }
 #endif
 
-/*
- * Wait for a request to complete.
- *
- * Interruptible by fatal signals only.
- */
-static int nfs_wait_on_requests_locked(struct inode *inode, pgoff_t idx_start, unsigned int npages)
-{
-	struct nfs_inode *nfsi = NFS_I(inode);
-	struct nfs_page *req;
-	pgoff_t idx_end, next;
-	unsigned int res = 0;
-	int error;
-
-	if (npages == 0)
-		idx_end = ~0;
-	else
-		idx_end = idx_start + npages - 1;
-
-	next = idx_start;
-	while (radix_tree_gang_lookup_tag(&nfsi->nfs_page_tree, (void **)&req, next, 1, NFS_PAGE_TAG_LOCKED)) {
-		if (req->wb_index > idx_end)
-			break;
-
-		next = req->wb_index + 1;
-		BUG_ON(!NFS_WBACK_BUSY(req));
-
-		kref_get(&req->wb_kref);
-		spin_unlock(&inode->i_lock);
-		error = nfs_wait_on_request(req);
-		nfs_release_request(req);
-		spin_lock(&inode->i_lock);
-		if (error < 0)
-			return error;
-		res++;
-	}
-	return res;
-}
-
 #if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
 static int
 nfs_need_commit(struct nfs_inode *nfsi)
@@ -1432,7 +1394,7 @@ out_mark_dirty:
 	return ret;
 }
 #else
-static inline int nfs_commit_list(struct inode *inode, struct list_head *head, int how)
+static int nfs_commit_inode(struct inode *inode, int how)
 {
 	return 0;
 }
@@ -1448,46 +1410,6 @@ int nfs_write_inode(struct inode *inode, struct writeback_control *wbc)
 	return nfs_commit_unstable_pages(inode, wbc);
 }
 
-long nfs_sync_mapping_wait(struct address_space *mapping, struct writeback_control *wbc, int how)
-{
-	struct inode *inode = mapping->host;
-	pgoff_t idx_start, idx_end;
-	unsigned int npages = 0;
-	LIST_HEAD(head);
-	long pages, ret;
-
-	/* FIXME */
-	if (wbc->range_cyclic)
-		idx_start = 0;
-	else {
-		idx_start = wbc->range_start >> PAGE_CACHE_SHIFT;
-		idx_end = wbc->range_end >> PAGE_CACHE_SHIFT;
-		if (idx_end > idx_start) {
-			pgoff_t l_npages = 1 + idx_end - idx_start;
-			npages = l_npages;
-			if (sizeof(npages) != sizeof(l_npages) &&
-			    (pgoff_t)npages != l_npages)
-				npages = 0;
-		}
-	}
-	spin_lock(&inode->i_lock);
-	do {
-		ret = nfs_wait_on_requests_locked(inode, idx_start, npages);
-		if (ret != 0)
-			continue;
-		pages = nfs_scan_commit(inode, &head, idx_start, npages);
-		if (pages == 0)
-			break;
-		pages += nfs_scan_commit(inode, &head, 0, 0);
-		spin_unlock(&inode->i_lock);
-		ret = nfs_commit_list(inode, &head, how);
-		spin_lock(&inode->i_lock);
-
-	} while (ret >= 0);
-	spin_unlock(&inode->i_lock);
-	return ret;
-}
-
 /*
  * flush the inode to disk.
  */
@@ -1531,45 +1453,49 @@ int nfs_wb_page_cancel(struct inode *inode, struct page *page)
 	return ret;
 }
 
-static int nfs_wb_page_priority(struct inode *inode, struct page *page,
-				int how)
+/*
+ * Write back all requests on one page - we do this before reading it.
+ */
+int nfs_wb_page(struct inode *inode, struct page *page)
 {
 	loff_t range_start = page_offset(page);
 	loff_t range_end = range_start + (loff_t)(PAGE_CACHE_SIZE - 1);
 	struct writeback_control wbc = {
-		.bdi = page->mapping->backing_dev_info,
 		.sync_mode = WB_SYNC_ALL,
-		.nr_to_write = LONG_MAX,
+		.nr_to_write = 0,
 		.range_start = range_start,
 		.range_end = range_end,
 	};
+	struct nfs_page *req;
+	int need_commit;
 	int ret;
 
-	do {
+	while(PagePrivate(page)) {
 		if (clear_page_dirty_for_io(page)) {
 			ret = nfs_writepage_locked(page, &wbc);
 			if (ret < 0)
 				goto out_error;
-		} else if (!PagePrivate(page))
+		}
+		req = nfs_find_and_lock_request(page);
+		if (!req)
 			break;
-		ret = nfs_sync_mapping_wait(page->mapping, &wbc, how);
-		if (ret < 0)
+		if (IS_ERR(req)) {
+			ret = PTR_ERR(req);
 			goto out_error;
-	} while (PagePrivate(page));
+		}
+		need_commit = test_bit(PG_CLEAN, &req->wb_flags);
+		nfs_clear_page_tag_locked(req);
+		if (need_commit) {
+			ret = nfs_commit_inode(inode, FLUSH_SYNC);
+			if (ret < 0)
+				goto out_error;
+		}
+	}
 	return 0;
 out_error:
-	__mark_inode_dirty(inode, I_DIRTY_PAGES);
 	return ret;
 }
 
-/*
- * Write back all requests on one page - we do this before reading it.
- */
-int nfs_wb_page(struct inode *inode, struct page* page)
-{
-	return nfs_wb_page_priority(inode, page, FLUSH_STABLE);
-}
-
 #ifdef CONFIG_MIGRATION
 int nfs_migrate_page(struct address_space *mapping, struct page *newpage,
 		struct page *page)