@@ -438,6 +438,7 @@ nfs_mark_request_commit(struct nfs_page *req)
 	radix_tree_tag_set(&nfsi->nfs_page_tree,
 			req->wb_index,
 			NFS_PAGE_TAG_COMMIT);
+	nfsi->ncommit++;
 	spin_unlock(&inode->i_lock);
 	inc_zone_page_state(req->wb_page, NR_UNSTABLE_NFS);
 	inc_bdi_stat(req->wb_page->mapping->backing_dev_info, BDI_RECLAIMABLE);
@@ -501,57 +502,6 @@ int nfs_reschedule_unstable_write(struct nfs_page *req)
 }
 #endif
 
-/*
- * Wait for a request to complete.
- *
- * Interruptible by fatal signals only.
- */
-static int nfs_wait_on_requests_locked(struct inode *inode, pgoff_t idx_start, unsigned int npages)
-{
-	struct nfs_inode *nfsi = NFS_I(inode);
-	struct nfs_page *req;
-	pgoff_t idx_end, next;
-	unsigned int res = 0;
-	int error;
-
-	if (npages == 0)
-		idx_end = ~0;
-	else
-		idx_end = idx_start + npages - 1;
-
-	next = idx_start;
-	while (radix_tree_gang_lookup_tag(&nfsi->nfs_page_tree, (void **)&req, next, 1, NFS_PAGE_TAG_LOCKED)) {
-		if (req->wb_index > idx_end)
-			break;
-
-		next = req->wb_index + 1;
-		BUG_ON(!NFS_WBACK_BUSY(req));
-
-		kref_get(&req->wb_kref);
-		spin_unlock(&inode->i_lock);
-		error = nfs_wait_on_request(req);
-		nfs_release_request(req);
-		spin_lock(&inode->i_lock);
-		if (error < 0)
-			return error;
-		res++;
-	}
-	return res;
-}
-
-static void nfs_cancel_commit_list(struct list_head *head)
-{
-	struct nfs_page *req;
-
-	while(!list_empty(head)) {
-		req = nfs_list_entry(head->next);
-		nfs_list_remove_request(req);
-		nfs_clear_request_commit(req);
-		nfs_inode_remove_request(req);
-		nfs_unlock_request(req);
-	}
-}
-
 #if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
 static int
 nfs_need_commit(struct nfs_inode *nfsi)
@@ -573,11 +523,17 @@ static int
 nfs_scan_commit(struct inode *inode, struct list_head *dst, pgoff_t idx_start, unsigned int npages)
 {
 	struct nfs_inode *nfsi = NFS_I(inode);
+	int ret;
 
 	if (!nfs_need_commit(nfsi))
 		return 0;
 
-	return nfs_scan_list(nfsi, dst, idx_start, npages, NFS_PAGE_TAG_COMMIT);
+	ret = nfs_scan_list(nfsi, dst, idx_start, npages, NFS_PAGE_TAG_COMMIT);
+	if (ret > 0)
+		nfsi->ncommit -= ret;
+	if (nfs_need_commit(NFS_I(inode)))
+		__mark_inode_dirty(inode, I_DIRTY_DATASYNC);
+	return ret;
 }
 #else
 static inline int nfs_need_commit(struct nfs_inode *nfsi)
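The two hunks above are two halves of one invariant: nfsi->ncommit always equals the number of requests tagged NFS_PAGE_TAG_COMMIT, and both sides of the bookkeeping happen under inode->i_lock. nfs_mark_request_commit() tags and increments; nfs_scan_commit() moves some tagged requests onto the caller's list and subtracts exactly that count, re-marking the inode I_DIRTY_DATASYNC when tagged requests remain (the scan may be range-limited). A minimal userspace sketch of that rule, with invented names, a mutex standing in for i_lock and a flag array standing in for the radix-tree tag:

#include <pthread.h>
#include <stdbool.h>

#define NSLOTS 1024

struct commit_state {
	pthread_mutex_t lock;		/* stands in for inode->i_lock */
	unsigned long ncommit;		/* mirrors nfsi->ncommit */
	bool tagged[NSLOTS];		/* stands in for NFS_PAGE_TAG_COMMIT */
};

/* The nfs_mark_request_commit() side: tag and increment under one lock. */
static void mark_commit(struct commit_state *cs, unsigned long idx)
{
	pthread_mutex_lock(&cs->lock);
	cs->tagged[idx] = true;
	cs->ncommit++;
	pthread_mutex_unlock(&cs->lock);
}

/* The nfs_scan_commit() side: take a range of tagged entries, subtract the
 * exact count taken, and report whether anything is left so the caller can
 * re-arm writeback (the __mark_inode_dirty() call in the hunk). */
static unsigned long scan_commit(struct commit_state *cs,
				 unsigned long start, unsigned long n,
				 bool *more_left)
{
	unsigned long i, taken = 0;

	pthread_mutex_lock(&cs->lock);
	for (i = start; i < start + n && i < NSLOTS; i++) {
		if (cs->tagged[i]) {
			cs->tagged[i] = false;
			taken++;
		}
	}
	cs->ncommit -= taken;		/* mirrors nfsi->ncommit -= ret */
	*more_left = cs->ncommit != 0;	/* mirrors nfs_need_commit() */
	pthread_mutex_unlock(&cs->lock);
	return taken;
}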
@@ -642,9 +598,10 @@ static struct nfs_page *nfs_try_to_update_request(struct inode *inode,
 		spin_lock(&inode->i_lock);
 	}
 
-	if (nfs_clear_request_commit(req))
-		radix_tree_tag_clear(&NFS_I(inode)->nfs_page_tree,
-				req->wb_index, NFS_PAGE_TAG_COMMIT);
+	if (nfs_clear_request_commit(req) &&
+			radix_tree_tag_clear(&NFS_I(inode)->nfs_page_tree,
+				req->wb_index, NFS_PAGE_TAG_COMMIT) != NULL)
+		NFS_I(inode)->ncommit--;
 
 	/* Okay, the request matches. Update the region */
 	if (offset < req->wb_offset) {
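Note the short-circuit in the new condition: the counter moves only when nfs_clear_request_commit() reports the request really carried the commit mark and radix_tree_tag_clear() returns a non-NULL item, meaning the tree actually held an entry at that index; either check alone could let ncommit drift. A toy model of the same guard, with invented names (the array stands in for the radix tree, whose tag_clear returns the stored item or NULL):

#include <stddef.h>
#include <stdbool.h>

struct toy_tree {
	void *slots[64];
	bool  tag[64];
};

/* Model of radix_tree_tag_clear(): clear the tag bit and return the item
 * stored at idx, or NULL when no item exists there. */
static void *toy_tag_clear(struct toy_tree *t, unsigned int idx)
{
	t->tag[idx] = false;
	return t->slots[idx];
}

static void drop_commit_mark(struct toy_tree *t, unsigned int idx,
			     bool was_marked, unsigned long *ncommit)
{
	/* Both conditions must hold before the counter moves, exactly
	 * like the && chain in the hunk; && also skips the tag_clear
	 * entirely when the request was never marked. */
	if (was_marked && toy_tag_clear(t, idx) != NULL)
		(*ncommit)--;
}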
@@ -1391,7 +1348,7 @@ static const struct rpc_call_ops nfs_commit_ops = {
 	.rpc_release = nfs_commit_release,
 };
 
-int nfs_commit_inode(struct inode *inode, int how)
+static int nfs_commit_inode(struct inode *inode, int how)
 {
 	LIST_HEAD(head);
 	int res;
@@ -1406,92 +1363,51 @@ int nfs_commit_inode(struct inode *inode, int how)
 	}
 	return res;
 }
-#else
-static inline int nfs_commit_list(struct inode *inode, struct list_head *head, int how)
-{
-	return 0;
-}
-#endif
 
-long nfs_sync_mapping_wait(struct address_space *mapping, struct writeback_control *wbc, int how)
+static int nfs_commit_unstable_pages(struct inode *inode, struct writeback_control *wbc)
 {
-	struct inode *inode = mapping->host;
-	pgoff_t idx_start, idx_end;
-	unsigned int npages = 0;
-	LIST_HEAD(head);
-	int nocommit = how & FLUSH_NOCOMMIT;
-	long pages, ret;
-
-	/* FIXME */
-	if (wbc->range_cyclic)
-		idx_start = 0;
-	else {
-		idx_start = wbc->range_start >> PAGE_CACHE_SHIFT;
-		idx_end = wbc->range_end >> PAGE_CACHE_SHIFT;
-		if (idx_end > idx_start) {
-			pgoff_t l_npages = 1 + idx_end - idx_start;
-			npages = l_npages;
-			if (sizeof(npages) != sizeof(l_npages) &&
-					(pgoff_t)npages != l_npages)
-				npages = 0;
+	struct nfs_inode *nfsi = NFS_I(inode);
+	int flags = FLUSH_SYNC;
+	int ret = 0;
+
+	/* Don't commit yet if this is a non-blocking flush and there are
+	 * lots of outstanding writes for this mapping.
+	 */
+	if (wbc->sync_mode == WB_SYNC_NONE &&
+	    nfsi->ncommit <= (nfsi->npages >> 1))
+		goto out_mark_dirty;
+
+	if (wbc->nonblocking || wbc->for_background)
+		flags = 0;
+	ret = nfs_commit_inode(inode, flags);
+	if (ret >= 0) {
+		if (wbc->sync_mode == WB_SYNC_NONE) {
+			if (ret < wbc->nr_to_write)
+				wbc->nr_to_write -= ret;
+			else
+				wbc->nr_to_write = 0;
 		}
+		return 0;
 	}
-	how &= ~FLUSH_NOCOMMIT;
-	spin_lock(&inode->i_lock);
-	do {
-		ret = nfs_wait_on_requests_locked(inode, idx_start, npages);
-		if (ret != 0)
-			continue;
-		if (nocommit)
-			break;
-		pages = nfs_scan_commit(inode, &head, idx_start, npages);
-		if (pages == 0)
-			break;
-		if (how & FLUSH_INVALIDATE) {
-			spin_unlock(&inode->i_lock);
-			nfs_cancel_commit_list(&head);
-			ret = pages;
-			spin_lock(&inode->i_lock);
-			continue;
-		}
-		pages += nfs_scan_commit(inode, &head, 0, 0);
-		spin_unlock(&inode->i_lock);
-		ret = nfs_commit_list(inode, &head, how);
-		spin_lock(&inode->i_lock);
-
-	} while (ret >= 0);
-	spin_unlock(&inode->i_lock);
+out_mark_dirty:
+	__mark_inode_dirty(inode, I_DIRTY_DATASYNC);
 	return ret;
 }
-
-static int __nfs_write_mapping(struct address_space *mapping, struct writeback_control *wbc, int how)
+#else
+static int nfs_commit_inode(struct inode *inode, int how)
 {
-	int ret;
-
-	ret = nfs_writepages(mapping, wbc);
-	if (ret < 0)
-		goto out;
-	ret = nfs_sync_mapping_wait(mapping, wbc, how);
-	if (ret < 0)
-		goto out;
 	return 0;
-out:
-	__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
-	return ret;
 }
 
-/* Two pass sync: first using WB_SYNC_NONE, then WB_SYNC_ALL */
-static int nfs_write_mapping(struct address_space *mapping, int how)
+static int nfs_commit_unstable_pages(struct inode *inode, struct writeback_control *wbc)
 {
-	struct writeback_control wbc = {
-		.bdi = mapping->backing_dev_info,
-		.sync_mode = WB_SYNC_ALL,
-		.nr_to_write = LONG_MAX,
-		.range_start = 0,
-		.range_end = LLONG_MAX,
-	};
+	return 0;
+}
+#endif
 
-	return __nfs_write_mapping(mapping, &wbc, how);
+int nfs_write_inode(struct inode *inode, struct writeback_control *wbc)
+{
+	return nfs_commit_unstable_pages(inode, wbc);
 }
 
 /*
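This rewrite is where the new counter pays off. nfs_commit_unstable_pages(), reached through the new nfs_write_inode() entry point (evidently wired up as the NFS .write_inode handler; the hookup itself is outside this hunk), defers the COMMIT on a non-blocking (WB_SYNC_NONE) flush while half or fewer of the inode's pages are unstable, merely re-marking the inode dirty, and credits a commit that does run against wbc->nr_to_write. A standalone model of just that arithmetic, with illustrative types rather than the kernel structures:

#include <stdio.h>
#include <stdbool.h>

struct model {
	bool sync_none;		/* WB_SYNC_NONE (background) vs WB_SYNC_ALL */
	long nr_to_write;
	unsigned long ncommit;	/* unstable, commit-pending pages */
	unsigned long npages;	/* all pages with outstanding requests */
};

/* Mirrors the hunk's test: defer when sync_mode == WB_SYNC_NONE and
 * ncommit <= npages / 2. */
static bool should_commit(const struct model *m)
{
	return !(m->sync_none && m->ncommit <= (m->npages >> 1));
}

int main(void)
{
	struct model m = {
		.sync_none = true, .nr_to_write = 16,
		.ncommit = 3, .npages = 100,
	};

	if (!should_commit(&m))
		printf("defer COMMIT: only %lu of %lu pages unstable\n",
		       m.ncommit, m.npages);

	m.ncommit = 80;			/* now mostly unstable */
	if (should_commit(&m)) {
		long done = (long)m.ncommit;	/* pretend the commit ran */
		/* Mirrors the nr_to_write accounting in the hunk. */
		if (done < m.nr_to_write)
			m.nr_to_write -= done;
		else
			m.nr_to_write = 0;
		printf("COMMIT ran: nr_to_write now %ld\n", m.nr_to_write);
	}
	return 0;
}

Under this rule, background writeback never issues a COMMIT for an inode with only a few unstable pages; it waits until there is enough unstable data to make the round trip worthwhile.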
@@ -1499,37 +1415,26 @@ static int nfs_write_mapping(struct address_space *mapping, int how)
  */
 int nfs_wb_all(struct inode *inode)
 {
-	return nfs_write_mapping(inode->i_mapping, 0);
-}
+	struct writeback_control wbc = {
+		.sync_mode = WB_SYNC_ALL,
+		.nr_to_write = LONG_MAX,
+		.range_start = 0,
+		.range_end = LLONG_MAX,
+	};
 
-int nfs_wb_nocommit(struct inode *inode)
-{
-	return nfs_write_mapping(inode->i_mapping, FLUSH_NOCOMMIT);
+	return sync_inode(inode, &wbc);
 }
 
 int nfs_wb_page_cancel(struct inode *inode, struct page *page)
 {
 	struct nfs_page *req;
-	loff_t range_start = page_offset(page);
-	loff_t range_end = range_start + (loff_t)(PAGE_CACHE_SIZE - 1);
-	struct writeback_control wbc = {
-		.bdi = page->mapping->backing_dev_info,
-		.sync_mode = WB_SYNC_ALL,
-		.nr_to_write = LONG_MAX,
-		.range_start = range_start,
-		.range_end = range_end,
-	};
 	int ret = 0;
 
 	BUG_ON(!PageLocked(page));
 	for (;;) {
 		req = nfs_page_find_request(page);
 		if (req == NULL)
-			goto out;
-		if (test_bit(PG_CLEAN, &req->wb_flags)) {
-			nfs_release_request(req);
 			break;
-		}
 		if (nfs_lock_request_dontget(req)) {
 			nfs_inode_remove_request(req);
 			/*
@@ -1543,54 +1448,54 @@ int nfs_wb_page_cancel(struct inode *inode, struct page *page)
 		ret = nfs_wait_on_request(req);
 		nfs_release_request(req);
 		if (ret < 0)
-			goto out;
+			break;
 	}
-	if (!PagePrivate(page))
-		return 0;
-	ret = nfs_sync_mapping_wait(page->mapping, &wbc, FLUSH_INVALIDATE);
-out:
 	return ret;
 }
 
-static int nfs_wb_page_priority(struct inode *inode, struct page *page,
-				int how)
+/*
+ * Write back all requests on one page - we do this before reading it.
+ */
+int nfs_wb_page(struct inode *inode, struct page *page)
 {
 	loff_t range_start = page_offset(page);
 	loff_t range_end = range_start + (loff_t)(PAGE_CACHE_SIZE - 1);
 	struct writeback_control wbc = {
-		.bdi = page->mapping->backing_dev_info,
 		.sync_mode = WB_SYNC_ALL,
-		.nr_to_write = LONG_MAX,
+		.nr_to_write = 0,
 		.range_start = range_start,
 		.range_end = range_end,
 	};
+	struct nfs_page *req;
+	int need_commit;
 	int ret;
 
-	do {
+	while(PagePrivate(page)) {
 		if (clear_page_dirty_for_io(page)) {
 			ret = nfs_writepage_locked(page, &wbc);
 			if (ret < 0)
 				goto out_error;
-		} else if (!PagePrivate(page))
+		}
+		req = nfs_find_and_lock_request(page);
+		if (!req)
 			break;
-		ret = nfs_sync_mapping_wait(page->mapping, &wbc, how);
-		if (ret < 0)
+		if (IS_ERR(req)) {
+			ret = PTR_ERR(req);
 			goto out_error;
-	} while (PagePrivate(page));
+		}
+		need_commit = test_bit(PG_CLEAN, &req->wb_flags);
+		nfs_clear_page_tag_locked(req);
+		if (need_commit) {
+			ret = nfs_commit_inode(inode, FLUSH_SYNC);
+			if (ret < 0)
+				goto out_error;
+		}
+	}
 	return 0;
 out_error:
-	__mark_inode_dirty(inode, I_DIRTY_PAGES);
 	return ret;
 }
 
-/*
- * Write back all requests on one page - we do this before reading it.
- */
-int nfs_wb_page(struct inode *inode, struct page* page)
-{
-	return nfs_wb_page_priority(inode, page, FLUSH_STABLE);
-}
-
 #ifdef CONFIG_MIGRATION
 int nfs_migrate_page(struct address_space *mapping, struct page *newpage,
 		struct page *page)
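The rewritten nfs_wb_page() above reduces to a fixed-point loop: write the page while it is dirty, issue a synchronous commit if its request survived in the clean (commit-pending) state, and stop once the page no longer carries a request (PagePrivate clear). A toy state machine of the same loop, with invented states rather than the kernel's page flags:

#include <stdio.h>
#include <stdbool.h>

/* Invented states standing in for dirty / written-but-unstable / done. */
enum state { DIRTY, UNSTABLE, STABLE };

struct toy_page {
	enum state st;
	bool has_request;	/* stands in for PagePrivate() */
};

static void writepage(struct toy_page *p)	/* ~ nfs_writepage_locked() */
{
	p->st = UNSTABLE;	/* the server holds it, not yet committed */
}

static void commit(struct toy_page *p)		/* ~ nfs_commit_inode() */
{
	p->st = STABLE;
	p->has_request = false;	/* request removed once data is stable */
}

int main(void)
{
	struct toy_page p = { .st = DIRTY, .has_request = true };

	/* Same shape as the new while (PagePrivate(page)) loop. */
	while (p.has_request) {
		if (p.st == DIRTY)
			writepage(&p);
		else if (p.st == UNSTABLE)
			commit(&p);
	}
	printf("page is stable on the server\n");
	return 0;
}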