|
@@ -899,11 +899,14 @@ int write_cache_pages(struct address_space *mapping,
|
|
|
}
|
|
|
retry:
|
|
|
done_index = index;
|
|
|
- while (!done && (index <= end) &&
|
|
|
- (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
|
|
|
- PAGECACHE_TAG_DIRTY,
|
|
|
- min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1))) {
|
|
|
- unsigned i;
|
|
|
+ while (!done && (index <= end)) {
|
|
|
+ int i;
|
|
|
+
|
|
|
+ nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
|
|
|
+ PAGECACHE_TAG_DIRTY,
|
|
|
+ min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1);
|
|
|
+ if (nr_pages == 0)
|
|
|
+ break;
|
|
|
|
|
|
for (i = 0; i < nr_pages; i++) {
|
|
|
struct page *page = pvec.pages[i];
|
|
@@ -919,7 +922,16 @@ retry:
|
|
|
*/
|
|
|
lock_page(page);
|
|
|
|
|
|
+ /*
|
|
|
+ * Page truncated or invalidated. We can freely skip it
|
|
|
+ * then, even for data integrity operations: the page
|
|
|
+ * has disappeared concurrently, so there could be no
|
|
|
+ * real expectation of this data integrity operation
|
|
|
+ * even if there is now a new, dirty page at the same
|
|
|
+ * pagecache address.
|
|
|
+ */
|
|
|
if (unlikely(page->mapping != mapping)) {
|
|
|
+continue_unlock:
|
|
|
unlock_page(page);
|
|
|
continue;
|
|
|
}
|
|
@@ -930,18 +942,15 @@ retry:
|
|
|
* end == -1 in that case.
|
|
|
*/
|
|
|
done = 1;
|
|
|
- unlock_page(page);
|
|
|
- continue;
|
|
|
+ goto continue_unlock;
|
|
|
}
|
|
|
|
|
|
if (wbc->sync_mode != WB_SYNC_NONE)
|
|
|
wait_on_page_writeback(page);
|
|
|
|
|
|
if (PageWriteback(page) ||
|
|
|
- !clear_page_dirty_for_io(page)) {
|
|
|
- unlock_page(page);
|
|
|
- continue;
|
|
|
- }
|
|
|
+ !clear_page_dirty_for_io(page))
|
|
|
+ goto continue_unlock;
|
|
|
|
|
|
ret = (*writepage)(page, wbc, data);
|
|
|
if (unlikely(ret)) {
|
|
@@ -964,7 +973,8 @@ retry:
|
|
|
}
|
|
|
|
|
|
if (wbc->sync_mode == WB_SYNC_NONE) {
|
|
|
- if (--wbc->nr_to_write <= 0)
|
|
|
+ wbc->nr_to_write--;
|
|
|
+ if (wbc->nr_to_write <= 0)
|
|
|
done = 1;
|
|
|
}
|
|
|
if (wbc->nonblocking && bdi_write_congested(bdi)) {
|