@@ -495,15 +495,6 @@ static pageout_t pageout(struct page *page, struct address_space *mapping,
 			return PAGE_ACTIVATE;
 		}
 
-		/*
-		 * Wait on writeback if requested to. This happens when
-		 * direct reclaiming a large contiguous area and the
-		 * first attempt to free a range of pages fails.
-		 */
-		if (PageWriteback(page) &&
-		    (sc->reclaim_mode & RECLAIM_MODE_SYNC))
-			wait_on_page_writeback(page);
-
 		if (!PageWriteback(page)) {
 			/* synchronous write or broken a_ops? */
 			ClearPageReclaim(page);
@@ -804,12 +795,10 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 
 		if (PageWriteback(page)) {
 			/*
-			 * Synchronous reclaim is performed in two passes,
-			 * first an asynchronous pass over the list to
-			 * start parallel writeback, and a second synchronous
-			 * pass to wait for the IO to complete. Wait here
-			 * for any page for which writeback has already
-			 * started.
+			 * Synchronous reclaim cannot queue pages for
+			 * writeback due to the possibility of stack overflow
+			 * but if it encounters a page under writeback, wait
+			 * for the IO to complete.
 			 */
 			if ((sc->reclaim_mode & RECLAIM_MODE_SYNC) &&
 			    may_enter_fs)
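
Both of the hunks above revolve around the same check: a page that is already under writeback is only waited on when the scan is running in synchronous reclaim mode, and in shrink_page_list() only when may_enter_fs permits it. The standalone sketch below models that decision outside the kernel. The booleans stand in for PageWriteback(), RECLAIM_MODE_SYNC and may_enter_fs from the patch; everything else (the enum, the function name, the small test harness in main()) is invented for illustration and is not vmscan code.

#include <stdbool.h>
#include <stdio.h>

/*
 * Illustration only: models the choice made for a page found under
 * writeback. Under synchronous reclaim the scanner must not queue new
 * writeback itself (stack depth), so the options are to wait for the
 * in-flight IO or to leave the page for a later pass.
 */
enum page_action { SKIP_PAGE, WAIT_FOR_IO };

static enum page_action writeback_action(bool page_writeback,
					 bool reclaim_mode_sync,
					 bool may_enter_fs)
{
	if (!page_writeback)
		return SKIP_PAGE;	/* no IO in flight, nothing to wait on */

	/* Synchronous reclaim waits for IO it did not start, if FS calls are allowed. */
	if (reclaim_mode_sync && may_enter_fs)
		return WAIT_FOR_IO;

	/* Asynchronous reclaim (or !may_enter_fs) leaves the page for a later pass. */
	return SKIP_PAGE;
}

int main(void)
{
	printf("sync reclaim, fs allowed: %s\n",
	       writeback_action(true, true, true) == WAIT_FOR_IO ? "wait" : "skip");
	printf("async reclaim:            %s\n",
	       writeback_action(true, false, true) == WAIT_FOR_IO ? "wait" : "skip");
	return 0;
}
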
@@ -1414,7 +1403,7 @@ static noinline_for_stack void update_isolated_counts(struct zone *zone,
 }
 
 /*
- * Returns true if the caller should wait to clean dirty/writeback pages.
+ * Returns true if a direct reclaim should wait on pages under writeback.
  *
  * If we are direct reclaiming for contiguous pages and we do not reclaim
  * everything in the list, try again and wait for writeback IO to complete.
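
The comment reworded in the final hunk describes a policy rather than a mechanism: only a direct reclaim that is after contiguous pages, and that failed to free everything it isolated, should go back and wait for writeback IO. The sketch below mirrors the shape of that policy as stated in the comment; the function name, parameters and structure are invented for the illustration and do not claim to reproduce the kernel function the comment documents.

#include <stdbool.h>

/*
 * Sketch of the stall policy described by the comment above. Names and
 * parameters are hypothetical; only the decisions follow the comment.
 */
bool should_stall_for_writeback(bool is_direct_reclaim,
				bool for_contiguous_pages,
				unsigned long nr_taken,
				unsigned long nr_freed)
{
	/* The comment scopes the stall to direct reclaim only. */
	if (!is_direct_reclaim)
		return false;

	/* Only a reclaim of contiguous pages is worth stalling for. */
	if (!for_contiguous_pages)
		return false;

	/* Everything isolated was freed, so there is nothing to wait on. */
	if (nr_freed == nr_taken)
		return false;

	/* Otherwise, try again and wait for the writeback IO to complete. */
	return true;
}
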