@@ -575,7 +575,7 @@ static void balance_dirty_pages(struct address_space *mapping)
 		if (pages_written >= write_chunk)
 			break;		/* We've done our duty */
 
-		congestion_wait(WRITE, HZ/10);
+		congestion_wait(BLK_RW_ASYNC, HZ/10);
 	}
 
 	if (bdi_nr_reclaimable + bdi_nr_writeback < bdi_thresh &&
@@ -669,7 +669,7 @@ void throttle_vm_writeout(gfp_t gfp_mask)
 		if (global_page_state(NR_UNSTABLE_NFS) +
 			global_page_state(NR_WRITEBACK) <= dirty_thresh)
 				break;
-		congestion_wait(WRITE, HZ/10);
+		congestion_wait(BLK_RW_ASYNC, HZ/10);
 
 		/*
 		 * The caller might hold locks which can prevent IO completion
@@ -715,7 +715,7 @@ static void background_writeout(unsigned long _min_pages)
 		if (wbc.nr_to_write > 0 || wbc.pages_skipped > 0) {
 			/* Wrote less than expected */
 			if (wbc.encountered_congestion || wbc.more_io)
-				congestion_wait(WRITE, HZ/10);
+				congestion_wait(BLK_RW_ASYNC, HZ/10);
 			else
 				break;
 		}
@@ -787,7 +787,7 @@ static void wb_kupdate(unsigned long arg)
 		writeback_inodes(&wbc);
 		if (wbc.nr_to_write > 0) {
 			if (wbc.encountered_congestion || wbc.more_io)
-				congestion_wait(WRITE, HZ/10);
+				congestion_wait(BLK_RW_ASYNC, HZ/10);
 			else
 				break;		/* All the old data is written */
 		}
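
Why the first argument changes in every hunk: congestion_wait() sleeps on one of two
backing-dev congestion wait queues indexed by sync vs. async, not by READ vs. WRITE,
so passing WRITE (== 1) selected the sync queue even though the writeback paths above
issue async I/O. A minimal sketch of the interface these callers move to, assuming the
BLK_RW_* definitions added elsewhere in the same series:

	/* include/linux/blkdev.h (same series, assumed here): congestion is
	 * tracked per sync/async queue; the only valid values are 0 and 1. */
	enum {
		BLK_RW_ASYNC	= 0,
		BLK_RW_SYNC	= 1,
	};

	/* include/linux/backing-dev.h: wait for the given queue class to
	 * become uncongested, or until the jiffies timeout expires. */
	long congestion_wait(int sync, long timeout);

	/* Writeback throttling waits on async I/O, so the callers above
	 * now sleep on the async queue for up to HZ/10 jiffies (~100ms). */
	congestion_wait(BLK_RW_ASYNC, HZ/10);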