@@ -355,8 +355,8 @@ get_dirty_limits(long *pbackground, long *pdirty, long *pbdi_dirty,
  */
 static void balance_dirty_pages(struct address_space *mapping)
 {
-	long bdi_nr_reclaimable;
-	long bdi_nr_writeback;
+	long nr_reclaimable, bdi_nr_reclaimable;
+	long nr_writeback, bdi_nr_writeback;
 	long background_thresh;
 	long dirty_thresh;
 	long bdi_thresh;
@@ -376,11 +376,26 @@ static void balance_dirty_pages(struct address_space *mapping)
 
 		get_dirty_limits(&background_thresh, &dirty_thresh,
 				 &bdi_thresh, bdi);
+
+		nr_reclaimable = global_page_state(NR_FILE_DIRTY) +
+					global_page_state(NR_UNSTABLE_NFS);
+		nr_writeback = global_page_state(NR_WRITEBACK);
+
 		bdi_nr_reclaimable = bdi_stat(bdi, BDI_RECLAIMABLE);
 		bdi_nr_writeback = bdi_stat(bdi, BDI_WRITEBACK);
+
 		if (bdi_nr_reclaimable + bdi_nr_writeback <= bdi_thresh)
 			break;
 
+		/*
+		 * Throttle it only when the background writeback cannot
+		 * catch-up. This avoids (excessively) small writeouts
+		 * when the bdi limits are ramping up.
+		 */
+		if (nr_reclaimable + nr_writeback <
+				(background_thresh + dirty_thresh) / 2)
+			break;
+
 		if (!bdi->dirty_exceeded)
 			bdi->dirty_exceeded = 1;
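
The midpoint test in the second hunk is the heart of the change: the dirtying task is throttled only once the global count of dirty plus writeback pages reaches the midpoint between background_thresh (where background writeback kicks in) and dirty_thresh (the hard limit); below that point, background writeback is trusted to catch up on its own. A minimal userspace sketch of that condition follows; should_throttle() and the page counts are hypothetical stand-ins for illustration, not kernel API:

#include <stdio.h>

/*
 * Sketch of the midpoint check added above.  In the kernel the counts
 * come from global_page_state() and the thresholds from
 * get_dirty_limits(); the values below are made-up page counts.
 */
static int should_throttle(long nr_reclaimable, long nr_writeback,
			   long background_thresh, long dirty_thresh)
{
	return nr_reclaimable + nr_writeback >=
			(background_thresh + dirty_thresh) / 2;
}

int main(void)
{
	long background_thresh = 10000;	/* hypothetical limit, in pages */
	long dirty_thresh = 20000;	/* midpoint is 15000 pages */

	/* 12000 dirty/writeback pages: below the midpoint, no throttling */
	printf("%d\n", should_throttle(9000, 3000,
				       background_thresh, dirty_thresh));
	/* 16000 pages: past the midpoint, the dirtier is throttled */
	printf("%d\n", should_throttle(12000, 4000,
				       background_thresh, dirty_thresh));
	return 0;
}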