@@ -1258,8 +1258,6 @@ void balance_dirty_pages_ratelimited_nr(struct address_space *mapping,
 	if (bdi->dirty_exceeded)
 		ratelimit = min(ratelimit, 32 >> (PAGE_SHIFT - 10));
 
-	current->nr_dirtied += nr_pages_dirtied;
-
 	preempt_disable();
 	/*
 	 * This prevents one CPU to accumulate too many dirtied pages without
@@ -1270,12 +1268,9 @@ void balance_dirty_pages_ratelimited_nr(struct address_space *mapping,
 	p = &__get_cpu_var(bdp_ratelimits);
 	if (unlikely(current->nr_dirtied >= ratelimit))
 		*p = 0;
-	else {
-		*p += nr_pages_dirtied;
-		if (unlikely(*p >= ratelimit_pages)) {
-			*p = 0;
-			ratelimit = 0;
-		}
+	else if (unlikely(*p >= ratelimit_pages)) {
+		*p = 0;
+		ratelimit = 0;
 	}
 	/*
 	 * Pick up the dirtied pages by the exited tasks. This avoids lots of
@@ -1768,6 +1763,8 @@ void account_page_dirtied(struct page *page, struct address_space *mapping)
 		__inc_bdi_stat(mapping->backing_dev_info, BDI_RECLAIMABLE);
 		__inc_bdi_stat(mapping->backing_dev_info, BDI_DIRTIED);
 		task_io_account_write(PAGE_CACHE_SIZE);
+		current->nr_dirtied++;
+		this_cpu_inc(bdp_ratelimits);
 	}
 }
 EXPORT_SYMBOL(account_page_dirtied);
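
The net effect of the hunks above: the per-task (current->nr_dirtied) and per-CPU (bdp_ratelimits) dirty counters are now advanced once per page at accounting time in account_page_dirtied(), while balance_dirty_pages_ratelimited_nr() only reads and resets them. Below is a minimal user-space sketch of that flow, not kernel code: the task struct stand-in, the plain bdp_ratelimits global (the kernel uses a per-CPU variable under preempt_disable()), and the should_throttle() helper are illustrative assumptions.

/*
 * Minimal user-space sketch of the post-patch accounting flow.
 * Not kernel code: "current", bdp_ratelimits and should_throttle()
 * are simplified stand-ins for illustration only.
 */
#include <stdio.h>
#include <stdbool.h>

static unsigned long ratelimit_pages = 32;	/* stand-in for the global threshold */
static unsigned long bdp_ratelimits;		/* the kernel keeps this per-CPU */

struct task { unsigned long nr_dirtied; };
static struct task current_task;
#define current (&current_task)

/* After the patch, accounting happens here, once per dirtied page. */
static void account_page_dirtied(void)
{
	current->nr_dirtied++;
	bdp_ratelimits++;	/* this_cpu_inc(bdp_ratelimits) in the kernel */
}

/*
 * Mirrors the reworked check in balance_dirty_pages_ratelimited_nr():
 * the counters are only read and reset here, no longer incremented.
 */
static bool should_throttle(unsigned long ratelimit, bool dirty_exceeded)
{
	if (dirty_exceeded)
		ratelimit = 8;	/* min(ratelimit, 32 >> (PAGE_SHIFT - 10)) for 4K pages */

	if (current->nr_dirtied >= ratelimit) {
		bdp_ratelimits = 0;
		return true;
	}
	if (bdp_ratelimits >= ratelimit_pages) {
		/* this CPU accumulated too many dirtied pages: force a pass */
		bdp_ratelimits = 0;
		return true;	/* the kernel does this by setting ratelimit = 0 */
	}
	return false;
}

int main(void)
{
	for (int i = 0; i < 40; i++) {
		account_page_dirtied();
		if (should_throttle(32, false)) {
			printf("throttle after %lu dirtied pages\n",
			       current->nr_dirtied);
			current->nr_dirtied = 0;	/* balance_dirty_pages() resets this */
		}
	}
	return 0;
}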