@@ -129,6 +129,66 @@ unsigned long global_dirty_limit;
  */
 static struct prop_descriptor vm_completions;
 
+/*
+ * Work out the current dirty-memory clamping and background writeout
+ * thresholds.
+ *
+ * The main aim here is to lower them aggressively if there is a lot of mapped
+ * memory around, to avoid stressing page reclaim with lots of unreclaimable
+ * pages. It is better to clamp down on writers than to start swapping and
+ * performing lots of scanning.
+ *
+ * We only allow 1/2 of the currently-unmapped memory to be dirtied.
+ *
+ * We don't permit the clamping level to fall below 5% - that is getting rather
+ * excessive.
+ *
+ * We make sure that the background writeout level is below the adjusted
+ * clamping level.
+ */
+static unsigned long highmem_dirtyable_memory(unsigned long total)
+{
+#ifdef CONFIG_HIGHMEM
+	int node;
+	unsigned long x = 0;
+
+	for_each_node_state(node, N_HIGH_MEMORY) {
+		struct zone *z =
+			&NODE_DATA(node)->node_zones[ZONE_HIGHMEM];
+
+		x += zone_page_state(z, NR_FREE_PAGES) +
+		     zone_reclaimable_pages(z);
+	}
+	/*
+	 * Make sure that the number of highmem pages is never larger
+	 * than the number of the total dirtyable memory. This can only
+	 * occur in very strange VM situations but we want to make sure
+	 * that this does not occur.
+	 */
+	return min(x, total);
+#else
+	return 0;
+#endif
+}
+
+/**
+ * determine_dirtyable_memory - amount of memory that may be used
+ *
+ * Returns the number of pages that can currently be freed and used
+ * by the kernel for direct mappings.
+ */
+static unsigned long determine_dirtyable_memory(void)
+{
+	unsigned long x;
+
+	x = global_page_state(NR_FREE_PAGES) + global_reclaimable_pages();
+
+	if (!vm_highmem_is_dirtyable)
+		x -= highmem_dirtyable_memory(x);
+
+	return x + 1;	/* Ensure that we never return 0 */
+}
+
 /*
  * couple the period to the dirty_ratio:
  *
@@ -196,7 +256,6 @@ int dirty_ratio_handler(struct ctl_table *table, int write,
 	return ret;
 }
 
-
 int dirty_bytes_handler(struct ctl_table *table, int write,
 		void __user *buffer, size_t *lenp,
 		loff_t *ppos)
@@ -291,67 +350,6 @@ int bdi_set_max_ratio(struct backing_dev_info *bdi, unsigned max_ratio)
 }
 EXPORT_SYMBOL(bdi_set_max_ratio);
 
-/*
- * Work out the current dirty-memory clamping and background writeout
- * thresholds.
- *
- * The main aim here is to lower them aggressively if there is a lot of mapped
- * memory around. To avoid stressing page reclaim with lots of unreclaimable
- * pages. It is better to clamp down on writers than to start swapping, and
- * performing lots of scanning.
- *
- * We only allow 1/2 of the currently-unmapped memory to be dirtied.
- *
- * We don't permit the clamping level to fall below 5% - that is getting rather
- * excessive.
- *
- * We make sure that the background writeout level is below the adjusted
- * clamping level.
- */
-
-static unsigned long highmem_dirtyable_memory(unsigned long total)
-{
-#ifdef CONFIG_HIGHMEM
-	int node;
-	unsigned long x = 0;
-
-	for_each_node_state(node, N_HIGH_MEMORY) {
-		struct zone *z =
-			&NODE_DATA(node)->node_zones[ZONE_HIGHMEM];
-
-		x += zone_page_state(z, NR_FREE_PAGES) +
-		     zone_reclaimable_pages(z);
-	}
-	/*
-	 * Make sure that the number of highmem pages is never larger
-	 * than the number of the total dirtyable memory. This can only
-	 * occur in very strange VM situations but we want to make sure
-	 * that this does not occur.
-	 */
-	return min(x, total);
-#else
-	return 0;
-#endif
-}
-
-/**
- * determine_dirtyable_memory - amount of memory that may be used
- *
- * Returns the numebr of pages that can currently be freed and used
- * by the kernel for direct mappings.
- */
-unsigned long determine_dirtyable_memory(void)
-{
-	unsigned long x;
-
-	x = global_page_state(NR_FREE_PAGES) + global_reclaimable_pages();
-
-	if (!vm_highmem_is_dirtyable)
-		x -= highmem_dirtyable_memory(x);
-
-	return x + 1;	/* Ensure that we never return 0 */
-}
-
 static unsigned long dirty_freerun_ceiling(unsigned long thresh,
 					   unsigned long bg_thresh)
 {