@@ -2681,6 +2681,7 @@ static void __meminit zone_init_free_lists(struct zone *zone)
 
 static int zone_batchsize(struct zone *zone)
 {
+#ifdef CONFIG_MMU
 	int batch;
 
 	/*
@@ -2709,6 +2710,23 @@ static int zone_batchsize(struct zone *zone)
 	batch = rounddown_pow_of_two(batch + batch/2) - 1;
 
 	return batch;
+
+#else
+	/* The deferral and batching of frees should be suppressed under NOMMU
+	 * conditions.
+	 *
+	 * The problem is that NOMMU needs to be able to allocate large chunks
+	 * of contiguous memory as there's no hardware page translation to
+	 * assemble apparent contiguous memory from discontiguous pages.
+	 *
+	 * Queueing large contiguous runs of pages for batching, however,
+	 * causes the pages to actually be freed in smaller chunks. As there
+	 * can be a significant delay between the individual batches being
+	 * recycled, this leads to the once large chunks of space being
+	 * fragmented and becoming unavailable for high-order allocations.
+	 */
+	return 0;
+#endif
 }
 
 static void setup_pageset(struct per_cpu_pageset *p, unsigned long batch)
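
For context, a minimal userspace sketch (not kernel code) of the behaviour the hunk above produces. The MMU-path derivation of the initial batch value is elided by the hunk, so the zone_size_pages parameter, the /1024 estimate and the clamp to 32 below are illustrative assumptions; only the rounddown_pow_of_two(batch + batch/2) - 1 step and the NOMMU return of 0 come from the patch itself.

#include <stdio.h>

/* Round down to the nearest power of two, as the kernel's
 * rounddown_pow_of_two() does for non-zero input. */
static unsigned long rounddown_pow_of_two(unsigned long n)
{
	unsigned long p = 1;

	while (p * 2 <= n)
		p *= 2;
	return p;
}

static int zone_batchsize(unsigned long zone_size_pages, int have_mmu)
{
	int batch;

	if (!have_mmu) {
		/* NOMMU: no per-CPU batching; freed pages go straight back
		 * to the allocator so large contiguous runs stay intact. */
		return 0;
	}

	/* Illustrative stand-in for the elided MMU-path derivation:
	 * roughly 1/1024th of the zone, clamped to a small range. */
	batch = zone_size_pages / 1024;
	if (batch < 1)
		batch = 1;
	if (batch > 32)
		batch = 32;

	/* The step visible in the hunk: clamp to just under a power of two. */
	batch = rounddown_pow_of_two(batch + batch / 2) - 1;

	return batch;
}

int main(void)
{
	printf("MMU,   64K-page zone: batch = %d\n", zone_batchsize(65536, 1));
	printf("NOMMU, 64K-page zone: batch = %d\n", zone_batchsize(65536, 0));
	return 0;
}

With these assumed numbers the MMU path yields 31 pages per batch, while the NOMMU path yields 0, which is exactly why the patch disables the deferral: a batch size of 0 means freed pages are never parked on the per-CPU lists where they would fragment contiguous runs.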