@@ -6085,33 +6085,18 @@ void free_contig_range(unsigned long pfn, unsigned nr_pages)
 #endif
 
 #ifdef CONFIG_MEMORY_HOTPLUG
-static int __meminit __zone_pcp_update(void *data)
-{
-	struct zone *zone = data;
-	int cpu;
-	unsigned long batch = zone_batchsize(zone), flags;
-
-	for_each_possible_cpu(cpu) {
-		struct per_cpu_pageset *pset;
-		struct per_cpu_pages *pcp;
-
-		pset = per_cpu_ptr(zone->pageset, cpu);
-		pcp = &pset->pcp;
-
-		local_irq_save(flags);
-		if (pcp->count > 0)
-			free_pcppages_bulk(zone, pcp->count, pcp);
-		drain_zonestat(zone, pset);
-		setup_pageset(pset, batch);
-		local_irq_restore(flags);
-	}
-	return 0;
-}
-
+/*
+ * The zone indicated has a new number of managed_pages; batch sizes and percpu
+ * page high values need to be recalculated.
+ */
 void __meminit zone_pcp_update(struct zone *zone)
 {
+	unsigned cpu;
+	unsigned long batch;
 	mutex_lock(&pcp_batch_high_lock);
-	stop_machine(__zone_pcp_update, zone, NULL);
+	batch = zone_batchsize(zone);
+	for_each_possible_cpu(cpu)
+		pageset_set_batch(per_cpu_ptr(zone->pageset, cpu), batch);
 	mutex_unlock(&pcp_batch_high_lock);
 }
 #endif
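
The replacement path leans on pageset_set_batch(), which is not visible in this hunk. As a rough sketch only: the body below assumes that pageset_set_batch() derives pcp->high from the batch value and hands both to a pageset_update()-style writer that orders the stores with memory barriers. The scaling factor for pcp->high and the barrier placement are assumptions for illustration, not text quoted from the patch.

/*
 * Illustrative sketch (not the quoted patch): update pcp->high and
 * pcp->batch for one per-cpu pageset without stopping the machine.
 */
static void pageset_update(struct per_cpu_pages *pcp, unsigned long high,
			   unsigned long batch)
{
	/* Fall back to a safe batch first, so a concurrent allocator
	 * never observes batch larger than high mid-update. */
	pcp->batch = 1;
	smp_wmb();

	/* Publish high, then the real batch, in that order. */
	pcp->high = high;
	smp_wmb();

	pcp->batch = batch;
}

static void pageset_set_batch(struct per_cpu_pageset *p, unsigned long batch)
{
	/* The "6 * batch" high watermark is an assumed heuristic here. */
	pageset_update(&p->pcp, 6 * batch, max(1UL, 1 * batch));
}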