@@ -491,49 +491,6 @@ unsigned pagevec_lookup_tag(struct pagevec *pvec, struct address_space *mapping,
 
 EXPORT_SYMBOL(pagevec_lookup_tag);
 
-#ifdef CONFIG_SMP
-/*
- * We tolerate a little inaccuracy to avoid ping-ponging the counter between
- * CPUs
- */
-#define ACCT_THRESHOLD	max(16, NR_CPUS * 2)
-
-static DEFINE_PER_CPU(long, committed_space);
-
-void vm_acct_memory(long pages)
-{
-	long *local;
-
-	preempt_disable();
-	local = &__get_cpu_var(committed_space);
-	*local += pages;
-	if (*local > ACCT_THRESHOLD || *local < -ACCT_THRESHOLD) {
-		atomic_long_add(*local, &vm_committed_space);
-		*local = 0;
-	}
-	preempt_enable();
-}
-
-#ifdef CONFIG_HOTPLUG_CPU
-
-/* Drop the CPU's cached committed space back into the central pool. */
-static int cpu_swap_callback(struct notifier_block *nfb,
-			     unsigned long action,
-			     void *hcpu)
-{
-	long *committed;
-
-	committed = &per_cpu(committed_space, (long)hcpu);
-	if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) {
-		atomic_long_add(*committed, &vm_committed_space);
-		*committed = 0;
-		drain_cpu_pagevecs((long)hcpu);
-	}
-	return NOTIFY_OK;
-}
-#endif /* CONFIG_HOTPLUG_CPU */
-#endif /* CONFIG_SMP */
-
 /*
  * Perform any setup for the swap system
  */
@@ -554,7 +511,4 @@ void __init swap_setup(void)
 	 * Right now other parts of the system means that we
 	 * _really_ don't want to cluster much more
 	 */
-#ifdef CONFIG_HOTPLUG_CPU
-	hotcpu_notifier(cpu_swap_callback, 0);
-#endif
 }