
Merge branch 'cpus4096-for-linus-2' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip

* 'cpus4096-for-linus-2' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip: (66 commits)
  x86: export vector_used_by_percpu_irq
  x86: use logical apicid in x2apic_cluster's x2apic_cpu_mask_to_apicid_and()
  sched: nominate preferred wakeup cpu, fix
  x86: fix lguest used_vectors breakage, -v2
  x86: fix warning in arch/x86/kernel/io_apic.c
  sched: fix warning in kernel/sched.c
  sched: move test_sd_parent() to an SMP section of sched.h
  sched: add SD_BALANCE_NEWIDLE at MC and CPU level for sched_mc>0
  sched: activate active load balancing in new idle cpus
  sched: bias task wakeups to preferred semi-idle packages
  sched: nominate preferred wakeup cpu
  sched: favour lower logical cpu number for sched_mc balance
  sched: framework for sched_mc/smt_power_savings=N
  sched: convert BALANCE_FOR_xx_POWER to inline functions
  x86: use possible_cpus=NUM to extend the possible cpus allowed
  x86: fix cpu_mask_to_apicid_and to include cpu_online_mask
  x86: update io_apic.c to the new cpumask code
  x86: Introduce topology_core_cpumask()/topology_thread_cpumask()
  x86: xen: use smp_call_function_many()
  x86: use work_on_cpu in x86/kernel/cpu/mcheck/mce_amd_64.c
  ...

Fixed up trivial conflict in kernel/time/tick-sched.c manually
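The common thread in these commits is the new cpumask API: interfaces that used to pass a full cpumask_t by value (NR_CPUS bits copied on every call, which hurts badly at NR_CPUS=4096) now take a const struct cpumask pointer. A minimal before/after sketch of the conversion pattern, with hypothetical handler names (not code from this series):

	/* Old style: the whole mask is copied onto the stack. */
	static void old_set_affinity(unsigned int irq, cpumask_t mask)
	{
		unsigned int cpu = first_cpu(mask);
		/* ... program routing hardware for 'cpu' ... */
	}

	/* New style: a const pointer; cpumask_of(cpu) hands back a
	 * pointer to a constant single-CPU mask, so nothing is copied. */
	static void new_set_affinity(unsigned int irq, const struct cpumask *mask)
	{
		unsigned int cpu = cpumask_first(mask);
		/* ... program routing hardware for 'cpu' ... */
	}

The same substitutions recur throughout the diffs below: cpumask_of_cpu(cpu) becomes cpumask_of(cpu), first_cpu() becomes cpumask_first(), and cpus_and() on values becomes cpumask_and() on pointers.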
Linus Torvalds 16 years ago
parent
commit
b840d79631
100 changed files with 156 additions and 241 deletions
  1. 9 8
      Documentation/cpu-hotplug.txt
  2. 0 1
      arch/alpha/include/asm/smp.h
  3. 1 1
      arch/alpha/kernel/irq.c
  4. 2 0
      arch/alpha/kernel/process.c
  5. 2 5
      arch/alpha/kernel/smp.c
  6. 4 4
      arch/alpha/kernel/sys_dp264.c
  7. 2 2
      arch/alpha/kernel/sys_titan.c
  8. 2 2
      arch/arm/common/gic.c
  9. 1 1
      arch/arm/kernel/irq.c
  10. 0 10
      arch/arm/kernel/smp.c
  11. 1 2
      arch/arm/mach-at91/at91rm9200_time.c
  12. 1 1
      arch/arm/mach-at91/at91sam926x_time.c
  13. 1 1
      arch/arm/mach-davinci/time.c
  14. 1 1
      arch/arm/mach-imx/time.c
  15. 1 1
      arch/arm/mach-ixp4xx/common.c
  16. 1 1
      arch/arm/mach-msm/timer.c
  17. 1 1
      arch/arm/mach-ns9xxx/time-ns9360.c
  18. 1 1
      arch/arm/mach-omap1/time.c
  19. 1 1
      arch/arm/mach-omap1/timer32k.c
  20. 1 1
      arch/arm/mach-omap2/timer-gp.c
  21. 1 1
      arch/arm/mach-pxa/time.c
  22. 1 1
      arch/arm/mach-realview/core.c
  23. 2 2
      arch/arm/mach-realview/localtimer.c
  24. 1 1
      arch/arm/mach-sa1100/time.c
  25. 1 1
      arch/arm/mach-versatile/core.c
  26. 2 2
      arch/arm/oprofile/op_model_mpcore.c
  27. 1 1
      arch/arm/plat-mxc/time.c
  28. 1 1
      arch/arm/plat-orion/time.c
  29. 1 1
      arch/avr32/kernel/time.c
  30. 1 1
      arch/blackfin/kernel/time-ts.c
  31. 2 2
      arch/cris/arch-v32/kernel/irq.c
  32. 0 4
      arch/cris/arch-v32/kernel/smp.c
  33. 0 1
      arch/cris/include/asm/smp.h
  34. 1 1
      arch/ia64/hp/sim/hpsim_irq.c
  35. 0 1
      arch/ia64/include/asm/smp.h
  36. 0 2
      arch/ia64/include/asm/topology.h
  37. 6 6
      arch/ia64/kernel/iosapic.c
  38. 4 5
      arch/ia64/kernel/irq.c
  39. 6 6
      arch/ia64/kernel/msi_ia64.c
  40. 2 8
      arch/ia64/kernel/smpboot.c
  41. 1 1
      arch/ia64/kernel/topology.c
  42. 3 3
      arch/ia64/sn/kernel/irq.c
  43. 4 3
      arch/ia64/sn/kernel/msi_sn.c
  44. 1 0
      arch/m32r/Kconfig
  45. 0 6
      arch/m32r/kernel/smpboot.c
  46. 1 1
      arch/m68knommu/platform/coldfire/pit.c
  47. 2 1
      arch/mips/include/asm/irq.h
  48. 0 1
      arch/mips/include/asm/mach-ip27/topology.h
  49. 0 3
      arch/mips/include/asm/smp.h
  50. 1 1
      arch/mips/jazz/irq.c
  51. 2 2
      arch/mips/kernel/cevt-bcm1480.c
  52. 1 1
      arch/mips/kernel/cevt-ds1287.c
  53. 1 1
      arch/mips/kernel/cevt-gt641xx.c
  54. 1 1
      arch/mips/kernel/cevt-r4k.c
  55. 2 2
      arch/mips/kernel/cevt-sb1250.c
  56. 1 1
      arch/mips/kernel/cevt-smtc.c
  57. 1 1
      arch/mips/kernel/cevt-txx9.c
  58. 1 1
      arch/mips/kernel/i8253.c
  59. 3 3
      arch/mips/kernel/irq-gic.c
  60. 3 3
      arch/mips/kernel/smp-cmp.c
  61. 1 1
      arch/mips/kernel/smp-mt.c
  62. 1 6
      arch/mips/kernel/smp.c
  63. 3 3
      arch/mips/kernel/smtc.c
  64. 3 3
      arch/mips/mti-malta/malta-smtc.c
  65. 1 0
      arch/mips/nxp/pnx8550/common/time.c
  66. 3 3
      arch/mips/pmc-sierra/yosemite/smp.c
  67. 1 1
      arch/mips/sgi-ip27/ip27-smp.c
  68. 1 1
      arch/mips/sgi-ip27/ip27-timer.c
  69. 4 4
      arch/mips/sibyte/bcm1480/irq.c
  70. 4 4
      arch/mips/sibyte/bcm1480/smp.c
  71. 4 4
      arch/mips/sibyte/sb1250/irq.c
  72. 4 4
      arch/mips/sibyte/sb1250/smp.c
  73. 1 1
      arch/mips/sni/time.c
  74. 1 0
      arch/parisc/Kconfig
  75. 3 3
      arch/parisc/kernel/irq.c
  76. 0 15
      arch/parisc/kernel/smp.c
  77. 0 1
      arch/powerpc/include/asm/topology.h
  78. 1 1
      arch/powerpc/kernel/irq.c
  79. 0 4
      arch/powerpc/kernel/smp.c
  80. 1 1
      arch/powerpc/kernel/time.c
  81. 2 2
      arch/powerpc/platforms/pseries/xics.c
  82. 2 2
      arch/powerpc/sysdev/mpic.c
  83. 1 1
      arch/powerpc/sysdev/mpic.h
  84. 1 0
      arch/s390/Kconfig
  85. 0 6
      arch/s390/kernel/smp.c
  86. 1 1
      arch/s390/kernel/time.c
  87. 1 1
      arch/sh/include/asm/smp.h
  88. 0 1
      arch/sh/include/asm/topology.h
  89. 2 8
      arch/sh/kernel/smp.c
  90. 1 1
      arch/sh/kernel/timers/timer-broadcast.c
  91. 1 1
      arch/sh/kernel/timers/timer-tmu.c
  92. 0 2
      arch/sparc/include/asm/smp_32.h
  93. 7 4
      arch/sparc/kernel/irq_64.c
  94. 1 1
      arch/sparc/kernel/of_device_64.c
  95. 1 1
      arch/sparc/kernel/pci_msi.c
  96. 2 4
      arch/sparc/kernel/smp_32.c
  97. 0 4
      arch/sparc/kernel/smp_64.c
  98. 0 4
      arch/sparc/kernel/sparc_ksyms_32.c
  99. 1 1
      arch/sparc/kernel/time_64.c
  100. 0 7
      arch/um/kernel/smp.c

+ 9 - 8
Documentation/cpu-hotplug.txt

@@ -50,16 +50,17 @@ additional_cpus=n (*)	Use this to limit hotpluggable cpus. This option sets
   			cpu_possible_map = cpu_present_map + additional_cpus
 
 (*) Option valid only for following architectures
-- x86_64, ia64
+- ia64
 
-ia64 and x86_64 use the number of disabled local apics in ACPI tables MADT
-to determine the number of potentially hot-pluggable cpus. The implementation
-should only rely on this to count the # of cpus, but *MUST* not rely on the
-apicid values in those tables for disabled apics. In the event BIOS doesn't
-mark such hot-pluggable cpus as disabled entries, one could use this
-parameter "additional_cpus=x" to represent those cpus in the cpu_possible_map.
+ia64 uses the number of disabled local apics in ACPI tables MADT to
+determine the number of potentially hot-pluggable cpus. The implementation
+should only rely on this to count the # of cpus, but *MUST* not rely
+on the apicid values in those tables for disabled apics. In the event
+BIOS doesn't mark such hot-pluggable cpus as disabled entries, one could
+use this parameter "additional_cpus=x" to represent those cpus in the
+cpu_possible_map.
 
-possible_cpus=n		[s390 only] use this to set hotpluggable cpus.
+possible_cpus=n		[s390,x86_64] use this to set hotpluggable cpus.
 			This option sets possible_cpus bits in
 			cpu_possible_map. Thus keeping the numbers of bits set
 			constant even if the machine gets rebooted.
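
As a concrete illustration of the re-documented option: a hypothetical x86_64 machine that boots with two CPUs but should accept two more via hotplug could reserve the extra bits in cpu_possible_map with a boot parameter such as:

	possible_cpus=4

keeping the possible-CPU count stable across reboots regardless of how many CPUs are present at boot.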

+ 0 - 1
arch/alpha/include/asm/smp.h

@@ -45,7 +45,6 @@ extern struct cpuinfo_alpha cpu_data[NR_CPUS];
 #define raw_smp_processor_id()	(current_thread_info()->cpu)
 
 extern int smp_num_cpus;
-#define cpu_possible_map	cpu_present_map
 
 extern void arch_send_call_function_single_ipi(int cpu);
 extern void arch_send_call_function_ipi(cpumask_t mask);

+ 1 - 1
arch/alpha/kernel/irq.c

@@ -55,7 +55,7 @@ int irq_select_affinity(unsigned int irq)
 	last_cpu = cpu;
 
 	irq_desc[irq].affinity = cpumask_of_cpu(cpu);
-	irq_desc[irq].chip->set_affinity(irq, cpumask_of_cpu(cpu));
+	irq_desc[irq].chip->set_affinity(irq, cpumask_of(cpu));
 	return 0;
 }
 #endif /* CONFIG_SMP */

+ 2 - 0
arch/alpha/kernel/process.c

@@ -94,6 +94,7 @@ common_shutdown_1(void *generic_ptr)
 		flags |= 0x00040000UL; /* "remain halted" */
 		*pflags = flags;
 		cpu_clear(cpuid, cpu_present_map);
+		cpu_clear(cpuid, cpu_possible_map);
 		halt();
 	}
 #endif
@@ -120,6 +121,7 @@ common_shutdown_1(void *generic_ptr)
 #ifdef CONFIG_SMP
 	/* Wait for the secondaries to halt. */
 	cpu_clear(boot_cpuid, cpu_present_map);
+	cpu_clear(boot_cpuid, cpu_possible_map);
 	while (cpus_weight(cpu_present_map))
 		barrier();
 #endif

+ 2 - 5
arch/alpha/kernel/smp.c

@@ -70,11 +70,6 @@ enum ipi_message_type {
 /* Set to a secondary's cpuid when it comes online.  */
 static int smp_secondary_alive __devinitdata = 0;
 
-/* Which cpus ids came online.  */
-cpumask_t cpu_online_map;
-
-EXPORT_SYMBOL(cpu_online_map);
-
 int smp_num_probed;		/* Internal processor count */
 int smp_num_cpus = 1;		/* Number that came online.  */
 EXPORT_SYMBOL(smp_num_cpus);
@@ -440,6 +435,7 @@ setup_smp(void)
 				((char *)cpubase + i*hwrpb->processor_size);
 			if ((cpu->flags & 0x1cc) == 0x1cc) {
 				smp_num_probed++;
+				cpu_set(i, cpu_possible_map);
 				cpu_set(i, cpu_present_map);
 				cpu->pal_revision = boot_cpu_palrev;
 			}
@@ -473,6 +469,7 @@ smp_prepare_cpus(unsigned int max_cpus)
 
 	/* Nothing to do on a UP box, or when told not to.  */
 	if (smp_num_probed == 1 || max_cpus == 0) {
+		cpu_possible_map = cpumask_of_cpu(boot_cpuid);
 		cpu_present_map = cpumask_of_cpu(boot_cpuid);
 		printk(KERN_INFO "SMP mode deactivated.\n");
 		return;

+ 4 - 4
arch/alpha/kernel/sys_dp264.c

@@ -177,19 +177,19 @@ cpu_set_irq_affinity(unsigned int irq, cpumask_t affinity)
 }
 
 static void
-dp264_set_affinity(unsigned int irq, cpumask_t affinity)
+dp264_set_affinity(unsigned int irq, const struct cpumask *affinity)
 { 
 	spin_lock(&dp264_irq_lock);
-	cpu_set_irq_affinity(irq, affinity);
+	cpu_set_irq_affinity(irq, *affinity);
 	tsunami_update_irq_hw(cached_irq_mask);
 	spin_unlock(&dp264_irq_lock);
 }
 
 static void
-clipper_set_affinity(unsigned int irq, cpumask_t affinity)
+clipper_set_affinity(unsigned int irq, const struct cpumask *affinity)
 { 
 	spin_lock(&dp264_irq_lock);
-	cpu_set_irq_affinity(irq - 16, affinity);
+	cpu_set_irq_affinity(irq - 16, *affinity);
 	tsunami_update_irq_hw(cached_irq_mask);
 	spin_unlock(&dp264_irq_lock);
 }

+ 2 - 2
arch/alpha/kernel/sys_titan.c

@@ -158,10 +158,10 @@ titan_cpu_set_irq_affinity(unsigned int irq, cpumask_t affinity)
 }
 
 static void
-titan_set_irq_affinity(unsigned int irq, cpumask_t affinity)
+titan_set_irq_affinity(unsigned int irq, const struct cpumask *affinity)
 { 
 	spin_lock(&titan_irq_lock);
-	titan_cpu_set_irq_affinity(irq - 16, affinity);
+	titan_cpu_set_irq_affinity(irq - 16, *affinity);
 	titan_update_irq_hw(titan_cached_irq_mask);
 	spin_unlock(&titan_irq_lock);
 }

+ 2 - 2
arch/arm/common/gic.c

@@ -109,11 +109,11 @@ static void gic_unmask_irq(unsigned int irq)
 }
 
 #ifdef CONFIG_SMP
-static void gic_set_cpu(unsigned int irq, cpumask_t mask_val)
+static void gic_set_cpu(unsigned int irq, const struct cpumask *mask_val)
 {
 	void __iomem *reg = gic_dist_base(irq) + GIC_DIST_TARGET + (gic_irq(irq) & ~3);
 	unsigned int shift = (irq % 4) * 8;
-	unsigned int cpu = first_cpu(mask_val);
+	unsigned int cpu = cpumask_first(mask_val);
 	u32 val;
 
 	spin_lock(&irq_controller_lock);

+ 1 - 1
arch/arm/kernel/irq.c

@@ -174,7 +174,7 @@ static void route_irq(struct irq_desc *desc, unsigned int irq, unsigned int cpu)
 	pr_debug("IRQ%u: moving from cpu%u to cpu%u\n", irq, desc->cpu, cpu);
 
 	spin_lock_irq(&desc->lock);
-	desc->chip->set_affinity(irq, cpumask_of_cpu(cpu));
+	desc->chip->set_affinity(irq, cpumask_of(cpu));
 	spin_unlock_irq(&desc->lock);
 }

+ 0 - 10
arch/arm/kernel/smp.c

@@ -33,16 +33,6 @@
 #include <asm/tlbflush.h>
 #include <asm/ptrace.h>
 
-/*
- * bitmask of present and online CPUs.
- * The present bitmask indicates that the CPU is physically present.
- * The online bitmask indicates that the CPU is up and running.
- */
-cpumask_t cpu_possible_map;
-EXPORT_SYMBOL(cpu_possible_map);
-cpumask_t cpu_online_map;
-EXPORT_SYMBOL(cpu_online_map);
-
 /*
  * as from 2.5, kernels no longer have an init_tasks structure
  * so we need some other way of telling a new secondary core

+ 1 - 2
arch/arm/mach-at91/at91rm9200_time.c

@@ -178,7 +178,6 @@ static struct clock_event_device clkevt = {
 	.features	= CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
 	.shift		= 32,
 	.rating		= 150,
-	.cpumask	= CPU_MASK_CPU0,
 	.set_next_event	= clkevt32k_next_event,
 	.set_mode	= clkevt32k_mode,
 };
@@ -206,7 +205,7 @@ void __init at91rm9200_timer_init(void)
 	clkevt.mult = div_sc(AT91_SLOW_CLOCK, NSEC_PER_SEC, clkevt.shift);
 	clkevt.max_delta_ns = clockevent_delta2ns(AT91_ST_ALMV, &clkevt);
 	clkevt.min_delta_ns = clockevent_delta2ns(2, &clkevt) + 1;
-	clkevt.cpumask = cpumask_of_cpu(0);
+	clkevt.cpumask = cpumask_of(0);
 	clockevents_register_device(&clkevt);
 
 	/* register clocksource */

+ 1 - 1
arch/arm/mach-at91/at91sam926x_time.c

@@ -91,7 +91,6 @@ static struct clock_event_device pit_clkevt = {
 	.features	= CLOCK_EVT_FEAT_PERIODIC,
 	.shift		= 32,
 	.rating		= 100,
-	.cpumask	= CPU_MASK_CPU0,
 	.set_mode	= pit_clkevt_mode,
 };
 
@@ -173,6 +172,7 @@ static void __init at91sam926x_pit_init(void)
 
 	/* Set up and register clockevents */
 	pit_clkevt.mult = div_sc(pit_rate, NSEC_PER_SEC, pit_clkevt.shift);
+	pit_clkevt.cpumask = cpumask_of(0);
 	clockevents_register_device(&pit_clkevt);
 }

+ 1 - 1
arch/arm/mach-davinci/time.c

@@ -322,7 +322,7 @@ static void __init davinci_timer_init(void)
 	clockevent_davinci.min_delta_ns =
 		clockevent_delta2ns(1, &clockevent_davinci);
 
-	clockevent_davinci.cpumask = cpumask_of_cpu(0);
+	clockevent_davinci.cpumask = cpumask_of(0);
 	clockevents_register_device(&clockevent_davinci);
 }

+ 1 - 1
arch/arm/mach-imx/time.c

@@ -184,7 +184,7 @@ static int __init imx_clockevent_init(unsigned long rate)
 	clockevent_imx.min_delta_ns =
 		clockevent_delta2ns(0xf, &clockevent_imx);
 
-	clockevent_imx.cpumask = cpumask_of_cpu(0);
+	clockevent_imx.cpumask = cpumask_of(0);
 
 	clockevents_register_device(&clockevent_imx);

+ 1 - 1
arch/arm/mach-ixp4xx/common.c

@@ -487,7 +487,7 @@ static int __init ixp4xx_clockevent_init(void)
 		clockevent_delta2ns(0xfffffffe, &clockevent_ixp4xx);
 	clockevent_ixp4xx.min_delta_ns =
 		clockevent_delta2ns(0xf, &clockevent_ixp4xx);
-	clockevent_ixp4xx.cpumask = cpumask_of_cpu(0);
+	clockevent_ixp4xx.cpumask = cpumask_of(0);
 
 	clockevents_register_device(&clockevent_ixp4xx);
 	return 0;

+ 1 - 1
arch/arm/mach-msm/timer.c

@@ -182,7 +182,7 @@ static void __init msm_timer_init(void)
 			clockevent_delta2ns(0xf0000000 >> clock->shift, ce);
 		/* 4 gets rounded down to 3 */
 		ce->min_delta_ns = clockevent_delta2ns(4, ce);
-		ce->cpumask = cpumask_of_cpu(0);
+		ce->cpumask = cpumask_of(0);
 
 		cs->mult = clocksource_hz2mult(clock->freq, cs->shift);
 		res = clocksource_register(cs);

+ 1 - 1
arch/arm/mach-ns9xxx/time-ns9360.c

@@ -173,7 +173,7 @@ static void __init ns9360_timer_init(void)
 	ns9360_clockevent_device.min_delta_ns =
 		clockevent_delta2ns(1, &ns9360_clockevent_device);
 
-	ns9360_clockevent_device.cpumask = cpumask_of_cpu(0);
+	ns9360_clockevent_device.cpumask = cpumask_of(0);
 	clockevents_register_device(&ns9360_clockevent_device);
 
 	setup_irq(IRQ_NS9360_TIMER0 + TIMER_CLOCKEVENT,

+ 1 - 1
arch/arm/mach-omap1/time.c

@@ -173,7 +173,7 @@ static __init void omap_init_mpu_timer(unsigned long rate)
 	clockevent_mpu_timer1.min_delta_ns =
 		clockevent_delta2ns(1, &clockevent_mpu_timer1);
 
-	clockevent_mpu_timer1.cpumask = cpumask_of_cpu(0);
+	clockevent_mpu_timer1.cpumask = cpumask_of(0);
 	clockevents_register_device(&clockevent_mpu_timer1);
 }

+ 1 - 1
arch/arm/mach-omap1/timer32k.c

@@ -187,7 +187,7 @@ static __init void omap_init_32k_timer(void)
 	clockevent_32k_timer.min_delta_ns =
 		clockevent_delta2ns(1, &clockevent_32k_timer);
 
-	clockevent_32k_timer.cpumask = cpumask_of_cpu(0);
+	clockevent_32k_timer.cpumask = cpumask_of(0);
 	clockevents_register_device(&clockevent_32k_timer);
 }

+ 1 - 1
arch/arm/mach-omap2/timer-gp.c

@@ -120,7 +120,7 @@ static void __init omap2_gp_clockevent_init(void)
 	clockevent_gpt.min_delta_ns =
 		clockevent_delta2ns(1, &clockevent_gpt);
 
-	clockevent_gpt.cpumask = cpumask_of_cpu(0);
+	clockevent_gpt.cpumask = cpumask_of(0);
 	clockevents_register_device(&clockevent_gpt);
 }

+ 1 - 1
arch/arm/mach-pxa/time.c

@@ -122,7 +122,6 @@ static struct clock_event_device ckevt_pxa_osmr0 = {
 	.features	= CLOCK_EVT_FEAT_ONESHOT,
 	.shift		= 32,
 	.rating		= 200,
-	.cpumask	= CPU_MASK_CPU0,
 	.set_next_event	= pxa_osmr0_set_next_event,
 	.set_mode	= pxa_osmr0_set_mode,
 };
@@ -163,6 +162,7 @@ static void __init pxa_timer_init(void)
 		clockevent_delta2ns(0x7fffffff, &ckevt_pxa_osmr0);
 	ckevt_pxa_osmr0.min_delta_ns =
 		clockevent_delta2ns(MIN_OSCR_DELTA * 2, &ckevt_pxa_osmr0) + 1;
+	ckevt_pxa_osmr0.cpumask = cpumask_of(0);
 
 	cksrc_pxa_oscr0.mult =
 		clocksource_hz2mult(clock_tick_rate, cksrc_pxa_oscr0.shift);

+ 1 - 1
arch/arm/mach-realview/core.c

@@ -624,7 +624,7 @@ static struct clock_event_device timer0_clockevent =	 {
 	.set_mode	= timer_set_mode,
 	.set_next_event	= timer_set_next_event,
 	.rating		= 300,
-	.cpumask	= CPU_MASK_ALL,
+	.cpumask	= cpu_all_mask,
 };
 
 static void __init realview_clockevents_init(unsigned int timer_irq)

+ 2 - 2
arch/arm/mach-realview/localtimer.c

@@ -154,7 +154,7 @@ void __cpuinit local_timer_setup(void)
 	clk->set_mode		= local_timer_set_mode;
 	clk->set_next_event	= local_timer_set_next_event;
 	clk->irq		= IRQ_LOCALTIMER;
-	clk->cpumask		= cpumask_of_cpu(cpu);
+	clk->cpumask		= cpumask_of(cpu);
 	clk->shift		= 20;
 	clk->mult		= div_sc(mpcore_timer_rate, NSEC_PER_SEC, clk->shift);
 	clk->max_delta_ns	= clockevent_delta2ns(0xffffffff, clk);
@@ -193,7 +193,7 @@ void __cpuinit local_timer_setup(void)
 	clk->rating		= 200;
 	clk->set_mode		= dummy_timer_set_mode;
 	clk->broadcast		= smp_timer_broadcast;
-	clk->cpumask		= cpumask_of_cpu(cpu);
+	clk->cpumask		= cpumask_of(cpu);
 
 	clockevents_register_device(clk);
 }

+ 1 - 1
arch/arm/mach-sa1100/time.c

@@ -73,7 +73,6 @@ static struct clock_event_device ckevt_sa1100_osmr0 = {
 	.features	= CLOCK_EVT_FEAT_ONESHOT,
 	.shift		= 32,
 	.rating		= 200,
-	.cpumask	= CPU_MASK_CPU0,
 	.set_next_event	= sa1100_osmr0_set_next_event,
 	.set_mode	= sa1100_osmr0_set_mode,
 };
@@ -110,6 +109,7 @@ static void __init sa1100_timer_init(void)
 		clockevent_delta2ns(0x7fffffff, &ckevt_sa1100_osmr0);
 	ckevt_sa1100_osmr0.min_delta_ns =
 		clockevent_delta2ns(MIN_OSCR_DELTA * 2, &ckevt_sa1100_osmr0) + 1;
+	ckevt_sa1100_osmr0.cpumask = cpumask_of(0);
 
 	cksrc_sa1100_oscr.mult =
 		clocksource_hz2mult(CLOCK_TICK_RATE, cksrc_sa1100_oscr.shift);

+ 1 - 1
arch/arm/mach-versatile/core.c

@@ -1005,7 +1005,7 @@ static void __init versatile_timer_init(void)
 	timer0_clockevent.min_delta_ns =
 		clockevent_delta2ns(0xf, &timer0_clockevent);
 
-	timer0_clockevent.cpumask = cpumask_of_cpu(0);
+	timer0_clockevent.cpumask = cpumask_of(0);
 	clockevents_register_device(&timer0_clockevent);
 }

+ 2 - 2
arch/arm/oprofile/op_model_mpcore.c

@@ -260,10 +260,10 @@ static void em_stop(void)
 static void em_route_irq(int irq, unsigned int cpu)
 {
 	struct irq_desc *desc = irq_desc + irq;
-	cpumask_t mask = cpumask_of_cpu(cpu);
+	const struct cpumask *mask = cpumask_of(cpu);
 
 	spin_lock_irq(&desc->lock);
-	desc->affinity = mask;
+	desc->affinity = *mask;
 	desc->chip->set_affinity(irq, mask);
 	spin_unlock_irq(&desc->lock);
 }

+ 1 - 1
arch/arm/plat-mxc/time.c

@@ -190,7 +190,7 @@ static int __init mxc_clockevent_init(void)
 	clockevent_mxc.min_delta_ns =
 			clockevent_delta2ns(0xff, &clockevent_mxc);
 
-	clockevent_mxc.cpumask = cpumask_of_cpu(0);
+	clockevent_mxc.cpumask = cpumask_of(0);
 
 	clockevents_register_device(&clockevent_mxc);

+ 1 - 1
arch/arm/plat-orion/time.c

@@ -149,7 +149,6 @@ static struct clock_event_device orion_clkevt = {
 	.features	= CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_PERIODIC,
 	.shift		= 32,
 	.rating		= 300,
-	.cpumask	= CPU_MASK_CPU0,
 	.set_next_event	= orion_clkevt_next_event,
 	.set_mode	= orion_clkevt_mode,
 };
@@ -199,5 +198,6 @@ void __init orion_time_init(unsigned int irq, unsigned int tclk)
 	orion_clkevt.mult = div_sc(tclk, NSEC_PER_SEC, orion_clkevt.shift);
 	orion_clkevt.max_delta_ns = clockevent_delta2ns(0xfffffffe, &orion_clkevt);
 	orion_clkevt.min_delta_ns = clockevent_delta2ns(1, &orion_clkevt);
+	orion_clkevt.cpumask = cpumask_of(0);
 	clockevents_register_device(&orion_clkevt);
 }

+ 1 - 1
arch/avr32/kernel/time.c

@@ -106,7 +106,6 @@ static struct clock_event_device comparator = {
 	.features	= CLOCK_EVT_FEAT_ONESHOT,
 	.shift		= 16,
 	.rating		= 50,
-	.cpumask	= CPU_MASK_CPU0,
 	.set_next_event	= comparator_next_event,
 	.set_mode	= comparator_mode,
 };
@@ -134,6 +133,7 @@ void __init time_init(void)
 	comparator.mult = div_sc(counter_hz, NSEC_PER_SEC, comparator.shift);
 	comparator.max_delta_ns = clockevent_delta2ns((u32)~0, &comparator);
 	comparator.min_delta_ns = clockevent_delta2ns(50, &comparator) + 1;
+	comparator.cpumask = cpumask_of(0);
 
 	sysreg_write(COMPARE, 0);
 	timer_irqaction.dev_id = &comparator;

+ 1 - 1
arch/blackfin/kernel/time-ts.c

@@ -162,7 +162,6 @@ static struct clock_event_device clockevent_bfin = {
 	.name		= "bfin_core_timer",
 	.features	= CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
 	.shift		= 32,
-	.cpumask	= CPU_MASK_CPU0,
 	.set_next_event = bfin_timer_set_next_event,
 	.set_mode	= bfin_timer_set_mode,
 };
@@ -193,6 +192,7 @@ static int __init bfin_clockevent_init(void)
 	clockevent_bfin.mult = div_sc(timer_clk, NSEC_PER_SEC, clockevent_bfin.shift);
 	clockevent_bfin.max_delta_ns = clockevent_delta2ns(-1, &clockevent_bfin);
 	clockevent_bfin.min_delta_ns = clockevent_delta2ns(100, &clockevent_bfin);
+	clockevent_bfin.cpumask = cpumask_of(0);
 	clockevents_register_device(&clockevent_bfin);
 
 	return 0;

+ 2 - 2
arch/cris/arch-v32/kernel/irq.c

@@ -325,11 +325,11 @@ static void end_crisv32_irq(unsigned int irq)
 {
 }
 
-void set_affinity_crisv32_irq(unsigned int irq, cpumask_t dest)
+void set_affinity_crisv32_irq(unsigned int irq, const struct cpumask *dest)
 {
 	unsigned long flags;
 	spin_lock_irqsave(&irq_lock, flags);
-	irq_allocations[irq - FIRST_IRQ].mask = dest;
+	irq_allocations[irq - FIRST_IRQ].mask = *dest;
 	spin_unlock_irqrestore(&irq_lock, flags);
 }

+ 0 - 4
arch/cris/arch-v32/kernel/smp.c

@@ -29,11 +29,7 @@
 spinlock_t cris_atomic_locks[] = { [0 ... LOCK_COUNT - 1] = SPIN_LOCK_UNLOCKED};
 
 /* CPU masks */
-cpumask_t cpu_online_map = CPU_MASK_NONE;
-EXPORT_SYMBOL(cpu_online_map);
 cpumask_t phys_cpu_present_map = CPU_MASK_NONE;
-cpumask_t cpu_possible_map;
-EXPORT_SYMBOL(cpu_possible_map);
 EXPORT_SYMBOL(phys_cpu_present_map);
 
 /* Variables used during SMP boot */

+ 0 - 1
arch/cris/include/asm/smp.h

@@ -4,7 +4,6 @@
 #include <linux/cpumask.h>
 
 extern cpumask_t phys_cpu_present_map;
-extern cpumask_t cpu_possible_map;
 
 #define raw_smp_processor_id() (current_thread_info()->cpu)

+ 1 - 1
arch/ia64/hp/sim/hpsim_irq.c

@@ -22,7 +22,7 @@ hpsim_irq_noop (unsigned int irq)
 }
 
 static void
-hpsim_set_affinity_noop (unsigned int a, cpumask_t b)
+hpsim_set_affinity_noop(unsigned int a, const struct cpumask *b)
 {
 }

+ 0 - 1
arch/ia64/include/asm/smp.h

@@ -57,7 +57,6 @@ extern struct smp_boot_data {
 
 extern char no_int_routing __devinitdata;
 
-extern cpumask_t cpu_online_map;
 extern cpumask_t cpu_core_map[NR_CPUS];
 DECLARE_PER_CPU(cpumask_t, cpu_sibling_map);
 extern int smp_num_siblings;

+ 0 - 2
arch/ia64/include/asm/topology.h

@@ -55,7 +55,6 @@
 void build_cpu_to_node_map(void);
 
 #define SD_CPU_INIT (struct sched_domain) {		\
-	.span			= CPU_MASK_NONE,	\
 	.parent			= NULL,			\
 	.child			= NULL,			\
 	.groups			= NULL,			\
@@ -80,7 +79,6 @@ void build_cpu_to_node_map(void);
 
 /* sched_domains SD_NODE_INIT for IA64 NUMA machines */
 #define SD_NODE_INIT (struct sched_domain) {		\
-	.span			= CPU_MASK_NONE,	\
 	.parent			= NULL,			\
 	.child			= NULL,			\
 	.groups			= NULL,			\

+ 6 - 6
arch/ia64/kernel/iosapic.c

@@ -330,25 +330,25 @@ unmask_irq (unsigned int irq)
 
 
 static void
-iosapic_set_affinity (unsigned int irq, cpumask_t mask)
+iosapic_set_affinity(unsigned int irq, const struct cpumask *mask)
 {
 #ifdef CONFIG_SMP
 	u32 high32, low32;
-	int dest, rte_index;
+	int cpu, dest, rte_index;
 	int redir = (irq & IA64_IRQ_REDIRECTED) ? 1 : 0;
 	struct iosapic_rte_info *rte;
 	struct iosapic *iosapic;
 
 	irq &= (~IA64_IRQ_REDIRECTED);
 
-	cpus_and(mask, mask, cpu_online_map);
-	if (cpus_empty(mask))
+	cpu = cpumask_first_and(cpu_online_mask, mask);
+	if (cpu >= nr_cpu_ids)
 		return;
 
-	if (irq_prepare_move(irq, first_cpu(mask)))
+	if (irq_prepare_move(irq, cpu))
 		return;
 
-	dest = cpu_physical_id(first_cpu(mask));
+	dest = cpu_physical_id(cpu);
 
 	if (!iosapic_intr_info[irq].count)
 		return;			/* not an IOSAPIC interrupt */

+ 4 - 5
arch/ia64/kernel/irq.c

@@ -133,7 +133,6 @@ unsigned int vectors_in_migration[NR_IRQS];
  */
 static void migrate_irqs(void)
 {
-	cpumask_t	mask;
 	irq_desc_t *desc;
 	int 		irq, new_cpu;
 
@@ -152,15 +151,14 @@ static void migrate_irqs(void)
 		if (desc->status == IRQ_PER_CPU)
 			continue;
 
-		cpus_and(mask, irq_desc[irq].affinity, cpu_online_map);
-		if (any_online_cpu(mask) == NR_CPUS) {
+		if (cpumask_any_and(&irq_desc[irq].affinity, cpu_online_mask)
+		    >= nr_cpu_ids) {
 			/*
 			 * Save it for phase 2 processing
 			 */
 			vectors_in_migration[irq] = irq;
 
 			new_cpu = any_online_cpu(cpu_online_map);
-			mask = cpumask_of_cpu(new_cpu);
 
 			/*
 			 * Al three are essential, currently WARN_ON.. maybe panic?
@@ -168,7 +166,8 @@ static void migrate_irqs(void)
 			if (desc->chip && desc->chip->disable &&
 				desc->chip->enable && desc->chip->set_affinity) {
 				desc->chip->disable(irq);
-				desc->chip->set_affinity(irq, mask);
+				desc->chip->set_affinity(irq,
+							 cpumask_of(new_cpu));
 				desc->chip->enable(irq);
 			} else {
 				WARN_ON((!(desc->chip) || !(desc->chip->disable) ||

+ 6 - 6
arch/ia64/kernel/msi_ia64.c

@@ -49,11 +49,12 @@
 static struct irq_chip	ia64_msi_chip;
 
 #ifdef CONFIG_SMP
-static void ia64_set_msi_irq_affinity(unsigned int irq, cpumask_t cpu_mask)
+static void ia64_set_msi_irq_affinity(unsigned int irq,
+				      const cpumask_t *cpu_mask)
 {
 	struct msi_msg msg;
 	u32 addr, data;
-	int cpu = first_cpu(cpu_mask);
+	int cpu = first_cpu(*cpu_mask);
 
 	if (!cpu_online(cpu))
 		return;
@@ -166,12 +167,11 @@ void arch_teardown_msi_irq(unsigned int irq)
 
 #ifdef CONFIG_DMAR
 #ifdef CONFIG_SMP
-static void dmar_msi_set_affinity(unsigned int irq, cpumask_t mask)
+static void dmar_msi_set_affinity(unsigned int irq, const struct cpumask *mask)
 {
 	struct irq_cfg *cfg = irq_cfg + irq;
 	struct msi_msg msg;
-	int cpu = first_cpu(mask);
-
+	int cpu = cpumask_first(mask);
 
 	if (!cpu_online(cpu))
 		return;
@@ -187,7 +187,7 @@ static void dmar_msi_set_affinity(unsigned int irq, cpumask_t mask)
 	msg.address_lo |= MSI_ADDR_DESTID_CPU(cpu_physical_id(cpu));
 
 	dmar_msi_write(irq, &msg);
-	irq_desc[irq].affinity = mask;
+	irq_desc[irq].affinity = *mask;
 }
 #endif /* CONFIG_SMP */

+ 2 - 8
arch/ia64/kernel/smpboot.c

@@ -131,12 +131,6 @@ struct task_struct *task_for_booting_cpu;
  */
 DEFINE_PER_CPU(int, cpu_state);
 
-/* Bitmasks of currently online, and possible CPUs */
-cpumask_t cpu_online_map;
-EXPORT_SYMBOL(cpu_online_map);
-cpumask_t cpu_possible_map = CPU_MASK_NONE;
-EXPORT_SYMBOL(cpu_possible_map);
-
 cpumask_t cpu_core_map[NR_CPUS] __cacheline_aligned;
 EXPORT_SYMBOL(cpu_core_map);
 DEFINE_PER_CPU_SHARED_ALIGNED(cpumask_t, cpu_sibling_map);
@@ -688,7 +682,7 @@ int migrate_platform_irqs(unsigned int cpu)
 {
 	int new_cpei_cpu;
 	irq_desc_t *desc = NULL;
-	cpumask_t 	mask;
+	const struct cpumask *mask;
 	int 		retval = 0;
 
 	/*
@@ -701,7 +695,7 @@ int migrate_platform_irqs(unsigned int cpu)
 			 * Now re-target the CPEI to a different processor
 			 */
 			new_cpei_cpu = any_online_cpu(cpu_online_map);
-			mask = cpumask_of_cpu(new_cpei_cpu);
+			mask = cpumask_of(new_cpei_cpu);
 			set_cpei_target_cpu(new_cpei_cpu);
 			desc = irq_desc + ia64_cpe_irq;
 			/*

+ 1 - 1
arch/ia64/kernel/topology.c

@@ -219,7 +219,7 @@ static ssize_t show_shared_cpu_map(struct cache_info *this_leaf, char *buf)
 	cpumask_t shared_cpu_map;
 
 	cpus_and(shared_cpu_map, this_leaf->shared_cpu_map, cpu_online_map);
-	len = cpumask_scnprintf(buf, NR_CPUS+1, shared_cpu_map);
+	len = cpumask_scnprintf(buf, NR_CPUS+1, &shared_cpu_map);
 	len += sprintf(buf+len, "\n");
 	return len;
 }

+ 3 - 3
arch/ia64/sn/kernel/irq.c

@@ -227,14 +227,14 @@ finish_up:
 	return new_irq_info;
 }
 
-static void sn_set_affinity_irq(unsigned int irq, cpumask_t mask)
+static void sn_set_affinity_irq(unsigned int irq, const struct cpumask *mask)
 {
 	struct sn_irq_info *sn_irq_info, *sn_irq_info_safe;
 	nasid_t nasid;
 	int slice;
 
-	nasid = cpuid_to_nasid(first_cpu(mask));
-	slice = cpuid_to_slice(first_cpu(mask));
+	nasid = cpuid_to_nasid(cpumask_first(mask));
+	slice = cpuid_to_slice(cpumask_first(mask));
 
 	list_for_each_entry_safe(sn_irq_info, sn_irq_info_safe,
 				 sn_irq_lh[irq], list)

+ 4 - 3
arch/ia64/sn/kernel/msi_sn.c

@@ -151,7 +151,8 @@ int sn_setup_msi_irq(struct pci_dev *pdev, struct msi_desc *entry)
 }
 
 #ifdef CONFIG_SMP
-static void sn_set_msi_irq_affinity(unsigned int irq, cpumask_t cpu_mask)
+static void sn_set_msi_irq_affinity(unsigned int irq,
+				    const struct cpumask *cpu_mask)
 {
 	struct msi_msg msg;
 	int slice;
@@ -164,7 +165,7 @@ static void sn_set_msi_irq_affinity(unsigned int irq, cpumask_t cpu_mask)
 	struct sn_pcibus_provider *provider;
 	unsigned int cpu;
 
-	cpu = first_cpu(cpu_mask);
+	cpu = cpumask_first(cpu_mask);
 	sn_irq_info = sn_msi_info[irq].sn_irq_info;
 	if (sn_irq_info == NULL || sn_irq_info->irq_int_bit >= 0)
 		return;
@@ -204,7 +205,7 @@ static void sn_set_msi_irq_affinity(unsigned int irq, cpumask_t cpu_mask)
 	msg.address_lo = (u32)(bus_addr & 0x00000000ffffffff);
 
 	write_msi_msg(irq, &msg);
-	irq_desc[irq].affinity = cpu_mask;
+	irq_desc[irq].affinity = *cpu_mask;
 }
 #endif /* CONFIG_SMP */

+ 1 - 0
arch/m32r/Kconfig

@@ -10,6 +10,7 @@ config M32R
 	default y
 	select HAVE_IDE
 	select HAVE_OPROFILE
+	select INIT_ALL_POSSIBLE
 
 config SBUS
 	bool
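
INIT_ALL_POSSIBLE, which this series introduces, moves the old m32r behaviour (cpu_possible_map preset to all CPUs) into generic code: an architecture that selects it gets the map initialized to CPU_MASK_ALL instead of defining its own copy. Roughly, the core definition becomes something like the following (a sketch of the series' approach, not quoted from this merge):

	#ifdef CONFIG_INIT_ALL_POSSIBLE
	cpumask_t cpu_possible_map __read_mostly = CPU_MASK_ALL;
	#else
	cpumask_t cpu_possible_map __read_mostly;
	#endif

which is why the arch-local cpu_possible_map definition is deleted in the smpboot.c hunk below.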

+ 0 - 6
arch/m32r/kernel/smpboot.c

@@ -73,17 +73,11 @@ static unsigned int bsp_phys_id = -1;
 /* Bitmask of physically existing CPUs */
 physid_mask_t phys_cpu_present_map;
 
-/* Bitmask of currently online CPUs */
-cpumask_t cpu_online_map;
-EXPORT_SYMBOL(cpu_online_map);
-
 cpumask_t cpu_bootout_map;
 cpumask_t cpu_bootin_map;
 static cpumask_t cpu_callin_map;
 cpumask_t cpu_callout_map;
 EXPORT_SYMBOL(cpu_callout_map);
-cpumask_t cpu_possible_map = CPU_MASK_ALL;
-EXPORT_SYMBOL(cpu_possible_map);
 
 /* Per CPU bogomips and other parameters */
 struct cpuinfo_m32r cpu_data[NR_CPUS] __cacheline_aligned;

+ 1 - 1
arch/m68knommu/platform/coldfire/pit.c

@@ -156,7 +156,7 @@ void hw_timer_init(void)
 {
 	u32 imr;
 
-	cf_pit_clockevent.cpumask = cpumask_of_cpu(smp_processor_id());
+	cf_pit_clockevent.cpumask = cpumask_of(smp_processor_id());
 	cf_pit_clockevent.mult = div_sc(FREQ, NSEC_PER_SEC, 32);
 	cf_pit_clockevent.max_delta_ns =
 		clockevent_delta2ns(0xFFFF, &cf_pit_clockevent);

+ 2 - 1
arch/mips/include/asm/irq.h

@@ -49,7 +49,8 @@ static inline void smtc_im_ack_irq(unsigned int irq)
 #ifdef CONFIG_MIPS_MT_SMTC_IRQAFF
 #include <linux/cpumask.h>
 
-extern void plat_set_irq_affinity(unsigned int irq, cpumask_t affinity);
+extern void plat_set_irq_affinity(unsigned int irq,
+				  const struct cpumask *affinity);
 extern void smtc_forward_irq(unsigned int irq);
 
 /*

+ 0 - 1
arch/mips/include/asm/mach-ip27/topology.h

@@ -37,7 +37,6 @@ extern unsigned char __node_distances[MAX_COMPACT_NODES][MAX_COMPACT_NODES];
 
 /* sched_domains SD_NODE_INIT for SGI IP27 machines */
 #define SD_NODE_INIT (struct sched_domain) {		\
-	.span			= CPU_MASK_NONE,	\
 	.parent			= NULL,			\
 	.child			= NULL,			\
 	.groups			= NULL,			\

+ 0 - 3
arch/mips/include/asm/smp.h

@@ -38,9 +38,6 @@ extern int __cpu_logical_map[NR_CPUS];
 #define SMP_RESCHEDULE_YOURSELF	0x1	/* XXX braindead */
 #define SMP_CALL_FUNCTION	0x2
 
-extern cpumask_t phys_cpu_present_map;
-#define cpu_possible_map	phys_cpu_present_map
-
 extern void asmlinkage smp_bootstrap(void);
 
 /*

+ 1 - 1
arch/mips/jazz/irq.c

@@ -146,7 +146,7 @@ void __init plat_time_init(void)
 
 	BUG_ON(HZ != 100);
 
-	cd->cpumask             = cpumask_of_cpu(cpu);
+	cd->cpumask             = cpumask_of(cpu);
 	clockevents_register_device(cd);
 	action->dev_id = cd;
 	setup_irq(JAZZ_TIMER_IRQ, action);

+ 2 - 2
arch/mips/kernel/cevt-bcm1480.c

@@ -126,7 +126,7 @@ void __cpuinit sb1480_clockevent_init(void)
 	cd->min_delta_ns	= clockevent_delta2ns(2, cd);
 	cd->rating		= 200;
 	cd->irq			= irq;
-	cd->cpumask		= cpumask_of_cpu(cpu);
+	cd->cpumask		= cpumask_of(cpu);
 	cd->set_next_event	= sibyte_next_event;
 	cd->set_mode		= sibyte_set_mode;
 	clockevents_register_device(cd);
@@ -148,6 +148,6 @@ void __cpuinit sb1480_clockevent_init(void)
 	action->name	= name;
 	action->dev_id	= cd;
 
-	irq_set_affinity(irq, cpumask_of_cpu(cpu));
+	irq_set_affinity(irq, cpumask_of(cpu));
 	setup_irq(irq, action);
 }

+ 1 - 1
arch/mips/kernel/cevt-ds1287.c

@@ -88,7 +88,6 @@ static void ds1287_event_handler(struct clock_event_device *dev)
 static struct clock_event_device ds1287_clockevent = {
 	.name		= "ds1287",
 	.features	= CLOCK_EVT_FEAT_PERIODIC,
-	.cpumask	= CPU_MASK_CPU0,
 	.set_next_event	= ds1287_set_next_event,
 	.set_mode	= ds1287_set_mode,
 	.event_handler	= ds1287_event_handler,
@@ -122,6 +121,7 @@ int __init ds1287_clockevent_init(int irq)
 	clockevent_set_clock(cd, 32768);
 	cd->max_delta_ns = clockevent_delta2ns(0x7fffffff, cd);
 	cd->min_delta_ns = clockevent_delta2ns(0x300, cd);
+	cd->cpumask = cpumask_of(0);
 
 	clockevents_register_device(&ds1287_clockevent);

+ 1 - 1
arch/mips/kernel/cevt-gt641xx.c

@@ -96,7 +96,6 @@ static void gt641xx_timer0_event_handler(struct clock_event_device *dev)
 static struct clock_event_device gt641xx_timer0_clockevent = {
 	.name		= "gt641xx-timer0",
 	.features	= CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
-	.cpumask	= CPU_MASK_CPU0,
 	.irq		= GT641XX_TIMER0_IRQ,
 	.set_next_event	= gt641xx_timer0_set_next_event,
 	.set_mode	= gt641xx_timer0_set_mode,
@@ -132,6 +131,7 @@ static int __init gt641xx_timer0_clockevent_init(void)
 	clockevent_set_clock(cd, gt641xx_base_clock);
 	cd->max_delta_ns = clockevent_delta2ns(0x7fffffff, cd);
 	cd->min_delta_ns = clockevent_delta2ns(0x300, cd);
+	cd->cpumask = cpumask_of(0);
 
 	clockevents_register_device(&gt641xx_timer0_clockevent);

+ 1 - 1
arch/mips/kernel/cevt-r4k.c

@@ -195,7 +195,7 @@ int __cpuinit mips_clockevent_init(void)
 
 	cd->rating		= 300;
 	cd->irq			= irq;
-	cd->cpumask		= cpumask_of_cpu(cpu);
+	cd->cpumask		= cpumask_of(cpu);
 	cd->set_next_event	= mips_next_event;
 	cd->set_mode		= mips_set_clock_mode;
 	cd->event_handler	= mips_event_handler;

+ 2 - 2
arch/mips/kernel/cevt-sb1250.c

@@ -125,7 +125,7 @@ void __cpuinit sb1250_clockevent_init(void)
 	cd->min_delta_ns	= clockevent_delta2ns(2, cd);
 	cd->rating		= 200;
 	cd->irq			= irq;
-	cd->cpumask		= cpumask_of_cpu(cpu);
+	cd->cpumask		= cpumask_of(cpu);
 	cd->set_next_event	= sibyte_next_event;
 	cd->set_mode		= sibyte_set_mode;
 	clockevents_register_device(cd);
@@ -147,6 +147,6 @@ void __cpuinit sb1250_clockevent_init(void)
 	action->name	= name;
 	action->dev_id	= cd;
 
-	irq_set_affinity(irq, cpumask_of_cpu(cpu));
+	irq_set_affinity(irq, cpumask_of(cpu));
 	setup_irq(irq, action);
 }

+ 1 - 1
arch/mips/kernel/cevt-smtc.c

@@ -292,7 +292,7 @@ int __cpuinit mips_clockevent_init(void)
 
 	cd->rating		= 300;
 	cd->irq			= irq;
-	cd->cpumask		= cpumask_of_cpu(cpu);
+	cd->cpumask		= cpumask_of(cpu);
 	cd->set_next_event	= mips_next_event;
 	cd->set_mode		= mips_set_clock_mode;
 	cd->event_handler	= mips_event_handler;

+ 1 - 1
arch/mips/kernel/cevt-txx9.c

@@ -112,7 +112,6 @@ static struct clock_event_device txx9tmr_clock_event_device = {
 	.name		= "TXx9",
 	.features	= CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
 	.rating		= 200,
-	.cpumask	= CPU_MASK_CPU0,
 	.set_mode	= txx9tmr_set_mode,
 	.set_next_event	= txx9tmr_set_next_event,
 };
@@ -150,6 +149,7 @@ void __init txx9_clockevent_init(unsigned long baseaddr, int irq,
 		clockevent_delta2ns(0xffffffff >> (32 - TXX9_TIMER_BITS), cd);
 	cd->min_delta_ns = clockevent_delta2ns(0xf, cd);
 	cd->irq = irq;
+	cd->cpumask = cpumask_of(0),
 	clockevents_register_device(cd);
 	setup_irq(irq, &txx9tmr_irq);
 	printk(KERN_INFO "TXx9: clockevent device at 0x%lx, irq %d\n",

+ 1 - 1
arch/mips/kernel/i8253.c

@@ -115,7 +115,7 @@ void __init setup_pit_timer(void)
 	 * Start pit with the boot cpu mask and make it global after the
 	 * IO_APIC has been initialized.
 	 */
-	cd->cpumask = cpumask_of_cpu(cpu);
+	cd->cpumask = cpumask_of(cpu);
 	clockevent_set_clock(cd, CLOCK_TICK_RATE);
 	cd->max_delta_ns = clockevent_delta2ns(0x7FFF, cd);
 	cd->min_delta_ns = clockevent_delta2ns(0xF, cd);

+ 3 - 3
arch/mips/kernel/irq-gic.c

@@ -155,7 +155,7 @@ static void gic_unmask_irq(unsigned int irq)
 
 static DEFINE_SPINLOCK(gic_lock);
 
-static void gic_set_affinity(unsigned int irq, cpumask_t cpumask)
+static void gic_set_affinity(unsigned int irq, const struct cpumask *cpumask)
 {
 	cpumask_t	tmp = CPU_MASK_NONE;
 	unsigned long	flags;
@@ -164,7 +164,7 @@ static void gic_set_affinity(unsigned int irq, cpumask_t cpumask)
 	pr_debug(KERN_DEBUG "%s called\n", __func__);
 	irq -= _irqbase;
 
-	cpus_and(tmp, cpumask, cpu_online_map);
+	cpumask_and(&tmp, cpumask, cpu_online_mask);
 	if (cpus_empty(tmp))
 		return;
 
@@ -187,7 +187,7 @@ static void gic_set_affinity(unsigned int irq, cpumask_t cpumask)
 		set_bit(irq, pcpu_masks[first_cpu(tmp)].pcpu_mask);
 
 	}
-	irq_desc[irq].affinity = cpumask;
+	irq_desc[irq].affinity = *cpumask;
 	spin_unlock_irqrestore(&gic_lock, flags);
 
 }

+ 3 - 3
arch/mips/kernel/smp-cmp.c

@@ -51,10 +51,10 @@ static int __init allowcpus(char *str)
 	int len;
 
 	cpus_clear(cpu_allow_map);
-	if (cpulist_parse(str, cpu_allow_map) == 0) {
+	if (cpulist_parse(str, &cpu_allow_map) == 0) {
 		cpu_set(0, cpu_allow_map);
 		cpus_and(cpu_possible_map, cpu_possible_map, cpu_allow_map);
-		len = cpulist_scnprintf(buf, sizeof(buf)-1, cpu_possible_map);
+		len = cpulist_scnprintf(buf, sizeof(buf)-1, &cpu_possible_map);
 		buf[len] = '\0';
 		pr_debug("Allowable CPUs: %s\n", buf);
 		return 1;
@@ -226,7 +226,7 @@ void __init cmp_smp_setup(void)
 
 	for (i = 1; i < NR_CPUS; i++) {
 		if (amon_cpu_avail(i)) {
-			cpu_set(i, phys_cpu_present_map);
+			cpu_set(i, cpu_possible_map);
 			__cpu_number_map[i]	= ++ncpu;
 			__cpu_logical_map[ncpu]	= i;
 		}

+ 1 - 1
arch/mips/kernel/smp-mt.c

@@ -70,7 +70,7 @@ static unsigned int __init smvp_vpe_init(unsigned int tc, unsigned int mvpconf0,
 		write_vpe_c0_vpeconf0(tmp);
 
 		/* Record this as available CPU */
-		cpu_set(tc, phys_cpu_present_map);
+		cpu_set(tc, cpu_possible_map);
 		__cpu_number_map[tc]	= ++ncpu;
 		__cpu_logical_map[ncpu]	= tc;
 	}

+ 1 - 6
arch/mips/kernel/smp.c

@@ -44,15 +44,10 @@
 #include <asm/mipsmtregs.h>
 #endif /* CONFIG_MIPS_MT_SMTC */
 
-cpumask_t phys_cpu_present_map;		/* Bitmask of available CPUs */
 volatile cpumask_t cpu_callin_map;	/* Bitmask of started secondaries */
-cpumask_t cpu_online_map;		/* Bitmask of currently online CPUs */
 int __cpu_number_map[NR_CPUS];		/* Map physical to logical */
 int __cpu_logical_map[NR_CPUS];		/* Map logical to physical */
 
-EXPORT_SYMBOL(phys_cpu_present_map);
-EXPORT_SYMBOL(cpu_online_map);
-
 extern void cpu_idle(void);
 
 /* Number of TCs (or siblings in Intel speak) per CPU core */
@@ -195,7 +190,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
 /* preload SMP state for boot cpu */
 void __devinit smp_prepare_boot_cpu(void)
 {
-	cpu_set(0, phys_cpu_present_map);
+	cpu_set(0, cpu_possible_map);
 	cpu_set(0, cpu_online_map);
 	cpu_set(0, cpu_callin_map);
 }

+ 3 - 3
arch/mips/kernel/smtc.c

@@ -290,7 +290,7 @@ static void smtc_configure_tlb(void)
  * possibly leave some TCs/VPEs as "slave" processors.
  *
  * Use c0_MVPConf0 to find out how many TCs are available, setting up
- * phys_cpu_present_map and the logical/physical mappings.
+ * cpu_possible_map and the logical/physical mappings.
  */
 
 int __init smtc_build_cpu_map(int start_cpu_slot)
@@ -304,7 +304,7 @@ int __init smtc_build_cpu_map(int start_cpu_slot)
 	 */
 	ntcs = ((read_c0_mvpconf0() & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT) + 1;
 	for (i=start_cpu_slot; i<NR_CPUS && i<ntcs; i++) {
-		cpu_set(i, phys_cpu_present_map);
+		cpu_set(i, cpu_possible_map);
 		__cpu_number_map[i] = i;
 		__cpu_logical_map[i] = i;
 	}
@@ -521,7 +521,7 @@ void smtc_prepare_cpus(int cpus)
 	 * Pull any physically present but unused TCs out of circulation.
 	 */
 	while (tc < (((val & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT) + 1)) {
-		cpu_clear(tc, phys_cpu_present_map);
+		cpu_clear(tc, cpu_possible_map);
 		cpu_clear(tc, cpu_present_map);
 		tc++;
 	}

+ 3 - 3
arch/mips/mti-malta/malta-smtc.c

@@ -114,9 +114,9 @@ struct plat_smp_ops msmtc_smp_ops = {
  */
 
 
-void plat_set_irq_affinity(unsigned int irq, cpumask_t affinity)
+void plat_set_irq_affinity(unsigned int irq, const struct cpumask *affinity)
 {
-	cpumask_t tmask = affinity;
+	cpumask_t tmask = *affinity;
 	int cpu = 0;
 	void smtc_set_irq_affinity(unsigned int irq, cpumask_t aff);
 
@@ -139,7 +139,7 @@ void plat_set_irq_affinity(unsigned int irq, cpumask_t affinity)
 	 * be made to forward to an offline "CPU".
 	 */
 
-	for_each_cpu_mask(cpu, affinity) {
+	for_each_cpu(cpu, affinity) {
 		if ((cpu_data[cpu].vpe_id != 0) || !cpu_online(cpu))
 			cpu_clear(cpu, tmask);
 	}
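
plat_set_irq_affinity() above illustrates the handler-side contract: the caller's mask is now const, so a handler that needs to edit it copies it into a local cpumask_t and walks the original with for_each_cpu(). A sketch with an invented name (demo_set_affinity), assuming cpu_online() as the filter:

#include <linux/cpumask.h>

static void demo_set_affinity(unsigned int irq, const struct cpumask *affinity)
{
	cpumask_t tmask = *affinity;	/* private, writable copy */
	int cpu;

	/* for_each_cpu() iterates a mask by pointer; edit only the copy */
	for_each_cpu(cpu, affinity)
		if (!cpu_online(cpu))
			cpu_clear(cpu, tmask);
}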

+ 1 - 0
arch/mips/nxp/pnx8550/common/time.c

@@ -102,6 +102,7 @@ __init void plat_time_init(void)
 	unsigned int p;
 	unsigned int pow2p;
 
+	pnx8xxx_clockevent.cpumask = cpu_none_mask;
 	clockevents_register_device(&pnx8xxx_clockevent);
 	clocksource_register(&pnx_clocksource);
 

+ 3 - 3
arch/mips/pmc-sierra/yosemite/smp.c

@@ -141,7 +141,7 @@ static void __cpuinit yos_boot_secondary(int cpu, struct task_struct *idle)
 }
 
 /*
- * Detect available CPUs, populate phys_cpu_present_map before smp_init
+ * Detect available CPUs, populate cpu_possible_map before smp_init
  *
  * We don't want to start the secondary CPU yet nor do we have a nice probing
  * feature in PMON so we just assume presence of the secondary core.
@@ -150,10 +150,10 @@ static void __init yos_smp_setup(void)
 {
 	int i;
 
-	cpus_clear(phys_cpu_present_map);
+	cpus_clear(cpu_possible_map);
 
 	for (i = 0; i < 2; i++) {
-		cpu_set(i, phys_cpu_present_map);
+		cpu_set(i, cpu_possible_map);
 		__cpu_number_map[i]	= i;
 		__cpu_logical_map[i]	= i;
 	}

+ 1 - 1
arch/mips/sgi-ip27/ip27-smp.c

@@ -76,7 +76,7 @@ static int do_cpumask(cnodeid_t cnode, nasid_t nasid, int highest)
 			/* Only let it join in if it's marked enabled */
 			if ((acpu->cpu_info.flags & KLINFO_ENABLE) &&
 			    (tot_cpus_found != NR_CPUS)) {
-				cpu_set(cpuid, phys_cpu_present_map);
+				cpu_set(cpuid, cpu_possible_map);
 				alloc_cpupda(cpuid, tot_cpus_found);
 				cpus_found++;
 				tot_cpus_found++;

+ 1 - 1
arch/mips/sgi-ip27/ip27-timer.c

@@ -134,7 +134,7 @@ void __cpuinit hub_rt_clock_event_init(void)
 	cd->min_delta_ns        = clockevent_delta2ns(0x300, cd);
 	cd->rating		= 200;
 	cd->irq			= irq;
-	cd->cpumask		= cpumask_of_cpu(cpu);
+	cd->cpumask		= cpumask_of(cpu);
 	cd->set_next_event	= rt_next_event;
 	cd->set_mode		= rt_set_mode;
 	clockevents_register_device(cd);
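
cpumask_of_cpu(cpu) built a full cpumask_t on the stack, which is exactly what the NR_CPUS=4096 work is trying to eliminate; cpumask_of(cpu) instead returns a const pointer into a shared constant table, and clock_event_device.cpumask becomes a pointer to match. A sketch, assuming dev is a per-cpu clockevent being initialised on the CPU it will serve:

#include <linux/clockchips.h>
#include <linux/smp.h>

static void demo_register_clockevent(struct clock_event_device *dev)
{
	/* no stack copy: just point at the per-cpu constant mask */
	dev->cpumask = cpumask_of(smp_processor_id());
	clockevents_register_device(dev);
}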

+ 4 - 4
arch/mips/sibyte/bcm1480/irq.c

@@ -50,7 +50,7 @@ static void enable_bcm1480_irq(unsigned int irq);
 static void disable_bcm1480_irq(unsigned int irq);
 static void ack_bcm1480_irq(unsigned int irq);
 #ifdef CONFIG_SMP
-static void bcm1480_set_affinity(unsigned int irq, cpumask_t mask);
+static void bcm1480_set_affinity(unsigned int irq, const struct cpumask *mask);
 #endif
 
 #ifdef CONFIG_PCI
@@ -109,7 +109,7 @@ void bcm1480_unmask_irq(int cpu, int irq)
 }
 
 #ifdef CONFIG_SMP
-static void bcm1480_set_affinity(unsigned int irq, cpumask_t mask)
+static void bcm1480_set_affinity(unsigned int irq, const struct cpumask *mask)
 {
 	int i = 0, old_cpu, cpu, int_on, k;
 	u64 cur_ints;
@@ -117,11 +117,11 @@ static void bcm1480_set_affinity(unsigned int irq, cpumask_t mask)
 	unsigned long flags;
 	unsigned int irq_dirty;
 
-	if (cpus_weight(mask) != 1) {
+	if (cpumask_weight(mask) != 1) {
 		printk("attempted to set irq affinity for irq %d to multiple CPUs\n", irq);
 		return;
 	}
-	i = first_cpu(mask);
+	i = cpumask_first(mask);
 
 	/* Convert logical CPU to physical CPU */
 	cpu = cpu_logical_map(i);
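
The old cpus_weight()/first_cpu() pair took the mask by value; their replacements take pointers, as the bcm1480 hunk shows. A sketch of the single-target check it performs, with a hypothetical function name:

#include <linux/cpumask.h>

/* Return the only CPU in *mask, or -1 if the mask is not a singleton. */
static int demo_single_cpu(const struct cpumask *mask)
{
	if (cpumask_weight(mask) != 1)	/* population count of the mask */
		return -1;
	return cpumask_first(mask);	/* lowest set bit */
}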

+ 4 - 4
arch/mips/sibyte/bcm1480/smp.c

@@ -136,7 +136,7 @@ static void __cpuinit bcm1480_boot_secondary(int cpu, struct task_struct *idle)
 
 /*
  * Use CFE to find out how many CPUs are available, setting up
- * phys_cpu_present_map and the logical/physical mappings.
+ * cpu_possible_map and the logical/physical mappings.
  * XXXKW will the boot CPU ever not be physical 0?
  *
  * Common setup before any secondaries are started
@@ -145,14 +145,14 @@ static void __init bcm1480_smp_setup(void)
 {
 	int i, num;
 
-	cpus_clear(phys_cpu_present_map);
-	cpu_set(0, phys_cpu_present_map);
+	cpus_clear(cpu_possible_map);
+	cpu_set(0, cpu_possible_map);
 	__cpu_number_map[0] = 0;
 	__cpu_logical_map[0] = 0;
 
 	for (i = 1, num = 0; i < NR_CPUS; i++) {
 		if (cfe_cpu_stop(i) == 0) {
-			cpu_set(i, phys_cpu_present_map);
+			cpu_set(i, cpu_possible_map);
 			__cpu_number_map[i] = ++num;
 			__cpu_logical_map[num] = i;
 		}

+ 4 - 4
arch/mips/sibyte/sb1250/irq.c

@@ -50,7 +50,7 @@ static void enable_sb1250_irq(unsigned int irq);
 static void disable_sb1250_irq(unsigned int irq);
 static void ack_sb1250_irq(unsigned int irq);
 #ifdef CONFIG_SMP
-static void sb1250_set_affinity(unsigned int irq, cpumask_t mask);
+static void sb1250_set_affinity(unsigned int irq, const struct cpumask *mask);
 #endif
 
 #ifdef CONFIG_SIBYTE_HAS_LDT
@@ -103,16 +103,16 @@ void sb1250_unmask_irq(int cpu, int irq)
 }
 
 #ifdef CONFIG_SMP
-static void sb1250_set_affinity(unsigned int irq, cpumask_t mask)
+static void sb1250_set_affinity(unsigned int irq, const struct cpumask *mask)
 {
 	int i = 0, old_cpu, cpu, int_on;
 	u64 cur_ints;
 	struct irq_desc *desc = irq_desc + irq;
 	unsigned long flags;
 
-	i = first_cpu(mask);
+	i = cpumask_first(mask);
 
-	if (cpus_weight(mask) > 1) {
+	if (cpumask_weight(mask) > 1) {
 		printk("attempted to set irq affinity for irq %d to multiple CPUs\n", irq);
 		return;
 	}

+ 4 - 4
arch/mips/sibyte/sb1250/smp.c

@@ -124,7 +124,7 @@ static void __cpuinit sb1250_boot_secondary(int cpu, struct task_struct *idle)
 
 /*
  * Use CFE to find out how many CPUs are available, setting up
- * phys_cpu_present_map and the logical/physical mappings.
+ * cpu_possible_map and the logical/physical mappings.
  * XXXKW will the boot CPU ever not be physical 0?
  *
  * Common setup before any secondaries are started
@@ -133,14 +133,14 @@ static void __init sb1250_smp_setup(void)
 {
 	int i, num;
 
-	cpus_clear(phys_cpu_present_map);
-	cpu_set(0, phys_cpu_present_map);
+	cpus_clear(cpu_possible_map);
+	cpu_set(0, cpu_possible_map);
 	__cpu_number_map[0] = 0;
 	__cpu_logical_map[0] = 0;
 
 	for (i = 1, num = 0; i < NR_CPUS; i++) {
 		if (cfe_cpu_stop(i) == 0) {
-			cpu_set(i, phys_cpu_present_map);
+			cpu_set(i, cpu_possible_map);
 			__cpu_number_map[i] = ++num;
 			__cpu_logical_map[num] = i;
 		}

+ 1 - 1
arch/mips/sni/time.c

@@ -80,7 +80,7 @@ static void __init sni_a20r_timer_setup(void)
 	struct irqaction *action = &a20r_irqaction;
 	unsigned int cpu = smp_processor_id();
 
-	cd->cpumask             = cpumask_of_cpu(cpu);
+	cd->cpumask             = cpumask_of(cpu);
 	clockevents_register_device(cd);
 	action->dev_id = cd;
 	setup_irq(SNI_A20R_IRQ_TIMER, &a20r_irqaction);

+ 1 - 0
arch/parisc/Kconfig

@@ -11,6 +11,7 @@ config PARISC
 	select HAVE_OPROFILE
 	select RTC_CLASS
 	select RTC_DRV_PARISC
+	select INIT_ALL_POSSIBLE
 	help
 	  The PA-RISC microprocessor is designed by Hewlett-Packard and used
 	  in many of their workstations & servers (HP9000 700 and 800 series,
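
INIT_ALL_POSSIBLE exists because the now-common definition of cpu_possible_map starts empty, while parisc and s390 historically started with every CPU possible and pruned later. Roughly what the select toggles in common code, paraphrased rather than copied verbatim from kernel/cpu.c:

#include <linux/cpumask.h>

#ifdef CONFIG_INIT_ALL_POSSIBLE
/* archs that discover CPUs late keep the old "all possible" default */
cpumask_t cpu_possible_map __read_mostly = CPU_MASK_ALL;
#else
cpumask_t cpu_possible_map __read_mostly;
#endif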

+ 3 - 3
arch/parisc/kernel/irq.c

@@ -131,12 +131,12 @@ int cpu_check_affinity(unsigned int irq, cpumask_t *dest)
 	return 0;
 }
 
-static void cpu_set_affinity_irq(unsigned int irq, cpumask_t dest)
+static void cpu_set_affinity_irq(unsigned int irq, const struct cpumask *dest)
 {
-	if (cpu_check_affinity(irq, &dest))
+	if (cpu_check_affinity(irq, dest))
 		return;
 
-	irq_desc[irq].affinity = dest;
+	irq_desc[irq].affinity = *dest;
 }
 #endif
 

+ 0 - 15
arch/parisc/kernel/smp.c

@@ -67,21 +67,6 @@ static volatile int cpu_now_booting __read_mostly = 0;	/* track which CPU is boo
 
 static int parisc_max_cpus __read_mostly = 1;
 
-/* online cpus are ones that we've managed to bring up completely
- * possible cpus are all valid cpu
- * present cpus are all detected cpu
- *
- * On startup we bring up the "possible" cpus. Since we discover
- * CPUs later, we add them as hotplug, so the possible cpu mask is
- * empty in the beginning.
- */
-
-cpumask_t cpu_online_map   __read_mostly = CPU_MASK_NONE;	/* Bitmap of online CPUs */
-cpumask_t cpu_possible_map __read_mostly = CPU_MASK_ALL;	/* Bitmap of Present CPUs */
-
-EXPORT_SYMBOL(cpu_online_map);
-EXPORT_SYMBOL(cpu_possible_map);
-
 DEFINE_PER_CPU(spinlock_t, ipi_lock) = SPIN_LOCK_UNLOCKED;
 
 enum ipi_message_type {

+ 0 - 1
arch/powerpc/include/asm/topology.h

@@ -48,7 +48,6 @@ static inline int pcibus_to_node(struct pci_bus *bus)
 
 /* sched_domains SD_NODE_INIT for PPC64 machines */
 #define SD_NODE_INIT (struct sched_domain) {		\
-	.span			= CPU_MASK_NONE,	\
 	.parent			= NULL,			\
 	.child			= NULL,			\
 	.groups			= NULL,			\

+ 1 - 1
arch/powerpc/kernel/irq.c

@@ -237,7 +237,7 @@ void fixup_irqs(cpumask_t map)
 			mask = map;
 		}
 		if (irq_desc[irq].chip->set_affinity)
-			irq_desc[irq].chip->set_affinity(irq, mask);
+			irq_desc[irq].chip->set_affinity(irq, &mask);
 		else if (irq_desc[irq].action && !(warned++))
 			printk("Cannot set affinity for irq %i\n", irq);
 	}

+ 0 - 4
arch/powerpc/kernel/smp.c

@@ -59,13 +59,9 @@
 
 struct thread_info *secondary_ti;
 
-cpumask_t cpu_possible_map = CPU_MASK_NONE;
-cpumask_t cpu_online_map = CPU_MASK_NONE;
 DEFINE_PER_CPU(cpumask_t, cpu_sibling_map) = CPU_MASK_NONE;
 DEFINE_PER_CPU(cpumask_t, cpu_core_map) = CPU_MASK_NONE;
 
-EXPORT_SYMBOL(cpu_online_map);
-EXPORT_SYMBOL(cpu_possible_map);
 EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);
 EXPORT_PER_CPU_SYMBOL(cpu_core_map);
 

+ 1 - 1
arch/powerpc/kernel/time.c

@@ -844,7 +844,7 @@ static void register_decrementer_clockevent(int cpu)
 	struct clock_event_device *dec = &per_cpu(decrementers, cpu).event;
 
 	*dec = decrementer_clockevent;
-	dec->cpumask = cpumask_of_cpu(cpu);
+	dec->cpumask = cpumask_of(cpu);
 
 	printk(KERN_DEBUG "clockevent: %s mult[%lx] shift[%d] cpu[%d]\n",
 	       dec->name, dec->mult, dec->shift, cpu);

+ 2 - 2
arch/powerpc/platforms/pseries/xics.c

@@ -332,7 +332,7 @@ static void xics_eoi_lpar(unsigned int virq)
 	lpar_xirr_info_set((0xff << 24) | irq);
 }
 
-static void xics_set_affinity(unsigned int virq, cpumask_t cpumask)
+static void xics_set_affinity(unsigned int virq, const struct cpumask *cpumask)
 {
 	unsigned int irq;
 	int status;
@@ -870,7 +870,7 @@ void xics_migrate_irqs_away(void)
 
 		/* Reset affinity to all cpus */
 		irq_desc[virq].affinity = CPU_MASK_ALL;
-		desc->chip->set_affinity(virq, CPU_MASK_ALL);
+		desc->chip->set_affinity(virq, cpu_all_mask);
unlock:
 		spin_unlock_irqrestore(&desc->lock, flags);
 	}
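
The series also introduces ready-made constant masks such as cpu_all_mask (used above in place of a CPU_MASK_ALL temporary) and its empty counterpart cpu_none_mask, alongside cpu_online_mask and friends. Illustrative use, with an invented wrapper name; the irq_set_affinity() call matches how the sparc hunks later in this diff use it:

#include <linux/cpumask.h>
#include <linux/interrupt.h>

static void demo_spread_irq(unsigned int irq)
{
	/* cpu_all_mask is a const struct cpumask * covering every CPU,
	 * so no on-stack CPU_MASK_ALL value is needed */
	irq_set_affinity(irq, cpu_all_mask);
}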

+ 2 - 2
arch/powerpc/sysdev/mpic.c

@@ -806,7 +806,7 @@ static void mpic_end_ipi(unsigned int irq)
 
 #endif /* CONFIG_SMP */
 
-void mpic_set_affinity(unsigned int irq, cpumask_t cpumask)
+void mpic_set_affinity(unsigned int irq, const struct cpumask *cpumask)
 {
 	struct mpic *mpic = mpic_from_irq(irq);
 	unsigned int src = mpic_irq_to_hw(irq);
@@ -818,7 +818,7 @@ void mpic_set_affinity(unsigned int irq, cpumask_t cpumask)
 	} else {
 		cpumask_t tmp;
 
-		cpus_and(tmp, cpumask, cpu_online_map);
+		cpumask_and(&tmp, cpumask, cpu_online_mask);
 
 		mpic_irq_write(src, MPIC_INFO(IRQ_DESTINATION),
 			       mpic_physmask(cpus_addr(tmp)[0]));
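
The mpic hunk shows the pointer-based mask operations: cpumask_and() writes its result through the first argument, and cpu_online_mask is the new read-only alias for the online map. A sketch with an invented function name:

#include <linux/cpumask.h>

/* Clamp a requested affinity to CPUs that are actually online. */
static void demo_clamp_online(const struct cpumask *requested,
			      struct cpumask *result)
{
	cpumask_and(result, requested, cpu_online_mask);
}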

+ 1 - 1
arch/powerpc/sysdev/mpic.h

@@ -36,6 +36,6 @@ static inline int mpic_pasemi_msi_init(struct mpic *mpic)
 
 extern int mpic_set_irq_type(unsigned int virq, unsigned int flow_type);
 extern void mpic_set_vector(unsigned int virq, unsigned int vector);
-extern void mpic_set_affinity(unsigned int irq, cpumask_t cpumask);
+extern void mpic_set_affinity(unsigned int irq, const struct cpumask *cpumask);
 
 #endif /* _POWERPC_SYSDEV_MPIC_H */

+ 1 - 0
arch/s390/Kconfig

@@ -83,6 +83,7 @@ config S390
 	select HAVE_KRETPROBES
 	select HAVE_KVM if 64BIT
 	select HAVE_ARCH_TRACEHOOK
+	select INIT_ALL_POSSIBLE
 
 source "init/Kconfig"
 

+ 0 - 6
arch/s390/kernel/smp.c

@@ -55,12 +55,6 @@
 struct _lowcore *lowcore_ptr[NR_CPUS];
 EXPORT_SYMBOL(lowcore_ptr);
 
-cpumask_t cpu_online_map = CPU_MASK_NONE;
-EXPORT_SYMBOL(cpu_online_map);
-
-cpumask_t cpu_possible_map = CPU_MASK_ALL;
-EXPORT_SYMBOL(cpu_possible_map);
-
 static struct task_struct *current_set[NR_CPUS];
 
 static u8 smp_cpu_type;

+ 1 - 1
arch/s390/kernel/time.c

@@ -160,7 +160,7 @@ void init_cpu_timer(void)
 	cd->min_delta_ns	= 1;
 	cd->max_delta_ns	= LONG_MAX;
 	cd->rating		= 400;
-	cd->cpumask		= cpumask_of_cpu(cpu);
+	cd->cpumask		= cpumask_of(cpu);
 	cd->set_next_event	= s390_next_event;
 	cd->set_mode		= s390_set_mode;
 

+ 1 - 1
arch/sh/include/asm/smp.h

@@ -31,7 +31,7 @@ enum {
 };
 
 void smp_message_recv(unsigned int msg);
-void smp_timer_broadcast(cpumask_t mask);
+void smp_timer_broadcast(const struct cpumask *mask);
 
 void local_timer_interrupt(void);
 void local_timer_setup(unsigned int cpu);

+ 0 - 1
arch/sh/include/asm/topology.h

@@ -5,7 +5,6 @@
 
 /* sched_domains SD_NODE_INIT for sh machines */
 #define SD_NODE_INIT (struct sched_domain) {		\
-	.span			= CPU_MASK_NONE,	\
 	.parent			= NULL,			\
 	.child			= NULL,			\
 	.groups			= NULL,			\

+ 2 - 8
arch/sh/kernel/smp.c

@@ -31,12 +31,6 @@
 int __cpu_number_map[NR_CPUS];		/* Map physical to logical */
 int __cpu_logical_map[NR_CPUS];		/* Map logical to physical */
 
-cpumask_t cpu_possible_map;
-EXPORT_SYMBOL(cpu_possible_map);
-
-cpumask_t cpu_online_map;
-EXPORT_SYMBOL(cpu_online_map);
-
 static inline void __init smp_store_cpu_info(unsigned int cpu)
 {
 	struct sh_cpuinfo *c = cpu_data + cpu;
@@ -190,11 +184,11 @@ void arch_send_call_function_single_ipi(int cpu)
 	plat_send_ipi(cpu, SMP_MSG_FUNCTION_SINGLE);
 }
 
-void smp_timer_broadcast(cpumask_t mask)
+void smp_timer_broadcast(const struct cpumask *mask)
 {
 	int cpu;
 
-	for_each_cpu_mask(cpu, mask)
+	for_each_cpu(cpu, mask)
 		plat_send_ipi(cpu, SMP_MSG_TIMER);
 }
 

+ 1 - 1
arch/sh/kernel/timers/timer-broadcast.c

@@ -51,7 +51,7 @@ void __cpuinit local_timer_setup(unsigned int cpu)
 	clk->mult		= 1;
 	clk->set_mode		= dummy_timer_set_mode;
 	clk->broadcast		= smp_timer_broadcast;
-	clk->cpumask		= cpumask_of_cpu(cpu);
+	clk->cpumask		= cpumask_of(cpu);
 
 	clockevents_register_device(clk);
 }

+ 1 - 1
arch/sh/kernel/timers/timer-tmu.c

@@ -263,7 +263,7 @@ static int tmu_timer_init(void)
 	tmu0_clockevent.min_delta_ns =
 			clockevent_delta2ns(1, &tmu0_clockevent);
 
-	tmu0_clockevent.cpumask = cpumask_of_cpu(0);
+	tmu0_clockevent.cpumask = cpumask_of(0);
 
 	clockevents_register_device(&tmu0_clockevent);
 

+ 0 - 2
arch/sparc/include/asm/smp_32.h

@@ -29,8 +29,6 @@
  */
 
 extern unsigned char boot_cpu_id;
-extern cpumask_t phys_cpu_present_map;
-#define cpu_possible_map phys_cpu_present_map
 
 typedef void (*smpfunc_t)(unsigned long, unsigned long, unsigned long,
 		       unsigned long, unsigned long);

+ 7 - 4
arch/sparc/kernel/irq_64.c

@@ -312,7 +312,8 @@ static void sun4u_irq_enable(unsigned int virt_irq)
 	}
 }
 
-static void sun4u_set_affinity(unsigned int virt_irq, cpumask_t mask)
+static void sun4u_set_affinity(unsigned int virt_irq,
+			       const struct cpumask *mask)
 {
 	sun4u_irq_enable(virt_irq);
 }
@@ -362,7 +363,8 @@ static void sun4v_irq_enable(unsigned int virt_irq)
 		       ino, err);
 }
 
-static void sun4v_set_affinity(unsigned int virt_irq, cpumask_t mask)
+static void sun4v_set_affinity(unsigned int virt_irq,
+			       const struct cpumask *mask)
 {
 	unsigned int ino = virt_irq_table[virt_irq].dev_ino;
 	unsigned long cpuid = irq_choose_cpu(virt_irq);
@@ -429,7 +431,8 @@ static void sun4v_virq_enable(unsigned int virt_irq)
 		       dev_handle, dev_ino, err);
 }
 
-static void sun4v_virt_set_affinity(unsigned int virt_irq, cpumask_t mask)
+static void sun4v_virt_set_affinity(unsigned int virt_irq,
+				    const struct cpumask *mask)
 {
 	unsigned long cpuid, dev_handle, dev_ino;
 	int err;
@@ -851,7 +854,7 @@ void fixup_irqs(void)
 		    !(irq_desc[irq].status & IRQ_PER_CPU)) {
 			if (irq_desc[irq].chip->set_affinity)
 				irq_desc[irq].chip->set_affinity(irq,
-					irq_desc[irq].affinity);
+					&irq_desc[irq].affinity);
 		}
 		spin_unlock_irqrestore(&irq_desc[irq].lock, flags);
 	}
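
fixup_irqs() above shows the transition seam: the affinity stored in irq_desc[] is still a cpumask_t held by value, so callers take its address to feed the now pointer-based ->set_affinity() hook. Sketched below, assuming desc is valid and its chip provides the hook; the function name is invented.

#include <linux/irq.h>

static void demo_reapply_affinity(unsigned int irq, struct irq_desc *desc)
{
	if (desc->chip->set_affinity)
		/* pass a pointer to the mask embedded in the descriptor */
		desc->chip->set_affinity(irq, &desc->affinity);
}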

+ 1 - 1
arch/sparc/kernel/of_device_64.c

@@ -780,7 +780,7 @@ out:
 	if (nid != -1) {
 		cpumask_t numa_mask = node_to_cpumask(nid);
 
-		irq_set_affinity(irq, numa_mask);
+		irq_set_affinity(irq, &numa_mask);
 	}
 
 	return irq;

+ 1 - 1
arch/sparc/kernel/pci_msi.c

@@ -288,7 +288,7 @@ static int bringup_one_msi_queue(struct pci_pbm_info *pbm,
 	if (nid != -1) {
 		cpumask_t numa_mask = node_to_cpumask(nid);
 
-		irq_set_affinity(irq, numa_mask);
+		irq_set_affinity(irq, &numa_mask);
 	}
 	err = request_irq(irq, sparc64_msiq_interrupt, 0,
 			  "MSIQ",

+ 2 - 4
arch/sparc/kernel/smp_32.c

@@ -39,8 +39,6 @@ volatile unsigned long cpu_callin_map[NR_CPUS] __cpuinitdata = {0,};
 unsigned char boot_cpu_id = 0;
 unsigned char boot_cpu_id4 = 0; /* boot_cpu_id << 2 */
 
-cpumask_t cpu_online_map = CPU_MASK_NONE;
-cpumask_t phys_cpu_present_map = CPU_MASK_NONE;
 cpumask_t smp_commenced_mask = CPU_MASK_NONE;
 
 /* The only guaranteed locking primitive available on all Sparc
@@ -334,7 +332,7 @@ void __init smp_setup_cpu_possible_map(void)
 	instance = 0;
 	while (!cpu_find_by_instance(instance, NULL, &mid)) {
 		if (mid < NR_CPUS) {
-			cpu_set(mid, phys_cpu_present_map);
+			cpu_set(mid, cpu_possible_map);
 			cpu_set(mid, cpu_present_map);
 		}
 		instance++;
@@ -354,7 +352,7 @@ void __init smp_prepare_boot_cpu(void)
 
 	current_thread_info()->cpu = cpuid;
 	cpu_set(cpuid, cpu_online_map);
-	cpu_set(cpuid, phys_cpu_present_map);
+	cpu_set(cpuid, cpu_possible_map);
 }
 
 int __cpuinit __cpu_up(unsigned int cpu)

+ 0 - 4
arch/sparc/kernel/smp_64.c

@@ -49,14 +49,10 @@
 
 int sparc64_multi_core __read_mostly;
 
-cpumask_t cpu_possible_map __read_mostly = CPU_MASK_NONE;
-cpumask_t cpu_online_map __read_mostly = CPU_MASK_NONE;
 DEFINE_PER_CPU(cpumask_t, cpu_sibling_map) = CPU_MASK_NONE;
 cpumask_t cpu_core_map[NR_CPUS] __read_mostly =
 	{ [0 ... NR_CPUS-1] = CPU_MASK_NONE };
 
-EXPORT_SYMBOL(cpu_possible_map);
-EXPORT_SYMBOL(cpu_online_map);
 EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);
 EXPORT_SYMBOL(cpu_core_map);
 

+ 0 - 4
arch/sparc/kernel/sparc_ksyms_32.c

@@ -112,10 +112,6 @@ EXPORT_PER_CPU_SYMBOL(__cpu_data);
 #ifdef CONFIG_SMP
 /* IRQ implementation. */
 EXPORT_SYMBOL(synchronize_irq);
-
-/* CPU online map and active count. */
-EXPORT_SYMBOL(cpu_online_map);
-EXPORT_SYMBOL(phys_cpu_present_map);
 #endif
 
 EXPORT_SYMBOL(__udelay);

+ 1 - 1
arch/sparc/kernel/time_64.c

@@ -763,7 +763,7 @@ void __devinit setup_sparc64_timer(void)
 	sevt = &__get_cpu_var(sparc64_events);
 
 	memcpy(sevt, &sparc64_clockevent, sizeof(*sevt));
-	sevt->cpumask = cpumask_of_cpu(smp_processor_id());
+	sevt->cpumask = cpumask_of(smp_processor_id());
 
 	clockevents_register_device(sevt);
 }

+ 0 - 7
arch/um/kernel/smp.c

@@ -25,13 +25,6 @@ DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
 #include "irq_user.h"
 #include "irq_user.h"
 #include "os.h"
 #include "os.h"
 
 
-/* CPU online map, set by smp_boot_cpus */
-cpumask_t cpu_online_map = CPU_MASK_NONE;
-cpumask_t cpu_possible_map = CPU_MASK_NONE;
-
-EXPORT_SYMBOL(cpu_online_map);
-EXPORT_SYMBOL(cpu_possible_map);
-
 /* Per CPU bogomips and other parameters
 /* Per CPU bogomips and other parameters
  * The only piece used here is the ipi pipe, which is set before SMP is
  * The only piece used here is the ipi pipe, which is set before SMP is
  * started and never changed.
  * started and never changed.

Not all files are shown, because too many files changed in this diff