@@ -55,7 +55,7 @@
  * The Primary thread of each non-boot processor was started from the OF client
  * interface by prom_hold_cpus and is spinning on secondary_hold_spinloop.
  */
-static cpumask_t of_spin_map;
+static cpumask_var_t of_spin_mask;
 
 /* Query where a cpu is now. Return codes #defined in plpar_wrappers.h */
 int smp_query_cpu_stopped(unsigned int pcpu)
@@ -98,7 +98,7 @@ static inline int __devinit smp_startup_cpu(unsigned int lcpu)
 	unsigned int pcpu;
 	int start_cpu;
 
-	if (cpu_isset(lcpu, of_spin_map))
+	if (cpumask_test_cpu(lcpu, of_spin_mask))
 		/* Already started by OF and sitting in spin loop */
 		return 1;
 
@@ -106,7 +106,7 @@ static inline int __devinit smp_startup_cpu(unsigned int lcpu)
 
 	/* Check to see if the CPU out of FW already for kexec */
 	if (smp_query_cpu_stopped(pcpu) == QCSS_NOT_STOPPED){
-		cpu_set(lcpu, of_spin_map);
+		cpumask_set_cpu(lcpu, of_spin_mask);
 		return 1;
 	}
 
@@ -143,7 +143,7 @@ static void __devinit smp_xics_setup_cpu(int cpu)
 	if (firmware_has_feature(FW_FEATURE_SPLPAR))
 		vpa_init(cpu);
 
-	cpu_clear(cpu, of_spin_map);
+	cpumask_clear_cpu(cpu, of_spin_mask);
 	set_cpu_current_state(cpu, CPU_STATE_ONLINE);
 	set_default_offline_state(cpu);
 
@@ -214,17 +214,19 @@ static void __init smp_init_pseries(void)
 
 	pr_debug(" -> smp_init_pSeries()\n");
 
+	alloc_bootmem_cpumask_var(&of_spin_mask);
+
 	/* Mark threads which are still spinning in hold loops. */
 	if (cpu_has_feature(CPU_FTR_SMT)) {
 		for_each_present_cpu(i) {
 			if (cpu_thread_in_core(i) == 0)
-				cpu_set(i, of_spin_map);
+				cpumask_set_cpu(i, of_spin_mask);
 		}
 	} else {
-		of_spin_map = cpu_present_map;
+		cpumask_copy(of_spin_mask, cpu_present_mask);
 	}
 
-	cpu_clear(boot_cpuid, of_spin_map);
+	cpumask_clear_cpu(boot_cpuid, of_spin_mask);
 
 	/* Non-lpar has additional take/give timebase */
 	if (rtas_token("freeze-time-base") != RTAS_UNKNOWN_SERVICE) {