@@ -480,7 +480,7 @@ static void iucv_setmask_mp(void)
 		if (cpu_isset(cpu, iucv_buffer_cpumask) &&
 		    !cpu_isset(cpu, iucv_irq_cpumask))
 			smp_call_function_single(cpu, iucv_allow_cpu,
-						 NULL, 0, 1);
+						 NULL, 1);
 	preempt_enable();
 }
 
@@ -498,7 +498,7 @@ static void iucv_setmask_up(void)
 	cpumask = iucv_irq_cpumask;
 	cpu_clear(first_cpu(iucv_irq_cpumask), cpumask);
 	for_each_cpu_mask(cpu, cpumask)
-		smp_call_function_single(cpu, iucv_block_cpu, NULL, 0, 1);
+		smp_call_function_single(cpu, iucv_block_cpu, NULL, 1);
 }
 
 /**
@@ -523,7 +523,7 @@ static int iucv_enable(void)
 	rc = -EIO;
 	preempt_disable();
 	for_each_online_cpu(cpu)
-		smp_call_function_single(cpu, iucv_declare_cpu, NULL, 0, 1);
+		smp_call_function_single(cpu, iucv_declare_cpu, NULL, 1);
 	preempt_enable();
 	if (cpus_empty(iucv_buffer_cpumask))
 		/* No cpu could declare an iucv buffer. */
@@ -580,7 +580,7 @@ static int __cpuinit iucv_cpu_notify(struct notifier_block *self,
 	case CPU_ONLINE_FROZEN:
 	case CPU_DOWN_FAILED:
 	case CPU_DOWN_FAILED_FROZEN:
-		smp_call_function_single(cpu, iucv_declare_cpu, NULL, 0, 1);
+		smp_call_function_single(cpu, iucv_declare_cpu, NULL, 1);
 		break;
 	case CPU_DOWN_PREPARE:
 	case CPU_DOWN_PREPARE_FROZEN:
@@ -589,10 +589,10 @@ static int __cpuinit iucv_cpu_notify(struct notifier_block *self,
 		if (cpus_empty(cpumask))
 			/* Can't offline last IUCV enabled cpu. */
 			return NOTIFY_BAD;
-		smp_call_function_single(cpu, iucv_retrieve_cpu, NULL, 0, 1);
+		smp_call_function_single(cpu, iucv_retrieve_cpu, NULL, 1);
 		if (cpus_empty(iucv_irq_cpumask))
 			smp_call_function_single(first_cpu(iucv_buffer_cpumask),
-						 iucv_allow_cpu, NULL, 0, 1);
+						 iucv_allow_cpu, NULL, 1);
 		break;
 	}
 	return NOTIFY_OK;
@@ -652,7 +652,7 @@ static void iucv_cleanup_queue(void)
 	 * pending interrupts force them to the work queue by calling
 	 * an empty function on all cpus.
 	 */
-	smp_call_function(__iucv_cleanup_queue, NULL, 0, 1);
+	smp_call_function(__iucv_cleanup_queue, NULL, 1);
 	spin_lock_irq(&iucv_queue_lock);
 	list_for_each_entry_safe(p, n, &iucv_task_queue, list) {
 		/* Remove stale work items from the task queue. */