@@ -483,7 +483,7 @@ void flush_tlb_all(void)
  * it goes straight through and wastes no time serializing
  * anything. Worst case is that we lose a reschedule ...
  */
-void smp_send_reschedule(int cpu)
+void native_smp_send_reschedule(int cpu)
 {
 	WARN_ON(cpu_is_offline(cpu));
 	send_IPI_mask(cpumask_of_cpu(cpu), RESCHEDULE_VECTOR);
@@ -560,9 +560,9 @@ static void __smp_call_function(void (*func) (void *info), void *info,
  * You must not call this function with disabled interrupts or from a
  * hardware interrupt handler or from a bottom half handler.
  */
-int smp_call_function_mask(cpumask_t mask,
-			   void (*func)(void *), void *info,
-			   int wait)
+int native_smp_call_function_mask(cpumask_t mask,
+				  void (*func)(void *), void *info,
+				  int wait)
 {
 	struct call_data_struct data;
 	cpumask_t allbutself;
@@ -681,7 +681,7 @@ static void stop_this_cpu (void * dummy)
  * this function calls the 'stop' function on all other CPUs in the system.
  */

-void smp_send_stop(void)
+void native_smp_send_stop(void)
 {
 	/* Don't deadlock on the call lock in panic */
 	int nolock = !spin_trylock(&call_lock);
@@ -757,3 +757,14 @@ int safe_smp_processor_id(void)

 	return cpuid >= 0 ? cpuid : 0;
 }
+
+struct smp_ops smp_ops = {
+	.smp_prepare_boot_cpu = native_smp_prepare_boot_cpu,
+	.smp_prepare_cpus = native_smp_prepare_cpus,
+	.cpu_up = native_cpu_up,
+	.smp_cpus_done = native_smp_cpus_done,
+
+	.smp_send_stop = native_smp_send_stop,
+	.smp_send_reschedule = native_smp_send_reschedule,
+	.smp_call_function_mask = native_smp_call_function_mask,
+};