@@ -119,112 +119,22 @@ static inline void
 	native_apic_mem_write(APIC_ICR, cfg);
 }
 
-static inline void
-default_send_IPI_mask_sequence_phys(const struct cpumask *mask, int vector)
-{
-	unsigned long query_cpu;
-	unsigned long flags;
-
-	/*
-	 * Hack. The clustered APIC addressing mode doesn't allow us to send
-	 * to an arbitrary mask, so I do a unicast to each CPU instead.
-	 * - mbligh
-	 */
-	local_irq_save(flags);
-	for_each_cpu(query_cpu, mask) {
-		__default_send_IPI_dest_field(per_cpu(x86_cpu_to_apicid,
-				query_cpu), vector, APIC_DEST_PHYSICAL);
-	}
-	local_irq_restore(flags);
-}
-
-static inline void
-default_send_IPI_mask_allbutself_phys(const struct cpumask *mask, int vector)
-{
-	unsigned int this_cpu = smp_processor_id();
-	unsigned int query_cpu;
-	unsigned long flags;
-
-	/* See Hack comment above */
-
-	local_irq_save(flags);
-	for_each_cpu(query_cpu, mask) {
-		if (query_cpu == this_cpu)
-			continue;
-		__default_send_IPI_dest_field(per_cpu(x86_cpu_to_apicid,
-				query_cpu), vector, APIC_DEST_PHYSICAL);
-	}
-	local_irq_restore(flags);
-}
-
+extern void default_send_IPI_mask_sequence_phys(const struct cpumask *mask,
+						 int vector);
+extern void default_send_IPI_mask_allbutself_phys(const struct cpumask *mask,
+							 int vector);
 #include <asm/genapic.h>
 
-static inline void
-default_send_IPI_mask_sequence_logical(const struct cpumask *mask, int vector)
-{
-	unsigned long flags;
-	unsigned int query_cpu;
-
-	/*
-	 * Hack. The clustered APIC addressing mode doesn't allow us to send
-	 * to an arbitrary mask, so I do a unicasts to each CPU instead. This
-	 * should be modified to do 1 message per cluster ID - mbligh
-	 */
-
-	local_irq_save(flags);
-	for_each_cpu(query_cpu, mask)
-		__default_send_IPI_dest_field(
-			apic->cpu_to_logical_apicid(query_cpu), vector,
-			apic->dest_logical);
-	local_irq_restore(flags);
-}
-
-static inline void
-default_send_IPI_mask_allbutself_logical(const struct cpumask *mask, int vector)
-{
-	unsigned long flags;
-	unsigned int query_cpu;
-	unsigned int this_cpu = smp_processor_id();
-
-	/* See Hack comment above */
-
-	local_irq_save(flags);
-	for_each_cpu(query_cpu, mask) {
-		if (query_cpu == this_cpu)
-			continue;
-		__default_send_IPI_dest_field(
-			apic->cpu_to_logical_apicid(query_cpu), vector,
-			apic->dest_logical);
-	}
-	local_irq_restore(flags);
-}
-
+extern void default_send_IPI_mask_sequence_logical(const struct cpumask *mask,
+							 int vector);
+extern void default_send_IPI_mask_allbutself_logical(const struct cpumask *mask,
+							 int vector);
 
 /* Avoid include hell */
 #define NMI_VECTOR 0x02
 
 extern int no_broadcast;
 
-#ifndef CONFIG_X86_64
-/*
- * This is only used on smaller machines.
- */
-static inline void default_send_IPI_mask_bitmask_logical(const struct cpumask *cpumask, int vector)
-{
-	unsigned long mask = cpumask_bits(cpumask)[0];
-	unsigned long flags;
-
-	local_irq_save(flags);
-	WARN_ON(mask & ~cpumask_bits(cpu_online_mask)[0]);
-	__default_send_IPI_dest_field(mask, vector, apic->dest_logical);
-	local_irq_restore(flags);
-}
-
-static inline void default_send_IPI_mask_logical(const struct cpumask *mask, int vector)
-{
-	default_send_IPI_mask_bitmask_logical(mask, vector);
-}
-#endif
-
 static inline void __default_local_send_IPI_allbutself(int vector)
 {
 	if (no_broadcast || vector == NMI_VECTOR)
@@ -242,22 +152,11 @@ static inline void __default_local_send_IPI_all(int vector)
 }
 
 #ifdef CONFIG_X86_32
-static inline void default_send_IPI_allbutself(int vector)
-{
-	/*
-	 * if there are no other CPUs in the system then we get an APIC send
-	 * error if we try to broadcast, thus avoid sending IPIs in this case.
-	 */
-	if (!(num_online_cpus() > 1))
-		return;
-
-	__default_local_send_IPI_allbutself(vector);
-}
-
-static inline void default_send_IPI_all(int vector)
-{
-	__default_local_send_IPI_all(vector);
-}
+extern void default_send_IPI_mask_logical(const struct cpumask *mask,
+						 int vector);
+extern void default_send_IPI_allbutself(int vector);
+extern void default_send_IPI_all(int vector);
+extern void default_send_IPI_self(int vector);
 #endif
 
 #endif