@@ -41,7 +41,6 @@
 #include <asm/sections.h>
 #include <asm/tlbflush.h>
 #include <asm/ptrace.h>
-#include <asm/localtimer.h>
 #include <asm/smp_plat.h>
 #include <asm/virt.h>
 #include <asm/mach/arch.h>
@@ -133,8 +132,6 @@ int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle)
 }
 
 #ifdef CONFIG_HOTPLUG_CPU
-static void percpu_timer_stop(void);
-
 static int platform_cpu_kill(unsigned int cpu)
 {
 	if (smp_ops.cpu_kill)
@@ -177,11 +174,6 @@ int __cpuinit __cpu_disable(void)
 	 */
 	migrate_irqs();
 
-	/*
-	 * Stop the local timer for this CPU.
-	 */
-	percpu_timer_stop();
-
 	/*
 	 * Flush user cache and TLB mappings, and then remove this CPU
 	 * from the vm mask set of all processes.
@@ -303,8 +295,6 @@ static void __cpuinit smp_store_cpu_info(unsigned int cpuid)
 	store_cpu_topology(cpuid);
 }
 
-static void percpu_timer_setup(void);
-
 /*
  * This is the secondary CPU boot entry. We're using this CPUs
  * idle thread stack, but a set of temporary page tables.
@@ -359,11 +349,6 @@ asmlinkage void __cpuinit secondary_start_kernel(void)
 	set_cpu_online(cpu, true);
 	complete(&cpu_running);
 
-	/*
-	 * Setup the percpu timer for this CPU.
-	 */
-	percpu_timer_setup();
-
 	local_irq_enable();
 	local_fiq_enable();
 
@@ -409,12 +394,6 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
 	if (max_cpus > ncores)
 		max_cpus = ncores;
 	if (ncores > 1 && max_cpus) {
-		/*
-		 * Enable the local timer or broadcast device for the
-		 * boot CPU, but only if we have more than one CPU.
-		 */
-		percpu_timer_setup();
-
 		/*
 		 * Initialise the present map, which describes the set of CPUs
 		 * actually populated at the present time. A platform should
@@ -491,11 +470,6 @@ u64 smp_irq_stat_cpu(unsigned int cpu)
 	return sum;
 }
 
-/*
- * Timer (local or broadcast) support
- */
-static DEFINE_PER_CPU(struct clock_event_device, percpu_clockevent);
-
 #ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
 void tick_broadcast(const struct cpumask *mask)
 {
@@ -503,49 +477,6 @@ void tick_broadcast(const struct cpumask *mask)
 }
 #endif
 
-static struct local_timer_ops *lt_ops;
-
-#ifdef CONFIG_LOCAL_TIMERS
-int local_timer_register(struct local_timer_ops *ops)
-{
-	if (!is_smp() || !setup_max_cpus)
-		return -ENXIO;
-
-	if (lt_ops)
-		return -EBUSY;
-
-	lt_ops = ops;
-	return 0;
-}
-#endif
-
-static void __cpuinit percpu_timer_setup(void)
-{
-	unsigned int cpu = smp_processor_id();
-	struct clock_event_device *evt = &per_cpu(percpu_clockevent, cpu);
-
-	evt->cpumask = cpumask_of(cpu);
-
-	if (lt_ops)
-		lt_ops->setup(evt);
-}
-
-#ifdef CONFIG_HOTPLUG_CPU
-/*
- * The generic clock events code purposely does not stop the local timer
- * on CPU_DEAD/CPU_DEAD_FROZEN hotplug events, so we have to do it
- * manually here.
- */
-static void percpu_timer_stop(void)
-{
-	unsigned int cpu = smp_processor_id();
-	struct clock_event_device *evt = &per_cpu(percpu_clockevent, cpu);
-
-	if (lt_ops)
-		lt_ops->stop(evt);
-}
-#endif
-
 static DEFINE_RAW_SPINLOCK(stop_lock);
 
 /*
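
Note on the replacement pattern: with local_timer_register() and the
percpu_clockevent glue removed, a per-CPU timer driver owns its struct
clock_event_device and registers it with the clockevents core itself,
typically from a CPU notifier so that secondary CPUs set up, and dying
CPUs quiesce, their own timer. A minimal sketch of that pattern follows;
it is not part of this patch, the my_timer_* names, clock rate, and
min/max delta values are hypothetical, and the CPU-notifier/__cpuinit
APIs match this patch's era rather than current kernels.

	#include <linux/clockchips.h>
	#include <linux/cpu.h>
	#include <linux/notifier.h>
	#include <linux/percpu.h>
	#include <linux/smp.h>

	static DEFINE_PER_CPU(struct clock_event_device, my_timer_evt);

	static int my_timer_set_next_event(unsigned long delta,
					   struct clock_event_device *evt)
	{
		/* program the hardware comparator for 'delta' ticks */
		return 0;
	}

	static void my_timer_set_mode(enum clock_event_mode mode,
				      struct clock_event_device *evt)
	{
		/* enable or shut down the hardware according to 'mode' */
	}

	static void __cpuinit my_timer_setup(struct clock_event_device *evt)
	{
		evt->name = "my_timer";
		evt->features = CLOCK_EVT_FEAT_ONESHOT;
		evt->rating = 350;
		evt->set_next_event = my_timer_set_next_event;
		evt->set_mode = my_timer_set_mode;
		evt->cpumask = cpumask_of(smp_processor_id());

		/* placeholder rate and min/max delta ticks */
		clockevents_config_and_register(evt, 1000000, 0xf, 0x7fffffff);
	}

	static int __cpuinit my_timer_cpu_notify(struct notifier_block *self,
						 unsigned long action, void *hcpu)
	{
		switch (action & ~CPU_TASKS_FROZEN) {
		case CPU_STARTING:
			my_timer_setup(this_cpu_ptr(&my_timer_evt));
			break;
		case CPU_DYING:
			/*
			 * The clockevents core will not stop a dying CPU's
			 * timer (the job percpu_timer_stop() used to do),
			 * so the driver must quiesce the hardware here.
			 */
			break;
		}
		return NOTIFY_OK;
	}

Driver init would register this notifier with register_cpu_notifier()
and call my_timer_setup() once on the boot CPU.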