@@ -33,7 +33,6 @@ DEFINE_PER_CPU(struct tick_device, tick_cpu_device);
 ktime_t tick_next_period;
 ktime_t tick_period;
 int tick_do_timer_cpu __read_mostly = TICK_DO_TIMER_BOOT;
-static DEFINE_RAW_SPINLOCK(tick_device_lock);
 
 /*
  * Debugging: see timer_list.c
@@ -206,16 +205,14 @@ static void tick_setup_device(struct tick_device *td,
 }
 
 /*
- * Check, if the new registered device should be used.
+ * Check, if the new registered device should be used. Called with
+ * clockevents_lock held and interrupts disabled.
  */
 void tick_check_new_device(struct clock_event_device *newdev)
 {
 	struct clock_event_device *curdev;
 	struct tick_device *td;
 	int cpu;
-	unsigned long flags;
-
-	raw_spin_lock_irqsave(&tick_device_lock, flags);
 
 	cpu = smp_processor_id();
 	if (!cpumask_test_cpu(cpu, newdev->cpumask))
@@ -273,8 +270,6 @@ void tick_check_new_device(struct clock_event_device *newdev)
 	tick_setup_device(td, newdev, cpu, cpumask_of(cpu));
 	if (newdev->features & CLOCK_EVT_FEAT_ONESHOT)
 		tick_oneshot_notify();
-
-	raw_spin_unlock_irqrestore(&tick_device_lock, flags);
 	return;
 
 out_bc:
@@ -282,7 +277,6 @@ out_bc:
 	 * Can the new device be used as a broadcast device ?
 	 */
 	tick_install_broadcast_device(newdev);
-	raw_spin_unlock_irqrestore(&tick_device_lock, flags);
 }
 
 /*
@@ -311,9 +305,7 @@ static void tick_shutdown(unsigned int *cpup)
 {
 	struct tick_device *td = &per_cpu(tick_cpu_device, *cpup);
 	struct clock_event_device *dev = td->evtdev;
-	unsigned long flags;
 
-	raw_spin_lock_irqsave(&tick_device_lock, flags);
 	td->mode = TICKDEV_MODE_PERIODIC;
 	if (dev) {
 		/*
@@ -325,26 +317,20 @@ static void tick_shutdown(unsigned int *cpup)
 		dev->event_handler = clockevents_handle_noop;
 		td->evtdev = NULL;
 	}
-	raw_spin_unlock_irqrestore(&tick_device_lock, flags);
 }
 
 static void tick_suspend(void)
 {
 	struct tick_device *td = &__get_cpu_var(tick_cpu_device);
-	unsigned long flags;
 
-	raw_spin_lock_irqsave(&tick_device_lock, flags);
 	clockevents_shutdown(td->evtdev);
-	raw_spin_unlock_irqrestore(&tick_device_lock, flags);
 }
 
 static void tick_resume(void)
 {
 	struct tick_device *td = &__get_cpu_var(tick_cpu_device);
-	unsigned long flags;
 	int broadcast = tick_resume_broadcast();
 
-	raw_spin_lock_irqsave(&tick_device_lock, flags);
 	clockevents_set_mode(td->evtdev, CLOCK_EVT_MODE_RESUME);
 
 	if (!broadcast) {
@@ -353,9 +339,11 @@ static void tick_resume(void)
 		else
 			tick_resume_oneshot();
 	}
-	raw_spin_unlock_irqrestore(&tick_device_lock, flags);
 }
 
+/*
+ * Called with clockevents_lock held and interrupts disabled
+ */
 void tick_notify(unsigned long reason, void *dev)
 {
 	switch (reason) {
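
Dropping tick_device_lock is safe only because, as the new comments state, every path into these functions already runs with clockevents_lock held and interrupts disabled, so the private lock only added nesting without protecting anything extra. For context, a simplified sketch of the caller side, loosely modelled on kernel/time/clockevents.c of this era (not the exact upstream code):

/*
 * Sketch of the registration path: the outer raw spinlock is taken with
 * interrupts disabled before calling into the tick layer, so the callee
 * does not need its own lock.
 */
static DEFINE_RAW_SPINLOCK(clockevents_lock);

void clockevents_register_device(struct clock_event_device *dev)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&clockevents_lock, flags);

	list_add(&dev->list, &clockevent_devices);
	tick_check_new_device(dev);		/* runs under clockevents_lock */
	clockevents_notify_released();

	raw_spin_unlock_irqrestore(&clockevents_lock, flags);
}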