@@ -30,6 +30,7 @@
 
 static struct tick_device tick_broadcast_device;
 static cpumask_var_t tick_broadcast_mask;
+static cpumask_var_t tick_broadcast_on;
 static cpumask_var_t tmpmask;
 static DEFINE_RAW_SPINLOCK(tick_broadcast_lock);
 static int tick_broadcast_force;
@@ -140,8 +141,9 @@ static void tick_device_setup_broadcast_func(struct clock_event_device *dev)
  */
 int tick_device_uses_broadcast(struct clock_event_device *dev, int cpu)
 {
+	struct clock_event_device *bc = tick_broadcast_device.evtdev;
 	unsigned long flags;
-	int ret = 0;
+	int ret;
 
 	raw_spin_lock_irqsave(&tick_broadcast_lock, flags);
 
@@ -155,20 +157,59 @@ int tick_device_uses_broadcast(struct clock_event_device *dev, int cpu)
 		dev->event_handler = tick_handle_periodic;
 		tick_device_setup_broadcast_func(dev);
 		cpumask_set_cpu(cpu, tick_broadcast_mask);
-		tick_broadcast_start_periodic(tick_broadcast_device.evtdev);
+		tick_broadcast_start_periodic(bc);
 		ret = 1;
 	} else {
 		/*
-		 * When the new device is not affected by the stop
-		 * feature and the cpu is marked in the broadcast mask
-		 * then clear the broadcast bit.
+		 * Clear the broadcast bit for this cpu if the
+		 * device is not power state affected.
 		 */
-		if (!(dev->features & CLOCK_EVT_FEAT_C3STOP)) {
-			int cpu = smp_processor_id();
+		if (!(dev->features & CLOCK_EVT_FEAT_C3STOP))
 			cpumask_clear_cpu(cpu, tick_broadcast_mask);
-			tick_broadcast_clear_oneshot(cpu);
-		} else {
+		else
 			tick_device_setup_broadcast_func(dev);
+
+		/*
+		 * Clear the broadcast bit if the CPU is not in
+		 * periodic broadcast on state.
+		 */
+		if (!cpumask_test_cpu(cpu, tick_broadcast_on))
+			cpumask_clear_cpu(cpu, tick_broadcast_mask);
+
+		switch (tick_broadcast_device.mode) {
+		case TICKDEV_MODE_ONESHOT:
+			/*
+			 * If the system is in oneshot mode we can
+			 * unconditionally clear the oneshot mask bit,
+			 * because the CPU is running and therefore
+			 * not in an idle state which causes the power
+			 * state affected device to stop. Let the
+			 * caller initialize the device.
+			 */
+			tick_broadcast_clear_oneshot(cpu);
+			ret = 0;
+			break;
+
+		case TICKDEV_MODE_PERIODIC:
+			/*
+			 * If the system is in periodic mode, check
+			 * whether the broadcast device can be
+			 * switched off now.
+			 */
+			if (cpumask_empty(tick_broadcast_mask) && bc)
+				clockevents_shutdown(bc);
+			/*
+			 * If we kept the cpu in the broadcast mask,
+			 * tell the caller to leave the per cpu device
+			 * in shutdown state. The periodic interrupt
+			 * is delivered by the broadcast device.
+			 */
+			ret = cpumask_test_cpu(cpu, tick_broadcast_mask);
+			break;
+		default:
+			/* Nothing to do */
+			ret = 0;
+			break;
 		}
 	}
 	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
@@ -298,6 +339,7 @@ static void tick_do_broadcast_on_off(unsigned long *reason)
 	switch (*reason) {
 	case CLOCK_EVT_NOTIFY_BROADCAST_ON:
 	case CLOCK_EVT_NOTIFY_BROADCAST_FORCE:
+		cpumask_set_cpu(cpu, tick_broadcast_on);
 		if (!cpumask_test_and_set_cpu(cpu, tick_broadcast_mask)) {
 			if (tick_broadcast_device.mode ==
 			    TICKDEV_MODE_PERIODIC)
@@ -307,8 +349,12 @@ static void tick_do_broadcast_on_off(unsigned long *reason)
 			tick_broadcast_force = 1;
 		break;
 	case CLOCK_EVT_NOTIFY_BROADCAST_OFF:
-		if (!tick_broadcast_force &&
-		    cpumask_test_and_clear_cpu(cpu, tick_broadcast_mask)) {
+		if (tick_broadcast_force)
+			break;
+		cpumask_clear_cpu(cpu, tick_broadcast_on);
+		if (!tick_device_is_functional(dev))
+			break;
+		if (cpumask_test_and_clear_cpu(cpu, tick_broadcast_mask)) {
 			if (tick_broadcast_device.mode ==
 			    TICKDEV_MODE_PERIODIC)
 				tick_setup_periodic(dev, 0);
@@ -366,6 +412,7 @@ void tick_shutdown_broadcast(unsigned int *cpup)
 
 	bc = tick_broadcast_device.evtdev;
 	cpumask_clear_cpu(cpu, tick_broadcast_mask);
+	cpumask_clear_cpu(cpu, tick_broadcast_on);
 
 	if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC) {
 		if (bc && cpumask_empty(tick_broadcast_mask))
@@ -821,6 +868,7 @@ bool tick_broadcast_oneshot_available(void)
 void __init tick_broadcast_init(void)
 {
 	zalloc_cpumask_var(&tick_broadcast_mask, GFP_NOWAIT);
+	zalloc_cpumask_var(&tick_broadcast_on, GFP_NOWAIT);
 	zalloc_cpumask_var(&tmpmask, GFP_NOWAIT);
 #ifdef CONFIG_TICK_ONESHOT
 	zalloc_cpumask_var(&tick_broadcast_oneshot_mask, GFP_NOWAIT);
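
For orientation, here is a minimal user-space sketch (not kernel code) of the state split this patch introduces: the new tick_broadcast_on mask remembers which CPUs have requested periodic broadcast, while tick_broadcast_mask continues to track which CPUs are currently served by the broadcast device. The plain unsigned long bitmasks and the model_* helper names are illustrative stand-ins for the kernel's cpumask API and notifier paths, and the sketch omits details such as the tick_device_is_functional() check on the OFF path.

/* Simplified model of the two masks; one bit per CPU. */
#include <stdbool.h>
#include <stdio.h>

static unsigned long broadcast_on;	/* CPUs that asked for broadcast (new mask) */
static unsigned long broadcast_mask;	/* CPUs currently served by the broadcast device */
static bool broadcast_force;

/* CLOCK_EVT_NOTIFY_BROADCAST_ON/_FORCE: record the request and serve the CPU. */
static void model_broadcast_on(int cpu, bool force)
{
	broadcast_on |= 1UL << cpu;
	broadcast_mask |= 1UL << cpu;
	if (force)
		broadcast_force = true;
}

/* CLOCK_EVT_NOTIFY_BROADCAST_OFF: a forced broadcast is sticky; otherwise
 * withdraw the request and stop serving the CPU. */
static void model_broadcast_off(int cpu)
{
	if (broadcast_force)
		return;
	broadcast_on &= ~(1UL << cpu);
	broadcast_mask &= ~(1UL << cpu);
}

/* Mirrors the re-registration check in tick_device_uses_broadcast(): a CPU
 * that never asked for broadcast must not linger in the served mask. */
static bool model_uses_broadcast(int cpu)
{
	if (!(broadcast_on & (1UL << cpu)))
		broadcast_mask &= ~(1UL << cpu);
	return broadcast_mask & (1UL << cpu);
}

int main(void)
{
	model_broadcast_on(1, false);
	model_broadcast_off(1);
	/* After on followed by off, the CPU is out of both masks: prints 0. */
	printf("cpu1 uses broadcast: %d\n", model_uses_broadcast(1));
	return 0;
}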