@@ -175,6 +175,8 @@ static void tick_do_periodic_broadcast(void)
  */
 static void tick_handle_periodic_broadcast(struct clock_event_device *dev)
 {
+	ktime_t next;
+
 	tick_do_periodic_broadcast();
 
 	/*
@@ -185,10 +187,13 @@ static void tick_handle_periodic_broadcast(struct clock_event_device *dev)
 
 	/*
 	 * Setup the next period for devices, which do not have
-	 * periodic mode:
+	 * periodic mode. We read dev->next_event first and add to it
+	 * when the event has already expired. clockevents_program_event()
+	 * sets dev->next_event only when the event is really
+	 * programmed to the device.
 	 */
-	for (;;) {
-		ktime_t next = ktime_add(dev->next_event, tick_period);
+	for (next = dev->next_event; ;) {
+		next = ktime_add(next, tick_period);
 
 		if (!clockevents_program_event(dev, next, ktime_get()))
 			return;
@@ -205,7 +210,7 @@ static void tick_do_broadcast_on_off(void *why)
 	struct clock_event_device *bc, *dev;
 	struct tick_device *td;
 	unsigned long flags, *reason = why;
-	int cpu;
+	int cpu, bc_stopped;
 
 	spin_lock_irqsave(&tick_broadcast_lock, flags);
 
@@ -223,6 +228,8 @@ static void tick_do_broadcast_on_off(void *why)
 	if (!tick_device_is_functional(dev))
 		goto out;
 
+	bc_stopped = cpus_empty(tick_broadcast_mask);
+
 	switch (*reason) {
 	case CLOCK_EVT_NOTIFY_BROADCAST_ON:
 	case CLOCK_EVT_NOTIFY_BROADCAST_FORCE:
@@ -245,9 +252,10 @@ static void tick_do_broadcast_on_off(void *why)
 		break;
 	}
 
-	if (cpus_empty(tick_broadcast_mask))
-		clockevents_set_mode(bc, CLOCK_EVT_MODE_SHUTDOWN);
-	else {
+	if (cpus_empty(tick_broadcast_mask)) {
+		if (!bc_stopped)
+			clockevents_set_mode(bc, CLOCK_EVT_MODE_SHUTDOWN);
+	} else if (bc_stopped) {
 		if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC)
 			tick_broadcast_start_periodic(bc);
 		else
@@ -364,16 +372,8 @@ cpumask_t *tick_get_broadcast_oneshot_mask(void)
 static int tick_broadcast_set_event(ktime_t expires, int force)
 {
 	struct clock_event_device *bc = tick_broadcast_device.evtdev;
-	ktime_t now = ktime_get();
-	int res;
-
-	for(;;) {
-		res = clockevents_program_event(bc, expires, now);
-		if (!res || !force)
-			return res;
-		now = ktime_get();
-		expires = ktime_add(now, ktime_set(0, bc->min_delta_ns));
-	}
+
+	return tick_dev_program_event(bc, expires, force);
 }
 
 int tick_resume_broadcast_oneshot(struct clock_event_device *bc)
@@ -491,14 +491,52 @@ static void tick_broadcast_clear_oneshot(int cpu)
 	cpu_clear(cpu, tick_broadcast_oneshot_mask);
 }
 
+static void tick_broadcast_init_next_event(cpumask_t *mask, ktime_t expires)
+{
+	struct tick_device *td;
+	int cpu;
+
+	for_each_cpu_mask_nr(cpu, *mask) {
+		td = &per_cpu(tick_cpu_device, cpu);
+		if (td->evtdev)
+			td->evtdev->next_event = expires;
+	}
+}
+
 /**
  * tick_broadcast_setup_oneshot - setup the broadcast device
  */
 void tick_broadcast_setup_oneshot(struct clock_event_device *bc)
 {
-	bc->event_handler = tick_handle_oneshot_broadcast;
-	clockevents_set_mode(bc, CLOCK_EVT_MODE_ONESHOT);
-	bc->next_event.tv64 = KTIME_MAX;
+	/* Set it up only once ! */
+	if (bc->event_handler != tick_handle_oneshot_broadcast) {
+		int was_periodic = bc->mode == CLOCK_EVT_MODE_PERIODIC;
+		int cpu = smp_processor_id();
+		cpumask_t mask;
+
+		bc->event_handler = tick_handle_oneshot_broadcast;
+		clockevents_set_mode(bc, CLOCK_EVT_MODE_ONESHOT);
+
+		/* Take the do_timer update */
+		tick_do_timer_cpu = cpu;
+
+		/*
+		 * We must be careful here. There might be other CPUs
+		 * waiting for periodic broadcast. We need to set the
+		 * oneshot_mask bits for those and program the
+		 * broadcast device to fire.
+		 */
+		mask = tick_broadcast_mask;
+		cpu_clear(cpu, mask);
+		cpus_or(tick_broadcast_oneshot_mask,
+			tick_broadcast_oneshot_mask, mask);
+
+		if (was_periodic && !cpus_empty(mask)) {
+			tick_broadcast_init_next_event(&mask, tick_next_period);
+			tick_broadcast_set_event(tick_next_period, 1);
+		} else
+			bc->next_event.tv64 = KTIME_MAX;
+	}
 }
 
 /*
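For reference: the force-retry loop deleted from tick_broadcast_set_event() above is what the new tick_dev_program_event() helper centralizes; that helper is introduced outside this file (in the tick oneshot code). What follows is a minimal sketch of it, reconstructed from the removed lines; the in-tree version may carry additional safeguards.

/*
 * Sketch of the helper that tick_broadcast_set_event() now calls.
 * Mirrors the loop removed above: if programming fails because the
 * expiry time has already passed and the caller set "force", retry
 * with an expiry min_delta_ns past the current time until the
 * device accepts the event.
 */
int tick_dev_program_event(struct clock_event_device *dev, ktime_t expires,
			   int force)
{
	ktime_t now = ktime_get();

	for (;;) {
		int ret = clockevents_program_event(dev, expires, now);

		if (!ret || !force)
			return ret;
		now = ktime_get();
		expires = ktime_add(now, ktime_set(0, dev->min_delta_ns));
	}
}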