@@ -238,8 +238,9 @@ void clockevents_exchange_device(struct clock_event_device *old,
  */
 void clockevents_notify(unsigned long reason, void *arg)
 {
-	struct list_head *node, *tmp;
+	struct clock_event_device *dev, *tmp;
 	unsigned long flags;
+	int cpu;

 	raw_spin_lock_irqsave(&clockevents_lock, flags);
 	clockevents_do_notify(reason, arg);
@@ -250,8 +251,19 @@ void clockevents_notify(unsigned long reason, void *arg)
 		 * Unregister the clock event devices which were
 		 * released from the users in the notify chain.
 		 */
-		list_for_each_safe(node, tmp, &clockevents_released)
-			list_del(node);
+		list_for_each_entry_safe(dev, tmp, &clockevents_released, list)
+			list_del(&dev->list);
+		/*
+		 * Now check whether the CPU has left unused per cpu devices
+		 */
+		cpu = *((int *)arg);
+		list_for_each_entry_safe(dev, tmp, &clockevent_devices, list) {
+			if (cpumask_test_cpu(cpu, dev->cpumask) &&
+			    cpumask_weight(dev->cpumask) == 1) {
+				BUG_ON(dev->mode != CLOCK_EVT_MODE_UNUSED);
+				list_del(&dev->list);
+			}
+		}
 		break;
 	default:
 		break;
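
The core of the change is switching from list_for_each_safe(), which walks raw
struct list_head nodes, to list_for_each_entry_safe(), which hands the loop body
a typed container (here struct clock_event_device) while still caching the next
node so the current entry can be unlinked mid-walk. Below is a minimal userspace
sketch of that pattern, using simplified stand-ins for the kernel's
<linux/list.h> helpers; the struct and macro definitions are illustrative
reconstructions, not the kernel's actual implementations.

#include <stddef.h>
#include <stdio.h>

/* Simplified stand-ins for the kernel's <linux/list.h> intrusive list. */
struct list_head { struct list_head *next, *prev; };

#define LIST_HEAD_INIT(name) { &(name), &(name) }

static void list_del(struct list_head *entry)
{
	entry->prev->next = entry->next;
	entry->next->prev = entry->prev;
}

static void list_add_tail(struct list_head *new, struct list_head *head)
{
	new->prev = head->prev;
	new->next = head;
	head->prev->next = new;
	head->prev = new;
}

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/*
 * The "_safe" variant caches the next entry before the loop body runs,
 * so the current entry may be unlinked without breaking the iteration --
 * exactly what the patch relies on when deleting from the lists.
 * (typeof is a GNU C extension, as in the kernel's own macros.)
 */
#define list_for_each_entry_safe(pos, n, head, member)                    \
	for (pos = container_of((head)->next, typeof(*pos), member),      \
	     n = container_of(pos->member.next, typeof(*pos), member);    \
	     &pos->member != (head);                                      \
	     pos = n, n = container_of(n->member.next, typeof(*n), member))

/* Toy stand-in for struct clock_event_device. */
struct clock_event_device {
	int id;
	struct list_head list;
};

int main(void)
{
	struct list_head released = LIST_HEAD_INIT(released);
	struct clock_event_device devs[3] = {
		{ .id = 0 }, { .id = 1 }, { .id = 2 },
	};
	struct clock_event_device *dev, *tmp;

	for (int i = 0; i < 3; i++)
		list_add_tail(&devs[i].list, &released);

	/* Unlink every entry, as the patch does for clockevents_released. */
	list_for_each_entry_safe(dev, tmp, &released, list) {
		printf("unlinking device %d\n", dev->id);
		list_del(&dev->list);
	}

	return released.next == &released ? 0 : 1; /* list is now empty */
}

Note that list_del() is called on &dev->list, the embedded node, not on dev
itself; the second loop in the patch does the same for the per-cpu devices
whose cpumask covers only the departing CPU.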