
Merge branch 'pm-cpuidle-next' into linux-next

* pm-cpuidle-next:
  cpuidle: imx6: remove timer broadcast initialization
  cpuidle: OMAP4: remove timer broadcast initialization
  cpuidle: ux500: remove timer broadcast initialization
  cpuidle: initialize the broadcast timer framework
  timer: move enum definition out of ifdef section
  cpuidle: kirkwood: fix coccicheck warnings
  cpuidle / kirkwood: remove redundant Kconfig option
  cpuidle / ux500 : use CPUIDLE_FLAG_TIMER_STOP flag
  cpuidle / imx6 : use CPUIDLE_FLAG_TIMER_STOP flag
  cpuidle / omap4 : use CPUIDLE_FLAG_TIMER_STOP flag
  cpuidle : handle clockevent notify from the cpuidle framework
Rafael J. Wysocki, 12 years ago
commit f69e44b205
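
This series converts three ARM cpuidle drivers (imx6, OMAP4, ux500) from open-coded broadcast-timer handling to a flag-driven scheme: instead of each driver calling clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER/EXIT) around its deep idle states and switching the broadcast timer on with on_each_cpu() at init time, a driver now only tags the affected states with CPUIDLE_FLAG_TIMER_STOP and the cpuidle core issues the notifications. A minimal sketch of a driver written in the new style, assuming an ARM platform where cpu_do_idle() enters the low-power state; all names, latencies and residencies below are illustrative, not taken from any of the drivers in this merge:

#include <linux/cpuidle.h>
#include <linux/module.h>
#include <asm/proc-fns.h>

/* Hypothetical deep-idle handler: note there are no clockevents_notify()
 * calls left here; the core emits BROADCAST_ENTER/EXIT around any state
 * flagged with CPUIDLE_FLAG_TIMER_STOP. */
static int example_enter_deep(struct cpuidle_device *dev,
			      struct cpuidle_driver *drv, int index)
{
	cpu_do_idle();
	return index;
}

static struct cpuidle_driver example_idle_driver = {
	.name  = "example_idle",
	.owner = THIS_MODULE,
	.states[0] = {
		.enter            = example_enter_deep,
		.exit_latency     = 50,
		.target_residency = 75,
		.flags            = CPUIDLE_FLAG_TIME_VALID |
				    CPUIDLE_FLAG_TIMER_STOP, /* local timer stops in this state */
		.name             = "DEEP",
		.desc             = "Deep idle, clocks off",
	},
	.state_count = 1,
};

Registration is unchanged: the driver is still handed to cpuidle_register_driver(), and the core now switches the broadcast timer on per CPU when it finds a TIMER_STOP state in the table (see drivers/cpuidle/driver.c below).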

arch/arm/configs/kirkwood_defconfig (+0, -1)

@@ -56,7 +56,6 @@ CONFIG_AEABI=y
 CONFIG_ZBOOT_ROM_TEXT=0x0
 CONFIG_ZBOOT_ROM_BSS=0x0
 CONFIG_CPU_IDLE=y
-CONFIG_CPU_IDLE_KIRKWOOD=y
 CONFIG_NET=y
 CONFIG_PACKET=y
 CONFIG_UNIX=y

arch/arm/mach-imx/cpuidle-imx6q.c (+2, -21)

@@ -6,7 +6,6 @@
  * published by the Free Software Foundation.
  */
 
-#include <linux/clockchips.h>
 #include <linux/cpuidle.h>
 #include <linux/module.h>
 #include <asm/cpuidle.h>
@@ -21,10 +20,6 @@ static DEFINE_SPINLOCK(master_lock);
 static int imx6q_enter_wait(struct cpuidle_device *dev,
 			    struct cpuidle_driver *drv, int index)
 {
-	int cpu = dev->cpu;
-
-	clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &cpu);
-
 	if (atomic_inc_return(&master) == num_online_cpus()) {
 		/*
 		 * With this lock, we prevent other cpu to exit and enter
@@ -43,22 +38,10 @@ idle:
 	cpu_do_idle();
 done:
 	atomic_dec(&master);
-	clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &cpu);
 
 	return index;
 }
 
-/*
- * For each cpu, setup the broadcast timer because local timer
- * stops for the states other than WFI.
- */
-static void imx6q_setup_broadcast_timer(void *arg)
-{
-	int cpu = smp_processor_id();
-
-	clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ON, &cpu);
-}
-
 static struct cpuidle_driver imx6q_cpuidle_driver = {
 	.name = "imx6q_cpuidle",
 	.owner = THIS_MODULE,
@@ -70,7 +53,8 @@ static struct cpuidle_driver imx6q_cpuidle_driver = {
 		{
 			.exit_latency = 50,
 			.target_residency = 75,
-			.flags = CPUIDLE_FLAG_TIME_VALID,
+			.flags = CPUIDLE_FLAG_TIME_VALID |
+			         CPUIDLE_FLAG_TIMER_STOP,
 			.enter = imx6q_enter_wait,
 			.name = "WAIT",
 			.desc = "Clock off",
@@ -88,8 +72,5 @@ int __init imx6q_cpuidle_init(void)
 	/* Set chicken bit to get a reliable WAIT mode support */
 	imx6q_set_chicken_bit();
 
-	/* Configure the broadcast timer on each cpu */
-	on_each_cpu(imx6q_setup_broadcast_timer, NULL, 1);
-
 	return imx_cpuidle_init(&imx6q_cpuidle_driver);
 }

arch/arm/mach-omap2/cpuidle44xx.c (+4, -21)

@@ -14,7 +14,6 @@
 #include <linux/cpuidle.h>
 #include <linux/cpu_pm.h>
 #include <linux/export.h>
-#include <linux/clockchips.h>
 
 #include <asm/proc-fns.h>
 
@@ -82,7 +81,6 @@ static int omap4_enter_idle_coupled(struct cpuidle_device *dev,
 			int index)
 {
 	struct omap4_idle_statedata *cx = &omap4_idle_data[index];
-	int cpu_id = smp_processor_id();
 
 	local_fiq_disable();
 
@@ -109,8 +107,6 @@ static int omap4_enter_idle_coupled(struct cpuidle_device *dev,
 		}
 	}
 
-	clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &cpu_id);
-
 	/*
 	 * Call idle CPU PM enter notifier chain so that
 	 * VFP and per CPU interrupt context is saved.
@@ -152,8 +148,6 @@ static int omap4_enter_idle_coupled(struct cpuidle_device *dev,
 	if (omap4_mpuss_read_prev_context_state())
 		cpu_cluster_pm_exit();
 
-	clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &cpu_id);
-
 fail:
 	cpuidle_coupled_parallel_barrier(dev, &abort_barrier);
 	cpu_done[dev->cpu] = false;
@@ -163,16 +157,6 @@ fail:
 	return index;
 }
 
-/*
- * For each cpu, setup the broadcast timer because local timers
- * stops for the states above C1.
- */
-static void omap_setup_broadcast_timer(void *arg)
-{
-	int cpu = smp_processor_id();
-	clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ON, &cpu);
-}
-
 static DEFINE_PER_CPU(struct cpuidle_device, omap4_idle_dev);
 
 static struct cpuidle_driver omap4_idle_driver = {
@@ -193,7 +177,8 @@ static struct cpuidle_driver omap4_idle_driver = {
 			/* C2 - CPU0 OFF + CPU1 OFF + MPU CSWR */
 			.exit_latency = 328 + 440,
 			.target_residency = 960,
-			.flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_COUPLED,
+			.flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_COUPLED |
+			         CPUIDLE_FLAG_TIMER_STOP,
 			.enter = omap4_enter_idle_coupled,
 			.name = "C2",
 			.desc = "MPUSS CSWR",
@@ -202,7 +187,8 @@ static struct cpuidle_driver omap4_idle_driver = {
 			/* C3 - CPU0 OFF + CPU1 OFF + MPU OSWR */
 			.exit_latency = 460 + 518,
 			.target_residency = 1100,
-			.flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_COUPLED,
+			.flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_COUPLED |
+			         CPUIDLE_FLAG_TIMER_STOP,
 			.enter = omap4_enter_idle_coupled,
 			.name = "C3",
 			.desc = "MPUSS OSWR",
@@ -236,9 +222,6 @@ int __init omap4_idle_init(void)
 	if (!cpu_clkdm[0] || !cpu_clkdm[1])
 		return -ENODEV;
 
-	/* Configure the broadcast timer on each cpu */
-	on_each_cpu(omap_setup_broadcast_timer, NULL, 1);
-
 	for_each_cpu(cpu_id, cpu_online_mask) {
 		dev = &per_cpu(omap4_idle_dev, cpu_id);
 		dev->cpu = cpu_id;

arch/arm/mach-ux500/cpuidle.c (+2, -23)

@@ -11,7 +11,6 @@
 
 #include <linux/module.h>
 #include <linux/cpuidle.h>
-#include <linux/clockchips.h>
 #include <linux/spinlock.h>
 #include <linux/atomic.h>
 #include <linux/smp.h>
@@ -30,8 +29,6 @@ static inline int ux500_enter_idle(struct cpuidle_device *dev,
 	int this_cpu = smp_processor_id();
 	bool recouple = false;
 
-	clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &this_cpu);
-
 	if (atomic_inc_return(&master) == num_online_cpus()) {
 
 		/* With this lock, we prevent the other cpu to exit and enter
@@ -91,8 +88,6 @@ out:
 		spin_unlock(&master_lock);
 	}
 
-	clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &this_cpu);
-
 	return index;
 }
 
@@ -106,7 +101,8 @@ static struct cpuidle_driver ux500_idle_driver = {
 			.enter		  = ux500_enter_idle,
 			.exit_latency	  = 70,
 			.target_residency = 260,
-			.flags		  = CPUIDLE_FLAG_TIME_VALID,
+			.flags		  = CPUIDLE_FLAG_TIME_VALID |
+			                    CPUIDLE_FLAG_TIMER_STOP,
 			.name		  = "ApIdle",
 			.desc		  = "ARM Retention",
 		},
@@ -115,16 +111,6 @@ static struct cpuidle_driver ux500_idle_driver = {
 	.state_count = 2,
 };
 
-/*
- * For each cpu, setup the broadcast timer because we will
- * need to migrate the timers for the states >= ApIdle.
- */
-static void ux500_setup_broadcast_timer(void *arg)
-{
-	int cpu = smp_processor_id();
-	clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ON, &cpu);
-}
-
 int __init ux500_idle_init(void)
 {
 	int ret, cpu;
@@ -134,13 +120,6 @@ int __init ux500_idle_init(void)
 	prcmu_enable_wakeups(PRCMU_WAKEUP(ARM) | PRCMU_WAKEUP(RTC) |
 			     PRCMU_WAKEUP(ABB));
 
-	/*
-	 * Configure the timer broadcast for each cpu, that must
-	 * be done from the cpu context, so we use a smp cross
-	 * call with 'on_each_cpu'.
-	 */
-	on_each_cpu(ux500_setup_broadcast_timer, NULL, 1);
-
 	ret = cpuidle_register_driver(&ux500_idle_driver);
 	if (ret) {
 		printk(KERN_ERR "failed to register ux500 idle driver\n");

drivers/cpuidle/Kconfig (+0, -6)

@@ -39,10 +39,4 @@ config CPU_IDLE_CALXEDA
 	help
 	  Select this to enable cpuidle on Calxeda processors.
 
-config CPU_IDLE_KIRKWOOD
-	bool "CPU Idle Driver for Kirkwood processors"
-	depends on ARCH_KIRKWOOD
-	help
-	  Select this to enable cpuidle on Kirkwood processors.
-
 endif

drivers/cpuidle/Makefile (+1, -1)

@@ -6,4 +6,4 @@ obj-y += cpuidle.o driver.o governor.o sysfs.o governors/
 obj-$(CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED) += coupled.o
 
 obj-$(CONFIG_CPU_IDLE_CALXEDA) += cpuidle-calxeda.o
-obj-$(CONFIG_CPU_IDLE_KIRKWOOD) += cpuidle-kirkwood.o
+obj-$(CONFIG_ARCH_KIRKWOOD) += cpuidle-kirkwood.o

drivers/cpuidle/cpuidle-kirkwood.c (+3, -3)

@@ -66,9 +66,9 @@ static int kirkwood_cpuidle_probe(struct platform_device *pdev)
 	if (res == NULL)
 		return -EINVAL;
 
-	ddr_operation_base = devm_request_and_ioremap(&pdev->dev, res);
-	if (!ddr_operation_base)
-		return -EADDRNOTAVAIL;
+	ddr_operation_base = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(ddr_operation_base))
+		return PTR_ERR(ddr_operation_base);
 
 	device = &per_cpu(kirkwood_cpuidle_device, smp_processor_id());
 	device->state_count = KIRKWOOD_MAX_STATES;
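
The conversion above follows the standard devm_ioremap_resource() idiom: unlike devm_request_and_ioremap(), which returned NULL on failure, devm_ioremap_resource() returns an ERR_PTR() encoding the actual error (and logs it itself), so the caller checks IS_ERR() and propagates PTR_ERR(). A generic sketch of the pattern, with an illustrative probe function not tied to the kirkwood driver beyond the idiom:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
	struct resource *res;
	void __iomem *base;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	/* devm_ioremap_resource() also rejects a NULL resource itself and
	 * prints its own error message before returning an ERR_PTR(). */
	base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(base))
		return PTR_ERR(base);

	/* ... use 'base' ... */
	return 0;
}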

drivers/cpuidle/cpuidle.c (+9, -0)

@@ -8,6 +8,7 @@
  * This code is licenced under the GPL.
  */
 
+#include <linux/clockchips.h>
 #include <linux/kernel.h>
 #include <linux/mutex.h>
 #include <linux/sched.h>
@@ -146,12 +147,20 @@ int cpuidle_idle_call(void)
 
 	trace_cpu_idle_rcuidle(next_state, dev->cpu);
 
+	if (drv->states[next_state].flags & CPUIDLE_FLAG_TIMER_STOP)
+		clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER,
+				   &dev->cpu);
+
 	if (cpuidle_state_is_coupled(dev, drv, next_state))
 		entered_state = cpuidle_enter_state_coupled(dev, drv,
 							    next_state);
 	else
 		entered_state = cpuidle_enter_state(dev, drv, next_state);
 
+	if (drv->states[next_state].flags & CPUIDLE_FLAG_TIMER_STOP)
+		clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT,
+				   &dev->cpu);
+
 	trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, dev->cpu);
 
 	/* give the governor an opportunity to reflect on the outcome */

drivers/cpuidle/driver.c (+29, -2)

@@ -11,6 +11,8 @@
 #include <linux/mutex.h>
 #include <linux/module.h>
 #include <linux/cpuidle.h>
+#include <linux/cpumask.h>
+#include <linux/clockchips.h>
 
 #include "cpuidle.h"
 
@@ -19,9 +21,28 @@ DEFINE_SPINLOCK(cpuidle_driver_lock);
 static void __cpuidle_set_cpu_driver(struct cpuidle_driver *drv, int cpu);
 static struct cpuidle_driver * __cpuidle_get_cpu_driver(int cpu);
 
-static void __cpuidle_driver_init(struct cpuidle_driver *drv)
+static void cpuidle_setup_broadcast_timer(void *arg)
 {
+	int cpu = smp_processor_id();
+	clockevents_notify((long)(arg), &cpu);
+}
+
+static void __cpuidle_driver_init(struct cpuidle_driver *drv, int cpu)
+{
+	int i;
+
 	drv->refcnt = 0;
+
+	for (i = drv->state_count - 1; i >= 0 ; i--) {
+
+		if (!(drv->states[i].flags & CPUIDLE_FLAG_TIMER_STOP))
+			continue;
+
+		drv->bctimer = 1;
+		on_each_cpu_mask(get_cpu_mask(cpu), cpuidle_setup_broadcast_timer,
+				 (void *)CLOCK_EVT_NOTIFY_BROADCAST_ON, 1);
+		break;
+	}
 }
 
 static int __cpuidle_register_driver(struct cpuidle_driver *drv, int cpu)
@@ -35,7 +56,7 @@ static int __cpuidle_register_driver(struct cpuidle_driver *drv, int cpu)
 	if (__cpuidle_get_cpu_driver(cpu))
 		return -EBUSY;
 
-	__cpuidle_driver_init(drv);
+	__cpuidle_driver_init(drv, cpu);
 
 	__cpuidle_set_cpu_driver(drv, cpu);
 
@@ -49,6 +70,12 @@ static void __cpuidle_unregister_driver(struct cpuidle_driver *drv, int cpu)
 
 	if (!WARN_ON(drv->refcnt > 0))
 		__cpuidle_set_cpu_driver(NULL, cpu);
+
+	if (drv->bctimer) {
+		drv->bctimer = 0;
+		on_each_cpu_mask(get_cpu_mask(cpu), cpuidle_setup_broadcast_timer,
+				 (void *)CLOCK_EVT_NOTIFY_BROADCAST_OFF, 1);
+	}
 }
 
 #ifdef CONFIG_CPU_IDLE_MULTIPLE_DRIVERS
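
Note how the notify reason is passed to the cross call as the void * argument and cast back on the target CPU: cpuidle_setup_broadcast_timer() is invoked with CLOCK_EVT_NOTIFY_BROADCAST_ON at registration when at least one state in the table carries CPUIDLE_FLAG_TIMER_STOP, and with CLOCK_EVT_NOTIFY_BROADCAST_OFF at unregistration. This replaces the per-driver on_each_cpu(...BROADCAST_ON...) setup removed from the imx6, OMAP4 and ux500 drivers above.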

include/linux/clockchips.h (+16, -16)

@@ -8,6 +8,20 @@
 #ifndef _LINUX_CLOCKCHIPS_H
 #define _LINUX_CLOCKCHIPS_H
 
+/* Clock event notification values */
+enum clock_event_nofitiers {
+	CLOCK_EVT_NOTIFY_ADD,
+	CLOCK_EVT_NOTIFY_BROADCAST_ON,
+	CLOCK_EVT_NOTIFY_BROADCAST_OFF,
+	CLOCK_EVT_NOTIFY_BROADCAST_FORCE,
+	CLOCK_EVT_NOTIFY_BROADCAST_ENTER,
+	CLOCK_EVT_NOTIFY_BROADCAST_EXIT,
+	CLOCK_EVT_NOTIFY_SUSPEND,
+	CLOCK_EVT_NOTIFY_RESUME,
+	CLOCK_EVT_NOTIFY_CPU_DYING,
+	CLOCK_EVT_NOTIFY_CPU_DEAD,
+};
+
 #ifdef CONFIG_GENERIC_CLOCKEVENTS_BUILD
 
 #include <linux/clocksource.h>
@@ -26,20 +40,6 @@ enum clock_event_mode {
 	CLOCK_EVT_MODE_RESUME,
 };
 
-/* Clock event notification values */
-enum clock_event_nofitiers {
-	CLOCK_EVT_NOTIFY_ADD,
-	CLOCK_EVT_NOTIFY_BROADCAST_ON,
-	CLOCK_EVT_NOTIFY_BROADCAST_OFF,
-	CLOCK_EVT_NOTIFY_BROADCAST_FORCE,
-	CLOCK_EVT_NOTIFY_BROADCAST_ENTER,
-	CLOCK_EVT_NOTIFY_BROADCAST_EXIT,
-	CLOCK_EVT_NOTIFY_SUSPEND,
-	CLOCK_EVT_NOTIFY_RESUME,
-	CLOCK_EVT_NOTIFY_CPU_DYING,
-	CLOCK_EVT_NOTIFY_CPU_DEAD,
-};
-
 /*
  * Clock event features
  */
@@ -173,7 +173,7 @@ extern int tick_receive_broadcast(void);
 #ifdef CONFIG_GENERIC_CLOCKEVENTS
 extern void clockevents_notify(unsigned long reason, void *arg);
 #else
-# define clockevents_notify(reason, arg) do { } while (0)
+static inline void clockevents_notify(unsigned long reason, void *arg) {}
 #endif
 
 #else /* CONFIG_GENERIC_CLOCKEVENTS_BUILD */
@@ -181,7 +181,7 @@ extern void clockevents_notify(unsigned long reason, void *arg);
 static inline void clockevents_suspend(void) {}
 static inline void clockevents_resume(void) {}
 
-#define clockevents_notify(reason, arg) do { } while (0)
+static inline void clockevents_notify(unsigned long reason, void *arg) {}
 
 #endif
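
Two small cleanups ride along here: the CLOCK_EVT_NOTIFY_* enum moves above the CONFIG_GENERIC_CLOCKEVENTS_BUILD guard so that generic code such as the new cpuidle core path can reference the constants regardless of how clockevents are configured, and the empty do-while macro stubs for clockevents_notify() become empty static inline functions, which keep the arguments type-checked and evaluated when the real implementation is compiled out. An illustrative caller (not part of the patch) showing why that matters, assuming CONFIG_GENERIC_CLOCKEVENTS is disabled:

#include <linux/clockchips.h>
#include <linux/smp.h>

static void example_broadcast_enter(void)
{
	int cpu = smp_processor_id();

	/* With the old macro stub this call expanded to nothing, so 'cpu'
	 * was a set-but-unused variable and the notify constant did not
	 * even have to be defined; with the static inline stub the
	 * arguments are still used and type-checked, so the same caller
	 * builds cleanly with or without clockevents support. */
	clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &cpu);
}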
 

include/linux/cpuidle.h (+3, -0)

@@ -57,6 +57,7 @@ struct cpuidle_state {
 /* Idle State Flags */
 #define CPUIDLE_FLAG_TIME_VALID	(0x01) /* is residency time measurable? */
 #define CPUIDLE_FLAG_COUPLED	(0x02) /* state applies to multiple cpus */
+#define CPUIDLE_FLAG_TIMER_STOP (0x04)  /* timer is stopped on this state */
 
 #define CPUIDLE_DRIVER_FLAGS_MASK (0xFFFF0000)
 
@@ -106,6 +107,8 @@ struct cpuidle_driver {
 
 	/* set to 1 to use the core cpuidle time keeping (for all states). */
 	unsigned int		en_core_tk_irqen:1;
+        /* used by the cpuidle framework to setup the broadcast timer */
+	unsigned int            bctimer:1;
 	/* states array must be ordered in decreasing power consumption */
 	struct cpuidle_state	states[CPUIDLE_STATE_MAX];
 	int			state_count;