cpuidle44xx.c

/*
 * OMAP4 CPU idle Routines
 *
 * Copyright (C) 2011 Texas Instruments, Inc.
 * Santosh Shilimkar <santosh.shilimkar@ti.com>
 * Rajendra Nayak <rnayak@ti.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/sched.h>
#include <linux/cpuidle.h>
#include <linux/cpu_pm.h>
#include <linux/export.h>
#include <linux/clockchips.h>

#include <asm/proc-fns.h>

#include "common.h"
#include "pm.h"
#include "prm.h"
#include "clockdomain.h"

#ifdef CONFIG_CPU_IDLE

/* Machine specific information */
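/*
 * Each entry describes one C-state: cpu_state is the power state
 * programmed for the CPUx power domain (used by omap4_enter_lowpower()),
 * mpu_state is the target MPU power domain state, and mpu_logic_state
 * is the MPU logic state applied when the domain goes to retention.
 */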
struct omap4_idle_statedata {
        u32 cpu_state;
        u32 mpu_logic_state;
        u32 mpu_state;
};

static struct omap4_idle_statedata omap4_idle_data[] = {
        {
                .cpu_state = PWRDM_POWER_ON,
                .mpu_state = PWRDM_POWER_ON,
                .mpu_logic_state = PWRDM_POWER_RET,
        },
        {
                .cpu_state = PWRDM_POWER_OFF,
                .mpu_state = PWRDM_POWER_RET,
                .mpu_logic_state = PWRDM_POWER_RET,
        },
        {
                .cpu_state = PWRDM_POWER_OFF,
                .mpu_state = PWRDM_POWER_RET,
                .mpu_logic_state = PWRDM_POWER_OFF,
        },
};
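
/*
 * The entries above map one-to-one, by index, onto the C1 (MPUSS ON),
 * C2 (MPUSS CSWR) and C3 (MPUSS OSWR) states registered in
 * omap4_idle_driver below; the cpuidle state index is used directly as
 * an index into this table.
 */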
static struct powerdomain *mpu_pd, *cpu_pd[NR_CPUS];
static struct clockdomain *cpu_clkdm[NR_CPUS];

/**
 * omap4_enter_idle_[simple/coupled] - OMAP4 cpuidle entry functions
 * @dev: cpuidle device
 * @drv: cpuidle driver
 * @index: the index of state to be entered
 *
 * Called from the CPUidle framework to program the device to the
 * specified low power state selected by the governor.
 * Returns the index of the entered low power state.
 */
static int omap4_enter_idle_simple(struct cpuidle_device *dev,
                        struct cpuidle_driver *drv,
                        int index)
{
        local_fiq_disable();
        omap_do_wfi();
        local_fiq_enable();

        return index;
}

static int omap4_enter_idle_coupled(struct cpuidle_device *dev,
                        struct cpuidle_driver *drv,
                        int index)
{
        struct omap4_idle_statedata *cx = &omap4_idle_data[index];
        int cpu_id = smp_processor_id();

        local_fiq_disable();

        /*
         * CPU0 has to wait and stay ON until CPU1 is in OFF state.
         * This is necessary to honour the hardware recommendation
         * of triggering all the possible low power modes once CPU1 is
         * out of coherency and in OFF mode.
         */
        if (dev->cpu == 0 && cpumask_test_cpu(1, cpu_online_mask)) {
                while (pwrdm_read_pwrst(cpu_pd[1]) != PWRDM_POWER_OFF)
                        cpu_relax();
        }
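
        /*
         * The CPU-local timers stop in these low power states, so hand
         * timekeeping over to the broadcast clockevent device for the
         * duration of the idle transition.
         */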
        clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &cpu_id);

        /*
         * Call idle CPU PM enter notifier chain so that
         * VFP and per CPU interrupt context is saved.
         */
        cpu_pm_enter();

        if (dev->cpu == 0) {
                pwrdm_set_logic_retst(mpu_pd, cx->mpu_logic_state);
                omap_set_pwrdm_state(mpu_pd, cx->mpu_state);

                /*
                 * Call idle CPU cluster PM enter notifier chain
                 * to save GIC and wakeupgen context.
                 */
                if ((cx->mpu_state == PWRDM_POWER_RET) &&
                        (cx->mpu_logic_state == PWRDM_POWER_OFF))
                        cpu_cluster_pm_enter();
        }

        omap4_enter_lowpower(dev->cpu, cx->cpu_state);

        /* Wakeup CPU1 only if it is not offlined */
        if (dev->cpu == 0 && cpumask_test_cpu(1, cpu_online_mask)) {
                clkdm_wakeup(cpu_clkdm[1]);
                clkdm_allow_idle(cpu_clkdm[1]);
        }

        /*
         * Call idle CPU PM exit notifier chain to restore
         * VFP and per CPU IRQ context.
         */
        cpu_pm_exit();

        /*
         * Call idle CPU cluster PM exit notifier chain
         * to restore GIC and wakeupgen context.
         */
        if (omap4_mpuss_read_prev_context_state())
                cpu_cluster_pm_exit();

        clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &cpu_id);

        local_fiq_enable();

        return index;
}

DEFINE_PER_CPU(struct cpuidle_device, omap4_idle_dev);

struct cpuidle_driver omap4_idle_driver = {
        .name = "omap4_idle",
        .owner = THIS_MODULE,
        .en_core_tk_irqen = 1,
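        /* exit_latency and target_residency below are in microseconds */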
        .states = {
                {
                        /* C1 - CPU0 ON + CPU1 ON + MPU ON */
                        .exit_latency = 2 + 2,
                        .target_residency = 5,
                        .flags = CPUIDLE_FLAG_TIME_VALID,
                        .enter = omap4_enter_idle_simple,
                        .name = "C1",
                        .desc = "MPUSS ON"
                },
                {
                        /* C2 - CPU0 OFF + CPU1 OFF + MPU CSWR */
                        .exit_latency = 328 + 440,
                        .target_residency = 960,
                        .flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_COUPLED,
                        .enter = omap4_enter_idle_coupled,
                        .name = "C2",
                        .desc = "MPUSS CSWR",
                },
                {
                        /* C3 - CPU0 OFF + CPU1 OFF + MPU OSWR */
                        .exit_latency = 460 + 518,
                        .target_residency = 1100,
                        .flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_COUPLED,
                        .enter = omap4_enter_idle_coupled,
                        .name = "C3",
                        .desc = "MPUSS OSWR",
                },
        },
        .state_count = ARRAY_SIZE(omap4_idle_data),
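        /*
         * C1 (plain WFI) is the safe state: a CPU can fall back to it
         * while the coupled C2/C3 states are still being negotiated
         * with the other CPU.
         */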
        .safe_state_index = 0,
};

/**
 * omap4_idle_init - Init routine for OMAP4 idle
 *
 * Registers the OMAP4 specific cpuidle driver to the cpuidle
 * framework with the valid set of states.
 */
int __init omap4_idle_init(void)
{
        struct cpuidle_device *dev;
        unsigned int cpu_id = 0;

        mpu_pd = pwrdm_lookup("mpu_pwrdm");
        cpu_pd[0] = pwrdm_lookup("cpu0_pwrdm");
        cpu_pd[1] = pwrdm_lookup("cpu1_pwrdm");
        if ((!mpu_pd) || (!cpu_pd[0]) || (!cpu_pd[1]))
                return -ENODEV;

        cpu_clkdm[0] = clkdm_lookup("mpu0_clkdm");
        cpu_clkdm[1] = clkdm_lookup("mpu1_clkdm");
        if (!cpu_clkdm[0] || !cpu_clkdm[1])
                return -ENODEV;

        for_each_cpu(cpu_id, cpu_online_mask) {
                dev = &per_cpu(omap4_idle_dev, cpu_id);
                dev->cpu = cpu_id;
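                /*
                 * Couple each device with every CPU online at init time:
                 * the coupled C2/C3 states are only entered once all
                 * coupled CPUs have requested them.
                 */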
                dev->coupled_cpus = *cpu_online_mask;

                cpuidle_register_driver(&omap4_idle_driver);

                if (cpuidle_register_device(dev)) {
                        pr_err("%s: CPUidle register failed\n", __func__);
                        return -EIO;
                }
        }

        return 0;
}

#else

int __init omap4_idle_init(void)
{
        return 0;
}

#endif /* CONFIG_CPU_IDLE */