/*
 * OMAP4+ CPU idle Routines
 *
 * Copyright (C) 2011-2013 Texas Instruments, Inc.
 * Santosh Shilimkar <santosh.shilimkar@ti.com>
 * Rajendra Nayak <rnayak@ti.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
  12. #include <linux/sched.h>
  13. #include <linux/cpuidle.h>
  14. #include <linux/cpu_pm.h>
  15. #include <linux/export.h>
  16. #include <asm/proc-fns.h>
  17. #include "common.h"
  18. #include "pm.h"
  19. #include "prm.h"
  20. #include "clockdomain.h"
/* Machine specific information */
struct idle_statedata {
	u32 cpu_state;		/* target power state for the CPUx power domain */
	u32 mpu_logic_state;	/* MPUSS logic state (programmed via pwrdm_set_logic_retst) */
	u32 mpu_state;		/* target power state for the MPUSS power domain */
};
/*
 * Power-domain targets for each C-state, indexed to match
 * omap4_idle_driver.states[] below (C1, C2, C3).
 */
static struct idle_statedata omap4_idle_data[] = {
	{
		/* C1: CPU ON, MPUSS ON (logic retention programmed) */
		.cpu_state = PWRDM_POWER_ON,
		.mpu_state = PWRDM_POWER_ON,
		.mpu_logic_state = PWRDM_POWER_RET,
	},
	{
		/* C2: CPU OFF, MPUSS RET with logic retained (CSWR) */
		.cpu_state = PWRDM_POWER_OFF,
		.mpu_state = PWRDM_POWER_RET,
		.mpu_logic_state = PWRDM_POWER_RET,
	},
	{
		/* C3: CPU OFF, MPUSS RET with logic off (OSWR) */
		.cpu_state = PWRDM_POWER_OFF,
		.mpu_state = PWRDM_POWER_RET,
		.mpu_logic_state = PWRDM_POWER_OFF,
	},
};
static struct powerdomain *mpu_pd, *cpu_pd[NR_CPUS];	/* MPUSS + per-CPU power domains */
static struct clockdomain *cpu_clkdm[NR_CPUS];		/* per-CPU clock domains */
static atomic_t abort_barrier;				/* barrier for the coupled-idle fail path */
static bool cpu_done[NR_CPUS];				/* set after a CPU ran its low-power entry */
static struct idle_statedata *state_ptr = &omap4_idle_data[0];	/* base of the C-state table */
/* Private functions */

/**
 * omap_enter_idle_[simple/coupled] - OMAP4PLUS cpuidle entry functions
 * @dev: cpuidle device
 * @drv: cpuidle driver
 * @index: the index of state to be entered
 *
 * Called from the CPUidle framework to program the device to the
 * specified low power state selected by the governor.
 * Returns the index of the low power state entered.
 */
static int omap_enter_idle_simple(struct cpuidle_device *dev,
			struct cpuidle_driver *drv,
			int index)
{
	/*
	 * Shallow C1 state: plain WFI with FIQs masked around it.
	 * No power-domain reprogramming is done here.
	 */
	local_fiq_disable();
	omap_do_wfi();
	local_fiq_enable();

	return index;
}
static int omap_enter_idle_coupled(struct cpuidle_device *dev,
			struct cpuidle_driver *drv,
			int index)
{
	/* Look up the power-domain targets for the requested C-state */
	struct idle_statedata *cx = state_ptr + index;

	local_fiq_disable();

	/*
	 * CPU0 has to wait and stay ON until CPU1 is in OFF state.
	 * This is necessary to honour the hardware recommendation
	 * of triggering all the possible low power modes once CPU1 is
	 * out of coherency and in OFF mode.
	 */
	if (dev->cpu == 0 && cpumask_test_cpu(1, cpu_online_mask)) {
		while (pwrdm_read_pwrst(cpu_pd[1]) != PWRDM_POWER_OFF) {
			cpu_relax();

			/*
			 * CPU1 could have already entered & exited idle
			 * without hitting off because of a wakeup
			 * or a failed attempt to hit off mode. Check for
			 * that here, otherwise we could spin forever
			 * waiting for CPU1 off.
			 */
			if (cpu_done[1])
				goto fail;

		}
	}

	/*
	 * Call idle CPU PM enter notifier chain so that
	 * VFP and per CPU interrupt context is saved.
	 */
	cpu_pm_enter();

	if (dev->cpu == 0) {
		/* Only CPU0 programs the shared MPUSS power domain */
		pwrdm_set_logic_retst(mpu_pd, cx->mpu_logic_state);
		omap_set_pwrdm_state(mpu_pd, cx->mpu_state);

		/*
		 * Call idle CPU cluster PM enter notifier chain
		 * to save GIC and wakeupgen context.
		 * (Only for the OSWR state: MPUSS RET with logic OFF.)
		 */
		if ((cx->mpu_state == PWRDM_POWER_RET) &&
			(cx->mpu_logic_state == PWRDM_POWER_OFF))
				cpu_cluster_pm_enter();
	}

	omap4_enter_lowpower(dev->cpu, cx->cpu_state);
	/* Mark this CPU as done so the other CPU stops waiting on us */
	cpu_done[dev->cpu] = true;

	/* Wakeup CPU1 only if it is not offlined */
	if (dev->cpu == 0 && cpumask_test_cpu(1, cpu_online_mask)) {
		clkdm_wakeup(cpu_clkdm[1]);
		clkdm_allow_idle(cpu_clkdm[1]);
	}

	/*
	 * Call idle CPU PM exit notifier chain to restore
	 * VFP and per CPU IRQ context.
	 */
	cpu_pm_exit();

	/*
	 * Call idle CPU cluster PM exit notifier chain
	 * to restore GIC and wakeupgen context.
	 */
	if ((cx->mpu_state == PWRDM_POWER_RET) &&
		(cx->mpu_logic_state == PWRDM_POWER_OFF))
		cpu_cluster_pm_exit();

fail:
	/* Both CPUs rendezvous here before re-arming cpu_done[] */
	cpuidle_coupled_parallel_barrier(dev, &abort_barrier);
	cpu_done[dev->cpu] = false;

	local_fiq_enable();

	return index;
}
/* One cpuidle device per CPU */
static DEFINE_PER_CPU(struct cpuidle_device, omap_idle_dev);
/*
 * C-state table; state N here corresponds to omap4_idle_data[N].
 * Latencies/residencies are in microseconds.
 */
static struct cpuidle_driver omap4_idle_driver = {
	.name				= "omap4_idle",
	.owner				= THIS_MODULE,
	.states = {
		{
			/* C1 - CPU0 ON + CPU1 ON + MPU ON */
			.exit_latency = 2 + 2,
			.target_residency = 5,
			.flags = CPUIDLE_FLAG_TIME_VALID,
			.enter = omap_enter_idle_simple,
			.name = "C1",
			.desc = "CPUx ON, MPUSS ON"
		},
		{
			/* C2 - CPU0 OFF + CPU1 OFF + MPU CSWR */
			.exit_latency = 328 + 440,
			.target_residency = 960,
			.flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_COUPLED |
				 CPUIDLE_FLAG_TIMER_STOP,
			.enter = omap_enter_idle_coupled,
			.name = "C2",
			.desc = "CPUx OFF, MPUSS CSWR",
		},
		{
			/* C3 - CPU0 OFF + CPU1 OFF + MPU OSWR */
			.exit_latency = 460 + 518,
			.target_residency = 1100,
			.flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_COUPLED |
				 CPUIDLE_FLAG_TIMER_STOP,
			.enter = omap_enter_idle_coupled,
			.name = "C3",
			.desc = "CPUx OFF, MPUSS OSWR",
		},
	},
	.state_count = ARRAY_SIZE(omap4_idle_data),
	.safe_state_index = 0,	/* C1 is the fallback when coupled entry aborts */
};
/* Public functions */

/**
 * omap4_idle_init - Init routine for OMAP4+ idle
 *
 * Registers the OMAP4+ specific cpuidle driver to the cpuidle
 * framework with the valid set of states.
 */
  181. int __init omap4_idle_init(void)
  182. {
  183. struct cpuidle_device *dev;
  184. unsigned int cpu_id = 0;
  185. mpu_pd = pwrdm_lookup("mpu_pwrdm");
  186. cpu_pd[0] = pwrdm_lookup("cpu0_pwrdm");
  187. cpu_pd[1] = pwrdm_lookup("cpu1_pwrdm");
  188. if ((!mpu_pd) || (!cpu_pd[0]) || (!cpu_pd[1]))
  189. return -ENODEV;
  190. cpu_clkdm[0] = clkdm_lookup("mpu0_clkdm");
  191. cpu_clkdm[1] = clkdm_lookup("mpu1_clkdm");
  192. if (!cpu_clkdm[0] || !cpu_clkdm[1])
  193. return -ENODEV;
  194. if (cpuidle_register_driver(&omap4_idle_driver)) {
  195. pr_err("%s: CPUidle driver register failed\n", __func__);
  196. return -EIO;
  197. }
  198. for_each_cpu(cpu_id, cpu_online_mask) {
  199. dev = &per_cpu(omap_idle_dev, cpu_id);
  200. dev->cpu = cpu_id;
  201. #ifdef CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED
  202. dev->coupled_cpus = *cpu_online_mask;
  203. #endif
  204. if (cpuidle_register_device(dev)) {
  205. pr_err("%s: CPUidle register failed\n", __func__);
  206. cpuidle_unregister_driver(&omap4_idle_driver);
  207. return -EIO;
  208. }
  209. }
  210. return 0;
  211. }