cpuidle.c

/*
 * arch/arm/mach-kirkwood/cpuidle.c
 *
 * CPU idle for Marvell Kirkwood SoCs
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2. This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 *
 * The CPU idle code uses wait-for-interrupt and DDR self refresh
 * to implement two idle states:
 * #1 wait-for-interrupt
 * #2 wait-for-interrupt and DDR self refresh
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/cpuidle.h>
#include <linux/io.h>
#include <asm/proc-fns.h>
#include <mach/kirkwood.h>

#define KIRKWOOD_MAX_STATES	2

static struct cpuidle_driver kirkwood_idle_driver = {
	.name  = "kirkwood_idle",
	.owner = THIS_MODULE,
};
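
/* One cpuidle device per CPU; only the boot CPU's instance is registered below */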
static DEFINE_PER_CPU(struct cpuidle_device, kirkwood_cpuidle_device);

/* Actual code that puts the SoC in different idle states */
static int kirkwood_enter_idle(struct cpuidle_device *dev,
			       struct cpuidle_state *state)
{
	struct timeval before, after;
	int idle_time;

	local_irq_disable();
	do_gettimeofday(&before);
	if (state == &dev->states[0])
		/* Wait for interrupt state */
		cpu_do_idle();
	else if (state == &dev->states[1]) {
		/*
		 * The following write puts the DDR in self refresh.
		 * Note that we have 256 cycles before the DDR puts itself
		 * into self-refresh, so the wait-for-interrupt call
		 * afterwards won't take the DDR out of self-refresh mode.
		 */
		writel(0x7, DDR_OPERATION_BASE);
		cpu_do_idle();
	}
	do_gettimeofday(&after);
	local_irq_enable();
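	/* The .enter handler returns the time spent in the state, in microseconds */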
	idle_time = (after.tv_sec - before.tv_sec) * USEC_PER_SEC +
			(after.tv_usec - before.tv_usec);
	return idle_time;
}

/* Initialize CPU idle by registering the idle states */
static int kirkwood_init_cpuidle(void)
{
	struct cpuidle_device *device;

	cpuidle_register_driver(&kirkwood_idle_driver);

	device = &per_cpu(kirkwood_cpuidle_device, smp_processor_id());
	device->state_count = KIRKWOOD_MAX_STATES;
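
	/* exit_latency and target_residency below are expressed in microseconds */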
	/* Wait for interrupt state */
	device->states[0].enter = kirkwood_enter_idle;
	device->states[0].exit_latency = 1;
	device->states[0].target_residency = 10000;
	device->states[0].flags = CPUIDLE_FLAG_TIME_VALID;
	strcpy(device->states[0].name, "WFI");
	strcpy(device->states[0].desc, "Wait for interrupt");

	/* Wait for interrupt and DDR self refresh state */
	device->states[1].enter = kirkwood_enter_idle;
	device->states[1].exit_latency = 10;
	device->states[1].target_residency = 10000;
	device->states[1].flags = CPUIDLE_FLAG_TIME_VALID;
	strcpy(device->states[1].name, "DDR SR");
	strcpy(device->states[1].desc, "WFI and DDR Self Refresh");
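
	/* Hand the device to the cpuidle core so the governor can start using the states */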
	if (cpuidle_register_device(device)) {
		printk(KERN_ERR "kirkwood_init_cpuidle: Failed registering\n");
		return -EIO;
	}
	return 0;
}

device_initcall(kirkwood_init_cpuidle);
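
For comparison, below is a minimal sketch of how the same two idle states could be written against the later cpuidle API, in which the states are declared inside struct cpuidle_driver, .enter receives a state index, and the core does the residency timekeeping itself. It assumes ARM_CPUIDLE_WFI_STATE and cpuidle_register() are available in the target kernel and that DDR_OPERATION_BASE is defined as in the file above; the identifiers ending in _sketch are made up for illustration, and this is not the upstream replacement driver.

/* Sketch: same two states using the driver-side state table of the newer cpuidle API */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/cpuidle.h>
#include <linux/io.h>
#include <asm/cpuidle.h>
#include <asm/proc-fns.h>
#include <mach/kirkwood.h>

static int kirkwood_enter_ddr_sr(struct cpuidle_device *dev,
				 struct cpuidle_driver *drv, int index)
{
	/* Arm DDR self refresh, then wait for an interrupt */
	writel(0x7, DDR_OPERATION_BASE);
	cpu_do_idle();
	return index;
}

static struct cpuidle_driver kirkwood_idle_driver_sketch = {
	.name		= "kirkwood_idle",
	.owner		= THIS_MODULE,
	/* Plain WFI state as provided by <asm/cpuidle.h> */
	.states[0]	= ARM_CPUIDLE_WFI_STATE,
	.states[1]	= {
		.enter			= kirkwood_enter_ddr_sr,
		.exit_latency		= 10,
		.target_residency	= 10000,
		/* No CPUIDLE_FLAG_TIME_VALID here; the core measures residency itself */
		.name			= "DDR SR",
		.desc			= "WFI and DDR Self Refresh",
	},
	.state_count	= 2,
};

static int __init kirkwood_cpuidle_sketch_init(void)
{
	/* Registers the driver and a cpuidle device for each online CPU */
	return cpuidle_register(&kirkwood_idle_driver_sketch, NULL);
}
device_initcall(kirkwood_cpuidle_sketch_init);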