/* cpuidle.c - cpuidle driver for the ST-Ericsson ux500 (db8500) platform */
  1. /*
  2. * Copyright (c) 2012 Linaro : Daniel Lezcano <daniel.lezcano@linaro.org> (IBM)
  3. *
  4. * Based on the work of Rickard Andersson <rickard.andersson@stericsson.com>
  5. * and Jonas Aaberg <jonas.aberg@stericsson.com>.
  6. *
  7. * This program is free software; you can redistribute it and/or modify
  8. * it under the terms of the GNU General Public License version 2 as
  9. * published by the Free Software Foundation.
  10. */
  11. #include <linux/module.h>
  12. #include <linux/cpuidle.h>
  13. #include <linux/clockchips.h>
  14. #include <linux/spinlock.h>
  15. #include <linux/atomic.h>
  16. #include <linux/smp.h>
  17. #include <linux/mfd/dbx500-prcmu.h>
  18. #include <asm/cpuidle.h>
  19. #include <asm/proc-fns.h>
/* Count of cpus currently inside the idle routine; the last one in
 * (when the count reaches num_online_cpus()) tries to become the
 * retention "master" in ux500_enter_idle(). */
static atomic_t master = ATOMIC_INIT(0);
/* Serializes the master role between the cores while the gic is
 * decoupled and the retention state is being programmed. */
static DEFINE_SPINLOCK(master_lock);
/* Per-cpu cpuidle device, registered in ux500_idle_init(). */
static DEFINE_PER_CPU(struct cpuidle_device, ux500_cpuidle_device);
/*
 * ux500_enter_idle - enter the ApIdle (ARM retention) cpuidle state
 * @dev:   cpuidle device of the current cpu
 * @drv:   cpuidle driver
 * @index: index of the requested state in drv->states
 *
 * Both cpus race via the 'master' counter: the last cpu to enter tries
 * to take master_lock and program the retention state through the
 * PRCMU; any other cpu (or a master that fails partway) just does WFI.
 * Returns @index.
 */
static inline int ux500_enter_idle(struct cpuidle_device *dev,
			struct cpuidle_driver *drv, int index)
{
	int this_cpu = smp_processor_id();
	bool recouple = false;

	/* Local timers stop in retention: switch to the broadcast timer. */
	clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &this_cpu);

	if (atomic_inc_return(&master) == num_online_cpus()) {

		/* With this lock, we prevent the other cpu to exit and enter
		 * this function again and become the master */
		if (!spin_trylock(&master_lock))
			goto wfi;

		/* decouple the gic from the A9 cores */
		if (prcmu_gic_decouple()) {
			spin_unlock(&master_lock);
			goto out;
		}

		/* If an error occur, we will have to recouple the gic
		 * manually */
		recouple = true;

		/* At this state, as the gic is decoupled, if the other
		 * cpu is in WFI, we have the guarantee it won't be wake
		 * up, so we can safely go to retention */
		if (!prcmu_is_cpu_in_wfi(this_cpu ? 0 : 1))
			goto out;

		/* The prcmu will be in charge of watching the interrupts
		 * and wake up the cpus */
		if (prcmu_copy_gic_settings())
			goto out;

		/* Check in the meantime an interrupt did
		 * not occur on the gic ... */
		if (prcmu_gic_pending_irq())
			goto out;

		/* ... and the prcmu */
		if (prcmu_pending_irq())
			goto out;

		/* Go to the retention state, the prcmu will wait for the
		 * cpu to go WFI and this is what happens after exiting this
		 * 'master' critical section */
		if (prcmu_set_power_state(PRCMU_AP_IDLE, true, true))
			goto out;

		/* When we switch to retention, the prcmu is in charge
		 * of recoupling the gic automatically */
		recouple = false;

		spin_unlock(&master_lock);
	}

wfi:
	cpu_do_idle();
out:
	atomic_dec(&master);

	/* Only the error paths reached after a successful decouple leave
	 * recouple set; on those paths master_lock is still held, so
	 * undo the decouple and release the lock here. */
	if (recouple) {
		prcmu_gic_recouple();
		spin_unlock(&master_lock);
	}

	clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &this_cpu);

	return index;
}
  79. static struct cpuidle_driver ux500_idle_driver = {
  80. .name = "ux500_idle",
  81. .owner = THIS_MODULE,
  82. .en_core_tk_irqen = 1,
  83. .states = {
  84. ARM_CPUIDLE_WFI_STATE,
  85. {
  86. .enter = ux500_enter_idle,
  87. .exit_latency = 70,
  88. .target_residency = 260,
  89. .flags = CPUIDLE_FLAG_TIME_VALID,
  90. .name = "ApIdle",
  91. .desc = "ARM Retention",
  92. },
  93. },
  94. .safe_state_index = 0,
  95. .state_count = 2,
  96. };
  97. /*
  98. * For each cpu, setup the broadcast timer because we will
  99. * need to migrate the timers for the states >= ApIdle.
  100. */
  101. static void ux500_setup_broadcast_timer(void *arg)
  102. {
  103. int cpu = smp_processor_id();
  104. clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ON, &cpu);
  105. }
  106. int __init ux500_idle_init(void)
  107. {
  108. int ret, cpu;
  109. struct cpuidle_device *device;
  110. /* Configure wake up reasons */
  111. prcmu_enable_wakeups(PRCMU_WAKEUP(ARM) | PRCMU_WAKEUP(RTC) |
  112. PRCMU_WAKEUP(ABB));
  113. /*
  114. * Configure the timer broadcast for each cpu, that must
  115. * be done from the cpu context, so we use a smp cross
  116. * call with 'on_each_cpu'.
  117. */
  118. on_each_cpu(ux500_setup_broadcast_timer, NULL, 1);
  119. ret = cpuidle_register_driver(&ux500_idle_driver);
  120. if (ret) {
  121. printk(KERN_ERR "failed to register ux500 idle driver\n");
  122. return ret;
  123. }
  124. for_each_online_cpu(cpu) {
  125. device = &per_cpu(ux500_cpuidle_device, cpu);
  126. device->cpu = cpu;
  127. ret = cpuidle_register_device(device);
  128. if (ret) {
  129. printk(KERN_ERR "Failed to register cpuidle "
  130. "device for cpu%d\n", cpu);
  131. goto out_unregister;
  132. }
  133. }
  134. out:
  135. return ret;
  136. out_unregister:
  137. for_each_online_cpu(cpu) {
  138. device = &per_cpu(ux500_cpuidle_device, cpu);
  139. cpuidle_unregister_device(device);
  140. }
  141. cpuidle_unregister_driver(&ux500_idle_driver);
  142. goto out;
  143. }
  144. device_initcall(ux500_idle_init);