/*
 * ladder.c - the residency ladder algorithm
 *
 * Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
 * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
 * Copyright (C) 2004, 2005 Dominik Brodowski <linux@brodo.de>
 *
 * (C) 2006-2007 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
 *               Shaohua Li <shaohua.li@intel.com>
 *               Adam Belay <abelay@novell.com>
 *
 * This code is licenced under the GPL.
 */
#include <linux/kernel.h>
#include <linux/cpuidle.h>
#include <linux/pm_qos_params.h>
#include <linux/moduleparam.h>
#include <linux/jiffies.h>

#include <asm/io.h>
#include <asm/uaccess.h>

#define PROMOTION_COUNT 4
#define DEMOTION_COUNT 1
  23. struct ladder_device_state {
  24. struct {
  25. u32 promotion_count;
  26. u32 demotion_count;
  27. u32 promotion_time;
  28. u32 demotion_time;
  29. } threshold;
  30. struct {
  31. int promotion_count;
  32. int demotion_count;
  33. } stats;
  34. };
  35. struct ladder_device {
  36. struct ladder_device_state states[CPUIDLE_STATE_MAX];
  37. int last_state_idx;
  38. };
  39. static DEFINE_PER_CPU(struct ladder_device, ladder_devices);
  40. /**
  41. * ladder_do_selection - prepares private data for a state change
  42. * @ldev: the ladder device
  43. * @old_idx: the current state index
  44. * @new_idx: the new target state index
  45. */
  46. static inline void ladder_do_selection(struct ladder_device *ldev,
  47. int old_idx, int new_idx)
  48. {
  49. ldev->states[old_idx].stats.promotion_count = 0;
  50. ldev->states[old_idx].stats.demotion_count = 0;
  51. ldev->last_state_idx = new_idx;
  52. }
  53. /**
  54. * ladder_select_state - selects the next state to enter
  55. * @dev: the CPU
  56. */
  57. static int ladder_select_state(struct cpuidle_device *dev)
  58. {
  59. struct ladder_device *ldev = &__get_cpu_var(ladder_devices);
  60. struct ladder_device_state *last_state;
  61. int last_residency, last_idx = ldev->last_state_idx;
  62. int latency_req = pm_qos_requirement(PM_QOS_CPU_DMA_LATENCY);
  63. if (unlikely(!ldev))
  64. return 0;
  65. /* Special case when user has set very strict latency requirement */
  66. if (unlikely(latency_req == 0)) {
  67. ladder_do_selection(ldev, last_idx, 0);
  68. return 0;
  69. }
  70. last_state = &ldev->states[last_idx];
  71. if (dev->states[last_idx].flags & CPUIDLE_FLAG_TIME_VALID)
  72. last_residency = cpuidle_get_last_residency(dev) - dev->states[last_idx].exit_latency;
  73. else
  74. last_residency = last_state->threshold.promotion_time + 1;
  75. /* consider promotion */
  76. if (last_idx < dev->state_count - 1 &&
  77. last_residency > last_state->threshold.promotion_time &&
  78. dev->states[last_idx + 1].exit_latency <= latency_req) {
  79. last_state->stats.promotion_count++;
  80. last_state->stats.demotion_count = 0;
  81. if (last_state->stats.promotion_count >= last_state->threshold.promotion_count) {
  82. ladder_do_selection(ldev, last_idx, last_idx + 1);
  83. return last_idx + 1;
  84. }
  85. }
  86. /* consider demotion */
  87. if (last_idx > CPUIDLE_DRIVER_STATE_START &&
  88. dev->states[last_idx].exit_latency > latency_req) {
  89. int i;
  90. for (i = last_idx - 1; i > CPUIDLE_DRIVER_STATE_START; i--) {
  91. if (dev->states[i].exit_latency <= latency_req)
  92. break;
  93. }
  94. ladder_do_selection(ldev, last_idx, i);
  95. return i;
  96. }
  97. if (last_idx > CPUIDLE_DRIVER_STATE_START &&
  98. last_residency < last_state->threshold.demotion_time) {
  99. last_state->stats.demotion_count++;
  100. last_state->stats.promotion_count = 0;
  101. if (last_state->stats.demotion_count >= last_state->threshold.demotion_count) {
  102. ladder_do_selection(ldev, last_idx, last_idx - 1);
  103. return last_idx - 1;
  104. }
  105. }
  106. /* otherwise remain at the current state */
  107. return last_idx;
  108. }
  109. /**
  110. * ladder_enable_device - setup for the governor
  111. * @dev: the CPU
  112. */
  113. static int ladder_enable_device(struct cpuidle_device *dev)
  114. {
  115. int i;
  116. struct ladder_device *ldev = &per_cpu(ladder_devices, dev->cpu);
  117. struct ladder_device_state *lstate;
  118. struct cpuidle_state *state;
  119. ldev->last_state_idx = CPUIDLE_DRIVER_STATE_START;
  120. for (i = 0; i < dev->state_count; i++) {
  121. state = &dev->states[i];
  122. lstate = &ldev->states[i];
  123. lstate->stats.promotion_count = 0;
  124. lstate->stats.demotion_count = 0;
  125. lstate->threshold.promotion_count = PROMOTION_COUNT;
  126. lstate->threshold.demotion_count = DEMOTION_COUNT;
  127. if (i < dev->state_count - 1)
  128. lstate->threshold.promotion_time = state->exit_latency;
  129. if (i > 0)
  130. lstate->threshold.demotion_time = state->exit_latency;
  131. }
  132. return 0;
  133. }
  134. static struct cpuidle_governor ladder_governor = {
  135. .name = "ladder",
  136. .rating = 10,
  137. .enable = ladder_enable_device,
  138. .select = ladder_select_state,
  139. .owner = THIS_MODULE,
  140. };
  141. /**
  142. * init_ladder - initializes the governor
  143. */
  144. static int __init init_ladder(void)
  145. {
  146. return cpuidle_register_governor(&ladder_governor);
  147. }
  148. /**
  149. * exit_ladder - exits the governor
  150. */
  151. static void __exit exit_ladder(void)
  152. {
  153. cpuidle_unregister_governor(&ladder_governor);
  154. }
  155. MODULE_LICENSE("GPL");
  156. module_init(init_ladder);
  157. module_exit(exit_ladder);