ladder.c

/*
 * ladder.c - the residency ladder algorithm
 *
 *  Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
 *  Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
 *  Copyright (C) 2004, 2005 Dominik Brodowski <linux@brodo.de>
 *
 * (C) 2006-2007 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
 *               Shaohua Li <shaohua.li@intel.com>
 *               Adam Belay <abelay@novell.com>
 *
 * This code is licenced under the GPL.
 */

#include <linux/kernel.h>
#include <linux/cpuidle.h>
#include <linux/pm_qos_params.h>
#include <linux/moduleparam.h>
#include <linux/jiffies.h>

#include <asm/io.h>
#include <asm/uaccess.h>

#define PROMOTION_COUNT 4
#define DEMOTION_COUNT 1
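
/*
 * Per C-state bookkeeping: "threshold" holds this rung's tunables (how many
 * consecutive qualifying residencies are needed, and the residency times
 * that qualify), while "stats" counts the consecutive promotion/demotion
 * observations made so far.
 */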
struct ladder_device_state {
	struct {
		u32 promotion_count;
		u32 demotion_count;
		u32 promotion_time;
		u32 demotion_time;
	} threshold;
	struct {
		int promotion_count;
		int demotion_count;
	} stats;
};
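
/* Per-CPU ladder: one rung per C-state, plus the rung we are currently on. */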
struct ladder_device {
	struct ladder_device_state states[CPUIDLE_STATE_MAX];
	int last_state_idx;
};

static DEFINE_PER_CPU(struct ladder_device, ladder_devices);

/**
 * ladder_do_selection - prepares private data for a state change
 * @ldev: the ladder device
 * @old_idx: the current state index
 * @new_idx: the new target state index
 */
static inline void ladder_do_selection(struct ladder_device *ldev,
				       int old_idx, int new_idx)
{
	ldev->states[old_idx].stats.promotion_count = 0;
	ldev->states[old_idx].stats.demotion_count = 0;
	ldev->last_state_idx = new_idx;
}

/**
 * ladder_select_state - selects the next state to enter
 * @dev: the CPU
 */
static int ladder_select_state(struct cpuidle_device *dev)
{
	struct ladder_device *ldev = &__get_cpu_var(ladder_devices);
	struct ladder_device_state *last_state;
	int last_residency, last_idx = ldev->last_state_idx;
	int latency_req = pm_qos_requirement(PM_QOS_CPU_DMA_LATENCY);

	if (unlikely(!ldev))
		return 0;

	/* Special case when user has set very strict latency requirement */
	if (unlikely(latency_req == 0)) {
		ladder_do_selection(ldev, last_idx, 0);
		return 0;
	}

	last_state = &ldev->states[last_idx];
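
	/*
	 * The time actually spent idle is the measured residency minus the
	 * state's exit latency; if the driver could not time the last
	 * residency, assume it was long enough to count towards promotion.
	 */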
	if (dev->states[last_idx].flags & CPUIDLE_FLAG_TIME_VALID)
		last_residency = cpuidle_get_last_residency(dev) -
			dev->states[last_idx].exit_latency;
	else
		last_residency = last_state->threshold.promotion_time + 1;

	/* consider promotion */
	if (last_idx < dev->state_count - 1 &&
	    last_residency > last_state->threshold.promotion_time &&
	    dev->states[last_idx + 1].exit_latency <= latency_req) {
		last_state->stats.promotion_count++;
		last_state->stats.demotion_count = 0;
		if (last_state->stats.promotion_count >=
		    last_state->threshold.promotion_count) {
			ladder_do_selection(ldev, last_idx, last_idx + 1);
			return last_idx + 1;
		}
	}

	/* consider demotion */
	if (last_idx > CPUIDLE_DRIVER_STATE_START &&
	    last_residency < last_state->threshold.demotion_time) {
		last_state->stats.demotion_count++;
		last_state->stats.promotion_count = 0;
		if (last_state->stats.demotion_count >=
		    last_state->threshold.demotion_count) {
			ladder_do_selection(ldev, last_idx, last_idx - 1);
			return last_idx - 1;
		}
	}

	/* otherwise remain at the current state */
	return last_idx;
}

/**
 * ladder_enable_device - setup for the governor
 * @dev: the CPU
 */
static int ladder_enable_device(struct cpuidle_device *dev)
{
	int i;
	struct ladder_device *ldev = &per_cpu(ladder_devices, dev->cpu);
	struct ladder_device_state *lstate;
	struct cpuidle_state *state;

	ldev->last_state_idx = CPUIDLE_DRIVER_STATE_START;
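
	/*
	 * Start on the first ladder-managed state and reset every rung:
	 * the counters are cleared, the consecutive-count thresholds come
	 * from PROMOTION_COUNT/DEMOTION_COUNT, and the residency-time
	 * thresholds are seeded from each state's exit latency.
	 */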
	for (i = 0; i < dev->state_count; i++) {
		state = &dev->states[i];
		lstate = &ldev->states[i];

		lstate->stats.promotion_count = 0;
		lstate->stats.demotion_count = 0;

		lstate->threshold.promotion_count = PROMOTION_COUNT;
		lstate->threshold.demotion_count = DEMOTION_COUNT;

		if (i < dev->state_count - 1)
			lstate->threshold.promotion_time = state->exit_latency;
		if (i > 0)
			lstate->threshold.demotion_time = state->exit_latency;
	}

	return 0;
}
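
/*
 * A low rating (10) lets a higher-rated governor, such as "menu" where
 * available, be preferred automatically; ladder is then only used when
 * nothing better is registered or when it is selected explicitly.
 */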
static struct cpuidle_governor ladder_governor = {
	.name = "ladder",
	.rating = 10,
	.enable = ladder_enable_device,
	.select = ladder_select_state,
	.owner = THIS_MODULE,
};

/**
 * init_ladder - initializes the governor
 */
static int __init init_ladder(void)
{
	return cpuidle_register_governor(&ladder_governor);
}

/**
 * exit_ladder - exits the governor
 */
static void __exit exit_ladder(void)
{
	cpuidle_unregister_governor(&ladder_governor);
}

MODULE_LICENSE("GPL");
module_init(init_ladder);
module_exit(exit_ladder);