menu.c 4.0 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159
  1. /*
  2. * menu.c - the menu idle governor
  3. *
  4. * Copyright (C) 2006-2007 Adam Belay <abelay@novell.com>
  5. *
  6. * This code is licenced under the GPL.
  7. */
  8. #include <linux/kernel.h>
  9. #include <linux/cpuidle.h>
  10. #include <linux/pm_qos_params.h>
  11. #include <linux/time.h>
  12. #include <linux/ktime.h>
  13. #include <linux/hrtimer.h>
  14. #include <linux/tick.h>
/* Tunables for the wakeup-prediction heuristic. */
#define BREAK_FUZZ 4 /* 4 us */
#define PRED_HISTORY_PCT 50

/*
 * Per-CPU state kept by the menu governor between menu_select() and
 * menu_reflect() calls.
 */
struct menu_device {
	int		last_state_idx;		/* index of the state chosen by the last menu_select() */
	unsigned int	expected_us;		/* sleep length reported by tick_nohz_get_sleep_length(), in us */
	unsigned int	predicted_us;		/* smoothed prediction of time until the next break event */
	unsigned int	current_predicted_us;	/* latest unsmoothed prediction, set by menu_reflect() */
	unsigned int	last_measured_us;	/* cumulative idle time of the previous interrupt-ended run */
	unsigned int	elapsed_us;		/* cumulative idle time since the last interrupt wakeup */
};

static DEFINE_PER_CPU(struct menu_device, menu_devices);
  26. /**
  27. * menu_select - selects the next idle state to enter
  28. * @dev: the CPU
  29. */
  30. static int menu_select(struct cpuidle_device *dev)
  31. {
  32. struct menu_device *data = &__get_cpu_var(menu_devices);
  33. int latency_req = pm_qos_requirement(PM_QOS_CPU_DMA_LATENCY);
  34. int i;
  35. /* Special case when user has set very strict latency requirement */
  36. if (unlikely(latency_req == 0)) {
  37. data->last_state_idx = 0;
  38. return 0;
  39. }
  40. /* determine the expected residency time */
  41. data->expected_us =
  42. (u32) ktime_to_ns(tick_nohz_get_sleep_length()) / 1000;
  43. /* Recalculate predicted_us based on prediction_history_pct */
  44. data->predicted_us *= PRED_HISTORY_PCT;
  45. data->predicted_us += (100 - PRED_HISTORY_PCT) *
  46. data->current_predicted_us;
  47. data->predicted_us /= 100;
  48. /* find the deepest idle state that satisfies our constraints */
  49. for (i = CPUIDLE_DRIVER_STATE_START + 1; i < dev->state_count; i++) {
  50. struct cpuidle_state *s = &dev->states[i];
  51. if (s->target_residency > data->expected_us)
  52. break;
  53. if (s->target_residency > data->predicted_us)
  54. break;
  55. if (s->exit_latency > latency_req)
  56. break;
  57. }
  58. data->last_state_idx = i - 1;
  59. return i - 1;
  60. }
/**
 * menu_reflect - attempts to guess what happened after entry
 * @dev: the CPU
 *
 * NOTE: it's important to be fast here because this operation will add to
 * the overall exit latency.
 */
static void menu_reflect(struct cpuidle_device *dev)
{
	struct menu_device *data = &__get_cpu_var(menu_devices);
	int last_idx = data->last_state_idx;
	unsigned int last_idle_us = cpuidle_get_last_residency(dev);
	struct cpuidle_state *target = &dev->states[last_idx];
	unsigned int measured_us;

	/*
	 * Ugh, this idle state doesn't support residency measurements, so we
	 * are basically lost in the dark.  As a compromise, assume we slept
	 * for one full standard timer tick.  However, be aware that this
	 * could potentially result in a suboptimal state transition.
	 */
	if (unlikely(!(target->flags & CPUIDLE_FLAG_TIME_VALID)))
		last_idle_us = USEC_PER_SEC / HZ;

	/*
	 * measured_us and elapsed_us are the cumulative idle time, since the
	 * last time we were woken out of idle by an interrupt.
	 * The comparison is an unsigned-overflow guard: if the sum would
	 * wrap, saturate measured_us at UINT_MAX (-1 as unsigned) instead.
	 */
	if (data->elapsed_us <= data->elapsed_us + last_idle_us)
		measured_us = data->elapsed_us + last_idle_us;
	else
		measured_us = -1;

	/* Predict time until next break event */
	data->current_predicted_us = max(measured_us, data->last_measured_us);

	/*
	 * Woke up well short of the expected residency window: presumably a
	 * non-timer interrupt ended this idle period, so record the run and
	 * restart the cumulative measurement; otherwise keep accumulating.
	 * NOTE(review): expected_us - exit_latency is unsigned and wraps to
	 * a huge value when exit_latency exceeds expected_us (making the
	 * condition true) — confirm this wrap is intended.
	 */
	if (last_idle_us + BREAK_FUZZ <
	    data->expected_us - target->exit_latency) {
		data->last_measured_us = measured_us;
		data->elapsed_us = 0;
	} else {
		data->elapsed_us = measured_us;
	}
}
  101. /**
  102. * menu_enable_device - scans a CPU's states and does setup
  103. * @dev: the CPU
  104. */
  105. static int menu_enable_device(struct cpuidle_device *dev)
  106. {
  107. struct menu_device *data = &per_cpu(menu_devices, dev->cpu);
  108. memset(data, 0, sizeof(struct menu_device));
  109. return 0;
  110. }
/* Registration descriptor hooking this governor into the cpuidle core. */
static struct cpuidle_governor menu_governor = {
	.name =		"menu",
	.rating =	20,	/* governor priority; presumably higher wins — confirm against cpuidle core */
	.enable =	menu_enable_device,
	.select =	menu_select,
	.reflect =	menu_reflect,
	.owner =	THIS_MODULE,
};
/**
 * init_menu - initializes the governor
 *
 * Registers the menu governor with the cpuidle core; returns
 * cpuidle_register_governor()'s result (0 on success).
 */
static int __init init_menu(void)
{
	return cpuidle_register_governor(&menu_governor);
}
/**
 * exit_menu - exits the governor
 *
 * Unregisters the menu governor from the cpuidle core on module unload.
 */
static void __exit exit_menu(void)
{
	cpuidle_unregister_governor(&menu_governor);
}
/* Module boilerplate: license and load/unload entry points. */
MODULE_LICENSE("GPL");
module_init(init_menu);
module_exit(exit_menu);