
/*
 * drivers/cpufreq/cpufreq_conservative.c
 *
 * Copyright (C) 2001 Russell King
 *           (C) 2003 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>.
 *                    Jun Nakajima <jun.nakajima@intel.com>
 *           (C) 2009 Alexander Clouter <alex@digriz.org.uk>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/slab.h>
#include "cpufreq_governor.h"

/* Conservative governor macros */
#define DEF_FREQUENCY_UP_THRESHOLD      (80)
#define DEF_FREQUENCY_DOWN_THRESHOLD    (20)
#define DEF_FREQUENCY_STEP              (5)
#define DEF_SAMPLING_DOWN_FACTOR        (1)
#define MAX_SAMPLING_DOWN_FACTOR        (10)

static DEFINE_PER_CPU(struct cs_cpu_dbs_info_s, cs_cpu_dbs_info);

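/*
 * get_freq_target() computes the size of one frequency step: freq_step
 * percent of policy->max, with a non-zero floor so that a non-zero
 * freq_step can never degenerate into a step of zero.
 */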
static inline unsigned int get_freq_target(struct cs_dbs_tuners *cs_tuners,
                struct cpufreq_policy *policy)
{
        unsigned int freq_target = (cs_tuners->freq_step * policy->max) / 100;

        /* max freq cannot be less than 100. But who knows... */
        if (unlikely(freq_target == 0))
                freq_target = DEF_FREQUENCY_STEP;

        return freq_target;
}

/*
 * Every sampling_rate we check whether the current idle time is less than
 * 20% (default); if it is, we try to increase the frequency. Every
 * sampling_rate * sampling_down_factor we check whether the current idle
 * time is more than 80% (default); if it is, we try to decrease the
 * frequency.
 *
 * Unlike ondemand, frequency changes happen in steps of freq_step (5% of
 * the maximum frequency by default) in both directions, so an increase
 * does not jump straight to the maximum frequency.
 */
static void cs_check_cpu(int cpu, unsigned int load)
{
        struct cs_cpu_dbs_info_s *dbs_info = &per_cpu(cs_cpu_dbs_info, cpu);
        struct cpufreq_policy *policy = dbs_info->cdbs.cur_policy;
        struct dbs_data *dbs_data = policy->governor_data;
        struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;

        /*
         * break out if we 'cannot' reduce the speed as the user might
         * want freq_step to be zero
         */
        if (cs_tuners->freq_step == 0)
                return;

        /* Check for frequency increase */
        if (load > cs_tuners->up_threshold) {
                dbs_info->down_skip = 0;

                /* if we are already at full speed then break out early */
                if (dbs_info->requested_freq == policy->max)
                        return;

                dbs_info->requested_freq += get_freq_target(cs_tuners, policy);
                if (dbs_info->requested_freq > policy->max)
                        dbs_info->requested_freq = policy->max;

                __cpufreq_driver_target(policy, dbs_info->requested_freq,
                        CPUFREQ_RELATION_H);
                return;
        }

        /* if sampling_down_factor is active break out early */
        if (++dbs_info->down_skip < cs_tuners->sampling_down_factor)
                return;
        dbs_info->down_skip = 0;

        /* Check for frequency decrease */
        if (load < cs_tuners->down_threshold) {
                /*
                 * if we cannot reduce the frequency anymore, break out early
                 */
                if (policy->cur == policy->min)
                        return;

                dbs_info->requested_freq -= get_freq_target(cs_tuners, policy);
                if (dbs_info->requested_freq < policy->min)
                        dbs_info->requested_freq = policy->min;

                __cpufreq_driver_target(policy, dbs_info->requested_freq,
                                CPUFREQ_RELATION_L);
                return;
        }
}

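/*
 * Per-sampling-period worker: under the policy's timer_mutex it decides
 * whether a load evaluation is due, runs dbs_check_cpu() (which ends up in
 * cs_check_cpu() above) if so, and then requeues itself after the sampling
 * delay.
 */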
static void cs_dbs_timer(struct work_struct *work)
{
        struct cs_cpu_dbs_info_s *dbs_info = container_of(work,
                        struct cs_cpu_dbs_info_s, cdbs.work.work);
        unsigned int cpu = dbs_info->cdbs.cur_policy->cpu;
        struct cs_cpu_dbs_info_s *core_dbs_info = &per_cpu(cs_cpu_dbs_info,
                        cpu);
        struct dbs_data *dbs_data = dbs_info->cdbs.cur_policy->governor_data;
        struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
        int delay = delay_for_sampling_rate(cs_tuners->sampling_rate);
        bool modify_all = true;

        mutex_lock(&core_dbs_info->cdbs.timer_mutex);
        if (!need_load_eval(&core_dbs_info->cdbs, cs_tuners->sampling_rate))
                modify_all = false;
        else
                dbs_check_cpu(dbs_data, cpu);

        gov_queue_work(dbs_data, dbs_info->cdbs.cur_policy, delay, modify_all);
        mutex_unlock(&core_dbs_info->cdbs.timer_mutex);
}

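/*
 * Transition notifier: called for every frequency change. If the
 * internally tracked requested_freq has drifted outside the current
 * policy limits (e.g. after the limits were changed), resynchronise it
 * to the frequency that was actually set.
 */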
static int dbs_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
                void *data)
{
        struct cpufreq_freqs *freq = data;
        struct cs_cpu_dbs_info_s *dbs_info =
                        &per_cpu(cs_cpu_dbs_info, freq->cpu);
        struct cpufreq_policy *policy;

        if (!dbs_info->enable)
                return 0;

        policy = dbs_info->cdbs.cur_policy;

        /*
         * we only care if our internally tracked freq moves outside the 'valid'
         * ranges of frequency available to us otherwise we do not change it
         */
        if (dbs_info->requested_freq > policy->max
                        || dbs_info->requested_freq < policy->min)
                dbs_info->requested_freq = freq->new;

        return 0;
}

/************************** sysfs interface ************************/
static struct common_dbs_data cs_dbs_cdata;

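/*
 * The tunables below appear either globally under
 * /sys/devices/system/cpu/cpufreq/conservative/ or, when the platform
 * declares per-policy tunables (have_governor_per_policy()), under
 * /sys/devices/system/cpu/cpuN/cpufreq/conservative/. For example, with
 * the global layout and the governor active:
 *
 *      echo 10 > /sys/devices/system/cpu/cpufreq/conservative/freq_step
 */
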
static ssize_t store_sampling_down_factor(struct dbs_data *dbs_data,
                const char *buf, size_t count)
{
        struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
        unsigned int input;
        int ret;
        ret = sscanf(buf, "%u", &input);

        if (ret != 1 || input > MAX_SAMPLING_DOWN_FACTOR || input < 1)
                return -EINVAL;

        cs_tuners->sampling_down_factor = input;
        return count;
}

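/* writes below the governor's floor are clamped up to min_sampling_rate */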
static ssize_t store_sampling_rate(struct dbs_data *dbs_data, const char *buf,
                size_t count)
{
        struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
        unsigned int input;
        int ret;
        ret = sscanf(buf, "%u", &input);

        if (ret != 1)
                return -EINVAL;

        cs_tuners->sampling_rate = max(input, dbs_data->min_sampling_rate);
        return count;
}

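/* up_threshold must lie strictly above down_threshold and at most 100 */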
static ssize_t store_up_threshold(struct dbs_data *dbs_data, const char *buf,
                size_t count)
{
        struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
        unsigned int input;
        int ret;
        ret = sscanf(buf, "%u", &input);

        if (ret != 1 || input > 100 || input <= cs_tuners->down_threshold)
                return -EINVAL;

        cs_tuners->up_threshold = input;
        return count;
}

static ssize_t store_down_threshold(struct dbs_data *dbs_data, const char *buf,
                size_t count)
{
        struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
        unsigned int input;
        int ret;
        ret = sscanf(buf, "%u", &input);

        /* cannot be lower than 11 otherwise freq will not fall */
        if (ret != 1 || input < 11 || input > 100 ||
                        input >= cs_tuners->up_threshold)
                return -EINVAL;

        cs_tuners->down_threshold = input;
        return count;
}

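/*
 * ignore_nice_load is boolean; flipping it invalidates the cached idle
 * baselines, so prev_cpu_idle (and prev_cpu_nice) must be re-read for
 * every online CPU.
 */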
static ssize_t store_ignore_nice_load(struct dbs_data *dbs_data,
                const char *buf, size_t count)
{
        struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
        unsigned int input, j;
        int ret;

        ret = sscanf(buf, "%u", &input);
        if (ret != 1)
                return -EINVAL;

        if (input > 1)
                input = 1;

        if (input == cs_tuners->ignore_nice_load) /* nothing to do */
                return count;

        cs_tuners->ignore_nice_load = input;

        /* we need to re-evaluate prev_cpu_idle */
        for_each_online_cpu(j) {
                struct cs_cpu_dbs_info_s *dbs_info;
                dbs_info = &per_cpu(cs_cpu_dbs_info, j);
                dbs_info->cdbs.prev_cpu_idle = get_cpu_idle_time(j,
                                        &dbs_info->cdbs.prev_cpu_wall, 0);
                if (cs_tuners->ignore_nice_load)
                        dbs_info->cdbs.prev_cpu_nice =
                                kcpustat_cpu(j).cpustat[CPUTIME_NICE];
        }
        return count;
}

static ssize_t store_freq_step(struct dbs_data *dbs_data, const char *buf,
                size_t count)
{
        struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
        unsigned int input;
        int ret;
        ret = sscanf(buf, "%u", &input);

        if (ret != 1)
                return -EINVAL;

        if (input > 100)
                input = 100;

        /*
         * no need to test here if freq_step is zero as the user might actually
         * want this, they would be crazy though :)
         */
        cs_tuners->freq_step = input;
        return count;
}

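/*
 * show_store_one() expands to show_/store_ wrappers for both the global
 * (gov_sys) and per-policy (gov_pol) sysfs layouts, and the
 * gov_sys_pol_attr_*() macros instantiate the matching attributes used in
 * the two attribute groups below.
 */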
show_store_one(cs, sampling_rate);
show_store_one(cs, sampling_down_factor);
show_store_one(cs, up_threshold);
show_store_one(cs, down_threshold);
show_store_one(cs, ignore_nice_load);
show_store_one(cs, freq_step);
declare_show_sampling_rate_min(cs);

gov_sys_pol_attr_rw(sampling_rate);
gov_sys_pol_attr_rw(sampling_down_factor);
gov_sys_pol_attr_rw(up_threshold);
gov_sys_pol_attr_rw(down_threshold);
gov_sys_pol_attr_rw(ignore_nice_load);
gov_sys_pol_attr_rw(freq_step);
gov_sys_pol_attr_ro(sampling_rate_min);

static struct attribute *dbs_attributes_gov_sys[] = {
        &sampling_rate_min_gov_sys.attr,
        &sampling_rate_gov_sys.attr,
        &sampling_down_factor_gov_sys.attr,
        &up_threshold_gov_sys.attr,
        &down_threshold_gov_sys.attr,
        &ignore_nice_load_gov_sys.attr,
        &freq_step_gov_sys.attr,
        NULL
};

static struct attribute_group cs_attr_group_gov_sys = {
        .attrs = dbs_attributes_gov_sys,
        .name = "conservative",
};

static struct attribute *dbs_attributes_gov_pol[] = {
        &sampling_rate_min_gov_pol.attr,
        &sampling_rate_gov_pol.attr,
        &sampling_down_factor_gov_pol.attr,
        &up_threshold_gov_pol.attr,
        &down_threshold_gov_pol.attr,
        &ignore_nice_load_gov_pol.attr,
        &freq_step_gov_pol.attr,
        NULL
};

static struct attribute_group cs_attr_group_gov_pol = {
        .attrs = dbs_attributes_gov_pol,
        .name = "conservative",
};

/************************** sysfs end ************************/

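/*
 * cs_init() allocates the tunables and seeds them with the defaults above.
 * min_sampling_rate is derived from the jiffy resolution, as the load
 * accounting used here cannot be finer-grained than the scheduler tick.
 */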
static int cs_init(struct dbs_data *dbs_data)
{
        struct cs_dbs_tuners *tuners;

        tuners = kzalloc(sizeof(*tuners), GFP_KERNEL);
        if (!tuners) {
                pr_err("%s: kzalloc failed\n", __func__);
                return -ENOMEM;
        }

        tuners->up_threshold = DEF_FREQUENCY_UP_THRESHOLD;
        tuners->down_threshold = DEF_FREQUENCY_DOWN_THRESHOLD;
        tuners->sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR;
        tuners->ignore_nice_load = 0;
        tuners->freq_step = DEF_FREQUENCY_STEP;

        dbs_data->tuners = tuners;
        dbs_data->min_sampling_rate = MIN_SAMPLING_RATE_RATIO *
                jiffies_to_usecs(10);
        mutex_init(&dbs_data->mutex);
        return 0;
}

static void cs_exit(struct dbs_data *dbs_data)
{
        kfree(dbs_data->tuners);
}

define_get_cpu_dbs_routines(cs_cpu_dbs_info);

static struct notifier_block cs_cpufreq_notifier_block = {
        .notifier_call = dbs_cpufreq_notifier,
};

static struct cs_ops cs_ops = {
        .notifier_block = &cs_cpufreq_notifier_block,
};

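/*
 * cs_dbs_cdata wires the conservative callbacks into the common governor
 * framework; the transition notifier in cs_ops is registered by that
 * framework when the governor is started on a policy.
 */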
static struct common_dbs_data cs_dbs_cdata = {
        .governor = GOV_CONSERVATIVE,
        .attr_group_gov_sys = &cs_attr_group_gov_sys,
        .attr_group_gov_pol = &cs_attr_group_gov_pol,
        .get_cpu_cdbs = get_cpu_cdbs,
        .get_cpu_dbs_info_s = get_cpu_dbs_info_s,
        .gov_dbs_timer = cs_dbs_timer,
        .gov_check_cpu = cs_check_cpu,
        .gov_ops = &cs_ops,
        .init = cs_init,
        .exit = cs_exit,
};

static int cs_cpufreq_governor_dbs(struct cpufreq_policy *policy,
                unsigned int event)
{
        return cpufreq_governor_dbs(policy, &cs_dbs_cdata, event);
}

#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE
static
#endif
struct cpufreq_governor cpufreq_gov_conservative = {
        .name                   = "conservative",
        .governor               = cs_cpufreq_governor_dbs,
        .max_transition_latency = TRANSITION_LATENCY_LIMIT,
        .owner                  = THIS_MODULE,
};

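/*
 * When conservative is the default governor the cpufreq core needs the
 * cpufreq_gov_conservative symbol at early boot, hence the non-static
 * definition above and the fs_initcall() below; otherwise this behaves
 * as an ordinary module.
 */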
static int __init cpufreq_gov_dbs_init(void)
{
        return cpufreq_register_governor(&cpufreq_gov_conservative);
}

static void __exit cpufreq_gov_dbs_exit(void)
{
        cpufreq_unregister_governor(&cpufreq_gov_conservative);
}

MODULE_AUTHOR("Alexander Clouter <alex@digriz.org.uk>");
MODULE_DESCRIPTION("'cpufreq_conservative' - A dynamic cpufreq governor for "
                "Low Latency Frequency Transition capable processors "
                "optimised for use in a battery environment");
MODULE_LICENSE("GPL");

#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE
fs_initcall(cpufreq_gov_dbs_init);
#else
module_init(cpufreq_gov_dbs_init);
#endif
module_exit(cpufreq_gov_dbs_exit);