/*
 * drivers/cpufreq/cpufreq_ondemand.c
 *
 * Copyright (C) 2001 Russell King
 *           (C) 2003 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>.
 *                    Jun Nakajima <jun.nakajima@intel.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/cpufreq.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/kernel_stat.h>
#include <linux/kobject.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/percpu-defs.h>
#include <linux/slab.h>
#include <linux/sysfs.h>
#include <linux/tick.h>
#include <linux/types.h>

#include "cpufreq_governor.h"

/* On-demand governor macros */
#define DEF_FREQUENCY_DOWN_DIFFERENTIAL		(10)
#define DEF_FREQUENCY_UP_THRESHOLD		(80)
#define DEF_SAMPLING_DOWN_FACTOR		(1)
#define MAX_SAMPLING_DOWN_FACTOR		(100000)
#define MICRO_FREQUENCY_DOWN_DIFFERENTIAL	(3)
#define MICRO_FREQUENCY_UP_THRESHOLD		(95)
#define MICRO_FREQUENCY_MIN_SAMPLE_RATE		(10000)
#define MIN_FREQUENCY_UP_THRESHOLD		(11)
#define MAX_FREQUENCY_UP_THRESHOLD		(100)

static DEFINE_PER_CPU(struct od_cpu_dbs_info_s, od_cpu_dbs_info);

#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND
static struct cpufreq_governor cpufreq_gov_ondemand;
#endif

static void ondemand_powersave_bias_init_cpu(int cpu)
{
        struct od_cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info, cpu);

        dbs_info->freq_table = cpufreq_frequency_get_table(cpu);
        dbs_info->freq_lo = 0;
}

/*
 * Not all CPUs want IO time to be accounted as busy; this depends on
 * how efficient idling at a higher frequency/voltage is.
 * Pavel Machek says this is not so for various generations of AMD and
 * old Intel systems.
 * Mike Chan (android.com) claims this is also not true for ARM.
 * Because of this, whitelist specific known (series of) CPUs by default,
 * and leave all others up to the user.
 */
static int should_io_be_busy(void)
{
#if defined(CONFIG_X86)
        /*
         * For Intel, Core 2 (model 15) and later have an efficient idle.
         */
        if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
                        boot_cpu_data.x86 == 6 &&
                        boot_cpu_data.x86_model >= 15)
                return 1;
#endif
        return 0;
}

/*
 * Find the right frequency to be set now with powersave_bias on.
 * Returns the freq_hi to be used right now and sets freq_hi_jiffies,
 * freq_lo, and freq_lo_jiffies in the per-CPU area for averaging freqs.
 */
static unsigned int powersave_bias_target(struct cpufreq_policy *policy,
                unsigned int freq_next, unsigned int relation)
{
        unsigned int freq_req, freq_reduc, freq_avg;
        unsigned int freq_hi, freq_lo;
        unsigned int index = 0;
        unsigned int jiffies_total, jiffies_hi, jiffies_lo;
        struct od_cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info,
                                                      policy->cpu);
        struct dbs_data *dbs_data = policy->governor_data;
        struct od_dbs_tuners *od_tuners = dbs_data->tuners;

        if (!dbs_info->freq_table) {
                dbs_info->freq_lo = 0;
                dbs_info->freq_lo_jiffies = 0;
                return freq_next;
        }

        cpufreq_frequency_table_target(policy, dbs_info->freq_table, freq_next,
                        relation, &index);
        freq_req = dbs_info->freq_table[index].frequency;
        freq_reduc = freq_req * od_tuners->powersave_bias / 1000;
        freq_avg = freq_req - freq_reduc;

        /* Find freq bounds for freq_avg in freq_table */
        index = 0;
        cpufreq_frequency_table_target(policy, dbs_info->freq_table, freq_avg,
                        CPUFREQ_RELATION_H, &index);
        freq_lo = dbs_info->freq_table[index].frequency;
        index = 0;
        cpufreq_frequency_table_target(policy, dbs_info->freq_table, freq_avg,
                        CPUFREQ_RELATION_L, &index);
        freq_hi = dbs_info->freq_table[index].frequency;

        /* Find out how long we have to be in hi and lo freqs */
        if (freq_hi == freq_lo) {
                dbs_info->freq_lo = 0;
                dbs_info->freq_lo_jiffies = 0;
                return freq_lo;
        }
        jiffies_total = usecs_to_jiffies(od_tuners->sampling_rate);
        jiffies_hi = (freq_avg - freq_lo) * jiffies_total;
        jiffies_hi += ((freq_hi - freq_lo) / 2);
        jiffies_hi /= (freq_hi - freq_lo);
        jiffies_lo = jiffies_total - jiffies_hi;
        dbs_info->freq_lo = freq_lo;
        dbs_info->freq_lo_jiffies = jiffies_lo;
        dbs_info->freq_hi_jiffies = jiffies_hi;
        return freq_hi;
}

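/*
 * Worked example for the jiffies split above (illustrative numbers, not
 * from the original source): assume a freq_table of {1500000, 2000000} kHz,
 * powersave_bias = 200 (20%), sampling_rate = 10000 us and HZ = 1000, so
 * jiffies_total = 10. With freq_req = 2000000 we get
 * freq_reduc = 2000000 * 200 / 1000 = 400000 and freq_avg = 1600000, which
 * falls between freq_lo = 1500000 and freq_hi = 2000000. Then
 * jiffies_hi = (100000 * 10 + 250000) / 500000 = 2 and jiffies_lo = 8:
 * running 2 jiffies at 2 GHz and 8 jiffies at 1.5 GHz averages 1.6 GHz,
 * i.e. the requested frequency minus the 20% bias.
 */
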
static void ondemand_powersave_bias_init(void)
{
        int i;

        for_each_online_cpu(i) {
                ondemand_powersave_bias_init_cpu(i);
        }
}

static void dbs_freq_increase(struct cpufreq_policy *p, unsigned int freq)
{
        struct dbs_data *dbs_data = p->governor_data;
        struct od_dbs_tuners *od_tuners = dbs_data->tuners;

        if (od_tuners->powersave_bias)
                freq = powersave_bias_target(p, freq, CPUFREQ_RELATION_H);
        else if (p->cur == p->max)
                return;

        __cpufreq_driver_target(p, freq, od_tuners->powersave_bias ?
                        CPUFREQ_RELATION_L : CPUFREQ_RELATION_H);
}

/*
 * Every sampling_rate we check whether the current idle time is less than
 * 20% (default). If it is, we try to increase the frequency. Every
 * sampling_rate we also look for the lowest frequency which can sustain
 * the load while keeping idle time over 30%. If such a frequency exists,
 * we try to decrease to this frequency.
 *
 * Any frequency increase takes it to the maximum frequency. Frequency
 * reduction happens at minimum steps of 5% (default) of the current
 * frequency.
 */
static void od_check_cpu(int cpu, unsigned int load_freq)
{
        struct od_cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info, cpu);
        struct cpufreq_policy *policy = dbs_info->cdbs.cur_policy;
        struct dbs_data *dbs_data = policy->governor_data;
        struct od_dbs_tuners *od_tuners = dbs_data->tuners;

        dbs_info->freq_lo = 0;

        /* Check for frequency increase */
        if (load_freq > od_tuners->up_threshold * policy->cur) {
                /* If switching to max speed, apply sampling_down_factor */
                if (policy->cur < policy->max)
                        dbs_info->rate_mult =
                                od_tuners->sampling_down_factor;
                dbs_freq_increase(policy, policy->max);
                return;
        }

        /* Check for frequency decrease */
        /* if we cannot reduce the frequency anymore, break out early */
        if (policy->cur == policy->min)
                return;

        /*
         * The optimal frequency is the lowest frequency that can support
         * the current CPU usage without triggering the up policy. To be
         * safe, we aim 10 points (the default down differential) under
         * the threshold.
         */
        if (load_freq < od_tuners->adj_up_threshold
                        * policy->cur) {
                unsigned int freq_next;
                freq_next = load_freq / od_tuners->adj_up_threshold;

                /* No longer fully busy, reset rate_mult */
                dbs_info->rate_mult = 1;

                if (freq_next < policy->min)
                        freq_next = policy->min;

                if (!od_tuners->powersave_bias) {
                        __cpufreq_driver_target(policy, freq_next,
                                        CPUFREQ_RELATION_L);
                } else {
                        int freq = powersave_bias_target(policy, freq_next,
                                        CPUFREQ_RELATION_L);
                        __cpufreq_driver_target(policy, freq,
                                        CPUFREQ_RELATION_L);
                }
        }
}

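/*
 * Worked example for the thresholds above (illustrative numbers, not from
 * the original source): load_freq is the measured load (in percent)
 * multiplied by policy->cur. With up_threshold = 95 and
 * adj_up_threshold = 95 - 3 = 92 (micro-accounting defaults), a 50% load
 * at policy->cur = 2000000 kHz gives load_freq = 100000000. The up check
 * (100000000 > 95 * 2000000) fails, the down check
 * (100000000 < 92 * 2000000) passes, and
 * freq_next = 100000000 / 92 ~= 1086956 kHz: the lowest frequency at which
 * the same work would still leave us just under the up threshold.
 */
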
static void od_dbs_timer(struct work_struct *work)
{
        struct delayed_work *dw = to_delayed_work(work);
        struct od_cpu_dbs_info_s *dbs_info =
                container_of(work, struct od_cpu_dbs_info_s, cdbs.work.work);
        unsigned int cpu = dbs_info->cdbs.cur_policy->cpu;
        struct od_cpu_dbs_info_s *core_dbs_info = &per_cpu(od_cpu_dbs_info,
                        cpu);
        struct dbs_data *dbs_data = dbs_info->cdbs.cur_policy->governor_data;
        struct od_dbs_tuners *od_tuners = dbs_data->tuners;
        int delay, sample_type = core_dbs_info->sample_type;
        bool eval_load;

        mutex_lock(&core_dbs_info->cdbs.timer_mutex);
        eval_load = need_load_eval(&core_dbs_info->cdbs,
                        od_tuners->sampling_rate);

        /* Common NORMAL_SAMPLE setup */
        core_dbs_info->sample_type = OD_NORMAL_SAMPLE;
        if (sample_type == OD_SUB_SAMPLE) {
                delay = core_dbs_info->freq_lo_jiffies;
                if (eval_load)
                        __cpufreq_driver_target(core_dbs_info->cdbs.cur_policy,
                                        core_dbs_info->freq_lo,
                                        CPUFREQ_RELATION_H);
        } else {
                if (eval_load)
                        dbs_check_cpu(dbs_data, cpu);
                if (core_dbs_info->freq_lo) {
                        /* Setup timer for SUB_SAMPLE */
                        core_dbs_info->sample_type = OD_SUB_SAMPLE;
                        delay = core_dbs_info->freq_hi_jiffies;
                } else {
                        delay = delay_for_sampling_rate(od_tuners->sampling_rate
                                        * core_dbs_info->rate_mult);
                }
        }

        schedule_delayed_work_on(smp_processor_id(), dw, delay);
        mutex_unlock(&core_dbs_info->cdbs.timer_mutex);
}

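/*
 * Sketch of the resulting timer sequence when powersave_bias is active
 * (illustrative summary, not from the original source): a NORMAL_SAMPLE
 * run that picks a biased target leaves freq_lo set, so the CPU first runs
 * at freq_hi for freq_hi_jiffies, then a SUB_SAMPLE run drops it to
 * freq_lo for freq_lo_jiffies, and only after that does the next full load
 * evaluation happen. With the worked example after powersave_bias_target()
 * that is 2 jiffies at 2 GHz followed by 8 jiffies at 1.5 GHz per period.
 */
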
/************************** sysfs interface ************************/

static struct common_dbs_data od_dbs_cdata;

/**
 * update_sampling_rate - update sampling rate effective immediately if needed.
 * @new_rate: new sampling rate
 *
 * If the new rate is smaller than the old, simply updating
 * od_tuners->sampling_rate might not be appropriate. For example, suppose
 * the original sampling_rate was 1 second and the requested new sampling
 * rate is 10 ms because the user needs an immediate reaction from the
 * ondemand governor but is not sure whether a higher frequency will be
 * required. The governor may then change the sampling rate too late, up to
 * 1 second later. Thus, if we are reducing the sampling rate, we need to
 * make the new value effective immediately.
 */
static void update_sampling_rate(struct dbs_data *dbs_data,
                unsigned int new_rate)
{
        struct od_dbs_tuners *od_tuners = dbs_data->tuners;
        int cpu;

        od_tuners->sampling_rate = new_rate = max(new_rate,
                        dbs_data->min_sampling_rate);

        for_each_online_cpu(cpu) {
                struct cpufreq_policy *policy;
                struct od_cpu_dbs_info_s *dbs_info;
                unsigned long next_sampling, appointed_at;

                policy = cpufreq_cpu_get(cpu);
                if (!policy)
                        continue;
                if (policy->governor != &cpufreq_gov_ondemand) {
                        cpufreq_cpu_put(policy);
                        continue;
                }
                dbs_info = &per_cpu(od_cpu_dbs_info, cpu);
                cpufreq_cpu_put(policy);

                mutex_lock(&dbs_info->cdbs.timer_mutex);

                if (!delayed_work_pending(&dbs_info->cdbs.work)) {
                        mutex_unlock(&dbs_info->cdbs.timer_mutex);
                        continue;
                }

                next_sampling = jiffies + usecs_to_jiffies(new_rate);
                appointed_at = dbs_info->cdbs.work.timer.expires;

                if (time_before(next_sampling, appointed_at)) {
                        mutex_unlock(&dbs_info->cdbs.timer_mutex);
                        cancel_delayed_work_sync(&dbs_info->cdbs.work);
                        mutex_lock(&dbs_info->cdbs.timer_mutex);

                        schedule_delayed_work_on(cpu, &dbs_info->cdbs.work,
                                        usecs_to_jiffies(new_rate));
                }
                mutex_unlock(&dbs_info->cdbs.timer_mutex);
        }
}

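/*
 * Worked example for the reschedule check above (illustrative numbers, not
 * from the original source): with HZ = 1000, an old sampling_rate of
 * 1000000 us and a pending timer due in 990 jiffies, lowering the rate to
 * 10000 us gives next_sampling = jiffies + 10, which is well before
 * appointed_at = jiffies + 990, so the pending work is cancelled and
 * rescheduled to fire 10 jiffies from now instead.
 */
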
static ssize_t store_sampling_rate(struct dbs_data *dbs_data, const char *buf,
                size_t count)
{
        unsigned int input;
        int ret;
        ret = sscanf(buf, "%u", &input);
        if (ret != 1)
                return -EINVAL;

        update_sampling_rate(dbs_data, input);
        return count;
}

static ssize_t store_io_is_busy(struct dbs_data *dbs_data, const char *buf,
                size_t count)
{
        struct od_dbs_tuners *od_tuners = dbs_data->tuners;
        unsigned int input;
        int ret;

        ret = sscanf(buf, "%u", &input);
        if (ret != 1)
                return -EINVAL;
        od_tuners->io_is_busy = !!input;
        return count;
}

static ssize_t store_up_threshold(struct dbs_data *dbs_data, const char *buf,
                size_t count)
{
        struct od_dbs_tuners *od_tuners = dbs_data->tuners;
        unsigned int input;
        int ret;
        ret = sscanf(buf, "%u", &input);

        if (ret != 1 || input > MAX_FREQUENCY_UP_THRESHOLD ||
                        input < MIN_FREQUENCY_UP_THRESHOLD) {
                return -EINVAL;
        }
        /* Calculate the new adj_up_threshold */
        od_tuners->adj_up_threshold += input;
        od_tuners->adj_up_threshold -= od_tuners->up_threshold;

        od_tuners->up_threshold = input;
        return count;
}

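/*
 * The two adjustments above preserve the down differential
 * (up_threshold - adj_up_threshold). For example (illustrative numbers):
 * with up_threshold = 95 and adj_up_threshold = 92, writing 80 yields
 * adj_up_threshold = 92 + 80 - 95 = 77, keeping the 3-point gap.
 */
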
static ssize_t store_sampling_down_factor(struct dbs_data *dbs_data,
                const char *buf, size_t count)
{
        struct od_dbs_tuners *od_tuners = dbs_data->tuners;
        unsigned int input, j;
        int ret;
        ret = sscanf(buf, "%u", &input);

        if (ret != 1 || input > MAX_SAMPLING_DOWN_FACTOR || input < 1)
                return -EINVAL;
        od_tuners->sampling_down_factor = input;

        /* Reset down sampling multiplier in case it was active */
        for_each_online_cpu(j) {
                struct od_cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info,
                                j);
                dbs_info->rate_mult = 1;
        }
        return count;
}

static ssize_t store_ignore_nice(struct dbs_data *dbs_data, const char *buf,
                size_t count)
{
        struct od_dbs_tuners *od_tuners = dbs_data->tuners;
        unsigned int input;
        int ret;

        unsigned int j;

        ret = sscanf(buf, "%u", &input);
        if (ret != 1)
                return -EINVAL;

        if (input > 1)
                input = 1;

        if (input == od_tuners->ignore_nice) { /* nothing to do */
                return count;
        }
        od_tuners->ignore_nice = input;

        /* we need to re-evaluate prev_cpu_idle */
        for_each_online_cpu(j) {
                struct od_cpu_dbs_info_s *dbs_info;
                dbs_info = &per_cpu(od_cpu_dbs_info, j);
                dbs_info->cdbs.prev_cpu_idle = get_cpu_idle_time(j,
                                &dbs_info->cdbs.prev_cpu_wall);
                if (od_tuners->ignore_nice)
                        dbs_info->cdbs.prev_cpu_nice =
                                kcpustat_cpu(j).cpustat[CPUTIME_NICE];
        }
        return count;
}

static ssize_t store_powersave_bias(struct dbs_data *dbs_data, const char *buf,
                size_t count)
{
        struct od_dbs_tuners *od_tuners = dbs_data->tuners;
        unsigned int input;
        int ret;
        ret = sscanf(buf, "%u", &input);

        if (ret != 1)
                return -EINVAL;

        if (input > 1000)
                input = 1000;

        od_tuners->powersave_bias = input;
        ondemand_powersave_bias_init();
        return count;
}

show_store_one(od, sampling_rate);
show_store_one(od, io_is_busy);
show_store_one(od, up_threshold);
show_store_one(od, sampling_down_factor);
show_store_one(od, ignore_nice);
show_store_one(od, powersave_bias);
declare_show_sampling_rate_min(od);

gov_sys_pol_attr_rw(sampling_rate);
gov_sys_pol_attr_rw(io_is_busy);
gov_sys_pol_attr_rw(up_threshold);
gov_sys_pol_attr_rw(sampling_down_factor);
gov_sys_pol_attr_rw(ignore_nice);
gov_sys_pol_attr_rw(powersave_bias);
gov_sys_pol_attr_ro(sampling_rate_min);

static struct attribute *dbs_attributes_gov_sys[] = {
        &sampling_rate_min_gov_sys.attr,
        &sampling_rate_gov_sys.attr,
        &up_threshold_gov_sys.attr,
        &sampling_down_factor_gov_sys.attr,
        &ignore_nice_gov_sys.attr,
        &powersave_bias_gov_sys.attr,
        &io_is_busy_gov_sys.attr,
        NULL
};

static struct attribute_group od_attr_group_gov_sys = {
        .attrs = dbs_attributes_gov_sys,
        .name = "ondemand",
};

static struct attribute *dbs_attributes_gov_pol[] = {
        &sampling_rate_min_gov_pol.attr,
        &sampling_rate_gov_pol.attr,
        &up_threshold_gov_pol.attr,
        &sampling_down_factor_gov_pol.attr,
        &ignore_nice_gov_pol.attr,
        &powersave_bias_gov_pol.attr,
        &io_is_busy_gov_pol.attr,
        NULL
};

static struct attribute_group od_attr_group_gov_pol = {
        .attrs = dbs_attributes_gov_pol,
        .name = "ondemand",
};

/************************** sysfs end ************************/

static int od_init(struct dbs_data *dbs_data)
{
        struct od_dbs_tuners *tuners;
        u64 idle_time;
        int cpu;

        tuners = kzalloc(sizeof(struct od_dbs_tuners), GFP_KERNEL);
        if (!tuners) {
                pr_err("%s: kzalloc failed\n", __func__);
                return -ENOMEM;
        }

        cpu = get_cpu();
        idle_time = get_cpu_idle_time_us(cpu, NULL);
        put_cpu();
        if (idle_time != -1ULL) {
                /* Idle micro accounting is supported. Use finer thresholds */
                tuners->up_threshold = MICRO_FREQUENCY_UP_THRESHOLD;
                tuners->adj_up_threshold = MICRO_FREQUENCY_UP_THRESHOLD -
                        MICRO_FREQUENCY_DOWN_DIFFERENTIAL;
                /*
                 * In nohz/micro accounting case we set the minimum frequency
                 * not depending on HZ, but fixed (very low). The deferred
                 * timer might skip some samples if idle/sleeping as needed.
                 */
                dbs_data->min_sampling_rate = MICRO_FREQUENCY_MIN_SAMPLE_RATE;
        } else {
                tuners->up_threshold = DEF_FREQUENCY_UP_THRESHOLD;
                tuners->adj_up_threshold = DEF_FREQUENCY_UP_THRESHOLD -
                        DEF_FREQUENCY_DOWN_DIFFERENTIAL;

                /* For correct statistics, we need 10 ticks for each measure */
                dbs_data->min_sampling_rate = MIN_SAMPLING_RATE_RATIO *
                        jiffies_to_usecs(10);
        }

        tuners->sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR;
        tuners->ignore_nice = 0;
        tuners->powersave_bias = 0;
        tuners->io_is_busy = should_io_be_busy();

        dbs_data->tuners = tuners;
        pr_info("%s: tuners %p\n", __func__, tuners);
        mutex_init(&dbs_data->mutex);
        return 0;
}

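/*
 * Worked example for the fallback min_sampling_rate above (illustrative
 * numbers, not from the original source): MIN_SAMPLING_RATE_RATIO comes
 * from cpufreq_governor.h (2 at the time of writing), so with HZ = 250
 * each jiffy is 4000 us, jiffies_to_usecs(10) = 40000 and
 * min_sampling_rate = 2 * 40000 = 80000 us, i.e. without micro idle
 * accounting the governor never samples faster than every 80 ms.
 */
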
static void od_exit(struct dbs_data *dbs_data)
{
        kfree(dbs_data->tuners);
}

define_get_cpu_dbs_routines(od_cpu_dbs_info);

static struct od_ops od_ops = {
        .powersave_bias_init_cpu = ondemand_powersave_bias_init_cpu,
        .powersave_bias_target = powersave_bias_target,
        .freq_increase = dbs_freq_increase,
};

static struct common_dbs_data od_dbs_cdata = {
        .governor = GOV_ONDEMAND,
        .attr_group_gov_sys = &od_attr_group_gov_sys,
        .attr_group_gov_pol = &od_attr_group_gov_pol,
        .get_cpu_cdbs = get_cpu_cdbs,
        .get_cpu_dbs_info_s = get_cpu_dbs_info_s,
        .gov_dbs_timer = od_dbs_timer,
        .gov_check_cpu = od_check_cpu,
        .gov_ops = &od_ops,
        .init = od_init,
        .exit = od_exit,
};

static int od_cpufreq_governor_dbs(struct cpufreq_policy *policy,
                unsigned int event)
{
        return cpufreq_governor_dbs(policy, &od_dbs_cdata, event);
}

#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND
static
#endif
struct cpufreq_governor cpufreq_gov_ondemand = {
        .name = "ondemand",
        .governor = od_cpufreq_governor_dbs,
        .max_transition_latency = TRANSITION_LATENCY_LIMIT,
        .owner = THIS_MODULE,
};

static int __init cpufreq_gov_dbs_init(void)
{
        return cpufreq_register_governor(&cpufreq_gov_ondemand);
}

static void __exit cpufreq_gov_dbs_exit(void)
{
        cpufreq_unregister_governor(&cpufreq_gov_ondemand);
}

MODULE_AUTHOR("Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>");
MODULE_AUTHOR("Alexey Starikovskiy <alexey.y.starikovskiy@intel.com>");
MODULE_DESCRIPTION("'cpufreq_ondemand' - A dynamic cpufreq governor for "
        "Low Latency Frequency Transition capable processors");
MODULE_LICENSE("GPL");

#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND
fs_initcall(cpufreq_gov_dbs_init);
#else
module_init(cpufreq_gov_dbs_init);
#endif
module_exit(cpufreq_gov_dbs_exit);
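
/*
 * Example usage from userspace (standard cpufreq sysfs interface; exact
 * paths may vary with kernel configuration):
 *
 *   # select the governor for cpu0
 *   echo ondemand > /sys/devices/system/cpu/cpu0/cpufreq/scaling_governor
 *   # raise the up threshold to 95% and enable a 20% powersave bias
 *   echo 95 > /sys/devices/system/cpu/cpufreq/ondemand/up_threshold
 *   echo 200 > /sys/devices/system/cpu/cpufreq/ondemand/powersave_bias
 */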