cpufreq_ondemand.c

/*
 * drivers/cpufreq/cpufreq_ondemand.c
 *
 * Copyright (C) 2001 Russell King
 *           (C) 2003 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>.
 *                    Jun Nakajima <jun.nakajima@intel.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/cpufreq.h>
#include <linux/cpu.h>
#include <linux/jiffies.h>
#include <linux/kernel_stat.h>
#include <linux/mutex.h>

/*
 * dbs is used in this file as a shorthand for demand-based switching.
 * It helps to keep variable names smaller and simpler.
 */

#define DEF_FREQUENCY_UP_THRESHOLD		(80)
#define MIN_FREQUENCY_UP_THRESHOLD		(11)
#define MAX_FREQUENCY_UP_THRESHOLD		(100)

/*
 * The polling frequency of this governor depends on the capability of
 * the processor. The default polling interval is 1000 times the transition
 * latency of the processor. The governor will work on any processor with
 * transition latency <= 10 ms, using an appropriate sampling rate.
 * For CPUs with transition latency > 10 ms (mostly drivers with
 * CPUFREQ_ETERNAL) this governor will not work.
 * All times here are in us (microseconds).
 */
static unsigned int def_sampling_rate;
#define MIN_SAMPLING_RATE_RATIO			(2)
/* for correct statistics, we need at least 10 ticks between each measure */
#define MIN_STAT_SAMPLING_RATE \
		(MIN_SAMPLING_RATE_RATIO * jiffies_to_usecs(10))
#define MIN_SAMPLING_RATE \
		(def_sampling_rate / MIN_SAMPLING_RATE_RATIO)
#define MAX_SAMPLING_RATE			(500 * def_sampling_rate)
#define DEF_SAMPLING_RATE_LATENCY_MULTIPLIER	(1000)
#define TRANSITION_LATENCY_LIMIT		(10 * 1000 * 1000)
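
/*
 * Worked example (values assumed for illustration): a CPU whose
 * transition latency is 100 us gets def_sampling_rate =
 * 100 * DEF_SAMPLING_RATE_LATENCY_MULTIPLIER = 100,000 us, i.e. the
 * governor samples load every 100 ms; the user-visible bounds then
 * become MIN_SAMPLING_RATE = 50,000 us and
 * MAX_SAMPLING_RATE = 50,000,000 us.
 */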

static void do_dbs_timer(struct work_struct *work);

/* Sampling types */
enum {DBS_NORMAL_SAMPLE, DBS_SUB_SAMPLE};
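/*
 * With powersave_bias set, each sampling window is split in two: a
 * DBS_NORMAL_SAMPLE re-evaluates load and runs at freq_hi, then a
 * DBS_SUB_SAMPLE drops to freq_lo for the remainder of the window
 * (see do_dbs_timer() below).
 */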

struct cpu_dbs_info_s {
        cputime64_t prev_cpu_idle;
        cputime64_t prev_cpu_wall;
        struct cpufreq_policy *cur_policy;
        struct delayed_work work;
        struct cpufreq_frequency_table *freq_table;
        unsigned int freq_lo;
        unsigned int freq_lo_jiffies;
        unsigned int freq_hi_jiffies;
        int cpu;
        unsigned int enable:1,
                     sample_type:1;
};
static DEFINE_PER_CPU(struct cpu_dbs_info_s, cpu_dbs_info);

static unsigned int dbs_enable; /* number of CPUs using this policy */

/*
 * DEADLOCK ALERT! There is an ordering requirement between the cpu_hotplug
 * lock and dbs_mutex. The cpu_hotplug lock should always be held before
 * dbs_mutex. If any function that can potentially take the cpu_hotplug lock
 * (like __cpufreq_driver_target()) is being called with dbs_mutex taken, then
 * the cpu_hotplug lock should be taken before that. Note that the cpu_hotplug
 * lock is recursive for the same process. -Venki
 */
static DEFINE_MUTEX(dbs_mutex);

static struct workqueue_struct *kondemand_wq;

static struct dbs_tuners {
        unsigned int sampling_rate;
        unsigned int up_threshold;
        unsigned int ignore_nice;
        unsigned int powersave_bias;
} dbs_tuners_ins = {
        .up_threshold = DEF_FREQUENCY_UP_THRESHOLD,
        .ignore_nice = 0,
        .powersave_bias = 0,
};
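
/*
 * Return the cumulative "idle" time of @cpu, derived as wall time minus
 * busy time (user + system + irq + softirq + steal, plus nice unless
 * ignore_nice is set, in which case nice time counts as idle).
 */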
static inline cputime64_t get_cpu_idle_time(unsigned int cpu)
{
        cputime64_t idle_time;
        cputime64_t cur_jiffies;
        cputime64_t busy_time;

        cur_jiffies = jiffies64_to_cputime64(get_jiffies_64());
        busy_time = cputime64_add(kstat_cpu(cpu).cpustat.user,
                                  kstat_cpu(cpu).cpustat.system);

        busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.irq);
        busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.softirq);
        busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.steal);

        if (!dbs_tuners_ins.ignore_nice) {
                busy_time = cputime64_add(busy_time,
                                kstat_cpu(cpu).cpustat.nice);
        }

        idle_time = cputime64_sub(cur_jiffies, busy_time);
        return idle_time;
}

/*
 * Find the right frequency to set now, with powersave_bias on.
 * Returns the freq_hi to be used right now and will set freq_hi_jiffies,
 * freq_lo, and freq_lo_jiffies in the per-CPU area for averaging freqs.
 */
static unsigned int powersave_bias_target(struct cpufreq_policy *policy,
                                          unsigned int freq_next,
                                          unsigned int relation)
{
        unsigned int freq_req, freq_reduc, freq_avg;
        unsigned int freq_hi, freq_lo;
        unsigned int index = 0;
        unsigned int jiffies_total, jiffies_hi, jiffies_lo;
        struct cpu_dbs_info_s *dbs_info = &per_cpu(cpu_dbs_info, policy->cpu);

        if (!dbs_info->freq_table) {
                dbs_info->freq_lo = 0;
                dbs_info->freq_lo_jiffies = 0;
                return freq_next;
        }

        cpufreq_frequency_table_target(policy, dbs_info->freq_table, freq_next,
                        relation, &index);
        freq_req = dbs_info->freq_table[index].frequency;
        freq_reduc = freq_req * dbs_tuners_ins.powersave_bias / 1000;
        freq_avg = freq_req - freq_reduc;

        /* Find freq bounds for freq_avg in freq_table */
        index = 0;
        cpufreq_frequency_table_target(policy, dbs_info->freq_table, freq_avg,
                        CPUFREQ_RELATION_H, &index);
        freq_lo = dbs_info->freq_table[index].frequency;
        index = 0;
        cpufreq_frequency_table_target(policy, dbs_info->freq_table, freq_avg,
                        CPUFREQ_RELATION_L, &index);
        freq_hi = dbs_info->freq_table[index].frequency;

        /* Find out how long we have to be in hi and lo freqs */
        if (freq_hi == freq_lo) {
                dbs_info->freq_lo = 0;
                dbs_info->freq_lo_jiffies = 0;
                return freq_lo;
        }
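        /*
         * Split the sampling window between freq_hi and freq_lo so that
         * the time-weighted average frequency equals freq_avg:
         *   jiffies_hi = jiffies_total * (freq_avg - freq_lo)
         *                              / (freq_hi - freq_lo)
         * rounded to the nearest jiffy by adding (freq_hi - freq_lo) / 2
         * before the division.
         */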
        jiffies_total = usecs_to_jiffies(dbs_tuners_ins.sampling_rate);
        jiffies_hi = (freq_avg - freq_lo) * jiffies_total;
        jiffies_hi += ((freq_hi - freq_lo) / 2);
        jiffies_hi /= (freq_hi - freq_lo);
        jiffies_lo = jiffies_total - jiffies_hi;
        dbs_info->freq_lo = freq_lo;
        dbs_info->freq_lo_jiffies = jiffies_lo;
        dbs_info->freq_hi_jiffies = jiffies_hi;
        return freq_hi;
}

static void ondemand_powersave_bias_init(void)
{
        int i;
        for_each_online_cpu(i) {
                struct cpu_dbs_info_s *dbs_info = &per_cpu(cpu_dbs_info, i);
                dbs_info->freq_table = cpufreq_frequency_get_table(i);
                dbs_info->freq_lo = 0;
        }
}

/************************** sysfs interface ************************/
static ssize_t show_sampling_rate_max(struct cpufreq_policy *policy, char *buf)
{
        return sprintf(buf, "%u\n", MAX_SAMPLING_RATE);
}

static ssize_t show_sampling_rate_min(struct cpufreq_policy *policy, char *buf)
{
        return sprintf(buf, "%u\n", MIN_SAMPLING_RATE);
}

#define define_one_ro(_name)            \
static struct freq_attr _name =         \
__ATTR(_name, 0444, show_##_name, NULL)

define_one_ro(sampling_rate_max);
define_one_ro(sampling_rate_min);

/* cpufreq_ondemand Governor Tunables */
#define show_one(file_name, object)                                     \
static ssize_t show_##file_name                                         \
(struct cpufreq_policy *unused, char *buf)                              \
{                                                                       \
        return sprintf(buf, "%u\n", dbs_tuners_ins.object);             \
}
show_one(sampling_rate, sampling_rate);
show_one(up_threshold, up_threshold);
show_one(ignore_nice_load, ignore_nice);
show_one(powersave_bias, powersave_bias);

static ssize_t store_sampling_rate(struct cpufreq_policy *unused,
                const char *buf, size_t count)
{
        unsigned int input;
        int ret;
        ret = sscanf(buf, "%u", &input);

        mutex_lock(&dbs_mutex);
        if (ret != 1 || input > MAX_SAMPLING_RATE
                     || input < MIN_SAMPLING_RATE) {
                mutex_unlock(&dbs_mutex);
                return -EINVAL;
        }

        dbs_tuners_ins.sampling_rate = input;
        mutex_unlock(&dbs_mutex);

        return count;
}

static ssize_t store_up_threshold(struct cpufreq_policy *unused,
                const char *buf, size_t count)
{
        unsigned int input;
        int ret;
        ret = sscanf(buf, "%u", &input);

        mutex_lock(&dbs_mutex);
        if (ret != 1 || input > MAX_FREQUENCY_UP_THRESHOLD ||
                        input < MIN_FREQUENCY_UP_THRESHOLD) {
                mutex_unlock(&dbs_mutex);
                return -EINVAL;
        }

        dbs_tuners_ins.up_threshold = input;
        mutex_unlock(&dbs_mutex);

        return count;
}

static ssize_t store_ignore_nice_load(struct cpufreq_policy *policy,
                const char *buf, size_t count)
{
        unsigned int input;
        int ret;
        unsigned int j;

        ret = sscanf(buf, "%u", &input);
        if (ret != 1)
                return -EINVAL;

        if (input > 1)
                input = 1;

        mutex_lock(&dbs_mutex);
        if (input == dbs_tuners_ins.ignore_nice) { /* nothing to do */
                mutex_unlock(&dbs_mutex);
                return count;
        }
        dbs_tuners_ins.ignore_nice = input;

        /* we need to re-evaluate prev_cpu_idle */
        for_each_online_cpu(j) {
                struct cpu_dbs_info_s *dbs_info;
                dbs_info = &per_cpu(cpu_dbs_info, j);
                dbs_info->prev_cpu_idle = get_cpu_idle_time(j);
                dbs_info->prev_cpu_wall = get_jiffies_64();
        }
        mutex_unlock(&dbs_mutex);

        return count;
}

static ssize_t store_powersave_bias(struct cpufreq_policy *unused,
                const char *buf, size_t count)
{
        unsigned int input;
        int ret;
        ret = sscanf(buf, "%u", &input);

        if (ret != 1)
                return -EINVAL;

        if (input > 1000)
                input = 1000;

        mutex_lock(&dbs_mutex);
        dbs_tuners_ins.powersave_bias = input;
        ondemand_powersave_bias_init();
        mutex_unlock(&dbs_mutex);

        return count;
}
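
/*
 * powersave_bias is expressed in tenths of a percent (0..1000): a value
 * of 100 biases the chosen target 10% below the frequency the plain
 * ondemand algorithm would pick, with powersave_bias_target() alternating
 * between the two neighbouring table frequencies to hit that average.
 */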

#define define_one_rw(_name) \
static struct freq_attr _name = \
__ATTR(_name, 0644, show_##_name, store_##_name)

define_one_rw(sampling_rate);
define_one_rw(up_threshold);
define_one_rw(ignore_nice_load);
define_one_rw(powersave_bias);

static struct attribute *dbs_attributes[] = {
        &sampling_rate_max.attr,
        &sampling_rate_min.attr,
        &sampling_rate.attr,
        &up_threshold.attr,
        &ignore_nice_load.attr,
        &powersave_bias.attr,
        NULL
};

static struct attribute_group dbs_attr_group = {
        .attrs = dbs_attributes,
        .name = "ondemand",
};

/************************** sysfs end ************************/

static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
{
        unsigned int idle_ticks, total_ticks;
        unsigned int load = 0;
        cputime64_t cur_jiffies;
        struct cpufreq_policy *policy;
        unsigned int j;

        if (!this_dbs_info->enable)
                return;

        this_dbs_info->freq_lo = 0;
        policy = this_dbs_info->cur_policy;
        cur_jiffies = jiffies64_to_cputime64(get_jiffies_64());
        total_ticks = (unsigned int) cputime64_sub(cur_jiffies,
                        this_dbs_info->prev_cpu_wall);
        this_dbs_info->prev_cpu_wall = get_jiffies_64();

        if (!total_ticks)
                return;
        /*
         * Every sampling_rate we check whether the current idle time is
         * less than 20% (default); if so, we try to increase the frequency.
         * Every sampling_rate we also look for the lowest frequency which
         * can sustain the load while keeping idle time over 30%. If such
         * a frequency exists, we try to decrease to this frequency.
         *
         * Any frequency increase takes it to the maximum frequency.
         * Frequency reduction happens at minimum steps of
         * 5% (default) of the current frequency.
         */

        /* Get Idle Time */
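        /*
         * Keep the smallest per-CPU idle delta across the policy's CPUs,
         * i.e. size the frequency for the busiest CPU sharing the policy.
         */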
        idle_ticks = UINT_MAX;
        for_each_cpu_mask(j, policy->cpus) {
                cputime64_t total_idle_ticks;
                unsigned int tmp_idle_ticks;
                struct cpu_dbs_info_s *j_dbs_info;

                j_dbs_info = &per_cpu(cpu_dbs_info, j);
                total_idle_ticks = get_cpu_idle_time(j);
                tmp_idle_ticks = (unsigned int) cputime64_sub(total_idle_ticks,
                                j_dbs_info->prev_cpu_idle);
                j_dbs_info->prev_cpu_idle = total_idle_ticks;

                if (tmp_idle_ticks < idle_ticks)
                        idle_ticks = tmp_idle_ticks;
        }
        if (likely(total_ticks > idle_ticks))
                load = (100 * (total_ticks - idle_ticks)) / total_ticks;

        /* Check for frequency increase */
        if (load > dbs_tuners_ins.up_threshold) {
                /* if we are already at full speed then break out early */
                if (!dbs_tuners_ins.powersave_bias) {
                        if (policy->cur == policy->max)
                                return;

                        __cpufreq_driver_target(policy, policy->max,
                                CPUFREQ_RELATION_H);
                } else {
                        int freq = powersave_bias_target(policy, policy->max,
                                        CPUFREQ_RELATION_H);
                        __cpufreq_driver_target(policy, freq,
                                CPUFREQ_RELATION_L);
                }
                return;
        }

        /* Check for frequency decrease */
        /* if we cannot reduce the frequency anymore, break out early */
        if (policy->cur == policy->min)
                return;

        /*
         * The optimal frequency is the lowest frequency that can support
         * the current CPU usage without triggering the up policy. To be
         * safe, we target 10 points under the threshold.
         */
        if (load < (dbs_tuners_ins.up_threshold - 10)) {
                unsigned int freq_next, freq_cur;

                freq_cur = __cpufreq_driver_getavg(policy);
                if (!freq_cur)
                        freq_cur = policy->cur;

                freq_next = (freq_cur * load) /
                        (dbs_tuners_ins.up_threshold - 10);

                if (!dbs_tuners_ins.powersave_bias) {
                        __cpufreq_driver_target(policy, freq_next,
                                        CPUFREQ_RELATION_L);
                } else {
                        int freq = powersave_bias_target(policy, freq_next,
                                        CPUFREQ_RELATION_L);
                        __cpufreq_driver_target(policy, freq,
                                        CPUFREQ_RELATION_L);
                }
        }
}
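
/*
 * Worked example for dbs_check_cpu() (numbers assumed for illustration):
 * with the default up_threshold of 80, a measured load of 85% jumps
 * straight to policy->max, while a load of 35% at 2 GHz picks
 * freq_next = 2000000 * 35 / (80 - 10) = 1000000 kHz, i.e. the lowest
 * frequency expected to run at no more than 70% load.
 */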

static void do_dbs_timer(struct work_struct *work)
{
        struct cpu_dbs_info_s *dbs_info =
                container_of(work, struct cpu_dbs_info_s, work.work);
        unsigned int cpu = dbs_info->cpu;
        int sample_type = dbs_info->sample_type;

        /* We want all CPUs to do sampling nearly on the same jiffy */
        int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate);
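        /*
         * Rounding the next expiry down to a multiple of the sampling
         * interval phase-aligns the per-CPU timers, so they fire on
         * nearly the same jiffy instead of drifting apart.
         */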
        delay -= jiffies % delay;

        if (lock_policy_rwsem_write(cpu) < 0)
                return;

        if (!dbs_info->enable) {
                unlock_policy_rwsem_write(cpu);
                return;
        }

        /* Common NORMAL_SAMPLE setup */
        dbs_info->sample_type = DBS_NORMAL_SAMPLE;
        if (!dbs_tuners_ins.powersave_bias ||
            sample_type == DBS_NORMAL_SAMPLE) {
                dbs_check_cpu(dbs_info);
                if (dbs_info->freq_lo) {
                        /* Setup timer for SUB_SAMPLE */
                        dbs_info->sample_type = DBS_SUB_SAMPLE;
                        delay = dbs_info->freq_hi_jiffies;
                }
        } else {
                __cpufreq_driver_target(dbs_info->cur_policy,
                                        dbs_info->freq_lo,
                                        CPUFREQ_RELATION_H);
        }
        queue_delayed_work_on(cpu, kondemand_wq, &dbs_info->work, delay);
        unlock_policy_rwsem_write(cpu);
}

static inline void dbs_timer_init(struct cpu_dbs_info_s *dbs_info)
{
        /* We want all CPUs to do sampling nearly on the same jiffy */
        int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate);
        delay -= jiffies % delay;

        dbs_info->enable = 1;
        ondemand_powersave_bias_init();
        dbs_info->sample_type = DBS_NORMAL_SAMPLE;
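        /*
         * Deferrable work: if the CPU goes idle, the sampling timer does
         * not wake it up just to measure an idle system; it runs on the
         * next non-idle tick instead.
         */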
        INIT_DELAYED_WORK_DEFERRABLE(&dbs_info->work, do_dbs_timer);
        queue_delayed_work_on(dbs_info->cpu, kondemand_wq, &dbs_info->work,
                              delay);
}

static inline void dbs_timer_exit(struct cpu_dbs_info_s *dbs_info)
{
        dbs_info->enable = 0;
        cancel_delayed_work(&dbs_info->work);
}

static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
                                unsigned int event)
{
        unsigned int cpu = policy->cpu;
        struct cpu_dbs_info_s *this_dbs_info;
        unsigned int j;
        int rc;

        this_dbs_info = &per_cpu(cpu_dbs_info, cpu);

        switch (event) {
        case CPUFREQ_GOV_START:
                if ((!cpu_online(cpu)) || (!policy->cur))
                        return -EINVAL;

                if (this_dbs_info->enable) /* Already enabled */
                        break;

                mutex_lock(&dbs_mutex);
                dbs_enable++;

                rc = sysfs_create_group(&policy->kobj, &dbs_attr_group);
                if (rc) {
                        dbs_enable--;
                        mutex_unlock(&dbs_mutex);
                        return rc;
                }

                for_each_cpu_mask(j, policy->cpus) {
                        struct cpu_dbs_info_s *j_dbs_info;
                        j_dbs_info = &per_cpu(cpu_dbs_info, j);
                        j_dbs_info->cur_policy = policy;

                        j_dbs_info->prev_cpu_idle = get_cpu_idle_time(j);
                        j_dbs_info->prev_cpu_wall = get_jiffies_64();
                }
                this_dbs_info->cpu = cpu;
                /*
                 * Start the timer/schedule the work when this governor
                 * is used for the first time.
                 */
                if (dbs_enable == 1) {
                        unsigned int latency;
                        /* policy latency is in ns. Convert it to us first */
                        latency = policy->cpuinfo.transition_latency / 1000;
                        if (latency == 0)
                                latency = 1;

                        def_sampling_rate = latency *
                                        DEF_SAMPLING_RATE_LATENCY_MULTIPLIER;

                        if (def_sampling_rate < MIN_STAT_SAMPLING_RATE)
                                def_sampling_rate = MIN_STAT_SAMPLING_RATE;

                        dbs_tuners_ins.sampling_rate = def_sampling_rate;
                }
                dbs_timer_init(this_dbs_info);

                mutex_unlock(&dbs_mutex);
                break;

        case CPUFREQ_GOV_STOP:
                mutex_lock(&dbs_mutex);
                dbs_timer_exit(this_dbs_info);
                sysfs_remove_group(&policy->kobj, &dbs_attr_group);
                dbs_enable--;
                mutex_unlock(&dbs_mutex);
                break;

        case CPUFREQ_GOV_LIMITS:
                mutex_lock(&dbs_mutex);
                if (policy->max < this_dbs_info->cur_policy->cur)
                        __cpufreq_driver_target(this_dbs_info->cur_policy,
                                                policy->max,
                                                CPUFREQ_RELATION_H);
                else if (policy->min > this_dbs_info->cur_policy->cur)
                        __cpufreq_driver_target(this_dbs_info->cur_policy,
                                                policy->min,
                                                CPUFREQ_RELATION_L);
                mutex_unlock(&dbs_mutex);
                break;
        }
        return 0;
}

struct cpufreq_governor cpufreq_gov_ondemand = {
        .name                   = "ondemand",
        .governor               = cpufreq_governor_dbs,
        .max_transition_latency = TRANSITION_LATENCY_LIMIT,
        .owner                  = THIS_MODULE,
};
EXPORT_SYMBOL(cpufreq_gov_ondemand);

static int __init cpufreq_gov_dbs_init(void)
{
        kondemand_wq = create_workqueue("kondemand");
        if (!kondemand_wq) {
                printk(KERN_ERR "Creation of kondemand failed\n");
                return -EFAULT;
        }
        return cpufreq_register_governor(&cpufreq_gov_ondemand);
}

static void __exit cpufreq_gov_dbs_exit(void)
{
        cpufreq_unregister_governor(&cpufreq_gov_ondemand);
        destroy_workqueue(kondemand_wq);
}

MODULE_AUTHOR("Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>");
MODULE_AUTHOR("Alexey Starikovskiy <alexey.y.starikovskiy@intel.com>");
MODULE_DESCRIPTION("'cpufreq_ondemand' - A dynamic cpufreq governor for "
                   "Low Latency Frequency Transition capable processors");
MODULE_LICENSE("GPL");

module_init(cpufreq_gov_dbs_init);
module_exit(cpufreq_gov_dbs_exit);