intel_pstate.c

/*
 * intel_pstate.c: Native P state management for Intel processors
 *
 * (C) Copyright 2012 Intel Corporation
 * Author: Dirk Brandewie <dirk.j.brandewie@intel.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; version 2
 * of the License.
 */

#include <linux/kernel.h>
#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/ktime.h>
#include <linux/hrtimer.h>
#include <linux/tick.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/list.h>
#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/sysfs.h>
#include <linux/types.h>
#include <linux/fs.h>
#include <linux/debugfs.h>
#include <trace/events/power.h>

#include <asm/div64.h>
#include <asm/msr.h>
#include <asm/cpu_device_id.h>

#define SAMPLE_COUNT 3

#define FRAC_BITS 8
#define int_tofp(X) ((int64_t)(X) << FRAC_BITS)
#define fp_toint(X) ((X) >> FRAC_BITS)
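
/*
 * Fixed point math helpers: values carry FRAC_BITS (8) fractional bits,
 * so int_tofp(1) == 256. mul_fp() and div_fp() below do the intermediate
 * arithmetic in 64 bits so scaling up by FRAC_BITS cannot overflow before
 * the result is shifted back down.
 */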

static inline int32_t mul_fp(int32_t x, int32_t y)
{
        return ((int64_t)x * (int64_t)y) >> FRAC_BITS;
}

static inline int32_t div_fp(int32_t x, int32_t y)
{
        return div_s64((int64_t)x << FRAC_BITS, (int64_t)y);
}

struct sample {
        int core_pct_busy;
        u64 aperf;
        u64 mperf;
        int freq;
};

struct pstate_data {
        int current_pstate;
        int min_pstate;
        int max_pstate;
        int turbo_pstate;
};

struct _pid {
        int setpoint;
        int32_t integral;
        int32_t p_gain;
        int32_t i_gain;
        int32_t d_gain;
        int deadband;
        int last_err;
};

struct cpudata {
        int cpu;

        char name[64];

        struct timer_list timer;

        struct pstate_adjust_policy *pstate_policy;
        struct pstate_data pstate;
        struct _pid pid;

        int min_pstate_count;

        u64 prev_aperf;
        u64 prev_mperf;
        int sample_ptr;
        struct sample samples[SAMPLE_COUNT];
};

static struct cpudata **all_cpu_data;

struct pstate_adjust_policy {
        int sample_rate_ms;
        int deadband;
        int setpoint;
        int p_gain_pct;
        int d_gain_pct;
        int i_gain_pct;
};

static struct pstate_adjust_policy default_policy = {
        .sample_rate_ms = 10,
        .deadband = 0,
        .setpoint = 109,
        .p_gain_pct = 17,
        .d_gain_pct = 0,
        .i_gain_pct = 4,
};

struct perf_limits {
        int no_turbo;
        int max_perf_pct;
        int min_perf_pct;
        int32_t max_perf;
        int32_t min_perf;
        int max_policy_pct;
        int max_sysfs_pct;
};

static struct perf_limits limits = {
        .no_turbo = 0,
        .max_perf_pct = 100,
        .max_perf = int_tofp(1),
        .min_perf_pct = 0,
        .min_perf = 0,
        .max_policy_pct = 100,
        .max_sysfs_pct = 100,
};
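
/*
 * The effective maximum performance is the smaller of the limit imposed by
 * the cpufreq policy (max_policy_pct) and the cap set via sysfs
 * (max_sysfs_pct). min_perf and max_perf hold the same percentages as
 * fixed point fractions, which is the form intel_pstate_get_min_max()
 * consumes.
 */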

static inline void pid_reset(struct _pid *pid, int setpoint, int busy,
                        int deadband, int integral)
{
        pid->setpoint = setpoint;
        pid->deadband = deadband;
        pid->integral = int_tofp(integral);
        pid->last_err = setpoint - busy;
}

static inline void pid_p_gain_set(struct _pid *pid, int percent)
{
        pid->p_gain = div_fp(int_tofp(percent), int_tofp(100));
}

static inline void pid_i_gain_set(struct _pid *pid, int percent)
{
        pid->i_gain = div_fp(int_tofp(percent), int_tofp(100));
}

static inline void pid_d_gain_set(struct _pid *pid, int percent)
{
        pid->d_gain = div_fp(int_tofp(percent), int_tofp(100));
}
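
/*
 * pid_calc() is a conventional PID step. The error is the distance of the
 * scaled busy value from the setpoint (109 by default); errors inside the
 * deadband are ignored. The gains are supplied as percentages and stored
 * as fixed point fractions, the integral term is clamped to +/-30, and the
 * fixed point result is truncated to a whole number of P state steps.
 */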

static signed int pid_calc(struct _pid *pid, int busy)
{
        signed int err, result;
        int32_t pterm, dterm, fp_error;
        int32_t integral_limit;

        err = pid->setpoint - busy;
        fp_error = int_tofp(err);

        if (abs(err) <= pid->deadband)
                return 0;

        pterm = mul_fp(pid->p_gain, fp_error);

        pid->integral += fp_error;

        /* limit the integral term */
        integral_limit = int_tofp(30);
        if (pid->integral > integral_limit)
                pid->integral = integral_limit;
        if (pid->integral < -integral_limit)
                pid->integral = -integral_limit;

        dterm = mul_fp(pid->d_gain, (err - pid->last_err));
        pid->last_err = err;

        result = pterm + mul_fp(pid->integral, pid->i_gain) + dterm;

        return (signed int)fp_toint(result);
}

static inline void intel_pstate_busy_pid_reset(struct cpudata *cpu)
{
        pid_p_gain_set(&cpu->pid, cpu->pstate_policy->p_gain_pct);
        pid_d_gain_set(&cpu->pid, cpu->pstate_policy->d_gain_pct);
        pid_i_gain_set(&cpu->pid, cpu->pstate_policy->i_gain_pct);

        pid_reset(&cpu->pid,
                cpu->pstate_policy->setpoint,
                100,
                cpu->pstate_policy->deadband,
                0);
}

static inline void intel_pstate_reset_all_pid(void)
{
        unsigned int cpu;

        for_each_online_cpu(cpu) {
                if (all_cpu_data[cpu])
                        intel_pstate_busy_pid_reset(all_cpu_data[cpu]);
        }
}

/************************** debugfs begin ************************/
static int pid_param_set(void *data, u64 val)
{
        *(u32 *)data = val;
        intel_pstate_reset_all_pid();
        return 0;
}

static int pid_param_get(void *data, u64 *val)
{
        *val = *(u32 *)data;
        return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(fops_pid_param, pid_param_get,
                        pid_param_set, "%llu\n");

struct pid_param {
        char *name;
        void *value;
};

static struct pid_param pid_files[] = {
        {"sample_rate_ms", &default_policy.sample_rate_ms},
        {"d_gain_pct", &default_policy.d_gain_pct},
        {"i_gain_pct", &default_policy.i_gain_pct},
        {"deadband", &default_policy.deadband},
        {"setpoint", &default_policy.setpoint},
        {"p_gain_pct", &default_policy.p_gain_pct},
        {NULL, NULL}
};

static struct dentry *debugfs_parent;

static void intel_pstate_debug_expose_params(void)
{
        int i = 0;

        debugfs_parent = debugfs_create_dir("pstate_snb", NULL);
        if (IS_ERR_OR_NULL(debugfs_parent))
                return;
        while (pid_files[i].name) {
                debugfs_create_file(pid_files[i].name, 0660,
                                debugfs_parent, pid_files[i].value,
                                &fops_pid_param);
                i++;
        }
}

/************************** debugfs end ************************/

/************************** sysfs begin ************************/
#define show_one(file_name, object) \
        static ssize_t show_##file_name \
        (struct kobject *kobj, struct attribute *attr, char *buf) \
        { \
                return sprintf(buf, "%u\n", limits.object); \
        }

static ssize_t store_no_turbo(struct kobject *a, struct attribute *b,
                                const char *buf, size_t count)
{
        unsigned int input;
        int ret;

        ret = sscanf(buf, "%u", &input);
        if (ret != 1)
                return -EINVAL;
        limits.no_turbo = clamp_t(int, input, 0, 1);

        return count;
}

static ssize_t store_max_perf_pct(struct kobject *a, struct attribute *b,
                                const char *buf, size_t count)
{
        unsigned int input;
        int ret;

        ret = sscanf(buf, "%u", &input);
        if (ret != 1)
                return -EINVAL;

        limits.max_sysfs_pct = clamp_t(int, input, 0, 100);
        limits.max_perf_pct = min(limits.max_policy_pct, limits.max_sysfs_pct);
        limits.max_perf = div_fp(int_tofp(limits.max_perf_pct), int_tofp(100));

        return count;
}

static ssize_t store_min_perf_pct(struct kobject *a, struct attribute *b,
                                const char *buf, size_t count)
{
        unsigned int input;
        int ret;

        ret = sscanf(buf, "%u", &input);
        if (ret != 1)
                return -EINVAL;
        limits.min_perf_pct = clamp_t(int, input, 0, 100);
        limits.min_perf = div_fp(int_tofp(limits.min_perf_pct), int_tofp(100));

        return count;
}

show_one(no_turbo, no_turbo);
show_one(max_perf_pct, max_perf_pct);
show_one(min_perf_pct, min_perf_pct);

define_one_global_rw(no_turbo);
define_one_global_rw(max_perf_pct);
define_one_global_rw(min_perf_pct);

static struct attribute *intel_pstate_attributes[] = {
        &no_turbo.attr,
        &max_perf_pct.attr,
        &min_perf_pct.attr,
        NULL
};

static struct attribute_group intel_pstate_attr_group = {
        .attrs = intel_pstate_attributes,
};

static struct kobject *intel_pstate_kobject;

static void intel_pstate_sysfs_expose_params(void)
{
        int rc;

        intel_pstate_kobject = kobject_create_and_add("intel_pstate",
                                                &cpu_subsys.dev_root->kobj);
        BUG_ON(!intel_pstate_kobject);
        rc = sysfs_create_group(intel_pstate_kobject,
                                &intel_pstate_attr_group);
        BUG_ON(rc);
}

/************************** sysfs end ************************/
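
/*
 * P state discovery. On the SandyBridge parts this driver matches (models
 * 0x2a and 0x2d), MSR_PLATFORM_INFO reports the maximum efficiency ratio
 * (the lowest P state) in bits 47:40 and the maximum non-turbo ratio in
 * bits 15:8; the low byte of MSR_NHM_TURBO_RATIO_LIMIT is the 1-core turbo
 * ratio, which is never allowed to be lower than the non-turbo maximum.
 */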

static int intel_pstate_min_pstate(void)
{
        u64 value;

        rdmsrl(MSR_PLATFORM_INFO, value);
        return (value >> 40) & 0xFF;
}

static int intel_pstate_max_pstate(void)
{
        u64 value;

        rdmsrl(MSR_PLATFORM_INFO, value);
        return (value >> 8) & 0xFF;
}

static int intel_pstate_turbo_pstate(void)
{
        u64 value;
        int nont, ret;

        rdmsrl(MSR_NHM_TURBO_RATIO_LIMIT, value);
        nont = intel_pstate_max_pstate();
        ret = ((value) & 255);
        if (ret <= nont)
                ret = nont;
        return ret;
}

static void intel_pstate_get_min_max(struct cpudata *cpu, int *min, int *max)
{
        int max_perf = cpu->pstate.turbo_pstate;
        int min_perf;

        if (limits.no_turbo)
                max_perf = cpu->pstate.max_pstate;

        max_perf = fp_toint(mul_fp(int_tofp(max_perf), limits.max_perf));
        *max = clamp_t(int, max_perf,
                        cpu->pstate.min_pstate, cpu->pstate.turbo_pstate);

        min_perf = fp_toint(mul_fp(int_tofp(max_perf), limits.min_perf));
        *min = clamp_t(int, min_perf,
                        cpu->pstate.min_pstate, max_perf);
}

static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate)
{
        int max_perf, min_perf;

        intel_pstate_get_min_max(cpu, &min_perf, &max_perf);

        pstate = clamp_t(int, pstate, min_perf, max_perf);

        if (pstate == cpu->pstate.current_pstate)
                return;

        trace_cpu_frequency(pstate * 100000, cpu->cpu);

        cpu->pstate.current_pstate = pstate;
        wrmsrl(MSR_IA32_PERF_CTL, pstate << 8);
}

static inline void intel_pstate_pstate_increase(struct cpudata *cpu, int steps)
{
        int target;

        target = cpu->pstate.current_pstate + steps;
        intel_pstate_set_pstate(cpu, target);
}

static inline void intel_pstate_pstate_decrease(struct cpudata *cpu, int steps)
{
        int target;

        target = cpu->pstate.current_pstate - steps;
        intel_pstate_set_pstate(cpu, target);
}

static void intel_pstate_get_cpu_pstates(struct cpudata *cpu)
{
        sprintf(cpu->name, "Intel 2nd generation core");

        cpu->pstate.min_pstate = intel_pstate_min_pstate();
        cpu->pstate.max_pstate = intel_pstate_max_pstate();
        cpu->pstate.turbo_pstate = intel_pstate_turbo_pstate();

        /*
         * Go to the max P state so we don't slow down boot if we are
         * built-in; if we are a module we will take care of it during
         * normal operation.
         */
        intel_pstate_set_pstate(cpu, cpu->pstate.max_pstate);
}
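
/*
 * Busy accounting: APERF counts cycles at whatever frequency the core is
 * actually running while MPERF counts at the guaranteed (max non-turbo)
 * frequency, and both only advance while the core is unhalted. The ratio
 * of the per-sample deltas, times 100, is what the driver records as
 * core_pct_busy; sample->freq scales that by the max non-turbo P state,
 * assuming 100 MHz per P state step, to report a frequency in kHz.
 */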

static inline void intel_pstate_calc_busy(struct cpudata *cpu,
                                        struct sample *sample)
{
        u64 core_pct;

        core_pct = div64_u64(sample->aperf * 100, sample->mperf);
        sample->freq = cpu->pstate.max_pstate * core_pct * 1000;

        sample->core_pct_busy = core_pct;
}

static inline void intel_pstate_sample(struct cpudata *cpu)
{
        u64 aperf, mperf;

        rdmsrl(MSR_IA32_APERF, aperf);
        rdmsrl(MSR_IA32_MPERF, mperf);
        cpu->sample_ptr = (cpu->sample_ptr + 1) % SAMPLE_COUNT;
        cpu->samples[cpu->sample_ptr].aperf = aperf;
        cpu->samples[cpu->sample_ptr].mperf = mperf;
        cpu->samples[cpu->sample_ptr].aperf -= cpu->prev_aperf;
        cpu->samples[cpu->sample_ptr].mperf -= cpu->prev_mperf;

        intel_pstate_calc_busy(cpu, &cpu->samples[cpu->sample_ptr]);

        cpu->prev_aperf = aperf;
        cpu->prev_mperf = mperf;
}

static inline void intel_pstate_set_sample_time(struct cpudata *cpu)
{
        int sample_time, delay;

        sample_time = cpu->pstate_policy->sample_rate_ms;
        delay = msecs_to_jiffies(sample_time);
        mod_timer_pinned(&cpu->timer, jiffies + delay);
}
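
/*
 * The busy value fed to the PID is rescaled by turbo_pstate/current_pstate:
 * the same raw APERF/MPERF figure reads as a larger value at lower P
 * states, so the controller keeps stepping the P state up or down until
 * the scaled value settles near the setpoint.
 */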

static inline int intel_pstate_get_scaled_busy(struct cpudata *cpu)
{
        int32_t busy_scaled;
        int32_t core_busy, turbo_pstate, current_pstate;

        core_busy = int_tofp(cpu->samples[cpu->sample_ptr].core_pct_busy);
        turbo_pstate = int_tofp(cpu->pstate.turbo_pstate);
        current_pstate = int_tofp(cpu->pstate.current_pstate);
        busy_scaled = mul_fp(core_busy, div_fp(turbo_pstate, current_pstate));

        return fp_toint(busy_scaled);
}

static inline void intel_pstate_adjust_busy_pstate(struct cpudata *cpu)
{
        int busy_scaled;
        struct _pid *pid;
        signed int ctl = 0;
        int steps;

        pid = &cpu->pid;
        busy_scaled = intel_pstate_get_scaled_busy(cpu);

        ctl = pid_calc(pid, busy_scaled);

        steps = abs(ctl);
        if (ctl < 0)
                intel_pstate_pstate_increase(cpu, steps);
        else
                intel_pstate_pstate_decrease(cpu, steps);
}
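
/*
 * Each CPU's deferrable, pinned timer fires every sample_rate_ms (10 ms by
 * default), takes a fresh APERF/MPERF sample and lets the PID adjust the
 * P state. While the CPU sits at its minimum P state, every fifth sample
 * briefly requests the maximum P state.
 */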

static void intel_pstate_timer_func(unsigned long __data)
{
        struct cpudata *cpu = (struct cpudata *) __data;

        intel_pstate_sample(cpu);
        intel_pstate_adjust_busy_pstate(cpu);

        if (cpu->pstate.current_pstate == cpu->pstate.min_pstate) {
                cpu->min_pstate_count++;
                if (!(cpu->min_pstate_count % 5)) {
                        intel_pstate_set_pstate(cpu, cpu->pstate.max_pstate);
                }
        } else
                cpu->min_pstate_count = 0;

        intel_pstate_set_sample_time(cpu);
}

#define ICPU(model, policy) \
        { X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY, (unsigned long)&policy }

static const struct x86_cpu_id intel_pstate_cpu_ids[] = {
        ICPU(0x2a, default_policy),
        ICPU(0x2d, default_policy),
        {}
};
MODULE_DEVICE_TABLE(x86cpu, intel_pstate_cpu_ids);

static int intel_pstate_init_cpu(unsigned int cpunum)
{
        const struct x86_cpu_id *id;
        struct cpudata *cpu;

        id = x86_match_cpu(intel_pstate_cpu_ids);
        if (!id)
                return -ENODEV;

        all_cpu_data[cpunum] = kzalloc(sizeof(struct cpudata), GFP_KERNEL);
        if (!all_cpu_data[cpunum])
                return -ENOMEM;

        cpu = all_cpu_data[cpunum];

        intel_pstate_get_cpu_pstates(cpu);

        cpu->cpu = cpunum;
        cpu->pstate_policy =
                (struct pstate_adjust_policy *)id->driver_data;
        init_timer_deferrable(&cpu->timer);
        cpu->timer.function = intel_pstate_timer_func;
        cpu->timer.data = (unsigned long)cpu;
        cpu->timer.expires = jiffies + HZ/100;
        intel_pstate_busy_pid_reset(cpu);
        intel_pstate_sample(cpu);
        intel_pstate_set_pstate(cpu, cpu->pstate.max_pstate);

        add_timer_on(&cpu->timer, cpunum);

        pr_info("Intel pstate controlling: cpu %d\n", cpunum);

        return 0;
}

static unsigned int intel_pstate_get(unsigned int cpu_num)
{
        struct sample *sample;
        struct cpudata *cpu;

        cpu = all_cpu_data[cpu_num];
        if (!cpu)
                return 0;
        sample = &cpu->samples[cpu->sample_ptr];
        return sample->freq;
}
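
/*
 * This is a setpolicy (governor-less) cpufreq driver: the core only hands
 * us a policy and min/max frequency bounds. PERFORMANCE pins both limits
 * to 100%; otherwise the policy's min/max are converted to percentages of
 * cpuinfo.max_freq (the turbo frequency) and combined with any sysfs cap.
 */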

static int intel_pstate_set_policy(struct cpufreq_policy *policy)
{
        struct cpudata *cpu;

        cpu = all_cpu_data[policy->cpu];

        if (!policy->cpuinfo.max_freq)
                return -ENODEV;

        if (policy->policy == CPUFREQ_POLICY_PERFORMANCE) {
                limits.min_perf_pct = 100;
                limits.min_perf = int_tofp(1);
                limits.max_perf_pct = 100;
                limits.max_perf = int_tofp(1);
                limits.no_turbo = 0;
                return 0;
        }
        limits.min_perf_pct = (policy->min * 100) / policy->cpuinfo.max_freq;
        limits.min_perf_pct = clamp_t(int, limits.min_perf_pct, 0, 100);
        limits.min_perf = div_fp(int_tofp(limits.min_perf_pct), int_tofp(100));

        limits.max_policy_pct = policy->max * 100 / policy->cpuinfo.max_freq;
        limits.max_policy_pct = clamp_t(int, limits.max_policy_pct, 0, 100);
        limits.max_perf_pct = min(limits.max_policy_pct, limits.max_sysfs_pct);
        limits.max_perf = div_fp(int_tofp(limits.max_perf_pct), int_tofp(100));

        return 0;
}

static int intel_pstate_verify_policy(struct cpufreq_policy *policy)
{
        cpufreq_verify_within_limits(policy,
                                policy->cpuinfo.min_freq,
                                policy->cpuinfo.max_freq);

        if ((policy->policy != CPUFREQ_POLICY_POWERSAVE) &&
                (policy->policy != CPUFREQ_POLICY_PERFORMANCE))
                return -EINVAL;

        return 0;
}

static int __cpuinit intel_pstate_cpu_exit(struct cpufreq_policy *policy)
{
        int cpu = policy->cpu;

        del_timer(&all_cpu_data[cpu]->timer);
        kfree(all_cpu_data[cpu]);
        all_cpu_data[cpu] = NULL;
        return 0;
}

static int __cpuinit intel_pstate_cpu_init(struct cpufreq_policy *policy)
{
        int rc, min_pstate, max_pstate;
        struct cpudata *cpu;

        rc = intel_pstate_init_cpu(policy->cpu);
        if (rc)
                return rc;

        cpu = all_cpu_data[policy->cpu];

        if (!limits.no_turbo &&
                limits.min_perf_pct == 100 && limits.max_perf_pct == 100)
                policy->policy = CPUFREQ_POLICY_PERFORMANCE;
        else
                policy->policy = CPUFREQ_POLICY_POWERSAVE;

        intel_pstate_get_min_max(cpu, &min_pstate, &max_pstate);
        policy->min = min_pstate * 100000;
        policy->max = max_pstate * 100000;

        /* cpuinfo and default policy values */
        policy->cpuinfo.min_freq = cpu->pstate.min_pstate * 100000;
        policy->cpuinfo.max_freq = cpu->pstate.turbo_pstate * 100000;
        policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
        cpumask_set_cpu(policy->cpu, policy->cpus);

        return 0;
}

static struct cpufreq_driver intel_pstate_driver = {
        .flags          = CPUFREQ_CONST_LOOPS,
        .verify         = intel_pstate_verify_policy,
        .setpolicy      = intel_pstate_set_policy,
        .get            = intel_pstate_get,
        .init           = intel_pstate_cpu_init,
        .exit           = intel_pstate_cpu_exit,
        .name           = "intel_pstate",
        .owner          = THIS_MODULE,
};

static int __initdata no_load;

static int intel_pstate_msrs_not_valid(void)
{
        /* Check that all the MSRs we are using are valid. */
        u64 aperf, mperf, tmp;

        rdmsrl(MSR_IA32_APERF, aperf);
        rdmsrl(MSR_IA32_MPERF, mperf);

        if (!intel_pstate_min_pstate() ||
                !intel_pstate_max_pstate() ||
                !intel_pstate_turbo_pstate())
                return -ENODEV;

        rdmsrl(MSR_IA32_APERF, tmp);
        if (!(tmp - aperf))
                return -ENODEV;

        rdmsrl(MSR_IA32_MPERF, tmp);
        if (!(tmp - mperf))
                return -ENODEV;

        return 0;
}

static int __init intel_pstate_init(void)
{
        int cpu, rc = 0;
        const struct x86_cpu_id *id;

        if (no_load)
                return -ENODEV;

        id = x86_match_cpu(intel_pstate_cpu_ids);
        if (!id)
                return -ENODEV;

        if (intel_pstate_msrs_not_valid())
                return -ENODEV;

        pr_info("Intel P-state driver initializing.\n");

        all_cpu_data = vzalloc(sizeof(void *) * num_possible_cpus());
        if (!all_cpu_data)
                return -ENOMEM;

        rc = cpufreq_register_driver(&intel_pstate_driver);
        if (rc)
                goto out;

        intel_pstate_debug_expose_params();
        intel_pstate_sysfs_expose_params();
        return rc;
out:
        get_online_cpus();
        for_each_online_cpu(cpu) {
                if (all_cpu_data[cpu]) {
                        del_timer_sync(&all_cpu_data[cpu]->timer);
                        kfree(all_cpu_data[cpu]);
                }
        }

        put_online_cpus();
        vfree(all_cpu_data);
        return -ENODEV;
}
device_initcall(intel_pstate_init);

static int __init intel_pstate_setup(char *str)
{
        if (!str)
                return -EINVAL;

        if (!strcmp(str, "disable"))
                no_load = 1;
        return 0;
}
early_param("intel_pstate", intel_pstate_setup);

MODULE_AUTHOR("Dirk Brandewie <dirk.j.brandewie@intel.com>");
MODULE_DESCRIPTION("'intel_pstate' - P state driver for Intel Core processors");
MODULE_LICENSE("GPL");