/*
 * processor_throttling.c - Throttling submodule of the ACPI processor driver
 *
 * Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
 * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
 * Copyright (C) 2004       Dominik Brodowski <linux@brodo.de>
 * Copyright (C) 2004  Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 *			- Added processor hotplug support
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or (at
 * your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/cpufreq.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

#include <asm/io.h>
#include <asm/uaccess.h>

#include <acpi/acpi_bus.h>
#include <acpi/acpi_drivers.h>
#include <acpi/processor.h>

#define ACPI_PROCESSOR_CLASS	"processor"
#define _COMPONENT		ACPI_PROCESSOR_COMPONENT
ACPI_MODULE_NAME("processor_throttling");
struct throttling_tstate {
	unsigned int cpu;	/* cpu nr */
	int target_state;	/* target T-state */
};

#define THROTTLING_PRECHANGE	(1)
#define THROTTLING_POSTCHANGE	(2)
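
/*
 * A PRECHANGE notification lets each affected CPU raise the requested
 * T-state so that it still honors the thermal, user and _TPC limits; a
 * POSTCHANGE notification merely records the state that was finally
 * programmed.  See acpi_processor_throttling_notifier() below.
 */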
static int acpi_processor_get_throttling(struct acpi_processor *pr);
int acpi_processor_set_throttling(struct acpi_processor *pr, int state);
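
/*
 * Walk every possible CPU and cross-check the _TSD domain data: all
 * members of a domain must agree on num_processors and coord_type, and
 * each member's shared_cpu_map ends up holding the whole domain.  On
 * any inconsistency every CPU falls back to SW_ALL coordination with a
 * map containing only itself.
 */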
static int acpi_processor_update_tsd_coord(void)
{
	int count, count_target;
	int retval = 0;
	unsigned int i, j;
	cpumask_var_t covered_cpus;
	struct acpi_processor *pr, *match_pr;
	struct acpi_tsd_package *pdomain, *match_pdomain;
	struct acpi_processor_throttling *pthrottling, *match_pthrottling;

	if (!alloc_cpumask_var(&covered_cpus, GFP_KERNEL))
		return -ENOMEM;

	/*
	 * Now that we have _TSD data from all CPUs, let's set up T-state
	 * coordination between all CPUs.
	 */
	for_each_possible_cpu(i) {
		pr = per_cpu(processors, i);
		if (!pr)
			continue;

		/* Basic validity check for domain info */
		pthrottling = &(pr->throttling);
		/*
		 * If the _TSD package for one CPU is invalid, the
		 * coordination among all CPUs is treated as invalid.
		 */
		if (!pthrottling->tsd_valid_flag) {
			retval = -EINVAL;
			break;
		}
	}
	if (retval)
		goto err_ret;

	cpumask_clear(covered_cpus);
	for_each_possible_cpu(i) {
		pr = per_cpu(processors, i);
		if (!pr)
			continue;

		if (cpumask_test_cpu(i, covered_cpus))
			continue;
		pthrottling = &pr->throttling;

		pdomain = &(pthrottling->domain_info);
		cpumask_set_cpu(i, pthrottling->shared_cpu_map);
		cpumask_set_cpu(i, covered_cpus);
		/*
		 * If the number of processors in the _TSD domain is 1, it is
		 * unnecessary to parse the coordination for this CPU.
		 */
		if (pdomain->num_processors <= 1)
			continue;

		/* Validate the Domain info */
		count_target = pdomain->num_processors;
		count = 1;

		for_each_possible_cpu(j) {
			if (i == j)
				continue;

			match_pr = per_cpu(processors, j);
			if (!match_pr)
				continue;

			match_pthrottling = &(match_pr->throttling);
			match_pdomain = &(match_pthrottling->domain_info);
			if (match_pdomain->domain != pdomain->domain)
				continue;

			/* Here i and j are in the same domain.
			 * If two _TSD packages have the same domain, they
			 * should have the same num_processors and
			 * coordination type. Otherwise it will be regarded
			 * as illegal.
			 */
			if (match_pdomain->num_processors != count_target) {
				retval = -EINVAL;
				goto err_ret;
			}

			if (pdomain->coord_type != match_pdomain->coord_type) {
				retval = -EINVAL;
				goto err_ret;
			}

			cpumask_set_cpu(j, covered_cpus);
			cpumask_set_cpu(j, pthrottling->shared_cpu_map);
			count++;
		}
		for_each_possible_cpu(j) {
			if (i == j)
				continue;

			match_pr = per_cpu(processors, j);
			if (!match_pr)
				continue;

			match_pthrottling = &(match_pr->throttling);
			match_pdomain = &(match_pthrottling->domain_info);
			if (match_pdomain->domain != pdomain->domain)
				continue;

			/*
			 * If some CPUs have the same domain, they
			 * will have the same shared_cpu_map.
			 */
			cpumask_copy(match_pthrottling->shared_cpu_map,
				     pthrottling->shared_cpu_map);
		}
	}

err_ret:
	free_cpumask_var(covered_cpus);

	for_each_possible_cpu(i) {
		pr = per_cpu(processors, i);
		if (!pr)
			continue;

		/*
		 * Assume no coordination on any error parsing domain info.
		 * The coordination type will be forced as SW_ALL.
		 */
		if (retval) {
			pthrottling = &(pr->throttling);
			cpumask_clear(pthrottling->shared_cpu_map);
			cpumask_set_cpu(i, pthrottling->shared_cpu_map);
			pthrottling->shared_type = DOMAIN_COORD_TYPE_SW_ALL;
		}
	}

	return retval;
}

/*
 * Update the T-state coordination after the _TSD
 * data for all CPUs is obtained.
 */
void acpi_processor_throttling_init(void)
{
	if (acpi_processor_update_tsd_coord())
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
				  "Assume no T-state coordination\n"));

	return;
}

static int acpi_processor_throttling_notifier(unsigned long event, void *data)
{
	struct throttling_tstate *p_tstate = data;
	struct acpi_processor *pr;
	unsigned int cpu;
	int target_state;
	struct acpi_processor_limit *p_limit;
	struct acpi_processor_throttling *p_throttling;

	cpu = p_tstate->cpu;
	pr = per_cpu(processors, cpu);
	if (!pr) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Invalid pr pointer\n"));
		return 0;
	}
	if (!pr->flags.throttling) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Throttling control is "
				  "unsupported on CPU %d\n", cpu));
		return 0;
	}
	target_state = p_tstate->target_state;
	p_throttling = &(pr->throttling);
	switch (event) {
	case THROTTLING_PRECHANGE:
		/*
		 * The prechange event is used to choose one proper T-state,
		 * which meets the limits of thermal, user and _TPC.
		 */
		p_limit = &pr->limit;
		if (p_limit->thermal.tx > target_state)
			target_state = p_limit->thermal.tx;
		if (p_limit->user.tx > target_state)
			target_state = p_limit->user.tx;
		if (pr->throttling_platform_limit > target_state)
			target_state = pr->throttling_platform_limit;
		if (target_state >= p_throttling->state_count) {
			printk(KERN_WARNING
			       "Exceeded the limit of T-states\n");
			target_state = p_throttling->state_count - 1;
		}
		p_tstate->target_state = target_state;
		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "PreChange Event: "
				  "target T-state of CPU %d is T%d\n",
				  cpu, target_state));
		break;
	case THROTTLING_POSTCHANGE:
		/*
		 * The postchange event is only used to update the
		 * T-state flag of acpi_processor_throttling.
		 */
		p_throttling->state = target_state;
		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "PostChange Event: "
				  "CPU %d is switched to T%d\n",
				  cpu, target_state));
		break;
	default:
		printk(KERN_WARNING
		       "Unsupported throttling notifier event\n");
		break;
	}

	return 0;
}

/*
 * _TPC - Throttling Present Capabilities
 */
static int acpi_processor_get_platform_limit(struct acpi_processor *pr)
{
	acpi_status status = 0;
	unsigned long long tpc = 0;

	if (!pr)
		return -EINVAL;
	status = acpi_evaluate_integer(pr->handle, "_TPC", NULL, &tpc);
	if (ACPI_FAILURE(status)) {
		if (status != AE_NOT_FOUND) {
			ACPI_EXCEPTION((AE_INFO, status, "Evaluating _TPC"));
		}
		return -ENODEV;
	}
	pr->throttling_platform_limit = (int)tpc;
	return 0;
}

int acpi_processor_tstate_has_changed(struct acpi_processor *pr)
{
	int result = 0;
	int throttling_limit;
	int current_state;
	struct acpi_processor_limit *limit;
	int target_state;

	result = acpi_processor_get_platform_limit(pr);
	if (result) {
		/* Throttling limit is unsupported */
		return result;
	}

	throttling_limit = pr->throttling_platform_limit;
	if (throttling_limit >= pr->throttling.state_count) {
		/* Incorrect throttling limit */
		return -EINVAL;
	}

	current_state = pr->throttling.state;
	if (current_state > throttling_limit) {
		/*
		 * The current state already meets the _TPC limit, but it
		 * is reasonable for the OSPM to lower the T-state (i.e.
		 * raise performance) when possible.  The thermal and user
		 * limits still have to be honored when choosing the
		 * target state.
		 */
		limit = &pr->limit;
		target_state = throttling_limit;
		if (limit->thermal.tx > target_state)
			target_state = limit->thermal.tx;
		if (limit->user.tx > target_state)
			target_state = limit->user.tx;
	} else if (current_state == throttling_limit) {
		/*
		 * Unnecessary to change the throttling state
		 */
		return 0;
	} else {
		/*
		 * If the current state is lower than the _TPC limit, it
		 * will be forced to switch to the throttling state defined
		 * by throttling_platform_limit.
		 * Because the previous state already met the thermal and
		 * user limits, it is unnecessary to check them again.
		 */
		target_state = throttling_limit;
	}
	return acpi_processor_set_throttling(pr, target_state);
}

/*
 * _PTC - Processor Throttling Control (and status) register location
 */
static int acpi_processor_get_throttling_control(struct acpi_processor *pr)
{
	int result = 0;
	acpi_status status = 0;
	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
	union acpi_object *ptc = NULL;
	union acpi_object obj = { 0 };
	struct acpi_processor_throttling *throttling;

	status = acpi_evaluate_object(pr->handle, "_PTC", NULL, &buffer);
	if (ACPI_FAILURE(status)) {
		if (status != AE_NOT_FOUND) {
			ACPI_EXCEPTION((AE_INFO, status, "Evaluating _PTC"));
		}
		return -ENODEV;
	}

	ptc = (union acpi_object *)buffer.pointer;
	if (!ptc || (ptc->type != ACPI_TYPE_PACKAGE)
	    || (ptc->package.count != 2)) {
		printk(KERN_ERR PREFIX "Invalid _PTC data\n");
		result = -EFAULT;
		goto end;
	}

	/*
	 * control_register
	 */
	obj = ptc->package.elements[0];
	if ((obj.type != ACPI_TYPE_BUFFER)
	    || (obj.buffer.length < sizeof(struct acpi_ptc_register))
	    || (obj.buffer.pointer == NULL)) {
		printk(KERN_ERR PREFIX
		       "Invalid _PTC data (control_register)\n");
		result = -EFAULT;
		goto end;
	}
	memcpy(&pr->throttling.control_register, obj.buffer.pointer,
	       sizeof(struct acpi_ptc_register));

	/*
	 * status_register
	 */
	obj = ptc->package.elements[1];
	if ((obj.type != ACPI_TYPE_BUFFER)
	    || (obj.buffer.length < sizeof(struct acpi_ptc_register))
	    || (obj.buffer.pointer == NULL)) {
		printk(KERN_ERR PREFIX "Invalid _PTC data (status_register)\n");
		result = -EFAULT;
		goto end;
	}
	memcpy(&pr->throttling.status_register, obj.buffer.pointer,
	       sizeof(struct acpi_ptc_register));

	throttling = &pr->throttling;

	if ((throttling->control_register.bit_width +
	     throttling->control_register.bit_offset) > 32) {
		printk(KERN_ERR PREFIX "Invalid _PTC control register\n");
		result = -EFAULT;
		goto end;
	}

	if ((throttling->status_register.bit_width +
	     throttling->status_register.bit_offset) > 32) {
		printk(KERN_ERR PREFIX "Invalid _PTC status register\n");
		result = -EFAULT;
		goto end;
	}

end:
	kfree(buffer.pointer);
	return result;
}

/*
 * _TSS - Throttling Supported States
 */
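/*
 * Per the ACPI spec, each _TSS entry is a five-element package:
 * { FreqPercentage, Power, TransitionLatency, Control, Status } --
 * which is what the "NNNNN" extraction format below expects.
 */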
static int acpi_processor_get_throttling_states(struct acpi_processor *pr)
{
	int result = 0;
	acpi_status status = AE_OK;
	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
	struct acpi_buffer format = { sizeof("NNNNN"), "NNNNN" };
	struct acpi_buffer state = { 0, NULL };
	union acpi_object *tss = NULL;
	int i;

	status = acpi_evaluate_object(pr->handle, "_TSS", NULL, &buffer);
	if (ACPI_FAILURE(status)) {
		if (status != AE_NOT_FOUND) {
			ACPI_EXCEPTION((AE_INFO, status, "Evaluating _TSS"));
		}
		return -ENODEV;
	}

	tss = buffer.pointer;
	if (!tss || (tss->type != ACPI_TYPE_PACKAGE)) {
		printk(KERN_ERR PREFIX "Invalid _TSS data\n");
		result = -EFAULT;
		goto end;
	}

	ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found %d throttling states\n",
			  tss->package.count));

	pr->throttling.state_count = tss->package.count;
	pr->throttling.states_tss =
	    kmalloc(sizeof(struct acpi_processor_tx_tss) * tss->package.count,
		    GFP_KERNEL);
	if (!pr->throttling.states_tss) {
		result = -ENOMEM;
		goto end;
	}

	for (i = 0; i < pr->throttling.state_count; i++) {

		struct acpi_processor_tx_tss *tx =
		    (struct acpi_processor_tx_tss *)&(pr->throttling.
						      states_tss[i]);

		state.length = sizeof(struct acpi_processor_tx_tss);
		state.pointer = tx;

		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Extracting state %d\n", i));

		status = acpi_extract_package(&(tss->package.elements[i]),
					      &format, &state);
		if (ACPI_FAILURE(status)) {
			ACPI_EXCEPTION((AE_INFO, status, "Invalid _TSS data"));
			result = -EFAULT;
			kfree(pr->throttling.states_tss);
			goto end;
		}

		if (!tx->freqpercentage) {
			printk(KERN_ERR PREFIX
			       "Invalid _TSS data: freq is zero\n");
			result = -EFAULT;
			kfree(pr->throttling.states_tss);
			goto end;
		}
	}

end:
	kfree(buffer.pointer);
	return result;
}

/*
 * _TSD - T-State Dependencies
 */
static int acpi_processor_get_tsd(struct acpi_processor *pr)
{
	int result = 0;
	acpi_status status = AE_OK;
	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
	struct acpi_buffer format = { sizeof("NNNNN"), "NNNNN" };
	struct acpi_buffer state = { 0, NULL };
	union acpi_object *tsd = NULL;
	struct acpi_tsd_package *pdomain;
	struct acpi_processor_throttling *pthrottling;

	pthrottling = &pr->throttling;
	pthrottling->tsd_valid_flag = 0;

	status = acpi_evaluate_object(pr->handle, "_TSD", NULL, &buffer);
	if (ACPI_FAILURE(status)) {
		if (status != AE_NOT_FOUND) {
			ACPI_EXCEPTION((AE_INFO, status, "Evaluating _TSD"));
		}
		return -ENODEV;
	}

	tsd = buffer.pointer;
	if (!tsd || (tsd->type != ACPI_TYPE_PACKAGE)) {
		printk(KERN_ERR PREFIX "Invalid _TSD data\n");
		result = -EFAULT;
		goto end;
	}

	if (tsd->package.count != 1) {
		printk(KERN_ERR PREFIX "Invalid _TSD data\n");
		result = -EFAULT;
		goto end;
	}

	pdomain = &(pr->throttling.domain_info);

	state.length = sizeof(struct acpi_tsd_package);
	state.pointer = pdomain;

	status = acpi_extract_package(&(tsd->package.elements[0]),
				      &format, &state);
	if (ACPI_FAILURE(status)) {
		printk(KERN_ERR PREFIX "Invalid _TSD data\n");
		result = -EFAULT;
		goto end;
	}

	if (pdomain->num_entries != ACPI_TSD_REV0_ENTRIES) {
		printk(KERN_ERR PREFIX "Unknown _TSD:num_entries\n");
		result = -EFAULT;
		goto end;
	}

	if (pdomain->revision != ACPI_TSD_REV0_REVISION) {
		printk(KERN_ERR PREFIX "Unknown _TSD:revision\n");
		result = -EFAULT;
		goto end;
	}

	pthrottling = &pr->throttling;
	pthrottling->tsd_valid_flag = 1;
	pthrottling->shared_type = pdomain->coord_type;
	cpumask_set_cpu(pr->id, pthrottling->shared_cpu_map);
	/*
	 * If the coordination type is not defined in the ACPI spec,
	 * the tsd_valid_flag will be cleared and the coordination type
	 * will be forced to DOMAIN_COORD_TYPE_SW_ALL.
	 */
	if (pdomain->coord_type != DOMAIN_COORD_TYPE_SW_ALL &&
	    pdomain->coord_type != DOMAIN_COORD_TYPE_SW_ANY &&
	    pdomain->coord_type != DOMAIN_COORD_TYPE_HW_ALL) {
		pthrottling->tsd_valid_flag = 0;
		pthrottling->shared_type = DOMAIN_COORD_TYPE_SW_ALL;
	}

end:
	kfree(buffer.pointer);
	return result;
}

/* --------------------------------------------------------------------------
                              Throttling Control
   -------------------------------------------------------------------------- */
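/*
 * With FADT-based throttling the T-state is encoded as a duty cycle in
 * the P_CNT register: duty_width bits at duty_offset select how many of
 * the state_count clock intervals the CPU is allowed to run, and bit 4
 * enables throttling.  For example (assuming duty_width == 3, i.e. eight
 * T-states), a duty_value of 6 means the CPU runs 6/8 of the time, which
 * this driver reports as state T2 (state = state_count - duty_value).
 */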
static int acpi_processor_get_throttling_fadt(struct acpi_processor *pr)
{
	int state = 0;
	u32 value = 0;
	u32 duty_mask = 0;
	u32 duty_value = 0;

	if (!pr)
		return -EINVAL;

	if (!pr->flags.throttling)
		return -ENODEV;

	pr->throttling.state = 0;

	duty_mask = pr->throttling.state_count - 1;
	duty_mask <<= pr->throttling.duty_offset;

	local_irq_disable();

	value = inl(pr->throttling.address);

	/*
	 * Compute the current throttling state when throttling is enabled
	 * (bit 4 is on).
	 */
	if (value & 0x10) {
		duty_value = value & duty_mask;
		duty_value >>= pr->throttling.duty_offset;

		if (duty_value)
			state = pr->throttling.state_count - duty_value;
	}

	pr->throttling.state = state;

	local_irq_enable();

	ACPI_DEBUG_PRINT((ACPI_DB_INFO,
			  "Throttling state is T%d (%d%% throttling applied)\n",
			  state, pr->throttling.states[state].performance));

	return 0;
}

#ifdef CONFIG_X86
static int acpi_throttling_rdmsr(struct acpi_processor *pr,
				 acpi_integer *value)
{
	struct cpuinfo_x86 *c;
	u64 msr_high, msr_low;
	unsigned int cpu;
	u64 msr = 0;
	int ret = -1;

	cpu = pr->id;
	c = &cpu_data(cpu);

	if ((c->x86_vendor != X86_VENDOR_INTEL) ||
	    !cpu_has(c, X86_FEATURE_ACPI)) {
		printk(KERN_ERR PREFIX
		       "HARDWARE addr space, NOT supported yet\n");
	} else {
		msr_low = 0;
		msr_high = 0;
		rdmsr_safe(MSR_IA32_THERM_CONTROL,
			   (u32 *)&msr_low, (u32 *)&msr_high);
		msr = (msr_high << 32) | msr_low;
		*value = (acpi_integer)msr;
		ret = 0;
	}
	return ret;
}

static int acpi_throttling_wrmsr(struct acpi_processor *pr, acpi_integer value)
{
	struct cpuinfo_x86 *c;
	unsigned int cpu;
	int ret = -1;
	u64 msr;

	cpu = pr->id;
	c = &cpu_data(cpu);

	if ((c->x86_vendor != X86_VENDOR_INTEL) ||
	    !cpu_has(c, X86_FEATURE_ACPI)) {
		printk(KERN_ERR PREFIX
		       "HARDWARE addr space, NOT supported yet\n");
	} else {
		msr = value;
		wrmsr_safe(MSR_IA32_THERM_CONTROL,
			   msr & 0xffffffff, msr >> 32);
		ret = 0;
	}
	return ret;
}
#else
static int acpi_throttling_rdmsr(struct acpi_processor *pr,
				 acpi_integer *value)
{
	printk(KERN_ERR PREFIX
	       "HARDWARE addr space, NOT supported yet\n");
	return -1;
}

static int acpi_throttling_wrmsr(struct acpi_processor *pr, acpi_integer value)
{
	printk(KERN_ERR PREFIX
	       "HARDWARE addr space, NOT supported yet\n");
	return -1;
}
#endif
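
/*
 * The _PTC status/control registers are accessed either through system
 * I/O ports or, for ACPI_ADR_SPACE_FIXED_HARDWARE, through the
 * IA32_THERM_CONTROL MSR via the helpers above.  In the I/O case the
 * T-state value occupies bit_width bits starting at bit_offset, so the
 * raw port value is masked and shifted accordingly.
 */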
static int acpi_read_throttling_status(struct acpi_processor *pr,
				       acpi_integer *value)
{
	u32 bit_width, bit_offset;
	u64 ptc_value;
	u64 ptc_mask;
	struct acpi_processor_throttling *throttling;
	int ret = -1;

	throttling = &pr->throttling;
	switch (throttling->status_register.space_id) {
	case ACPI_ADR_SPACE_SYSTEM_IO:
		ptc_value = 0;
		bit_width = throttling->status_register.bit_width;
		bit_offset = throttling->status_register.bit_offset;

		acpi_os_read_port((acpi_io_address) throttling->status_register.
				  address, (u32 *) &ptc_value,
				  (u32) (bit_width + bit_offset));
		ptc_mask = (1 << bit_width) - 1;
		*value = (acpi_integer) ((ptc_value >> bit_offset) & ptc_mask);
		ret = 0;
		break;
	case ACPI_ADR_SPACE_FIXED_HARDWARE:
		ret = acpi_throttling_rdmsr(pr, value);
		break;
	default:
		printk(KERN_ERR PREFIX "Unknown addr space %d\n",
		       (u32) (throttling->status_register.space_id));
	}
	return ret;
}

static int acpi_write_throttling_state(struct acpi_processor *pr,
				       acpi_integer value)
{
	u32 bit_width, bit_offset;
	u64 ptc_value;
	u64 ptc_mask;
	struct acpi_processor_throttling *throttling;
	int ret = -1;

	throttling = &pr->throttling;
	switch (throttling->control_register.space_id) {
	case ACPI_ADR_SPACE_SYSTEM_IO:
		bit_width = throttling->control_register.bit_width;
		bit_offset = throttling->control_register.bit_offset;
		ptc_mask = (1 << bit_width) - 1;
		ptc_value = value & ptc_mask;

		acpi_os_write_port((acpi_io_address) throttling->
				   control_register.address,
				   (u32) (ptc_value << bit_offset),
				   (u32) (bit_width + bit_offset));
		ret = 0;
		break;
	case ACPI_ADR_SPACE_FIXED_HARDWARE:
		ret = acpi_throttling_wrmsr(pr, value);
		break;
	default:
		printk(KERN_ERR PREFIX "Unknown addr space %d\n",
		       (u32) (throttling->control_register.space_id));
	}
	return ret;
}

static int acpi_get_throttling_state(struct acpi_processor *pr,
				     acpi_integer value)
{
	int i;

	for (i = 0; i < pr->throttling.state_count; i++) {
		struct acpi_processor_tx_tss *tx =
		    (struct acpi_processor_tx_tss *)&(pr->throttling.
						      states_tss[i]);
		if (tx->control == value)
			return i;
	}
	return -1;
}

static int acpi_get_throttling_value(struct acpi_processor *pr,
				     int state, acpi_integer *value)
{
	int ret = -1;

	/* Valid state indexes are 0 .. state_count - 1 */
	if (state >= 0 && state < pr->throttling.state_count) {
		struct acpi_processor_tx_tss *tx =
		    (struct acpi_processor_tx_tss *)&(pr->throttling.
						      states_tss[state]);
		*value = tx->control;
		ret = 0;
	}
	return ret;
}

static int acpi_processor_get_throttling_ptc(struct acpi_processor *pr)
{
	int state = 0;
	int ret;
	acpi_integer value;

	if (!pr)
		return -EINVAL;

	if (!pr->flags.throttling)
		return -ENODEV;

	pr->throttling.state = 0;

	value = 0;
	ret = acpi_read_throttling_status(pr, &value);
	if (ret >= 0) {
		state = acpi_get_throttling_state(pr, value);
		pr->throttling.state = state;
	}

	return 0;
}

static int acpi_processor_get_throttling(struct acpi_processor *pr)
{
	cpumask_var_t saved_mask;
	int ret;

	if (!pr)
		return -EINVAL;

	if (!pr->flags.throttling)
		return -ENODEV;

	if (!alloc_cpumask_var(&saved_mask, GFP_KERNEL))
		return -ENOMEM;

	/*
	 * Migrate the task to the CPU pointed to by pr.
	 */
	cpumask_copy(saved_mask, &current->cpus_allowed);
	/* FIXME: use work_on_cpu() */
	set_cpus_allowed_ptr(current, cpumask_of(pr->id));
	ret = pr->throttling.acpi_processor_get_throttling(pr);
	/* restore the previous state */
	set_cpus_allowed_ptr(current, saved_mask);
	free_cpumask_var(saved_mask);

	return ret;
}
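
/*
 * FADT-based throttling assumes a linear power/performance relationship,
 * so the table below is filled in tenths of a percent.  For example
 * (assuming duty_width == 2, i.e. four T-states), step is 250 and the
 * performance values come out as 1000, 750, 500 and 250 for T0..T3.
 */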
static int acpi_processor_get_fadt_info(struct acpi_processor *pr)
{
	int i, step;

	if (!pr->throttling.address) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No throttling register\n"));
		return -EINVAL;
	} else if (!pr->throttling.duty_width) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No throttling states\n"));
		return -EINVAL;
	}
	/* TBD: Support duty_cycle values that span bit 4. */
	else if ((pr->throttling.duty_offset + pr->throttling.duty_width) > 4) {
		printk(KERN_WARNING PREFIX "duty_cycle spans bit 4\n");
		return -EINVAL;
	}

	pr->throttling.state_count = 1 << acpi_gbl_FADT.duty_width;

	/*
	 * Compute state values. Note that throttling displays a linear power
	 * performance relationship (at 50% performance the CPU will consume
	 * 50% power). Values are in 1/10th of a percent to preserve accuracy.
	 */

	step = (1000 / pr->throttling.state_count);

	for (i = 0; i < pr->throttling.state_count; i++) {
		pr->throttling.states[i].performance = 1000 - step * i;
		pr->throttling.states[i].power = 1000 - step * i;
	}
	return 0;
}

static int acpi_processor_set_throttling_fadt(struct acpi_processor *pr,
					      int state)
{
	u32 value = 0;
	u32 duty_mask = 0;
	u32 duty_value = 0;

	if (!pr)
		return -EINVAL;

	if ((state < 0) || (state > (pr->throttling.state_count - 1)))
		return -EINVAL;

	if (!pr->flags.throttling)
		return -ENODEV;

	if (state == pr->throttling.state)
		return 0;

	if (state < pr->throttling_platform_limit)
		return -EPERM;
	/*
	 * Calculate the duty_value and duty_mask.
	 */
	if (state) {
		duty_value = pr->throttling.state_count - state;
		duty_value <<= pr->throttling.duty_offset;

		/* Used to clear all duty_value bits */
		duty_mask = pr->throttling.state_count - 1;
		duty_mask <<= acpi_gbl_FADT.duty_offset;
		duty_mask = ~duty_mask;
	}

	local_irq_disable();

	/*
	 * Disable throttling by writing a 0 to bit 4.  Note that throttling
	 * must be turned off before the duty_value can be changed.
	 */
	value = inl(pr->throttling.address);
	if (value & 0x10) {
		value &= 0xFFFFFFEF;
		outl(value, pr->throttling.address);
	}

	/*
	 * Write the new duty_value and then enable throttling.  Note
	 * that a state value of 0 leaves throttling disabled.
	 */
	if (state) {
		value &= duty_mask;
		value |= duty_value;
		outl(value, pr->throttling.address);

		value |= 0x00000010;
		outl(value, pr->throttling.address);
	}

	pr->throttling.state = state;

	local_irq_enable();

	ACPI_DEBUG_PRINT((ACPI_DB_INFO,
			  "Throttling state set to T%d (%d%%)\n", state,
			  (pr->throttling.states[state].performance ? pr->
			   throttling.states[state].performance / 10 : 0)));

	return 0;
}

static int acpi_processor_set_throttling_ptc(struct acpi_processor *pr,
					     int state)
{
	int ret;
	acpi_integer value;

	if (!pr)
		return -EINVAL;

	if ((state < 0) || (state > (pr->throttling.state_count - 1)))
		return -EINVAL;

	if (!pr->flags.throttling)
		return -ENODEV;

	if (state == pr->throttling.state)
		return 0;

	if (state < pr->throttling_platform_limit)
		return -EPERM;

	value = 0;
	ret = acpi_get_throttling_value(pr, state, &value);
	if (ret >= 0) {
		acpi_write_throttling_state(pr, value);
		pr->throttling.state = state;
	}

	return 0;
}
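
/*
 * A T-state switch proceeds in three phases: a PRECHANGE notification
 * to every online CPU in the shared_cpu_map (each may raise the target
 * state to honor its own limits), the actual register write -- on one
 * CPU for SW_ANY coordination, on every affected CPU for SW_ALL and
 * HW_ALL -- and finally a POSTCHANGE notification that records the new
 * state.
 */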
int acpi_processor_set_throttling(struct acpi_processor *pr, int state)
{
	cpumask_var_t saved_mask;
	int ret = 0;
	unsigned int i;
	struct acpi_processor *match_pr;
	struct acpi_processor_throttling *p_throttling;
	struct throttling_tstate t_state;
	cpumask_var_t online_throttling_cpus;

	if (!pr)
		return -EINVAL;

	if (!pr->flags.throttling)
		return -ENODEV;

	if ((state < 0) || (state > (pr->throttling.state_count - 1)))
		return -EINVAL;

	if (!alloc_cpumask_var(&saved_mask, GFP_KERNEL))
		return -ENOMEM;

	if (!alloc_cpumask_var(&online_throttling_cpus, GFP_KERNEL)) {
		free_cpumask_var(saved_mask);
		return -ENOMEM;
	}

	cpumask_copy(saved_mask, &current->cpus_allowed);
	t_state.target_state = state;
	p_throttling = &(pr->throttling);
	cpumask_and(online_throttling_cpus, cpu_online_mask,
		    p_throttling->shared_cpu_map);
	/*
	 * The throttling notifier will be called for every
	 * affected CPU in order to get one proper T-state.
	 * The notifier event is THROTTLING_PRECHANGE.
	 */
	for_each_cpu(i, online_throttling_cpus) {
		t_state.cpu = i;
		acpi_processor_throttling_notifier(THROTTLING_PRECHANGE,
						   &t_state);
	}
	/*
	 * acpi_processor_set_throttling will be called to switch the
	 * T-state.  If the coordination type is SW_ALL or HW_ALL, it
	 * is necessary to call it for every affected CPU.  Otherwise
	 * it needs to be called only for the CPU pointed to by pr.
	 */
	if (p_throttling->shared_type == DOMAIN_COORD_TYPE_SW_ANY) {
		/* FIXME: use work_on_cpu() */
		set_cpus_allowed_ptr(current, cpumask_of(pr->id));
		ret = p_throttling->acpi_processor_set_throttling(pr,
						t_state.target_state);
	} else {
		/*
		 * When the T-state coordination is SW_ALL or HW_ALL,
		 * it is necessary to set the T-state for every affected
		 * CPU.
		 */
		for_each_cpu(i, online_throttling_cpus) {
			match_pr = per_cpu(processors, i);
			/*
			 * If the pointer is invalid, we will report the
			 * error message and continue.
			 */
			if (!match_pr) {
				ACPI_DEBUG_PRINT((ACPI_DB_INFO,
					"Invalid Pointer for CPU %d\n", i));
				continue;
			}
			/*
			 * If throttling control is unsupported on CPU i,
			 * we will report the error message and continue.
			 */
			if (!match_pr->flags.throttling) {
				ACPI_DEBUG_PRINT((ACPI_DB_INFO,
					"Throttling control is unsupported "
					"on CPU %d\n", i));
				continue;
			}
			t_state.cpu = i;
			/* FIXME: use work_on_cpu() */
			set_cpus_allowed_ptr(current, cpumask_of(i));
			ret = match_pr->throttling.
				acpi_processor_set_throttling(
				match_pr, t_state.target_state);
		}
	}
	/*
	 * After set_throttling is called, the throttling notifier is
	 * called for every affected CPU to update the T-states.
	 * The notifier event is THROTTLING_POSTCHANGE.
	 */
	for_each_cpu(i, online_throttling_cpus) {
		t_state.cpu = i;
		acpi_processor_throttling_notifier(THROTTLING_POSTCHANGE,
						   &t_state);
	}
	/* restore the previous state */
	/* FIXME: use work_on_cpu() */
	set_cpus_allowed_ptr(current, saved_mask);
	free_cpumask_var(online_throttling_cpus);
	free_cpumask_var(saved_mask);
	return ret;
}

int acpi_processor_get_throttling_info(struct acpi_processor *pr)
{
	int result = 0;
	struct acpi_processor_throttling *pthrottling;

	if (!pr)
		return -EINVAL;

	ACPI_DEBUG_PRINT((ACPI_DB_INFO,
			  "pblk_address[0x%08x] duty_offset[%d] duty_width[%d]\n",
			  pr->throttling.address,
			  pr->throttling.duty_offset,
			  pr->throttling.duty_width));

	/*
	 * Evaluate _PTC, _TSS and _TPC.
	 * They must all be present or none of them can be used.
	 */
	if (acpi_processor_get_throttling_control(pr) ||
	    acpi_processor_get_throttling_states(pr) ||
	    acpi_processor_get_platform_limit(pr)) {
		pr->throttling.acpi_processor_get_throttling =
		    &acpi_processor_get_throttling_fadt;
		pr->throttling.acpi_processor_set_throttling =
		    &acpi_processor_set_throttling_fadt;
		if (acpi_processor_get_fadt_info(pr))
			return 0;
	} else {
		pr->throttling.acpi_processor_get_throttling =
		    &acpi_processor_get_throttling_ptc;
		pr->throttling.acpi_processor_set_throttling =
		    &acpi_processor_set_throttling_ptc;
	}

	/*
	 * If the _TSD package for one CPU can't be parsed successfully, it
	 * means that this CPU will have no coordination with other CPUs.
	 */
	if (acpi_processor_get_tsd(pr)) {
		pthrottling = &pr->throttling;
		pthrottling->tsd_valid_flag = 0;
		cpumask_set_cpu(pr->id, pthrottling->shared_cpu_map);
		pthrottling->shared_type = DOMAIN_COORD_TYPE_SW_ALL;
	}

	/*
	 * PIIX4 Errata: We don't support throttling on the original PIIX4.
	 * This shouldn't be an issue as few (if any) mobile systems ever
	 * used this part.
	 */
	if (errata.piix4.throttle) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
				  "Throttling not supported on PIIX4 A- or B-step\n"));
		return 0;
	}

	ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found %d throttling states\n",
			  pr->throttling.state_count));

	pr->flags.throttling = 1;

	/*
	 * Disable throttling (if enabled).  We'll let subsequent policy
	 * (e.g. thermal) decide to lower performance if it so chooses,
	 * but for now we'll crank up the speed.
	 */

	result = acpi_processor_get_throttling(pr);
	if (result)
		goto end;

	if (pr->throttling.state) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
				  "Disabling throttling (was T%d)\n",
				  pr->throttling.state));
		result = acpi_processor_set_throttling(pr, 0);
		if (result)
			goto end;
	}

end:
	if (result)
		pr->flags.throttling = 0;

	return result;
}

/* proc interface */
static int acpi_processor_throttling_seq_show(struct seq_file *seq,
					      void *offset)
{
	struct acpi_processor *pr = seq->private;
	int i = 0;
	int result = 0;

	if (!pr)
		goto end;

	if (!(pr->throttling.state_count > 0)) {
		seq_puts(seq, "<not supported>\n");
		goto end;
	}

	result = acpi_processor_get_throttling(pr);

	if (result) {
		seq_puts(seq,
			 "Could not determine current throttling state.\n");
		goto end;
	}

	seq_printf(seq, "state count: %d\n"
		   "active state: T%d\n"
		   "state available: T%d to T%d\n",
		   pr->throttling.state_count, pr->throttling.state,
		   pr->throttling_platform_limit,
		   pr->throttling.state_count - 1);

	seq_puts(seq, "states:\n");
	if (pr->throttling.acpi_processor_get_throttling ==
	    acpi_processor_get_throttling_fadt) {
		for (i = 0; i < pr->throttling.state_count; i++)
			seq_printf(seq, " %cT%d: %02d%%\n",
				   (i == pr->throttling.state ? '*' : ' '), i,
				   (pr->throttling.states[i].performance ? pr->
				    throttling.states[i].performance / 10 : 0));
	} else {
		for (i = 0; i < pr->throttling.state_count; i++)
			seq_printf(seq, " %cT%d: %02d%%\n",
				   (i == pr->throttling.state ? '*' : ' '), i,
				   (int)pr->throttling.states_tss[i].
				   freqpercentage);
	}

end:
	return 0;
}

static int acpi_processor_throttling_open_fs(struct inode *inode,
					     struct file *file)
{
	return single_open(file, acpi_processor_throttling_seq_show,
			   PDE(inode)->data);
}
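
/*
 * Writes of the form "3" or "T3" (the leading 't'/'T' is optional)
 * select the target T-state; the parsed value is echoed back through
 * snprintf() and compared to the input so that trailing garbage such
 * as "3abc" is rejected.
 */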
static ssize_t acpi_processor_write_throttling(struct file *file,
					       const char __user *buffer,
					       size_t count, loff_t *data)
{
	int result = 0;
	struct seq_file *m = file->private_data;
	struct acpi_processor *pr = m->private;
	char state_string[5] = "";
	char *charp = NULL;
	size_t state_val = 0;
	char tmpbuf[5] = "";

	if (!pr || (count > sizeof(state_string) - 1))
		return -EINVAL;

	if (copy_from_user(state_string, buffer, count))
		return -EFAULT;

	state_string[count] = '\0';
	if ((count > 0) && (state_string[count - 1] == '\n'))
		state_string[count - 1] = '\0';

	charp = state_string;
	if ((state_string[0] == 't') || (state_string[0] == 'T'))
		charp++;

	state_val = simple_strtoul(charp, NULL, 0);
	if (state_val >= pr->throttling.state_count)
		return -EINVAL;

	snprintf(tmpbuf, 5, "%zu", state_val);

	if (strcmp(tmpbuf, charp) != 0)
		return -EINVAL;

	result = acpi_processor_set_throttling(pr, state_val);
	if (result)
		return result;

	return count;
}

const struct file_operations acpi_processor_throttling_fops = {
	.owner = THIS_MODULE,
	.open = acpi_processor_throttling_open_fs,
	.read = seq_read,
	.write = acpi_processor_write_throttling,
	.llseek = seq_lseek,
	.release = single_release,
};