/* processor_throttling.c */
  1. /*
  2. * processor_throttling.c - Throttling submodule of the ACPI processor driver
  3. *
  4. * Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
  5. * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
  6. * Copyright (C) 2004 Dominik Brodowski <linux@brodo.de>
  7. * Copyright (C) 2004 Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
  8. * - Added processor hotplug support
  9. *
  10. * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  11. *
  12. * This program is free software; you can redistribute it and/or modify
  13. * it under the terms of the GNU General Public License as published by
  14. * the Free Software Foundation; either version 2 of the License, or (at
  15. * your option) any later version.
  16. *
  17. * This program is distributed in the hope that it will be useful, but
  18. * WITHOUT ANY WARRANTY; without even the implied warranty of
  19. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  20. * General Public License for more details.
  21. *
  22. * You should have received a copy of the GNU General Public License along
  23. * with this program; if not, write to the Free Software Foundation, Inc.,
  24. * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
  25. *
  26. * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  27. */
  28. #include <linux/kernel.h>
  29. #include <linux/module.h>
  30. #include <linux/init.h>
  31. #include <linux/sched.h>
  32. #include <linux/cpufreq.h>
  33. #include <linux/proc_fs.h>
  34. #include <linux/seq_file.h>
  35. #include <asm/io.h>
  36. #include <asm/uaccess.h>
  37. #include <acpi/acpi_bus.h>
  38. #include <acpi/processor.h>
  39. #define ACPI_PROCESSOR_COMPONENT 0x01000000
  40. #define ACPI_PROCESSOR_CLASS "processor"
  41. #define _COMPONENT ACPI_PROCESSOR_COMPONENT
  42. ACPI_MODULE_NAME("processor_throttling");
/*
 * Payload passed through the throttling notifier chain: identifies the
 * CPU whose T-state is changing and the requested target state.
 */
struct throttling_tstate {
	unsigned int cpu;	/* logical cpu number */
	int target_state;	/* requested target T-state */
};

/* Notifier events: about to change T-state / change completed */
#define THROTTLING_PRECHANGE  (1)
#define THROTTLING_POSTCHANGE (2)
  49. static int acpi_processor_get_throttling(struct acpi_processor *pr);
  50. int acpi_processor_set_throttling(struct acpi_processor *pr, int state);
/*
 * acpi_processor_update_tsd_coord - build T-state coordination from _TSD
 *
 * Walks all possible CPUs' _TSD domain info and fills in each CPU's
 * shared_cpu_map so CPUs in the same throttling domain share one map.
 * Returns 0 on success.  On any inconsistency (invalid _TSD on any CPU,
 * mismatched processor counts or coordination types within a domain) it
 * returns -EINVAL and forces every CPU to SW_ALL coordination with a
 * shared_cpu_map containing only itself.
 */
static int acpi_processor_update_tsd_coord(void)
{
	int count, count_target;
	int retval = 0;
	unsigned int i, j;
	cpumask_t covered_cpus;
	struct acpi_processor *pr, *match_pr;
	struct acpi_tsd_package *pdomain, *match_pdomain;
	struct acpi_processor_throttling *pthrottling, *match_pthrottling;

	/*
	 * Now that we have _TSD data from all CPUs, lets setup T-state
	 * coordination among all CPUs.
	 */
	for_each_possible_cpu(i) {
		pr = processors[i];
		if (!pr)
			continue;

		/* Basic validity check for domain info */
		pthrottling = &(pr->throttling);

		/*
		 * If the tsd package for one cpu is invalid, the coordination
		 * among all CPUs is thought as invalid.
		 * Maybe it is ugly.
		 */
		if (!pthrottling->tsd_valid_flag) {
			retval = -EINVAL;
			break;
		}
	}
	if (retval)
		goto err_ret;

	cpus_clear(covered_cpus);
	for_each_possible_cpu(i) {
		pr = processors[i];
		if (!pr)
			continue;

		/* CPUs already folded into an earlier domain are done */
		if (cpu_isset(i, covered_cpus))
			continue;
		pthrottling = &pr->throttling;

		pdomain = &(pthrottling->domain_info);
		cpu_set(i, pthrottling->shared_cpu_map);
		cpu_set(i, covered_cpus);
		/*
		 * If the number of processors in the TSD domain is 1, it is
		 * unnecessary to parse the coordination for this CPU.
		 */
		if (pdomain->num_processors <= 1)
			continue;

		/* Validate the Domain info */
		count_target = pdomain->num_processors;
		count = 1;

		for_each_possible_cpu(j) {
			if (i == j)
				continue;

			match_pr = processors[j];
			if (!match_pr)
				continue;

			match_pthrottling = &(match_pr->throttling);
			match_pdomain = &(match_pthrottling->domain_info);
			if (match_pdomain->domain != pdomain->domain)
				continue;

			/* Here i and j are in the same domain.
			 * If two TSD packages have the same domain, they
			 * should have the same num_processors and
			 * coordination type. Otherwise it will be regarded
			 * as illegal.
			 */
			if (match_pdomain->num_processors != count_target) {
				retval = -EINVAL;
				goto err_ret;
			}

			if (pdomain->coord_type != match_pdomain->coord_type) {
				retval = -EINVAL;
				goto err_ret;
			}

			cpu_set(j, covered_cpus);
			cpu_set(j, pthrottling->shared_cpu_map);
			count++;
		}
		/* Second pass: give every member CPU a copy of the map */
		for_each_possible_cpu(j) {
			if (i == j)
				continue;

			match_pr = processors[j];
			if (!match_pr)
				continue;

			match_pthrottling = &(match_pr->throttling);
			match_pdomain = &(match_pthrottling->domain_info);
			if (match_pdomain->domain != pdomain->domain)
				continue;

			/*
			 * If some CPUS have the same domain, they
			 * will have the same shared_cpu_map.
			 */
			match_pthrottling->shared_cpu_map =
			    pthrottling->shared_cpu_map;
		}
	}

err_ret:
	for_each_possible_cpu(i) {
		pr = processors[i];
		if (!pr)
			continue;

		/*
		 * Assume no coordination on any error parsing domain info.
		 * The coordination type will be forced as SW_ALL.
		 */
		if (retval) {
			pthrottling = &(pr->throttling);
			cpus_clear(pthrottling->shared_cpu_map);
			cpu_set(i, pthrottling->shared_cpu_map);
			pthrottling->shared_type = DOMAIN_COORD_TYPE_SW_ALL;
		}
	}

	return retval;
}
  166. /*
  167. * Update the T-state coordination after the _TSD
  168. * data for all cpus is obtained.
  169. */
  170. void acpi_processor_throttling_init(void)
  171. {
  172. if (acpi_processor_update_tsd_coord())
  173. ACPI_DEBUG_PRINT((ACPI_DB_INFO,
  174. "Assume no T-state coordination\n"));
  175. return;
  176. }
/*
 * acpi_processor_throttling_notifier - handle T-state change notifications
 * @event: THROTTLING_PRECHANGE or THROTTLING_POSTCHANGE
 * @data:  struct throttling_tstate naming the CPU and its target T-state
 *
 * PRECHANGE raises the requested target state as needed to honour the
 * thermal limit, the user limit and the _TPC platform limit, clamps it to
 * the supported state count, and writes the result back into @data.
 * POSTCHANGE records the now-current T-state.  Always returns 0.
 */
static int acpi_processor_throttling_notifier(unsigned long event, void *data)
{
	struct throttling_tstate *p_tstate = data;
	struct acpi_processor *pr;
	unsigned int cpu;
	int target_state;
	struct acpi_processor_limit *p_limit;
	struct acpi_processor_throttling *p_throttling;

	cpu = p_tstate->cpu;
	pr = processors[cpu];
	if (!pr) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Invalid pr pointer\n"));
		return 0;
	}
	if (!pr->flags.throttling) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Throttling control is "
				  "unsupported on CPU %d\n", cpu));
		return 0;
	}
	target_state = p_tstate->target_state;
	p_throttling = &(pr->throttling);
	switch (event) {
	case THROTTLING_PRECHANGE:
		/*
		 * Prechange event is used to choose one proper t-state,
		 * which meets the limits of thermal, user and _TPC.
		 */
		p_limit = &pr->limit;
		if (p_limit->thermal.tx > target_state)
			target_state = p_limit->thermal.tx;
		if (p_limit->user.tx > target_state)
			target_state = p_limit->user.tx;
		if (pr->throttling_platform_limit > target_state)
			target_state = pr->throttling_platform_limit;
		/* clamp to the deepest T-state this CPU actually has */
		if (target_state >= p_throttling->state_count) {
			printk(KERN_WARNING
			       "Exceed the limit of T-state \n");
			target_state = p_throttling->state_count - 1;
		}
		p_tstate->target_state = target_state;
		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "PreChange Event:"
				  "target T-state of CPU %d is T%d\n",
				  cpu, target_state));
		break;
	case THROTTLING_POSTCHANGE:
		/*
		 * Postchange event is only used to update the
		 * T-state flag of acpi_processor_throttling.
		 */
		p_throttling->state = target_state;
		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "PostChange Event:"
				  "CPU %d is switched to T%d\n",
				  cpu, target_state));
		break;
	default:
		printk(KERN_WARNING
		       "Unsupported Throttling notifier event\n");
		break;
	}

	return 0;
}
  238. /*
  239. * _TPC - Throttling Present Capabilities
  240. */
  241. static int acpi_processor_get_platform_limit(struct acpi_processor *pr)
  242. {
  243. acpi_status status = 0;
  244. unsigned long tpc = 0;
  245. if (!pr)
  246. return -EINVAL;
  247. status = acpi_evaluate_integer(pr->handle, "_TPC", NULL, &tpc);
  248. if (ACPI_FAILURE(status)) {
  249. if (status != AE_NOT_FOUND) {
  250. ACPI_EXCEPTION((AE_INFO, status, "Evaluating _TPC"));
  251. }
  252. return -ENODEV;
  253. }
  254. pr->throttling_platform_limit = (int)tpc;
  255. return 0;
  256. }
/*
 * acpi_processor_tstate_has_changed - react to a _TPC change notification
 *
 * Re-evaluates _TPC and, if the new platform limit (combined with the
 * thermal and user limits) requires it, switches the processor to a new
 * T-state.  Returns 0 when no change is needed, a negative errno when the
 * limit cannot be evaluated or is out of range, otherwise the result of
 * acpi_processor_set_throttling().
 */
int acpi_processor_tstate_has_changed(struct acpi_processor *pr)
{
	int result = 0;
	int throttling_limit;
	int current_state;
	struct acpi_processor_limit *limit;
	int target_state;

	result = acpi_processor_get_platform_limit(pr);
	if (result) {
		/* Throttling Limit is unsupported */
		return result;
	}

	throttling_limit = pr->throttling_platform_limit;
	if (throttling_limit >= pr->throttling.state_count) {
		/* Incorrect Throttling Limit */
		return -EINVAL;
	}

	current_state = pr->throttling.state;
	if (current_state > throttling_limit) {
		/*
		 * The current state can meet the requirement of
		 * _TPC limit. But it is reasonable that OSPM changes
		 * t-states from high to low for better performance.
		 * Of course the limit condition of thermal
		 * and user should be considered.
		 */
		limit = &pr->limit;
		target_state = throttling_limit;
		if (limit->thermal.tx > target_state)
			target_state = limit->thermal.tx;
		if (limit->user.tx > target_state)
			target_state = limit->user.tx;
	} else if (current_state == throttling_limit) {
		/*
		 * Unnecessary to change the throttling state
		 */
		return 0;
	} else {
		/*
		 * If the current state is lower than the limit of _TPC, it
		 * will be forced to switch to the throttling state defined
		 * by throttling_platform_limit.
		 * Because the previous state meets with the limit condition
		 * of thermal and user, it is unnecessary to check it again.
		 */
		target_state = throttling_limit;
	}
	return acpi_processor_set_throttling(pr, target_state);
}
/*
 * _PTC - Processor Throttling Control (and status) register location
 *
 * Evaluates _PTC, which must be a two-element package of register buffers
 * (control register, then status register), copies both into
 * pr->throttling, and rejects registers that do not fit in 32 bits
 * (the accessors use 32-bit port I/O).  Returns 0 on success, -ENODEV if
 * _PTC is absent/unevaluatable, -EFAULT on malformed data.
 */
static int acpi_processor_get_throttling_control(struct acpi_processor *pr)
{
	int result = 0;
	acpi_status status = 0;
	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
	union acpi_object *ptc = NULL;
	union acpi_object obj = { 0 };
	struct acpi_processor_throttling *throttling;

	status = acpi_evaluate_object(pr->handle, "_PTC", NULL, &buffer);
	if (ACPI_FAILURE(status)) {
		/* a missing _PTC is a normal condition, not an error */
		if (status != AE_NOT_FOUND) {
			ACPI_EXCEPTION((AE_INFO, status, "Evaluating _PTC"));
		}
		return -ENODEV;
	}

	ptc = (union acpi_object *)buffer.pointer;
	if (!ptc || (ptc->type != ACPI_TYPE_PACKAGE)
	    || (ptc->package.count != 2)) {
		printk(KERN_ERR PREFIX "Invalid _PTC data\n");
		result = -EFAULT;
		goto end;
	}

	/*
	 * control_register
	 */
	obj = ptc->package.elements[0];
	if ((obj.type != ACPI_TYPE_BUFFER)
	    || (obj.buffer.length < sizeof(struct acpi_ptc_register))
	    || (obj.buffer.pointer == NULL)) {
		printk(KERN_ERR PREFIX
		       "Invalid _PTC data (control_register)\n");
		result = -EFAULT;
		goto end;
	}
	memcpy(&pr->throttling.control_register, obj.buffer.pointer,
	       sizeof(struct acpi_ptc_register));

	/*
	 * status_register
	 */
	obj = ptc->package.elements[1];
	if ((obj.type != ACPI_TYPE_BUFFER)
	    || (obj.buffer.length < sizeof(struct acpi_ptc_register))
	    || (obj.buffer.pointer == NULL)) {
		printk(KERN_ERR PREFIX "Invalid _PTC data (status_register)\n");
		result = -EFAULT;
		goto end;
	}
	memcpy(&pr->throttling.status_register, obj.buffer.pointer,
	       sizeof(struct acpi_ptc_register));

	/* both registers are accessed through 32-bit wide port I/O */
	throttling = &pr->throttling;
	if ((throttling->control_register.bit_width +
	     throttling->control_register.bit_offset) > 32) {
		printk(KERN_ERR PREFIX "Invalid _PTC control register\n");
		result = -EFAULT;
		goto end;
	}

	if ((throttling->status_register.bit_width +
	     throttling->status_register.bit_offset) > 32) {
		printk(KERN_ERR PREFIX "Invalid _PTC status register\n");
		result = -EFAULT;
		goto end;
	}

end:
	kfree(buffer.pointer);

	return result;
}
  375. /*
  376. * _TSS - Throttling Supported States
  377. */
  378. static int acpi_processor_get_throttling_states(struct acpi_processor *pr)
  379. {
  380. int result = 0;
  381. acpi_status status = AE_OK;
  382. struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
  383. struct acpi_buffer format = { sizeof("NNNNN"), "NNNNN" };
  384. struct acpi_buffer state = { 0, NULL };
  385. union acpi_object *tss = NULL;
  386. int i;
  387. status = acpi_evaluate_object(pr->handle, "_TSS", NULL, &buffer);
  388. if (ACPI_FAILURE(status)) {
  389. if (status != AE_NOT_FOUND) {
  390. ACPI_EXCEPTION((AE_INFO, status, "Evaluating _TSS"));
  391. }
  392. return -ENODEV;
  393. }
  394. tss = buffer.pointer;
  395. if (!tss || (tss->type != ACPI_TYPE_PACKAGE)) {
  396. printk(KERN_ERR PREFIX "Invalid _TSS data\n");
  397. result = -EFAULT;
  398. goto end;
  399. }
  400. ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found %d throttling states\n",
  401. tss->package.count));
  402. pr->throttling.state_count = tss->package.count;
  403. pr->throttling.states_tss =
  404. kmalloc(sizeof(struct acpi_processor_tx_tss) * tss->package.count,
  405. GFP_KERNEL);
  406. if (!pr->throttling.states_tss) {
  407. result = -ENOMEM;
  408. goto end;
  409. }
  410. for (i = 0; i < pr->throttling.state_count; i++) {
  411. struct acpi_processor_tx_tss *tx =
  412. (struct acpi_processor_tx_tss *)&(pr->throttling.
  413. states_tss[i]);
  414. state.length = sizeof(struct acpi_processor_tx_tss);
  415. state.pointer = tx;
  416. ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Extracting state %d\n", i));
  417. status = acpi_extract_package(&(tss->package.elements[i]),
  418. &format, &state);
  419. if (ACPI_FAILURE(status)) {
  420. ACPI_EXCEPTION((AE_INFO, status, "Invalid _TSS data"));
  421. result = -EFAULT;
  422. kfree(pr->throttling.states_tss);
  423. goto end;
  424. }
  425. if (!tx->freqpercentage) {
  426. printk(KERN_ERR PREFIX
  427. "Invalid _TSS data: freq is zero\n");
  428. result = -EFAULT;
  429. kfree(pr->throttling.states_tss);
  430. goto end;
  431. }
  432. }
  433. end:
  434. kfree(buffer.pointer);
  435. return result;
  436. }
/*
 * _TSD - T-State Dependencies
 *
 * Evaluates _TSD (a single-entry package describing this processor's
 * throttling domain), validates its entry count and revision, and stores
 * the domain info plus coordination type in pr->throttling.  Unknown
 * coordination types are downgraded to SW_ALL with the valid flag cleared.
 * Returns 0 on success, -ENODEV if _TSD is absent, -EFAULT on bad data.
 */
static int acpi_processor_get_tsd(struct acpi_processor *pr)
{
	int result = 0;
	acpi_status status = AE_OK;
	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
	struct acpi_buffer format = { sizeof("NNNNN"), "NNNNN" };
	struct acpi_buffer state = { 0, NULL };
	union acpi_object *tsd = NULL;
	struct acpi_tsd_package *pdomain;
	struct acpi_processor_throttling *pthrottling;

	pthrottling = &pr->throttling;
	pthrottling->tsd_valid_flag = 0;

	status = acpi_evaluate_object(pr->handle, "_TSD", NULL, &buffer);
	if (ACPI_FAILURE(status)) {
		if (status != AE_NOT_FOUND) {
			ACPI_EXCEPTION((AE_INFO, status, "Evaluating _TSD"));
		}
		return -ENODEV;
	}

	tsd = buffer.pointer;
	if (!tsd || (tsd->type != ACPI_TYPE_PACKAGE)) {
		ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Invalid _TSD data\n"));
		result = -EFAULT;
		goto end;
	}

	/* _TSD must contain exactly one dependency package */
	if (tsd->package.count != 1) {
		ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Invalid _TSD data\n"));
		result = -EFAULT;
		goto end;
	}

	pdomain = &(pr->throttling.domain_info);

	state.length = sizeof(struct acpi_tsd_package);
	state.pointer = pdomain;

	status = acpi_extract_package(&(tsd->package.elements[0]),
				      &format, &state);
	if (ACPI_FAILURE(status)) {
		ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Invalid _TSD data\n"));
		result = -EFAULT;
		goto end;
	}

	if (pdomain->num_entries != ACPI_TSD_REV0_ENTRIES) {
		ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Unknown _TSD:num_entries\n"));
		result = -EFAULT;
		goto end;
	}

	if (pdomain->revision != ACPI_TSD_REV0_REVISION) {
		ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Unknown _TSD:revision\n"));
		result = -EFAULT;
		goto end;
	}

	pthrottling = &pr->throttling;
	pthrottling->tsd_valid_flag = 1;
	pthrottling->shared_type = pdomain->coord_type;
	cpu_set(pr->id, pthrottling->shared_cpu_map);
	/*
	 * If the coordination type is not defined in ACPI spec,
	 * the tsd_valid_flag will be clear and coordination type
	 * will be forced as DOMAIN_COORD_TYPE_SW_ALL.
	 */
	if (pdomain->coord_type != DOMAIN_COORD_TYPE_SW_ALL &&
		pdomain->coord_type != DOMAIN_COORD_TYPE_SW_ANY &&
		pdomain->coord_type != DOMAIN_COORD_TYPE_HW_ALL) {
		pthrottling->tsd_valid_flag = 0;
		pthrottling->shared_type = DOMAIN_COORD_TYPE_SW_ALL;
	}

end:
	kfree(buffer.pointer);
	return result;
}
  509. /* --------------------------------------------------------------------------
  510. Throttling Control
  511. -------------------------------------------------------------------------- */
/*
 * acpi_processor_get_throttling_fadt - read the T-state via the P_BLK port
 *
 * Reads the FADT-described throttling control register with interrupts
 * disabled.  Bit 4 of the register is the throttle-enable bit; when set,
 * the duty field (duty_width bits at duty_offset) encodes the duty cycle,
 * and the state index is derived as state_count - duty_value.  The result
 * is cached in pr->throttling.state.  Returns 0 on success.
 */
static int acpi_processor_get_throttling_fadt(struct acpi_processor *pr)
{
	int state = 0;
	u32 value = 0;
	u32 duty_mask = 0;
	u32 duty_value = 0;

	if (!pr)
		return -EINVAL;

	if (!pr->flags.throttling)
		return -ENODEV;

	pr->throttling.state = 0;

	duty_mask = pr->throttling.state_count - 1;

	duty_mask <<= pr->throttling.duty_offset;

	/* IRQs off so the read of the port is not torn by a state change */
	local_irq_disable();

	value = inl(pr->throttling.address);

	/*
	 * Compute the current throttling state when throttling is enabled
	 * (bit 4 is on).
	 */
	if (value & 0x10) {
		duty_value = value & duty_mask;
		duty_value >>= pr->throttling.duty_offset;

		if (duty_value)
			state = pr->throttling.state_count - duty_value;
	}

	pr->throttling.state = state;

	local_irq_enable();

	ACPI_DEBUG_PRINT((ACPI_DB_INFO,
			  "Throttling state is T%d (%d%% throttling applied)\n",
			  state, pr->throttling.states[state].performance));

	return 0;
}
  544. #ifdef CONFIG_X86
/*
 * acpi_throttling_rdmsr - read MSR_IA32_THERM_CONTROL into *value
 *
 * FIXED_HARDWARE access is only implemented for Intel CPUs advertising
 * the ACPI (thermal clock modulation) feature flag; anything else logs an
 * error and returns -1.  Returns 0 on success.
 *
 * NOTE(review): this reads the MSR on whichever CPU is currently running;
 * callers migrate to pr->id beforehand (see acpi_processor_get_throttling).
 */
static int acpi_throttling_rdmsr(struct acpi_processor *pr,
				 acpi_integer * value)
{
	struct cpuinfo_x86 *c;
	u64 msr_high, msr_low;
	unsigned int cpu;
	u64 msr = 0;
	int ret = -1;

	cpu = pr->id;
	c = &cpu_data(cpu);

	if ((c->x86_vendor != X86_VENDOR_INTEL) ||
		!cpu_has(c, X86_FEATURE_ACPI)) {
		printk(KERN_ERR PREFIX
			"HARDWARE addr space,NOT supported yet\n");
	} else {
		/* rdmsr_safe stores 32 bits through each pointer; the u64
		 * locals are pre-zeroed so their high halves stay 0 */
		msr_low = 0;
		msr_high = 0;
		rdmsr_safe(MSR_IA32_THERM_CONTROL,
			(u32 *)&msr_low , (u32 *) &msr_high);
		msr = (msr_high << 32) | msr_low;
		*value = (acpi_integer) msr;
		ret = 0;
	}
	return ret;
}
/*
 * acpi_throttling_wrmsr - write value to MSR_IA32_THERM_CONTROL
 *
 * Counterpart of acpi_throttling_rdmsr(); same Intel/X86_FEATURE_ACPI
 * restriction.  Returns 0 on success, -1 when unsupported.
 */
static int acpi_throttling_wrmsr(struct acpi_processor *pr, acpi_integer value)
{
	struct cpuinfo_x86 *c;
	unsigned int cpu;
	int ret = -1;
	u64 msr;

	cpu = pr->id;
	c = &cpu_data(cpu);

	if ((c->x86_vendor != X86_VENDOR_INTEL) ||
		!cpu_has(c, X86_FEATURE_ACPI)) {
		printk(KERN_ERR PREFIX
			"HARDWARE addr space,NOT supported yet\n");
	} else {
		/* split the 64-bit value into the low/high halves wrmsr takes */
		msr = value;
		wrmsr_safe(MSR_IA32_THERM_CONTROL,
			msr & 0xffffffff, msr >> 32);
		ret = 0;
	}
	return ret;
}
  590. #else
/* Non-x86 stub: FIXED_HARDWARE (MSR) throttling access is unavailable. */
static int acpi_throttling_rdmsr(struct acpi_processor *pr,
				 acpi_integer * value)
{
	printk(KERN_ERR PREFIX
		"HARDWARE addr space,NOT supported yet\n");
	return -1;
}
/* Non-x86 stub: FIXED_HARDWARE (MSR) throttling access is unavailable. */
static int acpi_throttling_wrmsr(struct acpi_processor *pr, acpi_integer value)
{
	printk(KERN_ERR PREFIX
		"HARDWARE addr space,NOT supported yet\n");
	return -1;
}
  604. #endif
  605. static int acpi_read_throttling_status(struct acpi_processor *pr,
  606. acpi_integer *value)
  607. {
  608. u32 bit_width, bit_offset;
  609. u64 ptc_value;
  610. u64 ptc_mask;
  611. struct acpi_processor_throttling *throttling;
  612. int ret = -1;
  613. throttling = &pr->throttling;
  614. switch (throttling->status_register.space_id) {
  615. case ACPI_ADR_SPACE_SYSTEM_IO:
  616. ptc_value = 0;
  617. bit_width = throttling->status_register.bit_width;
  618. bit_offset = throttling->status_register.bit_offset;
  619. acpi_os_read_port((acpi_io_address) throttling->status_register.
  620. address, (u32 *) &ptc_value,
  621. (u32) (bit_width + bit_offset));
  622. ptc_mask = (1 << bit_width) - 1;
  623. *value = (acpi_integer) ((ptc_value >> bit_offset) & ptc_mask);
  624. ret = 0;
  625. break;
  626. case ACPI_ADR_SPACE_FIXED_HARDWARE:
  627. ret = acpi_throttling_rdmsr(pr, value);
  628. break;
  629. default:
  630. printk(KERN_ERR PREFIX "Unknown addr space %d\n",
  631. (u32) (throttling->status_register.space_id));
  632. }
  633. return ret;
  634. }
  635. static int acpi_write_throttling_state(struct acpi_processor *pr,
  636. acpi_integer value)
  637. {
  638. u32 bit_width, bit_offset;
  639. u64 ptc_value;
  640. u64 ptc_mask;
  641. struct acpi_processor_throttling *throttling;
  642. int ret = -1;
  643. throttling = &pr->throttling;
  644. switch (throttling->control_register.space_id) {
  645. case ACPI_ADR_SPACE_SYSTEM_IO:
  646. bit_width = throttling->control_register.bit_width;
  647. bit_offset = throttling->control_register.bit_offset;
  648. ptc_mask = (1 << bit_width) - 1;
  649. ptc_value = value & ptc_mask;
  650. acpi_os_write_port((acpi_io_address) throttling->
  651. control_register.address,
  652. (u32) (ptc_value << bit_offset),
  653. (u32) (bit_width + bit_offset));
  654. ret = 0;
  655. break;
  656. case ACPI_ADR_SPACE_FIXED_HARDWARE:
  657. ret = acpi_throttling_wrmsr(pr, value);
  658. break;
  659. default:
  660. printk(KERN_ERR PREFIX "Unknown addr space %d\n",
  661. (u32) (throttling->control_register.space_id));
  662. }
  663. return ret;
  664. }
  665. static int acpi_get_throttling_state(struct acpi_processor *pr,
  666. acpi_integer value)
  667. {
  668. int i;
  669. for (i = 0; i < pr->throttling.state_count; i++) {
  670. struct acpi_processor_tx_tss *tx =
  671. (struct acpi_processor_tx_tss *)&(pr->throttling.
  672. states_tss[i]);
  673. if (tx->control == value)
  674. break;
  675. }
  676. if (i > pr->throttling.state_count)
  677. i = -1;
  678. return i;
  679. }
  680. static int acpi_get_throttling_value(struct acpi_processor *pr,
  681. int state, acpi_integer *value)
  682. {
  683. int ret = -1;
  684. if (state >= 0 && state <= pr->throttling.state_count) {
  685. struct acpi_processor_tx_tss *tx =
  686. (struct acpi_processor_tx_tss *)&(pr->throttling.
  687. states_tss[state]);
  688. *value = tx->control;
  689. ret = 0;
  690. }
  691. return ret;
  692. }
/*
 * acpi_processor_get_throttling_ptc - read current T-state via _PTC
 *
 * Reads the raw status register and maps it back to a _TSS state index,
 * caching the result in pr->throttling.state.  Always returns 0; a failed
 * register read simply leaves the cached state at 0.
 *
 * NOTE(review): the value returned by acpi_get_throttling_state() is
 * stored without range-checking — if the raw value matches no _TSS entry
 * the sentinel ends up in pr->throttling.state; confirm callers tolerate
 * that.
 */
static int acpi_processor_get_throttling_ptc(struct acpi_processor *pr)
{
	int state = 0;
	int ret;
	acpi_integer value;

	if (!pr)
		return -EINVAL;

	if (!pr->flags.throttling)
		return -ENODEV;

	pr->throttling.state = 0;

	value = 0;
	ret = acpi_read_throttling_status(pr, &value);
	if (ret >= 0) {
		state = acpi_get_throttling_state(pr, value);
		pr->throttling.state = state;
	}

	return 0;
}
  711. static int acpi_processor_get_throttling(struct acpi_processor *pr)
  712. {
  713. cpumask_t saved_mask;
  714. int ret;
  715. if (!pr)
  716. return -EINVAL;
  717. if (!pr->flags.throttling)
  718. return -ENODEV;
  719. /*
  720. * Migrate task to the cpu pointed by pr.
  721. */
  722. saved_mask = current->cpus_allowed;
  723. set_cpus_allowed(current, cpumask_of_cpu(pr->id));
  724. ret = pr->throttling.acpi_processor_get_throttling(pr);
  725. /* restore the previous state */
  726. set_cpus_allowed(current, saved_mask);
  727. return ret;
  728. }
  729. static int acpi_processor_get_fadt_info(struct acpi_processor *pr)
  730. {
  731. int i, step;
  732. if (!pr->throttling.address) {
  733. ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No throttling register\n"));
  734. return -EINVAL;
  735. } else if (!pr->throttling.duty_width) {
  736. ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No throttling states\n"));
  737. return -EINVAL;
  738. }
  739. /* TBD: Support duty_cycle values that span bit 4. */
  740. else if ((pr->throttling.duty_offset + pr->throttling.duty_width) > 4) {
  741. printk(KERN_WARNING PREFIX "duty_cycle spans bit 4\n");
  742. return -EINVAL;
  743. }
  744. pr->throttling.state_count = 1 << acpi_gbl_FADT.duty_width;
  745. /*
  746. * Compute state values. Note that throttling displays a linear power
  747. * performance relationship (at 50% performance the CPU will consume
  748. * 50% power). Values are in 1/10th of a percent to preserve accuracy.
  749. */
  750. step = (1000 / pr->throttling.state_count);
  751. for (i = 0; i < pr->throttling.state_count; i++) {
  752. pr->throttling.states[i].performance = 1000 - step * i;
  753. pr->throttling.states[i].power = 1000 - step * i;
  754. }
  755. return 0;
  756. }
  757. static int acpi_processor_set_throttling_fadt(struct acpi_processor *pr,
  758. int state)
  759. {
  760. u32 value = 0;
  761. u32 duty_mask = 0;
  762. u32 duty_value = 0;
  763. if (!pr)
  764. return -EINVAL;
  765. if ((state < 0) || (state > (pr->throttling.state_count - 1)))
  766. return -EINVAL;
  767. if (!pr->flags.throttling)
  768. return -ENODEV;
  769. if (state == pr->throttling.state)
  770. return 0;
  771. if (state < pr->throttling_platform_limit)
  772. return -EPERM;
  773. /*
  774. * Calculate the duty_value and duty_mask.
  775. */
  776. if (state) {
  777. duty_value = pr->throttling.state_count - state;
  778. duty_value <<= pr->throttling.duty_offset;
  779. /* Used to clear all duty_value bits */
  780. duty_mask = pr->throttling.state_count - 1;
  781. duty_mask <<= acpi_gbl_FADT.duty_offset;
  782. duty_mask = ~duty_mask;
  783. }
  784. local_irq_disable();
  785. /*
  786. * Disable throttling by writing a 0 to bit 4. Note that we must
  787. * turn it off before you can change the duty_value.
  788. */
  789. value = inl(pr->throttling.address);
  790. if (value & 0x10) {
  791. value &= 0xFFFFFFEF;
  792. outl(value, pr->throttling.address);
  793. }
  794. /*
  795. * Write the new duty_value and then enable throttling. Note
  796. * that a state value of 0 leaves throttling disabled.
  797. */
  798. if (state) {
  799. value &= duty_mask;
  800. value |= duty_value;
  801. outl(value, pr->throttling.address);
  802. value |= 0x00000010;
  803. outl(value, pr->throttling.address);
  804. }
  805. pr->throttling.state = state;
  806. local_irq_enable();
  807. ACPI_DEBUG_PRINT((ACPI_DB_INFO,
  808. "Throttling state set to T%d (%d%%)\n", state,
  809. (pr->throttling.states[state].performance ? pr->
  810. throttling.states[state].performance / 10 : 0)));
  811. return 0;
  812. }
  813. static int acpi_processor_set_throttling_ptc(struct acpi_processor *pr,
  814. int state)
  815. {
  816. int ret;
  817. acpi_integer value;
  818. if (!pr)
  819. return -EINVAL;
  820. if ((state < 0) || (state > (pr->throttling.state_count - 1)))
  821. return -EINVAL;
  822. if (!pr->flags.throttling)
  823. return -ENODEV;
  824. if (state == pr->throttling.state)
  825. return 0;
  826. if (state < pr->throttling_platform_limit)
  827. return -EPERM;
  828. value = 0;
  829. ret = acpi_get_throttling_value(pr, state, &value);
  830. if (ret >= 0) {
  831. acpi_write_throttling_state(pr, value);
  832. pr->throttling.state = state;
  833. }
  834. return 0;
  835. }
  836. int acpi_processor_set_throttling(struct acpi_processor *pr, int state)
  837. {
  838. cpumask_t saved_mask;
  839. int ret;
  840. if (!pr)
  841. return -EINVAL;
  842. if (!pr->flags.throttling)
  843. return -ENODEV;
  844. if ((state < 0) || (state > (pr->throttling.state_count - 1)))
  845. return -EINVAL;
  846. /*
  847. * Migrate task to the cpu pointed by pr.
  848. */
  849. saved_mask = current->cpus_allowed;
  850. set_cpus_allowed(current, cpumask_of_cpu(pr->id));
  851. ret = pr->throttling.acpi_processor_set_throttling(pr, state);
  852. /* restore the previous state */
  853. set_cpus_allowed(current, saved_mask);
  854. return ret;
  855. }
  856. int acpi_processor_get_throttling_info(struct acpi_processor *pr)
  857. {
  858. int result = 0;
  859. struct acpi_processor_throttling *pthrottling;
  860. ACPI_DEBUG_PRINT((ACPI_DB_INFO,
  861. "pblk_address[0x%08x] duty_offset[%d] duty_width[%d]\n",
  862. pr->throttling.address,
  863. pr->throttling.duty_offset,
  864. pr->throttling.duty_width));
  865. if (!pr)
  866. return -EINVAL;
  867. /*
  868. * Evaluate _PTC, _TSS and _TPC
  869. * They must all be present or none of them can be used.
  870. */
  871. if (acpi_processor_get_throttling_control(pr) ||
  872. acpi_processor_get_throttling_states(pr) ||
  873. acpi_processor_get_platform_limit(pr))
  874. {
  875. pr->throttling.acpi_processor_get_throttling =
  876. &acpi_processor_get_throttling_fadt;
  877. pr->throttling.acpi_processor_set_throttling =
  878. &acpi_processor_set_throttling_fadt;
  879. if (acpi_processor_get_fadt_info(pr))
  880. return 0;
  881. } else {
  882. pr->throttling.acpi_processor_get_throttling =
  883. &acpi_processor_get_throttling_ptc;
  884. pr->throttling.acpi_processor_set_throttling =
  885. &acpi_processor_set_throttling_ptc;
  886. }
  887. /*
  888. * If TSD package for one CPU can't be parsed successfully, it means
  889. * that this CPU will have no coordination with other CPUs.
  890. */
  891. if (acpi_processor_get_tsd(pr)) {
  892. pthrottling = &pr->throttling;
  893. pthrottling->tsd_valid_flag = 0;
  894. cpu_set(pr->id, pthrottling->shared_cpu_map);
  895. pthrottling->shared_type = DOMAIN_COORD_TYPE_SW_ALL;
  896. }
  897. /*
  898. * PIIX4 Errata: We don't support throttling on the original PIIX4.
  899. * This shouldn't be an issue as few (if any) mobile systems ever
  900. * used this part.
  901. */
  902. if (errata.piix4.throttle) {
  903. ACPI_DEBUG_PRINT((ACPI_DB_INFO,
  904. "Throttling not supported on PIIX4 A- or B-step\n"));
  905. return 0;
  906. }
  907. ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found %d throttling states\n",
  908. pr->throttling.state_count));
  909. pr->flags.throttling = 1;
  910. /*
  911. * Disable throttling (if enabled). We'll let subsequent policy (e.g.
  912. * thermal) decide to lower performance if it so chooses, but for now
  913. * we'll crank up the speed.
  914. */
  915. result = acpi_processor_get_throttling(pr);
  916. if (result)
  917. goto end;
  918. if (pr->throttling.state) {
  919. ACPI_DEBUG_PRINT((ACPI_DB_INFO,
  920. "Disabling throttling (was T%d)\n",
  921. pr->throttling.state));
  922. result = acpi_processor_set_throttling(pr, 0);
  923. if (result)
  924. goto end;
  925. }
  926. end:
  927. if (result)
  928. pr->flags.throttling = 0;
  929. return result;
  930. }
  931. /* proc interface */
  932. static int acpi_processor_throttling_seq_show(struct seq_file *seq,
  933. void *offset)
  934. {
  935. struct acpi_processor *pr = seq->private;
  936. int i = 0;
  937. int result = 0;
  938. if (!pr)
  939. goto end;
  940. if (!(pr->throttling.state_count > 0)) {
  941. seq_puts(seq, "<not supported>\n");
  942. goto end;
  943. }
  944. result = acpi_processor_get_throttling(pr);
  945. if (result) {
  946. seq_puts(seq,
  947. "Could not determine current throttling state.\n");
  948. goto end;
  949. }
  950. seq_printf(seq, "state count: %d\n"
  951. "active state: T%d\n"
  952. "state available: T%d to T%d\n",
  953. pr->throttling.state_count, pr->throttling.state,
  954. pr->throttling_platform_limit,
  955. pr->throttling.state_count - 1);
  956. seq_puts(seq, "states:\n");
  957. if (pr->throttling.acpi_processor_get_throttling ==
  958. acpi_processor_get_throttling_fadt) {
  959. for (i = 0; i < pr->throttling.state_count; i++)
  960. seq_printf(seq, " %cT%d: %02d%%\n",
  961. (i == pr->throttling.state ? '*' : ' '), i,
  962. (pr->throttling.states[i].performance ? pr->
  963. throttling.states[i].performance / 10 : 0));
  964. } else {
  965. for (i = 0; i < pr->throttling.state_count; i++)
  966. seq_printf(seq, " %cT%d: %02d%%\n",
  967. (i == pr->throttling.state ? '*' : ' '), i,
  968. (int)pr->throttling.states_tss[i].
  969. freqpercentage);
  970. }
  971. end:
  972. return 0;
  973. }
  974. static int acpi_processor_throttling_open_fs(struct inode *inode,
  975. struct file *file)
  976. {
  977. return single_open(file, acpi_processor_throttling_seq_show,
  978. PDE(inode)->data);
  979. }
  980. static ssize_t acpi_processor_write_throttling(struct file *file,
  981. const char __user * buffer,
  982. size_t count, loff_t * data)
  983. {
  984. int result = 0;
  985. struct seq_file *m = file->private_data;
  986. struct acpi_processor *pr = m->private;
  987. char state_string[12] = { '\0' };
  988. if (!pr || (count > sizeof(state_string) - 1))
  989. return -EINVAL;
  990. if (copy_from_user(state_string, buffer, count))
  991. return -EFAULT;
  992. state_string[count] = '\0';
  993. result = acpi_processor_set_throttling(pr,
  994. simple_strtoul(state_string,
  995. NULL, 0));
  996. if (result)
  997. return result;
  998. return count;
  999. }
  1000. struct file_operations acpi_processor_throttling_fops = {
  1001. .open = acpi_processor_throttling_open_fs,
  1002. .read = seq_read,
  1003. .write = acpi_processor_write_throttling,
  1004. .llseek = seq_lseek,
  1005. .release = single_release,
  1006. };