processor_throttling.c 23 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926
  1. /*
  2. * processor_throttling.c - Throttling submodule of the ACPI processor driver
  3. *
  4. * Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
  5. * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
  6. * Copyright (C) 2004 Dominik Brodowski <linux@brodo.de>
  7. * Copyright (C) 2004 Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
  8. * - Added processor hotplug support
  9. *
  10. * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  11. *
  12. * This program is free software; you can redistribute it and/or modify
  13. * it under the terms of the GNU General Public License as published by
  14. * the Free Software Foundation; either version 2 of the License, or (at
  15. * your option) any later version.
  16. *
  17. * This program is distributed in the hope that it will be useful, but
  18. * WITHOUT ANY WARRANTY; without even the implied warranty of
  19. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  20. * General Public License for more details.
  21. *
  22. * You should have received a copy of the GNU General Public License along
  23. * with this program; if not, write to the Free Software Foundation, Inc.,
  24. * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
  25. *
  26. * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  27. */
  28. #include <linux/kernel.h>
  29. #include <linux/module.h>
  30. #include <linux/init.h>
  31. #include <linux/sched.h>
  32. #include <linux/cpufreq.h>
  33. #include <linux/proc_fs.h>
  34. #include <linux/seq_file.h>
  35. #include <asm/io.h>
  36. #include <asm/uaccess.h>
  37. #include <acpi/acpi_bus.h>
  38. #include <acpi/processor.h>
  39. #define ACPI_PROCESSOR_COMPONENT 0x01000000
  40. #define ACPI_PROCESSOR_CLASS "processor"
  41. #define _COMPONENT ACPI_PROCESSOR_COMPONENT
  42. ACPI_MODULE_NAME("processor_throttling");
  43. static int acpi_processor_get_throttling(struct acpi_processor *pr);
  44. int acpi_processor_set_throttling(struct acpi_processor *pr, int state);
  45. /*
  46. * _TPC - Throttling Present Capabilities
  47. */
  48. static int acpi_processor_get_platform_limit(struct acpi_processor *pr)
  49. {
  50. acpi_status status = 0;
  51. unsigned long tpc = 0;
  52. if (!pr)
  53. return -EINVAL;
  54. status = acpi_evaluate_integer(pr->handle, "_TPC", NULL, &tpc);
  55. if (ACPI_FAILURE(status)) {
  56. if (status != AE_NOT_FOUND) {
  57. ACPI_EXCEPTION((AE_INFO, status, "Evaluating _TPC"));
  58. }
  59. return -ENODEV;
  60. }
  61. pr->throttling_platform_limit = (int)tpc;
  62. return 0;
  63. }
  64. int acpi_processor_tstate_has_changed(struct acpi_processor *pr)
  65. {
  66. int result = 0;
  67. int throttling_limit;
  68. int current_state;
  69. struct acpi_processor_limit *limit;
  70. int target_state;
  71. result = acpi_processor_get_platform_limit(pr);
  72. if (result) {
  73. /* Throttling Limit is unsupported */
  74. return result;
  75. }
  76. throttling_limit = pr->throttling_platform_limit;
  77. if (throttling_limit >= pr->throttling.state_count) {
  78. /* Uncorrect Throttling Limit */
  79. return -EINVAL;
  80. }
  81. current_state = pr->throttling.state;
  82. if (current_state > throttling_limit) {
  83. /*
  84. * The current state can meet the requirement of
  85. * _TPC limit. But it is reasonable that OSPM changes
  86. * t-states from high to low for better performance.
  87. * Of course the limit condition of thermal
  88. * and user should be considered.
  89. */
  90. limit = &pr->limit;
  91. target_state = throttling_limit;
  92. if (limit->thermal.tx > target_state)
  93. target_state = limit->thermal.tx;
  94. if (limit->user.tx > target_state)
  95. target_state = limit->user.tx;
  96. } else if (current_state == throttling_limit) {
  97. /*
  98. * Unnecessary to change the throttling state
  99. */
  100. return 0;
  101. } else {
  102. /*
  103. * If the current state is lower than the limit of _TPC, it
  104. * will be forced to switch to the throttling state defined
  105. * by throttling_platfor_limit.
  106. * Because the previous state meets with the limit condition
  107. * of thermal and user, it is unnecessary to check it again.
  108. */
  109. target_state = throttling_limit;
  110. }
  111. return acpi_processor_set_throttling(pr, target_state);
  112. }
  113. /*
  114. * _PTC - Processor Throttling Control (and status) register location
  115. */
/*
 * acpi_processor_get_throttling_control - parse the _PTC package.
 *
 * _PTC returns a two-element package of register buffers: the throttling
 * control register followed by the status register.  Both are copied into
 * pr->throttling and sanity-checked (each field must fit inside 32 bits).
 *
 * Returns 0 on success, -ENODEV when _PTC is absent or fails to evaluate,
 * or -EFAULT for malformed package data.
 */
static int acpi_processor_get_throttling_control(struct acpi_processor *pr)
{
	int result = 0;
	acpi_status status = 0;
	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
	union acpi_object *ptc = NULL;
	union acpi_object obj = { 0 };
	struct acpi_processor_throttling *throttling;

	status = acpi_evaluate_object(pr->handle, "_PTC", NULL, &buffer);
	if (ACPI_FAILURE(status)) {
		/* A missing _PTC is normal; only log real evaluation errors. */
		if (status != AE_NOT_FOUND) {
			ACPI_EXCEPTION((AE_INFO, status, "Evaluating _PTC"));
		}
		return -ENODEV;
	}

	ptc = (union acpi_object *)buffer.pointer;
	if (!ptc || (ptc->type != ACPI_TYPE_PACKAGE)
	    || (ptc->package.count != 2)) {
		printk(KERN_ERR PREFIX "Invalid _PTC data\n");
		result = -EFAULT;
		goto end;
	}

	/*
	 * control_register (package element 0)
	 */
	obj = ptc->package.elements[0];
	if ((obj.type != ACPI_TYPE_BUFFER)
	    || (obj.buffer.length < sizeof(struct acpi_ptc_register))
	    || (obj.buffer.pointer == NULL)) {
		printk(KERN_ERR PREFIX
		       "Invalid _PTC data (control_register)\n");
		result = -EFAULT;
		goto end;
	}
	memcpy(&pr->throttling.control_register, obj.buffer.pointer,
	       sizeof(struct acpi_ptc_register));

	/*
	 * status_register (package element 1)
	 */
	obj = ptc->package.elements[1];
	if ((obj.type != ACPI_TYPE_BUFFER)
	    || (obj.buffer.length < sizeof(struct acpi_ptc_register))
	    || (obj.buffer.pointer == NULL)) {
		printk(KERN_ERR PREFIX "Invalid _PTC data (status_register)\n");
		result = -EFAULT;
		goto end;
	}
	memcpy(&pr->throttling.status_register, obj.buffer.pointer,
	       sizeof(struct acpi_ptc_register));

	throttling = &pr->throttling;

	/*
	 * Both registers are later accessed as single 32-bit quantities,
	 * so each must lie entirely within 32 bits.
	 */
	if ((throttling->control_register.bit_width +
	     throttling->control_register.bit_offset) > 32) {
		printk(KERN_ERR PREFIX "Invalid _PTC control register\n");
		result = -EFAULT;
		goto end;
	}

	if ((throttling->status_register.bit_width +
	     throttling->status_register.bit_offset) > 32) {
		printk(KERN_ERR PREFIX "Invalid _PTC status register\n");
		result = -EFAULT;
		goto end;
	}

      end:
	kfree(buffer.pointer);
	return result;
}
  182. /*
  183. * _TSS - Throttling Supported States
  184. */
  185. static int acpi_processor_get_throttling_states(struct acpi_processor *pr)
  186. {
  187. int result = 0;
  188. acpi_status status = AE_OK;
  189. struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
  190. struct acpi_buffer format = { sizeof("NNNNN"), "NNNNN" };
  191. struct acpi_buffer state = { 0, NULL };
  192. union acpi_object *tss = NULL;
  193. int i;
  194. status = acpi_evaluate_object(pr->handle, "_TSS", NULL, &buffer);
  195. if (ACPI_FAILURE(status)) {
  196. if (status != AE_NOT_FOUND) {
  197. ACPI_EXCEPTION((AE_INFO, status, "Evaluating _TSS"));
  198. }
  199. return -ENODEV;
  200. }
  201. tss = buffer.pointer;
  202. if (!tss || (tss->type != ACPI_TYPE_PACKAGE)) {
  203. printk(KERN_ERR PREFIX "Invalid _TSS data\n");
  204. result = -EFAULT;
  205. goto end;
  206. }
  207. ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found %d throttling states\n",
  208. tss->package.count));
  209. pr->throttling.state_count = tss->package.count;
  210. pr->throttling.states_tss =
  211. kmalloc(sizeof(struct acpi_processor_tx_tss) * tss->package.count,
  212. GFP_KERNEL);
  213. if (!pr->throttling.states_tss) {
  214. result = -ENOMEM;
  215. goto end;
  216. }
  217. for (i = 0; i < pr->throttling.state_count; i++) {
  218. struct acpi_processor_tx_tss *tx =
  219. (struct acpi_processor_tx_tss *)&(pr->throttling.
  220. states_tss[i]);
  221. state.length = sizeof(struct acpi_processor_tx_tss);
  222. state.pointer = tx;
  223. ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Extracting state %d\n", i));
  224. status = acpi_extract_package(&(tss->package.elements[i]),
  225. &format, &state);
  226. if (ACPI_FAILURE(status)) {
  227. ACPI_EXCEPTION((AE_INFO, status, "Invalid _TSS data"));
  228. result = -EFAULT;
  229. kfree(pr->throttling.states_tss);
  230. goto end;
  231. }
  232. if (!tx->freqpercentage) {
  233. printk(KERN_ERR PREFIX
  234. "Invalid _TSS data: freq is zero\n");
  235. result = -EFAULT;
  236. kfree(pr->throttling.states_tss);
  237. goto end;
  238. }
  239. }
  240. end:
  241. kfree(buffer.pointer);
  242. return result;
  243. }
  244. /*
  245. * _TSD - T-State Dependencies
  246. */
/*
 * acpi_processor_get_tsd - parse _TSD (T-State Dependencies).
 *
 * _TSD describes how this processor's throttling domain is shared with
 * other processors.  The single package entry is extracted into
 * pr->throttling.domain_info and validated against the revision-0 layout.
 *
 * Returns 0 on success, -ENODEV when _TSD is absent or fails to evaluate,
 * or -EFAULT for malformed data.
 */
static int acpi_processor_get_tsd(struct acpi_processor *pr)
{
	int result = 0;
	acpi_status status = AE_OK;
	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
	struct acpi_buffer format = { sizeof("NNNNN"), "NNNNN" };
	struct acpi_buffer state = { 0, NULL };
	union acpi_object *tsd = NULL;
	struct acpi_tsd_package *pdomain;

	status = acpi_evaluate_object(pr->handle, "_TSD", NULL, &buffer);
	if (ACPI_FAILURE(status)) {
		/* A missing _TSD is legal; only log real evaluation errors. */
		if (status != AE_NOT_FOUND) {
			ACPI_EXCEPTION((AE_INFO, status, "Evaluating _TSD"));
		}
		return -ENODEV;
	}

	tsd = buffer.pointer;
	if (!tsd || (tsd->type != ACPI_TYPE_PACKAGE)) {
		ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Invalid _TSD data\n"));
		result = -EFAULT;
		goto end;
	}

	/* _TSD must contain exactly one dependency sub-package. */
	if (tsd->package.count != 1) {
		ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Invalid _TSD data\n"));
		result = -EFAULT;
		goto end;
	}

	pdomain = &(pr->throttling.domain_info);

	state.length = sizeof(struct acpi_tsd_package);
	state.pointer = pdomain;

	status = acpi_extract_package(&(tsd->package.elements[0]),
				      &format, &state);
	if (ACPI_FAILURE(status)) {
		ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Invalid _TSD data\n"));
		result = -EFAULT;
		goto end;
	}

	/* Only the revision-0 five-entry layout is understood here. */
	if (pdomain->num_entries != ACPI_TSD_REV0_ENTRIES) {
		ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Unknown _TSD:num_entries\n"));
		result = -EFAULT;
		goto end;
	}

	if (pdomain->revision != ACPI_TSD_REV0_REVISION) {
		ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Unknown _TSD:revision\n"));
		result = -EFAULT;
		goto end;
	}

      end:
	kfree(buffer.pointer);
	return result;
}
  298. /* --------------------------------------------------------------------------
  299. Throttling Control
  300. -------------------------------------------------------------------------- */
/*
 * acpi_processor_get_throttling_fadt - read the current T-state through
 * the legacy FADT-described P_BLK throttling register.
 *
 * Samples the duty-cycle field from the I/O port at
 * pr->throttling.address and converts it back into a T-state index in
 * pr->throttling.state.  Interrupts are disabled around the port read so
 * the value is sampled atomically on this CPU.
 *
 * Returns 0 on success, -EINVAL for a NULL processor, or -ENODEV when
 * throttling is not supported on this processor.
 */
static int acpi_processor_get_throttling_fadt(struct acpi_processor *pr)
{
	int state = 0;
	u32 value = 0;
	u32 duty_mask = 0;
	u32 duty_value = 0;

	if (!pr)
		return -EINVAL;

	if (!pr->flags.throttling)
		return -ENODEV;

	pr->throttling.state = 0;

	/* Mask selecting the duty_value bits inside the register. */
	duty_mask = pr->throttling.state_count - 1;

	duty_mask <<= pr->throttling.duty_offset;

	local_irq_disable();

	value = inl(pr->throttling.address);

	/*
	 * Compute the current throttling state when throttling is enabled
	 * (bit 4 is on).
	 */
	if (value & 0x10) {
		duty_value = value & duty_mask;
		duty_value >>= pr->throttling.duty_offset;

		/* A duty_value of 0 means throttling is effectively off. */
		if (duty_value)
			state = pr->throttling.state_count - duty_value;
	}

	pr->throttling.state = state;

	local_irq_enable();

	ACPI_DEBUG_PRINT((ACPI_DB_INFO,
			  "Throttling state is T%d (%d%% throttling applied)\n",
			  state, pr->throttling.states[state].performance));

	return 0;
}
  333. #ifdef CONFIG_X86
/*
 * acpi_throttling_rdmsr - read the throttling value from
 * MSR_IA32_THERM_CONTROL on Intel CPUs that advertise the ACPI feature.
 *
 * NOTE(review): the MSR is read on whichever CPU the caller currently
 * runs on; callers are expected to migrate to pr->id beforehand (see
 * acpi_processor_get_throttling) - confirm for any new call sites.
 *
 * Returns 0 and stores the 64-bit MSR contents in *value on success,
 * or -1 when this CPU cannot use the fixed-hardware interface.
 */
static int acpi_throttling_rdmsr(struct acpi_processor *pr,
				 acpi_integer * value)
{
	struct cpuinfo_x86 *c;
	u64 msr_high, msr_low;
	unsigned int cpu;
	u64 msr = 0;
	int ret = -1;

	cpu = pr->id;
	c = &cpu_data(cpu);

	/* Fixed-hardware throttling is only implemented for Intel + ACPI. */
	if ((c->x86_vendor != X86_VENDOR_INTEL) ||
	    !cpu_has(c, X86_FEATURE_ACPI)) {
		printk(KERN_ERR PREFIX
		       "HARDWARE addr space,NOT supported yet\n");
	} else {
		msr_low = 0;
		msr_high = 0;
		/*
		 * rdmsr_safe fills two 32-bit halves; the (u32 *) casts pick
		 * the low word of each u64 (relies on little-endian x86).
		 */
		rdmsr_safe(MSR_IA32_THERM_CONTROL,
			   (u32 *)&msr_low , (u32 *) &msr_high);
		msr = (msr_high << 32) | msr_low;
		*value = (acpi_integer) msr;
		ret = 0;
	}
	return ret;
}
/*
 * acpi_throttling_wrmsr - write a throttling value to
 * MSR_IA32_THERM_CONTROL on Intel CPUs that advertise the ACPI feature.
 *
 * NOTE(review): the MSR is written on whichever CPU the caller currently
 * runs on; callers are expected to migrate to pr->id beforehand (see
 * acpi_processor_set_throttling) - confirm for any new call sites.
 *
 * Returns 0 on success, or -1 when this CPU cannot use the
 * fixed-hardware interface.
 */
static int acpi_throttling_wrmsr(struct acpi_processor *pr, acpi_integer value)
{
	struct cpuinfo_x86 *c;
	unsigned int cpu;
	int ret = -1;
	u64 msr;

	cpu = pr->id;
	c = &cpu_data(cpu);

	/* Fixed-hardware throttling is only implemented for Intel + ACPI. */
	if ((c->x86_vendor != X86_VENDOR_INTEL) ||
	    !cpu_has(c, X86_FEATURE_ACPI)) {
		printk(KERN_ERR PREFIX
		       "HARDWARE addr space,NOT supported yet\n");
	} else {
		msr = value;
		/* wrmsr_safe takes the low and high 32-bit halves. */
		wrmsr_safe(MSR_IA32_THERM_CONTROL,
			   msr & 0xffffffff, msr >> 32);
		ret = 0;
	}
	return ret;
}
  379. #else
  380. static int acpi_throttling_rdmsr(struct acpi_processor *pr,
  381. acpi_integer * value)
  382. {
  383. printk(KERN_ERR PREFIX
  384. "HARDWARE addr space,NOT supported yet\n");
  385. return -1;
  386. }
  387. static int acpi_throttling_wrmsr(struct acpi_processor *pr, acpi_integer value)
  388. {
  389. printk(KERN_ERR PREFIX
  390. "HARDWARE addr space,NOT supported yet\n");
  391. return -1;
  392. }
  393. #endif
  394. static int acpi_read_throttling_status(struct acpi_processor *pr,
  395. acpi_integer *value)
  396. {
  397. u32 bit_width, bit_offset;
  398. u64 ptc_value;
  399. u64 ptc_mask;
  400. struct acpi_processor_throttling *throttling;
  401. int ret = -1;
  402. throttling = &pr->throttling;
  403. switch (throttling->status_register.space_id) {
  404. case ACPI_ADR_SPACE_SYSTEM_IO:
  405. ptc_value = 0;
  406. bit_width = throttling->status_register.bit_width;
  407. bit_offset = throttling->status_register.bit_offset;
  408. acpi_os_read_port((acpi_io_address) throttling->status_register.
  409. address, (u32 *) &ptc_value,
  410. (u32) (bit_width + bit_offset));
  411. ptc_mask = (1 << bit_width) - 1;
  412. *value = (acpi_integer) ((ptc_value >> bit_offset) & ptc_mask);
  413. ret = 0;
  414. break;
  415. case ACPI_ADR_SPACE_FIXED_HARDWARE:
  416. ret = acpi_throttling_rdmsr(pr, value);
  417. break;
  418. default:
  419. printk(KERN_ERR PREFIX "Unknown addr space %d\n",
  420. (u32) (throttling->status_register.space_id));
  421. }
  422. return ret;
  423. }
  424. static int acpi_write_throttling_state(struct acpi_processor *pr,
  425. acpi_integer value)
  426. {
  427. u32 bit_width, bit_offset;
  428. u64 ptc_value;
  429. u64 ptc_mask;
  430. struct acpi_processor_throttling *throttling;
  431. int ret = -1;
  432. throttling = &pr->throttling;
  433. switch (throttling->control_register.space_id) {
  434. case ACPI_ADR_SPACE_SYSTEM_IO:
  435. bit_width = throttling->control_register.bit_width;
  436. bit_offset = throttling->control_register.bit_offset;
  437. ptc_mask = (1 << bit_width) - 1;
  438. ptc_value = value & ptc_mask;
  439. acpi_os_write_port((acpi_io_address) throttling->
  440. control_register.address,
  441. (u32) (ptc_value << bit_offset),
  442. (u32) (bit_width + bit_offset));
  443. ret = 0;
  444. break;
  445. case ACPI_ADR_SPACE_FIXED_HARDWARE:
  446. ret = acpi_throttling_wrmsr(pr, value);
  447. break;
  448. default:
  449. printk(KERN_ERR PREFIX "Unknown addr space %d\n",
  450. (u32) (throttling->control_register.space_id));
  451. }
  452. return ret;
  453. }
  454. static int acpi_get_throttling_state(struct acpi_processor *pr,
  455. acpi_integer value)
  456. {
  457. int i;
  458. for (i = 0; i < pr->throttling.state_count; i++) {
  459. struct acpi_processor_tx_tss *tx =
  460. (struct acpi_processor_tx_tss *)&(pr->throttling.
  461. states_tss[i]);
  462. if (tx->control == value)
  463. break;
  464. }
  465. if (i > pr->throttling.state_count)
  466. i = -1;
  467. return i;
  468. }
  469. static int acpi_get_throttling_value(struct acpi_processor *pr,
  470. int state, acpi_integer *value)
  471. {
  472. int ret = -1;
  473. if (state >= 0 && state <= pr->throttling.state_count) {
  474. struct acpi_processor_tx_tss *tx =
  475. (struct acpi_processor_tx_tss *)&(pr->throttling.
  476. states_tss[state]);
  477. *value = tx->control;
  478. ret = 0;
  479. }
  480. return ret;
  481. }
  482. static int acpi_processor_get_throttling_ptc(struct acpi_processor *pr)
  483. {
  484. int state = 0;
  485. int ret;
  486. acpi_integer value;
  487. if (!pr)
  488. return -EINVAL;
  489. if (!pr->flags.throttling)
  490. return -ENODEV;
  491. pr->throttling.state = 0;
  492. value = 0;
  493. ret = acpi_read_throttling_status(pr, &value);
  494. if (ret >= 0) {
  495. state = acpi_get_throttling_state(pr, value);
  496. pr->throttling.state = state;
  497. }
  498. return 0;
  499. }
  500. static int acpi_processor_get_throttling(struct acpi_processor *pr)
  501. {
  502. cpumask_t saved_mask;
  503. int ret;
  504. /*
  505. * Migrate task to the cpu pointed by pr.
  506. */
  507. saved_mask = current->cpus_allowed;
  508. set_cpus_allowed(current, cpumask_of_cpu(pr->id));
  509. ret = pr->throttling.acpi_processor_get_throttling(pr);
  510. /* restore the previous state */
  511. set_cpus_allowed(current, saved_mask);
  512. return ret;
  513. }
  514. static int acpi_processor_get_fadt_info(struct acpi_processor *pr)
  515. {
  516. int i, step;
  517. if (!pr->throttling.address) {
  518. ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No throttling register\n"));
  519. return -EINVAL;
  520. } else if (!pr->throttling.duty_width) {
  521. ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No throttling states\n"));
  522. return -EINVAL;
  523. }
  524. /* TBD: Support duty_cycle values that span bit 4. */
  525. else if ((pr->throttling.duty_offset + pr->throttling.duty_width) > 4) {
  526. printk(KERN_WARNING PREFIX "duty_cycle spans bit 4\n");
  527. return -EINVAL;
  528. }
  529. pr->throttling.state_count = 1 << acpi_gbl_FADT.duty_width;
  530. /*
  531. * Compute state values. Note that throttling displays a linear power
  532. * performance relationship (at 50% performance the CPU will consume
  533. * 50% power). Values are in 1/10th of a percent to preserve accuracy.
  534. */
  535. step = (1000 / pr->throttling.state_count);
  536. for (i = 0; i < pr->throttling.state_count; i++) {
  537. pr->throttling.states[i].performance = 1000 - step * i;
  538. pr->throttling.states[i].power = 1000 - step * i;
  539. }
  540. return 0;
  541. }
  542. static int acpi_processor_set_throttling_fadt(struct acpi_processor *pr,
  543. int state)
  544. {
  545. u32 value = 0;
  546. u32 duty_mask = 0;
  547. u32 duty_value = 0;
  548. if (!pr)
  549. return -EINVAL;
  550. if ((state < 0) || (state > (pr->throttling.state_count - 1)))
  551. return -EINVAL;
  552. if (!pr->flags.throttling)
  553. return -ENODEV;
  554. if (state == pr->throttling.state)
  555. return 0;
  556. if (state < pr->throttling_platform_limit)
  557. return -EPERM;
  558. /*
  559. * Calculate the duty_value and duty_mask.
  560. */
  561. if (state) {
  562. duty_value = pr->throttling.state_count - state;
  563. duty_value <<= pr->throttling.duty_offset;
  564. /* Used to clear all duty_value bits */
  565. duty_mask = pr->throttling.state_count - 1;
  566. duty_mask <<= acpi_gbl_FADT.duty_offset;
  567. duty_mask = ~duty_mask;
  568. }
  569. local_irq_disable();
  570. /*
  571. * Disable throttling by writing a 0 to bit 4. Note that we must
  572. * turn it off before you can change the duty_value.
  573. */
  574. value = inl(pr->throttling.address);
  575. if (value & 0x10) {
  576. value &= 0xFFFFFFEF;
  577. outl(value, pr->throttling.address);
  578. }
  579. /*
  580. * Write the new duty_value and then enable throttling. Note
  581. * that a state value of 0 leaves throttling disabled.
  582. */
  583. if (state) {
  584. value &= duty_mask;
  585. value |= duty_value;
  586. outl(value, pr->throttling.address);
  587. value |= 0x00000010;
  588. outl(value, pr->throttling.address);
  589. }
  590. pr->throttling.state = state;
  591. local_irq_enable();
  592. ACPI_DEBUG_PRINT((ACPI_DB_INFO,
  593. "Throttling state set to T%d (%d%%)\n", state,
  594. (pr->throttling.states[state].performance ? pr->
  595. throttling.states[state].performance / 10 : 0)));
  596. return 0;
  597. }
  598. static int acpi_processor_set_throttling_ptc(struct acpi_processor *pr,
  599. int state)
  600. {
  601. int ret;
  602. acpi_integer value;
  603. if (!pr)
  604. return -EINVAL;
  605. if ((state < 0) || (state > (pr->throttling.state_count - 1)))
  606. return -EINVAL;
  607. if (!pr->flags.throttling)
  608. return -ENODEV;
  609. if (state == pr->throttling.state)
  610. return 0;
  611. if (state < pr->throttling_platform_limit)
  612. return -EPERM;
  613. value = 0;
  614. ret = acpi_get_throttling_value(pr, state, &value);
  615. if (ret >= 0) {
  616. acpi_write_throttling_state(pr, value);
  617. pr->throttling.state = state;
  618. }
  619. return 0;
  620. }
  621. int acpi_processor_set_throttling(struct acpi_processor *pr, int state)
  622. {
  623. cpumask_t saved_mask;
  624. int ret;
  625. /*
  626. * Migrate task to the cpu pointed by pr.
  627. */
  628. saved_mask = current->cpus_allowed;
  629. set_cpus_allowed(current, cpumask_of_cpu(pr->id));
  630. ret = pr->throttling.acpi_processor_set_throttling(pr, state);
  631. /* restore the previous state */
  632. set_cpus_allowed(current, saved_mask);
  633. return ret;
  634. }
  635. int acpi_processor_get_throttling_info(struct acpi_processor *pr)
  636. {
  637. int result = 0;
  638. ACPI_DEBUG_PRINT((ACPI_DB_INFO,
  639. "pblk_address[0x%08x] duty_offset[%d] duty_width[%d]\n",
  640. pr->throttling.address,
  641. pr->throttling.duty_offset,
  642. pr->throttling.duty_width));
  643. if (!pr)
  644. return -EINVAL;
  645. /*
  646. * Evaluate _PTC, _TSS and _TPC
  647. * They must all be present or none of them can be used.
  648. */
  649. if (acpi_processor_get_throttling_control(pr) ||
  650. acpi_processor_get_throttling_states(pr) ||
  651. acpi_processor_get_platform_limit(pr))
  652. {
  653. pr->throttling.acpi_processor_get_throttling =
  654. &acpi_processor_get_throttling_fadt;
  655. pr->throttling.acpi_processor_set_throttling =
  656. &acpi_processor_set_throttling_fadt;
  657. if (acpi_processor_get_fadt_info(pr))
  658. return 0;
  659. } else {
  660. pr->throttling.acpi_processor_get_throttling =
  661. &acpi_processor_get_throttling_ptc;
  662. pr->throttling.acpi_processor_set_throttling =
  663. &acpi_processor_set_throttling_ptc;
  664. }
  665. acpi_processor_get_tsd(pr);
  666. /*
  667. * PIIX4 Errata: We don't support throttling on the original PIIX4.
  668. * This shouldn't be an issue as few (if any) mobile systems ever
  669. * used this part.
  670. */
  671. if (errata.piix4.throttle) {
  672. ACPI_DEBUG_PRINT((ACPI_DB_INFO,
  673. "Throttling not supported on PIIX4 A- or B-step\n"));
  674. return 0;
  675. }
  676. ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found %d throttling states\n",
  677. pr->throttling.state_count));
  678. pr->flags.throttling = 1;
  679. /*
  680. * Disable throttling (if enabled). We'll let subsequent policy (e.g.
  681. * thermal) decide to lower performance if it so chooses, but for now
  682. * we'll crank up the speed.
  683. */
  684. result = acpi_processor_get_throttling(pr);
  685. if (result)
  686. goto end;
  687. if (pr->throttling.state) {
  688. ACPI_DEBUG_PRINT((ACPI_DB_INFO,
  689. "Disabling throttling (was T%d)\n",
  690. pr->throttling.state));
  691. result = acpi_processor_set_throttling(pr, 0);
  692. if (result)
  693. goto end;
  694. }
  695. end:
  696. if (result)
  697. pr->flags.throttling = 0;
  698. return result;
  699. }
  700. /* proc interface */
/*
 * /proc "throttling" show handler: prints the state count, the active
 * T-state and a per-state percentage table.  The table source depends on
 * which backend is bound: FADT duty-cycle states or _TSS entries.
 * Always returns 0 so the file reads as empty/partial text on error.
 */
static int acpi_processor_throttling_seq_show(struct seq_file *seq,
					      void *offset)
{
	struct acpi_processor *pr = seq->private;
	int i = 0;
	int result = 0;

	if (!pr)
		goto end;

	if (!(pr->throttling.state_count > 0)) {
		seq_puts(seq, "<not supported>\n");
		goto end;
	}

	/* Refresh pr->throttling.state from the hardware before printing. */
	result = acpi_processor_get_throttling(pr);

	if (result) {
		seq_puts(seq,
			 "Could not determine current throttling state.\n");
		goto end;
	}

	seq_printf(seq, "state count: %d\n"
		   "active state: T%d\n"
		   "state available: T%d to T%d\n",
		   pr->throttling.state_count, pr->throttling.state,
		   pr->throttling_platform_limit,
		   pr->throttling.state_count - 1);

	seq_puts(seq, "states:\n");
	/* FADT backend: percentages come from the computed states[] table. */
	if (pr->throttling.acpi_processor_get_throttling ==
	    acpi_processor_get_throttling_fadt) {
		for (i = 0; i < pr->throttling.state_count; i++)
			seq_printf(seq, "   %cT%d:                  %02d%%\n",
				   (i == pr->throttling.state ? '*' : ' '), i,
				   (pr->throttling.states[i].performance ? pr->
				    throttling.states[i].performance / 10 : 0));
	} else {
		/* _PTC backend: percentages come straight from _TSS. */
		for (i = 0; i < pr->throttling.state_count; i++)
			seq_printf(seq, "   %cT%d:                  %02d%%\n",
				   (i == pr->throttling.state ? '*' : ' '), i,
				   (int)pr->throttling.states_tss[i].
				   freqpercentage);
	}

      end:
	return 0;
}
  743. static int acpi_processor_throttling_open_fs(struct inode *inode,
  744. struct file *file)
  745. {
  746. return single_open(file, acpi_processor_throttling_seq_show,
  747. PDE(inode)->data);
  748. }
  749. static ssize_t acpi_processor_write_throttling(struct file *file,
  750. const char __user * buffer,
  751. size_t count, loff_t * data)
  752. {
  753. int result = 0;
  754. struct seq_file *m = file->private_data;
  755. struct acpi_processor *pr = m->private;
  756. char state_string[12] = { '\0' };
  757. if (!pr || (count > sizeof(state_string) - 1))
  758. return -EINVAL;
  759. if (copy_from_user(state_string, buffer, count))
  760. return -EFAULT;
  761. state_string[count] = '\0';
  762. result = acpi_processor_set_throttling(pr,
  763. simple_strtoul(state_string,
  764. NULL, 0));
  765. if (result)
  766. return result;
  767. return count;
  768. }
/* File operations for the per-processor /proc "throttling" entry. */
struct file_operations acpi_processor_throttling_fops = {
	.open = acpi_processor_throttling_open_fs,
	.read = seq_read,
	.write = acpi_processor_write_throttling,
	.llseek = seq_lseek,
	.release = single_release,
};