processor_throttling.c 32 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
7
  1. /*
  2. * processor_throttling.c - Throttling submodule of the ACPI processor driver
  3. *
  4. * Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
  5. * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
  6. * Copyright (C) 2004 Dominik Brodowski <linux@brodo.de>
  7. * Copyright (C) 2004 Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
  8. * - Added processor hotplug support
  9. *
  10. * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  11. *
  12. * This program is free software; you can redistribute it and/or modify
  13. * it under the terms of the GNU General Public License as published by
  14. * the Free Software Foundation; either version 2 of the License, or (at
  15. * your option) any later version.
  16. *
  17. * This program is distributed in the hope that it will be useful, but
  18. * WITHOUT ANY WARRANTY; without even the implied warranty of
  19. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  20. * General Public License for more details.
  21. *
  22. * You should have received a copy of the GNU General Public License along
  23. * with this program; if not, write to the Free Software Foundation, Inc.,
  24. * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
  25. *
  26. * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  27. */
  28. #include <linux/kernel.h>
  29. #include <linux/module.h>
  30. #include <linux/init.h>
  31. #include <linux/sched.h>
  32. #include <linux/cpufreq.h>
  33. #include <linux/proc_fs.h>
  34. #include <linux/seq_file.h>
  35. #include <asm/io.h>
  36. #include <asm/uaccess.h>
  37. #include <acpi/acpi_bus.h>
  38. #include <acpi/processor.h>
/* Component/class identifiers used by the ACPI debug macros below. */
#define ACPI_PROCESSOR_COMPONENT 0x01000000
#define ACPI_PROCESSOR_CLASS "processor"
#define _COMPONENT ACPI_PROCESSOR_COMPONENT
ACPI_MODULE_NAME("processor_throttling");

/*
 * Argument passed to the throttling notifier: which CPU is affected
 * and which T-state it is being moved to.
 */
struct throttling_tstate {
	unsigned int cpu;	/* cpu nr */
	int target_state;	/* target T-state */
};

/* Notifier events issued before and after a T-state transition. */
#define THROTTLING_PRECHANGE (1)
#define THROTTLING_POSTCHANGE (2)

static int acpi_processor_get_throttling(struct acpi_processor *pr);
int acpi_processor_set_throttling(struct acpi_processor *pr, int state);
/*
 * acpi_processor_update_tsd_coord - build T-state coordination maps.
 *
 * Walks every possible CPU's _TSD domain info, validates that CPUs
 * claiming the same domain agree on processor count and coordination
 * type, and fills each processor's shared_cpu_map accordingly. On any
 * validation error every CPU is reset to an independent SW_ALL domain
 * containing only itself.
 *
 * Returns 0 on success or -EINVAL on invalid/inconsistent _TSD data.
 */
static int acpi_processor_update_tsd_coord(void)
{
	int count, count_target;
	int retval = 0;
	unsigned int i, j;
	cpumask_t covered_cpus;
	struct acpi_processor *pr, *match_pr;
	struct acpi_tsd_package *pdomain, *match_pdomain;
	struct acpi_processor_throttling *pthrottling, *match_pthrottling;

	/*
	 * Now that we have _TSD data from all CPUs, lets setup T-state
	 * coordination between all CPUs.
	 */
	for_each_possible_cpu(i) {
		pr = per_cpu(processors, i);
		/* Skip CPUs with no registered processor object. */
		if (!pr)
			continue;

		/* Basic validity check for domain info */
		pthrottling = &(pr->throttling);

		/*
		 * If the tsd package for one CPU is invalid, the coordination
		 * among all CPUs is treated as invalid.
		 * Maybe it is ugly.
		 */
		if (!pthrottling->tsd_valid_flag) {
			retval = -EINVAL;
			break;
		}
	}
	if (retval)
		goto err_ret;

	cpus_clear(covered_cpus);
	for_each_possible_cpu(i) {
		pr = per_cpu(processors, i);
		if (!pr)
			continue;

		/* Already assigned to a domain in an earlier iteration. */
		if (cpu_isset(i, covered_cpus))
			continue;
		pthrottling = &pr->throttling;

		pdomain = &(pthrottling->domain_info);
		cpu_set(i, pthrottling->shared_cpu_map);
		cpu_set(i, covered_cpus);
		/*
		 * If the number of processors in the TSD domain is 1, it is
		 * unnecessary to parse the coordination for this CPU.
		 */
		if (pdomain->num_processors <= 1)
			continue;

		/* Validate the Domain info */
		count_target = pdomain->num_processors;
		count = 1;

		/* First pass: collect every CPU that shares i's domain. */
		for_each_possible_cpu(j) {
			if (i == j)
				continue;

			match_pr = per_cpu(processors, j);
			if (!match_pr)
				continue;

			match_pthrottling = &(match_pr->throttling);
			match_pdomain = &(match_pthrottling->domain_info);
			if (match_pdomain->domain != pdomain->domain)
				continue;

			/* Here i and j are in the same domain.
			 * If two TSD packages have the same domain, they
			 * should have the same num_processors and
			 * coordination type. Otherwise it will be regarded
			 * as illegal.
			 */
			if (match_pdomain->num_processors != count_target) {
				retval = -EINVAL;
				goto err_ret;
			}

			if (pdomain->coord_type != match_pdomain->coord_type) {
				retval = -EINVAL;
				goto err_ret;
			}

			cpu_set(j, covered_cpus);
			cpu_set(j, pthrottling->shared_cpu_map);
			count++;
		}
		/* Second pass: propagate the completed map to all members. */
		for_each_possible_cpu(j) {
			if (i == j)
				continue;

			match_pr = per_cpu(processors, j);
			if (!match_pr)
				continue;

			match_pthrottling = &(match_pr->throttling);
			match_pdomain = &(match_pthrottling->domain_info);
			if (match_pdomain->domain != pdomain->domain)
				continue;

			/*
			 * If some CPUS have the same domain, they
			 * will have the same shared_cpu_map.
			 */
			match_pthrottling->shared_cpu_map =
			    pthrottling->shared_cpu_map;
		}
	}

      err_ret:
	for_each_possible_cpu(i) {
		pr = per_cpu(processors, i);
		if (!pr)
			continue;

		/*
		 * Assume no coordination on any error parsing domain info.
		 * The coordination type will be forced as SW_ALL.
		 */
		if (retval) {
			pthrottling = &(pr->throttling);
			cpus_clear(pthrottling->shared_cpu_map);
			cpu_set(i, pthrottling->shared_cpu_map);
			pthrottling->shared_type = DOMAIN_COORD_TYPE_SW_ALL;
		}
	}

	return retval;
}
  166. /*
  167. * Update the T-state coordination after the _TSD
  168. * data for all cpus is obtained.
  169. */
  170. void acpi_processor_throttling_init(void)
  171. {
  172. if (acpi_processor_update_tsd_coord())
  173. ACPI_DEBUG_PRINT((ACPI_DB_INFO,
  174. "Assume no T-state coordination\n"));
  175. return;
  176. }
/*
 * Throttling notifier, invoked around a T-state transition for each
 * affected CPU.
 *
 * THROTTLING_PRECHANGE: clamp the requested target state against the
 * thermal limit, the user limit and the _TPC platform limit, then write
 * the result back into data->target_state.
 * THROTTLING_POSTCHANGE: record the now-current T-state.
 *
 * Always returns 0 (missing processor or unsupported throttling is only
 * logged).
 */
static int acpi_processor_throttling_notifier(unsigned long event, void *data)
{
	struct throttling_tstate *p_tstate = data;
	struct acpi_processor *pr;
	unsigned int cpu ;
	int target_state;
	struct acpi_processor_limit *p_limit;
	struct acpi_processor_throttling *p_throttling;

	cpu = p_tstate->cpu;
	pr = per_cpu(processors, cpu);
	if (!pr) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Invalid pr pointer\n"));
		return 0;
	}
	if (!pr->flags.throttling) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Throttling control is "
				  "unsupported on CPU %d\n", cpu));
		return 0;
	}
	target_state = p_tstate->target_state;
	p_throttling = &(pr->throttling);
	switch (event) {
	case THROTTLING_PRECHANGE:
		/*
		 * Prechange event is used to choose one proper t-state,
		 * which meets the limits of thermal, user and _TPC.
		 * The largest of the three limit values wins, then the
		 * result is clamped to the last valid state index.
		 */
		p_limit = &pr->limit;
		if (p_limit->thermal.tx > target_state)
			target_state = p_limit->thermal.tx;
		if (p_limit->user.tx > target_state)
			target_state = p_limit->user.tx;
		if (pr->throttling_platform_limit > target_state)
			target_state = pr->throttling_platform_limit;
		if (target_state >= p_throttling->state_count) {
			printk(KERN_WARNING
			       "Exceed the limit of T-state \n");
			target_state = p_throttling->state_count - 1;
		}
		p_tstate->target_state = target_state;
		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "PreChange Event:"
				  "target T-state of CPU %d is T%d\n",
				  cpu, target_state));
		break;
	case THROTTLING_POSTCHANGE:
		/*
		 * Postchange event is only used to update the
		 * T-state flag of acpi_processor_throttling.
		 */
		p_throttling->state = target_state;
		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "PostChange Event:"
				  "CPU %d is switched to T%d\n",
				  cpu, target_state));
		break;
	default:
		printk(KERN_WARNING
		       "Unsupported Throttling notifier event\n");
		break;
	}

	return 0;
}
  238. /*
  239. * _TPC - Throttling Present Capabilities
  240. */
  241. static int acpi_processor_get_platform_limit(struct acpi_processor *pr)
  242. {
  243. acpi_status status = 0;
  244. unsigned long tpc = 0;
  245. if (!pr)
  246. return -EINVAL;
  247. status = acpi_evaluate_integer(pr->handle, "_TPC", NULL, &tpc);
  248. if (ACPI_FAILURE(status)) {
  249. if (status != AE_NOT_FOUND) {
  250. ACPI_EXCEPTION((AE_INFO, status, "Evaluating _TPC"));
  251. }
  252. return -ENODEV;
  253. }
  254. pr->throttling_platform_limit = (int)tpc;
  255. return 0;
  256. }
  257. int acpi_processor_tstate_has_changed(struct acpi_processor *pr)
  258. {
  259. int result = 0;
  260. int throttling_limit;
  261. int current_state;
  262. struct acpi_processor_limit *limit;
  263. int target_state;
  264. result = acpi_processor_get_platform_limit(pr);
  265. if (result) {
  266. /* Throttling Limit is unsupported */
  267. return result;
  268. }
  269. throttling_limit = pr->throttling_platform_limit;
  270. if (throttling_limit >= pr->throttling.state_count) {
  271. /* Uncorrect Throttling Limit */
  272. return -EINVAL;
  273. }
  274. current_state = pr->throttling.state;
  275. if (current_state > throttling_limit) {
  276. /*
  277. * The current state can meet the requirement of
  278. * _TPC limit. But it is reasonable that OSPM changes
  279. * t-states from high to low for better performance.
  280. * Of course the limit condition of thermal
  281. * and user should be considered.
  282. */
  283. limit = &pr->limit;
  284. target_state = throttling_limit;
  285. if (limit->thermal.tx > target_state)
  286. target_state = limit->thermal.tx;
  287. if (limit->user.tx > target_state)
  288. target_state = limit->user.tx;
  289. } else if (current_state == throttling_limit) {
  290. /*
  291. * Unnecessary to change the throttling state
  292. */
  293. return 0;
  294. } else {
  295. /*
  296. * If the current state is lower than the limit of _TPC, it
  297. * will be forced to switch to the throttling state defined
  298. * by throttling_platfor_limit.
  299. * Because the previous state meets with the limit condition
  300. * of thermal and user, it is unnecessary to check it again.
  301. */
  302. target_state = throttling_limit;
  303. }
  304. return acpi_processor_set_throttling(pr, target_state);
  305. }
/*
 * _PTC - Processor Throttling Control (and status) register location
 *
 * Evaluates _PTC and caches the control and status register descriptions
 * in pr->throttling. Both registers must fit in 32 bits
 * (bit_width + bit_offset <= 32), which the port accessors below rely on.
 * Returns 0 on success, -ENODEV when _PTC is absent or evaluation fails,
 * -EFAULT on malformed data.
 */
static int acpi_processor_get_throttling_control(struct acpi_processor *pr)
{
	int result = 0;
	acpi_status status = 0;
	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
	union acpi_object *ptc = NULL;
	union acpi_object obj = { 0 };
	struct acpi_processor_throttling *throttling;

	status = acpi_evaluate_object(pr->handle, "_PTC", NULL, &buffer);
	if (ACPI_FAILURE(status)) {
		if (status != AE_NOT_FOUND) {
			ACPI_EXCEPTION((AE_INFO, status, "Evaluating _PTC"));
		}
		return -ENODEV;
	}

	ptc = (union acpi_object *)buffer.pointer;

	/* _PTC must be a package of exactly two register buffers. */
	if (!ptc || (ptc->type != ACPI_TYPE_PACKAGE)
	    || (ptc->package.count != 2)) {
		printk(KERN_ERR PREFIX "Invalid _PTC data\n");
		result = -EFAULT;
		goto end;
	}

	/*
	 * control_register
	 */
	obj = ptc->package.elements[0];

	if ((obj.type != ACPI_TYPE_BUFFER)
	    || (obj.buffer.length < sizeof(struct acpi_ptc_register))
	    || (obj.buffer.pointer == NULL)) {
		printk(KERN_ERR PREFIX
		       "Invalid _PTC data (control_register)\n");
		result = -EFAULT;
		goto end;
	}
	memcpy(&pr->throttling.control_register, obj.buffer.pointer,
	       sizeof(struct acpi_ptc_register));

	/*
	 * status_register
	 */
	obj = ptc->package.elements[1];

	if ((obj.type != ACPI_TYPE_BUFFER)
	    || (obj.buffer.length < sizeof(struct acpi_ptc_register))
	    || (obj.buffer.pointer == NULL)) {
		printk(KERN_ERR PREFIX "Invalid _PTC data (status_register)\n");
		result = -EFAULT;
		goto end;
	}
	memcpy(&pr->throttling.status_register, obj.buffer.pointer,
	       sizeof(struct acpi_ptc_register));

	throttling = &pr->throttling;

	/* The 32-bit port read/write helpers cannot handle wider fields. */
	if ((throttling->control_register.bit_width +
	     throttling->control_register.bit_offset) > 32) {
		printk(KERN_ERR PREFIX "Invalid _PTC control register\n");
		result = -EFAULT;
		goto end;
	}

	if ((throttling->status_register.bit_width +
	     throttling->status_register.bit_offset) > 32) {
		printk(KERN_ERR PREFIX "Invalid _PTC status register\n");
		result = -EFAULT;
		goto end;
	}

      end:
	kfree(buffer.pointer);

	return result;
}
  375. /*
  376. * _TSS - Throttling Supported States
  377. */
  378. static int acpi_processor_get_throttling_states(struct acpi_processor *pr)
  379. {
  380. int result = 0;
  381. acpi_status status = AE_OK;
  382. struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
  383. struct acpi_buffer format = { sizeof("NNNNN"), "NNNNN" };
  384. struct acpi_buffer state = { 0, NULL };
  385. union acpi_object *tss = NULL;
  386. int i;
  387. status = acpi_evaluate_object(pr->handle, "_TSS", NULL, &buffer);
  388. if (ACPI_FAILURE(status)) {
  389. if (status != AE_NOT_FOUND) {
  390. ACPI_EXCEPTION((AE_INFO, status, "Evaluating _TSS"));
  391. }
  392. return -ENODEV;
  393. }
  394. tss = buffer.pointer;
  395. if (!tss || (tss->type != ACPI_TYPE_PACKAGE)) {
  396. printk(KERN_ERR PREFIX "Invalid _TSS data\n");
  397. result = -EFAULT;
  398. goto end;
  399. }
  400. ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found %d throttling states\n",
  401. tss->package.count));
  402. pr->throttling.state_count = tss->package.count;
  403. pr->throttling.states_tss =
  404. kmalloc(sizeof(struct acpi_processor_tx_tss) * tss->package.count,
  405. GFP_KERNEL);
  406. if (!pr->throttling.states_tss) {
  407. result = -ENOMEM;
  408. goto end;
  409. }
  410. for (i = 0; i < pr->throttling.state_count; i++) {
  411. struct acpi_processor_tx_tss *tx =
  412. (struct acpi_processor_tx_tss *)&(pr->throttling.
  413. states_tss[i]);
  414. state.length = sizeof(struct acpi_processor_tx_tss);
  415. state.pointer = tx;
  416. ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Extracting state %d\n", i));
  417. status = acpi_extract_package(&(tss->package.elements[i]),
  418. &format, &state);
  419. if (ACPI_FAILURE(status)) {
  420. ACPI_EXCEPTION((AE_INFO, status, "Invalid _TSS data"));
  421. result = -EFAULT;
  422. kfree(pr->throttling.states_tss);
  423. goto end;
  424. }
  425. if (!tx->freqpercentage) {
  426. printk(KERN_ERR PREFIX
  427. "Invalid _TSS data: freq is zero\n");
  428. result = -EFAULT;
  429. kfree(pr->throttling.states_tss);
  430. goto end;
  431. }
  432. }
  433. end:
  434. kfree(buffer.pointer);
  435. return result;
  436. }
/*
 * _TSD - T-State Dependencies
 *
 * Evaluates _TSD and caches the (single) domain package in
 * pr->throttling.domain_info. On success tsd_valid_flag is set and the
 * coordination type recorded; an unrecognized coordination type clears
 * the flag again and forces SW_ALL. Returns 0, -ENODEV (absent/failed
 * _TSD) or -EFAULT (malformed data).
 */
static int acpi_processor_get_tsd(struct acpi_processor *pr)
{
	int result = 0;
	acpi_status status = AE_OK;
	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
	struct acpi_buffer format = { sizeof("NNNNN"), "NNNNN" };
	struct acpi_buffer state = { 0, NULL };
	union acpi_object *tsd = NULL;
	struct acpi_tsd_package *pdomain;
	struct acpi_processor_throttling *pthrottling;

	/* Invalidate until the package fully validates. */
	pthrottling = &pr->throttling;
	pthrottling->tsd_valid_flag = 0;

	status = acpi_evaluate_object(pr->handle, "_TSD", NULL, &buffer);
	if (ACPI_FAILURE(status)) {
		if (status != AE_NOT_FOUND) {
			ACPI_EXCEPTION((AE_INFO, status, "Evaluating _TSD"));
		}
		return -ENODEV;
	}

	tsd = buffer.pointer;
	if (!tsd || (tsd->type != ACPI_TYPE_PACKAGE)) {
		ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Invalid _TSD data\n"));
		result = -EFAULT;
		goto end;
	}

	/* _TSD is expected to contain exactly one domain package. */
	if (tsd->package.count != 1) {
		ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Invalid _TSD data\n"));
		result = -EFAULT;
		goto end;
	}

	pdomain = &(pr->throttling.domain_info);

	state.length = sizeof(struct acpi_tsd_package);
	state.pointer = pdomain;

	status = acpi_extract_package(&(tsd->package.elements[0]),
				      &format, &state);
	if (ACPI_FAILURE(status)) {
		ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Invalid _TSD data\n"));
		result = -EFAULT;
		goto end;
	}

	if (pdomain->num_entries != ACPI_TSD_REV0_ENTRIES) {
		ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Unknown _TSD:num_entries\n"));
		result = -EFAULT;
		goto end;
	}

	if (pdomain->revision != ACPI_TSD_REV0_REVISION) {
		ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Unknown _TSD:revision\n"));
		result = -EFAULT;
		goto end;
	}

	pthrottling = &pr->throttling;
	pthrottling->tsd_valid_flag = 1;
	pthrottling->shared_type = pdomain->coord_type;
	cpu_set(pr->id, pthrottling->shared_cpu_map);
	/*
	 * If the coordination type is not defined in the ACPI spec,
	 * the tsd_valid_flag will be cleared and the coordination type
	 * will be forced to DOMAIN_COORD_TYPE_SW_ALL.
	 */
	if (pdomain->coord_type != DOMAIN_COORD_TYPE_SW_ALL &&
	    pdomain->coord_type != DOMAIN_COORD_TYPE_SW_ANY &&
	    pdomain->coord_type != DOMAIN_COORD_TYPE_HW_ALL) {
		pthrottling->tsd_valid_flag = 0;
		pthrottling->shared_type = DOMAIN_COORD_TYPE_SW_ALL;
	}

      end:
	kfree(buffer.pointer);
	return result;
}
/* --------------------------------------------------------------------------
                              Throttling Control
   -------------------------------------------------------------------------- */

/*
 * Read the current T-state from the FADT-described throttling port.
 * Interrupts are disabled around the port read so the read value and the
 * cached pr->throttling.state stay consistent. Returns 0, or
 * -EINVAL / -ENODEV for a missing processor / unsupported throttling.
 */
static int acpi_processor_get_throttling_fadt(struct acpi_processor *pr)
{
	int state = 0;
	u32 value = 0;
	u32 duty_mask = 0;
	u32 duty_value = 0;

	if (!pr)
		return -EINVAL;

	if (!pr->flags.throttling)
		return -ENODEV;

	pr->throttling.state = 0;

	/* Mask selecting the duty-cycle field inside the register. */
	duty_mask = pr->throttling.state_count - 1;

	duty_mask <<= pr->throttling.duty_offset;

	local_irq_disable();

	value = inl(pr->throttling.address);

	/*
	 * Compute the current throttling state when throttling is enabled
	 * (bit 4 is on).
	 */
	if (value & 0x10) {
		duty_value = value & duty_mask;
		duty_value >>= pr->throttling.duty_offset;

		/* A non-zero duty value N maps to state state_count - N. */
		if (duty_value)
			state = pr->throttling.state_count - duty_value;
	}

	pr->throttling.state = state;

	local_irq_enable();

	ACPI_DEBUG_PRINT((ACPI_DB_INFO,
			  "Throttling state is T%d (%d%% throttling applied)\n",
			  state, pr->throttling.states[state].performance));

	return 0;
}
#ifdef CONFIG_X86
/*
 * Read the throttling control MSR (IA32_THERM_CONTROL) into *value.
 * Only Intel CPUs advertising the ACPI feature are supported; anything
 * else logs an error and returns -1 (0 on success).
 */
static int acpi_throttling_rdmsr(struct acpi_processor *pr,
				 acpi_integer * value)
{
	struct cpuinfo_x86 *c;
	u64 msr_high, msr_low;
	unsigned int cpu;
	u64 msr = 0;
	int ret = -1;

	cpu = pr->id;
	c = &cpu_data(cpu);

	if ((c->x86_vendor != X86_VENDOR_INTEL) ||
	    !cpu_has(c, X86_FEATURE_ACPI)) {
		printk(KERN_ERR PREFIX
		       "HARDWARE addr space,NOT supported yet\n");
	} else {
		msr_low = 0;
		msr_high = 0;
		/*
		 * NOTE(review): rdmsr_safe stores 32-bit halves through
		 * pointers into these u64s, and its return value (a
		 * faulting MSR access) is ignored — verify this is
		 * intentional.
		 */
		rdmsr_safe(MSR_IA32_THERM_CONTROL,
			   (u32 *)&msr_low , (u32 *) &msr_high);
		msr = (msr_high << 32) | msr_low;
		*value = (acpi_integer) msr;
		ret = 0;
	}
	return ret;
}

/*
 * Write the throttling control MSR. Same vendor/feature gating and
 * return convention as the read side.
 */
static int acpi_throttling_wrmsr(struct acpi_processor *pr, acpi_integer value)
{
	struct cpuinfo_x86 *c;
	unsigned int cpu;
	int ret = -1;
	u64 msr;

	cpu = pr->id;
	c = &cpu_data(cpu);

	if ((c->x86_vendor != X86_VENDOR_INTEL) ||
	    !cpu_has(c, X86_FEATURE_ACPI)) {
		printk(KERN_ERR PREFIX
		       "HARDWARE addr space,NOT supported yet\n");
	} else {
		msr = value;
		wrmsr_safe(MSR_IA32_THERM_CONTROL,
			   msr & 0xffffffff, msr >> 32);
		ret = 0;
	}
	return ret;
}
#else
/* Non-x86 stub: FIXED_HARDWARE (MSR) throttling is x86-only. */
static int acpi_throttling_rdmsr(struct acpi_processor *pr,
				 acpi_integer * value)
{
	printk(KERN_ERR PREFIX
	       "HARDWARE addr space,NOT supported yet\n");
	return -1;
}

/* Non-x86 stub: always fails. */
static int acpi_throttling_wrmsr(struct acpi_processor *pr, acpi_integer value)
{
	printk(KERN_ERR PREFIX
	       "HARDWARE addr space,NOT supported yet\n");
	return -1;
}
#endif
  605. static int acpi_read_throttling_status(struct acpi_processor *pr,
  606. acpi_integer *value)
  607. {
  608. u32 bit_width, bit_offset;
  609. u64 ptc_value;
  610. u64 ptc_mask;
  611. struct acpi_processor_throttling *throttling;
  612. int ret = -1;
  613. throttling = &pr->throttling;
  614. switch (throttling->status_register.space_id) {
  615. case ACPI_ADR_SPACE_SYSTEM_IO:
  616. ptc_value = 0;
  617. bit_width = throttling->status_register.bit_width;
  618. bit_offset = throttling->status_register.bit_offset;
  619. acpi_os_read_port((acpi_io_address) throttling->status_register.
  620. address, (u32 *) &ptc_value,
  621. (u32) (bit_width + bit_offset));
  622. ptc_mask = (1 << bit_width) - 1;
  623. *value = (acpi_integer) ((ptc_value >> bit_offset) & ptc_mask);
  624. ret = 0;
  625. break;
  626. case ACPI_ADR_SPACE_FIXED_HARDWARE:
  627. ret = acpi_throttling_rdmsr(pr, value);
  628. break;
  629. default:
  630. printk(KERN_ERR PREFIX "Unknown addr space %d\n",
  631. (u32) (throttling->status_register.space_id));
  632. }
  633. return ret;
  634. }
  635. static int acpi_write_throttling_state(struct acpi_processor *pr,
  636. acpi_integer value)
  637. {
  638. u32 bit_width, bit_offset;
  639. u64 ptc_value;
  640. u64 ptc_mask;
  641. struct acpi_processor_throttling *throttling;
  642. int ret = -1;
  643. throttling = &pr->throttling;
  644. switch (throttling->control_register.space_id) {
  645. case ACPI_ADR_SPACE_SYSTEM_IO:
  646. bit_width = throttling->control_register.bit_width;
  647. bit_offset = throttling->control_register.bit_offset;
  648. ptc_mask = (1 << bit_width) - 1;
  649. ptc_value = value & ptc_mask;
  650. acpi_os_write_port((acpi_io_address) throttling->
  651. control_register.address,
  652. (u32) (ptc_value << bit_offset),
  653. (u32) (bit_width + bit_offset));
  654. ret = 0;
  655. break;
  656. case ACPI_ADR_SPACE_FIXED_HARDWARE:
  657. ret = acpi_throttling_wrmsr(pr, value);
  658. break;
  659. default:
  660. printk(KERN_ERR PREFIX "Unknown addr space %d\n",
  661. (u32) (throttling->control_register.space_id));
  662. }
  663. return ret;
  664. }
  665. static int acpi_get_throttling_state(struct acpi_processor *pr,
  666. acpi_integer value)
  667. {
  668. int i;
  669. for (i = 0; i < pr->throttling.state_count; i++) {
  670. struct acpi_processor_tx_tss *tx =
  671. (struct acpi_processor_tx_tss *)&(pr->throttling.
  672. states_tss[i]);
  673. if (tx->control == value)
  674. break;
  675. }
  676. if (i > pr->throttling.state_count)
  677. i = -1;
  678. return i;
  679. }
  680. static int acpi_get_throttling_value(struct acpi_processor *pr,
  681. int state, acpi_integer *value)
  682. {
  683. int ret = -1;
  684. if (state >= 0 && state <= pr->throttling.state_count) {
  685. struct acpi_processor_tx_tss *tx =
  686. (struct acpi_processor_tx_tss *)&(pr->throttling.
  687. states_tss[state]);
  688. *value = tx->control;
  689. ret = 0;
  690. }
  691. return ret;
  692. }
/*
 * Refresh pr->throttling.state by reading the _PTC status register and
 * mapping the raw value back to a T-state index via the _TSS table.
 * Always returns 0 once the basic processor checks pass.
 */
static int acpi_processor_get_throttling_ptc(struct acpi_processor *pr)
{
	int state = 0;
	int ret;
	acpi_integer value;

	if (!pr)
		return -EINVAL;

	if (!pr->flags.throttling)
		return -ENODEV;

	pr->throttling.state = 0;

	value = 0;
	ret = acpi_read_throttling_status(pr, &value);
	if (ret >= 0) {
		/*
		 * NOTE(review): the lookup's "not found" result is stored
		 * into pr->throttling.state unchecked — confirm callers
		 * tolerate an out-of-range state value.
		 */
		state = acpi_get_throttling_state(pr, value);
		pr->throttling.state = state;
	}

	return 0;
}
/*
 * Front-end for the per-mechanism getter stored in
 * pr->throttling.acpi_processor_get_throttling. The current task is
 * temporarily bound to the target CPU so the register/MSR access
 * happens on that CPU, then the previous affinity is restored.
 */
static int acpi_processor_get_throttling(struct acpi_processor *pr)
{
	cpumask_t saved_mask;
	int ret;

	if (!pr)
		return -EINVAL;

	if (!pr->flags.throttling)
		return -ENODEV;
	/*
	 * Migrate task to the cpu pointed by pr.
	 */
	saved_mask = current->cpus_allowed;
	set_cpus_allowed_ptr(current, &cpumask_of_cpu(pr->id));
	ret = pr->throttling.acpi_processor_get_throttling(pr);
	/* restore the previous state */
	set_cpus_allowed_ptr(current, &saved_mask);

	return ret;
}
  729. static int acpi_processor_get_fadt_info(struct acpi_processor *pr)
  730. {
  731. int i, step;
  732. if (!pr->throttling.address) {
  733. ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No throttling register\n"));
  734. return -EINVAL;
  735. } else if (!pr->throttling.duty_width) {
  736. ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No throttling states\n"));
  737. return -EINVAL;
  738. }
  739. /* TBD: Support duty_cycle values that span bit 4. */
  740. else if ((pr->throttling.duty_offset + pr->throttling.duty_width) > 4) {
  741. printk(KERN_WARNING PREFIX "duty_cycle spans bit 4\n");
  742. return -EINVAL;
  743. }
  744. pr->throttling.state_count = 1 << acpi_gbl_FADT.duty_width;
  745. /*
  746. * Compute state values. Note that throttling displays a linear power
  747. * performance relationship (at 50% performance the CPU will consume
  748. * 50% power). Values are in 1/10th of a percent to preserve accuracy.
  749. */
  750. step = (1000 / pr->throttling.state_count);
  751. for (i = 0; i < pr->throttling.state_count; i++) {
  752. pr->throttling.states[i].performance = 1000 - step * i;
  753. pr->throttling.states[i].power = 1000 - step * i;
  754. }
  755. return 0;
  756. }
/*
 * acpi_processor_set_throttling_fadt - enter T-state @state using the
 * legacy FADT duty-cycle interface (direct I/O to the throttling port).
 *
 * Returns 0 on success, -EINVAL for a NULL @pr or out-of-range @state,
 * -ENODEV when throttling is unsupported on this processor, and -EPERM
 * when @state is below the platform-imposed limit.  A @state of 0 leaves
 * the throttle-enable bit cleared, i.e. no throttling.
 */
static int acpi_processor_set_throttling_fadt(struct acpi_processor *pr,
					      int state)
{
	u32 value = 0;
	u32 duty_mask = 0;
	u32 duty_value = 0;

	if (!pr)
		return -EINVAL;

	/* @state must be one of the advertised T-states. */
	if ((state < 0) || (state > (pr->throttling.state_count - 1)))
		return -EINVAL;

	if (!pr->flags.throttling)
		return -ENODEV;

	/* Already in the requested T-state - nothing to do. */
	if (state == pr->throttling.state)
		return 0;

	/* States below the platform limit may not be requested. */
	if (state < pr->throttling_platform_limit)
		return -EPERM;

	/*
	 * Calculate the duty_value and duty_mask.
	 */
	if (state) {
		duty_value = pr->throttling.state_count - state;
		duty_value <<= pr->throttling.duty_offset;

		/* Used to clear all duty_value bits */
		duty_mask = pr->throttling.state_count - 1;
		/*
		 * NOTE(review): the mask shifts by acpi_gbl_FADT.duty_offset
		 * while the value shifts by pr->throttling.duty_offset -
		 * presumably these are always equal; confirm where
		 * pr->throttling.duty_offset is initialized.
		 */
		duty_mask <<= acpi_gbl_FADT.duty_offset;
		duty_mask = ~duty_mask;
	}

	/* The read-modify-write of the port must not be interrupted. */
	local_irq_disable();

	/*
	 * Disable throttling by writing a 0 to bit 4. Note that we must
	 * turn it off before you can change the duty_value.
	 */
	value = inl(pr->throttling.address);
	if (value & 0x10) {
		value &= 0xFFFFFFEF;
		outl(value, pr->throttling.address);
	}

	/*
	 * Write the new duty_value and then enable throttling. Note
	 * that a state value of 0 leaves throttling disabled.
	 */
	if (state) {
		value &= duty_mask;
		value |= duty_value;
		outl(value, pr->throttling.address);

		/* Separate write to set the enable bit (bit 4). */
		value |= 0x00000010;
		outl(value, pr->throttling.address);
	}

	pr->throttling.state = state;

	local_irq_enable();

	ACPI_DEBUG_PRINT((ACPI_DB_INFO,
			  "Throttling state set to T%d (%d%%)\n", state,
			  (pr->throttling.states[state].performance ? pr->
			   throttling.states[state].performance / 10 : 0)));

	return 0;
}
  813. static int acpi_processor_set_throttling_ptc(struct acpi_processor *pr,
  814. int state)
  815. {
  816. int ret;
  817. acpi_integer value;
  818. if (!pr)
  819. return -EINVAL;
  820. if ((state < 0) || (state > (pr->throttling.state_count - 1)))
  821. return -EINVAL;
  822. if (!pr->flags.throttling)
  823. return -ENODEV;
  824. if (state == pr->throttling.state)
  825. return 0;
  826. if (state < pr->throttling_platform_limit)
  827. return -EPERM;
  828. value = 0;
  829. ret = acpi_get_throttling_value(pr, state, &value);
  830. if (ret >= 0) {
  831. acpi_write_throttling_state(pr, value);
  832. pr->throttling.state = state;
  833. }
  834. return 0;
  835. }
  836. int acpi_processor_set_throttling(struct acpi_processor *pr, int state)
  837. {
  838. cpumask_t saved_mask;
  839. int ret = 0;
  840. unsigned int i;
  841. struct acpi_processor *match_pr;
  842. struct acpi_processor_throttling *p_throttling;
  843. struct throttling_tstate t_state;
  844. cpumask_t online_throttling_cpus;
  845. if (!pr)
  846. return -EINVAL;
  847. if (!pr->flags.throttling)
  848. return -ENODEV;
  849. if ((state < 0) || (state > (pr->throttling.state_count - 1)))
  850. return -EINVAL;
  851. saved_mask = current->cpus_allowed;
  852. t_state.target_state = state;
  853. p_throttling = &(pr->throttling);
  854. cpus_and(online_throttling_cpus, cpu_online_map,
  855. p_throttling->shared_cpu_map);
  856. /*
  857. * The throttling notifier will be called for every
  858. * affected cpu in order to get one proper T-state.
  859. * The notifier event is THROTTLING_PRECHANGE.
  860. */
  861. for_each_cpu_mask_nr(i, online_throttling_cpus) {
  862. t_state.cpu = i;
  863. acpi_processor_throttling_notifier(THROTTLING_PRECHANGE,
  864. &t_state);
  865. }
  866. /*
  867. * The function of acpi_processor_set_throttling will be called
  868. * to switch T-state. If the coordination type is SW_ALL or HW_ALL,
  869. * it is necessary to call it for every affected cpu. Otherwise
  870. * it can be called only for the cpu pointed by pr.
  871. */
  872. if (p_throttling->shared_type == DOMAIN_COORD_TYPE_SW_ANY) {
  873. set_cpus_allowed_ptr(current, &cpumask_of_cpu(pr->id));
  874. ret = p_throttling->acpi_processor_set_throttling(pr,
  875. t_state.target_state);
  876. } else {
  877. /*
  878. * When the T-state coordination is SW_ALL or HW_ALL,
  879. * it is necessary to set T-state for every affected
  880. * cpus.
  881. */
  882. for_each_cpu_mask_nr(i, online_throttling_cpus) {
  883. match_pr = per_cpu(processors, i);
  884. /*
  885. * If the pointer is invalid, we will report the
  886. * error message and continue.
  887. */
  888. if (!match_pr) {
  889. ACPI_DEBUG_PRINT((ACPI_DB_INFO,
  890. "Invalid Pointer for CPU %d\n", i));
  891. continue;
  892. }
  893. /*
  894. * If the throttling control is unsupported on CPU i,
  895. * we will report the error message and continue.
  896. */
  897. if (!match_pr->flags.throttling) {
  898. ACPI_DEBUG_PRINT((ACPI_DB_INFO,
  899. "Throttling Controll is unsupported "
  900. "on CPU %d\n", i));
  901. continue;
  902. }
  903. t_state.cpu = i;
  904. set_cpus_allowed_ptr(current, &cpumask_of_cpu(i));
  905. ret = match_pr->throttling.
  906. acpi_processor_set_throttling(
  907. match_pr, t_state.target_state);
  908. }
  909. }
  910. /*
  911. * After the set_throttling is called, the
  912. * throttling notifier is called for every
  913. * affected cpu to update the T-states.
  914. * The notifier event is THROTTLING_POSTCHANGE
  915. */
  916. for_each_cpu_mask_nr(i, online_throttling_cpus) {
  917. t_state.cpu = i;
  918. acpi_processor_throttling_notifier(THROTTLING_POSTCHANGE,
  919. &t_state);
  920. }
  921. /* restore the previous state */
  922. set_cpus_allowed_ptr(current, &saved_mask);
  923. return ret;
  924. }
  925. int acpi_processor_get_throttling_info(struct acpi_processor *pr)
  926. {
  927. int result = 0;
  928. struct acpi_processor_throttling *pthrottling;
  929. ACPI_DEBUG_PRINT((ACPI_DB_INFO,
  930. "pblk_address[0x%08x] duty_offset[%d] duty_width[%d]\n",
  931. pr->throttling.address,
  932. pr->throttling.duty_offset,
  933. pr->throttling.duty_width));
  934. if (!pr)
  935. return -EINVAL;
  936. /*
  937. * Evaluate _PTC, _TSS and _TPC
  938. * They must all be present or none of them can be used.
  939. */
  940. if (acpi_processor_get_throttling_control(pr) ||
  941. acpi_processor_get_throttling_states(pr) ||
  942. acpi_processor_get_platform_limit(pr))
  943. {
  944. pr->throttling.acpi_processor_get_throttling =
  945. &acpi_processor_get_throttling_fadt;
  946. pr->throttling.acpi_processor_set_throttling =
  947. &acpi_processor_set_throttling_fadt;
  948. if (acpi_processor_get_fadt_info(pr))
  949. return 0;
  950. } else {
  951. pr->throttling.acpi_processor_get_throttling =
  952. &acpi_processor_get_throttling_ptc;
  953. pr->throttling.acpi_processor_set_throttling =
  954. &acpi_processor_set_throttling_ptc;
  955. }
  956. /*
  957. * If TSD package for one CPU can't be parsed successfully, it means
  958. * that this CPU will have no coordination with other CPUs.
  959. */
  960. if (acpi_processor_get_tsd(pr)) {
  961. pthrottling = &pr->throttling;
  962. pthrottling->tsd_valid_flag = 0;
  963. cpu_set(pr->id, pthrottling->shared_cpu_map);
  964. pthrottling->shared_type = DOMAIN_COORD_TYPE_SW_ALL;
  965. }
  966. /*
  967. * PIIX4 Errata: We don't support throttling on the original PIIX4.
  968. * This shouldn't be an issue as few (if any) mobile systems ever
  969. * used this part.
  970. */
  971. if (errata.piix4.throttle) {
  972. ACPI_DEBUG_PRINT((ACPI_DB_INFO,
  973. "Throttling not supported on PIIX4 A- or B-step\n"));
  974. return 0;
  975. }
  976. ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found %d throttling states\n",
  977. pr->throttling.state_count));
  978. pr->flags.throttling = 1;
  979. /*
  980. * Disable throttling (if enabled). We'll let subsequent policy (e.g.
  981. * thermal) decide to lower performance if it so chooses, but for now
  982. * we'll crank up the speed.
  983. */
  984. result = acpi_processor_get_throttling(pr);
  985. if (result)
  986. goto end;
  987. if (pr->throttling.state) {
  988. ACPI_DEBUG_PRINT((ACPI_DB_INFO,
  989. "Disabling throttling (was T%d)\n",
  990. pr->throttling.state));
  991. result = acpi_processor_set_throttling(pr, 0);
  992. if (result)
  993. goto end;
  994. }
  995. end:
  996. if (result)
  997. pr->flags.throttling = 0;
  998. return result;
  999. }
  1000. /* proc interface */
  1001. static int acpi_processor_throttling_seq_show(struct seq_file *seq,
  1002. void *offset)
  1003. {
  1004. struct acpi_processor *pr = seq->private;
  1005. int i = 0;
  1006. int result = 0;
  1007. if (!pr)
  1008. goto end;
  1009. if (!(pr->throttling.state_count > 0)) {
  1010. seq_puts(seq, "<not supported>\n");
  1011. goto end;
  1012. }
  1013. result = acpi_processor_get_throttling(pr);
  1014. if (result) {
  1015. seq_puts(seq,
  1016. "Could not determine current throttling state.\n");
  1017. goto end;
  1018. }
  1019. seq_printf(seq, "state count: %d\n"
  1020. "active state: T%d\n"
  1021. "state available: T%d to T%d\n",
  1022. pr->throttling.state_count, pr->throttling.state,
  1023. pr->throttling_platform_limit,
  1024. pr->throttling.state_count - 1);
  1025. seq_puts(seq, "states:\n");
  1026. if (pr->throttling.acpi_processor_get_throttling ==
  1027. acpi_processor_get_throttling_fadt) {
  1028. for (i = 0; i < pr->throttling.state_count; i++)
  1029. seq_printf(seq, " %cT%d: %02d%%\n",
  1030. (i == pr->throttling.state ? '*' : ' '), i,
  1031. (pr->throttling.states[i].performance ? pr->
  1032. throttling.states[i].performance / 10 : 0));
  1033. } else {
  1034. for (i = 0; i < pr->throttling.state_count; i++)
  1035. seq_printf(seq, " %cT%d: %02d%%\n",
  1036. (i == pr->throttling.state ? '*' : ' '), i,
  1037. (int)pr->throttling.states_tss[i].
  1038. freqpercentage);
  1039. }
  1040. end:
  1041. return 0;
  1042. }
/*
 * proc open hook: wire the seq_file machinery to the show routine.  The
 * struct acpi_processor stored in the proc entry's ->data becomes
 * seq->private for acpi_processor_throttling_seq_show().
 */
static int acpi_processor_throttling_open_fs(struct inode *inode,
					     struct file *file)
{
	return single_open(file, acpi_processor_throttling_seq_show,
			   PDE(inode)->data);
}
  1049. static ssize_t acpi_processor_write_throttling(struct file *file,
  1050. const char __user * buffer,
  1051. size_t count, loff_t * data)
  1052. {
  1053. int result = 0;
  1054. struct seq_file *m = file->private_data;
  1055. struct acpi_processor *pr = m->private;
  1056. char state_string[5] = "";
  1057. char *charp = NULL;
  1058. size_t state_val = 0;
  1059. char tmpbuf[5] = "";
  1060. if (!pr || (count > sizeof(state_string) - 1))
  1061. return -EINVAL;
  1062. if (copy_from_user(state_string, buffer, count))
  1063. return -EFAULT;
  1064. state_string[count] = '\0';
  1065. if ((count > 0) && (state_string[count-1] == '\n'))
  1066. state_string[count-1] = '\0';
  1067. charp = state_string;
  1068. if ((state_string[0] == 't') || (state_string[0] == 'T'))
  1069. charp++;
  1070. state_val = simple_strtoul(charp, NULL, 0);
  1071. if (state_val >= pr->throttling.state_count)
  1072. return -EINVAL;
  1073. snprintf(tmpbuf, 5, "%zu", state_val);
  1074. if (strcmp(tmpbuf, charp) != 0)
  1075. return -EINVAL;
  1076. result = acpi_processor_set_throttling(pr, state_val);
  1077. if (result)
  1078. return result;
  1079. return count;
  1080. }
/*
 * File operations for /proc/acpi/processor/CPUx/throttling: reads go
 * through the seq_file helpers, writes select a new T-state.
 */
struct file_operations acpi_processor_throttling_fops = {
	.owner = THIS_MODULE,
	.open = acpi_processor_throttling_open_fs,
	.read = seq_read,
	.write = acpi_processor_write_throttling,
	.llseek = seq_lseek,
	.release = single_release,
};