processor_perflib.c 20 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809
  1. /*
  2. * processor_perflib.c - ACPI Processor P-States Library ($Revision: 71 $)
  3. *
  4. * Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
  5. * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
  6. * Copyright (C) 2004 Dominik Brodowski <linux@brodo.de>
  7. * Copyright (C) 2004 Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
  8. * - Added processor hotplug support
  9. *
  10. *
  11. * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  12. *
  13. * This program is free software; you can redistribute it and/or modify
  14. * it under the terms of the GNU General Public License as published by
  15. * the Free Software Foundation; either version 2 of the License, or (at
  16. * your option) any later version.
  17. *
  18. * This program is distributed in the hope that it will be useful, but
  19. * WITHOUT ANY WARRANTY; without even the implied warranty of
  20. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  21. * General Public License for more details.
  22. *
  23. * You should have received a copy of the GNU General Public License along
  24. * with this program; if not, write to the Free Software Foundation, Inc.,
  25. * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
  26. *
  27. */
  28. #include <linux/kernel.h>
  29. #include <linux/module.h>
  30. #include <linux/init.h>
  31. #include <linux/cpufreq.h>
  32. #ifdef CONFIG_X86_ACPI_CPUFREQ_PROC_INTF
  33. #include <linux/proc_fs.h>
  34. #include <linux/seq_file.h>
  35. #include <linux/mutex.h>
  36. #include <asm/uaccess.h>
  37. #endif
  38. #include <asm/cpufeature.h>
  39. #include <acpi/acpi_bus.h>
  40. #include <acpi/processor.h>
  41. #define ACPI_PROCESSOR_COMPONENT 0x01000000
  42. #define ACPI_PROCESSOR_CLASS "processor"
  43. #define ACPI_PROCESSOR_FILE_PERFORMANCE "performance"
  44. #define _COMPONENT ACPI_PROCESSOR_COMPONENT
  45. ACPI_MODULE_NAME("processor_perflib");
  46. static DEFINE_MUTEX(performance_mutex);
  47. /* Use cpufreq debug layer for _PPC changes. */
  48. #define cpufreq_printk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_CORE, \
  49. "cpufreq-core", msg)
  50. /*
  51. * _PPC support is implemented as a CPUfreq policy notifier:
  52. * This means each time a CPUfreq driver registered also with
  53. * the ACPI core is asked to change the speed policy, the maximum
  54. * value is adjusted so that it is within the platform limit.
  55. *
  56. * Also, when a new platform limit value is detected, the CPUfreq
  57. * policy is adjusted accordingly.
  58. */
  59. /* ignore_ppc:
  60. * -1 -> cpufreq low level drivers not initialized -> _PSS, etc. not called yet
  61. * ignore _PPC
  62. * 0 -> cpufreq low level drivers initialized -> consider _PPC values
  63. * 1 -> ignore _PPC totally -> forced by user through boot param
  64. */
  65. static int ignore_ppc = -1;
  66. module_param(ignore_ppc, int, 0644);
  67. MODULE_PARM_DESC(ignore_ppc, "If the frequency of your machine gets wrongly" \
  68. "limited by BIOS, this should help");
  69. #define PPC_REGISTERED 1
  70. #define PPC_IN_USE 2
  71. static int acpi_processor_ppc_status;
  72. static int acpi_processor_ppc_notifier(struct notifier_block *nb,
  73. unsigned long event, void *data)
  74. {
  75. struct cpufreq_policy *policy = data;
  76. struct acpi_processor *pr;
  77. unsigned int ppc = 0;
  78. if (event == CPUFREQ_START && ignore_ppc <= 0) {
  79. ignore_ppc = 0;
  80. return 0;
  81. }
  82. if (ignore_ppc)
  83. return 0;
  84. if (event != CPUFREQ_INCOMPATIBLE)
  85. return 0;
  86. mutex_lock(&performance_mutex);
  87. pr = per_cpu(processors, policy->cpu);
  88. if (!pr || !pr->performance)
  89. goto out;
  90. ppc = (unsigned int)pr->performance_platform_limit;
  91. if (ppc >= pr->performance->state_count)
  92. goto out;
  93. cpufreq_verify_within_limits(policy, 0,
  94. pr->performance->states[ppc].
  95. core_frequency * 1000);
  96. out:
  97. mutex_unlock(&performance_mutex);
  98. return 0;
  99. }
/* Registered with cpufreq in acpi_processor_ppc_init(). */
static struct notifier_block acpi_ppc_notifier_block = {
	.notifier_call = acpi_processor_ppc_notifier,
};
  103. static int acpi_processor_get_platform_limit(struct acpi_processor *pr)
  104. {
  105. acpi_status status = 0;
  106. unsigned long long ppc = 0;
  107. if (!pr)
  108. return -EINVAL;
  109. /*
  110. * _PPC indicates the maximum state currently supported by the platform
  111. * (e.g. 0 = states 0..n; 1 = states 1..n; etc.
  112. */
  113. status = acpi_evaluate_integer(pr->handle, "_PPC", NULL, &ppc);
  114. if (status != AE_NOT_FOUND)
  115. acpi_processor_ppc_status |= PPC_IN_USE;
  116. if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
  117. ACPI_EXCEPTION((AE_INFO, status, "Evaluating _PPC"));
  118. return -ENODEV;
  119. }
  120. cpufreq_printk("CPU %d: _PPC is %d - frequency %s limited\n", pr->id,
  121. (int)ppc, ppc ? "" : "not");
  122. pr->performance_platform_limit = (int)ppc;
  123. return 0;
  124. }
  125. int acpi_processor_ppc_has_changed(struct acpi_processor *pr)
  126. {
  127. int ret;
  128. if (ignore_ppc)
  129. return 0;
  130. ret = acpi_processor_get_platform_limit(pr);
  131. if (ret < 0)
  132. return (ret);
  133. else
  134. return cpufreq_update_policy(pr->id);
  135. }
  136. void acpi_processor_ppc_init(void)
  137. {
  138. if (!cpufreq_register_notifier
  139. (&acpi_ppc_notifier_block, CPUFREQ_POLICY_NOTIFIER))
  140. acpi_processor_ppc_status |= PPC_REGISTERED;
  141. else
  142. printk(KERN_DEBUG
  143. "Warning: Processor Platform Limit not supported.\n");
  144. }
  145. void acpi_processor_ppc_exit(void)
  146. {
  147. if (acpi_processor_ppc_status & PPC_REGISTERED)
  148. cpufreq_unregister_notifier(&acpi_ppc_notifier_block,
  149. CPUFREQ_POLICY_NOTIFIER);
  150. acpi_processor_ppc_status &= ~PPC_REGISTERED;
  151. }
  152. static int acpi_processor_get_performance_control(struct acpi_processor *pr)
  153. {
  154. int result = 0;
  155. acpi_status status = 0;
  156. struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
  157. union acpi_object *pct = NULL;
  158. union acpi_object obj = { 0 };
  159. status = acpi_evaluate_object(pr->handle, "_PCT", NULL, &buffer);
  160. if (ACPI_FAILURE(status)) {
  161. ACPI_EXCEPTION((AE_INFO, status, "Evaluating _PCT"));
  162. return -ENODEV;
  163. }
  164. pct = (union acpi_object *)buffer.pointer;
  165. if (!pct || (pct->type != ACPI_TYPE_PACKAGE)
  166. || (pct->package.count != 2)) {
  167. printk(KERN_ERR PREFIX "Invalid _PCT data\n");
  168. result = -EFAULT;
  169. goto end;
  170. }
  171. /*
  172. * control_register
  173. */
  174. obj = pct->package.elements[0];
  175. if ((obj.type != ACPI_TYPE_BUFFER)
  176. || (obj.buffer.length < sizeof(struct acpi_pct_register))
  177. || (obj.buffer.pointer == NULL)) {
  178. printk(KERN_ERR PREFIX "Invalid _PCT data (control_register)\n");
  179. result = -EFAULT;
  180. goto end;
  181. }
  182. memcpy(&pr->performance->control_register, obj.buffer.pointer,
  183. sizeof(struct acpi_pct_register));
  184. /*
  185. * status_register
  186. */
  187. obj = pct->package.elements[1];
  188. if ((obj.type != ACPI_TYPE_BUFFER)
  189. || (obj.buffer.length < sizeof(struct acpi_pct_register))
  190. || (obj.buffer.pointer == NULL)) {
  191. printk(KERN_ERR PREFIX "Invalid _PCT data (status_register)\n");
  192. result = -EFAULT;
  193. goto end;
  194. }
  195. memcpy(&pr->performance->status_register, obj.buffer.pointer,
  196. sizeof(struct acpi_pct_register));
  197. end:
  198. kfree(buffer.pointer);
  199. return result;
  200. }
  201. static int acpi_processor_get_performance_states(struct acpi_processor *pr)
  202. {
  203. int result = 0;
  204. acpi_status status = AE_OK;
  205. struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
  206. struct acpi_buffer format = { sizeof("NNNNNN"), "NNNNNN" };
  207. struct acpi_buffer state = { 0, NULL };
  208. union acpi_object *pss = NULL;
  209. int i;
  210. status = acpi_evaluate_object(pr->handle, "_PSS", NULL, &buffer);
  211. if (ACPI_FAILURE(status)) {
  212. ACPI_EXCEPTION((AE_INFO, status, "Evaluating _PSS"));
  213. return -ENODEV;
  214. }
  215. pss = buffer.pointer;
  216. if (!pss || (pss->type != ACPI_TYPE_PACKAGE)) {
  217. printk(KERN_ERR PREFIX "Invalid _PSS data\n");
  218. result = -EFAULT;
  219. goto end;
  220. }
  221. ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found %d performance states\n",
  222. pss->package.count));
  223. pr->performance->state_count = pss->package.count;
  224. pr->performance->states =
  225. kmalloc(sizeof(struct acpi_processor_px) * pss->package.count,
  226. GFP_KERNEL);
  227. if (!pr->performance->states) {
  228. result = -ENOMEM;
  229. goto end;
  230. }
  231. for (i = 0; i < pr->performance->state_count; i++) {
  232. struct acpi_processor_px *px = &(pr->performance->states[i]);
  233. state.length = sizeof(struct acpi_processor_px);
  234. state.pointer = px;
  235. ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Extracting state %d\n", i));
  236. status = acpi_extract_package(&(pss->package.elements[i]),
  237. &format, &state);
  238. if (ACPI_FAILURE(status)) {
  239. ACPI_EXCEPTION((AE_INFO, status, "Invalid _PSS data"));
  240. result = -EFAULT;
  241. kfree(pr->performance->states);
  242. goto end;
  243. }
  244. ACPI_DEBUG_PRINT((ACPI_DB_INFO,
  245. "State [%d]: core_frequency[%d] power[%d] transition_latency[%d] bus_master_latency[%d] control[0x%x] status[0x%x]\n",
  246. i,
  247. (u32) px->core_frequency,
  248. (u32) px->power,
  249. (u32) px->transition_latency,
  250. (u32) px->bus_master_latency,
  251. (u32) px->control, (u32) px->status));
  252. if (!px->core_frequency) {
  253. printk(KERN_ERR PREFIX
  254. "Invalid _PSS data: freq is zero\n");
  255. result = -EFAULT;
  256. kfree(pr->performance->states);
  257. goto end;
  258. }
  259. }
  260. end:
  261. kfree(buffer.pointer);
  262. return result;
  263. }
  264. static int acpi_processor_get_performance_info(struct acpi_processor *pr)
  265. {
  266. int result = 0;
  267. acpi_status status = AE_OK;
  268. acpi_handle handle = NULL;
  269. if (!pr || !pr->performance || !pr->handle)
  270. return -EINVAL;
  271. status = acpi_get_handle(pr->handle, "_PCT", &handle);
  272. if (ACPI_FAILURE(status)) {
  273. ACPI_DEBUG_PRINT((ACPI_DB_INFO,
  274. "ACPI-based processor performance control unavailable\n"));
  275. return -ENODEV;
  276. }
  277. result = acpi_processor_get_performance_control(pr);
  278. if (result)
  279. goto update_bios;
  280. result = acpi_processor_get_performance_states(pr);
  281. if (result)
  282. goto update_bios;
  283. return 0;
  284. /*
  285. * Having _PPC but missing frequencies (_PSS, _PCT) is a very good hint that
  286. * the BIOS is older than the CPU and does not know its frequencies
  287. */
  288. update_bios:
  289. if (ACPI_SUCCESS(acpi_get_handle(pr->handle, "_PPC", &handle))){
  290. if(boot_cpu_has(X86_FEATURE_EST))
  291. printk(KERN_WARNING FW_BUG "BIOS needs update for CPU "
  292. "frequency support\n");
  293. }
  294. return result;
  295. }
/*
 * acpi_processor_notify_smm - tell SMM firmware the OS takes over P-states
 *
 * Writes FADT.pstate_control to the FADT SMI command port, at most once
 * per boot (tracked via the static is_done).  A module reference on
 * @calling_module is held across the operation; it is kept permanently
 * when the platform uses _PPC (PPC_IN_USE), pinning the cpufreq driver
 * so it cannot be unloaded while the platform may still change limits.
 *
 * Returns 0 on success or when no notification is needed, -EBUSY when
 * the _PPC notifier is not registered, -EINVAL when the module ref
 * cannot be taken, a negative/ACPI status on a failed port write.
 */
int acpi_processor_notify_smm(struct module *calling_module)
{
	acpi_status status;
	static int is_done = 0;

	if (!(acpi_processor_ppc_status & PPC_REGISTERED))
		return -EBUSY;

	if (!try_module_get(calling_module))
		return -EINVAL;

	/* is_done is set to negative if an error occurred,
	 * and to positive if _no_ error occurred, but SMM
	 * was already notified. This avoids double notification
	 * which might lead to unexpected results...
	 */
	if (is_done > 0) {
		module_put(calling_module);
		return 0;
	} else if (is_done < 0) {
		module_put(calling_module);
		return is_done;
	}

	/* Pessimistically assume failure until the write succeeds. */
	is_done = -EIO;

	/* Can't write pstate_control to smi_command if either value is zero */
	if ((!acpi_gbl_FADT.smi_command) || (!acpi_gbl_FADT.pstate_control)) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No SMI port or pstate_control\n"));
		module_put(calling_module);
		return 0;
	}

	ACPI_DEBUG_PRINT((ACPI_DB_INFO,
			  "Writing pstate_control [0x%x] to smi_command [0x%x]\n",
			  acpi_gbl_FADT.pstate_control, acpi_gbl_FADT.smi_command));

	status = acpi_os_write_port(acpi_gbl_FADT.smi_command,
				    (u32) acpi_gbl_FADT.pstate_control, 8);
	if (ACPI_FAILURE(status)) {
		ACPI_EXCEPTION((AE_INFO, status,
				"Failed to write pstate_control [0x%x] to "
				"smi_command [0x%x]", acpi_gbl_FADT.pstate_control,
				acpi_gbl_FADT.smi_command));
		module_put(calling_module);
		/* NOTE(review): returns the positive acpi_status rather than
		 * a negative errno — callers appear to treat nonzero as
		 * failure; confirm before changing. */
		return status;
	}

	/* Success. If there's no _PPC, we need to fear nothing, so
	 * we can allow the cpufreq driver to be rmmod'ed. */
	is_done = 1;

	if (!(acpi_processor_ppc_status & PPC_IN_USE))
		module_put(calling_module);

	return 0;
}

EXPORT_SYMBOL(acpi_processor_notify_smm);
  344. #ifdef CONFIG_X86_ACPI_CPUFREQ_PROC_INTF
  345. /* /proc/acpi/processor/../performance interface (DEPRECATED) */
static int acpi_processor_perf_open_fs(struct inode *inode, struct file *file);

/* File operations for the deprecated /proc 'performance' entry. */
static struct file_operations acpi_processor_perf_fops = {
	.owner = THIS_MODULE,
	.open = acpi_processor_perf_open_fs,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
  354. static int acpi_processor_perf_seq_show(struct seq_file *seq, void *offset)
  355. {
  356. struct acpi_processor *pr = seq->private;
  357. int i;
  358. if (!pr)
  359. goto end;
  360. if (!pr->performance) {
  361. seq_puts(seq, "<not supported>\n");
  362. goto end;
  363. }
  364. seq_printf(seq, "state count: %d\n"
  365. "active state: P%d\n",
  366. pr->performance->state_count, pr->performance->state);
  367. seq_puts(seq, "states:\n");
  368. for (i = 0; i < pr->performance->state_count; i++)
  369. seq_printf(seq,
  370. " %cP%d: %d MHz, %d mW, %d uS\n",
  371. (i == pr->performance->state ? '*' : ' '), i,
  372. (u32) pr->performance->states[i].core_frequency,
  373. (u32) pr->performance->states[i].power,
  374. (u32) pr->performance->states[i].transition_latency);
  375. end:
  376. return 0;
  377. }
/* seq_file open hook: bind the per-processor data stored in the proc
 * directory entry to the show callback. */
static int acpi_processor_perf_open_fs(struct inode *inode, struct file *file)
{
	return single_open(file, acpi_processor_perf_seq_show,
			   PDE(inode)->data);
}
  383. static void acpi_cpufreq_add_file(struct acpi_processor *pr)
  384. {
  385. struct acpi_device *device = NULL;
  386. if (acpi_bus_get_device(pr->handle, &device))
  387. return;
  388. /* add file 'performance' [R/W] */
  389. proc_create_data(ACPI_PROCESSOR_FILE_PERFORMANCE, S_IFREG | S_IRUGO,
  390. acpi_device_dir(device),
  391. &acpi_processor_perf_fops, acpi_driver_data(device));
  392. return;
  393. }
  394. static void acpi_cpufreq_remove_file(struct acpi_processor *pr)
  395. {
  396. struct acpi_device *device = NULL;
  397. if (acpi_bus_get_device(pr->handle, &device))
  398. return;
  399. /* remove file 'performance' */
  400. remove_proc_entry(ACPI_PROCESSOR_FILE_PERFORMANCE,
  401. acpi_device_dir(device));
  402. return;
  403. }
  404. #else
/* Proc interface disabled at build time: provide no-op stubs. */
static void acpi_cpufreq_add_file(struct acpi_processor *pr)
{
	return;
}
static void acpi_cpufreq_remove_file(struct acpi_processor *pr)
{
	return;
}
  413. #endif /* CONFIG_X86_ACPI_CPUFREQ_PROC_INTF */
  414. static int acpi_processor_get_psd(struct acpi_processor *pr)
  415. {
  416. int result = 0;
  417. acpi_status status = AE_OK;
  418. struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
  419. struct acpi_buffer format = {sizeof("NNNNN"), "NNNNN"};
  420. struct acpi_buffer state = {0, NULL};
  421. union acpi_object *psd = NULL;
  422. struct acpi_psd_package *pdomain;
  423. status = acpi_evaluate_object(pr->handle, "_PSD", NULL, &buffer);
  424. if (ACPI_FAILURE(status)) {
  425. return -ENODEV;
  426. }
  427. psd = buffer.pointer;
  428. if (!psd || (psd->type != ACPI_TYPE_PACKAGE)) {
  429. printk(KERN_ERR PREFIX "Invalid _PSD data\n");
  430. result = -EFAULT;
  431. goto end;
  432. }
  433. if (psd->package.count != 1) {
  434. printk(KERN_ERR PREFIX "Invalid _PSD data\n");
  435. result = -EFAULT;
  436. goto end;
  437. }
  438. pdomain = &(pr->performance->domain_info);
  439. state.length = sizeof(struct acpi_psd_package);
  440. state.pointer = pdomain;
  441. status = acpi_extract_package(&(psd->package.elements[0]),
  442. &format, &state);
  443. if (ACPI_FAILURE(status)) {
  444. printk(KERN_ERR PREFIX "Invalid _PSD data\n");
  445. result = -EFAULT;
  446. goto end;
  447. }
  448. if (pdomain->num_entries != ACPI_PSD_REV0_ENTRIES) {
  449. printk(KERN_ERR PREFIX "Unknown _PSD:num_entries\n");
  450. result = -EFAULT;
  451. goto end;
  452. }
  453. if (pdomain->revision != ACPI_PSD_REV0_REVISION) {
  454. printk(KERN_ERR PREFIX "Unknown _PSD:revision\n");
  455. result = -EFAULT;
  456. goto end;
  457. }
  458. end:
  459. kfree(buffer.pointer);
  460. return result;
  461. }
/*
 * acpi_processor_preregister_performance - discover P-state coordination
 * domains before the cpufreq driver registers per-CPU performance data.
 *
 * Three passes over all possible CPUs:
 *   1. evaluate _PSD for every processor present in the ACPI namespace,
 *   2. sanity-check every domain descriptor (revision, entry count,
 *      coordination type),
 *   3. group CPUs whose _PSD domain IDs match, validating that all
 *      members of a domain agree on processor count and coordination
 *      type, and propagate shared_type/shared_cpu_map to each member.
 *
 * On any error, every CPU is reset to an independent SW_ALL domain.
 * pr->performance is always cleared again at the end ("preregister"
 * only computes domain info; the real assignment happens in
 * acpi_processor_register_performance()).
 *
 * Returns 0 on success, -EBUSY if any CPU already has performance data
 * registered, -EINVAL on missing/invalid _PSD data.
 */
int acpi_processor_preregister_performance(
		struct acpi_processor_performance *performance)
{
	int count, count_target;
	int retval = 0;
	unsigned int i, j;
	cpumask_t covered_cpus;
	struct acpi_processor *pr;
	struct acpi_psd_package *pdomain;
	struct acpi_processor *match_pr;
	struct acpi_psd_package *match_pdomain;

	mutex_lock(&performance_mutex);

	retval = 0;

	/* Call _PSD for all CPUs */
	for_each_possible_cpu(i) {
		pr = per_cpu(processors, i);
		if (!pr) {
			/* Look only at processors in ACPI namespace */
			continue;
		}

		if (pr->performance) {
			retval = -EBUSY;
			continue;
		}

		if (!performance || !percpu_ptr(performance, i)) {
			retval = -EINVAL;
			continue;
		}

		/* Temporarily install the caller's per-CPU area so the
		 * _PSD result has somewhere to live; undone in err_ret. */
		pr->performance = percpu_ptr(performance, i);
		cpu_set(i, pr->performance->shared_cpu_map);
		if (acpi_processor_get_psd(pr)) {
			retval = -EINVAL;
			continue;
		}
	}
	if (retval)
		goto err_ret;

	/*
	 * Now that we have _PSD data from all CPUs, lets setup P-state
	 * domain info.
	 */
	for_each_possible_cpu(i) {
		pr = per_cpu(processors, i);
		if (!pr)
			continue;

		/* Basic validity check for domain info */
		pdomain = &(pr->performance->domain_info);
		if ((pdomain->revision != ACPI_PSD_REV0_REVISION) ||
		    (pdomain->num_entries != ACPI_PSD_REV0_ENTRIES)) {
			retval = -EINVAL;
			goto err_ret;
		}
		if (pdomain->coord_type != DOMAIN_COORD_TYPE_SW_ALL &&
		    pdomain->coord_type != DOMAIN_COORD_TYPE_SW_ANY &&
		    pdomain->coord_type != DOMAIN_COORD_TYPE_HW_ALL) {
			retval = -EINVAL;
			goto err_ret;
		}
	}

	/* Group CPUs by domain; covered_cpus tracks CPUs already placed. */
	cpus_clear(covered_cpus);
	for_each_possible_cpu(i) {
		pr = per_cpu(processors, i);
		if (!pr)
			continue;

		if (cpu_isset(i, covered_cpus))
			continue;

		pdomain = &(pr->performance->domain_info);
		cpu_set(i, pr->performance->shared_cpu_map);
		cpu_set(i, covered_cpus);
		/* Single-CPU domain: nothing to coordinate. */
		if (pdomain->num_processors <= 1)
			continue;

		/* Validate the Domain info */
		count_target = pdomain->num_processors;
		count = 1;
		if (pdomain->coord_type == DOMAIN_COORD_TYPE_SW_ALL)
			pr->performance->shared_type = CPUFREQ_SHARED_TYPE_ALL;
		else if (pdomain->coord_type == DOMAIN_COORD_TYPE_HW_ALL)
			pr->performance->shared_type = CPUFREQ_SHARED_TYPE_HW;
		else if (pdomain->coord_type == DOMAIN_COORD_TYPE_SW_ANY)
			pr->performance->shared_type = CPUFREQ_SHARED_TYPE_ANY;

		/* Collect all other CPUs claiming the same domain ID. */
		for_each_possible_cpu(j) {
			if (i == j)
				continue;

			match_pr = per_cpu(processors, j);
			if (!match_pr)
				continue;

			match_pdomain = &(match_pr->performance->domain_info);
			if (match_pdomain->domain != pdomain->domain)
				continue;

			/* Here i and j are in the same domain */

			if (match_pdomain->num_processors != count_target) {
				retval = -EINVAL;
				goto err_ret;
			}

			if (pdomain->coord_type != match_pdomain->coord_type) {
				retval = -EINVAL;
				goto err_ret;
			}

			cpu_set(j, covered_cpus);
			cpu_set(j, pr->performance->shared_cpu_map);
			count++;
		}

		/* Second sweep: copy the finished shared_type/map to every
		 * other member of this domain. */
		for_each_possible_cpu(j) {
			if (i == j)
				continue;

			match_pr = per_cpu(processors, j);
			if (!match_pr)
				continue;

			match_pdomain = &(match_pr->performance->domain_info);
			if (match_pdomain->domain != pdomain->domain)
				continue;

			match_pr->performance->shared_type =
					pr->performance->shared_type;
			match_pr->performance->shared_cpu_map =
				pr->performance->shared_cpu_map;
		}
	}

err_ret:
	for_each_possible_cpu(i) {
		pr = per_cpu(processors, i);
		if (!pr || !pr->performance)
			continue;

		/* Assume no coordination on any error parsing domain info */
		if (retval) {
			cpus_clear(pr->performance->shared_cpu_map);
			cpu_set(i, pr->performance->shared_cpu_map);
			pr->performance->shared_type = CPUFREQ_SHARED_TYPE_ALL;
		}
		pr->performance = NULL; /* Will be set for real in register */
	}

	mutex_unlock(&performance_mutex);
	return retval;
}

EXPORT_SYMBOL(acpi_processor_preregister_performance);
  596. int
  597. acpi_processor_register_performance(struct acpi_processor_performance
  598. *performance, unsigned int cpu)
  599. {
  600. struct acpi_processor *pr;
  601. if (!(acpi_processor_ppc_status & PPC_REGISTERED))
  602. return -EINVAL;
  603. mutex_lock(&performance_mutex);
  604. pr = per_cpu(processors, cpu);
  605. if (!pr) {
  606. mutex_unlock(&performance_mutex);
  607. return -ENODEV;
  608. }
  609. if (pr->performance) {
  610. mutex_unlock(&performance_mutex);
  611. return -EBUSY;
  612. }
  613. WARN_ON(!performance);
  614. pr->performance = performance;
  615. if (acpi_processor_get_performance_info(pr)) {
  616. pr->performance = NULL;
  617. mutex_unlock(&performance_mutex);
  618. return -EIO;
  619. }
  620. acpi_cpufreq_add_file(pr);
  621. mutex_unlock(&performance_mutex);
  622. return 0;
  623. }
  624. EXPORT_SYMBOL(acpi_processor_register_performance);
  625. void
  626. acpi_processor_unregister_performance(struct acpi_processor_performance
  627. *performance, unsigned int cpu)
  628. {
  629. struct acpi_processor *pr;
  630. mutex_lock(&performance_mutex);
  631. pr = per_cpu(processors, cpu);
  632. if (!pr) {
  633. mutex_unlock(&performance_mutex);
  634. return;
  635. }
  636. if (pr->performance)
  637. kfree(pr->performance->states);
  638. pr->performance = NULL;
  639. acpi_cpufreq_remove_file(pr);
  640. mutex_unlock(&performance_mutex);
  641. return;
  642. }
  643. EXPORT_SYMBOL(acpi_processor_unregister_performance);