
/*
 * processor_perflib.c - ACPI Processor P-States Library ($Revision: 71 $)
 *
 *  Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
 *  Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
 *  Copyright (C) 2004       Dominik Brodowski <linux@brodo.de>
 *  Copyright (C) 2004  Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 *                      - Added processor hotplug support
 *
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or (at
 *  your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful, but
 *  WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *  General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License along
 *  with this program; if not, write to the Free Software Foundation, Inc.,
 *  59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
 *
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/cpufreq.h>

#ifdef CONFIG_X86_ACPI_CPUFREQ_PROC_INTF
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/mutex.h>

#include <asm/uaccess.h>
#endif

#include <acpi/acpi_bus.h>
#include <acpi/processor.h>

#define ACPI_PROCESSOR_COMPONENT        0x01000000
#define ACPI_PROCESSOR_CLASS            "processor"
#define ACPI_PROCESSOR_DRIVER_NAME      "ACPI Processor Driver"
#define ACPI_PROCESSOR_FILE_PERFORMANCE "performance"
#define _COMPONENT                      ACPI_PROCESSOR_COMPONENT
ACPI_MODULE_NAME("acpi_processor")

static DEFINE_MUTEX(performance_mutex);

/*
 * _PPC support is implemented as a CPUfreq policy notifier:
 * each time a CPUfreq driver that is also registered with the ACPI
 * core is asked to change its policy, the maximum frequency is
 * adjusted so that it stays within the platform limit reported by _PPC.
 *
 * Also, when a new platform limit value is detected, the CPUfreq
 * policy is adjusted accordingly.
 */

#define PPC_REGISTERED   1
#define PPC_IN_USE       2

static int acpi_processor_ppc_status = 0;

static int acpi_processor_ppc_notifier(struct notifier_block *nb,
                                       unsigned long event, void *data)
{
        struct cpufreq_policy *policy = data;
        struct acpi_processor *pr;
        unsigned int ppc = 0;

        mutex_lock(&performance_mutex);

        if (event != CPUFREQ_INCOMPATIBLE)
                goto out;

        pr = processors[policy->cpu];
        if (!pr || !pr->performance)
                goto out;

        ppc = (unsigned int)pr->performance_platform_limit;
        if (!ppc)
                goto out;

        if (ppc > pr->performance->state_count)
                goto out;

        cpufreq_verify_within_limits(policy, 0,
                                     pr->performance->states[ppc].
                                     core_frequency * 1000);

out:
        mutex_unlock(&performance_mutex);

        return 0;
}

static struct notifier_block acpi_ppc_notifier_block = {
        .notifier_call = acpi_processor_ppc_notifier,
};

static int acpi_processor_get_platform_limit(struct acpi_processor *pr)
{
        acpi_status status = 0;
        unsigned long ppc = 0;

        ACPI_FUNCTION_TRACE("acpi_processor_get_platform_limit");

        if (!pr)
                return_VALUE(-EINVAL);

        /*
         * _PPC indicates the maximum state currently supported by the
         * platform (e.g. 0 = states 0..n; 1 = states 1..n; etc.).
         */
        status = acpi_evaluate_integer(pr->handle, "_PPC", NULL, &ppc);

        if (status != AE_NOT_FOUND)
                acpi_processor_ppc_status |= PPC_IN_USE;

        if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
                ACPI_EXCEPTION((AE_INFO, status, "Evaluating _PPC"));
                return_VALUE(-ENODEV);
        }

        pr->performance_platform_limit = (int)ppc;

        return_VALUE(0);
}
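
/*
 * Illustrative example (a sketch, not taken from the code above): on a
 * CPU exposing four P-states P0..P3, a _PPC return value of 2 limits
 * the usable range to P2..P3; the policy notifier above then clamps
 * policy->max to states[2].core_frequency * 1000 (kHz) on the next
 * policy update, e.g. one triggered by acpi_processor_ppc_has_changed()
 * below.
 */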

int acpi_processor_ppc_has_changed(struct acpi_processor *pr)
{
        int ret = acpi_processor_get_platform_limit(pr);

        if (ret < 0)
                return (ret);
        else
                return cpufreq_update_policy(pr->id);
}
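
/*
 * Typical caller (a sketch, assuming the ACPI processor driver's notify
 * handler): when the platform raises a performance notification against
 * a processor object, the driver is expected to call
 * acpi_processor_ppc_has_changed(pr) so that the new _PPC value is
 * re-read and cpufreq re-evaluates the policy against it.
 */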

void acpi_processor_ppc_init(void)
{
        if (!cpufreq_register_notifier
            (&acpi_ppc_notifier_block, CPUFREQ_POLICY_NOTIFIER))
                acpi_processor_ppc_status |= PPC_REGISTERED;
        else
                printk(KERN_DEBUG
                       "Warning: Processor Platform Limit not supported.\n");
}

void acpi_processor_ppc_exit(void)
{
        if (acpi_processor_ppc_status & PPC_REGISTERED)
                cpufreq_unregister_notifier(&acpi_ppc_notifier_block,
                                            CPUFREQ_POLICY_NOTIFIER);

        acpi_processor_ppc_status &= ~PPC_REGISTERED;
}

static int acpi_processor_get_performance_control(struct acpi_processor *pr)
{
        int result = 0;
        acpi_status status = 0;
        struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
        union acpi_object *pct = NULL;
        union acpi_object obj = { 0 };

        ACPI_FUNCTION_TRACE("acpi_processor_get_performance_control");

        status = acpi_evaluate_object(pr->handle, "_PCT", NULL, &buffer);
        if (ACPI_FAILURE(status)) {
                ACPI_EXCEPTION((AE_INFO, status, "Evaluating _PCT"));
                return_VALUE(-ENODEV);
        }

        pct = (union acpi_object *)buffer.pointer;
        if (!pct || (pct->type != ACPI_TYPE_PACKAGE)
            || (pct->package.count != 2)) {
                printk(KERN_ERR PREFIX "Invalid _PCT data\n");
                result = -EFAULT;
                goto end;
        }

        /*
         * control_register
         */
        obj = pct->package.elements[0];

        if ((obj.type != ACPI_TYPE_BUFFER)
            || (obj.buffer.length < sizeof(struct acpi_pct_register))
            || (obj.buffer.pointer == NULL)) {
                printk(KERN_ERR PREFIX "Invalid _PCT data (control_register)\n");
                result = -EFAULT;
                goto end;
        }
        memcpy(&pr->performance->control_register, obj.buffer.pointer,
               sizeof(struct acpi_pct_register));

        /*
         * status_register
         */
        obj = pct->package.elements[1];

        if ((obj.type != ACPI_TYPE_BUFFER)
            || (obj.buffer.length < sizeof(struct acpi_pct_register))
            || (obj.buffer.pointer == NULL)) {
                printk(KERN_ERR PREFIX "Invalid _PCT data (status_register)\n");
                result = -EFAULT;
                goto end;
        }
        memcpy(&pr->performance->status_register, obj.buffer.pointer,
               sizeof(struct acpi_pct_register));

end:
        acpi_os_free(buffer.pointer);

        return_VALUE(result);
}
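
/*
 * For reference (paraphrasing the ACPI spec rather than the code above):
 * _PCT is expected to return a package of exactly two buffers, each
 * holding a register descriptor. Element 0 describes the
 * performance-control register and element 1 the performance-status
 * register; both are copied verbatim into struct acpi_pct_register.
 */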

static int acpi_processor_get_performance_states(struct acpi_processor *pr)
{
        int result = 0;
        acpi_status status = AE_OK;
        struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
        struct acpi_buffer format = { sizeof("NNNNNN"), "NNNNNN" };
        struct acpi_buffer state = { 0, NULL };
        union acpi_object *pss = NULL;
        int i;

        ACPI_FUNCTION_TRACE("acpi_processor_get_performance_states");

        status = acpi_evaluate_object(pr->handle, "_PSS", NULL, &buffer);
        if (ACPI_FAILURE(status)) {
                ACPI_EXCEPTION((AE_INFO, status, "Evaluating _PSS"));
                return_VALUE(-ENODEV);
        }

        pss = (union acpi_object *)buffer.pointer;
        if (!pss || (pss->type != ACPI_TYPE_PACKAGE)) {
                printk(KERN_ERR PREFIX "Invalid _PSS data\n");
                result = -EFAULT;
                goto end;
        }

        ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found %d performance states\n",
                          pss->package.count));

        pr->performance->state_count = pss->package.count;
        pr->performance->states =
            kmalloc(sizeof(struct acpi_processor_px) * pss->package.count,
                    GFP_KERNEL);
        if (!pr->performance->states) {
                result = -ENOMEM;
                goto end;
        }

        for (i = 0; i < pr->performance->state_count; i++) {
                struct acpi_processor_px *px = &(pr->performance->states[i]);

                state.length = sizeof(struct acpi_processor_px);
                state.pointer = px;

                ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Extracting state %d\n", i));

                status = acpi_extract_package(&(pss->package.elements[i]),
                                              &format, &state);
                if (ACPI_FAILURE(status)) {
                        ACPI_EXCEPTION((AE_INFO, status, "Invalid _PSS data"));
                        result = -EFAULT;
                        kfree(pr->performance->states);
                        goto end;
                }

                ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                                  "State [%d]: core_frequency[%d] power[%d] transition_latency[%d] bus_master_latency[%d] control[0x%x] status[0x%x]\n",
                                  i,
                                  (u32) px->core_frequency,
                                  (u32) px->power,
                                  (u32) px->transition_latency,
                                  (u32) px->bus_master_latency,
                                  (u32) px->control, (u32) px->status));

                if (!px->core_frequency) {
                        printk(KERN_ERR PREFIX
                               "Invalid _PSS data: freq is zero\n");
                        result = -EFAULT;
                        kfree(pr->performance->states);
                        goto end;
                }
        }

end:
        acpi_os_free(buffer.pointer);

        return_VALUE(result);
}
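
/*
 * For reference: each _PSS package entry is six integers (hence the
 * "NNNNNN" extraction format above), stored in struct acpi_processor_px
 * as core_frequency (MHz), power (mW), transition_latency (us),
 * bus_master_latency (us), control and status. The units are the ones
 * assumed by the /proc output and the debug print in this file.
 */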

static int acpi_processor_get_performance_info(struct acpi_processor *pr)
{
        int result = 0;
        acpi_status status = AE_OK;
        acpi_handle handle = NULL;

        ACPI_FUNCTION_TRACE("acpi_processor_get_performance_info");

        if (!pr || !pr->performance || !pr->handle)
                return_VALUE(-EINVAL);

        status = acpi_get_handle(pr->handle, "_PCT", &handle);
        if (ACPI_FAILURE(status)) {
                ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                                  "ACPI-based processor performance control unavailable\n"));
                return_VALUE(-ENODEV);
        }

        result = acpi_processor_get_performance_control(pr);
        if (result)
                return_VALUE(result);

        result = acpi_processor_get_performance_states(pr);
        if (result)
                return_VALUE(result);

        result = acpi_processor_get_platform_limit(pr);
        if (result)
                return_VALUE(result);

        return_VALUE(0);
}

int acpi_processor_notify_smm(struct module *calling_module)
{
        acpi_status status;
        static int is_done = 0;

        ACPI_FUNCTION_TRACE("acpi_processor_notify_smm");

        if (!(acpi_processor_ppc_status & PPC_REGISTERED))
                return_VALUE(-EBUSY);

        if (!try_module_get(calling_module))
                return_VALUE(-EINVAL);

        /* is_done is set to negative if an error occurred,
         * and to positive if _no_ error occurred, but SMM
         * was already notified. This avoids double notification
         * which might lead to unexpected results...
         */
        if (is_done > 0) {
                module_put(calling_module);
                return_VALUE(0);
        } else if (is_done < 0) {
                module_put(calling_module);
                return_VALUE(is_done);
        }

        is_done = -EIO;

        /* Can't write pstate_cnt to smi_cmd if either value is zero */
        if ((!acpi_fadt.smi_cmd) || (!acpi_fadt.pstate_cnt)) {
                ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No SMI port or pstate_cnt\n"));
                module_put(calling_module);
                return_VALUE(0);
        }

        ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                          "Writing pstate_cnt [0x%x] to smi_cmd [0x%x]\n",
                          acpi_fadt.pstate_cnt, acpi_fadt.smi_cmd));

        /* FADT v1 doesn't support pstate_cnt, but many BIOS vendors use
         * it anyway, so we need to support it... */
        if (acpi_fadt_is_v1) {
                ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                                  "Using v1.0 FADT reserved value for pstate_cnt\n"));
        }

        status = acpi_os_write_port(acpi_fadt.smi_cmd,
                                    (u32) acpi_fadt.pstate_cnt, 8);
        if (ACPI_FAILURE(status)) {
                ACPI_EXCEPTION((AE_INFO, status,
                                "Failed to write pstate_cnt [0x%x] to "
                                "smi_cmd [0x%x]", acpi_fadt.pstate_cnt,
                                acpi_fadt.smi_cmd));
                module_put(calling_module);
                return_VALUE(status);
        }

        /* Success. If there is no _PPC, there is nothing to fear, so
         * the cpufreq driver may safely be rmmod'ed. */
        is_done = 1;

        if (!(acpi_processor_ppc_status & PPC_IN_USE))
                module_put(calling_module);

        return_VALUE(0);
}

EXPORT_SYMBOL(acpi_processor_notify_smm);
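
/*
 * Usage sketch (an assumption about the calling convention, inferred
 * from the code above): a cpufreq driver that drives ACPI P-states
 * calls
 *
 *      acpi_processor_notify_smm(THIS_MODULE);
 *
 * once after registering its performance data, to hand P-state control
 * over from SMM firmware to the OS. On success with _PPC in use, the
 * module reference taken here keeps the driver pinned so the
 * platform-limit mechanism stays usable.
 */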

#ifdef CONFIG_X86_ACPI_CPUFREQ_PROC_INTF
/* /proc/acpi/processor/../performance interface (DEPRECATED) */

static int acpi_processor_perf_open_fs(struct inode *inode, struct file *file);
static struct file_operations acpi_processor_perf_fops = {
        .open = acpi_processor_perf_open_fs,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = single_release,
};

static int acpi_processor_perf_seq_show(struct seq_file *seq, void *offset)
{
        struct acpi_processor *pr = (struct acpi_processor *)seq->private;
        int i;

        ACPI_FUNCTION_TRACE("acpi_processor_perf_seq_show");

        if (!pr)
                goto end;

        if (!pr->performance) {
                seq_puts(seq, "<not supported>\n");
                goto end;
        }

        seq_printf(seq, "state count: %d\n"
                   "active state: P%d\n",
                   pr->performance->state_count, pr->performance->state);

        seq_puts(seq, "states:\n");
        for (i = 0; i < pr->performance->state_count; i++)
                seq_printf(seq,
                           " %cP%d: %d MHz, %d mW, %d uS\n",
                           (i == pr->performance->state ? '*' : ' '), i,
                           (u32) pr->performance->states[i].core_frequency,
                           (u32) pr->performance->states[i].power,
                           (u32) pr->performance->states[i].transition_latency);

end:
        return_VALUE(0);
}

static int acpi_processor_perf_open_fs(struct inode *inode, struct file *file)
{
        return single_open(file, acpi_processor_perf_seq_show,
                           PDE(inode)->data);
}

static ssize_t
acpi_processor_write_performance(struct file *file,
                                 const char __user * buffer,
                                 size_t count, loff_t * data)
{
        int result = 0;
        struct seq_file *m = (struct seq_file *)file->private_data;
        struct acpi_processor *pr = (struct acpi_processor *)m->private;
        struct acpi_processor_performance *perf;
        char state_string[12] = { '\0' };
        unsigned int new_state = 0;
        struct cpufreq_policy policy;

        ACPI_FUNCTION_TRACE("acpi_processor_write_performance");

        if (!pr || (count > sizeof(state_string) - 1))
                return_VALUE(-EINVAL);

        perf = pr->performance;
        if (!perf)
                return_VALUE(-EINVAL);

        if (copy_from_user(state_string, buffer, count))
                return_VALUE(-EFAULT);

        state_string[count] = '\0';
        new_state = simple_strtoul(state_string, NULL, 0);

        if (new_state >= perf->state_count)
                return_VALUE(-EINVAL);

        cpufreq_get_policy(&policy, pr->id);

        policy.cpu = pr->id;
        policy.min = perf->states[new_state].core_frequency * 1000;
        policy.max = perf->states[new_state].core_frequency * 1000;

        result = cpufreq_set_policy(&policy);
        if (result)
                return_VALUE(result);

        return_VALUE(count);
}
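
/*
 * Example (illustrative only; the directory name depends on the
 * processor object's name in the ACPI namespace): forcing P-state 2 on
 * the first CPU through this deprecated interface would look like
 *
 *      echo 2 > /proc/acpi/processor/CPU0/performance
 *
 * which pins both policy.min and policy.max to that state's frequency
 * via cpufreq_set_policy() above.
 */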

static void acpi_cpufreq_add_file(struct acpi_processor *pr)
{
        struct proc_dir_entry *entry = NULL;
        struct acpi_device *device = NULL;

        ACPI_FUNCTION_TRACE("acpi_cpufreq_addfile");

        if (acpi_bus_get_device(pr->handle, &device))
                return_VOID;

        /* add file 'performance' [R/W] */
        entry = create_proc_entry(ACPI_PROCESSOR_FILE_PERFORMANCE,
                                  S_IFREG | S_IRUGO | S_IWUSR,
                                  acpi_device_dir(device));
        if (entry) {
                acpi_processor_perf_fops.write = acpi_processor_write_performance;
                entry->proc_fops = &acpi_processor_perf_fops;
                entry->data = acpi_driver_data(device);
                entry->owner = THIS_MODULE;
        }

        return_VOID;
}

static void acpi_cpufreq_remove_file(struct acpi_processor *pr)
{
        struct acpi_device *device = NULL;

        ACPI_FUNCTION_TRACE("acpi_cpufreq_removefile");

        if (acpi_bus_get_device(pr->handle, &device))
                return_VOID;

        /* remove file 'performance' */
        remove_proc_entry(ACPI_PROCESSOR_FILE_PERFORMANCE,
                          acpi_device_dir(device));

        return_VOID;
}

#else
static void acpi_cpufreq_add_file(struct acpi_processor *pr)
{
        return;
}
static void acpi_cpufreq_remove_file(struct acpi_processor *pr)
{
        return;
}
#endif /* CONFIG_X86_ACPI_CPUFREQ_PROC_INTF */

static int acpi_processor_get_psd(struct acpi_processor *pr)
{
        int result = 0;
        acpi_status status = AE_OK;
        struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
        struct acpi_buffer format = { sizeof("NNNNN"), "NNNNN" };
        struct acpi_buffer state = { 0, NULL };
        union acpi_object *psd = NULL;
        struct acpi_psd_package *pdomain;

        status = acpi_evaluate_object(pr->handle, "_PSD", NULL, &buffer);
        if (ACPI_FAILURE(status)) {
                return -ENODEV;
        }

        psd = (union acpi_object *)buffer.pointer;
        if (!psd || (psd->type != ACPI_TYPE_PACKAGE)) {
                ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Invalid _PSD data\n"));
                result = -EFAULT;
                goto end;
        }

        if (psd->package.count != 1) {
                ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Invalid _PSD data\n"));
                result = -EFAULT;
                goto end;
        }

        pdomain = &(pr->performance->domain_info);

        state.length = sizeof(struct acpi_psd_package);
        state.pointer = pdomain;

        status = acpi_extract_package(&(psd->package.elements[0]),
                                      &format, &state);
        if (ACPI_FAILURE(status)) {
                ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Invalid _PSD data\n"));
                result = -EFAULT;
                goto end;
        }

        if (pdomain->num_entries != ACPI_PSD_REV0_ENTRIES) {
                ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Unknown _PSD:num_entries\n"));
                result = -EFAULT;
                goto end;
        }

        if (pdomain->revision != ACPI_PSD_REV0_REVISION) {
                ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Unknown _PSD:revision\n"));
                result = -EFAULT;
                goto end;
        }

end:
        acpi_os_free(buffer.pointer);
        return result;
}
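
/*
 * For reference: a _PSD entry is five integers (the "NNNNN" format
 * above): NumEntries, Revision, Domain, CoordType and NumProcessors.
 * They are extracted straight into struct acpi_psd_package as
 * num_entries, revision, domain, coord_type and num_processors, which
 * the preregister code below validates and uses to group CPUs into
 * coordination domains.
 */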

int acpi_processor_preregister_performance(
                struct acpi_processor_performance **performance)
{
        int count, count_target;
        int retval = 0;
        unsigned int i, j;
        cpumask_t covered_cpus;
        struct acpi_processor *pr;
        struct acpi_psd_package *pdomain;
        struct acpi_processor *match_pr;
        struct acpi_psd_package *match_pdomain;

        mutex_lock(&performance_mutex);

        retval = 0;

        /* Call _PSD for all CPUs */
        for_each_possible_cpu(i) {
                pr = processors[i];
                if (!pr) {
                        /* Look only at processors in ACPI namespace */
                        continue;
                }

                if (pr->performance) {
                        retval = -EBUSY;
                        continue;
                }

                if (!performance || !performance[i]) {
                        retval = -EINVAL;
                        continue;
                }

                pr->performance = performance[i];
                cpu_set(i, pr->performance->shared_cpu_map);
                if (acpi_processor_get_psd(pr)) {
                        retval = -EINVAL;
                        continue;
                }
        }
        if (retval)
                goto err_ret;

        /*
         * Now that we have _PSD data from all CPUs, let's set up the
         * P-state domain info.
         */
        for_each_possible_cpu(i) {
                pr = processors[i];
                if (!pr)
                        continue;

                /* Basic validity check for domain info */
                pdomain = &(pr->performance->domain_info);
                if ((pdomain->revision != ACPI_PSD_REV0_REVISION) ||
                    (pdomain->num_entries != ACPI_PSD_REV0_ENTRIES)) {
                        retval = -EINVAL;
                        goto err_ret;
                }
                if (pdomain->coord_type != DOMAIN_COORD_TYPE_SW_ALL &&
                    pdomain->coord_type != DOMAIN_COORD_TYPE_SW_ANY &&
                    pdomain->coord_type != DOMAIN_COORD_TYPE_HW_ALL) {
                        retval = -EINVAL;
                        goto err_ret;
                }
        }

        cpus_clear(covered_cpus);
        for_each_possible_cpu(i) {
                pr = processors[i];
                if (!pr)
                        continue;

                if (cpu_isset(i, covered_cpus))
                        continue;

                pdomain = &(pr->performance->domain_info);
                cpu_set(i, pr->performance->shared_cpu_map);
                cpu_set(i, covered_cpus);
                if (pdomain->num_processors <= 1)
                        continue;

                /* Validate the Domain info */
                count_target = pdomain->num_processors;
                count = 1;
                if (pdomain->coord_type == DOMAIN_COORD_TYPE_SW_ALL)
                        pr->performance->shared_type = CPUFREQ_SHARED_TYPE_ALL;
                else if (pdomain->coord_type == DOMAIN_COORD_TYPE_HW_ALL)
                        pr->performance->shared_type = CPUFREQ_SHARED_TYPE_HW;
                else if (pdomain->coord_type == DOMAIN_COORD_TYPE_SW_ANY)
                        pr->performance->shared_type = CPUFREQ_SHARED_TYPE_ANY;

                for_each_possible_cpu(j) {
                        if (i == j)
                                continue;

                        match_pr = processors[j];
                        if (!match_pr)
                                continue;

                        match_pdomain = &(match_pr->performance->domain_info);
                        if (match_pdomain->domain != pdomain->domain)
                                continue;

                        /* Here i and j are in the same domain */

                        if (match_pdomain->num_processors != count_target) {
                                retval = -EINVAL;
                                goto err_ret;
                        }

                        if (pdomain->coord_type != match_pdomain->coord_type) {
                                retval = -EINVAL;
                                goto err_ret;
                        }

                        cpu_set(j, covered_cpus);
                        cpu_set(j, pr->performance->shared_cpu_map);
                        count++;
                }

                for_each_possible_cpu(j) {
                        if (i == j)
                                continue;

                        match_pr = processors[j];
                        if (!match_pr)
                                continue;

                        match_pdomain = &(match_pr->performance->domain_info);
                        if (match_pdomain->domain != pdomain->domain)
                                continue;

                        match_pr->performance->shared_type =
                            pr->performance->shared_type;
                        match_pr->performance->shared_cpu_map =
                            pr->performance->shared_cpu_map;
                }
        }

err_ret:
        if (retval) {
                ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
                                  "Error while parsing _PSD domain information. Assuming no coordination\n"));
        }

        for_each_possible_cpu(i) {
                pr = processors[i];
                if (!pr || !pr->performance)
                        continue;

                /* Assume no coordination on any error parsing domain info */
                if (retval) {
                        cpus_clear(pr->performance->shared_cpu_map);
                        cpu_set(i, pr->performance->shared_cpu_map);
                        pr->performance->shared_type = CPUFREQ_SHARED_TYPE_ALL;
                }
                pr->performance = NULL; /* Will be set for real in register */
        }

        mutex_unlock(&performance_mutex);
        return retval;
}

EXPORT_SYMBOL(acpi_processor_preregister_performance);
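
/*
 * Usage sketch (an assumption about the calling convention, inferred
 * from the checks above): a cpufreq driver allocates one struct
 * acpi_processor_performance per possible CPU, builds an array indexed
 * by CPU number, and calls acpi_processor_preregister_performance()
 * once before any per-CPU registration. On return, each structure's
 * shared_cpu_map and shared_type describe its _PSD coordination domain,
 * while pr->performance itself is left NULL until
 * acpi_processor_register_performance() is called.
 */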

int
acpi_processor_register_performance(struct acpi_processor_performance
                                    *performance, unsigned int cpu)
{
        struct acpi_processor *pr;

        ACPI_FUNCTION_TRACE("acpi_processor_register_performance");

        if (!(acpi_processor_ppc_status & PPC_REGISTERED))
                return_VALUE(-EINVAL);

        mutex_lock(&performance_mutex);

        pr = processors[cpu];
        if (!pr) {
                mutex_unlock(&performance_mutex);
                return_VALUE(-ENODEV);
        }

        if (pr->performance) {
                mutex_unlock(&performance_mutex);
                return_VALUE(-EBUSY);
        }

        WARN_ON(!performance);

        pr->performance = performance;

        if (acpi_processor_get_performance_info(pr)) {
                pr->performance = NULL;
                mutex_unlock(&performance_mutex);
                return_VALUE(-EIO);
        }

        acpi_cpufreq_add_file(pr);

        mutex_unlock(&performance_mutex);
        return_VALUE(0);
}

EXPORT_SYMBOL(acpi_processor_register_performance);

void
acpi_processor_unregister_performance(struct acpi_processor_performance
                                      *performance, unsigned int cpu)
{
        struct acpi_processor *pr;

        ACPI_FUNCTION_TRACE("acpi_processor_unregister_performance");

        mutex_lock(&performance_mutex);

        pr = processors[cpu];
        if (!pr) {
                mutex_unlock(&performance_mutex);
                return_VOID;
        }

        if (pr->performance)
                kfree(pr->performance->states);
        pr->performance = NULL;

        acpi_cpufreq_remove_file(pr);

        mutex_unlock(&performance_mutex);

        return_VOID;
}

EXPORT_SYMBOL(acpi_processor_unregister_performance);
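
/*
 * Usage sketch (illustrative only; the exact driver hooks and the
 * per_cpu_perf name are assumptions): in a cpufreq driver built on this
 * library, the ->init callback would typically do
 *
 *      per_cpu_perf = kzalloc(sizeof(*per_cpu_perf), GFP_KERNEL);
 *      if (acpi_processor_register_performance(per_cpu_perf, policy->cpu))
 *              goto err_free;
 *      (then build its cpufreq frequency table from per_cpu_perf->states[],
 *       using states[i].core_frequency * 1000 as the kHz value)
 *
 * and the matching ->exit callback would call
 * acpi_processor_unregister_performance(per_cpu_perf, policy->cpu)
 * before freeing per_cpu_perf.
 */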