processor_core.c

/*
 * acpi_processor.c - ACPI Processor Driver ($Revision: 71 $)
 *
 *  Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
 *  Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
 *  Copyright (C) 2004       Dominik Brodowski <linux@brodo.de>
 *  Copyright (C) 2004  Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 *			- Added processor hotplug support
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or (at
 *  your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful, but
 *  WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *  General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License along
 *  with this program; if not, write to the Free Software Foundation, Inc.,
 *  59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *  TBD:
 *	1. Make # power states dynamic.
 *	2. Support duty_cycle values that span bit 4.
 *	3. Optimize by having scheduler determine busyness instead of
 *	   having us try to calculate it here.
 *	4. Need C1 timing -- must modify kernel (IRQ handler) to get this.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/pm.h>
#include <linux/cpufreq.h>
#include <linux/cpu.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/dmi.h>
#include <linux/moduleparam.h>
#include <linux/cpuidle.h>

#include <asm/io.h>
#include <asm/system.h>
#include <asm/cpu.h>
#include <asm/delay.h>
#include <asm/uaccess.h>
#include <asm/processor.h>
#include <asm/smp.h>
#include <asm/acpi.h>

#include <acpi/acpi_bus.h>
#include <acpi/acpi_drivers.h>
#include <acpi/processor.h>

#define ACPI_PROCESSOR_COMPONENT	0x01000000
#define ACPI_PROCESSOR_CLASS		"processor"
#define ACPI_PROCESSOR_DEVICE_NAME	"Processor"
#define ACPI_PROCESSOR_FILE_INFO	"info"
#define ACPI_PROCESSOR_FILE_THROTTLING	"throttling"
#define ACPI_PROCESSOR_FILE_LIMIT	"limit"
#define ACPI_PROCESSOR_NOTIFY_PERFORMANCE 0x80
#define ACPI_PROCESSOR_NOTIFY_POWER	0x81
#define ACPI_PROCESSOR_NOTIFY_THROTTLING 0x82
#define ACPI_PROCESSOR_LIMIT_USER	0
#define ACPI_PROCESSOR_LIMIT_THERMAL	1

#define _COMPONENT		ACPI_PROCESSOR_COMPONENT
ACPI_MODULE_NAME("processor_core");

MODULE_AUTHOR("Paul Diefenbaugh");
MODULE_DESCRIPTION("ACPI Processor Driver");
MODULE_LICENSE("GPL");

static int acpi_processor_add(struct acpi_device *device);
static int acpi_processor_start(struct acpi_device *device);
static int acpi_processor_remove(struct acpi_device *device, int type);
static int acpi_processor_info_open_fs(struct inode *inode, struct file *file);
static void acpi_processor_notify(acpi_handle handle, u32 event, void *data);
static acpi_status acpi_processor_hotadd_init(acpi_handle handle, int *p_cpu);
static int acpi_processor_handle_eject(struct acpi_processor *pr);

static const struct acpi_device_id processor_device_ids[] = {
	{ACPI_PROCESSOR_HID, 0},
	{"", 0},
};
MODULE_DEVICE_TABLE(acpi, processor_device_ids);

static struct acpi_driver acpi_processor_driver = {
	.name = "processor",
	.class = ACPI_PROCESSOR_CLASS,
	.ids = processor_device_ids,
	.ops = {
		.add = acpi_processor_add,
		.remove = acpi_processor_remove,
		.start = acpi_processor_start,
		.suspend = acpi_processor_suspend,
		.resume = acpi_processor_resume,
		},
};

#define INSTALL_NOTIFY_HANDLER		1
#define UNINSTALL_NOTIFY_HANDLER	2

static const struct file_operations acpi_processor_info_fops = {
	.owner = THIS_MODULE,
	.open = acpi_processor_info_open_fs,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

DEFINE_PER_CPU(struct acpi_processor *, processors);
struct acpi_processor_errata errata __read_mostly;

static int set_no_mwait(const struct dmi_system_id *id)
{
	printk(KERN_NOTICE PREFIX "%s detected - "
		"disable mwait for CPU C-states\n", id->ident);
	idle_nomwait = 1;
	return 0;
}

static struct dmi_system_id __cpuinitdata processor_idle_dmi_table[] = {
	{
	set_no_mwait, "IFL91 board", {
	DMI_MATCH(DMI_BIOS_VENDOR, "COMPAL"),
	DMI_MATCH(DMI_SYS_VENDOR, "ZEPTO"),
	DMI_MATCH(DMI_PRODUCT_VERSION, "3215W"),
	DMI_MATCH(DMI_BOARD_NAME, "IFL91") }, NULL},
	{
	set_no_mwait, "Extensa 5220", {
	DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies LTD"),
	DMI_MATCH(DMI_SYS_VENDOR, "ACER"),
	DMI_MATCH(DMI_PRODUCT_VERSION, "0100"),
	DMI_MATCH(DMI_BOARD_NAME, "Columbia") }, NULL},
	{},
};

/* --------------------------------------------------------------------------
                                Errata Handling
   -------------------------------------------------------------------------- */
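
/*
 * Inspect the PIIX4 ACPI controller revision and record the errata that
 * apply in the global 'errata' structure: manual-throttle problems on the
 * A/B-step parts, the Bus Master IDE status register address, and whether
 * Type-F DMA is enabled (used later to disable C3).
 */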
static int acpi_processor_errata_piix4(struct pci_dev *dev)
{
	u8 value1 = 0;
	u8 value2 = 0;

	if (!dev)
		return -EINVAL;

	/*
	 * Note that 'dev' references the PIIX4 ACPI Controller.
	 */

	switch (dev->revision) {
	case 0:
		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found PIIX4 A-step\n"));
		break;
	case 1:
		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found PIIX4 B-step\n"));
		break;
	case 2:
		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found PIIX4E\n"));
		break;
	case 3:
		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found PIIX4M\n"));
		break;
	default:
		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found unknown PIIX4\n"));
		break;
	}

	switch (dev->revision) {

	case 0:		/* PIIX4 A-step */
	case 1:		/* PIIX4 B-step */
		/*
		 * See specification changes #13 ("Manual Throttle Duty Cycle")
		 * and #14 ("Enabling and Disabling Manual Throttle"), plus
		 * erratum #5 ("STPCLK# Deassertion Time") from the January
		 * 2002 PIIX4 specification update.  Applies to only older
		 * PIIX4 models.
		 */
		errata.piix4.throttle = 1;

	case 2:		/* PIIX4E */
	case 3:		/* PIIX4M */
		/*
		 * See erratum #18 ("C3 Power State/BMIDE and Type-F DMA
		 * Livelock") from the January 2002 PIIX4 specification update.
		 * Applies to all PIIX4 models.
		 */

		/*
		 * BM-IDE
		 * ------
		 * Find the PIIX4 IDE Controller and get the Bus Master IDE
		 * Status register address.  We'll use this later to read
		 * each IDE controller's DMA status to make sure we catch all
		 * DMA activity.
		 */
		dev = pci_get_subsys(PCI_VENDOR_ID_INTEL,
				     PCI_DEVICE_ID_INTEL_82371AB,
				     PCI_ANY_ID, PCI_ANY_ID, NULL);
		if (dev) {
			errata.piix4.bmisx = pci_resource_start(dev, 4);
			pci_dev_put(dev);
		}

		/*
		 * Type-F DMA
		 * ----------
		 * Find the PIIX4 ISA Controller and read the Motherboard
		 * DMA controller's status to see if Type-F (Fast) DMA mode
		 * is enabled (bit 7) on either channel.  Note that we'll
		 * disable C3 support if this is enabled, as some legacy
		 * devices won't operate well if fast DMA is disabled.
		 */
		dev = pci_get_subsys(PCI_VENDOR_ID_INTEL,
				     PCI_DEVICE_ID_INTEL_82371AB_0,
				     PCI_ANY_ID, PCI_ANY_ID, NULL);
		if (dev) {
			pci_read_config_byte(dev, 0x76, &value1);
			pci_read_config_byte(dev, 0x77, &value2);
			if ((value1 & 0x80) || (value2 & 0x80))
				errata.piix4.fdma = 1;
			pci_dev_put(dev);
		}

		break;
	}

	if (errata.piix4.bmisx)
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
				  "Bus master activity detection (BM-IDE) erratum enabled\n"));
	if (errata.piix4.fdma)
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
				  "Type-F DMA livelock erratum (C3 disabled)\n"));

	return 0;
}

static int acpi_processor_errata(struct acpi_processor *pr)
{
	int result = 0;
	struct pci_dev *dev = NULL;

	if (!pr)
		return -EINVAL;

	/*
	 * PIIX4
	 */
	dev = pci_get_subsys(PCI_VENDOR_ID_INTEL,
			     PCI_DEVICE_ID_INTEL_82371AB_3, PCI_ANY_ID,
			     PCI_ANY_ID, NULL);
	if (dev) {
		result = acpi_processor_errata_piix4(dev);
		pci_dev_put(dev);
	}

	return result;
}

/* --------------------------------------------------------------------------
                       Common ACPI processor functions
   -------------------------------------------------------------------------- */

/*
 * _PDC is required for a BIOS-OS handshake for most of the newer
 * ACPI processor features.
 */
static int acpi_processor_set_pdc(struct acpi_processor *pr)
{
	struct acpi_object_list *pdc_in = pr->pdc;
	acpi_status status = AE_OK;

	if (!pdc_in)
		return status;
	if (idle_nomwait) {
		/*
		 * If mwait is disabled for CPU C-states, the C2C3_FFH access
		 * mode will be disabled in the parameter of _PDC object.
		 * Of course C1_FFH access mode will also be disabled.
		 */
		union acpi_object *obj;
		u32 *buffer = NULL;

		obj = pdc_in->pointer;
		buffer = (u32 *)(obj->buffer.pointer);
		buffer[2] &= ~(ACPI_PDC_C_C2C3_FFH | ACPI_PDC_C_C1_FFH);
	}
	status = acpi_evaluate_object(pr->handle, "_PDC", pdc_in, NULL);

	if (ACPI_FAILURE(status))
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
		    "Could not evaluate _PDC, using legacy perf. control...\n"));

	return status;
}

/* --------------------------------------------------------------------------
                              FS Interface (/proc)
   -------------------------------------------------------------------------- */

static struct proc_dir_entry *acpi_processor_dir = NULL;

static int acpi_processor_info_seq_show(struct seq_file *seq, void *offset)
{
	struct acpi_processor *pr = seq->private;

	if (!pr)
		goto end;

	seq_printf(seq, "processor id: %d\n"
		   "acpi id: %d\n"
		   "bus mastering control: %s\n"
		   "power management: %s\n"
		   "throttling control: %s\n"
		   "limit interface: %s\n",
		   pr->id,
		   pr->acpi_id,
		   pr->flags.bm_control ? "yes" : "no",
		   pr->flags.power ? "yes" : "no",
		   pr->flags.throttling ? "yes" : "no",
		   pr->flags.limit ? "yes" : "no");

      end:
	return 0;
}

static int acpi_processor_info_open_fs(struct inode *inode, struct file *file)
{
	return single_open(file, acpi_processor_info_seq_show,
			   PDE(inode)->data);
}

static int acpi_processor_add_fs(struct acpi_device *device)
{
	struct proc_dir_entry *entry = NULL;

	if (!acpi_device_dir(device)) {
		acpi_device_dir(device) = proc_mkdir(acpi_device_bid(device),
						     acpi_processor_dir);
		if (!acpi_device_dir(device))
			return -ENODEV;
	}
	acpi_device_dir(device)->owner = THIS_MODULE;

	/* 'info' [R] */
	entry = proc_create_data(ACPI_PROCESSOR_FILE_INFO,
				 S_IRUGO, acpi_device_dir(device),
				 &acpi_processor_info_fops,
				 acpi_driver_data(device));
	if (!entry)
		return -EIO;

	/* 'throttling' [R/W] */
	entry = proc_create_data(ACPI_PROCESSOR_FILE_THROTTLING,
				 S_IFREG | S_IRUGO | S_IWUSR,
				 acpi_device_dir(device),
				 &acpi_processor_throttling_fops,
				 acpi_driver_data(device));
	if (!entry)
		return -EIO;

	/* 'limit' [R/W] */
	entry = proc_create_data(ACPI_PROCESSOR_FILE_LIMIT,
				 S_IFREG | S_IRUGO | S_IWUSR,
				 acpi_device_dir(device),
				 &acpi_processor_limit_fops,
				 acpi_driver_data(device));
	if (!entry)
		return -EIO;

	return 0;
}

static int acpi_processor_remove_fs(struct acpi_device *device)
{
	if (acpi_device_dir(device)) {
		remove_proc_entry(ACPI_PROCESSOR_FILE_INFO,
				  acpi_device_dir(device));
		remove_proc_entry(ACPI_PROCESSOR_FILE_THROTTLING,
				  acpi_device_dir(device));
		remove_proc_entry(ACPI_PROCESSOR_FILE_LIMIT,
				  acpi_device_dir(device));
		remove_proc_entry(acpi_device_bid(device), acpi_processor_dir);
		acpi_device_dir(device) = NULL;
	}

	return 0;
}

/* Use the acpi_id in MADT to map cpus in case of SMP */

#ifndef CONFIG_SMP
static int get_cpu_id(acpi_handle handle, u32 acpi_id) { return -1; }
#else

static struct acpi_table_madt *madt;
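
/*
 * Match one MADT Local APIC entry against an ACPI processor id; on a
 * hit, return the entry's APIC id through *apic_id.
 */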
static int map_lapic_id(struct acpi_subtable_header *entry,
			u32 acpi_id, int *apic_id)
{
	struct acpi_madt_local_apic *lapic =
		(struct acpi_madt_local_apic *)entry;

	if ((lapic->lapic_flags & ACPI_MADT_ENABLED) &&
	    lapic->processor_id == acpi_id) {
		*apic_id = lapic->id;
		return 1;
	}
	return 0;
}

static int map_lsapic_id(struct acpi_subtable_header *entry,
			 u32 acpi_id, int *apic_id)
{
	struct acpi_madt_local_sapic *lsapic =
		(struct acpi_madt_local_sapic *)entry;

	/* Only check enabled APICs */
	if (lsapic->lapic_flags & ACPI_MADT_ENABLED) {
		/* First check against id */
		if (lsapic->processor_id == acpi_id) {
			*apic_id = (lsapic->id << 8) | lsapic->eid;
			return 1;
		/* Check against optional uid */
		} else if (entry->length >= 16 &&
			   lsapic->uid == acpi_id) {
			*apic_id = lsapic->uid;
			return 1;
		}
	}
	return 0;
}
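
/*
 * Walk every MADT subtable and try to translate an ACPI processor id
 * into an APIC id via the Local APIC / Local SAPIC entries; returns -1
 * when no entry matches.
 */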
static int map_madt_entry(u32 acpi_id)
{
	unsigned long madt_end, entry;
	int apic_id = -1;

	if (!madt)
		return apic_id;

	entry = (unsigned long)madt;
	madt_end = entry + madt->header.length;

	/* Parse all entries looking for a match. */

	entry += sizeof(struct acpi_table_madt);
	while (entry + sizeof(struct acpi_subtable_header) < madt_end) {
		struct acpi_subtable_header *header =
			(struct acpi_subtable_header *)entry;
		if (header->type == ACPI_MADT_TYPE_LOCAL_APIC) {
			if (map_lapic_id(header, acpi_id, &apic_id))
				break;
		} else if (header->type == ACPI_MADT_TYPE_LOCAL_SAPIC) {
			if (map_lsapic_id(header, acpi_id, &apic_id))
				break;
		}
		entry += header->length;
	}
	return apic_id;
}
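
/*
 * Evaluate the processor's _MAT method, which returns a single
 * MADT-style entry in a buffer, and map it exactly like a static MADT
 * entry.
 */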
static int map_mat_entry(acpi_handle handle, u32 acpi_id)
{
	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
	union acpi_object *obj;
	struct acpi_subtable_header *header;
	int apic_id = -1;

	if (ACPI_FAILURE(acpi_evaluate_object(handle, "_MAT", NULL, &buffer)))
		goto exit;

	if (!buffer.length || !buffer.pointer)
		goto exit;

	obj = buffer.pointer;
	if (obj->type != ACPI_TYPE_BUFFER ||
	    obj->buffer.length < sizeof(struct acpi_subtable_header)) {
		goto exit;
	}

	header = (struct acpi_subtable_header *)obj->buffer.pointer;
	if (header->type == ACPI_MADT_TYPE_LOCAL_APIC) {
		map_lapic_id(header, acpi_id, &apic_id);
	} else if (header->type == ACPI_MADT_TYPE_LOCAL_SAPIC) {
		map_lsapic_id(header, acpi_id, &apic_id);
	}

      exit:
	if (buffer.pointer)
		kfree(buffer.pointer);
	return apic_id;
}
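
/*
 * Translate an ACPI processor id into a logical CPU number: get the
 * APIC id from _MAT (or from the MADT as a fallback) and scan the
 * possible CPUs for a matching physical id.
 */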
static int get_cpu_id(acpi_handle handle, u32 acpi_id)
{
	int i;
	int apic_id = -1;

	apic_id = map_mat_entry(handle, acpi_id);
	if (apic_id == -1)
		apic_id = map_madt_entry(acpi_id);
	if (apic_id == -1)
		return apic_id;

	for_each_possible_cpu(i) {
		if (cpu_physical_id(i) == apic_id)
			return i;
	}
	return -1;
}
#endif

/* --------------------------------------------------------------------------
                                Driver Interface
   -------------------------------------------------------------------------- */
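
/*
 * Gather static information about one processor: apply chipset errata,
 * detect bus-master arbitration control, obtain the ACPI id (from _UID
 * or the Processor object), map it to a logical CPU, and record the
 * P_BLK throttling registers and _SUN slot number when present.
 */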
static int acpi_processor_get_info(struct acpi_processor *pr, unsigned has_uid)
{
	acpi_status status = 0;
	union acpi_object object = { 0 };
	struct acpi_buffer buffer = { sizeof(union acpi_object), &object };
	int cpu_index;
	static int cpu0_initialized;

	if (!pr)
		return -EINVAL;

	if (num_online_cpus() > 1)
		errata.smp = TRUE;

	acpi_processor_errata(pr);

	/*
	 * Check to see if we have bus mastering arbitration control.  This
	 * is required for proper C3 usage (to maintain cache coherency).
	 */
	if (acpi_gbl_FADT.pm2_control_block && acpi_gbl_FADT.pm2_control_length) {
		pr->flags.bm_control = 1;
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
				  "Bus mastering arbitration control present\n"));
	} else
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
				  "No bus mastering arbitration control\n"));

	/* Check if it is a Device with HID and UID */
	if (has_uid) {
		unsigned long value;

		status = acpi_evaluate_integer(pr->handle, METHOD_NAME__UID,
					       NULL, &value);
		if (ACPI_FAILURE(status)) {
			printk(KERN_ERR PREFIX "Evaluating processor _UID\n");
			return -ENODEV;
		}
		pr->acpi_id = value;
	} else {
		/*
		 * Evaluate the processor object.  Note that it is common on
		 * SMP to have the first (boot) processor with a valid PBLK
		 * address while all others have a NULL address.
		 */
		status = acpi_evaluate_object(pr->handle, NULL, NULL, &buffer);
		if (ACPI_FAILURE(status)) {
			printk(KERN_ERR PREFIX "Evaluating processor object\n");
			return -ENODEV;
		}

		/*
		 * TBD: Synch processor ID (via LAPIC/LSAPIC structures) on SMP.
		 *      >>> 'acpi_get_processor_id(acpi_id, &id)' in
		 *      arch/xxx/acpi.c
		 */
		pr->acpi_id = object.processor.proc_id;
	}
	cpu_index = get_cpu_id(pr->handle, pr->acpi_id);

	/* Handle UP system running SMP kernel, with no LAPIC in MADT */
	if (!cpu0_initialized && (cpu_index == -1) &&
	    (num_online_cpus() == 1)) {
		cpu_index = 0;
	}

	cpu0_initialized = 1;

	pr->id = cpu_index;

	/*
	 * Extra Processor objects may be enumerated on MP systems with
	 * less than the max # of CPUs. They should be ignored _iff
	 * they are physically not present.
	 */
	if (pr->id == -1) {
		if (ACPI_FAILURE
		    (acpi_processor_hotadd_init(pr->handle, &pr->id))) {
			return -ENODEV;
		}
	}

	ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Processor [%d:%d]\n", pr->id,
			  pr->acpi_id));

	if (!object.processor.pblk_address)
		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No PBLK (NULL address)\n"));
	else if (object.processor.pblk_length != 6)
		printk(KERN_ERR PREFIX "Invalid PBLK length [%d]\n",
		       object.processor.pblk_length);
	else {
		pr->throttling.address = object.processor.pblk_address;
		pr->throttling.duty_offset = acpi_gbl_FADT.duty_offset;
		pr->throttling.duty_width = acpi_gbl_FADT.duty_width;

		pr->pblk = object.processor.pblk_address;

		/*
		 * We don't care about error returns - we just try to mark
		 * these reserved so that nobody else is confused into thinking
		 * that this region might be unused..
		 *
		 * (In particular, allocating the IO range for Cardbus)
		 */
		request_region(pr->throttling.address, 6, "ACPI CPU throttle");
	}

	/*
	 * If ACPI describes a slot number for this CPU, we can use it to
	 * ensure we get the right value in the "physical id" field
	 * of /proc/cpuinfo
	 */
	status = acpi_evaluate_object(pr->handle, "_SUN", NULL, &buffer);
	if (ACPI_SUCCESS(status))
		arch_fix_phys_package_id(pr->id, object.integer.value);

	return 0;
}

static DEFINE_PER_CPU(void *, processor_device_array);
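
/*
 * Per-CPU bring-up: resolve the processor's identity and then register
 * its /proc entries, sysfs links, notify handler, _PDC settings,
 * throttling/limit data, idle (C-state) support and thermal cooling
 * device.
 */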
static int __cpuinit acpi_processor_start(struct acpi_device *device)
{
	int result = 0;
	acpi_status status = AE_OK;
	struct acpi_processor *pr;
	struct sys_device *sysdev;

	pr = acpi_driver_data(device);

	result = acpi_processor_get_info(pr, device->flags.unique_id);
	if (result) {
		/* Processor is physically not present */
		return 0;
	}

	BUG_ON((pr->id >= nr_cpu_ids) || (pr->id < 0));

	/*
	 * Buggy BIOS check
	 * ACPI id of processors can be reported wrongly by the BIOS.
	 * Don't trust it blindly
	 */
	if (per_cpu(processor_device_array, pr->id) != NULL &&
	    per_cpu(processor_device_array, pr->id) != device) {
		printk(KERN_WARNING "BIOS reported wrong ACPI id "
			"for the processor\n");
		return -ENODEV;
	}
	per_cpu(processor_device_array, pr->id) = device;

	per_cpu(processors, pr->id) = pr;

	result = acpi_processor_add_fs(device);
	if (result)
		goto end;

	sysdev = get_cpu_sysdev(pr->id);
	if (sysfs_create_link(&device->dev.kobj, &sysdev->kobj, "sysdev"))
		return -EFAULT;

	status = acpi_install_notify_handler(pr->handle, ACPI_DEVICE_NOTIFY,
					     acpi_processor_notify, pr);

	/* _PDC call should be done before doing anything else (if reqd.). */
	arch_acpi_processor_init_pdc(pr);
	acpi_processor_set_pdc(pr);
#ifdef CONFIG_CPU_FREQ
	acpi_processor_ppc_has_changed(pr);
#endif
	acpi_processor_get_throttling_info(pr);
	acpi_processor_get_limit_info(pr);

	acpi_processor_power_init(pr, device);

	pr->cdev = thermal_cooling_device_register("Processor", device,
						   &processor_cooling_ops);
	if (IS_ERR(pr->cdev)) {
		result = PTR_ERR(pr->cdev);
		goto end;
	}

	printk(KERN_INFO PREFIX
	       "%s is registered as cooling_device%d\n",
	       device->dev.bus_id, pr->cdev->id);

	result = sysfs_create_link(&device->dev.kobj,
				   &pr->cdev->device.kobj,
				   "thermal_cooling");
	if (result)
		printk(KERN_ERR PREFIX "Create sysfs link\n");
	result = sysfs_create_link(&pr->cdev->device.kobj,
				   &device->dev.kobj,
				   "device");
	if (result)
		printk(KERN_ERR PREFIX "Create sysfs link\n");

	if (pr->flags.throttling) {
		printk(KERN_INFO PREFIX "%s [%s] (supports",
		       acpi_device_name(device), acpi_device_bid(device));
		printk(" %d throttling states", pr->throttling.state_count);
		printk(")\n");
	}

      end:
	return result;
}
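
/*
 * Handle notifications on the processor object: re-evaluate the
 * performance limit, C-state or throttling data and forward the event
 * to user space through /proc and netlink.
 */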
static void acpi_processor_notify(acpi_handle handle, u32 event, void *data)
{
	struct acpi_processor *pr = data;
	struct acpi_device *device = NULL;
	int saved;

	if (!pr)
		return;

	if (acpi_bus_get_device(pr->handle, &device))
		return;

	switch (event) {
	case ACPI_PROCESSOR_NOTIFY_PERFORMANCE:
		saved = pr->performance_platform_limit;
		acpi_processor_ppc_has_changed(pr);
		if (saved == pr->performance_platform_limit)
			break;
		acpi_bus_generate_proc_event(device, event,
					pr->performance_platform_limit);
		acpi_bus_generate_netlink_event(device->pnp.device_class,
						device->dev.bus_id, event,
						pr->performance_platform_limit);
		break;
	case ACPI_PROCESSOR_NOTIFY_POWER:
		acpi_processor_cst_has_changed(pr);
		acpi_bus_generate_proc_event(device, event, 0);
		acpi_bus_generate_netlink_event(device->pnp.device_class,
						device->dev.bus_id, event, 0);
		break;
	case ACPI_PROCESSOR_NOTIFY_THROTTLING:
		acpi_processor_tstate_has_changed(pr);
		acpi_bus_generate_proc_event(device, event, 0);
		acpi_bus_generate_netlink_event(device->pnp.device_class,
						device->dev.bus_id, event, 0);
		break;	/* don't fall through to the "unsupported event" case */
	default:
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
				  "Unsupported event [0x%x]\n", event));
		break;
	}

	return;
}
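
/*
 * CPU hotplug callback: when a CPU comes online, refresh its
 * performance, C-state and throttling information from the firmware.
 */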
static int acpi_cpu_soft_notify(struct notifier_block *nfb,
				unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;
	struct acpi_processor *pr = per_cpu(processors, cpu);

	if (action == CPU_ONLINE && pr) {
		acpi_processor_ppc_has_changed(pr);
		acpi_processor_cst_has_changed(pr);
		acpi_processor_tstate_has_changed(pr);
	}
	return NOTIFY_OK;
}

static struct notifier_block acpi_cpu_notifier = {
	.notifier_call = acpi_cpu_soft_notify,
};

static int acpi_processor_add(struct acpi_device *device)
{
	struct acpi_processor *pr = NULL;

	if (!device)
		return -EINVAL;

	pr = kzalloc(sizeof(struct acpi_processor), GFP_KERNEL);
	if (!pr)
		return -ENOMEM;

	pr->handle = device->handle;
	strcpy(acpi_device_name(device), ACPI_PROCESSOR_DEVICE_NAME);
	strcpy(acpi_device_class(device), ACPI_PROCESSOR_CLASS);
	acpi_driver_data(device) = pr;

	return 0;
}

static int acpi_processor_remove(struct acpi_device *device, int type)
{
	acpi_status status = AE_OK;
	struct acpi_processor *pr = NULL;

	if (!device || !acpi_driver_data(device))
		return -EINVAL;

	pr = acpi_driver_data(device);

	if (pr->id >= nr_cpu_ids) {
		kfree(pr);
		return 0;
	}

	if (type == ACPI_BUS_REMOVAL_EJECT) {
		if (acpi_processor_handle_eject(pr))
			return -EINVAL;
	}

	acpi_processor_power_exit(pr, device);

	status = acpi_remove_notify_handler(pr->handle, ACPI_DEVICE_NOTIFY,
					    acpi_processor_notify);

	sysfs_remove_link(&device->dev.kobj, "sysdev");

	acpi_processor_remove_fs(device);

	if (pr->cdev) {
		sysfs_remove_link(&device->dev.kobj, "thermal_cooling");
		sysfs_remove_link(&pr->cdev->device.kobj, "device");
		thermal_cooling_device_unregister(pr->cdev);
		pr->cdev = NULL;
	}

	per_cpu(processors, pr->id) = NULL;
	per_cpu(processor_device_array, pr->id) = NULL;
	kfree(pr);

	return 0;
}

#ifdef CONFIG_ACPI_HOTPLUG_CPU
/****************************************************************************
 *	ACPI processor hotplug support				            *
 ****************************************************************************/
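
/*
 * Evaluate _STA to decide whether a (hot-pluggable) processor is
 * physically present.
 */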
static int is_processor_present(acpi_handle handle)
{
	acpi_status status;
	unsigned long sta = 0;

	status = acpi_evaluate_integer(handle, "_STA", NULL, &sta);

	if (ACPI_SUCCESS(status) && (sta & ACPI_STA_DEVICE_PRESENT))
		return 1;

	/*
	 * _STA is mandatory for a processor that supports hot plug
	 */
	if (status == AE_NOT_FOUND)
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
				  "Processor does not support hot plug\n"));
	else
		ACPI_EXCEPTION((AE_INFO, status,
				"Processor Device is not present"));
	return 0;
}
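
/*
 * Create and start the acpi_device for a newly appearing processor
 * object; if it maps to a valid CPU, announce it to user space with a
 * KOBJ_ONLINE uevent.
 */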
static
int acpi_processor_device_add(acpi_handle handle, struct acpi_device **device)
{
	acpi_handle phandle;
	struct acpi_device *pdev;
	struct acpi_processor *pr;

	if (acpi_get_parent(handle, &phandle)) {
		return -ENODEV;
	}

	if (acpi_bus_get_device(phandle, &pdev)) {
		return -ENODEV;
	}

	if (acpi_bus_add(device, pdev, handle, ACPI_BUS_TYPE_PROCESSOR)) {
		return -ENODEV;
	}

	acpi_bus_start(*device);

	pr = acpi_driver_data(*device);
	if (!pr)
		return -ENODEV;

	if ((pr->id >= 0) && (pr->id < nr_cpu_ids)) {
		kobject_uevent(&(*device)->dev.kobj, KOBJ_ONLINE);
	}
	return 0;
}

static void __ref acpi_processor_hotplug_notify(acpi_handle handle,
						u32 event, void *data)
{
	struct acpi_processor *pr;
	struct acpi_device *device = NULL;
	int result;

	switch (event) {
	case ACPI_NOTIFY_BUS_CHECK:
	case ACPI_NOTIFY_DEVICE_CHECK:
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
			"Processor driver received %s event\n",
			(event == ACPI_NOTIFY_BUS_CHECK) ?
			"ACPI_NOTIFY_BUS_CHECK" : "ACPI_NOTIFY_DEVICE_CHECK"));

		if (!is_processor_present(handle))
			break;

		if (acpi_bus_get_device(handle, &device)) {
			result = acpi_processor_device_add(handle, &device);
			if (result)
				printk(KERN_ERR PREFIX
				       "Unable to add the device\n");
			break;
		}

		pr = acpi_driver_data(device);
		if (!pr) {
			printk(KERN_ERR PREFIX "Driver data is NULL\n");
			break;
		}

		if (pr->id >= 0 && (pr->id < nr_cpu_ids)) {
			kobject_uevent(&device->dev.kobj, KOBJ_OFFLINE);
			break;
		}

		result = acpi_processor_start(device);
		if ((!result) && ((pr->id >= 0) && (pr->id < nr_cpu_ids))) {
			kobject_uevent(&device->dev.kobj, KOBJ_ONLINE);
		} else {
			printk(KERN_ERR PREFIX "Device [%s] failed to start\n",
			       acpi_device_bid(device));
		}
		break;
	case ACPI_NOTIFY_EJECT_REQUEST:
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
				  "received ACPI_NOTIFY_EJECT_REQUEST\n"));

		if (acpi_bus_get_device(handle, &device)) {
			printk(KERN_ERR PREFIX
			       "Device does not exist, dropping EJECT\n");
			break;
		}
		pr = acpi_driver_data(device);
		if (!pr) {
			printk(KERN_ERR PREFIX
			       "Driver data is NULL, dropping EJECT\n");
			return;
		}

		if ((pr->id < nr_cpu_ids) && (cpu_present(pr->id)))
			kobject_uevent(&device->dev.kobj, KOBJ_OFFLINE);
		break;
	default:
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
				  "Unsupported event [0x%x]\n", event));
		break;
	}

	return;
}
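
/*
 * Namespace-walk callback: install or remove the hotplug notify handler
 * on every Processor object, depending on the action passed in through
 * the context pointer.
 */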
static acpi_status
processor_walk_namespace_cb(acpi_handle handle,
			    u32 lvl, void *context, void **rv)
{
	acpi_status status;
	int *action = context;
	acpi_object_type type = 0;

	status = acpi_get_type(handle, &type);
	if (ACPI_FAILURE(status))
		return (AE_OK);

	if (type != ACPI_TYPE_PROCESSOR)
		return (AE_OK);

	switch (*action) {
	case INSTALL_NOTIFY_HANDLER:
		acpi_install_notify_handler(handle,
					    ACPI_SYSTEM_NOTIFY,
					    acpi_processor_hotplug_notify,
					    NULL);
		break;
	case UNINSTALL_NOTIFY_HANDLER:
		acpi_remove_notify_handler(handle,
					   ACPI_SYSTEM_NOTIFY,
					   acpi_processor_hotplug_notify);
		break;
	default:
		break;
	}

	return (AE_OK);
}
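
/*
 * Hot-add path: verify the processor is present, map its local (S)APIC
 * to a logical CPU id and register that CPU with the architecture code.
 */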
static acpi_status acpi_processor_hotadd_init(acpi_handle handle, int *p_cpu)
{
	if (!is_processor_present(handle)) {
		return AE_ERROR;
	}

	if (acpi_map_lsapic(handle, p_cpu))
		return AE_ERROR;

	if (arch_register_cpu(*p_cpu)) {
		acpi_unmap_lsapic(*p_cpu);
		return AE_ERROR;
	}

	return AE_OK;
}
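
/*
 * Eject path: take the CPU offline if necessary, then unregister it and
 * unmap its local (S)APIC.
 */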
static int acpi_processor_handle_eject(struct acpi_processor *pr)
{
	if (cpu_online(pr->id))
		cpu_down(pr->id);

	arch_unregister_cpu(pr->id);
	acpi_unmap_lsapic(pr->id);
	return (0);
}
#else
static acpi_status acpi_processor_hotadd_init(acpi_handle handle, int *p_cpu)
{
	return AE_ERROR;
}
static int acpi_processor_handle_eject(struct acpi_processor *pr)
{
	return (-EINVAL);
}
#endif

static
void acpi_processor_install_hotplug_notify(void)
{
#ifdef CONFIG_ACPI_HOTPLUG_CPU
	int action = INSTALL_NOTIFY_HANDLER;

	acpi_walk_namespace(ACPI_TYPE_PROCESSOR,
			    ACPI_ROOT_OBJECT,
			    ACPI_UINT32_MAX,
			    processor_walk_namespace_cb, &action, NULL);
#endif
	register_hotcpu_notifier(&acpi_cpu_notifier);
}

static
void acpi_processor_uninstall_hotplug_notify(void)
{
#ifdef CONFIG_ACPI_HOTPLUG_CPU
	int action = UNINSTALL_NOTIFY_HANDLER;

	acpi_walk_namespace(ACPI_TYPE_PROCESSOR,
			    ACPI_ROOT_OBJECT,
			    ACPI_UINT32_MAX,
			    processor_walk_namespace_cb, &action, NULL);
#endif
	unregister_hotcpu_notifier(&acpi_cpu_notifier);
}

/*
 * We keep the driver loaded even when ACPI is not running.
 * This is needed for the powernow-k8 driver, which works even without
 * ACPI, but needs symbols from this driver.
 */

static int __init acpi_processor_init(void)
{
	int result = 0;

	memset(&errata, 0, sizeof(errata));

#ifdef CONFIG_SMP
	if (ACPI_FAILURE(acpi_get_table(ACPI_SIG_MADT, 0,
				(struct acpi_table_header **)&madt)))
		madt = NULL;
#endif

	acpi_processor_dir = proc_mkdir(ACPI_PROCESSOR_CLASS, acpi_root_dir);
	if (!acpi_processor_dir)
		return -ENOMEM;
	acpi_processor_dir->owner = THIS_MODULE;

	/*
	 * Check whether the system matches an entry in the DMI table.
	 * If it does, OSPM should not use mwait for CPU C-states.
	 */
	dmi_check_system(processor_idle_dmi_table);

	result = cpuidle_register_driver(&acpi_idle_driver);
	if (result < 0)
		goto out_proc;

	result = acpi_bus_register_driver(&acpi_processor_driver);
	if (result < 0)
		goto out_cpuidle;

	acpi_processor_install_hotplug_notify();

	acpi_thermal_cpufreq_init();

	acpi_processor_ppc_init();

	acpi_processor_throttling_init();

	return 0;

out_cpuidle:
	cpuidle_unregister_driver(&acpi_idle_driver);

out_proc:
	remove_proc_entry(ACPI_PROCESSOR_CLASS, acpi_root_dir);

	return result;
}

static void __exit acpi_processor_exit(void)
{
	acpi_processor_ppc_exit();

	acpi_thermal_cpufreq_exit();

	acpi_processor_uninstall_hotplug_notify();

	acpi_bus_unregister_driver(&acpi_processor_driver);

	cpuidle_unregister_driver(&acpi_idle_driver);

	remove_proc_entry(ACPI_PROCESSOR_CLASS, acpi_root_dir);

	return;
}

module_init(acpi_processor_init);
module_exit(acpi_processor_exit);

EXPORT_SYMBOL(acpi_processor_set_thermal_limit);

MODULE_ALIAS("processor");