/*
 *  boot.c - Architecture-Specific Low-Level ACPI Boot Support
 *
 *  Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
 *  Copyright (C) 2001 Jun Nakajima <jun.nakajima@intel.com>
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 */

#include <linux/init.h>
#include <linux/acpi.h>
#include <linux/acpi_pmtmr.h>
#include <linux/efi.h>
#include <linux/cpumask.h>
#include <linux/module.h>
#include <linux/dmi.h>
#include <linux/irq.h>
#include <linux/bootmem.h>
#include <linux/ioport.h>

#include <asm/pgtable.h>
#include <asm/io_apic.h>
#include <asm/apic.h>
#include <asm/io.h>
#include <asm/mpspec.h>
#include <asm/smp.h>

#ifdef CONFIG_X86_LOCAL_APIC
# include <mach_apic.h>
#endif

static int __initdata acpi_force = 0;

#ifdef CONFIG_ACPI
int acpi_disabled = 0;
#else
int acpi_disabled = 1;
#endif
EXPORT_SYMBOL(acpi_disabled);

#ifdef CONFIG_X86_64

#include <asm/proto.h>
#include <asm/genapic.h>

#else			/* X86 */

#ifdef CONFIG_X86_LOCAL_APIC
#include <mach_apic.h>
#include <mach_mpparse.h>
#endif			/* CONFIG_X86_LOCAL_APIC */

#endif			/* X86 */

#define BAD_MADT_ENTRY(entry, end) ( \
        (!entry) || (unsigned long)entry + sizeof(*entry) > end || \
        ((struct acpi_subtable_header *)entry)->length < sizeof(*entry))

#define PREFIX "ACPI: "

int acpi_noirq;			/* skip ACPI IRQ initialization */
int acpi_pci_disabled;		/* skip ACPI PCI scan and IRQ initialization */
EXPORT_SYMBOL(acpi_pci_disabled);
int acpi_ht __initdata = 1;	/* enable HT */

int acpi_lapic;
int acpi_ioapic;
int acpi_strict;

u8 acpi_sci_flags __initdata;
int acpi_sci_override_gsi __initdata;
int acpi_skip_timer_override __initdata;
int acpi_use_timer_override __initdata;

#ifdef CONFIG_X86_LOCAL_APIC
static u64 acpi_lapic_addr __initdata = APIC_DEFAULT_PHYS_BASE;
#endif

#ifndef __HAVE_ARCH_CMPXCHG
#warning ACPI uses CMPXCHG, i486 and later hardware
#endif

/* --------------------------------------------------------------------------
                              Boot-time Configuration
   -------------------------------------------------------------------------- */

/*
 * The default interrupt routing model is PIC (8259).  This gets
 * overridden if IOAPICs are enumerated (below).
 */
enum acpi_irq_model_id acpi_irq_model = ACPI_IRQ_MODEL_PIC;

#ifdef CONFIG_X86_64

/* rely on all ACPI tables being in the direct mapping */
char *__init __acpi_map_table(unsigned long phys_addr, unsigned long size)
{
        if (!phys_addr || !size)
                return NULL;

        if (phys_addr+size <= (max_pfn_mapped << PAGE_SHIFT) + PAGE_SIZE)
                return __va(phys_addr);

        return NULL;
}

#else

/*
 * Temporarily use the virtual area starting from FIX_IO_APIC_BASE_END,
 * to map the target physical address. The problem is that set_fixmap()
 * provides a single page, and it is possible that the page is not
 * sufficient.
 * By using this area, we can map up to MAX_IO_APICS pages temporarily,
 * i.e. until the next __va_range() call.
 *
 * Important Safety Note:  The fixed I/O APIC page numbers are *subtracted*
 * from the fixed base.  That's why we start at FIX_IO_APIC_BASE_END and
 * count idx down while incrementing the phys address.
 */
char *__init __acpi_map_table(unsigned long phys, unsigned long size)
{
        unsigned long base, offset, mapped_size;
        int idx;

        if (phys + size < 8 * 1024 * 1024)
                return __va(phys);

        offset = phys & (PAGE_SIZE - 1);
        mapped_size = PAGE_SIZE - offset;
        set_fixmap(FIX_ACPI_END, phys);
        base = fix_to_virt(FIX_ACPI_END);

        /*
         * Most cases can be covered by the below.
         */
        idx = FIX_ACPI_END;
        while (mapped_size < size) {
                if (--idx < FIX_ACPI_BEGIN)
                        return NULL;	/* cannot handle this */
                phys += PAGE_SIZE;
                set_fixmap(idx, phys);
                mapped_size += PAGE_SIZE;
        }

        return ((unsigned char *)base + offset);
}
#endif

#ifdef CONFIG_PCI_MMCONFIG
/* The physical address of the MMCONFIG aperture.  Set from ACPI tables. */
struct acpi_mcfg_allocation *pci_mmcfg_config;
int pci_mmcfg_config_num;

int __init acpi_parse_mcfg(struct acpi_table_header *header)
{
        struct acpi_table_mcfg *mcfg;
        unsigned long i;
        int config_size;

        if (!header)
                return -EINVAL;

        mcfg = (struct acpi_table_mcfg *)header;

        /* how many config structures do we have */
        pci_mmcfg_config_num = 0;
        i = header->length - sizeof(struct acpi_table_mcfg);
        while (i >= sizeof(struct acpi_mcfg_allocation)) {
                ++pci_mmcfg_config_num;
                i -= sizeof(struct acpi_mcfg_allocation);
        }
        if (pci_mmcfg_config_num == 0) {
                printk(KERN_ERR PREFIX "MMCONFIG has no entries\n");
                return -ENODEV;
        }

        config_size = pci_mmcfg_config_num * sizeof(*pci_mmcfg_config);
        pci_mmcfg_config = kmalloc(config_size, GFP_KERNEL);
        if (!pci_mmcfg_config) {
                printk(KERN_WARNING PREFIX
                       "No memory for MCFG config tables\n");
                return -ENOMEM;
        }

        memcpy(pci_mmcfg_config, &mcfg[1], config_size);
        for (i = 0; i < pci_mmcfg_config_num; ++i) {
                if (pci_mmcfg_config[i].address > 0xFFFFFFFF) {
                        printk(KERN_ERR PREFIX
                               "MMCONFIG not in low 4GB of memory\n");
                        kfree(pci_mmcfg_config);
                        pci_mmcfg_config_num = 0;
                        return -ENODEV;
                }
        }

        return 0;
}
#endif				/* CONFIG_PCI_MMCONFIG */

#ifdef CONFIG_X86_LOCAL_APIC
static int __init acpi_parse_madt(struct acpi_table_header *table)
{
        struct acpi_table_madt *madt = NULL;

        if (!cpu_has_apic)
                return -EINVAL;

        madt = (struct acpi_table_madt *)table;
        if (!madt) {
                printk(KERN_WARNING PREFIX "Unable to map MADT\n");
                return -ENODEV;
        }

        if (madt->address) {
                acpi_lapic_addr = (u64) madt->address;

                printk(KERN_DEBUG PREFIX "Local APIC address 0x%08x\n",
                       madt->address);
        }

        acpi_madt_oem_check(madt->header.oem_id, madt->header.oem_table_id);

        return 0;
}

static void __cpuinit acpi_register_lapic(int id, u8 enabled)
{
        if (!enabled) {
                ++disabled_cpus;
                return;
        }

        generic_processor_info(id, 0);
}

static int __init
acpi_parse_lapic(struct acpi_subtable_header *header, const unsigned long end)
{
        struct acpi_madt_local_apic *processor = NULL;

        processor = (struct acpi_madt_local_apic *)header;

        if (BAD_MADT_ENTRY(processor, end))
                return -EINVAL;

        acpi_table_print_madt_entry(header);

        /*
         * Register disabled CPUs as well, so that they are counted and
         * cpu_possible_map can be sized accurately; this avoids
         * preallocating memory for all NR_CPUS when CPU hotplug is used.
         */
        acpi_register_lapic(processor->id,	/* APIC ID */
                            processor->lapic_flags & ACPI_MADT_ENABLED);

        return 0;
}

static int __init
acpi_parse_sapic(struct acpi_subtable_header *header, const unsigned long end)
{
        struct acpi_madt_local_sapic *processor = NULL;

        processor = (struct acpi_madt_local_sapic *)header;

        if (BAD_MADT_ENTRY(processor, end))
                return -EINVAL;

        acpi_table_print_madt_entry(header);

        acpi_register_lapic((processor->id << 8) | processor->eid,	/* APIC ID */
                            processor->lapic_flags & ACPI_MADT_ENABLED);

        return 0;
}

static int __init
acpi_parse_lapic_addr_ovr(struct acpi_subtable_header *header,
                          const unsigned long end)
{
        struct acpi_madt_local_apic_override *lapic_addr_ovr = NULL;

        lapic_addr_ovr = (struct acpi_madt_local_apic_override *)header;

        if (BAD_MADT_ENTRY(lapic_addr_ovr, end))
                return -EINVAL;

        acpi_lapic_addr = lapic_addr_ovr->address;

        return 0;
}

static int __init
acpi_parse_lapic_nmi(struct acpi_subtable_header *header, const unsigned long end)
{
        struct acpi_madt_local_apic_nmi *lapic_nmi = NULL;

        lapic_nmi = (struct acpi_madt_local_apic_nmi *)header;

        if (BAD_MADT_ENTRY(lapic_nmi, end))
                return -EINVAL;

        acpi_table_print_madt_entry(header);

        if (lapic_nmi->lint != 1)
                printk(KERN_WARNING PREFIX "NMI not connected to LINT 1!\n");

        return 0;
}

#endif				/* CONFIG_X86_LOCAL_APIC */

#ifdef CONFIG_X86_IO_APIC

static int __init
acpi_parse_ioapic(struct acpi_subtable_header *header, const unsigned long end)
{
        struct acpi_madt_io_apic *ioapic = NULL;

        ioapic = (struct acpi_madt_io_apic *)header;

        if (BAD_MADT_ENTRY(ioapic, end))
                return -EINVAL;

        acpi_table_print_madt_entry(header);

        mp_register_ioapic(ioapic->id,
                           ioapic->address, ioapic->global_irq_base);

        return 0;
}

/*
 * Parse Interrupt Source Override for the ACPI SCI
 */
static void __init acpi_sci_ioapic_setup(u32 gsi, u16 polarity, u16 trigger)
{
        if (trigger == 0)	/* compatible SCI trigger is level */
                trigger = 3;

        if (polarity == 0)	/* compatible SCI polarity is low */
                polarity = 3;

        /* Command-line over-ride via acpi_sci= */
        if (acpi_sci_flags & ACPI_MADT_TRIGGER_MASK)
                trigger = (acpi_sci_flags & ACPI_MADT_TRIGGER_MASK) >> 2;

        if (acpi_sci_flags & ACPI_MADT_POLARITY_MASK)
                polarity = acpi_sci_flags & ACPI_MADT_POLARITY_MASK;

        /*
         * mp_config_acpi_legacy_irqs() has already set up IRQs < 16.
         * If GSI is < 16, this will update its flags,
         * else it will create a new mp_irqs[] entry.
         */
        mp_override_legacy_irq(gsi, polarity, trigger, gsi);

        /*
         * stash over-ride to indicate we've been here
         * and for later update of acpi_gbl_FADT
         */
        acpi_sci_override_gsi = gsi;
        return;
}

static int __init
acpi_parse_int_src_ovr(struct acpi_subtable_header *header,
                       const unsigned long end)
{
        struct acpi_madt_interrupt_override *intsrc = NULL;

        intsrc = (struct acpi_madt_interrupt_override *)header;

        if (BAD_MADT_ENTRY(intsrc, end))
                return -EINVAL;

        acpi_table_print_madt_entry(header);

        if (intsrc->source_irq == acpi_gbl_FADT.sci_interrupt) {
                acpi_sci_ioapic_setup(intsrc->global_irq,
                                      intsrc->inti_flags & ACPI_MADT_POLARITY_MASK,
                                      (intsrc->inti_flags & ACPI_MADT_TRIGGER_MASK) >> 2);
                return 0;
        }

        if (acpi_skip_timer_override &&
            intsrc->source_irq == 0 && intsrc->global_irq == 2) {
                printk(PREFIX "BIOS IRQ0 pin2 override ignored.\n");
                return 0;
        }

        mp_override_legacy_irq(intsrc->source_irq,
                               intsrc->inti_flags & ACPI_MADT_POLARITY_MASK,
                               (intsrc->inti_flags & ACPI_MADT_TRIGGER_MASK) >> 2,
                               intsrc->global_irq);

        return 0;
}

static int __init
acpi_parse_nmi_src(struct acpi_subtable_header *header, const unsigned long end)
{
        struct acpi_madt_nmi_source *nmi_src = NULL;

        nmi_src = (struct acpi_madt_nmi_source *)header;

        if (BAD_MADT_ENTRY(nmi_src, end))
                return -EINVAL;

        acpi_table_print_madt_entry(header);

        /* TBD: Support NMI source entries? */

        return 0;
}

#endif				/* CONFIG_X86_IO_APIC */

/*
 * acpi_pic_sci_set_trigger()
 *
 * use ELCR to set PIC-mode trigger type for SCI
 *
 * If a PIC-mode SCI is not recognized or gives spurious IRQ7's
 * it may require Edge Trigger -- use "acpi_sci=edge"
 *
 * Ports 0x4d0-0x4d1 are ELCR1 and ELCR2, the Edge/Level Control Registers
 * for the 8259 PIC.  bit[n] = 1 means irq[n] is Level, otherwise Edge.
 * ELCR1 is IRQs 0-7 (IRQ 0, 1, 2 must be 0)
 * ELCR2 is IRQs 8-15 (IRQ 8, 13 must be 0)
 */

void __init acpi_pic_sci_set_trigger(unsigned int irq, u16 trigger)
{
        unsigned int mask = 1 << irq;
        unsigned int old, new;

        /* Real old ELCR mask */
        old = inb(0x4d0) | (inb(0x4d1) << 8);

        /*
         * If we use ACPI to set PCI IRQs, then we should clear ELCR
         * since we will set it correctly as we enable the PCI irq
         * routing.
         */
        new = acpi_noirq ? old : 0;

        /*
         * Update SCI information in the ELCR, it isn't in the PCI
         * routing tables..
         */
        switch (trigger) {
        case 1:		/* Edge - clear */
                new &= ~mask;
                break;
        case 3:		/* Level - set */
                new |= mask;
                break;
        }

        if (old == new)
                return;

        printk(PREFIX "setting ELCR to %04x (from %04x)\n", new, old);
        outb(new, 0x4d0);
        outb(new >> 8, 0x4d1);
}

int acpi_gsi_to_irq(u32 gsi, unsigned int *irq)
{
        *irq = gsi;
        return 0;
}

/*
 * success: return IRQ number (>=0)
 * failure: return < 0
 */
int acpi_register_gsi(u32 gsi, int triggering, int polarity)
{
        unsigned int irq;
        unsigned int plat_gsi = gsi;

#ifdef CONFIG_PCI
        /*
         * Make sure all (legacy) PCI IRQs are set as level-triggered.
         */
        if (acpi_irq_model == ACPI_IRQ_MODEL_PIC) {
                extern void eisa_set_level_irq(unsigned int irq);

                if (triggering == ACPI_LEVEL_SENSITIVE)
                        eisa_set_level_irq(gsi);
        }
#endif

#ifdef CONFIG_X86_IO_APIC
        if (acpi_irq_model == ACPI_IRQ_MODEL_IOAPIC)
                plat_gsi = mp_register_gsi(gsi, triggering, polarity);
#endif
        acpi_gsi_to_irq(plat_gsi, &irq);
        return irq;
}

/*
 * ACPI based hotplug support for CPU
 */
#ifdef CONFIG_ACPI_HOTPLUG_CPU

static int __cpuinit _acpi_map_lsapic(acpi_handle handle, int *pcpu)
{
        struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
        union acpi_object *obj;
        struct acpi_madt_local_apic *lapic;
        cpumask_t tmp_map, new_map;
        u8 physid;
        int cpu;

        if (ACPI_FAILURE(acpi_evaluate_object(handle, "_MAT", NULL, &buffer)))
                return -EINVAL;

        if (!buffer.length || !buffer.pointer)
                return -EINVAL;

        obj = buffer.pointer;
        if (obj->type != ACPI_TYPE_BUFFER ||
            obj->buffer.length < sizeof(*lapic)) {
                kfree(buffer.pointer);
                return -EINVAL;
        }

        lapic = (struct acpi_madt_local_apic *)obj->buffer.pointer;

        if (lapic->header.type != ACPI_MADT_TYPE_LOCAL_APIC ||
            !(lapic->lapic_flags & ACPI_MADT_ENABLED)) {
                kfree(buffer.pointer);
                return -EINVAL;
        }

        physid = lapic->id;

        tmp_map = cpu_present_map;
        acpi_register_lapic(physid, lapic->lapic_flags & ACPI_MADT_ENABLED);

        /* Done with the _MAT buffer; free it only after the last use of *lapic. */
        kfree(buffer.pointer);
        buffer.length = ACPI_ALLOCATE_BUFFER;
        buffer.pointer = NULL;

        /*
         * If acpi_register_lapic successfully generates a new logical cpu
         * number, then the following will get us exactly what was mapped
         */
        cpus_andnot(new_map, cpu_present_map, tmp_map);
        if (cpus_empty(new_map)) {
                printk("Unable to map lapic to logical cpu number\n");
                return -EINVAL;
        }

        cpu = first_cpu(new_map);

        *pcpu = cpu;
        return 0;
}

/* wrapper to silence section mismatch warning */
int __ref acpi_map_lsapic(acpi_handle handle, int *pcpu)
{
        return _acpi_map_lsapic(handle, pcpu);
}
EXPORT_SYMBOL(acpi_map_lsapic);

int acpi_unmap_lsapic(int cpu)
{
        per_cpu(x86_cpu_to_apicid, cpu) = -1;
        cpu_clear(cpu, cpu_present_map);
        num_processors--;

        return (0);
}
EXPORT_SYMBOL(acpi_unmap_lsapic);
#endif				/* CONFIG_ACPI_HOTPLUG_CPU */

int acpi_register_ioapic(acpi_handle handle, u64 phys_addr, u32 gsi_base)
{
        /* TBD */
        return -EINVAL;
}
EXPORT_SYMBOL(acpi_register_ioapic);

int acpi_unregister_ioapic(acpi_handle handle, u32 gsi_base)
{
        /* TBD */
        return -EINVAL;
}
EXPORT_SYMBOL(acpi_unregister_ioapic);

static int __init acpi_parse_sbf(struct acpi_table_header *table)
{
        struct acpi_table_boot *sb;

        sb = (struct acpi_table_boot *)table;
        if (!sb) {
                printk(KERN_WARNING PREFIX "Unable to map SBF\n");
                return -ENODEV;
        }

        sbf_port = sb->cmos_index;	/* Save CMOS port */

        return 0;
}

#ifdef CONFIG_HPET_TIMER
#include <asm/hpet.h>

static struct __initdata resource *hpet_res;

static int __init acpi_parse_hpet(struct acpi_table_header *table)
{
        struct acpi_table_hpet *hpet_tbl;

        hpet_tbl = (struct acpi_table_hpet *)table;
        if (!hpet_tbl) {
                printk(KERN_WARNING PREFIX "Unable to map HPET\n");
                return -ENODEV;
        }

        if (hpet_tbl->address.space_id != ACPI_SPACE_MEM) {
                printk(KERN_WARNING PREFIX "HPET timers must be located in "
                       "memory.\n");
                return -1;
        }

        hpet_address = hpet_tbl->address.address;

        /*
         * Some broken BIOSes advertise HPET at 0x0. We really do not
         * want to allocate a resource there.
         */
        if (!hpet_address) {
                printk(KERN_WARNING PREFIX
                       "HPET id: %#x base: %#lx is invalid\n",
                       hpet_tbl->id, hpet_address);
                return 0;
        }
#ifdef CONFIG_X86_64
        /*
         * Some even more broken BIOSes advertise HPET at
         * 0xfed0000000000000 instead of 0xfed00000. Fix it up and add
         * some noise:
         */
        if (hpet_address == 0xfed0000000000000UL) {
                if (!hpet_force_user) {
                        printk(KERN_WARNING PREFIX "HPET id: %#x "
                               "base: 0xfed0000000000000 is bogus\n "
                               "try hpet=force on the kernel command line to "
                               "fix it up to 0xfed00000.\n", hpet_tbl->id);
                        hpet_address = 0;
                        return 0;
                }
                printk(KERN_WARNING PREFIX
                       "HPET id: %#x base: 0xfed0000000000000 fixed up "
                       "to 0xfed00000.\n", hpet_tbl->id);
                hpet_address >>= 32;
        }
#endif
        printk(KERN_INFO PREFIX "HPET id: %#x base: %#lx\n",
               hpet_tbl->id, hpet_address);

        /*
         * Allocate and initialize the HPET firmware resource for adding into
         * the resource tree during the lateinit timeframe.
         */
#define HPET_RESOURCE_NAME_SIZE 9
        hpet_res = alloc_bootmem(sizeof(*hpet_res) + HPET_RESOURCE_NAME_SIZE);

        hpet_res->name = (void *)&hpet_res[1];
        hpet_res->flags = IORESOURCE_MEM;
        snprintf((char *)hpet_res->name, HPET_RESOURCE_NAME_SIZE, "HPET %u",
                 hpet_tbl->sequence);

        hpet_res->start = hpet_address;
        hpet_res->end = hpet_address + (1 * 1024) - 1;

        return 0;
}

/*
 * hpet_insert_resource inserts the HPET resources used into the resource
 * tree.
 */
static __init int hpet_insert_resource(void)
{
        if (!hpet_res)
                return 1;

        return insert_resource(&iomem_resource, hpet_res);
}

late_initcall(hpet_insert_resource);

#else
#define acpi_parse_hpet NULL
#endif

static int __init acpi_parse_fadt(struct acpi_table_header *table)
{
#ifdef CONFIG_X86_PM_TIMER
        /* detect the location of the ACPI PM Timer */
        if (acpi_gbl_FADT.header.revision >= FADT2_REVISION_ID) {
                /* FADT rev. 2 */
                if (acpi_gbl_FADT.xpm_timer_block.space_id !=
                    ACPI_ADR_SPACE_SYSTEM_IO)
                        return 0;

                pmtmr_ioport = acpi_gbl_FADT.xpm_timer_block.address;
                /*
                 * "X" fields are optional extensions to the original V1.0
                 * fields, so we must selectively expand V1.0 fields if the
                 * corresponding X field is zero.
                 */
                if (!pmtmr_ioport)
                        pmtmr_ioport = acpi_gbl_FADT.pm_timer_block;
        } else {
                /* FADT rev. 1 */
                pmtmr_ioport = acpi_gbl_FADT.pm_timer_block;
        }
        if (pmtmr_ioport)
                printk(KERN_INFO PREFIX "PM-Timer IO Port: %#x\n",
                       pmtmr_ioport);
#endif
        return 0;
}

#ifdef CONFIG_X86_LOCAL_APIC
/*
 * Parse LAPIC entries in MADT
 * returns 0 on success, < 0 on error
 */

static void __init acpi_register_lapic_address(unsigned long address)
{
        mp_lapic_addr = address;

        set_fixmap_nocache(FIX_APIC_BASE, address);
        if (boot_cpu_physical_apicid == -1U)
                boot_cpu_physical_apicid = GET_APIC_ID(read_apic_id());
}

static int __init early_acpi_parse_madt_lapic_addr_ovr(void)
{
        int count;

        if (!cpu_has_apic)
                return -ENODEV;

        /*
         * Note that the LAPIC address is obtained from the MADT (32-bit value)
         * and (optionally) overridden by a LAPIC_ADDR_OVR entry (64-bit value).
         */
        count =
            acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_APIC_OVERRIDE,
                                  acpi_parse_lapic_addr_ovr, 0);
        if (count < 0) {
                printk(KERN_ERR PREFIX
                       "Error parsing LAPIC address override entry\n");
                return count;
        }

        acpi_register_lapic_address(acpi_lapic_addr);

        return count;
}

static int __init acpi_parse_madt_lapic_entries(void)
{
        int count;

        if (!cpu_has_apic)
                return -ENODEV;

        /*
         * Note that the LAPIC address is obtained from the MADT (32-bit value)
         * and (optionally) overridden by a LAPIC_ADDR_OVR entry (64-bit value).
         */
        count =
            acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_APIC_OVERRIDE,
                                  acpi_parse_lapic_addr_ovr, 0);
        if (count < 0) {
                printk(KERN_ERR PREFIX
                       "Error parsing LAPIC address override entry\n");
                return count;
        }

        acpi_register_lapic_address(acpi_lapic_addr);

        count = acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_SAPIC,
                                      acpi_parse_sapic, MAX_APICS);

        if (!count)
                count = acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_APIC,
                                              acpi_parse_lapic, MAX_APICS);
        if (!count) {
                printk(KERN_ERR PREFIX "No LAPIC entries present\n");
                /* TBD: Cleanup to allow fallback to MPS */
                return -ENODEV;
        } else if (count < 0) {
                printk(KERN_ERR PREFIX "Error parsing LAPIC entry\n");
                /* TBD: Cleanup to allow fallback to MPS */
                return count;
        }

        count =
            acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_APIC_NMI,
                                  acpi_parse_lapic_nmi, 0);
        if (count < 0) {
                printk(KERN_ERR PREFIX "Error parsing LAPIC NMI entry\n");
                /* TBD: Cleanup to allow fallback to MPS */
                return count;
        }
        return 0;
}
#endif				/* CONFIG_X86_LOCAL_APIC */

#ifdef CONFIG_X86_IO_APIC
#define MP_ISA_BUS 0

#if defined(CONFIG_X86_ES7000) || defined(CONFIG_X86_GENERICARCH)
extern int es7000_plat;
#endif

static struct {
        int apic_id;
        int gsi_base;
        int gsi_end;
        DECLARE_BITMAP(pin_programmed, MP_MAX_IOAPIC_PIN + 1);
} mp_ioapic_routing[MAX_IO_APICS];

static int mp_find_ioapic(int gsi)
{
        int i = 0;

        /* Find the IOAPIC that manages this GSI. */
        for (i = 0; i < nr_ioapics; i++) {
                if ((gsi >= mp_ioapic_routing[i].gsi_base)
                    && (gsi <= mp_ioapic_routing[i].gsi_end))
                        return i;
        }

        printk(KERN_ERR "ERROR: Unable to locate IOAPIC for GSI %d\n", gsi);
        return -1;
}

static u8 __init uniq_ioapic_id(u8 id)
{
#ifdef CONFIG_X86_32
        if ((boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) &&
            !APIC_XAPIC(apic_version[boot_cpu_physical_apicid]))
                return io_apic_get_unique_id(nr_ioapics, id);
        else
                return id;
#else
        int i;
        DECLARE_BITMAP(used, 256);
        bitmap_zero(used, 256);
        for (i = 0; i < nr_ioapics; i++) {
                struct mp_config_ioapic *ia = &mp_ioapics[i];
                __set_bit(ia->mp_apicid, used);
        }
        if (!test_bit(id, used))
                return id;
        return find_first_zero_bit(used, 256);
#endif
}

static int bad_ioapic(unsigned long address)
{
        if (nr_ioapics >= MAX_IO_APICS) {
                printk(KERN_ERR "ERROR: Max # of I/O APICs (%d) exceeded "
                       "(found %d)\n", MAX_IO_APICS, nr_ioapics);
                panic("Recompile kernel with bigger MAX_IO_APICS!\n");
        }
        if (!address) {
                printk(KERN_ERR "WARNING: Bogus (zero) I/O APIC address"
                       " found in table, skipping!\n");
                return 1;
        }
        return 0;
}

void __init mp_register_ioapic(int id, u32 address, u32 gsi_base)
{
        int idx = 0;

        if (bad_ioapic(address))
                return;

        idx = nr_ioapics;

        mp_ioapics[idx].mp_type = MP_IOAPIC;
        mp_ioapics[idx].mp_flags = MPC_APIC_USABLE;
        mp_ioapics[idx].mp_apicaddr = address;

        set_fixmap_nocache(FIX_IO_APIC_BASE_0 + idx, address);
        mp_ioapics[idx].mp_apicid = uniq_ioapic_id(id);
#ifdef CONFIG_X86_32
        mp_ioapics[idx].mp_apicver = io_apic_get_version(idx);
#else
        mp_ioapics[idx].mp_apicver = 0;
#endif
        /*
         * Build basic GSI lookup table to facilitate gsi->io_apic lookups
         * and to prevent reprogramming of IOAPIC pins (PCI GSIs).
         */
        mp_ioapic_routing[idx].apic_id = mp_ioapics[idx].mp_apicid;
        mp_ioapic_routing[idx].gsi_base = gsi_base;
        mp_ioapic_routing[idx].gsi_end = gsi_base +
            io_apic_get_redir_entries(idx);

        printk(KERN_INFO "IOAPIC[%d]: apic_id %d, version %d, address 0x%lx, "
               "GSI %d-%d\n", idx, mp_ioapics[idx].mp_apicid,
               mp_ioapics[idx].mp_apicver, mp_ioapics[idx].mp_apicaddr,
               mp_ioapic_routing[idx].gsi_base, mp_ioapic_routing[idx].gsi_end);

        nr_ioapics++;
}

void __init mp_override_legacy_irq(u8 bus_irq, u8 polarity, u8 trigger, u32 gsi)
{
        int ioapic = -1;
        int pin = -1;

        /*
         * Convert 'gsi' to 'ioapic.pin'.
         */
        ioapic = mp_find_ioapic(gsi);
        if (ioapic < 0)
                return;
        pin = gsi - mp_ioapic_routing[ioapic].gsi_base;

        /*
         * TBD: This check is for faulty timer entries, where the override
         *      erroneously sets the trigger to level, resulting in a HUGE
         *      increase of timer interrupts!
         */
        if ((bus_irq == 0) && (trigger == 3))
                trigger = 1;

        mp_irqs[mp_irq_entries].mp_type = MP_INTSRC;
        mp_irqs[mp_irq_entries].mp_irqtype = mp_INT;
        mp_irqs[mp_irq_entries].mp_irqflag = (trigger << 2) | polarity;
        mp_irqs[mp_irq_entries].mp_srcbus = MP_ISA_BUS;
        mp_irqs[mp_irq_entries].mp_srcbusirq = bus_irq;	/* IRQ */
        mp_irqs[mp_irq_entries].mp_dstapic =
            mp_ioapics[ioapic].mp_apicid;	/* APIC ID */
        mp_irqs[mp_irq_entries].mp_dstirq = pin;	/* INTIN# */

        if (++mp_irq_entries == MAX_IRQ_SOURCES)
                panic("Max # of irq sources exceeded!!\n");
}

void __init mp_config_acpi_legacy_irqs(void)
{
        int i = 0;
        int ioapic = -1;

#if defined (CONFIG_MCA) || defined (CONFIG_EISA)
        /*
         * Fabricate the legacy ISA bus (bus #31).
         */
        mp_bus_id_to_type[MP_ISA_BUS] = MP_BUS_ISA;
#endif
        set_bit(MP_ISA_BUS, mp_bus_not_pci);
        Dprintk("Bus #%d is ISA\n", MP_ISA_BUS);

#if defined(CONFIG_X86_ES7000) || defined(CONFIG_X86_GENERICARCH)
        /*
         * Older generations of ES7000 have no legacy identity mappings
         */
        if (es7000_plat == 1)
                return;
#endif

        /*
         * Locate the IOAPIC that manages the ISA IRQs (0-15).
         */
        ioapic = mp_find_ioapic(0);
        if (ioapic < 0)
                return;

        /*
         * Use the default configuration for IRQs 0-15 unless overridden
         * by (MADT) interrupt source override entries.
         */
        for (i = 0; i < 16; i++) {
                int idx;

                mp_irqs[mp_irq_entries].mp_type = MP_INTSRC;
                mp_irqs[mp_irq_entries].mp_irqflag = 0;	/* Conforming */
                mp_irqs[mp_irq_entries].mp_srcbus = MP_ISA_BUS;
                mp_irqs[mp_irq_entries].mp_dstapic = mp_ioapics[ioapic].mp_apicid;

                for (idx = 0; idx < mp_irq_entries; idx++) {
                        struct mp_config_intsrc *irq = mp_irqs + idx;

                        /* Do we already have a mapping for this ISA IRQ? */
                        if (irq->mp_srcbus == MP_ISA_BUS
                            && irq->mp_srcbusirq == i)
                                break;

                        /* Do we already have a mapping for this IOAPIC pin? */
                        if ((irq->mp_dstapic ==
                             mp_irqs[mp_irq_entries].mp_dstapic) &&
                            (irq->mp_dstirq == i))
                                break;
                }

                if (idx != mp_irq_entries) {
                        printk(KERN_DEBUG "ACPI: IRQ%d used by override.\n", i);
                        continue;	/* IRQ already used */
                }

                mp_irqs[mp_irq_entries].mp_irqtype = mp_INT;
                mp_irqs[mp_irq_entries].mp_srcbusirq = i;	/* Identity mapped */
                mp_irqs[mp_irq_entries].mp_dstirq = i;

                if (++mp_irq_entries == MAX_IRQ_SOURCES)
                        panic("Max # of irq sources exceeded!!\n");
        }
}

int mp_register_gsi(u32 gsi, int triggering, int polarity)
{
        int ioapic;
        int ioapic_pin;
#ifdef CONFIG_X86_32
#define MAX_GSI_NUM 4096
#define IRQ_COMPRESSION_START 64

        static int pci_irq = IRQ_COMPRESSION_START;
        /*
         * Mapping between Global System Interrupts, which
         * represent all possible interrupts, and IRQs
         * assigned to actual devices.
         */
        static int gsi_to_irq[MAX_GSI_NUM];
#else

        if (acpi_irq_model != ACPI_IRQ_MODEL_IOAPIC)
                return gsi;
#endif

        /* Don't set up the ACPI SCI because it's already set up */
        if (acpi_gbl_FADT.sci_interrupt == gsi)
                return gsi;

        ioapic = mp_find_ioapic(gsi);
        if (ioapic < 0) {
                printk(KERN_WARNING "No IOAPIC for GSI %u\n", gsi);
                return gsi;
        }

        ioapic_pin = gsi - mp_ioapic_routing[ioapic].gsi_base;

#ifdef CONFIG_X86_32
        if (ioapic_renumber_irq)
                gsi = ioapic_renumber_irq(ioapic, gsi);
#endif

        /*
         * Avoid pin reprogramming.  PRTs typically include entries
         * with redundant pin->gsi mappings (but unique PCI devices);
         * we only program the IOAPIC on the first.
         */
        if (ioapic_pin > MP_MAX_IOAPIC_PIN) {
                printk(KERN_ERR "Invalid reference to IOAPIC pin "
                       "%d-%d\n", mp_ioapic_routing[ioapic].apic_id,
                       ioapic_pin);
                return gsi;
        }
        if (test_bit(ioapic_pin, mp_ioapic_routing[ioapic].pin_programmed)) {
                Dprintk(KERN_DEBUG "Pin %d-%d already programmed\n",
                        mp_ioapic_routing[ioapic].apic_id, ioapic_pin);
#ifdef CONFIG_X86_32
                return (gsi < IRQ_COMPRESSION_START ? gsi : gsi_to_irq[gsi]);
#else
                return gsi;
#endif
        }

        set_bit(ioapic_pin, mp_ioapic_routing[ioapic].pin_programmed);
#ifdef CONFIG_X86_32
        /*
         * For GSI >= 64, use IRQ compression
         */
        if ((gsi >= IRQ_COMPRESSION_START)
            && (triggering == ACPI_LEVEL_SENSITIVE)) {
                /*
                 * For PCI devices assign IRQs in order, avoiding gaps
                 * due to unused I/O APIC pins.
                 */
                int irq = gsi;
                if (gsi < MAX_GSI_NUM) {
                        /*
                         * Retain the VIA chipset work-around (gsi > 15), but
                         * avoid a problem where the 8254 timer (IRQ0) is setup
                         * via an override (so it's not on pin 0 of the ioapic),
                         * and at the same time, the pin 0 interrupt is a PCI
                         * type.  The gsi > 15 test could cause these two pins
                         * to be shared as IRQ0, and they are not shareable.
                         * So test for this condition, and if necessary, avoid
                         * the pin collision.
                         */
                        gsi = pci_irq++;
                        /*
                         * Don't assign IRQ used by ACPI SCI
                         */
                        if (gsi == acpi_gbl_FADT.sci_interrupt)
                                gsi = pci_irq++;
                        gsi_to_irq[irq] = gsi;
                } else {
                        printk(KERN_ERR "GSI %u is too high\n", gsi);
                        return gsi;
                }
        }
#endif
        io_apic_set_pci_routing(ioapic, ioapic_pin, gsi,
                                triggering == ACPI_EDGE_SENSITIVE ? 0 : 1,
                                polarity == ACPI_ACTIVE_HIGH ? 0 : 1);
        return gsi;
}

int mp_config_acpi_gsi(unsigned char number, unsigned int devfn, u8 pin,
                       u32 gsi, int triggering, int polarity)
{
        struct mpc_config_intsrc intsrc;
        int ioapic;

        /* print the entry should happen on mptable identically */
        intsrc.mpc_type = MP_INTSRC;
        intsrc.mpc_irqtype = mp_INT;
        intsrc.mpc_irqflag = (triggering == ACPI_EDGE_SENSITIVE ? 4 : 0x0c) |
                             (polarity == ACPI_ACTIVE_HIGH ? 1 : 3);
        intsrc.mpc_srcbus = number;
        intsrc.mpc_srcbusirq = (((devfn >> 3) & 0x1f) << 2) | ((pin - 1) & 3);
        ioapic = mp_find_ioapic(gsi);
        intsrc.mpc_dstapic = mp_ioapic_routing[ioapic].apic_id;
        intsrc.mpc_dstirq = gsi - mp_ioapic_routing[ioapic].gsi_base;

        MP_intsrc_info(&intsrc);

        return 0;
}

/*
 * Parse IOAPIC related entries in MADT
 * returns 0 on success, < 0 on error
 */
static int __init acpi_parse_madt_ioapic_entries(void)
{
        int count;

        /*
         * ACPI interpreter is required to complete interrupt setup,
         * so if it is off, don't enumerate the io-apics with ACPI.
         * If MPS is present, it will handle them,
         * otherwise the system will stay in PIC mode
         */
        if (acpi_disabled || acpi_noirq)
                return -ENODEV;

        if (!cpu_has_apic)
                return -ENODEV;

        /*
         * if "noapic" boot option, don't look for IO-APICs
         */
        if (skip_ioapic_setup) {
                printk(KERN_INFO PREFIX "Skipping IOAPIC probe "
                       "due to 'noapic' option.\n");
                return -ENODEV;
        }

        count =
            acpi_table_parse_madt(ACPI_MADT_TYPE_IO_APIC, acpi_parse_ioapic,
                                  MAX_IO_APICS);
        if (!count) {
                printk(KERN_ERR PREFIX "No IOAPIC entries present\n");
                return -ENODEV;
        } else if (count < 0) {
                printk(KERN_ERR PREFIX "Error parsing IOAPIC entry\n");
                return count;
        }

        count =
            acpi_table_parse_madt(ACPI_MADT_TYPE_INTERRUPT_OVERRIDE,
                                  acpi_parse_int_src_ovr, NR_IRQ_VECTORS);
        if (count < 0) {
                printk(KERN_ERR PREFIX
                       "Error parsing interrupt source overrides entry\n");
                /* TBD: Cleanup to allow fallback to MPS */
                return count;
        }

        /*
         * If BIOS did not supply an INT_SRC_OVR for the SCI
         * pretend we got one so we can set the SCI flags.
         */
        if (!acpi_sci_override_gsi)
                acpi_sci_ioapic_setup(acpi_gbl_FADT.sci_interrupt, 0, 0);

        /* Fill in identity legacy mappings where no override */
        mp_config_acpi_legacy_irqs();

        count =
            acpi_table_parse_madt(ACPI_MADT_TYPE_NMI_SOURCE, acpi_parse_nmi_src,
                                  NR_IRQ_VECTORS);
        if (count < 0) {
                printk(KERN_ERR PREFIX "Error parsing NMI SRC entry\n");
                /* TBD: Cleanup to allow fallback to MPS */
                return count;
        }

        return 0;
}
#else
static inline int acpi_parse_madt_ioapic_entries(void)
{
        return -1;
}
#endif				/* !CONFIG_X86_IO_APIC */

static void __init early_acpi_process_madt(void)
{
#ifdef CONFIG_X86_LOCAL_APIC
        int error;

        if (!acpi_table_parse(ACPI_SIG_MADT, acpi_parse_madt)) {

                /*
                 * Parse MADT LAPIC entries
                 */
                error = early_acpi_parse_madt_lapic_addr_ovr();
                if (!error) {
                        acpi_lapic = 1;
                        smp_found_config = 1;
                }
                if (error == -EINVAL) {
                        /*
                         * Dell Precision Workstation 410, 610 come here.
                         */
                        printk(KERN_ERR PREFIX
                               "Invalid BIOS MADT, disabling ACPI\n");
                        disable_acpi();
                }
        }
#endif
}

static void __init acpi_process_madt(void)
{
#ifdef CONFIG_X86_LOCAL_APIC
        int error;

        if (!acpi_table_parse(ACPI_SIG_MADT, acpi_parse_madt)) {

                /*
                 * Parse MADT LAPIC entries
                 */
                error = acpi_parse_madt_lapic_entries();
                if (!error) {
                        acpi_lapic = 1;

#ifdef CONFIG_X86_GENERICARCH
                        generic_bigsmp_probe();
#endif
                        /*
                         * Parse MADT IO-APIC entries
                         */
                        error = acpi_parse_madt_ioapic_entries();
                        if (!error) {
                                acpi_irq_model = ACPI_IRQ_MODEL_IOAPIC;
                                acpi_irq_balance_set(NULL);
                                acpi_ioapic = 1;

                                smp_found_config = 1;
                                setup_apic_routing();
                        }
                }
                if (error == -EINVAL) {
                        /*
                         * Dell Precision Workstation 410, 610 come here.
                         */
                        printk(KERN_ERR PREFIX
                               "Invalid BIOS MADT, disabling ACPI\n");
                        disable_acpi();
                }
        }
#endif
        return;
}

#ifdef __i386__

static int __init disable_acpi_irq(const struct dmi_system_id *d)
{
        if (!acpi_force) {
                printk(KERN_NOTICE "%s detected: force use of acpi=noirq\n",
                       d->ident);
                acpi_noirq_set();
        }
        return 0;
}

static int __init disable_acpi_pci(const struct dmi_system_id *d)
{
        if (!acpi_force) {
                printk(KERN_NOTICE "%s detected: force use of pci=noacpi\n",
                       d->ident);
                acpi_disable_pci();
        }
        return 0;
}

static int __init dmi_disable_acpi(const struct dmi_system_id *d)
{
        if (!acpi_force) {
                printk(KERN_NOTICE "%s detected: acpi off\n", d->ident);
                disable_acpi();
        } else {
                printk(KERN_NOTICE
                       "Warning: DMI blacklist says broken, but acpi forced\n");
        }
        return 0;
}

/*
 * Limit ACPI to CPU enumeration for HT
 */
static int __init force_acpi_ht(const struct dmi_system_id *d)
{
        if (!acpi_force) {
                printk(KERN_NOTICE "%s detected: force use of acpi=ht\n",
                       d->ident);
                disable_acpi();
                acpi_ht = 1;
        } else {
                printk(KERN_NOTICE
                       "Warning: acpi=force overrules DMI blacklist: acpi=ht\n");
        }
        return 0;
}

/*
 * If your system is blacklisted here, but you find that acpi=force
 * works for you, please contact acpi-devel@sourceforge.net
 */
static struct dmi_system_id __initdata acpi_dmi_table[] = {
        /*
         * Boxes that need ACPI disabled
         */
        {
                .callback = dmi_disable_acpi,
                .ident = "IBM Thinkpad",
                .matches = {
                        DMI_MATCH(DMI_BOARD_VENDOR, "IBM"),
                        DMI_MATCH(DMI_BOARD_NAME, "2629H1G"),
                },
        },
        /*
         * Boxes that need acpi=ht
         */
        {
                .callback = force_acpi_ht,
                .ident = "FSC Primergy T850",
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"),
                        DMI_MATCH(DMI_PRODUCT_NAME, "PRIMERGY T850"),
                },
        },
        {
                .callback = force_acpi_ht,
                .ident = "HP VISUALIZE NT Workstation",
                .matches = {
                        DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"),
                        DMI_MATCH(DMI_PRODUCT_NAME, "HP VISUALIZE NT Workstation"),
                },
        },
        {
                .callback = force_acpi_ht,
                .ident = "Compaq Workstation W8000",
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR, "Compaq"),
                        DMI_MATCH(DMI_PRODUCT_NAME, "Workstation W8000"),
                },
        },
        {
                .callback = force_acpi_ht,
                .ident = "ASUS P4B266",
                .matches = {
                        DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
                        DMI_MATCH(DMI_BOARD_NAME, "P4B266"),
                },
        },
        {
                .callback = force_acpi_ht,
                .ident = "ASUS P2B-DS",
                .matches = {
                        DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
                        DMI_MATCH(DMI_BOARD_NAME, "P2B-DS"),
                },
        },
        {
                .callback = force_acpi_ht,
                .ident = "ASUS CUR-DLS",
                .matches = {
                        DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
                        DMI_MATCH(DMI_BOARD_NAME, "CUR-DLS"),
                },
        },
        {
                .callback = force_acpi_ht,
                .ident = "ABIT i440BX-W83977",
                .matches = {
                        DMI_MATCH(DMI_BOARD_VENDOR, "ABIT <http://www.abit.com>"),
                        DMI_MATCH(DMI_BOARD_NAME, "i440BX-W83977 (BP6)"),
                },
        },
        {
                .callback = force_acpi_ht,
                .ident = "IBM Bladecenter",
                .matches = {
                        DMI_MATCH(DMI_BOARD_VENDOR, "IBM"),
                        DMI_MATCH(DMI_BOARD_NAME, "IBM eServer BladeCenter HS20"),
                },
        },
        {
                .callback = force_acpi_ht,
                .ident = "IBM eServer xSeries 360",
                .matches = {
                        DMI_MATCH(DMI_BOARD_VENDOR, "IBM"),
                        DMI_MATCH(DMI_BOARD_NAME, "eServer xSeries 360"),
                },
        },
        {
                .callback = force_acpi_ht,
                .ident = "IBM eserver xSeries 330",
                .matches = {
                        DMI_MATCH(DMI_BOARD_VENDOR, "IBM"),
                        DMI_MATCH(DMI_BOARD_NAME, "eserver xSeries 330"),
                },
        },
        {
                .callback = force_acpi_ht,
                .ident = "IBM eserver xSeries 440",
                .matches = {
                        DMI_MATCH(DMI_BOARD_VENDOR, "IBM"),
                        DMI_MATCH(DMI_PRODUCT_NAME, "eserver xSeries 440"),
                },
        },
        /*
         * Boxes that need ACPI PCI IRQ routing disabled
         */
        {
                .callback = disable_acpi_irq,
                .ident = "ASUS A7V",
                .matches = {
                        DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC"),
                        DMI_MATCH(DMI_BOARD_NAME, "<A7V>"),
                        /* newer BIOS, Revision 1011, does work */
                        DMI_MATCH(DMI_BIOS_VERSION,
                                  "ASUS A7V ACPI BIOS Revision 1007"),
                },
        },
        {
                /*
                 * Latest BIOS for IBM 600E (1.16) has bad pcinum
                 * for LPC bridge, which is needed for the PCI
                 * interrupt links to work. DSDT fix is in bug 5966.
                 * 2645, 2646 model numbers are shared with 600/600E/600X
                 */
                .callback = disable_acpi_irq,
                .ident = "IBM Thinkpad 600 Series 2645",
                .matches = {
                        DMI_MATCH(DMI_BOARD_VENDOR, "IBM"),
                        DMI_MATCH(DMI_BOARD_NAME, "2645"),
                },
        },
        {
                .callback = disable_acpi_irq,
                .ident = "IBM Thinkpad 600 Series 2646",
                .matches = {
                        DMI_MATCH(DMI_BOARD_VENDOR, "IBM"),
                        DMI_MATCH(DMI_BOARD_NAME, "2646"),
                },
        },
        /*
         * Boxes that need ACPI PCI IRQ routing and PCI scan disabled
         */
        {	/* _BBN 0 bug */
                .callback = disable_acpi_pci,
                .ident = "ASUS PR-DLS",
                .matches = {
                        DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
                        DMI_MATCH(DMI_BOARD_NAME, "PR-DLS"),
                        DMI_MATCH(DMI_BIOS_VERSION,
                                  "ASUS PR-DLS ACPI BIOS Revision 1010"),
                        DMI_MATCH(DMI_BIOS_DATE, "03/21/2003")
                },
        },
        {
                .callback = disable_acpi_pci,
                .ident = "Acer TravelMate 36x Laptop",
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
                        DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 360"),
                },
        },
        {}
};

#endif				/* __i386__ */

/*
 * acpi_boot_table_init() and acpi_boot_init()
 *  called from setup_arch(), always.
 *	1. checksums all tables
 *	2. enumerates lapics
 *	3. enumerates io-apics
 *
 * acpi_table_init() is separate to allow reading SRAT without
 * other side effects.
 *
 * side effects of acpi_boot_init:
 *	acpi_lapic = 1 if LAPIC found
 *	acpi_ioapic = 1 if IOAPIC found
 *	if (acpi_lapic && acpi_ioapic) smp_found_config = 1;
 *	if acpi_blacklisted() acpi_disabled = 1;
 *	acpi_irq_model=...
 *	...
 *
 * return value: (currently ignored)
 *	0: success
 *	!0: failure
 */

int __init acpi_boot_table_init(void)
{
        int error;

#ifdef __i386__
        dmi_check_system(acpi_dmi_table);
#endif

        /*
         * If acpi_disabled, bail out
         * One exception: acpi=ht continues far enough to enumerate LAPICs
         */
        if (acpi_disabled && !acpi_ht)
                return 1;

        /*
         * Initialize the ACPI boot-time table parser.
         */
        error = acpi_table_init();
        if (error) {
                disable_acpi();
                return error;
        }

        acpi_table_parse(ACPI_SIG_BOOT, acpi_parse_sbf);

        /*
         * blacklist may disable ACPI entirely
         */
        error = acpi_blacklisted();
        if (error) {
                if (acpi_force) {
                        printk(KERN_WARNING PREFIX "acpi=force override\n");
                } else {
                        printk(KERN_WARNING PREFIX "Disabling ACPI support\n");
                        disable_acpi();
                        return error;
                }
        }

        return 0;
}

int __init early_acpi_boot_init(void)
{
        /*
         * If acpi_disabled, bail out
         * One exception: acpi=ht continues far enough to enumerate LAPICs
         */
        if (acpi_disabled && !acpi_ht)
                return 1;

        /*
         * Process the Multiple APIC Description Table (MADT), if present
         */
        early_acpi_process_madt();

        return 0;
}

int __init acpi_boot_init(void)
{
        /*
         * If acpi_disabled, bail out
         * One exception: acpi=ht continues far enough to enumerate LAPICs
         */
        if (acpi_disabled && !acpi_ht)
                return 1;

        acpi_table_parse(ACPI_SIG_BOOT, acpi_parse_sbf);

        /*
         * set sci_int and PM timer address
         */
        acpi_table_parse(ACPI_SIG_FADT, acpi_parse_fadt);

        /*
         * Process the Multiple APIC Description Table (MADT), if present
         */
        acpi_process_madt();

        acpi_table_parse(ACPI_SIG_HPET, acpi_parse_hpet);

        return 0;
}

static int __init parse_acpi(char *arg)
{
        if (!arg)
                return -EINVAL;

        /* "acpi=off" disables both ACPI table parsing and interpreter */
        if (strcmp(arg, "off") == 0) {
                disable_acpi();
        }
        /* acpi=force to over-ride black-list */
        else if (strcmp(arg, "force") == 0) {
                acpi_force = 1;
                acpi_ht = 1;
                acpi_disabled = 0;
        }
        /* acpi=strict disables out-of-spec workarounds */
        else if (strcmp(arg, "strict") == 0) {
                acpi_strict = 1;
        }
        /* Limit ACPI just to boot-time to enable HT */
        else if (strcmp(arg, "ht") == 0) {
                if (!acpi_force)
                        disable_acpi();
                acpi_ht = 1;
        }
        /* "acpi=noirq" disables ACPI interrupt routing */
        else if (strcmp(arg, "noirq") == 0) {
                acpi_noirq_set();
        } else {
                /* Core will printk when we return error. */
                return -EINVAL;
        }
        return 0;
}
early_param("acpi", parse_acpi);

/* FIXME: Using pci= for an ACPI parameter is a travesty. */
static int __init parse_pci(char *arg)
{
        if (arg && strcmp(arg, "noacpi") == 0)
                acpi_disable_pci();
        return 0;
}
early_param("pci", parse_pci);

#ifdef CONFIG_X86_IO_APIC
static int __init parse_acpi_skip_timer_override(char *arg)
{
        acpi_skip_timer_override = 1;
        return 0;
}
early_param("acpi_skip_timer_override", parse_acpi_skip_timer_override);

static int __init parse_acpi_use_timer_override(char *arg)
{
        acpi_use_timer_override = 1;
        return 0;
}
early_param("acpi_use_timer_override", parse_acpi_use_timer_override);
#endif				/* CONFIG_X86_IO_APIC */

static int __init setup_acpi_sci(char *s)
{
        if (!s)
                return -EINVAL;
        if (!strcmp(s, "edge"))
                acpi_sci_flags = ACPI_MADT_TRIGGER_EDGE |
                        (acpi_sci_flags & ~ACPI_MADT_TRIGGER_MASK);
        else if (!strcmp(s, "level"))
                acpi_sci_flags = ACPI_MADT_TRIGGER_LEVEL |
                        (acpi_sci_flags & ~ACPI_MADT_TRIGGER_MASK);
        else if (!strcmp(s, "high"))
                acpi_sci_flags = ACPI_MADT_POLARITY_ACTIVE_HIGH |
                        (acpi_sci_flags & ~ACPI_MADT_POLARITY_MASK);
        else if (!strcmp(s, "low"))
                acpi_sci_flags = ACPI_MADT_POLARITY_ACTIVE_LOW |
                        (acpi_sci_flags & ~ACPI_MADT_POLARITY_MASK);
        else
                return -EINVAL;
        return 0;
}
early_param("acpi_sci", setup_acpi_sci);

/*
 * ACPI global lock (in the FACS): bit 0 is the "pending" flag, bit 1 the
 * "owned" flag.  Acquire returns -1 once the lock has been acquired, or 0
 * if it is already owned (pending is then set and the caller must wait for
 * the owner's release).  Release returns the old pending bit so the caller
 * knows whether waiters must be signalled.
 */
int __acpi_acquire_global_lock(unsigned int *lock)
{
        unsigned int old, new, val;
        do {
                old = *lock;
                new = (((old & ~0x3) + 2) + ((old >> 1) & 0x1));
                val = cmpxchg(lock, old, new);
        } while (unlikely(val != old));
        return (new < 3) ? -1 : 0;
}

int __acpi_release_global_lock(unsigned int *lock)
{
        unsigned int old, new, val;
        do {
                old = *lock;
                new = old & ~0x3;
                val = cmpxchg(lock, old, new);
        } while (unlikely(val != old));
        return old & 0x1;
}