boot.c

  1. /*
  2. * boot.c - Architecture-Specific Low-Level ACPI Boot Support
  3. *
  4. * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
  5. * Copyright (C) 2001 Jun Nakajima <jun.nakajima@intel.com>
  6. *
  7. * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  8. *
  9. * This program is free software; you can redistribute it and/or modify
  10. * it under the terms of the GNU General Public License as published by
  11. * the Free Software Foundation; either version 2 of the License, or
  12. * (at your option) any later version.
  13. *
  14. * This program is distributed in the hope that it will be useful,
  15. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  16. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  17. * GNU General Public License for more details.
  18. *
  19. * You should have received a copy of the GNU General Public License
  20. * along with this program; if not, write to the Free Software
  21. * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
  22. *
  23. * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  24. */
  25. #include <linux/init.h>
  26. #include <linux/acpi.h>
  27. #include <linux/acpi_pmtmr.h>
  28. #include <linux/efi.h>
  29. #include <linux/cpumask.h>
  30. #include <linux/module.h>
  31. #include <linux/dmi.h>
  32. #include <linux/irq.h>
  33. #include <linux/bootmem.h>
  34. #include <linux/ioport.h>
  35. #include <asm/pgtable.h>
  36. #include <asm/io_apic.h>
  37. #include <asm/apic.h>
  38. #include <asm/io.h>
  39. #include <asm/mpspec.h>
  40. #include <asm/smp.h>
  41. #ifdef CONFIG_X86_LOCAL_APIC
  42. # include <mach_apic.h>
  43. #endif
  44. static int __initdata acpi_force = 0;
  45. #ifdef CONFIG_ACPI
  46. int acpi_disabled = 0;
  47. #else
  48. int acpi_disabled = 1;
  49. #endif
  50. EXPORT_SYMBOL(acpi_disabled);
  51. #ifdef CONFIG_X86_64
  52. #include <asm/proto.h>
  53. #include <asm/genapic.h>
  54. #else /* X86 */
  55. #ifdef CONFIG_X86_LOCAL_APIC
  56. #include <mach_apic.h>
  57. #include <mach_mpparse.h>
  58. #endif /* CONFIG_X86_LOCAL_APIC */
  59. #endif /* X86 */
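/*
 * A MADT subtable entry is rejected if it is NULL, extends past the end of
 * the table, or reports a length smaller than its own structure.
 */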
  60. #define BAD_MADT_ENTRY(entry, end) ( \
  61. (!entry) || (unsigned long)entry + sizeof(*entry) > end || \
  62. ((struct acpi_subtable_header *)entry)->length < sizeof(*entry))
  63. #define PREFIX "ACPI: "
  64. int acpi_noirq; /* skip ACPI IRQ initialization */
  65. int acpi_pci_disabled; /* skip ACPI PCI scan and IRQ initialization */
  66. EXPORT_SYMBOL(acpi_pci_disabled);
  67. int acpi_ht __initdata = 1; /* enable HT */
  68. int acpi_lapic;
  69. int acpi_ioapic;
  70. int acpi_strict;
  71. u8 acpi_sci_flags __initdata;
  72. int acpi_sci_override_gsi __initdata;
  73. int acpi_skip_timer_override __initdata;
  74. int acpi_use_timer_override __initdata;
  75. #ifdef CONFIG_X86_LOCAL_APIC
  76. static u64 acpi_lapic_addr __initdata = APIC_DEFAULT_PHYS_BASE;
  77. #endif
  78. #ifndef __HAVE_ARCH_CMPXCHG
  79. #warning ACPI uses CMPXCHG, which requires an i486 or later CPU
  80. #endif
  81. /* --------------------------------------------------------------------------
  82. Boot-time Configuration
  83. -------------------------------------------------------------------------- */
  84. /*
  85. * The default interrupt routing model is PIC (8259). This gets
  86. * overridden if IOAPICs are enumerated (below).
  87. */
  88. enum acpi_irq_model_id acpi_irq_model = ACPI_IRQ_MODEL_PIC;
  89. #ifdef CONFIG_X86_64
  90. /* rely on all ACPI tables being in the direct mapping */
  91. char *__init __acpi_map_table(unsigned long phys_addr, unsigned long size)
  92. {
  93. if (!phys_addr || !size)
  94. return NULL;
  95. if (phys_addr+size <= (max_pfn_mapped << PAGE_SHIFT) + PAGE_SIZE)
  96. return __va(phys_addr);
  97. return NULL;
  98. }
  99. #else
  100. /*
  101. * Temporarily use the virtual area starting from FIX_IO_APIC_BASE_END,
  102. * to map the target physical address. The problem is that set_fixmap()
  103. * provides a single page, and it is possible that the page is not
  104. * sufficient.
  105. * By using this area, we can map up to MAX_IO_APICS pages temporarily,
  106. * i.e. until the next __acpi_map_table() call.
  107. *
  108. * Important Safety Note: The fixed I/O APIC page numbers are *subtracted*
  109. * from the fixed base. That's why we start at FIX_IO_APIC_BASE_END and
  110. * count idx down while incrementing the phys address.
  111. */
  112. char *__init __acpi_map_table(unsigned long phys, unsigned long size)
  113. {
  114. unsigned long base, offset, mapped_size;
  115. int idx;
  116. if (phys + size < 8 * 1024 * 1024)
  117. return __va(phys);
  118. offset = phys & (PAGE_SIZE - 1);
  119. mapped_size = PAGE_SIZE - offset;
  120. set_fixmap(FIX_ACPI_END, phys);
  121. base = fix_to_virt(FIX_ACPI_END);
  122. /*
  123. * Most cases can be covered by the below.
  124. */
  125. idx = FIX_ACPI_END;
  126. while (mapped_size < size) {
  127. if (--idx < FIX_ACPI_BEGIN)
  128. return NULL; /* cannot handle this */
  129. phys += PAGE_SIZE;
  130. set_fixmap(idx, phys);
  131. mapped_size += PAGE_SIZE;
  132. }
  133. return ((unsigned char *)base + offset);
  134. }
  135. #endif
  136. #ifdef CONFIG_PCI_MMCONFIG
  137. /* The physical address of the MMCONFIG aperture. Set from ACPI tables. */
  138. struct acpi_mcfg_allocation *pci_mmcfg_config;
  139. int pci_mmcfg_config_num;
  140. int __init acpi_parse_mcfg(struct acpi_table_header *header)
  141. {
  142. struct acpi_table_mcfg *mcfg;
  143. unsigned long i;
  144. int config_size;
  145. if (!header)
  146. return -EINVAL;
  147. mcfg = (struct acpi_table_mcfg *)header;
  148. /* how many config structures do we have */
  149. pci_mmcfg_config_num = 0;
  150. i = header->length - sizeof(struct acpi_table_mcfg);
  151. while (i >= sizeof(struct acpi_mcfg_allocation)) {
  152. ++pci_mmcfg_config_num;
  153. i -= sizeof(struct acpi_mcfg_allocation);
  154. }
  155. if (pci_mmcfg_config_num == 0) {
  156. printk(KERN_ERR PREFIX "MMCONFIG has no entries\n");
  157. return -ENODEV;
  158. }
  159. config_size = pci_mmcfg_config_num * sizeof(*pci_mmcfg_config);
  160. pci_mmcfg_config = kmalloc(config_size, GFP_KERNEL);
  161. if (!pci_mmcfg_config) {
  162. printk(KERN_WARNING PREFIX
  163. "No memory for MCFG config tables\n");
  164. return -ENOMEM;
  165. }
  166. memcpy(pci_mmcfg_config, &mcfg[1], config_size);
  167. for (i = 0; i < pci_mmcfg_config_num; ++i) {
  168. if (pci_mmcfg_config[i].address > 0xFFFFFFFF) {
  169. printk(KERN_ERR PREFIX
  170. "MMCONFIG not in low 4GB of memory\n");
  171. kfree(pci_mmcfg_config);
  172. pci_mmcfg_config_num = 0;
  173. return -ENODEV;
  174. }
  175. }
  176. return 0;
  177. }
  178. #endif /* CONFIG_PCI_MMCONFIG */
  179. #ifdef CONFIG_X86_LOCAL_APIC
  180. static int __init acpi_parse_madt(struct acpi_table_header *table)
  181. {
  182. struct acpi_table_madt *madt = NULL;
  183. if (!cpu_has_apic)
  184. return -EINVAL;
  185. madt = (struct acpi_table_madt *)table;
  186. if (!madt) {
  187. printk(KERN_WARNING PREFIX "Unable to map MADT\n");
  188. return -ENODEV;
  189. }
  190. if (madt->address) {
  191. acpi_lapic_addr = (u64) madt->address;
  192. printk(KERN_DEBUG PREFIX "Local APIC address 0x%08x\n",
  193. madt->address);
  194. }
  195. acpi_madt_oem_check(madt->header.oem_id, madt->header.oem_table_id);
  196. return 0;
  197. }
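/*
 * Hand one Local APIC entry to the generic CPU enumeration code.  Disabled
 * processors are only counted, so that the possible-CPU map can later be
 * sized correctly for CPU hotplug.
 */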
  198. static void __cpuinit acpi_register_lapic(int id, u8 enabled)
  199. {
  200. unsigned int ver = 0;
  201. if (!enabled) {
  202. ++disabled_cpus;
  203. return;
  204. }
  205. #ifdef CONFIG_X86_32
  206. if (boot_cpu_physical_apicid != -1U)
  207. ver = apic_version[boot_cpu_physical_apicid];
  208. #endif
  209. generic_processor_info(id, ver);
  210. }
  211. static int __init
  212. acpi_parse_lapic(struct acpi_subtable_header * header, const unsigned long end)
  213. {
  214. struct acpi_madt_local_apic *processor = NULL;
  215. processor = (struct acpi_madt_local_apic *)header;
  216. if (BAD_MADT_ENTRY(processor, end))
  217. return -EINVAL;
  218. acpi_table_print_madt_entry(header);
  219. /*
  220. * We need to register disabled CPUs as well, to permit
  221. * counting them. This allows us to size
  222. * cpus_possible_map more accurately and to avoid
  223. * preallocating memory for all NR_CPUS
  224. * when we use CPU hotplug.
  225. */
  226. acpi_register_lapic(processor->id, /* APIC ID */
  227. processor->lapic_flags & ACPI_MADT_ENABLED);
  228. return 0;
  229. }
  230. static int __init
  231. acpi_parse_sapic(struct acpi_subtable_header *header, const unsigned long end)
  232. {
  233. struct acpi_madt_local_sapic *processor = NULL;
  234. processor = (struct acpi_madt_local_sapic *)header;
  235. if (BAD_MADT_ENTRY(processor, end))
  236. return -EINVAL;
  237. acpi_table_print_madt_entry(header);
  238. acpi_register_lapic((processor->id << 8) | processor->eid,/* APIC ID */
  239. processor->lapic_flags & ACPI_MADT_ENABLED);
  240. return 0;
  241. }
  242. static int __init
  243. acpi_parse_lapic_addr_ovr(struct acpi_subtable_header * header,
  244. const unsigned long end)
  245. {
  246. struct acpi_madt_local_apic_override *lapic_addr_ovr = NULL;
  247. lapic_addr_ovr = (struct acpi_madt_local_apic_override *)header;
  248. if (BAD_MADT_ENTRY(lapic_addr_ovr, end))
  249. return -EINVAL;
  250. acpi_lapic_addr = lapic_addr_ovr->address;
  251. return 0;
  252. }
  253. static int __init
  254. acpi_parse_lapic_nmi(struct acpi_subtable_header * header, const unsigned long end)
  255. {
  256. struct acpi_madt_local_apic_nmi *lapic_nmi = NULL;
  257. lapic_nmi = (struct acpi_madt_local_apic_nmi *)header;
  258. if (BAD_MADT_ENTRY(lapic_nmi, end))
  259. return -EINVAL;
  260. acpi_table_print_madt_entry(header);
  261. if (lapic_nmi->lint != 1)
  262. printk(KERN_WARNING PREFIX "NMI not connected to LINT 1!\n");
  263. return 0;
  264. }
  265. #endif /*CONFIG_X86_LOCAL_APIC */
  266. #ifdef CONFIG_X86_IO_APIC
  267. static int __init
  268. acpi_parse_ioapic(struct acpi_subtable_header * header, const unsigned long end)
  269. {
  270. struct acpi_madt_io_apic *ioapic = NULL;
  271. ioapic = (struct acpi_madt_io_apic *)header;
  272. if (BAD_MADT_ENTRY(ioapic, end))
  273. return -EINVAL;
  274. acpi_table_print_madt_entry(header);
  275. mp_register_ioapic(ioapic->id,
  276. ioapic->address, ioapic->global_irq_base);
  277. return 0;
  278. }
  279. /*
  280. * Parse Interrupt Source Override for the ACPI SCI
  281. */
  282. static void __init acpi_sci_ioapic_setup(u32 gsi, u16 polarity, u16 trigger)
  283. {
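/*
 * Trigger and polarity use the MADT INTI flag encoding:
 * trigger 1 = edge, 3 = level; polarity 1 = active high, 3 = active low.
 */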
  284. if (trigger == 0) /* compatible SCI trigger is level */
  285. trigger = 3;
  286. if (polarity == 0) /* compatible SCI polarity is low */
  287. polarity = 3;
  288. /* Command-line over-ride via acpi_sci= */
  289. if (acpi_sci_flags & ACPI_MADT_TRIGGER_MASK)
  290. trigger = (acpi_sci_flags & ACPI_MADT_TRIGGER_MASK) >> 2;
  291. if (acpi_sci_flags & ACPI_MADT_POLARITY_MASK)
  292. polarity = acpi_sci_flags & ACPI_MADT_POLARITY_MASK;
  293. /*
  294. * mp_config_acpi_legacy_irqs() already setup IRQs < 16
  295. * If GSI is < 16, this will update its flags,
  296. * else it will create a new mp_irqs[] entry.
  297. */
  298. mp_override_legacy_irq(gsi, polarity, trigger, gsi);
  299. /*
  300. * stash over-ride to indicate we've been here
  301. * and for later update of acpi_gbl_FADT
  302. */
  303. acpi_sci_override_gsi = gsi;
  304. return;
  305. }
  306. static int __init
  307. acpi_parse_int_src_ovr(struct acpi_subtable_header * header,
  308. const unsigned long end)
  309. {
  310. struct acpi_madt_interrupt_override *intsrc = NULL;
  311. intsrc = (struct acpi_madt_interrupt_override *)header;
  312. if (BAD_MADT_ENTRY(intsrc, end))
  313. return -EINVAL;
  314. acpi_table_print_madt_entry(header);
  315. if (intsrc->source_irq == acpi_gbl_FADT.sci_interrupt) {
  316. acpi_sci_ioapic_setup(intsrc->global_irq,
  317. intsrc->inti_flags & ACPI_MADT_POLARITY_MASK,
  318. (intsrc->inti_flags & ACPI_MADT_TRIGGER_MASK) >> 2);
  319. return 0;
  320. }
  321. if (acpi_skip_timer_override &&
  322. intsrc->source_irq == 0 && intsrc->global_irq == 2) {
  323. printk(PREFIX "BIOS IRQ0 pin2 override ignored.\n");
  324. return 0;
  325. }
  326. mp_override_legacy_irq(intsrc->source_irq,
  327. intsrc->inti_flags & ACPI_MADT_POLARITY_MASK,
  328. (intsrc->inti_flags & ACPI_MADT_TRIGGER_MASK) >> 2,
  329. intsrc->global_irq);
  330. return 0;
  331. }
  332. static int __init
  333. acpi_parse_nmi_src(struct acpi_subtable_header * header, const unsigned long end)
  334. {
  335. struct acpi_madt_nmi_source *nmi_src = NULL;
  336. nmi_src = (struct acpi_madt_nmi_source *)header;
  337. if (BAD_MADT_ENTRY(nmi_src, end))
  338. return -EINVAL;
  339. acpi_table_print_madt_entry(header);
  340. /* TBD: Support NMI source (nmi_src) entries? */
  341. return 0;
  342. }
  343. #endif /* CONFIG_X86_IO_APIC */
  344. /*
  345. * acpi_pic_sci_set_trigger()
  346. *
  347. * use ELCR to set PIC-mode trigger type for SCI
  348. *
  349. * If a PIC-mode SCI is not recognized or gives spurious IRQ7's
  350. * it may require Edge Trigger -- use "acpi_sci=edge"
  351. *
  352. * Ports 0x4d0-0x4d1 are ELCR1 and ELCR2, the Edge/Level Control Registers
  353. * for the 8259 PIC. bit[n] = 1 means irq[n] is Level, otherwise Edge.
  354. * ELCR1 covers IRQs 0-7 (IRQ 0, 1, 2 must be 0)
  355. * ELCR2 covers IRQs 8-15 (IRQ 8, 13 must be 0)
  356. */
  357. void __init acpi_pic_sci_set_trigger(unsigned int irq, u16 trigger)
  358. {
  359. unsigned int mask = 1 << irq;
  360. unsigned int old, new;
  361. /* Real old ELCR mask */
  362. old = inb(0x4d0) | (inb(0x4d1) << 8);
  363. /*
  364. * If we use ACPI to set PCI IRQs, then we should clear ELCR
  365. * since we will set it correctly as we enable the PCI irq
  366. * routing.
  367. */
  368. new = acpi_noirq ? old : 0;
  369. /*
  370. * Update SCI information in the ELCR, it isn't in the PCI
  371. * routing tables..
  372. */
  373. switch (trigger) {
  374. case 1: /* Edge - clear */
  375. new &= ~mask;
  376. break;
  377. case 3: /* Level - set */
  378. new |= mask;
  379. break;
  380. }
  381. if (old == new)
  382. return;
  383. printk(PREFIX "setting ELCR to %04x (from %04x)\n", new, old);
  384. outb(new, 0x4d0);
  385. outb(new >> 8, 0x4d1);
  386. }
  387. int acpi_gsi_to_irq(u32 gsi, unsigned int *irq)
  388. {
  389. *irq = gsi;
  390. return 0;
  391. }
  392. /*
  393. * success: return IRQ number (>=0)
  394. * failure: return < 0
  395. */
  396. int acpi_register_gsi(u32 gsi, int triggering, int polarity)
  397. {
  398. unsigned int irq;
  399. unsigned int plat_gsi = gsi;
  400. #ifdef CONFIG_PCI
  401. /*
  402. * Make sure all (legacy) PCI IRQs are set as level-triggered.
  403. */
  404. if (acpi_irq_model == ACPI_IRQ_MODEL_PIC) {
  405. extern void eisa_set_level_irq(unsigned int irq);
  406. if (triggering == ACPI_LEVEL_SENSITIVE)
  407. eisa_set_level_irq(gsi);
  408. }
  409. #endif
  410. #ifdef CONFIG_X86_IO_APIC
  411. if (acpi_irq_model == ACPI_IRQ_MODEL_IOAPIC) {
  412. plat_gsi = mp_register_gsi(gsi, triggering, polarity);
  413. }
  414. #endif
  415. acpi_gsi_to_irq(plat_gsi, &irq);
  416. return irq;
  417. }
  418. /*
  419. * ACPI based hotplug support for CPU
  420. */
  421. #ifdef CONFIG_ACPI_HOTPLUG_CPU
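/*
 * Evaluate the processor object's _MAT method, which returns its MADT
 * Local APIC entry, register that APIC ID, and report the logical CPU
 * number the new processor was assigned.
 */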
  422. static int __cpuinit _acpi_map_lsapic(acpi_handle handle, int *pcpu)
  423. {
  424. struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
  425. union acpi_object *obj;
  426. struct acpi_madt_local_apic *lapic;
  427. cpumask_t tmp_map, new_map;
  428. u8 physid;
  429. int cpu;
  430. if (ACPI_FAILURE(acpi_evaluate_object(handle, "_MAT", NULL, &buffer)))
  431. return -EINVAL;
  432. if (!buffer.length || !buffer.pointer)
  433. return -EINVAL;
  434. obj = buffer.pointer;
  435. if (obj->type != ACPI_TYPE_BUFFER ||
  436. obj->buffer.length < sizeof(*lapic)) {
  437. kfree(buffer.pointer);
  438. return -EINVAL;
  439. }
  440. lapic = (struct acpi_madt_local_apic *)obj->buffer.pointer;
  441. if (lapic->header.type != ACPI_MADT_TYPE_LOCAL_APIC ||
  442. !(lapic->lapic_flags & ACPI_MADT_ENABLED)) {
  443. kfree(buffer.pointer);
  444. return -EINVAL;
  445. }
  446. physid = lapic->id;
  447. kfree(buffer.pointer);
  448. buffer.length = ACPI_ALLOCATE_BUFFER;
  449. buffer.pointer = NULL;
  450. tmp_map = cpu_present_map;
  451. acpi_register_lapic(physid, lapic->lapic_flags & ACPI_MADT_ENABLED);
  452. /*
  453. * If acpi_register_lapic successfully generates a new logical cpu
  454. * number, then the following will get us exactly what was mapped
  455. */
  456. cpus_andnot(new_map, cpu_present_map, tmp_map);
  457. if (cpus_empty(new_map)) {
  458. printk ("Unable to map lapic to logical cpu number\n");
  459. return -EINVAL;
  460. }
  461. cpu = first_cpu(new_map);
  462. *pcpu = cpu;
  463. return 0;
  464. }
  465. /* wrapper to silence section mismatch warning */
  466. int __ref acpi_map_lsapic(acpi_handle handle, int *pcpu)
  467. {
  468. return _acpi_map_lsapic(handle, pcpu);
  469. }
  470. EXPORT_SYMBOL(acpi_map_lsapic);
  471. int acpi_unmap_lsapic(int cpu)
  472. {
  473. per_cpu(x86_cpu_to_apicid, cpu) = -1;
  474. cpu_clear(cpu, cpu_present_map);
  475. num_processors--;
  476. return (0);
  477. }
  478. EXPORT_SYMBOL(acpi_unmap_lsapic);
  479. #endif /* CONFIG_ACPI_HOTPLUG_CPU */
  480. int acpi_register_ioapic(acpi_handle handle, u64 phys_addr, u32 gsi_base)
  481. {
  482. /* TBD */
  483. return -EINVAL;
  484. }
  485. EXPORT_SYMBOL(acpi_register_ioapic);
  486. int acpi_unregister_ioapic(acpi_handle handle, u32 gsi_base)
  487. {
  488. /* TBD */
  489. return -EINVAL;
  490. }
  491. EXPORT_SYMBOL(acpi_unregister_ioapic);
  492. static int __init acpi_parse_sbf(struct acpi_table_header *table)
  493. {
  494. struct acpi_table_boot *sb;
  495. sb = (struct acpi_table_boot *)table;
  496. if (!sb) {
  497. printk(KERN_WARNING PREFIX "Unable to map SBF\n");
  498. return -ENODEV;
  499. }
  500. sbf_port = sb->cmos_index; /* Save CMOS port */
  501. return 0;
  502. }
  503. #ifdef CONFIG_HPET_TIMER
  504. #include <asm/hpet.h>
  505. static struct __initdata resource *hpet_res;
  506. static int __init acpi_parse_hpet(struct acpi_table_header *table)
  507. {
  508. struct acpi_table_hpet *hpet_tbl;
  509. hpet_tbl = (struct acpi_table_hpet *)table;
  510. if (!hpet_tbl) {
  511. printk(KERN_WARNING PREFIX "Unable to map HPET\n");
  512. return -ENODEV;
  513. }
  514. if (hpet_tbl->address.space_id != ACPI_SPACE_MEM) {
  515. printk(KERN_WARNING PREFIX "HPET timers must be located in "
  516. "memory.\n");
  517. return -1;
  518. }
  519. hpet_address = hpet_tbl->address.address;
  520. /*
  521. * Some broken BIOSes advertise HPET at 0x0. We really do not
  522. * want to allocate a resource there.
  523. */
  524. if (!hpet_address) {
  525. printk(KERN_WARNING PREFIX
  526. "HPET id: %#x base: %#lx is invalid\n",
  527. hpet_tbl->id, hpet_address);
  528. return 0;
  529. }
  530. #ifdef CONFIG_X86_64
  531. /*
  532. * Some even more broken BIOSes advertise HPET at
  533. * 0xfed0000000000000 instead of 0xfed00000. Fix it up and add
  534. * some noise:
  535. */
  536. if (hpet_address == 0xfed0000000000000UL) {
  537. if (!hpet_force_user) {
  538. printk(KERN_WARNING PREFIX "HPET id: %#x "
  539. "base: 0xfed0000000000000 is bogus\n "
  540. "try hpet=force on the kernel command line to "
  541. "fix it up to 0xfed00000.\n", hpet_tbl->id);
  542. hpet_address = 0;
  543. return 0;
  544. }
  545. printk(KERN_WARNING PREFIX
  546. "HPET id: %#x base: 0xfed0000000000000 fixed up "
  547. "to 0xfed00000.\n", hpet_tbl->id);
  548. hpet_address >>= 32;
  549. }
  550. #endif
  551. printk(KERN_INFO PREFIX "HPET id: %#x base: %#lx\n",
  552. hpet_tbl->id, hpet_address);
  553. /*
  554. * Allocate and initialize the HPET firmware resource for adding into
  555. * the resource tree during the lateinit timeframe.
  556. */
  557. #define HPET_RESOURCE_NAME_SIZE 9
  558. hpet_res = alloc_bootmem(sizeof(*hpet_res) + HPET_RESOURCE_NAME_SIZE);
  559. hpet_res->name = (void *)&hpet_res[1];
  560. hpet_res->flags = IORESOURCE_MEM;
  561. snprintf((char *)hpet_res->name, HPET_RESOURCE_NAME_SIZE, "HPET %u",
  562. hpet_tbl->sequence);
  563. hpet_res->start = hpet_address;
  564. hpet_res->end = hpet_address + (1 * 1024) - 1;
  565. return 0;
  566. }
  567. /*
  568. * hpet_insert_resource inserts the HPET resources used into the resource
  569. * tree.
  570. */
  571. static __init int hpet_insert_resource(void)
  572. {
  573. if (!hpet_res)
  574. return 1;
  575. return insert_resource(&iomem_resource, hpet_res);
  576. }
  577. late_initcall(hpet_insert_resource);
  578. #else
  579. #define acpi_parse_hpet NULL
  580. #endif
  581. static int __init acpi_parse_fadt(struct acpi_table_header *table)
  582. {
  583. #ifdef CONFIG_X86_PM_TIMER
  584. /* detect the location of the ACPI PM Timer */
  585. if (acpi_gbl_FADT.header.revision >= FADT2_REVISION_ID) {
  586. /* FADT rev. 2 */
  587. if (acpi_gbl_FADT.xpm_timer_block.space_id !=
  588. ACPI_ADR_SPACE_SYSTEM_IO)
  589. return 0;
  590. pmtmr_ioport = acpi_gbl_FADT.xpm_timer_block.address;
  591. /*
  592. * "X" fields are optional extensions to the original V1.0
  593. * fields, so we must selectively expand V1.0 fields if the
  594. * corresponding X field is zero.
  595. */
  596. if (!pmtmr_ioport)
  597. pmtmr_ioport = acpi_gbl_FADT.pm_timer_block;
  598. } else {
  599. /* FADT rev. 1 */
  600. pmtmr_ioport = acpi_gbl_FADT.pm_timer_block;
  601. }
  602. if (pmtmr_ioport)
  603. printk(KERN_INFO PREFIX "PM-Timer IO Port: %#x\n",
  604. pmtmr_ioport);
  605. #endif
  606. return 0;
  607. }
  608. #ifdef CONFIG_X86_LOCAL_APIC
  609. /*
  610. * Parse LAPIC entries in MADT
  611. * returns 0 on success, < 0 on error
  612. */
  613. static void __init acpi_register_lapic_address(unsigned long address)
  614. {
  615. mp_lapic_addr = address;
  616. set_fixmap_nocache(FIX_APIC_BASE, address);
  617. if (boot_cpu_physical_apicid == -1U) {
  618. boot_cpu_physical_apicid = GET_APIC_ID(read_apic_id());
  619. #ifdef CONFIG_X86_32
  620. apic_version[boot_cpu_physical_apicid] =
  621. GET_APIC_VERSION(apic_read(APIC_LVR));
  622. #endif
  623. }
  624. }
  625. static int __init early_acpi_parse_madt_lapic_addr_ovr(void)
  626. {
  627. int count;
  628. if (!cpu_has_apic)
  629. return -ENODEV;
  630. /*
  631. * Note that the LAPIC address is obtained from the MADT (32-bit value)
  632. * and (optionally) overridden by a LAPIC_ADDR_OVR entry (64-bit value).
  633. */
  634. count =
  635. acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_APIC_OVERRIDE,
  636. acpi_parse_lapic_addr_ovr, 0);
  637. if (count < 0) {
  638. printk(KERN_ERR PREFIX
  639. "Error parsing LAPIC address override entry\n");
  640. return count;
  641. }
  642. acpi_register_lapic_address(acpi_lapic_addr);
  643. return count;
  644. }
  645. static int __init acpi_parse_madt_lapic_entries(void)
  646. {
  647. int count;
  648. if (!cpu_has_apic)
  649. return -ENODEV;
  650. /*
  651. * Note that the LAPIC address is obtained from the MADT (32-bit value)
  652. * and (optionally) overridden by a LAPIC_ADDR_OVR entry (64-bit value).
  653. */
  654. count =
  655. acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_APIC_OVERRIDE,
  656. acpi_parse_lapic_addr_ovr, 0);
  657. if (count < 0) {
  658. printk(KERN_ERR PREFIX
  659. "Error parsing LAPIC address override entry\n");
  660. return count;
  661. }
  662. acpi_register_lapic_address(acpi_lapic_addr);
  663. count = acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_SAPIC,
  664. acpi_parse_sapic, MAX_APICS);
  665. if (!count)
  666. count = acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_APIC,
  667. acpi_parse_lapic, MAX_APICS);
  668. if (!count) {
  669. printk(KERN_ERR PREFIX "No LAPIC entries present\n");
  670. /* TBD: Cleanup to allow fallback to MPS */
  671. return -ENODEV;
  672. } else if (count < 0) {
  673. printk(KERN_ERR PREFIX "Error parsing LAPIC entry\n");
  674. /* TBD: Cleanup to allow fallback to MPS */
  675. return count;
  676. }
  677. count =
  678. acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_APIC_NMI, acpi_parse_lapic_nmi, 0);
  679. if (count < 0) {
  680. printk(KERN_ERR PREFIX "Error parsing LAPIC NMI entry\n");
  681. /* TBD: Cleanup to allow fallback to MPS */
  682. return count;
  683. }
  684. return 0;
  685. }
  686. #endif /* CONFIG_X86_LOCAL_APIC */
  687. #ifdef CONFIG_X86_IO_APIC
  688. #define MP_ISA_BUS 0
  689. #ifdef CONFIG_X86_ES7000
  690. extern int es7000_plat;
  691. #endif
  692. static struct {
  693. int apic_id;
  694. int gsi_base;
  695. int gsi_end;
  696. DECLARE_BITMAP(pin_programmed, MP_MAX_IOAPIC_PIN + 1);
  697. } mp_ioapic_routing[MAX_IO_APICS];
  698. static int mp_find_ioapic(int gsi)
  699. {
  700. int i = 0;
  701. /* Find the IOAPIC that manages this GSI. */
  702. for (i = 0; i < nr_ioapics; i++) {
  703. if ((gsi >= mp_ioapic_routing[i].gsi_base)
  704. && (gsi <= mp_ioapic_routing[i].gsi_end))
  705. return i;
  706. }
  707. printk(KERN_ERR "ERROR: Unable to locate IOAPIC for GSI %d\n", gsi);
  708. return -1;
  709. }
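/*
 * Choose a unique I/O APIC ID.  On 32-bit, older (non-xAPIC) Intel systems
 * need an ID that is unique on the APIC bus, so io_apic_get_unique_id()
 * picks one; on 64-bit the BIOS-provided ID is kept unless it is already
 * taken, in which case the first free ID is used instead.
 */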
  710. static u8 __init uniq_ioapic_id(u8 id)
  711. {
  712. #ifdef CONFIG_X86_32
  713. if ((boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) &&
  714. !APIC_XAPIC(apic_version[boot_cpu_physical_apicid]))
  715. return io_apic_get_unique_id(nr_ioapics, id);
  716. else
  717. return id;
  718. #else
  719. int i;
  720. DECLARE_BITMAP(used, 256);
  721. bitmap_zero(used, 256);
  722. for (i = 0; i < nr_ioapics; i++) {
  723. struct mp_config_ioapic *ia = &mp_ioapics[i];
  724. __set_bit(ia->mp_apicid, used);
  725. }
  726. if (!test_bit(id, used))
  727. return id;
  728. return find_first_zero_bit(used, 256);
  729. #endif
  730. }
  731. static int bad_ioapic(unsigned long address)
  732. {
  733. if (nr_ioapics >= MAX_IO_APICS) {
  734. printk(KERN_ERR "ERROR: Max # of I/O APICs (%d) exceeded "
  735. "(found %d)\n", MAX_IO_APICS, nr_ioapics);
  736. panic("Recompile kernel with bigger MAX_IO_APICS!\n");
  737. }
  738. if (!address) {
  739. printk(KERN_ERR "WARNING: Bogus (zero) I/O APIC address"
  740. " found in table, skipping!\n");
  741. return 1;
  742. }
  743. return 0;
  744. }
  745. void __init mp_register_ioapic(int id, u32 address, u32 gsi_base)
  746. {
  747. int idx = 0;
  748. if (bad_ioapic(address))
  749. return;
  750. idx = nr_ioapics;
  751. mp_ioapics[idx].mp_type = MP_IOAPIC;
  752. mp_ioapics[idx].mp_flags = MPC_APIC_USABLE;
  753. mp_ioapics[idx].mp_apicaddr = address;
  754. set_fixmap_nocache(FIX_IO_APIC_BASE_0 + idx, address);
  755. mp_ioapics[idx].mp_apicid = uniq_ioapic_id(id);
  756. #ifdef CONFIG_X86_32
  757. mp_ioapics[idx].mp_apicver = io_apic_get_version(idx);
  758. #else
  759. mp_ioapics[idx].mp_apicver = 0;
  760. #endif
  761. /*
  762. * Build basic GSI lookup table to facilitate gsi->io_apic lookups
  763. * and to prevent reprogramming of IOAPIC pins (PCI GSIs).
  764. */
  765. mp_ioapic_routing[idx].apic_id = mp_ioapics[idx].mp_apicid;
  766. mp_ioapic_routing[idx].gsi_base = gsi_base;
  767. mp_ioapic_routing[idx].gsi_end = gsi_base +
  768. io_apic_get_redir_entries(idx);
  769. printk(KERN_INFO "IOAPIC[%d]: apic_id %d, version %d, address 0x%lx, "
  770. "GSI %d-%d\n", idx, mp_ioapics[idx].mp_apicid,
  771. mp_ioapics[idx].mp_apicver, mp_ioapics[idx].mp_apicaddr,
  772. mp_ioapic_routing[idx].gsi_base, mp_ioapic_routing[idx].gsi_end);
  773. nr_ioapics++;
  774. }
  775. void __init mp_override_legacy_irq(u8 bus_irq, u8 polarity, u8 trigger, u32 gsi)
  776. {
  777. int ioapic = -1;
  778. int pin = -1;
  779. /*
  780. * Convert 'gsi' to 'ioapic.pin'.
  781. */
  782. ioapic = mp_find_ioapic(gsi);
  783. if (ioapic < 0)
  784. return;
  785. pin = gsi - mp_ioapic_routing[ioapic].gsi_base;
  786. /*
  787. * TBD: This check is for faulty timer entries, where the override
  788. * erroneously sets the trigger to level, resulting in a HUGE
  789. * increase of timer interrupts!
  790. */
  791. if ((bus_irq == 0) && (trigger == 3))
  792. trigger = 1;
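/* mp_irqflag uses the MPS INTI layout: trigger in bits 3:2, polarity in bits 1:0 */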
  793. mp_irqs[mp_irq_entries].mp_type = MP_INTSRC;
  794. mp_irqs[mp_irq_entries].mp_irqtype = mp_INT;
  795. mp_irqs[mp_irq_entries].mp_irqflag = (trigger << 2) | polarity;
  796. mp_irqs[mp_irq_entries].mp_srcbus = MP_ISA_BUS;
  797. mp_irqs[mp_irq_entries].mp_srcbusirq = bus_irq; /* IRQ */
  798. mp_irqs[mp_irq_entries].mp_dstapic =
  799. mp_ioapics[ioapic].mp_apicid; /* APIC ID */
  800. mp_irqs[mp_irq_entries].mp_dstirq = pin; /* INTIN# */
  801. if (++mp_irq_entries == MAX_IRQ_SOURCES)
  802. panic("Max # of irq sources exceeded!!\n");
  803. }
  804. void __init mp_config_acpi_legacy_irqs(void)
  805. {
  806. int i = 0;
  807. int ioapic = -1;
  808. #if defined (CONFIG_MCA) || defined (CONFIG_EISA)
  809. /*
  810. * Fabricate the legacy ISA bus (bus #31).
  811. */
  812. mp_bus_id_to_type[MP_ISA_BUS] = MP_BUS_ISA;
  813. #endif
  814. set_bit(MP_ISA_BUS, mp_bus_not_pci);
  815. Dprintk("Bus #%d is ISA\n", MP_ISA_BUS);
  816. #ifdef CONFIG_X86_ES7000
  817. /*
  818. * Older generations of ES7000 have no legacy identity mappings
  819. */
  820. if (es7000_plat == 1)
  821. return;
  822. #endif
  823. /*
  824. * Locate the IOAPIC that manages the ISA IRQs (0-15).
  825. */
  826. ioapic = mp_find_ioapic(0);
  827. if (ioapic < 0)
  828. return;
  829. /*
  830. * Use the default configuration for IRQs 0-15, unless
  831. * overridden by (MADT) interrupt source override entries.
  832. */
  833. for (i = 0; i < 16; i++) {
  834. int idx;
  835. mp_irqs[mp_irq_entries].mp_type = MP_INTSRC;
  836. mp_irqs[mp_irq_entries].mp_irqflag = 0; /* Conforming */
  837. mp_irqs[mp_irq_entries].mp_srcbus = MP_ISA_BUS;
  838. mp_irqs[mp_irq_entries].mp_dstapic = mp_ioapics[ioapic].mp_apicid;
  839. for (idx = 0; idx < mp_irq_entries; idx++) {
  840. struct mp_config_intsrc *irq = mp_irqs + idx;
  841. /* Do we already have a mapping for this ISA IRQ? */
  842. if (irq->mp_srcbus == MP_ISA_BUS
  843. && irq->mp_srcbusirq == i)
  844. break;
  845. /* Do we already have a mapping for this IOAPIC pin */
  846. if ((irq->mp_dstapic ==
  847. mp_irqs[mp_irq_entries].mp_dstapic) &&
  848. (irq->mp_dstirq == i))
  849. break;
  850. }
  851. if (idx != mp_irq_entries) {
  852. printk(KERN_DEBUG "ACPI: IRQ%d used by override.\n", i);
  853. continue; /* IRQ already used */
  854. }
  855. mp_irqs[mp_irq_entries].mp_irqtype = mp_INT;
  856. mp_irqs[mp_irq_entries].mp_srcbusirq = i; /* Identity mapped */
  857. mp_irqs[mp_irq_entries].mp_dstirq = i;
  858. if (++mp_irq_entries == MAX_IRQ_SOURCES)
  859. panic("Max # of irq sources exceeded!!\n");
  860. }
  861. }
  862. int mp_register_gsi(u32 gsi, int triggering, int polarity)
  863. {
  864. int ioapic;
  865. int ioapic_pin;
  866. #ifdef CONFIG_X86_32
  867. #define MAX_GSI_NUM 4096
  868. #define IRQ_COMPRESSION_START 64
  869. static int pci_irq = IRQ_COMPRESSION_START;
  870. /*
  871. * Mapping between Global System Interrupts, which
  872. * represent all possible interrupts, and IRQs
  873. * assigned to actual devices.
  874. */
  875. static int gsi_to_irq[MAX_GSI_NUM];
  876. #else
  877. if (acpi_irq_model != ACPI_IRQ_MODEL_IOAPIC)
  878. return gsi;
  879. #endif
  880. /* Don't set up the ACPI SCI because it's already set up */
  881. if (acpi_gbl_FADT.sci_interrupt == gsi)
  882. return gsi;
  883. ioapic = mp_find_ioapic(gsi);
  884. if (ioapic < 0) {
  885. printk(KERN_WARNING "No IOAPIC for GSI %u\n", gsi);
  886. return gsi;
  887. }
  888. ioapic_pin = gsi - mp_ioapic_routing[ioapic].gsi_base;
  889. #ifdef CONFIG_X86_32
  890. if (ioapic_renumber_irq)
  891. gsi = ioapic_renumber_irq(ioapic, gsi);
  892. #endif
  893. /*
  894. * Avoid pin reprogramming. PRTs typically include entries
  895. * with redundant pin->gsi mappings (but unique PCI devices);
  896. * we only program the IOAPIC on the first.
  897. */
  898. if (ioapic_pin > MP_MAX_IOAPIC_PIN) {
  899. printk(KERN_ERR "Invalid reference to IOAPIC pin "
  900. "%d-%d\n", mp_ioapic_routing[ioapic].apic_id,
  901. ioapic_pin);
  902. return gsi;
  903. }
  904. if (test_bit(ioapic_pin, mp_ioapic_routing[ioapic].pin_programmed)) {
  905. Dprintk(KERN_DEBUG "Pin %d-%d already programmed\n",
  906. mp_ioapic_routing[ioapic].apic_id, ioapic_pin);
  907. #ifdef CONFIG_X86_32
  908. return (gsi < IRQ_COMPRESSION_START ? gsi : gsi_to_irq[gsi]);
  909. #else
  910. return gsi;
  911. #endif
  912. }
  913. set_bit(ioapic_pin, mp_ioapic_routing[ioapic].pin_programmed);
  914. #ifdef CONFIG_X86_32
  915. /*
  916. * For GSI >= 64, use IRQ compression
  917. */
  918. if ((gsi >= IRQ_COMPRESSION_START)
  919. && (triggering == ACPI_LEVEL_SENSITIVE)) {
  920. /*
  921. * For PCI devices assign IRQs in order, avoiding gaps
  922. * due to unused I/O APIC pins.
  923. */
  924. int irq = gsi;
  925. if (gsi < MAX_GSI_NUM) {
  926. /*
  927. * Retain the VIA chipset work-around (gsi > 15), but
  928. * avoid a problem where the 8254 timer (IRQ0) is setup
  929. * via an override (so it's not on pin 0 of the ioapic),
  930. * and at the same time, the pin 0 interrupt is a PCI
  931. * type. The gsi > 15 test could cause these two pins
  932. * to be shared as IRQ0, and they are not shareable.
  933. * So test for this condition, and if necessary, avoid
  934. * the pin collision.
  935. */
  936. gsi = pci_irq++;
  937. /*
  938. * Don't assign IRQ used by ACPI SCI
  939. */
  940. if (gsi == acpi_gbl_FADT.sci_interrupt)
  941. gsi = pci_irq++;
  942. gsi_to_irq[irq] = gsi;
  943. } else {
  944. printk(KERN_ERR "GSI %u is too high\n", gsi);
  945. return gsi;
  946. }
  947. }
  948. #endif
  949. io_apic_set_pci_routing(ioapic, ioapic_pin, gsi,
  950. triggering == ACPI_EDGE_SENSITIVE ? 0 : 1,
  951. polarity == ACPI_ACTIVE_HIGH ? 0 : 1);
  952. return gsi;
  953. }
  954. int mp_config_acpi_gsi(unsigned char number, unsigned int devfn, u8 pin,
  955. u32 gsi, int triggering, int polarity)
  956. {
  957. struct mpc_config_intsrc intsrc;
  958. int ioapic;
  959. if (!enable_update_mptable)
  960. return 0;
  961. /* record the routing so the updated mptable carries an identical entry */
  962. intsrc.mpc_type = MP_INTSRC;
  963. intsrc.mpc_irqtype = mp_INT;
  964. intsrc.mpc_irqflag = (triggering == ACPI_EDGE_SENSITIVE ? 4 : 0x0c) |
  965. (polarity == ACPI_ACTIVE_HIGH ? 1 : 3);
  966. intsrc.mpc_srcbus = number;
  967. intsrc.mpc_srcbusirq = (((devfn >> 3) & 0x1f) << 2) | ((pin - 1) & 3);
  968. ioapic = mp_find_ioapic(gsi);
  969. intsrc.mpc_dstapic = mp_ioapic_routing[ioapic].apic_id;
  970. intsrc.mpc_dstirq = gsi - mp_ioapic_routing[ioapic].gsi_base;
  971. MP_intsrc_info(&intsrc);
  972. return 0;
  973. }
  974. /*
  975. * Parse IOAPIC related entries in MADT
  976. * returns 0 on success, < 0 on error
  977. */
  978. static int __init acpi_parse_madt_ioapic_entries(void)
  979. {
  980. int count;
  981. /*
  982. * ACPI interpreter is required to complete interrupt setup,
  983. * so if it is off, don't enumerate the io-apics with ACPI.
  984. * If MPS is present, it will handle them,
  985. * otherwise the system will stay in PIC mode
  986. */
  987. if (acpi_disabled || acpi_noirq) {
  988. return -ENODEV;
  989. }
  990. if (!cpu_has_apic)
  991. return -ENODEV;
  992. /*
  993. * if "noapic" boot option, don't look for IO-APICs
  994. */
  995. if (skip_ioapic_setup) {
  996. printk(KERN_INFO PREFIX "Skipping IOAPIC probe "
  997. "due to 'noapic' option.\n");
  998. return -ENODEV;
  999. }
  1000. count =
  1001. acpi_table_parse_madt(ACPI_MADT_TYPE_IO_APIC, acpi_parse_ioapic,
  1002. MAX_IO_APICS);
  1003. if (!count) {
  1004. printk(KERN_ERR PREFIX "No IOAPIC entries present\n");
  1005. return -ENODEV;
  1006. } else if (count < 0) {
  1007. printk(KERN_ERR PREFIX "Error parsing IOAPIC entry\n");
  1008. return count;
  1009. }
  1010. count =
  1011. acpi_table_parse_madt(ACPI_MADT_TYPE_INTERRUPT_OVERRIDE, acpi_parse_int_src_ovr,
  1012. NR_IRQ_VECTORS);
  1013. if (count < 0) {
  1014. printk(KERN_ERR PREFIX
  1015. "Error parsing interrupt source overrides entry\n");
  1016. /* TBD: Cleanup to allow fallback to MPS */
  1017. return count;
  1018. }
  1019. /*
  1020. * If BIOS did not supply an INT_SRC_OVR for the SCI
  1021. * pretend we got one so we can set the SCI flags.
  1022. */
  1023. if (!acpi_sci_override_gsi)
  1024. acpi_sci_ioapic_setup(acpi_gbl_FADT.sci_interrupt, 0, 0);
  1025. /* Fill in identity legacy mappings where there is no override */
  1026. mp_config_acpi_legacy_irqs();
  1027. count =
  1028. acpi_table_parse_madt(ACPI_MADT_TYPE_NMI_SOURCE, acpi_parse_nmi_src,
  1029. NR_IRQ_VECTORS);
  1030. if (count < 0) {
  1031. printk(KERN_ERR PREFIX "Error parsing NMI SRC entry\n");
  1032. /* TBD: Cleanup to allow fallback to MPS */
  1033. return count;
  1034. }
  1035. return 0;
  1036. }
  1037. #else
  1038. static inline int acpi_parse_madt_ioapic_entries(void)
  1039. {
  1040. return -1;
  1041. }
  1042. #endif /* !CONFIG_X86_IO_APIC */
  1043. static void __init early_acpi_process_madt(void)
  1044. {
  1045. #ifdef CONFIG_X86_LOCAL_APIC
  1046. int error;
  1047. if (!acpi_table_parse(ACPI_SIG_MADT, acpi_parse_madt)) {
  1048. /*
  1049. * Parse MADT LAPIC entries
  1050. */
  1051. error = early_acpi_parse_madt_lapic_addr_ovr();
  1052. if (!error) {
  1053. acpi_lapic = 1;
  1054. smp_found_config = 1;
  1055. }
  1056. if (error == -EINVAL) {
  1057. /*
  1058. * Dell Precision Workstation 410, 610 come here.
  1059. */
  1060. printk(KERN_ERR PREFIX
  1061. "Invalid BIOS MADT, disabling ACPI\n");
  1062. disable_acpi();
  1063. }
  1064. }
  1065. #endif
  1066. }
  1067. static void __init acpi_process_madt(void)
  1068. {
  1069. #ifdef CONFIG_X86_LOCAL_APIC
  1070. int error;
  1071. if (!acpi_table_parse(ACPI_SIG_MADT, acpi_parse_madt)) {
  1072. /*
  1073. * Parse MADT LAPIC entries
  1074. */
  1075. error = acpi_parse_madt_lapic_entries();
  1076. if (!error) {
  1077. acpi_lapic = 1;
  1078. #ifdef CONFIG_X86_GENERICARCH
  1079. generic_bigsmp_probe();
  1080. #endif
  1081. /*
  1082. * Parse MADT IO-APIC entries
  1083. */
  1084. error = acpi_parse_madt_ioapic_entries();
  1085. if (!error) {
  1086. acpi_irq_model = ACPI_IRQ_MODEL_IOAPIC;
  1087. acpi_irq_balance_set(NULL);
  1088. acpi_ioapic = 1;
  1089. smp_found_config = 1;
  1090. setup_apic_routing();
  1091. }
  1092. }
  1093. if (error == -EINVAL) {
  1094. /*
  1095. * Dell Precision Workstation 410, 610 come here.
  1096. */
  1097. printk(KERN_ERR PREFIX
  1098. "Invalid BIOS MADT, disabling ACPI\n");
  1099. disable_acpi();
  1100. }
  1101. }
  1102. #endif
  1103. return;
  1104. }
  1105. #ifdef __i386__
  1106. static int __init disable_acpi_irq(const struct dmi_system_id *d)
  1107. {
  1108. if (!acpi_force) {
  1109. printk(KERN_NOTICE "%s detected: force use of acpi=noirq\n",
  1110. d->ident);
  1111. acpi_noirq_set();
  1112. }
  1113. return 0;
  1114. }
  1115. static int __init disable_acpi_pci(const struct dmi_system_id *d)
  1116. {
  1117. if (!acpi_force) {
  1118. printk(KERN_NOTICE "%s detected: force use of pci=noacpi\n",
  1119. d->ident);
  1120. acpi_disable_pci();
  1121. }
  1122. return 0;
  1123. }
  1124. static int __init dmi_disable_acpi(const struct dmi_system_id *d)
  1125. {
  1126. if (!acpi_force) {
  1127. printk(KERN_NOTICE "%s detected: acpi off\n", d->ident);
  1128. disable_acpi();
  1129. } else {
  1130. printk(KERN_NOTICE
  1131. "Warning: DMI blacklist says broken, but acpi forced\n");
  1132. }
  1133. return 0;
  1134. }
  1135. /*
  1136. * Limit ACPI to CPU enumeration for HT
  1137. */
  1138. static int __init force_acpi_ht(const struct dmi_system_id *d)
  1139. {
  1140. if (!acpi_force) {
  1141. printk(KERN_NOTICE "%s detected: force use of acpi=ht\n",
  1142. d->ident);
  1143. disable_acpi();
  1144. acpi_ht = 1;
  1145. } else {
  1146. printk(KERN_NOTICE
  1147. "Warning: acpi=force overrules DMI blacklist: acpi=ht\n");
  1148. }
  1149. return 0;
  1150. }
  1151. /*
  1152. * If your system is blacklisted here, but you find that acpi=force
  1153. * works for you, please contact acpi-devel@sourceforge.net
  1154. */
  1155. static struct dmi_system_id __initdata acpi_dmi_table[] = {
  1156. /*
  1157. * Boxes that need ACPI disabled
  1158. */
  1159. {
  1160. .callback = dmi_disable_acpi,
  1161. .ident = "IBM Thinkpad",
  1162. .matches = {
  1163. DMI_MATCH(DMI_BOARD_VENDOR, "IBM"),
  1164. DMI_MATCH(DMI_BOARD_NAME, "2629H1G"),
  1165. },
  1166. },
  1167. /*
  1168. * Boxes that need acpi=ht
  1169. */
  1170. {
  1171. .callback = force_acpi_ht,
  1172. .ident = "FSC Primergy T850",
  1173. .matches = {
  1174. DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"),
  1175. DMI_MATCH(DMI_PRODUCT_NAME, "PRIMERGY T850"),
  1176. },
  1177. },
  1178. {
  1179. .callback = force_acpi_ht,
  1180. .ident = "HP VISUALIZE NT Workstation",
  1181. .matches = {
  1182. DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"),
  1183. DMI_MATCH(DMI_PRODUCT_NAME, "HP VISUALIZE NT Workstation"),
  1184. },
  1185. },
  1186. {
  1187. .callback = force_acpi_ht,
  1188. .ident = "Compaq Workstation W8000",
  1189. .matches = {
  1190. DMI_MATCH(DMI_SYS_VENDOR, "Compaq"),
  1191. DMI_MATCH(DMI_PRODUCT_NAME, "Workstation W8000"),
  1192. },
  1193. },
  1194. {
  1195. .callback = force_acpi_ht,
  1196. .ident = "ASUS P4B266",
  1197. .matches = {
  1198. DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
  1199. DMI_MATCH(DMI_BOARD_NAME, "P4B266"),
  1200. },
  1201. },
  1202. {
  1203. .callback = force_acpi_ht,
  1204. .ident = "ASUS P2B-DS",
  1205. .matches = {
  1206. DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
  1207. DMI_MATCH(DMI_BOARD_NAME, "P2B-DS"),
  1208. },
  1209. },
  1210. {
  1211. .callback = force_acpi_ht,
  1212. .ident = "ASUS CUR-DLS",
  1213. .matches = {
  1214. DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
  1215. DMI_MATCH(DMI_BOARD_NAME, "CUR-DLS"),
  1216. },
  1217. },
  1218. {
  1219. .callback = force_acpi_ht,
  1220. .ident = "ABIT i440BX-W83977",
  1221. .matches = {
  1222. DMI_MATCH(DMI_BOARD_VENDOR, "ABIT <http://www.abit.com>"),
  1223. DMI_MATCH(DMI_BOARD_NAME, "i440BX-W83977 (BP6)"),
  1224. },
  1225. },
  1226. {
  1227. .callback = force_acpi_ht,
  1228. .ident = "IBM Bladecenter",
  1229. .matches = {
  1230. DMI_MATCH(DMI_BOARD_VENDOR, "IBM"),
  1231. DMI_MATCH(DMI_BOARD_NAME, "IBM eServer BladeCenter HS20"),
  1232. },
  1233. },
  1234. {
  1235. .callback = force_acpi_ht,
  1236. .ident = "IBM eServer xSeries 360",
  1237. .matches = {
  1238. DMI_MATCH(DMI_BOARD_VENDOR, "IBM"),
  1239. DMI_MATCH(DMI_BOARD_NAME, "eServer xSeries 360"),
  1240. },
  1241. },
  1242. {
  1243. .callback = force_acpi_ht,
  1244. .ident = "IBM eserver xSeries 330",
  1245. .matches = {
  1246. DMI_MATCH(DMI_BOARD_VENDOR, "IBM"),
  1247. DMI_MATCH(DMI_BOARD_NAME, "eserver xSeries 330"),
  1248. },
  1249. },
  1250. {
  1251. .callback = force_acpi_ht,
  1252. .ident = "IBM eserver xSeries 440",
  1253. .matches = {
  1254. DMI_MATCH(DMI_BOARD_VENDOR, "IBM"),
  1255. DMI_MATCH(DMI_PRODUCT_NAME, "eserver xSeries 440"),
  1256. },
  1257. },
  1258. /*
  1259. * Boxes that need ACPI PCI IRQ routing disabled
  1260. */
  1261. {
  1262. .callback = disable_acpi_irq,
  1263. .ident = "ASUS A7V",
  1264. .matches = {
  1265. DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC"),
  1266. DMI_MATCH(DMI_BOARD_NAME, "<A7V>"),
  1267. /* newer BIOS, Revision 1011, does work */
  1268. DMI_MATCH(DMI_BIOS_VERSION,
  1269. "ASUS A7V ACPI BIOS Revision 1007"),
  1270. },
  1271. },
  1272. {
  1273. /*
  1274. * Latest BIOS for IBM 600E (1.16) has bad pcinum
  1275. * for LPC bridge, which is needed for the PCI
  1276. * interrupt links to work. DSDT fix is in bug 5966.
  1277. * 2645, 2646 model numbers are shared with 600/600E/600X
  1278. */
  1279. .callback = disable_acpi_irq,
  1280. .ident = "IBM Thinkpad 600 Series 2645",
  1281. .matches = {
  1282. DMI_MATCH(DMI_BOARD_VENDOR, "IBM"),
  1283. DMI_MATCH(DMI_BOARD_NAME, "2645"),
  1284. },
  1285. },
  1286. {
  1287. .callback = disable_acpi_irq,
  1288. .ident = "IBM Thinkpad 600 Series 2646",
  1289. .matches = {
  1290. DMI_MATCH(DMI_BOARD_VENDOR, "IBM"),
  1291. DMI_MATCH(DMI_BOARD_NAME, "2646"),
  1292. },
  1293. },
  1294. /*
  1295. * Boxes that need ACPI PCI IRQ routing and PCI scan disabled
  1296. */
  1297. { /* _BBN 0 bug */
  1298. .callback = disable_acpi_pci,
  1299. .ident = "ASUS PR-DLS",
  1300. .matches = {
  1301. DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
  1302. DMI_MATCH(DMI_BOARD_NAME, "PR-DLS"),
  1303. DMI_MATCH(DMI_BIOS_VERSION,
  1304. "ASUS PR-DLS ACPI BIOS Revision 1010"),
  1305. DMI_MATCH(DMI_BIOS_DATE, "03/21/2003")
  1306. },
  1307. },
  1308. {
  1309. .callback = disable_acpi_pci,
  1310. .ident = "Acer TravelMate 36x Laptop",
  1311. .matches = {
  1312. DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
  1313. DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 360"),
  1314. },
  1315. },
  1316. {}
  1317. };
  1318. #endif /* __i386__ */
  1319. /*
  1320. * acpi_boot_table_init() and acpi_boot_init()
  1321. * called from setup_arch(), always.
  1322. * 1. checksums all tables
  1323. * 2. enumerates lapics
  1324. * 3. enumerates io-apics
  1325. *
  1326. * acpi_table_init() is separate to allow reading SRAT without
  1327. * other side effects.
  1328. *
  1329. * side effects of acpi_boot_init:
  1330. * acpi_lapic = 1 if LAPIC found
  1331. * acpi_ioapic = 1 if IOAPIC found
  1332. * if (acpi_lapic && acpi_ioapic) smp_found_config = 1;
  1333. * if acpi_blacklisted() acpi_disabled = 1;
  1334. * acpi_irq_model=...
  1335. * ...
  1336. *
  1337. * return value: (currently ignored)
  1338. * 0: success
  1339. * !0: failure
  1340. */
  1341. int __init acpi_boot_table_init(void)
  1342. {
  1343. int error;
  1344. #ifdef __i386__
  1345. dmi_check_system(acpi_dmi_table);
  1346. #endif
  1347. /*
  1348. * If acpi_disabled, bail out
  1349. * One exception: acpi=ht continues far enough to enumerate LAPICs
  1350. */
  1351. if (acpi_disabled && !acpi_ht)
  1352. return 1;
  1353. /*
  1354. * Initialize the ACPI boot-time table parser.
  1355. */
  1356. error = acpi_table_init();
  1357. if (error) {
  1358. disable_acpi();
  1359. return error;
  1360. }
  1361. acpi_table_parse(ACPI_SIG_BOOT, acpi_parse_sbf);
  1362. /*
  1363. * blacklist may disable ACPI entirely
  1364. */
  1365. error = acpi_blacklisted();
  1366. if (error) {
  1367. if (acpi_force) {
  1368. printk(KERN_WARNING PREFIX "acpi=force override\n");
  1369. } else {
  1370. printk(KERN_WARNING PREFIX "Disabling ACPI support\n");
  1371. disable_acpi();
  1372. return error;
  1373. }
  1374. }
  1375. return 0;
  1376. }
  1377. int __init early_acpi_boot_init(void)
  1378. {
  1379. /*
  1380. * If acpi_disabled, bail out
  1381. * One exception: acpi=ht continues far enough to enumerate LAPICs
  1382. */
  1383. if (acpi_disabled && !acpi_ht)
  1384. return 1;
  1385. /*
  1386. * Process the Multiple APIC Description Table (MADT), if present
  1387. */
  1388. early_acpi_process_madt();
  1389. return 0;
  1390. }
  1391. int __init acpi_boot_init(void)
  1392. {
  1393. /*
  1394. * If acpi_disabled, bail out
  1395. * One exception: acpi=ht continues far enough to enumerate LAPICs
  1396. */
  1397. if (acpi_disabled && !acpi_ht)
  1398. return 1;
  1399. acpi_table_parse(ACPI_SIG_BOOT, acpi_parse_sbf);
  1400. /*
  1401. * set sci_int and PM timer address
  1402. */
  1403. acpi_table_parse(ACPI_SIG_FADT, acpi_parse_fadt);
  1404. /*
  1405. * Process the Multiple APIC Description Table (MADT), if present
  1406. */
  1407. acpi_process_madt();
  1408. acpi_table_parse(ACPI_SIG_HPET, acpi_parse_hpet);
  1409. return 0;
  1410. }
  1411. static int __init parse_acpi(char *arg)
  1412. {
  1413. if (!arg)
  1414. return -EINVAL;
  1415. /* "acpi=off" disables both ACPI table parsing and interpreter */
  1416. if (strcmp(arg, "off") == 0) {
  1417. disable_acpi();
  1418. }
  1419. /* acpi=force to over-ride black-list */
  1420. else if (strcmp(arg, "force") == 0) {
  1421. acpi_force = 1;
  1422. acpi_ht = 1;
  1423. acpi_disabled = 0;
  1424. }
  1425. /* acpi=strict disables out-of-spec workarounds */
  1426. else if (strcmp(arg, "strict") == 0) {
  1427. acpi_strict = 1;
  1428. }
  1429. /* Limit ACPI just to boot-time to enable HT */
  1430. else if (strcmp(arg, "ht") == 0) {
  1431. if (!acpi_force)
  1432. disable_acpi();
  1433. acpi_ht = 1;
  1434. }
  1435. /* "acpi=noirq" disables ACPI interrupt routing */
  1436. else if (strcmp(arg, "noirq") == 0) {
  1437. acpi_noirq_set();
  1438. } else {
  1439. /* Core will printk when we return error. */
  1440. return -EINVAL;
  1441. }
  1442. return 0;
  1443. }
  1444. early_param("acpi", parse_acpi);
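/*
 * Example command lines (options handled in parse_acpi() above):
 *   acpi=off     - disable ACPI table parsing and the interpreter entirely
 *   acpi=ht      - parse just enough of ACPI to enumerate LAPICs, then stop
 *   acpi=noirq   - use ACPI, but not for interrupt routing
 *   acpi=force   - override the ACPI blacklist
 *   acpi=strict  - disable out-of-spec workarounds
 */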
  1445. /* FIXME: Using pci= for an ACPI parameter is a travesty. */
  1446. static int __init parse_pci(char *arg)
  1447. {
  1448. if (arg && strcmp(arg, "noacpi") == 0)
  1449. acpi_disable_pci();
  1450. return 0;
  1451. }
  1452. early_param("pci", parse_pci);
  1453. #ifdef CONFIG_X86_IO_APIC
  1454. static int __init parse_acpi_skip_timer_override(char *arg)
  1455. {
  1456. acpi_skip_timer_override = 1;
  1457. return 0;
  1458. }
  1459. early_param("acpi_skip_timer_override", parse_acpi_skip_timer_override);
  1460. static int __init parse_acpi_use_timer_override(char *arg)
  1461. {
  1462. acpi_use_timer_override = 1;
  1463. return 0;
  1464. }
  1465. early_param("acpi_use_timer_override", parse_acpi_use_timer_override);
  1466. #endif /* CONFIG_X86_IO_APIC */
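/*
 * "acpi_sci=" accepts edge/level and high/low; each keyword replaces only
 * its own field in acpi_sci_flags, so trigger and polarity can be given as
 * separate acpi_sci= options.
 */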
  1467. static int __init setup_acpi_sci(char *s)
  1468. {
  1469. if (!s)
  1470. return -EINVAL;
  1471. if (!strcmp(s, "edge"))
  1472. acpi_sci_flags = ACPI_MADT_TRIGGER_EDGE |
  1473. (acpi_sci_flags & ~ACPI_MADT_TRIGGER_MASK);
  1474. else if (!strcmp(s, "level"))
  1475. acpi_sci_flags = ACPI_MADT_TRIGGER_LEVEL |
  1476. (acpi_sci_flags & ~ACPI_MADT_TRIGGER_MASK);
  1477. else if (!strcmp(s, "high"))
  1478. acpi_sci_flags = ACPI_MADT_POLARITY_ACTIVE_HIGH |
  1479. (acpi_sci_flags & ~ACPI_MADT_POLARITY_MASK);
  1480. else if (!strcmp(s, "low"))
  1481. acpi_sci_flags = ACPI_MADT_POLARITY_ACTIVE_LOW |
  1482. (acpi_sci_flags & ~ACPI_MADT_POLARITY_MASK);
  1483. else
  1484. return -EINVAL;
  1485. return 0;
  1486. }
  1487. early_param("acpi_sci", setup_acpi_sci);
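/*
 * ACPI Global Lock (a dword in the FACS): bit 0 is the "pending" flag,
 * bit 1 is the "owned" flag.  To acquire, atomically set "owned"; if the
 * lock was already owned, set "pending" instead and wait for the owner to
 * signal release.  new = (old with both flags cleared) + 2 ("owned")
 * + (old "owned" bit shifted down into the "pending" position).
 *
 * Example: lock free (..00) -> new = ..10, new < 3, return -1 (acquired);
 * lock owned (..10) -> new = ..11, return 0 (caller must wait for the
 * owner's GBL_RLS release event).
 */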
  1488. int __acpi_acquire_global_lock(unsigned int *lock)
  1489. {
  1490. unsigned int old, new, val;
  1491. do {
  1492. old = *lock;
  1493. new = (((old & ~0x3) + 2) + ((old >> 1) & 0x1));
  1494. val = cmpxchg(lock, old, new);
  1495. } while (unlikely (val != old));
  1496. return (new < 3) ? -1 : 0;
  1497. }
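/*
 * Release: clear both flags.  Return the old "pending" bit so the caller
 * knows whether waiters must be signalled (GBL_RLS) that the lock is free.
 */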
  1498. int __acpi_release_global_lock(unsigned int *lock)
  1499. {
  1500. unsigned int old, new, val;
  1501. do {
  1502. old = *lock;
  1503. new = old & ~0x3;
  1504. val = cmpxchg(lock, old, new);
  1505. } while (unlikely (val != old));
  1506. return old & 0x1;
  1507. }