/*
 *  boot.c - Architecture-Specific Low-Level ACPI Boot Support
 *
 *  Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
 *  Copyright (C) 2001 Jun Nakajima <jun.nakajima@intel.com>
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 */

#include <linux/init.h>
#include <linux/acpi.h>
#include <linux/acpi_pmtmr.h>
#include <linux/efi.h>
#include <linux/cpumask.h>
#include <linux/module.h>
#include <linux/dmi.h>
#include <linux/irq.h>
#include <linux/slab.h>
#include <linux/bootmem.h>
#include <linux/ioport.h>
#include <linux/pci.h>

#include <asm/pci_x86.h>
#include <asm/pgtable.h>
#include <asm/io_apic.h>
#include <asm/apic.h>
#include <asm/io.h>
#include <asm/mpspec.h>
#include <asm/smp.h>

static int __initdata acpi_force = 0;
u32 acpi_rsdt_forced;
int acpi_disabled;
EXPORT_SYMBOL(acpi_disabled);

#ifdef CONFIG_X86_64
# include <asm/proto.h>
# include <asm/numa_64.h>
#endif				/* X86 */

#define BAD_MADT_ENTRY(entry, end) (					    \
		(!entry) || (unsigned long)entry + sizeof(*entry) > end ||  \
		((struct acpi_subtable_header *)entry)->length < sizeof(*entry))
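/*
 * The BAD_MADT_ENTRY() check above rejects a MADT subtable pointer that is
 * NULL, that would extend past the end of the mapped table, or whose
 * declared length is shorter than the structure we are about to read.
 */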
#define PREFIX "ACPI: "

int acpi_noirq;				/* skip ACPI IRQ initialization */
int acpi_pci_disabled;		/* skip ACPI PCI scan and IRQ initialization */
EXPORT_SYMBOL(acpi_pci_disabled);

int acpi_lapic;
int acpi_ioapic;
int acpi_strict;

u8 acpi_sci_flags __initdata;
int acpi_sci_override_gsi __initdata;
int acpi_skip_timer_override __initdata;
int acpi_use_timer_override __initdata;

#ifdef CONFIG_X86_LOCAL_APIC
static u64 acpi_lapic_addr __initdata = APIC_DEFAULT_PHYS_BASE;
#endif

#ifndef __HAVE_ARCH_CMPXCHG
#warning ACPI uses CMPXCHG, i486 and later hardware
#endif

/* --------------------------------------------------------------------------
                              Boot-time Configuration
   -------------------------------------------------------------------------- */

/*
 * The default interrupt routing model is PIC (8259).  This gets
 * overridden if IOAPICs are enumerated (below).
 */
enum acpi_irq_model_id acpi_irq_model = ACPI_IRQ_MODEL_PIC;

/*
 * ISA irqs by default are the first 16 gsis but can be
 * any gsi as specified by an interrupt source override.
 */
static u32 isa_irq_to_gsi[NR_IRQS_LEGACY] __read_mostly = {
	0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15
};

static unsigned int gsi_to_irq(unsigned int gsi)
{
	unsigned int irq = gsi + NR_IRQS_LEGACY;
	unsigned int i;

	for (i = 0; i < NR_IRQS_LEGACY; i++) {
		if (isa_irq_to_gsi[i] == gsi) {
			return i;
		}
	}

	/* Provide an identity mapping of gsi == irq
	 * except on truly weird platforms that have
	 * non isa irqs in the first 16 gsis.
	 */
	if (gsi >= NR_IRQS_LEGACY)
		irq = gsi;
	else
		irq = gsi_top + gsi;

	return irq;
}

static u32 irq_to_gsi(int irq)
{
	unsigned int gsi;

	if (irq < NR_IRQS_LEGACY)
		gsi = isa_irq_to_gsi[irq];
	else if (irq < gsi_top)
		gsi = irq;
	else if (irq < (gsi_top + NR_IRQS_LEGACY))
		gsi = irq - gsi_top;
	else
		gsi = 0xffffffff;

	return gsi;
}
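/*
 * Summary of the mapping implemented by the two helpers above: ISA IRQs
 * 0-15 are translated through isa_irq_to_gsi[], which interrupt source
 * overrides may redirect; any other GSI maps identity to its IRQ number;
 * and a GSI below 16 that no ISA IRQ points at is handed an IRQ above
 * gsi_top, so the two directions stay consistent inverses of each other.
 */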
/*
 * Temporarily use the virtual area starting from FIX_IO_APIC_BASE_END,
 * to map the target physical address. The problem is that set_fixmap()
 * provides a single page, and it is possible that the page is not
 * sufficient.
 * By using this area, we can map up to MAX_IO_APICS pages temporarily,
 * i.e. until the next __va_range() call.
 *
 * Important Safety Note: The fixed I/O APIC page numbers are *subtracted*
 * from the fixed base.  That's why we start at FIX_IO_APIC_BASE_END and
 * count idx down while incrementing the phys address.
 */
char *__init __acpi_map_table(unsigned long phys, unsigned long size)
{
	if (!phys || !size)
		return NULL;

	return early_ioremap(phys, size);
}

void __init __acpi_unmap_table(char *map, unsigned long size)
{
	if (!map || !size)
		return;

	early_iounmap(map, size);
}

#ifdef CONFIG_X86_LOCAL_APIC
static int __init acpi_parse_madt(struct acpi_table_header *table)
{
	struct acpi_table_madt *madt = NULL;

	if (!cpu_has_apic)
		return -EINVAL;

	madt = (struct acpi_table_madt *)table;
	if (!madt) {
		printk(KERN_WARNING PREFIX "Unable to map MADT\n");
		return -ENODEV;
	}

	if (madt->address) {
		acpi_lapic_addr = (u64) madt->address;

		printk(KERN_DEBUG PREFIX "Local APIC address 0x%08x\n",
		       madt->address);
	}

	default_acpi_madt_oem_check(madt->header.oem_id,
				    madt->header.oem_table_id);

	return 0;
}

static void __cpuinit acpi_register_lapic(int id, u8 enabled)
{
	unsigned int ver = 0;

	if (!enabled) {
		++disabled_cpus;
		return;
	}

	if (boot_cpu_physical_apicid != -1U)
		ver = apic_version[boot_cpu_physical_apicid];

	generic_processor_info(id, ver);
}

static int __init
acpi_parse_x2apic(struct acpi_subtable_header *header, const unsigned long end)
{
	struct acpi_madt_local_x2apic *processor = NULL;

	processor = (struct acpi_madt_local_x2apic *)header;

	if (BAD_MADT_ENTRY(processor, end))
		return -EINVAL;

	acpi_table_print_madt_entry(header);

#ifdef CONFIG_X86_X2APIC
	/*
	 * We need to register disabled CPUs as well, so that disabled
	 * CPUs are counted and cpu_possible_map can be sized accurately;
	 * that way we avoid preallocating memory for all NR_CPUS when
	 * CPU hotplug is used.
	 */
	acpi_register_lapic(processor->local_apic_id,	/* APIC ID */
			    processor->lapic_flags & ACPI_MADT_ENABLED);
#else
	printk(KERN_WARNING PREFIX "x2apic entry ignored\n");
#endif

	return 0;
}

static int __init
acpi_parse_lapic(struct acpi_subtable_header * header, const unsigned long end)
{
	struct acpi_madt_local_apic *processor = NULL;

	processor = (struct acpi_madt_local_apic *)header;

	if (BAD_MADT_ENTRY(processor, end))
		return -EINVAL;

	acpi_table_print_madt_entry(header);
	/*
	 * We need to register disabled CPUs as well, so that disabled
	 * CPUs are counted and cpu_possible_map can be sized accurately;
	 * that way we avoid preallocating memory for all NR_CPUS when
	 * CPU hotplug is used.
	 */
	acpi_register_lapic(processor->id,	/* APIC ID */
			    processor->lapic_flags & ACPI_MADT_ENABLED);

	return 0;
}

static int __init
acpi_parse_sapic(struct acpi_subtable_header *header, const unsigned long end)
{
	struct acpi_madt_local_sapic *processor = NULL;

	processor = (struct acpi_madt_local_sapic *)header;

	if (BAD_MADT_ENTRY(processor, end))
		return -EINVAL;

	acpi_table_print_madt_entry(header);

	acpi_register_lapic((processor->id << 8) | processor->eid,/* APIC ID */
			    processor->lapic_flags & ACPI_MADT_ENABLED);

	return 0;
}

static int __init
acpi_parse_lapic_addr_ovr(struct acpi_subtable_header * header,
			  const unsigned long end)
{
	struct acpi_madt_local_apic_override *lapic_addr_ovr = NULL;

	lapic_addr_ovr = (struct acpi_madt_local_apic_override *)header;

	if (BAD_MADT_ENTRY(lapic_addr_ovr, end))
		return -EINVAL;

	acpi_lapic_addr = lapic_addr_ovr->address;

	return 0;
}

static int __init
acpi_parse_x2apic_nmi(struct acpi_subtable_header *header,
		      const unsigned long end)
{
	struct acpi_madt_local_x2apic_nmi *x2apic_nmi = NULL;

	x2apic_nmi = (struct acpi_madt_local_x2apic_nmi *)header;

	if (BAD_MADT_ENTRY(x2apic_nmi, end))
		return -EINVAL;

	acpi_table_print_madt_entry(header);

	if (x2apic_nmi->lint != 1)
		printk(KERN_WARNING PREFIX "NMI not connected to LINT 1!\n");

	return 0;
}

static int __init
acpi_parse_lapic_nmi(struct acpi_subtable_header * header, const unsigned long end)
{
	struct acpi_madt_local_apic_nmi *lapic_nmi = NULL;

	lapic_nmi = (struct acpi_madt_local_apic_nmi *)header;

	if (BAD_MADT_ENTRY(lapic_nmi, end))
		return -EINVAL;

	acpi_table_print_madt_entry(header);

	if (lapic_nmi->lint != 1)
		printk(KERN_WARNING PREFIX "NMI not connected to LINT 1!\n");

	return 0;
}

#endif				/* CONFIG_X86_LOCAL_APIC */

#ifdef CONFIG_X86_IO_APIC

static int __init
acpi_parse_ioapic(struct acpi_subtable_header * header, const unsigned long end)
{
	struct acpi_madt_io_apic *ioapic = NULL;

	ioapic = (struct acpi_madt_io_apic *)header;

	if (BAD_MADT_ENTRY(ioapic, end))
		return -EINVAL;

	acpi_table_print_madt_entry(header);

	mp_register_ioapic(ioapic->id,
			   ioapic->address, ioapic->global_irq_base);

	return 0;
}

/*
 * Parse Interrupt Source Override for the ACPI SCI
 */
static void __init acpi_sci_ioapic_setup(u8 bus_irq, u16 polarity, u16 trigger, u32 gsi)
{
	if (trigger == 0)	/* compatible SCI trigger is level */
		trigger = 3;

	if (polarity == 0)	/* compatible SCI polarity is low */
		polarity = 3;

	/* Command-line over-ride via acpi_sci= */
	if (acpi_sci_flags & ACPI_MADT_TRIGGER_MASK)
		trigger = (acpi_sci_flags & ACPI_MADT_TRIGGER_MASK) >> 2;

	if (acpi_sci_flags & ACPI_MADT_POLARITY_MASK)
		polarity = acpi_sci_flags & ACPI_MADT_POLARITY_MASK;

	/*
	 * mp_config_acpi_legacy_irqs() already setup IRQs < 16
	 * If GSI is < 16, this will update its flags,
	 * else it will create a new mp_irqs[] entry.
	 */
	mp_override_legacy_irq(bus_irq, polarity, trigger, gsi);

	/*
	 * stash over-ride to indicate we've been here
	 * and for later update of acpi_gbl_FADT
	 */
	acpi_sci_override_gsi = gsi;
	return;
}
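/*
 * Note on the magic numbers above: in the MADT INTI flag encoding used
 * throughout this file, polarity 1 means active high and 3 means active
 * low, while trigger 1 means edge and 3 means level; 0 means "conforms
 * to the bus spec", which for the SCI resolves to level/low.
 */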
static int __init
acpi_parse_int_src_ovr(struct acpi_subtable_header * header,
		       const unsigned long end)
{
	struct acpi_madt_interrupt_override *intsrc = NULL;

	intsrc = (struct acpi_madt_interrupt_override *)header;

	if (BAD_MADT_ENTRY(intsrc, end))
		return -EINVAL;

	acpi_table_print_madt_entry(header);

	if (intsrc->source_irq == acpi_gbl_FADT.sci_interrupt) {
		acpi_sci_ioapic_setup(intsrc->source_irq,
				      intsrc->inti_flags & ACPI_MADT_POLARITY_MASK,
				      (intsrc->inti_flags & ACPI_MADT_TRIGGER_MASK) >> 2,
				      intsrc->global_irq);
		return 0;
	}

	if (acpi_skip_timer_override &&
	    intsrc->source_irq == 0 && intsrc->global_irq == 2) {
		printk(PREFIX "BIOS IRQ0 pin2 override ignored.\n");
		return 0;
	}

	mp_override_legacy_irq(intsrc->source_irq,
			       intsrc->inti_flags & ACPI_MADT_POLARITY_MASK,
			       (intsrc->inti_flags & ACPI_MADT_TRIGGER_MASK) >> 2,
			       intsrc->global_irq);

	return 0;
}

static int __init
acpi_parse_nmi_src(struct acpi_subtable_header * header, const unsigned long end)
{
	struct acpi_madt_nmi_source *nmi_src = NULL;

	nmi_src = (struct acpi_madt_nmi_source *)header;

	if (BAD_MADT_ENTRY(nmi_src, end))
		return -EINVAL;

	acpi_table_print_madt_entry(header);
	/* TBD: Support nmi_src entries? */
	return 0;
}

#endif				/* CONFIG_X86_IO_APIC */

/*
 * acpi_pic_sci_set_trigger()
 *
 * use ELCR to set PIC-mode trigger type for SCI
 *
 * If a PIC-mode SCI is not recognized or gives spurious IRQ7's
 * it may require Edge Trigger -- use "acpi_sci=edge"
 *
 * Port 0x4d0-4d1 are ELCR1 and ELCR2, the Edge/Level Control Registers
 * for the 8259 PIC. bit[n] = 1 means irq[n] is Level, otherwise Edge.
 * ELCR1 is IRQs 0-7 (IRQ 0, 1, 2 must be 0)
 * ELCR2 is IRQs 8-15 (IRQ 8, 13 must be 0)
 */
void __init acpi_pic_sci_set_trigger(unsigned int irq, u16 trigger)
{
	unsigned int mask = 1 << irq;
	unsigned int old, new;

	/* Real old ELCR mask */
	old = inb(0x4d0) | (inb(0x4d1) << 8);

	/*
	 * If we use ACPI to set PCI IRQs, then we should clear ELCR
	 * since we will set it correctly as we enable the PCI irq
	 * routing.
	 */
	new = acpi_noirq ? old : 0;

	/*
	 * Update SCI information in the ELCR, it isn't in the PCI
	 * routing tables..
	 */
	switch (trigger) {
	case 1:		/* Edge - clear */
		new &= ~mask;
		break;
	case 3:		/* Level - set */
		new |= mask;
		break;
	}

	if (old == new)
		return;

	printk(PREFIX "setting ELCR to %04x (from %04x)\n", new, old);
	outb(new, 0x4d0);
	outb(new >> 8, 0x4d1);
}

int acpi_gsi_to_irq(u32 gsi, unsigned int *irq)
{
	*irq = gsi_to_irq(gsi);

#ifdef CONFIG_X86_IO_APIC
	if (acpi_irq_model == ACPI_IRQ_MODEL_IOAPIC)
		setup_IO_APIC_irq_extra(gsi);
#endif

	return 0;
}

int acpi_isa_irq_to_gsi(unsigned isa_irq, u32 *gsi)
{
	if (isa_irq >= 16)
		return -1;
	*gsi = irq_to_gsi(isa_irq);
	return 0;
}

static int acpi_register_gsi_pic(struct device *dev, u32 gsi,
				 int trigger, int polarity)
{
#ifdef CONFIG_PCI
	/*
	 * Make sure all (legacy) PCI IRQs are set as level-triggered.
	 */
	if (trigger == ACPI_LEVEL_SENSITIVE)
		eisa_set_level_irq(gsi);
#endif

	return gsi;
}

static int acpi_register_gsi_ioapic(struct device *dev, u32 gsi,
				    int trigger, int polarity)
{
#ifdef CONFIG_X86_IO_APIC
	gsi = mp_register_gsi(dev, gsi, trigger, polarity);
#endif

	return gsi;
}

static int (*__acpi_register_gsi)(struct device *dev, u32 gsi, int trigger, int polarity) = acpi_register_gsi_pic;

/*
 * success: return IRQ number (>=0)
 * failure: return < 0
 */
int acpi_register_gsi(struct device *dev, u32 gsi, int trigger, int polarity)
{
	unsigned int irq;
	unsigned int plat_gsi = gsi;

	plat_gsi = (*__acpi_register_gsi)(dev, gsi, trigger, polarity);
	irq = gsi_to_irq(plat_gsi);

	return irq;
}

void __init acpi_set_irq_model_pic(void)
{
	acpi_irq_model = ACPI_IRQ_MODEL_PIC;
	__acpi_register_gsi = acpi_register_gsi_pic;
	acpi_ioapic = 0;
}

void __init acpi_set_irq_model_ioapic(void)
{
	acpi_irq_model = ACPI_IRQ_MODEL_IOAPIC;
	__acpi_register_gsi = acpi_register_gsi_ioapic;
	acpi_ioapic = 1;
}
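/*
 * The two setters above switch both the reported interrupt model and the
 * __acpi_register_gsi() callback in one place.  acpi_process_madt() selects
 * the IO-APIC variant only after the MADT IO-APIC entries parse cleanly;
 * otherwise the boot-time PIC default stays in effect.
 */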
/*
 * ACPI based hotplug support for CPU
 */
#ifdef CONFIG_ACPI_HOTPLUG_CPU
#include <acpi/processor.h>

static void acpi_map_cpu2node(acpi_handle handle, int cpu, int physid)
{
#ifdef CONFIG_ACPI_NUMA
	int nid;

	nid = acpi_get_node(handle);
	if (nid == -1 || !node_online(nid))
		return;
#ifdef CONFIG_X86_64
	apicid_to_node[physid] = nid;
	numa_set_node(cpu, nid);
#else /* CONFIG_X86_32 */
	apicid_2_node[physid] = nid;
	cpu_to_node_map[cpu] = nid;
#endif

#endif
}

static int __cpuinit _acpi_map_lsapic(acpi_handle handle, int *pcpu)
{
	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
	union acpi_object *obj;
	struct acpi_madt_local_apic *lapic;
	cpumask_var_t tmp_map, new_map;
	u8 physid;
	int cpu;
	int retval = -ENOMEM;

	if (ACPI_FAILURE(acpi_evaluate_object(handle, "_MAT", NULL, &buffer)))
		return -EINVAL;

	if (!buffer.length || !buffer.pointer)
		return -EINVAL;

	obj = buffer.pointer;
	if (obj->type != ACPI_TYPE_BUFFER ||
	    obj->buffer.length < sizeof(*lapic)) {
		kfree(buffer.pointer);
		return -EINVAL;
	}

	lapic = (struct acpi_madt_local_apic *)obj->buffer.pointer;

	if (lapic->header.type != ACPI_MADT_TYPE_LOCAL_APIC ||
	    !(lapic->lapic_flags & ACPI_MADT_ENABLED)) {
		kfree(buffer.pointer);
		return -EINVAL;
	}

	physid = lapic->id;

	kfree(buffer.pointer);
	buffer.length = ACPI_ALLOCATE_BUFFER;
	buffer.pointer = NULL;

	if (!alloc_cpumask_var(&tmp_map, GFP_KERNEL))
		goto out;

	if (!alloc_cpumask_var(&new_map, GFP_KERNEL))
		goto free_tmp_map;

	cpumask_copy(tmp_map, cpu_present_mask);
	acpi_register_lapic(physid, lapic->lapic_flags & ACPI_MADT_ENABLED);

	/*
	 * If mp_register_lapic successfully generates a new logical cpu
	 * number, then the following will get us exactly what was mapped
	 */
	cpumask_andnot(new_map, cpu_present_mask, tmp_map);
	if (cpumask_empty(new_map)) {
		printk("Unable to map lapic to logical cpu number\n");
		retval = -EINVAL;
		goto free_new_map;
	}

	acpi_processor_set_pdc(handle);

	cpu = cpumask_first(new_map);
	acpi_map_cpu2node(handle, cpu, physid);

	*pcpu = cpu;
	retval = 0;

free_new_map:
	free_cpumask_var(new_map);
free_tmp_map:
	free_cpumask_var(tmp_map);
out:
	return retval;
}

/* wrapper to silence section mismatch warning */
int __ref acpi_map_lsapic(acpi_handle handle, int *pcpu)
{
	return _acpi_map_lsapic(handle, pcpu);
}
EXPORT_SYMBOL(acpi_map_lsapic);

int acpi_unmap_lsapic(int cpu)
{
	per_cpu(x86_cpu_to_apicid, cpu) = -1;
	set_cpu_present(cpu, false);
	num_processors--;

	return (0);
}

EXPORT_SYMBOL(acpi_unmap_lsapic);
#endif				/* CONFIG_ACPI_HOTPLUG_CPU */

int acpi_register_ioapic(acpi_handle handle, u64 phys_addr, u32 gsi_base)
{
	/* TBD */
	return -EINVAL;
}

EXPORT_SYMBOL(acpi_register_ioapic);

int acpi_unregister_ioapic(acpi_handle handle, u32 gsi_base)
{
	/* TBD */
	return -EINVAL;
}

EXPORT_SYMBOL(acpi_unregister_ioapic);

static int __init acpi_parse_sbf(struct acpi_table_header *table)
{
	struct acpi_table_boot *sb;

	sb = (struct acpi_table_boot *)table;
	if (!sb) {
		printk(KERN_WARNING PREFIX "Unable to map SBF\n");
		return -ENODEV;
	}

	sbf_port = sb->cmos_index;	/* Save CMOS port */

	return 0;
}

#ifdef CONFIG_HPET_TIMER
#include <asm/hpet.h>

static struct __initdata resource *hpet_res;

static int __init acpi_parse_hpet(struct acpi_table_header *table)
{
	struct acpi_table_hpet *hpet_tbl;

	hpet_tbl = (struct acpi_table_hpet *)table;
	if (!hpet_tbl) {
		printk(KERN_WARNING PREFIX "Unable to map HPET\n");
		return -ENODEV;
	}

	if (hpet_tbl->address.space_id != ACPI_SPACE_MEM) {
		printk(KERN_WARNING PREFIX "HPET timers must be located in "
		       "memory.\n");
		return -1;
	}

	hpet_address = hpet_tbl->address.address;
	hpet_blockid = hpet_tbl->sequence;

	/*
	 * Some broken BIOSes advertise HPET at 0x0. We really do not
	 * want to allocate a resource there.
	 */
	if (!hpet_address) {
		printk(KERN_WARNING PREFIX
		       "HPET id: %#x base: %#lx is invalid\n",
		       hpet_tbl->id, hpet_address);
		return 0;
	}
#ifdef CONFIG_X86_64
	/*
	 * Some even more broken BIOSes advertise HPET at
	 * 0xfed0000000000000 instead of 0xfed00000. Fix it up and add
	 * some noise:
	 */
	if (hpet_address == 0xfed0000000000000UL) {
		if (!hpet_force_user) {
			printk(KERN_WARNING PREFIX "HPET id: %#x "
			       "base: 0xfed0000000000000 is bogus\n"
			       "try hpet=force on the kernel command line to "
			       "fix it up to 0xfed00000.\n", hpet_tbl->id);
			hpet_address = 0;
			return 0;
		}
		printk(KERN_WARNING PREFIX
		       "HPET id: %#x base: 0xfed0000000000000 fixed up "
		       "to 0xfed00000.\n", hpet_tbl->id);
		hpet_address >>= 32;
	}
#endif
	printk(KERN_INFO PREFIX "HPET id: %#x base: %#lx\n",
	       hpet_tbl->id, hpet_address);

	/*
	 * Allocate and initialize the HPET firmware resource for adding into
	 * the resource tree during the lateinit timeframe.
	 */
#define HPET_RESOURCE_NAME_SIZE 9
	hpet_res = alloc_bootmem(sizeof(*hpet_res) + HPET_RESOURCE_NAME_SIZE);

	hpet_res->name = (void *)&hpet_res[1];
	hpet_res->flags = IORESOURCE_MEM;
	snprintf((char *)hpet_res->name, HPET_RESOURCE_NAME_SIZE, "HPET %u",
		 hpet_tbl->sequence);

	hpet_res->start = hpet_address;
	hpet_res->end = hpet_address + (1 * 1024) - 1;

	return 0;
}

/*
 * hpet_insert_resource inserts the HPET resources used into the resource
 * tree.
 */
static __init int hpet_insert_resource(void)
{
	if (!hpet_res)
		return 1;

	return insert_resource(&iomem_resource, hpet_res);
}

late_initcall(hpet_insert_resource);

#else
#define acpi_parse_hpet NULL
#endif

static int __init acpi_parse_fadt(struct acpi_table_header *table)
{

#ifdef CONFIG_X86_PM_TIMER
	/* detect the location of the ACPI PM Timer */
	if (acpi_gbl_FADT.header.revision >= FADT2_REVISION_ID) {
		/* FADT rev. 2 */
		if (acpi_gbl_FADT.xpm_timer_block.space_id !=
		    ACPI_ADR_SPACE_SYSTEM_IO)
			return 0;

		pmtmr_ioport = acpi_gbl_FADT.xpm_timer_block.address;
		/*
		 * "X" fields are optional extensions to the original V1.0
		 * fields, so we must selectively expand V1.0 fields if the
		 * corresponding X field is zero.
		 */
		if (!pmtmr_ioport)
			pmtmr_ioport = acpi_gbl_FADT.pm_timer_block;
	} else {
		/* FADT rev. 1 */
		pmtmr_ioport = acpi_gbl_FADT.pm_timer_block;
	}
	if (pmtmr_ioport)
		printk(KERN_INFO PREFIX "PM-Timer IO Port: %#x\n",
		       pmtmr_ioport);
#endif
	return 0;
}

#ifdef CONFIG_X86_LOCAL_APIC
/*
 * Parse LAPIC entries in MADT
 * returns 0 on success, < 0 on error
 */

static void __init acpi_register_lapic_address(unsigned long address)
{
	mp_lapic_addr = address;

	set_fixmap_nocache(FIX_APIC_BASE, address);
	if (boot_cpu_physical_apicid == -1U) {
		boot_cpu_physical_apicid = read_apic_id();
		apic_version[boot_cpu_physical_apicid] =
			 GET_APIC_VERSION(apic_read(APIC_LVR));
	}
}

static int __init early_acpi_parse_madt_lapic_addr_ovr(void)
{
	int count;

	if (!cpu_has_apic)
		return -ENODEV;

	/*
	 * Note that the LAPIC address is obtained from the MADT (32-bit value)
	 * and (optionally) overridden by a LAPIC_ADDR_OVR entry (64-bit value).
	 */
	count =
	    acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_APIC_OVERRIDE,
				  acpi_parse_lapic_addr_ovr, 0);
	if (count < 0) {
		printk(KERN_ERR PREFIX
		       "Error parsing LAPIC address override entry\n");
		return count;
	}

	acpi_register_lapic_address(acpi_lapic_addr);

	return count;
}

static int __init acpi_parse_madt_lapic_entries(void)
{
	int count;
	int x2count = 0;

	if (!cpu_has_apic)
		return -ENODEV;

	/*
	 * Note that the LAPIC address is obtained from the MADT (32-bit value)
	 * and (optionally) overridden by a LAPIC_ADDR_OVR entry (64-bit value).
	 */
	count =
	    acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_APIC_OVERRIDE,
				  acpi_parse_lapic_addr_ovr, 0);
	if (count < 0) {
		printk(KERN_ERR PREFIX
		       "Error parsing LAPIC address override entry\n");
		return count;
	}

	acpi_register_lapic_address(acpi_lapic_addr);

	count = acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_SAPIC,
				      acpi_parse_sapic, MAX_APICS);

	if (!count) {
		x2count = acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_X2APIC,
						acpi_parse_x2apic, MAX_APICS);
		count = acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_APIC,
					      acpi_parse_lapic, MAX_APICS);
	}
	if (!count && !x2count) {
		printk(KERN_ERR PREFIX "No LAPIC entries present\n");
		/* TBD: Cleanup to allow fallback to MPS */
		return -ENODEV;
	} else if (count < 0 || x2count < 0) {
		printk(KERN_ERR PREFIX "Error parsing LAPIC entry\n");
		/* TBD: Cleanup to allow fallback to MPS */
		return count;
	}

	x2count =
	    acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_X2APIC_NMI,
				  acpi_parse_x2apic_nmi, 0);
	count =
	    acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_APIC_NMI, acpi_parse_lapic_nmi, 0);
	if (count < 0 || x2count < 0) {
		printk(KERN_ERR PREFIX "Error parsing LAPIC NMI entry\n");
		/* TBD: Cleanup to allow fallback to MPS */
		return count;
	}
	return 0;
}
#endif				/* CONFIG_X86_LOCAL_APIC */

#ifdef CONFIG_X86_IO_APIC
#define MP_ISA_BUS		0

#ifdef CONFIG_X86_ES7000
extern int es7000_plat;
#endif

static void assign_to_mp_irq(struct mpc_intsrc *m,
			     struct mpc_intsrc *mp_irq)
{
	memcpy(mp_irq, m, sizeof(struct mpc_intsrc));
}

static int mp_irq_cmp(struct mpc_intsrc *mp_irq,
		      struct mpc_intsrc *m)
{
	return memcmp(mp_irq, m, sizeof(struct mpc_intsrc));
}

static void save_mp_irq(struct mpc_intsrc *m)
{
	int i;

	for (i = 0; i < mp_irq_entries; i++) {
		if (!mp_irq_cmp(&mp_irqs[i], m))
			return;
	}

	assign_to_mp_irq(m, &mp_irqs[mp_irq_entries]);
	if (++mp_irq_entries == MAX_IRQ_SOURCES)
		panic("Max # of irq sources exceeded!!\n");
}

void __init mp_override_legacy_irq(u8 bus_irq, u8 polarity, u8 trigger, u32 gsi)
{
	int ioapic;
	int pin;
	struct mpc_intsrc mp_irq;

	/*
	 * Convert 'gsi' to 'ioapic.pin'.
	 */
	ioapic = mp_find_ioapic(gsi);
	if (ioapic < 0)
		return;
	pin = mp_find_ioapic_pin(ioapic, gsi);

	/*
	 * TBD: This check is for faulty timer entries, where the override
	 *      erroneously sets the trigger to level, resulting in a HUGE
	 *      increase of timer interrupts!
	 */
	if ((bus_irq == 0) && (trigger == 3))
		trigger = 1;

	mp_irq.type = MP_INTSRC;
	mp_irq.irqtype = mp_INT;
	mp_irq.irqflag = (trigger << 2) | polarity;
	mp_irq.srcbus = MP_ISA_BUS;
	mp_irq.srcbusirq = bus_irq;	/* IRQ */
	mp_irq.dstapic = mp_ioapics[ioapic].apicid;	/* APIC ID */
	mp_irq.dstirq = pin;	/* INTIN# */

	save_mp_irq(&mp_irq);

	isa_irq_to_gsi[bus_irq] = gsi;
}
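/*
 * A common example of the override handled above: many boards route the
 * legacy timer, ISA IRQ 0, to IO-APIC pin 2 (GSI 2) rather than pin 0.
 * The MADT then carries an interrupt source override with source_irq 0
 * and global_irq 2, which ends up here and updates isa_irq_to_gsi[0].
 */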
void __init mp_config_acpi_legacy_irqs(void)
{
	int i;
	struct mpc_intsrc mp_irq;

#if defined (CONFIG_MCA) || defined (CONFIG_EISA)
	/*
	 * Fabricate the legacy ISA bus (bus #31).
	 */
	mp_bus_id_to_type[MP_ISA_BUS] = MP_BUS_ISA;
#endif
	set_bit(MP_ISA_BUS, mp_bus_not_pci);
	pr_debug("Bus #%d is ISA\n", MP_ISA_BUS);

#ifdef CONFIG_X86_ES7000
	/*
	 * Older generations of ES7000 have no legacy identity mappings
	 */
	if (es7000_plat == 1)
		return;
#endif

	/*
	 * Use the default configuration for the IRQs 0-15.  Unless
	 * overridden by (MADT) interrupt source override entries.
	 */
	for (i = 0; i < 16; i++) {
		int ioapic, pin;
		unsigned int dstapic;
		int idx;
		u32 gsi;

		/* Locate the gsi that irq i maps to. */
		if (acpi_isa_irq_to_gsi(i, &gsi))
			continue;

		/*
		 * Locate the IOAPIC that manages the ISA IRQ.
		 */
		ioapic = mp_find_ioapic(gsi);
		if (ioapic < 0)
			continue;
		pin = mp_find_ioapic_pin(ioapic, gsi);
		dstapic = mp_ioapics[ioapic].apicid;

		for (idx = 0; idx < mp_irq_entries; idx++) {
			struct mpc_intsrc *irq = mp_irqs + idx;

			/* Do we already have a mapping for this ISA IRQ? */
			if (irq->srcbus == MP_ISA_BUS && irq->srcbusirq == i)
				break;

			/* Do we already have a mapping for this IOAPIC pin */
			if (irq->dstapic == dstapic && irq->dstirq == pin)
				break;
		}

		if (idx != mp_irq_entries) {
			printk(KERN_DEBUG "ACPI: IRQ%d used by override.\n", i);
			continue;	/* IRQ already used */
		}

		mp_irq.type = MP_INTSRC;
		mp_irq.irqflag = 0;	/* Conforming */
		mp_irq.srcbus = MP_ISA_BUS;
		mp_irq.dstapic = dstapic;
		mp_irq.irqtype = mp_INT;
		mp_irq.srcbusirq = i;	/* Identity mapped */
		mp_irq.dstirq = pin;

		save_mp_irq(&mp_irq);
	}
}

static int mp_config_acpi_gsi(struct device *dev, u32 gsi, int trigger,
			      int polarity)
{
#ifdef CONFIG_X86_MPPARSE
	struct mpc_intsrc mp_irq;
	struct pci_dev *pdev;
	unsigned char number;
	unsigned int devfn;
	int ioapic;
	u8 pin;

	if (!acpi_ioapic)
		return 0;
	if (!dev)
		return 0;
	if (dev->bus != &pci_bus_type)
		return 0;

	pdev = to_pci_dev(dev);
	number = pdev->bus->number;
	devfn = pdev->devfn;
	pin = pdev->pin;
	/* the mp_irq entry built here should appear identical to an MP-table entry */
	mp_irq.type = MP_INTSRC;
	mp_irq.irqtype = mp_INT;
	mp_irq.irqflag = (trigger == ACPI_EDGE_SENSITIVE ? 4 : 0x0c) |
			 (polarity == ACPI_ACTIVE_HIGH ? 1 : 3);
	mp_irq.srcbus = number;
	mp_irq.srcbusirq = (((devfn >> 3) & 0x1f) << 2) | ((pin - 1) & 3);
	ioapic = mp_find_ioapic(gsi);
	mp_irq.dstapic = mp_ioapics[ioapic].apicid;
	mp_irq.dstirq = mp_find_ioapic_pin(ioapic, gsi);

	save_mp_irq(&mp_irq);
#endif
	return 0;
}

int mp_register_gsi(struct device *dev, u32 gsi, int trigger, int polarity)
{
	int ioapic;
	int ioapic_pin;
	struct io_apic_irq_attr irq_attr;

	if (acpi_irq_model != ACPI_IRQ_MODEL_IOAPIC)
		return gsi;

	/* Don't set up the ACPI SCI because it's already set up */
	if (acpi_gbl_FADT.sci_interrupt == gsi)
		return gsi;

	ioapic = mp_find_ioapic(gsi);
	if (ioapic < 0) {
		printk(KERN_WARNING "No IOAPIC for GSI %u\n", gsi);
		return gsi;
	}

	ioapic_pin = mp_find_ioapic_pin(ioapic, gsi);

	if (ioapic_pin > MP_MAX_IOAPIC_PIN) {
		printk(KERN_ERR "Invalid reference to IOAPIC pin "
		       "%d-%d\n", mp_ioapics[ioapic].apicid,
		       ioapic_pin);
		return gsi;
	}

	if (enable_update_mptable)
		mp_config_acpi_gsi(dev, gsi, trigger, polarity);

	set_io_apic_irq_attr(&irq_attr, ioapic, ioapic_pin,
			     trigger == ACPI_EDGE_SENSITIVE ? 0 : 1,
			     polarity == ACPI_ACTIVE_HIGH ? 0 : 1);
	io_apic_set_pci_routing(dev, gsi_to_irq(gsi), &irq_attr);

	return gsi;
}

/*
 * Parse IOAPIC related entries in MADT
 * returns 0 on success, < 0 on error
 */
static int __init acpi_parse_madt_ioapic_entries(void)
{
	int count;

	/*
	 * ACPI interpreter is required to complete interrupt setup,
	 * so if it is off, don't enumerate the io-apics with ACPI.
	 * If MPS is present, it will handle them,
	 * otherwise the system will stay in PIC mode
	 */
	if (acpi_disabled || acpi_noirq)
		return -ENODEV;

	if (!cpu_has_apic)
		return -ENODEV;

	/*
	 * if "noapic" boot option, don't look for IO-APICs
	 */
	if (skip_ioapic_setup) {
		printk(KERN_INFO PREFIX "Skipping IOAPIC probe "
		       "due to 'noapic' option.\n");
		return -ENODEV;
	}

	count =
	    acpi_table_parse_madt(ACPI_MADT_TYPE_IO_APIC, acpi_parse_ioapic,
				  MAX_IO_APICS);
	if (!count) {
		printk(KERN_ERR PREFIX "No IOAPIC entries present\n");
		return -ENODEV;
	} else if (count < 0) {
		printk(KERN_ERR PREFIX "Error parsing IOAPIC entry\n");
		return count;
	}

	count =
	    acpi_table_parse_madt(ACPI_MADT_TYPE_INTERRUPT_OVERRIDE, acpi_parse_int_src_ovr,
				  nr_irqs);
	if (count < 0) {
		printk(KERN_ERR PREFIX
		       "Error parsing interrupt source overrides entry\n");
		/* TBD: Cleanup to allow fallback to MPS */
		return count;
	}

	/*
	 * If BIOS did not supply an INT_SRC_OVR for the SCI
	 * pretend we got one so we can set the SCI flags.
	 */
	if (!acpi_sci_override_gsi)
		acpi_sci_ioapic_setup(acpi_gbl_FADT.sci_interrupt, 0, 0,
				      acpi_gbl_FADT.sci_interrupt);

	/* Fill in identity legacy mappings where no override */
	mp_config_acpi_legacy_irqs();

	count =
	    acpi_table_parse_madt(ACPI_MADT_TYPE_NMI_SOURCE, acpi_parse_nmi_src,
				  nr_irqs);
	if (count < 0) {
		printk(KERN_ERR PREFIX "Error parsing NMI SRC entry\n");
		/* TBD: Cleanup to allow fallback to MPS */
		return count;
	}

	return 0;
}
#else
static inline int acpi_parse_madt_ioapic_entries(void)
{
	return -1;
}
#endif	/* !CONFIG_X86_IO_APIC */

static void __init early_acpi_process_madt(void)
{
#ifdef CONFIG_X86_LOCAL_APIC
	int error;

	if (!acpi_table_parse(ACPI_SIG_MADT, acpi_parse_madt)) {

		/*
		 * Parse MADT LAPIC entries
		 */
		error = early_acpi_parse_madt_lapic_addr_ovr();
		if (!error) {
			acpi_lapic = 1;
			smp_found_config = 1;
		}
		if (error == -EINVAL) {
			/*
			 * Dell Precision Workstation 410, 610 come here.
			 */
			printk(KERN_ERR PREFIX
			       "Invalid BIOS MADT, disabling ACPI\n");
			disable_acpi();
		}
	}
#endif
}

static void __init acpi_process_madt(void)
{
#ifdef CONFIG_X86_LOCAL_APIC
	int error;

	if (!acpi_table_parse(ACPI_SIG_MADT, acpi_parse_madt)) {

		/*
		 * Parse MADT LAPIC entries
		 */
		error = acpi_parse_madt_lapic_entries();
		if (!error) {
			acpi_lapic = 1;

			/*
			 * Parse MADT IO-APIC entries
			 */
			error = acpi_parse_madt_ioapic_entries();
			if (!error) {
				acpi_set_irq_model_ioapic();

				smp_found_config = 1;
			}
		}
		if (error == -EINVAL) {
			/*
			 * Dell Precision Workstation 410, 610 come here.
			 */
			printk(KERN_ERR PREFIX
			       "Invalid BIOS MADT, disabling ACPI\n");
			disable_acpi();
		}
	} else {
		/*
		 * ACPI found no MADT, and so ACPI wants UP PIC mode.
		 * In the event an MPS table was found, forget it.
		 * Boot with "acpi=off" to use MPS on such a system.
		 */
		if (smp_found_config) {
			printk(KERN_WARNING PREFIX
			       "No APIC-table, disabling MPS\n");
			smp_found_config = 0;
		}
	}

	/*
	 * ACPI supports both logical (e.g. Hyper-Threading) and physical
	 * processors, where MPS only supports physical.
	 */
	if (acpi_lapic && acpi_ioapic)
		printk(KERN_INFO "Using ACPI (MADT) for SMP configuration "
		       "information\n");
	else if (acpi_lapic)
		printk(KERN_INFO "Using ACPI for processor (LAPIC) "
		       "configuration information\n");
#endif
	return;
}

static int __init disable_acpi_irq(const struct dmi_system_id *d)
{
	if (!acpi_force) {
		printk(KERN_NOTICE "%s detected: force use of acpi=noirq\n",
		       d->ident);
		acpi_noirq_set();
	}
	return 0;
}

static int __init disable_acpi_pci(const struct dmi_system_id *d)
{
	if (!acpi_force) {
		printk(KERN_NOTICE "%s detected: force use of pci=noacpi\n",
		       d->ident);
		acpi_disable_pci();
	}
	return 0;
}

static int __init dmi_disable_acpi(const struct dmi_system_id *d)
{
	if (!acpi_force) {
		printk(KERN_NOTICE "%s detected: acpi off\n", d->ident);
		disable_acpi();
	} else {
		printk(KERN_NOTICE
		       "Warning: DMI blacklist says broken, but acpi forced\n");
	}
	return 0;
}

/*
 * Force ignoring BIOS IRQ0 pin2 override
 */
static int __init dmi_ignore_irq0_timer_override(const struct dmi_system_id *d)
{
	/*
	 * The ati_ixp4x0_rev() early PCI quirk should have set
	 * the acpi_skip_timer_override flag already:
	 */
	if (!acpi_skip_timer_override) {
		WARN(1, KERN_ERR "ati_ixp4x0 quirk not complete.\n");
		pr_notice("%s detected: Ignoring BIOS IRQ0 pin2 override\n",
			  d->ident);
		acpi_skip_timer_override = 1;
	}
	return 0;
}

/*
 * If your system is blacklisted here, but you find that acpi=force
 * works for you, please contact linux-acpi@vger.kernel.org
 */
static struct dmi_system_id __initdata acpi_dmi_table[] = {
	/*
	 * Boxes that need ACPI disabled
	 */
	{
	 .callback = dmi_disable_acpi,
	 .ident = "IBM Thinkpad",
	 .matches = {
		     DMI_MATCH(DMI_BOARD_VENDOR, "IBM"),
		     DMI_MATCH(DMI_BOARD_NAME, "2629H1G"),
		     },
	 },

	/*
	 * Boxes that need ACPI PCI IRQ routing disabled
	 */
	{
	 .callback = disable_acpi_irq,
	 .ident = "ASUS A7V",
	 .matches = {
		     DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC"),
		     DMI_MATCH(DMI_BOARD_NAME, "<A7V>"),
		     /* newer BIOS, Revision 1011, does work */
		     DMI_MATCH(DMI_BIOS_VERSION,
			       "ASUS A7V ACPI BIOS Revision 1007"),
		     },
	 },
	{
	 /*
	  * Latest BIOS for IBM 600E (1.16) has bad pcinum
	  * for LPC bridge, which is needed for the PCI
	  * interrupt links to work. DSDT fix is in bug 5966.
	  * 2645, 2646 model numbers are shared with 600/600E/600X
	  */
	 .callback = disable_acpi_irq,
	 .ident = "IBM Thinkpad 600 Series 2645",
	 .matches = {
		     DMI_MATCH(DMI_BOARD_VENDOR, "IBM"),
		     DMI_MATCH(DMI_BOARD_NAME, "2645"),
		     },
	 },
	{
	 .callback = disable_acpi_irq,
	 .ident = "IBM Thinkpad 600 Series 2646",
	 .matches = {
		     DMI_MATCH(DMI_BOARD_VENDOR, "IBM"),
		     DMI_MATCH(DMI_BOARD_NAME, "2646"),
		     },
	 },

	/*
	 * Boxes that need ACPI PCI IRQ routing and PCI scan disabled
	 */
	{			/* _BBN 0 bug */
	 .callback = disable_acpi_pci,
	 .ident = "ASUS PR-DLS",
	 .matches = {
		     DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
		     DMI_MATCH(DMI_BOARD_NAME, "PR-DLS"),
		     DMI_MATCH(DMI_BIOS_VERSION,
			       "ASUS PR-DLS ACPI BIOS Revision 1010"),
		     DMI_MATCH(DMI_BIOS_DATE, "03/21/2003")
		     },
	 },
	{
	 .callback = disable_acpi_pci,
	 .ident = "Acer TravelMate 36x Laptop",
	 .matches = {
		     DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
		     DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 360"),
		     },
	 },
	{}
};

/* second table for DMI checks that should run after early-quirks */
static struct dmi_system_id __initdata acpi_dmi_table_late[] = {
	/*
	 * HP laptops which use a DSDT reporting as HP/SB400/10000,
	 * which includes some code which overrides all temperature
	 * trip points to 16C if the INTIN2 input of the I/O APIC
	 * is enabled.  This input is incorrectly designated the
	 * ISA IRQ 0 via an interrupt source override even though
	 * it is wired to the output of the master 8259A and INTIN0
	 * is not connected at all.  Force ignoring BIOS IRQ0 pin2
	 * override in such cases.
	 */
	{
	 .callback = dmi_ignore_irq0_timer_override,
	 .ident = "HP nx6115 laptop",
	 .matches = {
		     DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
		     DMI_MATCH(DMI_PRODUCT_NAME, "HP Compaq nx6115"),
		     },
	 },
	{
	 .callback = dmi_ignore_irq0_timer_override,
	 .ident = "HP NX6125 laptop",
	 .matches = {
		     DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
		     DMI_MATCH(DMI_PRODUCT_NAME, "HP Compaq nx6125"),
		     },
	 },
	{
	 .callback = dmi_ignore_irq0_timer_override,
	 .ident = "HP NX6325 laptop",
	 .matches = {
		     DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
		     DMI_MATCH(DMI_PRODUCT_NAME, "HP Compaq nx6325"),
		     },
	 },
	{
	 .callback = dmi_ignore_irq0_timer_override,
	 .ident = "HP 6715b laptop",
	 .matches = {
		     DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
		     DMI_MATCH(DMI_PRODUCT_NAME, "HP Compaq 6715b"),
		     },
	 },
	{}
};

/*
 * acpi_boot_table_init() and acpi_boot_init()
 *  called from setup_arch(), always.
 *	1. checksums all tables
 *	2. enumerates lapics
 *	3. enumerates io-apics
 *
 * acpi_table_init() is separate to allow reading SRAT without
 * other side effects.
 *
 * side effects of acpi_boot_init:
 *	acpi_lapic = 1	if LAPIC found
 *	acpi_ioapic = 1	if IOAPIC found
 *	if (acpi_lapic && acpi_ioapic)	smp_found_config = 1;
 *	if acpi_blacklisted() acpi_disabled = 1;
 *	acpi_irq_model=...
 *	...
 */

void __init acpi_boot_table_init(void)
{
	dmi_check_system(acpi_dmi_table);

	/*
	 * If acpi_disabled, bail out
	 */
	if (acpi_disabled)
		return;

	/*
	 * Initialize the ACPI boot-time table parser.
	 */
	if (acpi_table_init()) {
		disable_acpi();
		return;
	}

	acpi_table_parse(ACPI_SIG_BOOT, acpi_parse_sbf);

	/*
	 * blacklist may disable ACPI entirely
	 */
	if (acpi_blacklisted()) {
		if (acpi_force) {
			printk(KERN_WARNING PREFIX "acpi=force override\n");
		} else {
			printk(KERN_WARNING PREFIX "Disabling ACPI support\n");
			disable_acpi();
			return;
		}
	}
}

int __init early_acpi_boot_init(void)
{
	/*
	 * If acpi_disabled, bail out
	 */
	if (acpi_disabled)
		return 1;

	/*
	 * Process the Multiple APIC Description Table (MADT), if present
	 */
	early_acpi_process_madt();

	return 0;
}

int __init acpi_boot_init(void)
{
	/* those are executed after early-quirks are executed */
	dmi_check_system(acpi_dmi_table_late);

	/*
	 * If acpi_disabled, bail out
	 */
	if (acpi_disabled)
		return 1;

	acpi_table_parse(ACPI_SIG_BOOT, acpi_parse_sbf);

	/*
	 * set sci_int and PM timer address
	 */
	acpi_table_parse(ACPI_SIG_FADT, acpi_parse_fadt);

	/*
	 * Process the Multiple APIC Description Table (MADT), if present
	 */
	acpi_process_madt();

	acpi_table_parse(ACPI_SIG_HPET, acpi_parse_hpet);

	if (!acpi_noirq)
		x86_init.pci.init = pci_acpi_init;

	return 0;
}

static int __init parse_acpi(char *arg)
{
	if (!arg)
		return -EINVAL;

	/* "acpi=off" disables both ACPI table parsing and interpreter */
	if (strcmp(arg, "off") == 0) {
		disable_acpi();
	}
	/* acpi=force to over-ride black-list */
	else if (strcmp(arg, "force") == 0) {
		acpi_force = 1;
		acpi_disabled = 0;
	}
	/* acpi=strict disables out-of-spec workarounds */
	else if (strcmp(arg, "strict") == 0) {
		acpi_strict = 1;
	}
	/* acpi=rsdt use RSDT instead of XSDT */
	else if (strcmp(arg, "rsdt") == 0) {
		acpi_rsdt_forced = 1;
	}
	/* "acpi=noirq" disables ACPI interrupt routing */
	else if (strcmp(arg, "noirq") == 0) {
		acpi_noirq_set();
	}
	/* "acpi=copy_dsdt" copies DSDT */
	else if (strcmp(arg, "copy_dsdt") == 0) {
		acpi_gbl_copy_dsdt_locally = 1;
	} else {
		/* Core will printk when we return error. */
		return -EINVAL;
	}
	return 0;
}
early_param("acpi", parse_acpi);

/* FIXME: Using pci= for an ACPI parameter is a travesty. */
static int __init parse_pci(char *arg)
{
	if (arg && strcmp(arg, "noacpi") == 0)
		acpi_disable_pci();
	return 0;
}
early_param("pci", parse_pci);

int __init acpi_mps_check(void)
{
#if defined(CONFIG_X86_LOCAL_APIC) && !defined(CONFIG_X86_MPPARSE)
	/* mptable code is not built-in */
	if (acpi_disabled || acpi_noirq) {
		printk(KERN_WARNING "MPS support code is not built-in.\n"
		       "Using acpi=off or acpi=noirq or pci=noacpi "
		       "may have problem\n");
		return 1;
	}
#endif
	return 0;
}

#ifdef CONFIG_X86_IO_APIC
static int __init parse_acpi_skip_timer_override(char *arg)
{
	acpi_skip_timer_override = 1;
	return 0;
}
early_param("acpi_skip_timer_override", parse_acpi_skip_timer_override);

static int __init parse_acpi_use_timer_override(char *arg)
{
	acpi_use_timer_override = 1;
	return 0;
}
early_param("acpi_use_timer_override", parse_acpi_use_timer_override);
#endif /* CONFIG_X86_IO_APIC */

static int __init setup_acpi_sci(char *s)
{
	if (!s)
		return -EINVAL;
	if (!strcmp(s, "edge"))
		acpi_sci_flags = ACPI_MADT_TRIGGER_EDGE |
			(acpi_sci_flags & ~ACPI_MADT_TRIGGER_MASK);
	else if (!strcmp(s, "level"))
		acpi_sci_flags = ACPI_MADT_TRIGGER_LEVEL |
			(acpi_sci_flags & ~ACPI_MADT_TRIGGER_MASK);
	else if (!strcmp(s, "high"))
		acpi_sci_flags = ACPI_MADT_POLARITY_ACTIVE_HIGH |
			(acpi_sci_flags & ~ACPI_MADT_POLARITY_MASK);
	else if (!strcmp(s, "low"))
		acpi_sci_flags = ACPI_MADT_POLARITY_ACTIVE_LOW |
			(acpi_sci_flags & ~ACPI_MADT_POLARITY_MASK);
	else
		return -EINVAL;
	return 0;
}
early_param("acpi_sci", setup_acpi_sci);
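/*
 * The two helpers below implement the FACS global-lock handshake from the
 * ACPI specification on top of cmpxchg().  In the lock word, bit 0 is the
 * "pending" flag and bit 1 is the "owned" flag.  Acquire atomically sets
 * "owned" and, if the lock was already owned, also sets "pending"; it
 * returns -1 (nonzero, i.e. acquired, to the caller) when ownership was
 * taken outright and 0 when the caller must wait for the owner's release.
 * Release clears both bits and returns the old "pending" bit so the caller
 * knows whether to signal the firmware (GBL_RLS) that a waiter exists.
 */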
int __acpi_acquire_global_lock(unsigned int *lock)
{
	unsigned int old, new, val;
	do {
		old = *lock;
		new = (((old & ~0x3) + 2) + ((old >> 1) & 0x1));
		val = cmpxchg(lock, old, new);
	} while (unlikely (val != old));
	return (new < 3) ? -1 : 0;
}

int __acpi_release_global_lock(unsigned int *lock)
{
	unsigned int old, new, val;
	do {
		old = *lock;
		new = old & ~0x3;
		val = cmpxchg(lock, old, new);
	} while (unlikely (val != old));
	return old & 0x1;
}