/* mpparse_32.c */
/*
 * Intel Multiprocessor Specification 1.1 and 1.4
 * compliant MP-table parsing routines.
 *
 * (c) 1995 Alan Cox, Building #3 <alan@redhat.com>
 * (c) 1998, 1999, 2000 Ingo Molnar <mingo@redhat.com>
 *
 * Fixes
 * Erich Boleyn : MP v1.4 and additional changes.
 * Alan Cox : Added EBDA scanning
 * Ingo Molnar : various cleanups and rewrites
 * Maciej W. Rozycki: Bits for default MP configurations
 * Paul Diefenbaugh: Added full ACPI support
 */

#include <linux/mm.h>
#include <linux/init.h>
#include <linux/acpi.h>
#include <linux/delay.h>
#include <linux/bootmem.h>
#include <linux/kernel_stat.h>
#include <linux/mc146818rtc.h>
#include <linux/bitops.h>

#include <asm/smp.h>
#include <asm/acpi.h>
#include <asm/mtrr.h>
#include <asm/mpspec.h>
#include <asm/io_apic.h>
#include <asm/bios_ebda.h>

#include <mach_apic.h>
#include <mach_apicdef.h>
#include <mach_mpparse.h>

/* Have we found an MP table */
int smp_found_config;

unsigned int __cpuinitdata maxcpus = NR_CPUS;

/*
 * Various Linux-internal data structures created from the
 * MP-table.
 */
int apic_version [MAX_APICS];

#if defined (CONFIG_MCA) || defined (CONFIG_EISA)
int mp_bus_id_to_type [MAX_MP_BUSSES];
#endif

DECLARE_BITMAP(mp_bus_not_pci, MAX_MP_BUSSES);
int mp_bus_id_to_pci_bus [MAX_MP_BUSSES] = { [0 ... MAX_MP_BUSSES-1] = -1 };

static int mp_current_pci_id;

/* I/O APIC entries */
struct mpc_config_ioapic mp_ioapics[MAX_IO_APICS];
/* MP IRQ source entries */
struct mpc_config_intsrc mp_irqs[MAX_IRQ_SOURCES];

/* # of MP IRQ source entries */
int mp_irq_entries;
int nr_ioapics;

int pic_mode;
unsigned long mp_lapic_addr;

unsigned int def_to_bigsmp = 0;

/* Processor that is doing the boot up */
unsigned int boot_cpu_physical_apicid = -1U;

/* Internal processor count */
unsigned int num_processors;

unsigned disabled_cpus __cpuinitdata;

/* Bitmask of physically existing CPUs */
physid_mask_t phys_cpu_present_map;

#ifndef CONFIG_SMP
DEFINE_PER_CPU(u16, x86_bios_cpu_apicid) = BAD_APICID;
#endif

/*
 * Intel MP BIOS table parsing routines:
 */

/*
 * Checksum an MP configuration block.
 */
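/*
 * Per the MP specification, all bytes of a valid floating pointer or
 * configuration table sum to zero modulo 256, so a non-zero return from
 * mpf_checksum() means the block is corrupt (or not a table at all).
 */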
static int __init mpf_checksum(unsigned char *mp, int len)
{
        int sum = 0;

        while (len--)
                sum += *mp++;

        return sum & 0xFF;
}

/*
 * Have to match translation table entries to main table entries by counter
 * hence the mpc_record variable .... can't see a less disgusting way of
 * doing this ....
 */
static int mpc_record;
static struct mpc_config_translation *translation_table[MAX_MPC_ENTRY] __cpuinitdata;

static void __cpuinit MP_processor_info (struct mpc_config_processor *m)
{
        int ver, apicid, cpu;
        cpumask_t tmp_map;
        physid_mask_t phys_cpu;

        if (!(m->mpc_cpuflag & CPU_ENABLED)) {
                disabled_cpus++;
                return;
        }

        apicid = mpc_apic_id(m, translation_table[mpc_record]);

        if (m->mpc_featureflag&(1<<0))
                Dprintk(" Floating point unit present.\n");
        if (m->mpc_featureflag&(1<<7))
                Dprintk(" Machine Exception supported.\n");
        if (m->mpc_featureflag&(1<<8))
                Dprintk(" 64 bit compare & exchange supported.\n");
        if (m->mpc_featureflag&(1<<9))
                Dprintk(" Internal APIC present.\n");
        if (m->mpc_featureflag&(1<<11))
                Dprintk(" SEP present.\n");
        if (m->mpc_featureflag&(1<<12))
                Dprintk(" MTRR present.\n");
        if (m->mpc_featureflag&(1<<13))
                Dprintk(" PGE present.\n");
        if (m->mpc_featureflag&(1<<14))
                Dprintk(" MCA present.\n");
        if (m->mpc_featureflag&(1<<15))
                Dprintk(" CMOV present.\n");
        if (m->mpc_featureflag&(1<<16))
                Dprintk(" PAT present.\n");
        if (m->mpc_featureflag&(1<<17))
                Dprintk(" PSE present.\n");
        if (m->mpc_featureflag&(1<<18))
                Dprintk(" PSN present.\n");
        if (m->mpc_featureflag&(1<<19))
                Dprintk(" Cache Line Flush Instruction present.\n");
        /* 20 Reserved */
        if (m->mpc_featureflag&(1<<21))
                Dprintk(" Debug Trace and EMON Store present.\n");
        if (m->mpc_featureflag&(1<<22))
                Dprintk(" ACPI Thermal Throttle Registers present.\n");
        if (m->mpc_featureflag&(1<<23))
                Dprintk(" MMX present.\n");
        if (m->mpc_featureflag&(1<<24))
                Dprintk(" FXSR present.\n");
        if (m->mpc_featureflag&(1<<25))
                Dprintk(" XMM present.\n");
        if (m->mpc_featureflag&(1<<26))
                Dprintk(" Willamette New Instructions present.\n");
        if (m->mpc_featureflag&(1<<27))
                Dprintk(" Self Snoop present.\n");
        if (m->mpc_featureflag&(1<<28))
                Dprintk(" HT present.\n");
        if (m->mpc_featureflag&(1<<29))
                Dprintk(" Thermal Monitor present.\n");
        /* 30, 31 Reserved */

        if (m->mpc_cpuflag & CPU_BOOTPROCESSOR) {
                Dprintk(" Bootup CPU\n");
                boot_cpu_physical_apicid = m->mpc_apicid;
        }

        ver = m->mpc_apicver;

        /*
         * Validate version
         */
        if (ver == 0x0) {
                printk(KERN_WARNING "BIOS bug, APIC version is 0 for CPU#%d! "
                        "fixing up to 0x10. (tell your hw vendor)\n",
                        m->mpc_apicid);
                ver = 0x10;
        }
        apic_version[m->mpc_apicid] = ver;

        phys_cpu = apicid_to_cpu_present(apicid);
        physids_or(phys_cpu_present_map, phys_cpu_present_map, phys_cpu);

        if (num_processors >= NR_CPUS) {
                printk(KERN_WARNING "WARNING: NR_CPUS limit of %i reached."
                        " Processor ignored.\n", NR_CPUS);
                return;
        }

        if (num_processors >= maxcpus) {
                printk(KERN_WARNING "WARNING: maxcpus limit of %i reached."
                        " Processor ignored.\n", maxcpus);
                return;
        }

        cpu_set(num_processors, cpu_possible_map);
        num_processors++;
        cpus_complement(tmp_map, cpu_present_map);
        cpu = first_cpu(tmp_map);

        if (m->mpc_cpuflag & CPU_BOOTPROCESSOR)
                /*
                 * x86_bios_cpu_apicid is required to have processors listed
                 * in same order as logical cpu numbers. Hence the first
                 * entry is BSP, and so on.
                 */
                cpu = 0;
        /*
         * It would be preferable to switch to bigsmp when CONFIG_HOTPLUG_CPU=y,
         * but we need to work out other dependencies like SMP_SUSPEND etc.
         * before this can be done without some confusion.
         * if (CPU_HOTPLUG_ENABLED || num_processors > 8)
         * - Ashok Raj <ashok.raj@intel.com>
         */
        if (num_processors > 8) {
                switch (boot_cpu_data.x86_vendor) {
                case X86_VENDOR_INTEL:
                        if (!APIC_XAPIC(ver)) {
                                def_to_bigsmp = 0;
                                break;
                        }
                        /* If P4 and above fall through */
                case X86_VENDOR_AMD:
                        def_to_bigsmp = 1;
                }
        }
#ifdef CONFIG_SMP
        /* are we being called early in kernel startup? */
        if (x86_cpu_to_apicid_early_ptr) {
                u16 *cpu_to_apicid = x86_cpu_to_apicid_early_ptr;
                u16 *bios_cpu_apicid = x86_bios_cpu_apicid_early_ptr;

                cpu_to_apicid[cpu] = m->mpc_apicid;
                bios_cpu_apicid[num_processors - 1] = m->mpc_apicid;
        } else {
                per_cpu(x86_cpu_to_apicid, cpu) = m->mpc_apicid;
                per_cpu(x86_bios_cpu_apicid, cpu) = m->mpc_apicid;
        }
#endif
        cpu_set(cpu, cpu_present_map);
}

static void __init MP_bus_info (struct mpc_config_bus *m)
{
        char str[7];

        memcpy(str, m->mpc_bustype, 6);
        str[6] = 0;

        mpc_oem_bus_info(m, str, translation_table[mpc_record]);

#if MAX_MP_BUSSES < 256
        if (m->mpc_busid >= MAX_MP_BUSSES) {
                printk(KERN_WARNING "MP table busid value (%d) for bustype %s "
                        " is too large, max. supported is %d\n",
                        m->mpc_busid, str, MAX_MP_BUSSES - 1);
                return;
        }
#endif

        set_bit(m->mpc_busid, mp_bus_not_pci);
        if (strncmp(str, BUSTYPE_PCI, sizeof(BUSTYPE_PCI)-1) == 0) {
                mpc_oem_pci_bus(m, translation_table[mpc_record]);
                clear_bit(m->mpc_busid, mp_bus_not_pci);
                mp_bus_id_to_pci_bus[m->mpc_busid] = mp_current_pci_id;
                mp_current_pci_id++;
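                /*
                 * Note: when neither CONFIG_EISA nor CONFIG_MCA is set, the
                 * preprocessor drops the whole else-if chain below, so only
                 * PCI buses get per-type records; every other bus type is
                 * then tracked solely through the mp_bus_not_pci bitmap set
                 * above.
                 */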
#if defined(CONFIG_EISA) || defined (CONFIG_MCA)
                mp_bus_id_to_type[m->mpc_busid] = MP_BUS_PCI;
        } else if (strncmp(str, BUSTYPE_ISA, sizeof(BUSTYPE_ISA)-1) == 0) {
                mp_bus_id_to_type[m->mpc_busid] = MP_BUS_ISA;
        } else if (strncmp(str, BUSTYPE_EISA, sizeof(BUSTYPE_EISA)-1) == 0) {
                mp_bus_id_to_type[m->mpc_busid] = MP_BUS_EISA;
        } else if (strncmp(str, BUSTYPE_MCA, sizeof(BUSTYPE_MCA)-1) == 0) {
                mp_bus_id_to_type[m->mpc_busid] = MP_BUS_MCA;
        } else {
                printk(KERN_WARNING "Unknown bustype %s - ignoring\n", str);
#endif
        }
}

static int bad_ioapic(unsigned long address)
{
        if (nr_ioapics >= MAX_IO_APICS) {
                printk(KERN_ERR "ERROR: Max # of I/O APICs (%d) exceeded "
                        "(found %d)\n", MAX_IO_APICS, nr_ioapics);
                panic("Recompile kernel with bigger MAX_IO_APICS!\n");
        }
        if (!address) {
                printk(KERN_ERR "WARNING: Bogus (zero) I/O APIC address"
                        " found in table, skipping!\n");
                return 1;
        }
        return 0;
}

static void __init MP_ioapic_info (struct mpc_config_ioapic *m)
{
        if (!(m->mpc_flags & MPC_APIC_USABLE))
                return;

        printk(KERN_INFO "I/O APIC #%d Version %d at 0x%X.\n",
                m->mpc_apicid, m->mpc_apicver, m->mpc_apicaddr);

        if (bad_ioapic(m->mpc_apicaddr))
                return;

        mp_ioapics[nr_ioapics] = *m;
        nr_ioapics++;
}

static void __init MP_intsrc_info (struct mpc_config_intsrc *m)
{
        mp_irqs [mp_irq_entries] = *m;
        Dprintk("Int: type %d, pol %d, trig %d, bus %d,"
                " IRQ %02x, APIC ID %x, APIC INT %02x\n",
                m->mpc_irqtype, m->mpc_irqflag & 3,
                (m->mpc_irqflag >> 2) & 3, m->mpc_srcbus,
                m->mpc_srcbusirq, m->mpc_dstapic, m->mpc_dstirq);
        if (++mp_irq_entries == MAX_IRQ_SOURCES)
                panic("Max # of irq sources exceeded!!\n");
}

static void __init MP_lintsrc_info (struct mpc_config_lintsrc *m)
{
        Dprintk("Lint: type %d, pol %d, trig %d, bus %d,"
                " IRQ %02x, APIC ID %x, APIC LINT %02x\n",
                m->mpc_irqtype, m->mpc_irqflag & 3,
                (m->mpc_irqflag >> 2) &3, m->mpc_srcbusid,
                m->mpc_srcbusirq, m->mpc_destapic, m->mpc_destapiclint);
}

#ifdef CONFIG_X86_NUMAQ
static void __init MP_translation_info (struct mpc_config_translation *m)
{
        printk(KERN_INFO "Translation: record %d, type %d, quad %d, global %d, local %d\n", mpc_record, m->trans_type, m->trans_quad, m->trans_global, m->trans_local);

        if (mpc_record >= MAX_MPC_ENTRY)
                printk(KERN_ERR "MAX_MPC_ENTRY exceeded!\n");
        else
                translation_table[mpc_record] = m; /* stash this for later */
        if (m->trans_quad < MAX_NUMNODES && !node_online(m->trans_quad))
                node_set_online(m->trans_quad);
}

/*
 * Read/parse the MPC oem tables
 */
static void __init smp_read_mpc_oem(struct mp_config_oemtable *oemtable, \
        unsigned short oemsize)
{
        int count = sizeof (*oemtable); /* the header size */
        unsigned char *oemptr = ((unsigned char *)oemtable)+count;

        mpc_record = 0;
        printk(KERN_INFO "Found an OEM MPC table at %8p - parsing it ... \n", oemtable);
        if (memcmp(oemtable->oem_signature,MPC_OEM_SIGNATURE,4))
        {
                printk(KERN_WARNING "SMP mpc oemtable: bad signature [%c%c%c%c]!\n",
                        oemtable->oem_signature[0],
                        oemtable->oem_signature[1],
                        oemtable->oem_signature[2],
                        oemtable->oem_signature[3]);
                return;
        }
        if (mpf_checksum((unsigned char *)oemtable,oemtable->oem_length))
        {
                printk(KERN_WARNING "SMP oem mptable: checksum error!\n");
                return;
        }
        while (count < oemtable->oem_length) {
                switch (*oemptr) {
                case MP_TRANSLATION:
                {
                        struct mpc_config_translation *m=
                                (struct mpc_config_translation *)oemptr;
                        MP_translation_info(m);
                        oemptr += sizeof(*m);
                        count += sizeof(*m);
                        ++mpc_record;
                        break;
                }
                default:
                {
                        printk(KERN_WARNING "Unrecognised OEM table entry type! - %d\n", (int) *oemptr);
                        return;
                }
                }
        }
}

static inline void mps_oem_check(struct mp_config_table *mpc, char *oem,
                char *productid)
{
        if (strncmp(oem, "IBM NUMA", 8))
                printk("Warning! May not be a NUMA-Q system!\n");
        if (mpc->mpc_oemptr)
                smp_read_mpc_oem((struct mp_config_oemtable *) mpc->mpc_oemptr,
                        mpc->mpc_oemsize);
}
#endif /* CONFIG_X86_NUMAQ */

/*
 * Read/parse the MPC
 */
static int __init smp_read_mpc(struct mp_config_table *mpc)
{
        char str[16];
        char oem[10];
        int count=sizeof(*mpc);
        unsigned char *mpt=((unsigned char *)mpc)+count;

        if (memcmp(mpc->mpc_signature,MPC_SIGNATURE,4)) {
                printk(KERN_ERR "SMP mptable: bad signature [0x%x]!\n",
                        *(u32 *)mpc->mpc_signature);
                return 0;
        }
        if (mpf_checksum((unsigned char *)mpc,mpc->mpc_length)) {
                printk(KERN_ERR "SMP mptable: checksum error!\n");
                return 0;
        }
        if (mpc->mpc_spec!=0x01 && mpc->mpc_spec!=0x04) {
                printk(KERN_ERR "SMP mptable: bad table version (%d)!!\n",
                        mpc->mpc_spec);
                return 0;
        }
        if (!mpc->mpc_lapic) {
                printk(KERN_ERR "SMP mptable: null local APIC address!\n");
                return 0;
        }
        memcpy(oem,mpc->mpc_oem,8);
        oem[8]=0;
        printk(KERN_INFO "OEM ID: %s ",oem);

        memcpy(str,mpc->mpc_productid,12);
        str[12]=0;
        printk("Product ID: %s ",str);

        mps_oem_check(mpc, oem, str);

        printk("APIC at: 0x%X\n", mpc->mpc_lapic);

        /*
         * Save the local APIC address (it might be non-default) -- but only
         * if we're not using ACPI.
         */
        if (!acpi_lapic)
                mp_lapic_addr = mpc->mpc_lapic;

        /*
         * Now process the configuration blocks.
         */
        mpc_record = 0;
        while (count < mpc->mpc_length) {
                switch(*mpt) {
                case MP_PROCESSOR:
                {
                        struct mpc_config_processor *m=
                                (struct mpc_config_processor *)mpt;
                        /* ACPI may have already provided this data */
                        if (!acpi_lapic)
                                MP_processor_info(m);
                        mpt += sizeof(*m);
                        count += sizeof(*m);
                        break;
                }
                case MP_BUS:
                {
                        struct mpc_config_bus *m=
                                (struct mpc_config_bus *)mpt;
                        MP_bus_info(m);
                        mpt += sizeof(*m);
                        count += sizeof(*m);
                        break;
                }
                case MP_IOAPIC:
                {
                        struct mpc_config_ioapic *m=
                                (struct mpc_config_ioapic *)mpt;
                        MP_ioapic_info(m);
                        mpt+=sizeof(*m);
                        count+=sizeof(*m);
                        break;
                }
                case MP_INTSRC:
                {
                        struct mpc_config_intsrc *m=
                                (struct mpc_config_intsrc *)mpt;
                        MP_intsrc_info(m);
                        mpt+=sizeof(*m);
                        count+=sizeof(*m);
                        break;
                }
                case MP_LINTSRC:
                {
                        struct mpc_config_lintsrc *m=
                                (struct mpc_config_lintsrc *)mpt;
                        MP_lintsrc_info(m);
                        mpt+=sizeof(*m);
                        count+=sizeof(*m);
                        break;
                }
                default:
                {
                        count = mpc->mpc_length;
                        break;
                }
                }
                ++mpc_record;
        }
        setup_apic_routing();
        if (!num_processors)
                printk(KERN_ERR "SMP mptable: no processors registered!\n");
        return num_processors;
}
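/*
 * The two ELCR (edge/level control) registers live at I/O ports 0x4d0
 * (IRQs 0-7) and 0x4d1 (IRQs 8-15); a set bit marks the corresponding
 * ISA IRQ as level-triggered, which is what ELCR_trigger() reports.
 */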
static int __init ELCR_trigger(unsigned int irq)
{
        unsigned int port;

        port = 0x4d0 + (irq >> 3);
        return (inb(port) >> (irq & 7)) & 1;
}

static void __init construct_default_ioirq_mptable(int mpc_default_type)
{
        struct mpc_config_intsrc intsrc;
        int i;
        int ELCR_fallback = 0;

        intsrc.mpc_type = MP_INTSRC;
        intsrc.mpc_irqflag = 0; /* conforming */
        intsrc.mpc_srcbus = 0;
        intsrc.mpc_dstapic = mp_ioapics[0].mpc_apicid;

        intsrc.mpc_irqtype = mp_INT;

        /*
         * If true, we have an ISA/PCI system with no IRQ entries
         * in the MP table. To prevent the PCI interrupts from being set up
         * incorrectly, we try to use the ELCR. The sanity check to see if
         * there is good ELCR data is very simple - IRQ0, 1, 2 and 13 can
         * never be level sensitive, so we simply see if the ELCR agrees.
         * If it does, we assume it's valid.
         */
        if (mpc_default_type == 5) {
                printk(KERN_INFO "ISA/PCI bus type with no IRQ information... falling back to ELCR\n");

                if (ELCR_trigger(0) || ELCR_trigger(1) || ELCR_trigger(2) || ELCR_trigger(13))
                        printk(KERN_WARNING "ELCR contains invalid data... not using ELCR\n");
                else {
                        printk(KERN_INFO "Using ELCR to identify PCI interrupts\n");
                        ELCR_fallback = 1;
                }
        }

        for (i = 0; i < 16; i++) {
                switch (mpc_default_type) {
                case 2:
                        if (i == 0 || i == 13)
                                continue; /* IRQ0 & IRQ13 not connected */
                        /* fall through */
                default:
                        if (i == 2)
                                continue; /* IRQ2 is never connected */
                }

                if (ELCR_fallback) {
                        /*
                         * If the ELCR indicates a level-sensitive interrupt, we
                         * copy that information over to the MP table in the
                         * irqflag field (level sensitive, active high polarity).
                         */
                        if (ELCR_trigger(i))
                                intsrc.mpc_irqflag = 13;
                        else
                                intsrc.mpc_irqflag = 0;
                }

                intsrc.mpc_srcbusirq = i;
                intsrc.mpc_dstirq = i ? i : 2; /* IRQ0 to INTIN2 */
                MP_intsrc_info(&intsrc);
        }

        intsrc.mpc_irqtype = mp_ExtINT;
        intsrc.mpc_srcbusirq = 0;
        intsrc.mpc_dstirq = 0; /* 8259A to INTIN0 */
        MP_intsrc_info(&intsrc);
}

static inline void __init construct_default_ISA_mptable(int mpc_default_type)
{
        struct mpc_config_processor processor;
        struct mpc_config_bus bus;
        struct mpc_config_ioapic ioapic;
        struct mpc_config_lintsrc lintsrc;
        int linttypes[2] = { mp_ExtINT, mp_NMI };
        int i;

        /*
         * local APIC has default address
         */
        mp_lapic_addr = APIC_DEFAULT_PHYS_BASE;

        /*
         * 2 CPUs, numbered 0 & 1.
         */
        processor.mpc_type = MP_PROCESSOR;
        /* Either an integrated APIC or a discrete 82489DX. */
        processor.mpc_apicver = mpc_default_type > 4 ? 0x10 : 0x01;
        processor.mpc_cpuflag = CPU_ENABLED;
        processor.mpc_cpufeature = (boot_cpu_data.x86 << 8) |
                (boot_cpu_data.x86_model << 4) |
                boot_cpu_data.x86_mask;
        processor.mpc_featureflag = boot_cpu_data.x86_capability[0];
        processor.mpc_reserved[0] = 0;
        processor.mpc_reserved[1] = 0;
        for (i = 0; i < 2; i++) {
                processor.mpc_apicid = i;
                MP_processor_info(&processor);
        }

        bus.mpc_type = MP_BUS;
        bus.mpc_busid = 0;
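        /*
         * MP spec default configurations: types 1-4 pair a discrete 82489DX
         * APIC with an ISA, EISA or MCA bus; types 5-7 add a PCI bus and an
         * integrated local APIC, which is why the code above and below
         * checks mpc_default_type > 4.
         */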
        switch (mpc_default_type) {
        default:
                printk("???\n");
                printk(KERN_ERR "Unknown standard configuration %d\n",
                        mpc_default_type);
                /* fall through */
        case 1:
        case 5:
                memcpy(bus.mpc_bustype, "ISA ", 6);
                break;
        case 2:
        case 6:
        case 3:
                memcpy(bus.mpc_bustype, "EISA ", 6);
                break;
        case 4:
        case 7:
                memcpy(bus.mpc_bustype, "MCA ", 6);
        }
        MP_bus_info(&bus);
        if (mpc_default_type > 4) {
                bus.mpc_busid = 1;
                memcpy(bus.mpc_bustype, "PCI ", 6);
                MP_bus_info(&bus);
        }

        ioapic.mpc_type = MP_IOAPIC;
        ioapic.mpc_apicid = 2;
        ioapic.mpc_apicver = mpc_default_type > 4 ? 0x10 : 0x01;
        ioapic.mpc_flags = MPC_APIC_USABLE;
        ioapic.mpc_apicaddr = 0xFEC00000;
        MP_ioapic_info(&ioapic);

        /*
         * We set up most of the low 16 IO-APIC pins according to MPS rules.
         */
        construct_default_ioirq_mptable(mpc_default_type);

        lintsrc.mpc_type = MP_LINTSRC;
        lintsrc.mpc_irqflag = 0; /* conforming */
        lintsrc.mpc_srcbusid = 0;
        lintsrc.mpc_srcbusirq = 0;
        lintsrc.mpc_destapic = MP_APIC_ALL;
        for (i = 0; i < 2; i++) {
                lintsrc.mpc_irqtype = linttypes[i];
                lintsrc.mpc_destapiclint = i;
                MP_lintsrc_info(&lintsrc);
        }
}

static struct intel_mp_floating *mpf_found;

/*
 * Scan the memory blocks for an SMP configuration block.
 */
void __init get_smp_config (void)
{
        struct intel_mp_floating *mpf = mpf_found;

        /*
         * ACPI supports both logical (e.g. Hyper-Threading) and physical
         * processors, where MPS only supports physical.
         */
        if (acpi_lapic && acpi_ioapic) {
                printk(KERN_INFO "Using ACPI (MADT) for SMP configuration information\n");
                return;
        }
        else if (acpi_lapic)
                printk(KERN_INFO "Using ACPI for processor (LAPIC) configuration information\n");

        printk(KERN_INFO "Intel MultiProcessor Specification v1.%d\n", mpf->mpf_specification);
        if (mpf->mpf_feature2 & (1<<7)) {
                printk(KERN_INFO " IMCR and PIC compatibility mode.\n");
                pic_mode = 1;
        } else {
                printk(KERN_INFO " Virtual Wire compatibility mode.\n");
                pic_mode = 0;
        }

        /*
         * Now see if we need to read further.
         */
        if (mpf->mpf_feature1 != 0) {
                printk(KERN_INFO "Default MP configuration #%d\n", mpf->mpf_feature1);
                construct_default_ISA_mptable(mpf->mpf_feature1);
        } else if (mpf->mpf_physptr) {
                /*
                 * Read the physical hardware table. Anything here will
                 * override the defaults.
                 */
                if (!smp_read_mpc(phys_to_virt(mpf->mpf_physptr))) {
                        smp_found_config = 0;
                        printk(KERN_ERR "BIOS bug, MP table errors detected!...\n");
                        printk(KERN_ERR "... disabling SMP support. (tell your hw vendor)\n");
                        return;
                }
                /*
                 * If there are no explicit MP IRQ entries, then we are
                 * broken. We set up most of the low 16 IO-APIC pins to
                 * ISA defaults and hope it will work.
                 */
                if (!mp_irq_entries) {
                        struct mpc_config_bus bus;

                        printk(KERN_ERR "BIOS bug, no explicit IRQ entries, using default mptable. (tell your hw vendor)\n");

                        bus.mpc_type = MP_BUS;
                        bus.mpc_busid = 0;
                        memcpy(bus.mpc_bustype, "ISA ", 6);
                        MP_bus_info(&bus);

                        construct_default_ioirq_mptable(0);
                }
        } else
                BUG();

        printk(KERN_INFO "Processors: %d\n", num_processors);
        /*
         * Only use the first configuration found.
         */
}

static int __init smp_scan_config (unsigned long base, unsigned long length)
{
        unsigned long *bp = phys_to_virt(base);
        struct intel_mp_floating *mpf;

        printk(KERN_INFO "Scan SMP from %p for %ld bytes.\n", bp,length);
        if (sizeof(*mpf) != 16)
                printk("Error: MPF size\n");
        while (length > 0) {
                mpf = (struct intel_mp_floating *)bp;
                if ((*bp == SMP_MAGIC_IDENT) &&
                        (mpf->mpf_length == 1) &&
                        !mpf_checksum((unsigned char *)bp, 16) &&
                        ((mpf->mpf_specification == 1)
                                || (mpf->mpf_specification == 4)) ) {

                        smp_found_config = 1;
                        printk(KERN_INFO "found SMP MP-table at [%p] %08lx\n",
                                mpf, virt_to_phys(mpf));
                        reserve_bootmem(virt_to_phys(mpf), PAGE_SIZE,
                                BOOTMEM_DEFAULT);
                        if (mpf->mpf_physptr) {
                                /*
                                 * We cannot access the MPC table to compute
                                 * its size yet, as only a few megabytes from
                                 * the bottom of memory are mapped at this
                                 * point. The PC-9800 places its MPC table at
                                 * the very end of physical memory, so blindly
                                 * reserving PAGE_SIZE from mpf->mpf_physptr
                                 * would trigger a BUG() in reserve_bootmem.
                                 */
                                unsigned long size = PAGE_SIZE;
                                unsigned long end = max_low_pfn * PAGE_SIZE;
                                if (mpf->mpf_physptr + size > end)
                                        size = end - mpf->mpf_physptr;
                                reserve_bootmem(mpf->mpf_physptr, size,
                                        BOOTMEM_DEFAULT);
                        }

                        mpf_found = mpf;
                        return 1;
                }
                bp += 4;
                length -= 16;
        }
        return 0;
}

void __init find_smp_config (void)
{
        unsigned int address;

        /*
         * FIXME: Linux assumes you have 640K of base ram..
         * this continues the error...
         *
         * 1) Scan the bottom 1K for a signature
         * 2) Scan the top 1K of base RAM
         * 3) Scan the 64K of bios
         */
        if (smp_scan_config(0x0,0x400) ||
                smp_scan_config(639*0x400,0x400) ||
                smp_scan_config(0xF0000,0x10000))
                return;
        /*
         * If it is an SMP machine we should know now, unless the
         * configuration is in an EISA/MCA bus machine with an
         * extended bios data area.
         *
         * there is a real-mode segmented pointer pointing to the
         * 4K EBDA area at 0x40E, calculate and scan it here.
         *
         * NOTE! There are Linux loaders that will corrupt the EBDA
         * area, and as such this kind of SMP config may be less
         * trustworthy, simply because the SMP table may have been
         * stomped on during early boot. These loaders are buggy and
         * should be fixed.
         *
         * MP1.4 SPEC states to only scan first 1K of 4K EBDA.
         */
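        /*
         * get_bios_ebda() reads the real-mode segment value stored at
         * 0x40E and shifts it left by four bits to yield the physical
         * EBDA base address scanned below.
         */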
        address = get_bios_ebda();
        if (address)
                smp_scan_config(address, 0x400);
}

int es7000_plat;

/* --------------------------------------------------------------------------
   ACPI-based MP Configuration
   -------------------------------------------------------------------------- */

#ifdef CONFIG_ACPI

void __init mp_register_lapic_address(u64 address)
{
        mp_lapic_addr = (unsigned long) address;

        set_fixmap_nocache(FIX_APIC_BASE, mp_lapic_addr);

        if (boot_cpu_physical_apicid == -1U)
                boot_cpu_physical_apicid = GET_APIC_ID(apic_read(APIC_ID));

        Dprintk("Boot CPU = %d\n", boot_cpu_physical_apicid);
}

void __cpuinit mp_register_lapic (u8 id, u8 enabled)
{
        struct mpc_config_processor processor;
        int boot_cpu = 0;

        if (MAX_APICS - id <= 0) {
                printk(KERN_WARNING "Processor #%d invalid (max %d)\n",
                        id, MAX_APICS);
                return;
        }

        if (id == boot_cpu_physical_apicid)
                boot_cpu = 1;

        processor.mpc_type = MP_PROCESSOR;
        processor.mpc_apicid = id;
        processor.mpc_apicver = GET_APIC_VERSION(apic_read(APIC_LVR));
        processor.mpc_cpuflag = (enabled ? CPU_ENABLED : 0);
        processor.mpc_cpuflag |= (boot_cpu ? CPU_BOOTPROCESSOR : 0);
        processor.mpc_cpufeature = (boot_cpu_data.x86 << 8) |
                (boot_cpu_data.x86_model << 4) | boot_cpu_data.x86_mask;
        processor.mpc_featureflag = boot_cpu_data.x86_capability[0];
        processor.mpc_reserved[0] = 0;
        processor.mpc_reserved[1] = 0;

        MP_processor_info(&processor);
}

#ifdef CONFIG_X86_IO_APIC

#define MP_ISA_BUS 0
#define MP_MAX_IOAPIC_PIN 127
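/*
 * Per-IOAPIC routing bookkeeping: gsi_base/gsi_end record the GSI range
 * each chip serves, and pin_programmed[] is a 4 x 32-bit bitmap (one bit
 * per pin, up to MP_MAX_IOAPIC_PIN) used to avoid programming a pin twice.
 */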
static struct mp_ioapic_routing {
        int apic_id;
        int gsi_base;
        int gsi_end;
        u32 pin_programmed[4];
} mp_ioapic_routing[MAX_IO_APICS];

static int mp_find_ioapic (int gsi)
{
        int i = 0;

        /* Find the IOAPIC that manages this GSI. */
        for (i = 0; i < nr_ioapics; i++) {
                if ((gsi >= mp_ioapic_routing[i].gsi_base)
                        && (gsi <= mp_ioapic_routing[i].gsi_end))
                        return i;
        }

        printk(KERN_ERR "ERROR: Unable to locate IOAPIC for GSI %d\n", gsi);
        return -1;
}

static u8 uniq_ioapic_id(u8 id)
{
        if ((boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) &&
                !APIC_XAPIC(apic_version[boot_cpu_physical_apicid]))
                return io_apic_get_unique_id(nr_ioapics, id);
        else
                return id;
}

void __init mp_register_ioapic(u8 id, u32 address, u32 gsi_base)
{
        int idx = 0;

        if (bad_ioapic(address))
                return;

        idx = nr_ioapics;

        mp_ioapics[idx].mpc_type = MP_IOAPIC;
        mp_ioapics[idx].mpc_flags = MPC_APIC_USABLE;
        mp_ioapics[idx].mpc_apicaddr = address;

        set_fixmap_nocache(FIX_IO_APIC_BASE_0 + idx, address);
        mp_ioapics[idx].mpc_apicid = uniq_ioapic_id(id);
        mp_ioapics[idx].mpc_apicver = io_apic_get_version(idx);

        /*
         * Build basic GSI lookup table to facilitate gsi->io_apic lookups
         * and to prevent reprogramming of IOAPIC pins (PCI GSIs).
         */
        mp_ioapic_routing[idx].apic_id = mp_ioapics[idx].mpc_apicid;
        mp_ioapic_routing[idx].gsi_base = gsi_base;
        mp_ioapic_routing[idx].gsi_end = gsi_base +
                io_apic_get_redir_entries(idx);
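        /*
         * io_apic_get_redir_entries() returns the highest redirection entry
         * index (pin count - 1), so gsi_end is inclusive; mp_find_ioapic()
         * tests gsi <= gsi_end accordingly.
         */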
  829. printk("IOAPIC[%d]: apic_id %d, version %d, address 0x%x, "
  830. "GSI %d-%d\n", idx, mp_ioapics[idx].mpc_apicid,
  831. mp_ioapics[idx].mpc_apicver,
  832. mp_ioapics[idx].mpc_apicaddr,
  833. mp_ioapic_routing[idx].gsi_base,
  834. mp_ioapic_routing[idx].gsi_end);
  835. nr_ioapics++;
  836. }
  837. void __init
  838. mp_override_legacy_irq(u8 bus_irq, u8 polarity, u8 trigger, u32 gsi)
  839. {
  840. struct mpc_config_intsrc intsrc;
  841. int ioapic = -1;
  842. int pin = -1;
  843. /*
  844. * Convert 'gsi' to 'ioapic.pin'.
  845. */
  846. ioapic = mp_find_ioapic(gsi);
  847. if (ioapic < 0)
  848. return;
  849. pin = gsi - mp_ioapic_routing[ioapic].gsi_base;
  850. /*
  851. * TBD: This check is for faulty timer entries, where the override
  852. * erroneously sets the trigger to level, resulting in a HUGE
  853. * increase of timer interrupts!
  854. */
  855. if ((bus_irq == 0) && (trigger == 3))
  856. trigger = 1;
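        /*
         * MPS irqflag encoding: bits 1:0 carry the polarity (01 = active
         * high, 11 = active low) and bits 3:2 the trigger mode (01 = edge,
         * 11 = level), with 00 meaning "conforms to bus" in either field,
         * hence the (trigger << 2) | polarity packing below.
         */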
        intsrc.mpc_type = MP_INTSRC;
        intsrc.mpc_irqtype = mp_INT;
        intsrc.mpc_irqflag = (trigger << 2) | polarity;
        intsrc.mpc_srcbus = MP_ISA_BUS;
        intsrc.mpc_srcbusirq = bus_irq; /* IRQ */
        intsrc.mpc_dstapic = mp_ioapics[ioapic].mpc_apicid; /* APIC ID */
        intsrc.mpc_dstirq = pin; /* INTIN# */

        Dprintk("Int: type %d, pol %d, trig %d, bus %d, irq %d, %d-%d\n",
                intsrc.mpc_irqtype, intsrc.mpc_irqflag & 3,
                (intsrc.mpc_irqflag >> 2) & 3, intsrc.mpc_srcbus,
                intsrc.mpc_srcbusirq, intsrc.mpc_dstapic, intsrc.mpc_dstirq);

        mp_irqs[mp_irq_entries] = intsrc;
        if (++mp_irq_entries == MAX_IRQ_SOURCES)
                panic("Max # of irq sources exceeded!\n");
}

void __init mp_config_acpi_legacy_irqs (void)
{
        struct mpc_config_intsrc intsrc;
        int i = 0;
        int ioapic = -1;

#if defined (CONFIG_MCA) || defined (CONFIG_EISA)
        /*
         * Fabricate the legacy ISA bus (bus #31).
         */
        mp_bus_id_to_type[MP_ISA_BUS] = MP_BUS_ISA;
#endif
        set_bit(MP_ISA_BUS, mp_bus_not_pci);
        Dprintk("Bus #%d is ISA\n", MP_ISA_BUS);

        /*
         * Older generations of ES7000 have no legacy identity mappings
         */
        if (es7000_plat == 1)
                return;

        /*
         * Locate the IOAPIC that manages the ISA IRQs (0-15).
         */
        ioapic = mp_find_ioapic(0);
        if (ioapic < 0)
                return;

        intsrc.mpc_type = MP_INTSRC;
        intsrc.mpc_irqflag = 0; /* Conforming */
        intsrc.mpc_srcbus = MP_ISA_BUS;
        intsrc.mpc_dstapic = mp_ioapics[ioapic].mpc_apicid;

        /*
         * Use the default configuration for the IRQs 0-15. Unless
         * overridden by (MADT) interrupt source override entries.
         */
        for (i = 0; i < 16; i++) {
                int idx;

                for (idx = 0; idx < mp_irq_entries; idx++) {
                        struct mpc_config_intsrc *irq = mp_irqs + idx;

                        /* Do we already have a mapping for this ISA IRQ? */
                        if (irq->mpc_srcbus == MP_ISA_BUS && irq->mpc_srcbusirq == i)
                                break;

                        /* Do we already have a mapping for this IOAPIC pin */
                        if ((irq->mpc_dstapic == intsrc.mpc_dstapic) &&
                                (irq->mpc_dstirq == i))
                                break;
                }

                if (idx != mp_irq_entries) {
                        printk(KERN_DEBUG "ACPI: IRQ%d used by override.\n", i);
                        continue; /* IRQ already used */
                }

                intsrc.mpc_irqtype = mp_INT;
                intsrc.mpc_srcbusirq = i; /* Identity mapped */
                intsrc.mpc_dstirq = i;

                Dprintk("Int: type %d, pol %d, trig %d, bus %d, irq %d, "
                        "%d-%d\n", intsrc.mpc_irqtype, intsrc.mpc_irqflag & 3,
                        (intsrc.mpc_irqflag >> 2) & 3, intsrc.mpc_srcbus,
                        intsrc.mpc_srcbusirq, intsrc.mpc_dstapic,
                        intsrc.mpc_dstirq);

                mp_irqs[mp_irq_entries] = intsrc;
                if (++mp_irq_entries == MAX_IRQ_SOURCES)
                        panic("Max # of irq sources exceeded!\n");
        }
}

#define MAX_GSI_NUM 4096
#define IRQ_COMPRESSION_START 64
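/*
 * gsi_to_irq[] below remembers how level-triggered PCI GSIs at or above
 * IRQ_COMPRESSION_START were compressed into sequential IRQ numbers, so
 * that a repeat registration of an already-programmed pin can hand back
 * the same IRQ.
 */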
int mp_register_gsi(u32 gsi, int triggering, int polarity)
{
        int ioapic = -1;
        int ioapic_pin = 0;
        int idx, bit = 0;
        static int pci_irq = IRQ_COMPRESSION_START;
        /*
         * Mapping between Global System Interrupts, which
         * represent all possible interrupts, and IRQs
         * assigned to actual devices.
         */
        static int gsi_to_irq[MAX_GSI_NUM];

        /* Don't set up the ACPI SCI because it's already set up */
        if (acpi_gbl_FADT.sci_interrupt == gsi)
                return gsi;

        ioapic = mp_find_ioapic(gsi);
        if (ioapic < 0) {
                printk(KERN_WARNING "No IOAPIC for GSI %u\n", gsi);
                return gsi;
        }

        ioapic_pin = gsi - mp_ioapic_routing[ioapic].gsi_base;

        if (ioapic_renumber_irq)
                gsi = ioapic_renumber_irq(ioapic, gsi);

        /*
         * Avoid pin reprogramming. PRTs typically include entries
         * with redundant pin->gsi mappings (but unique PCI devices);
         * we only program the IOAPIC on the first.
         */
        bit = ioapic_pin % 32;
        idx = (ioapic_pin < 32) ? 0 : (ioapic_pin / 32);
        if (idx > 3) {
                printk(KERN_ERR "Invalid reference to IOAPIC pin "
                        "%d-%d\n", mp_ioapic_routing[ioapic].apic_id,
                        ioapic_pin);
                return gsi;
        }
        if ((1<<bit) & mp_ioapic_routing[ioapic].pin_programmed[idx]) {
                Dprintk(KERN_DEBUG "Pin %d-%d already programmed\n",
                        mp_ioapic_routing[ioapic].apic_id, ioapic_pin);
                return (gsi < IRQ_COMPRESSION_START ? gsi : gsi_to_irq[gsi]);
        }

        mp_ioapic_routing[ioapic].pin_programmed[idx] |= (1<<bit);

        /*
         * For GSI >= 64, use IRQ compression
         */
        if ((gsi >= IRQ_COMPRESSION_START)
                && (triggering == ACPI_LEVEL_SENSITIVE)) {
                /*
                 * For PCI devices assign IRQs in order, avoiding gaps
                 * due to unused I/O APIC pins.
                 */
                int irq = gsi;
                if (gsi < MAX_GSI_NUM) {
                        /*
                         * Retain the VIA chipset work-around (gsi > 15), but
                         * avoid a problem where the 8254 timer (IRQ0) is setup
                         * via an override (so it's not on pin 0 of the ioapic),
                         * and at the same time, the pin 0 interrupt is a PCI
                         * type. The gsi > 15 test could cause these two pins
                         * to be shared as IRQ0, and they are not shareable.
                         * So test for this condition, and if necessary, avoid
                         * the pin collision.
                         */
                        if (gsi > 15 || (gsi == 0 && !timer_uses_ioapic_pin_0))
                                gsi = pci_irq++;
                        /*
                         * Don't assign IRQ used by ACPI SCI
                         */
                        if (gsi == acpi_gbl_FADT.sci_interrupt)
                                gsi = pci_irq++;
                        gsi_to_irq[irq] = gsi;
                } else {
                        printk(KERN_ERR "GSI %u is too high\n", gsi);
                        return gsi;
                }
        }

        io_apic_set_pci_routing(ioapic, ioapic_pin, gsi,
                triggering == ACPI_EDGE_SENSITIVE ? 0 : 1,
                polarity == ACPI_ACTIVE_HIGH ? 0 : 1);

        return gsi;
}

#endif /* CONFIG_X86_IO_APIC */
#endif /* CONFIG_ACPI */