iosapic.c

/*
 * I/O SAPIC support.
 *
 * Copyright (C) 1999 Intel Corp.
 * Copyright (C) 1999 Asit Mallick <asit.k.mallick@intel.com>
 * Copyright (C) 2000-2002 J.I. Lee <jung-ik.lee@intel.com>
 * Copyright (C) 1999-2000, 2002-2003 Hewlett-Packard Co.
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 * Copyright (C) 1999 VA Linux Systems
 * Copyright (C) 1999,2000 Walt Drummond <drummond@valinux.com>
 *
 * 00/04/19	D. Mosberger	Rewritten to mirror more closely the x86 I/O
 *				APIC code.  In particular, we now have separate
 *				handlers for edge and level triggered
 *				interrupts.
 * 00/10/27	Asit Mallick, Goutham Rao <goutham.rao@intel.com> IRQ vector
 *				allocation PCI to vector mapping, shared PCI
 *				interrupts.
 * 00/10/27	D. Mosberger	Document things a bit more to make them more
 *				understandable.  Clean up much of the old
 *				IOSAPIC cruft.
 * 01/07/27	J.I. Lee	PCI irq routing, Platform/Legacy interrupts
 *				and fixes for ACPI S5(SoftOff) support.
 * 02/01/23	J.I. Lee	iosapic pgm fixes for PCI irq routing from _PRT
 * 02/01/07	E. Focht	<efocht@ess.nec.de> Redirectable interrupt
 *				vectors in iosapic_set_affinity(),
 *				initializations for /proc/irq/#/smp_affinity
 * 02/04/02	P. Diefenbaugh	Cleaned up ACPI PCI IRQ routing.
 * 02/04/18	J.I. Lee	bug fix in iosapic_init_pci_irq
 * 02/04/30	J.I. Lee	bug fix in find_iosapic to fix ACPI PCI IRQ to
 *				IOSAPIC mapping error
 * 02/07/29	T. Kochi	Allocate interrupt vectors dynamically
 * 02/08/04	T. Kochi	Cleaned up terminology (irq, global system
 *				interrupt, vector, etc.)
 * 02/09/20	D. Mosberger	Simplified by taking advantage of ACPI's
 *				pci_irq code.
 * 03/02/19	B. Helgaas	Make pcat_compat system-wide, not per-IOSAPIC.
 *				Remove iosapic_address & gsi_base from
 *				external interfaces.  Rationalize
 *				__init/__devinit attributes.
 * 04/12/04	Ashok Raj	<ashok.raj@intel.com> Intel Corporation 2004
 *				Updated to work with irq migration necessary
 *				for CPU Hotplug
 */
/*
 * Here is what the interrupt logic between a PCI device and the kernel looks
 * like:
 *
 * (1) A PCI device raises one of the four interrupt pins (INTA, INTB, INTC,
 *     INTD).  The device is uniquely identified by its bus and slot number
 *     (the function number does not matter here because all functions share
 *     the same interrupt lines).
 *
 * (2) The motherboard routes the interrupt line to a pin on an IOSAPIC
 *     controller.  Multiple interrupt lines may have to share the same
 *     IOSAPIC pin (if they're level triggered and use the same polarity).
 *     Each interrupt line has a unique Global System Interrupt (GSI) number
 *     which can be calculated as the sum of the controller's base GSI number
 *     and the IOSAPIC pin number to which the line connects.
 *
 * (3) The IOSAPIC uses internal routing table entries (RTEs) to map an
 *     IOSAPIC pin onto an IA-64 interrupt vector.  This interrupt vector is
 *     then sent to the CPU.
 *
 * (4) The kernel recognizes an interrupt as an IRQ.  The IRQ interface is
 *     used as the architecture-independent interrupt handling mechanism in
 *     Linux.  Because an IRQ is a number, we need an
 *     IA-64 interrupt vector number <-> IRQ number mapping.  On smaller
 *     systems, we use a one-to-one mapping between IA-64 vector and IRQ.  A
 *     platform can implement the platform_irq_to_vector(irq) and
 *     platform_local_vector_to_irq(vector) APIs to differentiate the mapping.
 *     Please see also include/asm-ia64/hw_irq.h for those APIs.
 *
 * To sum up, there are three levels of mappings involved:
 *
 *	PCI pin -> global system interrupt (GSI) -> IA-64 vector <-> IRQ
 *
 * Note: The term "IRQ" is loosely used everywhere in the Linux kernel to
 * describe interrupts.  Now we use "IRQ" only for Linux IRQs.  ISA IRQ
 * (isa_irq) is the only exception in this source code.
 */
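
/*
 * Worked example of the mapping above, with purely illustrative numbers
 * (not taken from any particular platform): if an IOSAPIC is registered
 * with gsi_base 16 and a device's INTA line is wired to pin 3 of that
 * IOSAPIC, the line's GSI is 16 + 3 = 19.  iosapic_register_intr(19, ...)
 * then picks (or shares) an IA-64 vector, programs RTE 3 of that IOSAPIC
 * with the vector, and returns the Linux IRQ that maps to the vector.
 */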
#include <linux/acpi.h>
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/smp.h>
#include <linux/string.h>
#include <linux/bootmem.h>

#include <asm/delay.h>
#include <asm/hw_irq.h>
#include <asm/io.h>
#include <asm/iosapic.h>
#include <asm/machvec.h>
#include <asm/processor.h>
#include <asm/ptrace.h>
#include <asm/system.h>

#undef DEBUG_INTERRUPT_ROUTING

#ifdef DEBUG_INTERRUPT_ROUTING
#define DBG(fmt...)	printk(fmt)
#else
#define DBG(fmt...)
#endif

#define NR_PREALLOCATE_RTE_ENTRIES \
	(PAGE_SIZE / sizeof(struct iosapic_rte_info))
#define RTE_PREALLOCATED	(1)

static DEFINE_SPINLOCK(iosapic_lock);

/*
 * These tables map IA-64 vectors to the IOSAPIC pin that generates this
 * vector.
 */

#define NO_REF_RTE	0

static struct iosapic {
	char __iomem	*addr;		/* base address of IOSAPIC */
	unsigned int	gsi_base;	/* GSI base */
	unsigned short	num_rte;	/* # of RTEs on this IOSAPIC */
	int		rtes_inuse;	/* # of RTEs in use on this IOSAPIC */
#ifdef CONFIG_NUMA
	unsigned short	node;		/* numa node association via pxm */
#endif
	spinlock_t	lock;		/* lock for indirect reg access */
} iosapic_lists[NR_IOSAPICS];

struct iosapic_rte_info {
	struct list_head rte_list;	/* RTEs sharing the same vector */
	char		rte_index;	/* IOSAPIC RTE index */
	int		refcnt;		/* reference counter */
	unsigned int	flags;		/* flags */
	struct iosapic	*iosapic;
} ____cacheline_aligned;

static struct iosapic_intr_info {
	struct list_head rtes;		/* RTEs using this vector (empty =>
					 * not an IOSAPIC interrupt) */
	int		count;		/* # of registered RTEs */
	u32		low32;		/* current value of low word of
					 * Redirection table entry */
	unsigned int	dest;		/* destination CPU physical ID */
	unsigned char	dmode	: 3;	/* delivery mode (see iosapic.h) */
	unsigned char	polarity: 1;	/* interrupt polarity
					 * (see iosapic.h) */
	unsigned char	trigger	: 1;	/* trigger mode (see iosapic.h) */
} iosapic_intr_info[NR_IRQS];

static unsigned char pcat_compat __devinitdata;	/* 8259 compatibility flag */

static int iosapic_kmalloc_ok;
static LIST_HEAD(free_rte_list);
static inline void
iosapic_write(struct iosapic *iosapic, unsigned int reg, u32 val)
{
	unsigned long flags;

	spin_lock_irqsave(&iosapic->lock, flags);
	__iosapic_write(iosapic->addr, reg, val);
	spin_unlock_irqrestore(&iosapic->lock, flags);
}

/*
 * Find an IOSAPIC associated with a GSI
 */
static inline int
find_iosapic (unsigned int gsi)
{
	int i;

	for (i = 0; i < NR_IOSAPICS; i++) {
		if ((unsigned) (gsi - iosapic_lists[i].gsi_base) <
		    iosapic_lists[i].num_rte)
			return i;
	}

	return -1;
}

static inline int __gsi_to_irq(unsigned int gsi)
{
	int irq;
	struct iosapic_intr_info *info;
	struct iosapic_rte_info *rte;

	for (irq = 0; irq < NR_IRQS; irq++) {
		info = &iosapic_intr_info[irq];
		list_for_each_entry(rte, &info->rtes, rte_list)
			if (rte->iosapic->gsi_base + rte->rte_index == gsi)
				return irq;
	}
	return -1;
}

int
gsi_to_irq (unsigned int gsi)
{
	unsigned long flags;
	int irq;

	spin_lock_irqsave(&iosapic_lock, flags);
	irq = __gsi_to_irq(gsi);
	spin_unlock_irqrestore(&iosapic_lock, flags);
	return irq;
}

static struct iosapic_rte_info *find_rte(unsigned int irq, unsigned int gsi)
{
	struct iosapic_rte_info *rte;

	list_for_each_entry(rte, &iosapic_intr_info[irq].rtes, rte_list)
		if (rte->iosapic->gsi_base + rte->rte_index == gsi)
			return rte;
	return NULL;
}
static void
set_rte (unsigned int gsi, unsigned int irq, unsigned int dest, int mask)
{
	unsigned long pol, trigger, dmode;
	u32 low32, high32;
	int rte_index;
	char redir;
	struct iosapic_rte_info *rte;
	ia64_vector vector = irq_to_vector(irq);

	DBG(KERN_DEBUG"IOSAPIC: routing vector %d to 0x%x\n", vector, dest);

	rte = find_rte(irq, gsi);
	if (!rte)
		return;		/* not an IOSAPIC interrupt */

	rte_index = rte->rte_index;
	pol     = iosapic_intr_info[irq].polarity;
	trigger = iosapic_intr_info[irq].trigger;
	dmode   = iosapic_intr_info[irq].dmode;

	redir = (dmode == IOSAPIC_LOWEST_PRIORITY) ? 1 : 0;

#ifdef CONFIG_SMP
	set_irq_affinity_info(irq, (int)(dest & 0xffff), redir);
#endif

	low32 = ((pol << IOSAPIC_POLARITY_SHIFT) |
		 (trigger << IOSAPIC_TRIGGER_SHIFT) |
		 (dmode << IOSAPIC_DELIVERY_SHIFT) |
		 ((mask ? 1 : 0) << IOSAPIC_MASK_SHIFT) |
		 vector);

	/* dest contains both id and eid */
	high32 = (dest << IOSAPIC_DEST_SHIFT);

	iosapic_write(rte->iosapic, IOSAPIC_RTE_HIGH(rte_index), high32);
	iosapic_write(rte->iosapic, IOSAPIC_RTE_LOW(rte_index), low32);
	iosapic_intr_info[irq].low32 = low32;
	iosapic_intr_info[irq].dest = dest;
}

static void
nop (unsigned int irq)
{
	/* do nothing... */
}
#ifdef CONFIG_KEXEC
void
kexec_disable_iosapic(void)
{
	struct iosapic_intr_info *info;
	struct iosapic_rte_info *rte;
	ia64_vector vec;
	int irq;

	for (irq = 0; irq < NR_IRQS; irq++) {
		info = &iosapic_intr_info[irq];
		vec = irq_to_vector(irq);
		list_for_each_entry(rte, &info->rtes,
				    rte_list) {
			iosapic_write(rte->iosapic,
				      IOSAPIC_RTE_LOW(rte->rte_index),
				      IOSAPIC_MASK|vec);
			iosapic_eoi(rte->iosapic->addr, vec);
		}
	}
}
#endif
static void
mask_irq (unsigned int irq)
{
	u32 low32;
	int rte_index;
	struct iosapic_rte_info *rte;

	if (!iosapic_intr_info[irq].count)
		return;			/* not an IOSAPIC interrupt! */

	/* set only the mask bit */
	low32 = iosapic_intr_info[irq].low32 |= IOSAPIC_MASK;
	list_for_each_entry(rte, &iosapic_intr_info[irq].rtes, rte_list) {
		rte_index = rte->rte_index;
		iosapic_write(rte->iosapic, IOSAPIC_RTE_LOW(rte_index), low32);
	}
}

static void
unmask_irq (unsigned int irq)
{
	u32 low32;
	int rte_index;
	struct iosapic_rte_info *rte;

	if (!iosapic_intr_info[irq].count)
		return;			/* not an IOSAPIC interrupt! */

	low32 = iosapic_intr_info[irq].low32 &= ~IOSAPIC_MASK;
	list_for_each_entry(rte, &iosapic_intr_info[irq].rtes, rte_list) {
		rte_index = rte->rte_index;
		iosapic_write(rte->iosapic, IOSAPIC_RTE_LOW(rte_index), low32);
	}
}

static void
iosapic_set_affinity (unsigned int irq, cpumask_t mask)
{
#ifdef CONFIG_SMP
	u32 high32, low32;
	int dest, rte_index;
	int redir = (irq & IA64_IRQ_REDIRECTED) ? 1 : 0;
	struct iosapic_rte_info *rte;
	struct iosapic *iosapic;

	irq &= (~IA64_IRQ_REDIRECTED);

	cpus_and(mask, mask, cpu_online_map);
	if (cpus_empty(mask))
		return;

	if (irq_prepare_move(irq, first_cpu(mask)))
		return;

	dest = cpu_physical_id(first_cpu(mask));

	if (!iosapic_intr_info[irq].count)
		return;			/* not an IOSAPIC interrupt */

	set_irq_affinity_info(irq, dest, redir);

	/* dest contains both id and eid */
	high32 = dest << IOSAPIC_DEST_SHIFT;

	low32 = iosapic_intr_info[irq].low32 & ~(7 << IOSAPIC_DELIVERY_SHIFT);
	if (redir)
		/* change delivery mode to lowest priority */
		low32 |= (IOSAPIC_LOWEST_PRIORITY << IOSAPIC_DELIVERY_SHIFT);
	else
		/* change delivery mode to fixed */
		low32 |= (IOSAPIC_FIXED << IOSAPIC_DELIVERY_SHIFT);

	low32 &= IOSAPIC_VECTOR_MASK;
	low32 |= irq_to_vector(irq);

	iosapic_intr_info[irq].low32 = low32;
	iosapic_intr_info[irq].dest = dest;
	list_for_each_entry(rte, &iosapic_intr_info[irq].rtes, rte_list) {
		iosapic = rte->iosapic;
		rte_index = rte->rte_index;
		iosapic_write(iosapic, IOSAPIC_RTE_HIGH(rte_index), high32);
		iosapic_write(iosapic, IOSAPIC_RTE_LOW(rte_index), low32);
	}
#endif
}
/*
 * Handlers for level-triggered interrupts.
 */

static unsigned int
iosapic_startup_level_irq (unsigned int irq)
{
	unmask_irq(irq);
	return 0;
}

static void
iosapic_end_level_irq (unsigned int irq)
{
	ia64_vector vec = irq_to_vector(irq);
	struct iosapic_rte_info *rte;
	int do_unmask_irq = 0;

	irq_complete_move(irq);
	if (unlikely(irq_desc[irq].status & IRQ_MOVE_PENDING)) {
		do_unmask_irq = 1;
		mask_irq(irq);
	}

	list_for_each_entry(rte, &iosapic_intr_info[irq].rtes, rte_list)
		iosapic_eoi(rte->iosapic->addr, vec);

	if (unlikely(do_unmask_irq)) {
		move_masked_irq(irq);
		unmask_irq(irq);
	}
}

#define iosapic_shutdown_level_irq	mask_irq
#define iosapic_enable_level_irq	unmask_irq
#define iosapic_disable_level_irq	mask_irq
#define iosapic_ack_level_irq		nop

static struct irq_chip irq_type_iosapic_level = {
	.name =		"IO-SAPIC-level",
	.startup =	iosapic_startup_level_irq,
	.shutdown =	iosapic_shutdown_level_irq,
	.enable =	iosapic_enable_level_irq,
	.disable =	iosapic_disable_level_irq,
	.ack =		iosapic_ack_level_irq,
	.end =		iosapic_end_level_irq,
	.mask =		mask_irq,
	.unmask =	unmask_irq,
	.set_affinity =	iosapic_set_affinity
};
/*
 * Handlers for edge-triggered interrupts.
 */

static unsigned int
iosapic_startup_edge_irq (unsigned int irq)
{
	unmask_irq(irq);
	/*
	 * IOSAPIC simply drops interrupts pended while the
	 * corresponding pin was masked, so we can't know if an
	 * interrupt is pending already.  Let's hope not...
	 */
	return 0;
}

static void
iosapic_ack_edge_irq (unsigned int irq)
{
	irq_desc_t *idesc = irq_desc + irq;

	irq_complete_move(irq);
	move_native_irq(irq);
	/*
	 * Once we have recorded IRQ_PENDING already, we can mask the
	 * interrupt for real.  This prevents IRQ storms from unhandled
	 * devices.
	 */
	if ((idesc->status & (IRQ_PENDING|IRQ_DISABLED)) ==
	    (IRQ_PENDING|IRQ_DISABLED))
		mask_irq(irq);
}

#define iosapic_enable_edge_irq		unmask_irq
#define iosapic_disable_edge_irq	nop
#define iosapic_end_edge_irq		nop

static struct irq_chip irq_type_iosapic_edge = {
	.name =		"IO-SAPIC-edge",
	.startup =	iosapic_startup_edge_irq,
	.shutdown =	iosapic_disable_edge_irq,
	.enable =	iosapic_enable_edge_irq,
	.disable =	iosapic_disable_edge_irq,
	.ack =		iosapic_ack_edge_irq,
	.end =		iosapic_end_edge_irq,
	.mask =		mask_irq,
	.unmask =	unmask_irq,
	.set_affinity =	iosapic_set_affinity
};
static unsigned int
iosapic_version (char __iomem *addr)
{
	/*
	 * The IOSAPIC Version Register returns a 32-bit structure like:
	 * {
	 *	unsigned int version   : 8;
	 *	unsigned int reserved1 : 8;
	 *	unsigned int max_redir : 8;
	 *	unsigned int reserved2 : 8;
	 * }
	 */
	return __iosapic_read(addr, IOSAPIC_VERSION);
}
static int iosapic_find_sharable_irq(unsigned long trigger, unsigned long pol)
{
	int i, irq = -ENOSPC, min_count = -1;
	struct iosapic_intr_info *info;

	/*
	 * shared vectors for edge-triggered interrupts are not
	 * supported yet
	 */
	if (trigger == IOSAPIC_EDGE)
		return -EINVAL;

	for (i = 0; i < NR_IRQS; i++) {
		info = &iosapic_intr_info[i];
		if (info->trigger == trigger && info->polarity == pol &&
		    (info->dmode == IOSAPIC_FIXED ||
		     info->dmode == IOSAPIC_LOWEST_PRIORITY) &&
		    can_request_irq(i, IRQF_SHARED)) {
			if (min_count == -1 || info->count < min_count) {
				irq = i;
				min_count = info->count;
			}
		}
	}
	return irq;
}
/*
 * If the given vector is already owned by another interrupt, assign a new
 * vector to that other interrupt and make this vector available.
 */
static void __init
iosapic_reassign_vector (int irq)
{
	int new_irq;

	if (iosapic_intr_info[irq].count) {
		new_irq = create_irq();
		if (new_irq < 0)
			panic("%s: out of interrupt vectors!\n", __func__);
		printk(KERN_INFO "Reassigning vector %d to %d\n",
		       irq_to_vector(irq), irq_to_vector(new_irq));
		memcpy(&iosapic_intr_info[new_irq], &iosapic_intr_info[irq],
		       sizeof(struct iosapic_intr_info));
		INIT_LIST_HEAD(&iosapic_intr_info[new_irq].rtes);
		list_move(iosapic_intr_info[irq].rtes.next,
			  &iosapic_intr_info[new_irq].rtes);
		memset(&iosapic_intr_info[irq], 0,
		       sizeof(struct iosapic_intr_info));
		iosapic_intr_info[irq].low32 = IOSAPIC_MASK;
		INIT_LIST_HEAD(&iosapic_intr_info[irq].rtes);
	}
}
static struct iosapic_rte_info * __init_refok iosapic_alloc_rte (void)
{
	int i;
	struct iosapic_rte_info *rte;
	int preallocated = 0;

	if (!iosapic_kmalloc_ok && list_empty(&free_rte_list)) {
		rte = alloc_bootmem(sizeof(struct iosapic_rte_info) *
				    NR_PREALLOCATE_RTE_ENTRIES);
		if (!rte)
			return NULL;
		for (i = 0; i < NR_PREALLOCATE_RTE_ENTRIES; i++, rte++)
			list_add(&rte->rte_list, &free_rte_list);
	}

	if (!list_empty(&free_rte_list)) {
		rte = list_entry(free_rte_list.next, struct iosapic_rte_info,
				 rte_list);
		list_del(&rte->rte_list);
		preallocated++;
	} else {
		rte = kmalloc(sizeof(struct iosapic_rte_info), GFP_ATOMIC);
		if (!rte)
			return NULL;
	}

	memset(rte, 0, sizeof(struct iosapic_rte_info));
	if (preallocated)
		rte->flags |= RTE_PREALLOCATED;

	return rte;
}
static inline int irq_is_shared (int irq)
{
	return (iosapic_intr_info[irq].count > 1);
}

static int
register_intr (unsigned int gsi, int irq, unsigned char delivery,
	       unsigned long polarity, unsigned long trigger)
{
	irq_desc_t *idesc;
	struct hw_interrupt_type *irq_type;
	int index;
	struct iosapic_rte_info *rte;

	index = find_iosapic(gsi);
	if (index < 0) {
		printk(KERN_WARNING "%s: No IOSAPIC for GSI %u\n",
		       __func__, gsi);
		return -ENODEV;
	}

	rte = find_rte(irq, gsi);
	if (!rte) {
		rte = iosapic_alloc_rte();
		if (!rte) {
			printk(KERN_WARNING "%s: cannot allocate memory\n",
			       __func__);
			return -ENOMEM;
		}

		rte->iosapic	= &iosapic_lists[index];
		rte->rte_index	= gsi - rte->iosapic->gsi_base;
		rte->refcnt++;
		list_add_tail(&rte->rte_list, &iosapic_intr_info[irq].rtes);
		iosapic_intr_info[irq].count++;
		iosapic_lists[index].rtes_inuse++;
	}
	else if (rte->refcnt == NO_REF_RTE) {
		struct iosapic_intr_info *info = &iosapic_intr_info[irq];
		if (info->count > 0 &&
		    (info->trigger != trigger || info->polarity != polarity)){
			printk (KERN_WARNING
				"%s: cannot override the interrupt\n",
				__func__);
			return -EINVAL;
		}
		rte->refcnt++;
		iosapic_intr_info[irq].count++;
		iosapic_lists[index].rtes_inuse++;
	}

	iosapic_intr_info[irq].polarity = polarity;
	iosapic_intr_info[irq].dmode    = delivery;
	iosapic_intr_info[irq].trigger  = trigger;

	if (trigger == IOSAPIC_EDGE)
		irq_type = &irq_type_iosapic_edge;
	else
		irq_type = &irq_type_iosapic_level;

	idesc = irq_desc + irq;
	if (idesc->chip != irq_type) {
		if (idesc->chip != &no_irq_type)
			printk(KERN_WARNING
			       "%s: changing vector %d from %s to %s\n",
			       __func__, irq_to_vector(irq),
			       idesc->chip->name, irq_type->name);
		idesc->chip = irq_type;
	}
	return 0;
}
static unsigned int
get_target_cpu (unsigned int gsi, int irq)
{
#ifdef CONFIG_SMP
	static int cpu = -1;
	extern int cpe_vector;
	cpumask_t domain = irq_to_domain(irq);

	/*
	 * In case of vector shared by multiple RTEs, all RTEs that
	 * share the vector need to use the same destination CPU.
	 */
	if (iosapic_intr_info[irq].count)
		return iosapic_intr_info[irq].dest;

	/*
	 * If the platform supports redirection via XTP, let it
	 * distribute interrupts.
	 */
	if (smp_int_redirect & SMP_IRQ_REDIRECTION)
		return cpu_physical_id(smp_processor_id());

	/*
	 * Some interrupts (ACPI SCI, for instance) are registered
	 * before the BSP is marked as online.
	 */
	if (!cpu_online(smp_processor_id()))
		return cpu_physical_id(smp_processor_id());

#ifdef CONFIG_ACPI
	if (cpe_vector > 0 && irq_to_vector(irq) == IA64_CPEP_VECTOR)
		return get_cpei_target_cpu();
#endif

#ifdef CONFIG_NUMA
	{
		int num_cpus, cpu_index, iosapic_index, numa_cpu, i = 0;
		cpumask_t cpu_mask;

		iosapic_index = find_iosapic(gsi);
		if (iosapic_index < 0 ||
		    iosapic_lists[iosapic_index].node == MAX_NUMNODES)
			goto skip_numa_setup;

		cpu_mask = node_to_cpumask(iosapic_lists[iosapic_index].node);
		cpus_and(cpu_mask, cpu_mask, domain);
		for_each_cpu_mask(numa_cpu, cpu_mask) {
			if (!cpu_online(numa_cpu))
				cpu_clear(numa_cpu, cpu_mask);
		}

		num_cpus = cpus_weight(cpu_mask);

		if (!num_cpus)
			goto skip_numa_setup;

		/* Use irq assignment to distribute across cpus in node */
		cpu_index = irq % num_cpus;

		for (numa_cpu = first_cpu(cpu_mask) ; i < cpu_index ; i++)
			numa_cpu = next_cpu(numa_cpu, cpu_mask);

		if (numa_cpu != NR_CPUS)
			return cpu_physical_id(numa_cpu);
	}
skip_numa_setup:
#endif
	/*
	 * Otherwise, round-robin interrupt vectors across all the
	 * processors.  (It'd be nice if we could be smarter in the
	 * case of NUMA.)
	 */
	do {
		if (++cpu >= NR_CPUS)
			cpu = 0;
	} while (!cpu_online(cpu) || !cpu_isset(cpu, domain));

	return cpu_physical_id(cpu);
#else  /* CONFIG_SMP */
	return cpu_physical_id(smp_processor_id());
#endif
}

static inline unsigned char choose_dmode(void)
{
#ifdef CONFIG_SMP
	if (smp_int_redirect & SMP_IRQ_REDIRECTION)
		return IOSAPIC_LOWEST_PRIORITY;
#endif
	return IOSAPIC_FIXED;
}
/*
 * ACPI can describe IOSAPIC interrupts via static tables and namespace
 * methods.  This provides an interface to register those interrupts and
 * program the IOSAPIC RTE.
 */
int
iosapic_register_intr (unsigned int gsi,
		       unsigned long polarity, unsigned long trigger)
{
	int irq, mask = 1, err;
	unsigned int dest;
	unsigned long flags;
	struct iosapic_rte_info *rte;
	u32 low32;
	unsigned char dmode;

	/*
	 * If this GSI has already been registered (i.e., it's a
	 * shared interrupt, or we lost a race to register it),
	 * don't touch the RTE.
	 */
	spin_lock_irqsave(&iosapic_lock, flags);
	irq = __gsi_to_irq(gsi);
	if (irq > 0) {
		rte = find_rte(irq, gsi);
		if (iosapic_intr_info[irq].count == 0) {
			assign_irq_vector(irq);
			dynamic_irq_init(irq);
		} else if (rte->refcnt != NO_REF_RTE) {
			rte->refcnt++;
			goto unlock_iosapic_lock;
		}
	} else
		irq = create_irq();

	/* If vector is running out, we try to find a sharable vector */
	if (irq < 0) {
		irq = iosapic_find_sharable_irq(trigger, polarity);
		if (irq < 0)
			goto unlock_iosapic_lock;
	}

	spin_lock(&irq_desc[irq].lock);
	dest = get_target_cpu(gsi, irq);
	dmode = choose_dmode();
	err = register_intr(gsi, irq, dmode, polarity, trigger);
	if (err < 0) {
		spin_unlock(&irq_desc[irq].lock);
		irq = err;
		goto unlock_iosapic_lock;
	}

	/*
	 * If the vector is shared and already unmasked for other
	 * interrupt sources, don't mask it.
	 */
	low32 = iosapic_intr_info[irq].low32;
	if (irq_is_shared(irq) && !(low32 & IOSAPIC_MASK))
		mask = 0;
	set_rte(gsi, irq, dest, mask);

	printk(KERN_INFO "GSI %u (%s, %s) -> CPU %d (0x%04x) vector %d\n",
	       gsi, (trigger == IOSAPIC_EDGE ? "edge" : "level"),
	       (polarity == IOSAPIC_POL_HIGH ? "high" : "low"),
	       cpu_logical_id(dest), dest, irq_to_vector(irq));

	spin_unlock(&irq_desc[irq].lock);
 unlock_iosapic_lock:
	spin_unlock_irqrestore(&iosapic_lock, flags);
	return irq;
}
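
/*
 * Illustrative usage sketch (not part of this file): ACPI interrupt setup
 * code would typically register a level-triggered, active-low PCI interrupt
 * roughly as follows, where gsi comes from the ACPI tables and
 * IOSAPIC_POL_LOW / IOSAPIC_LEVEL are assumed to be the polarity and trigger
 * constants from asm/iosapic.h:
 *
 *	irq = iosapic_register_intr(gsi, IOSAPIC_POL_LOW, IOSAPIC_LEVEL);
 *	if (irq < 0)
 *		return irq;			-- no vector available
 *	...
 *	iosapic_unregister_intr(gsi);		-- balanced teardown when done
 */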
void
iosapic_unregister_intr (unsigned int gsi)
{
	unsigned long flags;
	int irq, index;
	irq_desc_t *idesc;
	u32 low32;
	unsigned long trigger, polarity;
	unsigned int dest;
	struct iosapic_rte_info *rte;

	/*
	 * If the irq associated with the gsi is not found,
	 * iosapic_unregister_intr() is unbalanced. We need to check
	 * this again after getting locks.
	 */
	irq = gsi_to_irq(gsi);
	if (irq < 0) {
		printk(KERN_ERR "iosapic_unregister_intr(%u) unbalanced\n",
		       gsi);
		WARN_ON(1);
		return;
	}

	spin_lock_irqsave(&iosapic_lock, flags);
	if ((rte = find_rte(irq, gsi)) == NULL) {
		printk(KERN_ERR "iosapic_unregister_intr(%u) unbalanced\n",
		       gsi);
		WARN_ON(1);
		goto out;
	}

	if (--rte->refcnt > 0)
		goto out;

	idesc = irq_desc + irq;
	rte->refcnt = NO_REF_RTE;

	/* Mask the interrupt */
	low32 = iosapic_intr_info[irq].low32 | IOSAPIC_MASK;
	iosapic_write(rte->iosapic, IOSAPIC_RTE_LOW(rte->rte_index), low32);

	iosapic_intr_info[irq].count--;
	index = find_iosapic(gsi);
	iosapic_lists[index].rtes_inuse--;
	WARN_ON(iosapic_lists[index].rtes_inuse < 0);

	trigger  = iosapic_intr_info[irq].trigger;
	polarity = iosapic_intr_info[irq].polarity;
	dest     = iosapic_intr_info[irq].dest;
	printk(KERN_INFO
	       "GSI %u (%s, %s) -> CPU %d (0x%04x) vector %d unregistered\n",
	       gsi, (trigger == IOSAPIC_EDGE ? "edge" : "level"),
	       (polarity == IOSAPIC_POL_HIGH ? "high" : "low"),
	       cpu_logical_id(dest), dest, irq_to_vector(irq));

	if (iosapic_intr_info[irq].count == 0) {
#ifdef CONFIG_SMP
		/* Clear affinity */
		cpus_setall(idesc->affinity);
#endif
		/* Clear the interrupt information */
		iosapic_intr_info[irq].dest = 0;
		iosapic_intr_info[irq].dmode = 0;
		iosapic_intr_info[irq].polarity = 0;
		iosapic_intr_info[irq].trigger = 0;
		iosapic_intr_info[irq].low32 |= IOSAPIC_MASK;

		/* Destroy and reserve IRQ */
		destroy_and_reserve_irq(irq);
	}
 out:
	spin_unlock_irqrestore(&iosapic_lock, flags);
}
/*
 * ACPI calls this when it finds an entry for a platform interrupt.
 */
int __init
iosapic_register_platform_intr (u32 int_type, unsigned int gsi,
				int iosapic_vector, u16 eid, u16 id,
				unsigned long polarity, unsigned long trigger)
{
	static const char * const name[] = {"unknown", "PMI", "INIT", "CPEI"};
	unsigned char delivery;
	int irq, vector, mask = 0;
	unsigned int dest = ((id << 8) | eid) & 0xffff;

	switch (int_type) {
	case ACPI_INTERRUPT_PMI:
		irq = vector = iosapic_vector;
		bind_irq_vector(irq, vector, CPU_MASK_ALL);
		/*
		 * since PMI vector is alloc'd by FW(ACPI) not by kernel,
		 * we need to make sure the vector is available
		 */
		iosapic_reassign_vector(irq);
		delivery = IOSAPIC_PMI;
		break;
	case ACPI_INTERRUPT_INIT:
		irq = create_irq();
		if (irq < 0)
			panic("%s: out of interrupt vectors!\n", __func__);
		vector = irq_to_vector(irq);
		delivery = IOSAPIC_INIT;
		break;
	case ACPI_INTERRUPT_CPEI:
		irq = vector = IA64_CPE_VECTOR;
		BUG_ON(bind_irq_vector(irq, vector, CPU_MASK_ALL));
		delivery = IOSAPIC_FIXED;
		mask = 1;
		break;
	default:
		printk(KERN_ERR "%s: invalid int type 0x%x\n", __func__,
		       int_type);
		return -1;
	}

	register_intr(gsi, irq, delivery, polarity, trigger);

	printk(KERN_INFO
	       "PLATFORM int %s (0x%x): GSI %u (%s, %s) -> CPU %d (0x%04x)"
	       " vector %d\n",
	       int_type < ARRAY_SIZE(name) ? name[int_type] : "unknown",
	       int_type, gsi, (trigger == IOSAPIC_EDGE ? "edge" : "level"),
	       (polarity == IOSAPIC_POL_HIGH ? "high" : "low"),
	       cpu_logical_id(dest), dest, vector);

	set_rte(gsi, irq, dest, mask);
	return vector;
}
/*
 * ACPI calls this when it finds an entry for a legacy ISA IRQ override.
 */
void __devinit
iosapic_override_isa_irq (unsigned int isa_irq, unsigned int gsi,
			  unsigned long polarity,
			  unsigned long trigger)
{
	int vector, irq;
	unsigned int dest = cpu_physical_id(smp_processor_id());
	unsigned char dmode;

	irq = vector = isa_irq_to_vector(isa_irq);
	BUG_ON(bind_irq_vector(irq, vector, CPU_MASK_ALL));
	dmode = choose_dmode();
	register_intr(gsi, irq, dmode, polarity, trigger);

	DBG("ISA: IRQ %u -> GSI %u (%s,%s) -> CPU %d (0x%04x) vector %d\n",
	    isa_irq, gsi, trigger == IOSAPIC_EDGE ? "edge" : "level",
	    polarity == IOSAPIC_POL_HIGH ? "high" : "low",
	    cpu_logical_id(dest), dest, vector);

	set_rte(gsi, irq, dest, 1);
}
void __init
iosapic_system_init (int system_pcat_compat)
{
	int irq;

	for (irq = 0; irq < NR_IRQS; ++irq) {
		iosapic_intr_info[irq].low32 = IOSAPIC_MASK;
		/* mark as unused */
		INIT_LIST_HEAD(&iosapic_intr_info[irq].rtes);

		iosapic_intr_info[irq].count = 0;
	}

	pcat_compat = system_pcat_compat;
	if (pcat_compat) {
		/*
		 * Disable the compatibility mode interrupts (8259 style),
		 * needs IN/OUT support enabled.
		 */
		printk(KERN_INFO
		       "%s: Disabling PC-AT compatible 8259 interrupts\n",
		       __func__);
		outb(0xff, 0xA1);
		outb(0xff, 0x21);
	}
}
static inline int
iosapic_alloc (void)
{
	int index;

	for (index = 0; index < NR_IOSAPICS; index++)
		if (!iosapic_lists[index].addr)
			return index;

	printk(KERN_WARNING "%s: failed to allocate iosapic\n", __func__);
	return -1;
}

static inline void
iosapic_free (int index)
{
	memset(&iosapic_lists[index], 0, sizeof(iosapic_lists[0]));
}

static inline int
iosapic_check_gsi_range (unsigned int gsi_base, unsigned int ver)
{
	int index;
	unsigned int gsi_end, base, end;

	/* check gsi range */
	gsi_end = gsi_base + ((ver >> 16) & 0xff);
	for (index = 0; index < NR_IOSAPICS; index++) {
		if (!iosapic_lists[index].addr)
			continue;

		base = iosapic_lists[index].gsi_base;
		end  = base + iosapic_lists[index].num_rte - 1;

		if (gsi_end < base || end < gsi_base)
			continue;	/* OK */

		return -EBUSY;
	}
	return 0;
}
int __devinit
iosapic_init (unsigned long phys_addr, unsigned int gsi_base)
{
	int num_rte, err, index;
	unsigned int isa_irq, ver;
	char __iomem *addr;
	unsigned long flags;

	spin_lock_irqsave(&iosapic_lock, flags);
	index = find_iosapic(gsi_base);
	if (index >= 0) {
		spin_unlock_irqrestore(&iosapic_lock, flags);
		return -EBUSY;
	}

	addr = ioremap(phys_addr, 0);
	ver = iosapic_version(addr);
	if ((err = iosapic_check_gsi_range(gsi_base, ver))) {
		iounmap(addr);
		spin_unlock_irqrestore(&iosapic_lock, flags);
		return err;
	}

	/*
	 * The MAX_REDIR register holds the highest input pin number
	 * (starting from 0).  We add 1 so that we can use it for
	 * number of pins (= RTEs)
	 */
	num_rte = ((ver >> 16) & 0xff) + 1;

	index = iosapic_alloc();
	iosapic_lists[index].addr = addr;
	iosapic_lists[index].gsi_base = gsi_base;
	iosapic_lists[index].num_rte = num_rte;
#ifdef CONFIG_NUMA
	iosapic_lists[index].node = MAX_NUMNODES;
#endif
	spin_lock_init(&iosapic_lists[index].lock);
	spin_unlock_irqrestore(&iosapic_lock, flags);

	if ((gsi_base == 0) && pcat_compat) {
		/*
		 * Map the legacy ISA devices into the IOSAPIC data.  Some of
		 * these may get reprogrammed later on with data from the ACPI
		 * Interrupt Source Override table.
		 */
		for (isa_irq = 0; isa_irq < 16; ++isa_irq)
			iosapic_override_isa_irq(isa_irq, isa_irq,
						 IOSAPIC_POL_HIGH,
						 IOSAPIC_EDGE);
	}
	return 0;
}
#ifdef CONFIG_HOTPLUG
int
iosapic_remove (unsigned int gsi_base)
{
	int index, err = 0;
	unsigned long flags;

	spin_lock_irqsave(&iosapic_lock, flags);
	index = find_iosapic(gsi_base);
	if (index < 0) {
		printk(KERN_WARNING "%s: No IOSAPIC for GSI base %u\n",
		       __func__, gsi_base);
		goto out;
	}

	if (iosapic_lists[index].rtes_inuse) {
		err = -EBUSY;
		printk(KERN_WARNING "%s: IOSAPIC for GSI base %u is busy\n",
		       __func__, gsi_base);
		goto out;
	}

	iounmap(iosapic_lists[index].addr);
	iosapic_free(index);
 out:
	spin_unlock_irqrestore(&iosapic_lock, flags);
	return err;
}
#endif /* CONFIG_HOTPLUG */

#ifdef CONFIG_NUMA
void __devinit
map_iosapic_to_node(unsigned int gsi_base, int node)
{
	int index;

	index = find_iosapic(gsi_base);
	if (index < 0) {
		printk(KERN_WARNING "%s: No IOSAPIC for GSI %u\n",
		       __func__, gsi_base);
		return;
	}
	iosapic_lists[index].node = node;
	return;
}
#endif
static int __init iosapic_enable_kmalloc (void)
{
	iosapic_kmalloc_ok = 1;
	return 0;
}
core_initcall (iosapic_enable_kmalloc);