/*
 * I/O SAPIC support.
 *
 * Copyright (C) 1999 Intel Corp.
 * Copyright (C) 1999 Asit Mallick <asit.k.mallick@intel.com>
 * Copyright (C) 2000-2002 J.I. Lee <jung-ik.lee@intel.com>
 * Copyright (C) 1999-2000, 2002-2003 Hewlett-Packard Co.
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 * Copyright (C) 1999 VA Linux Systems
 * Copyright (C) 1999,2000 Walt Drummond <drummond@valinux.com>
 *
 * 00/04/19	D. Mosberger	Rewritten to mirror more closely the x86 I/O
 *				APIC code.  In particular, we now have separate
 *				handlers for edge and level triggered
 *				interrupts.
 * 00/10/27	Asit Mallick, Goutham Rao <goutham.rao@intel.com> IRQ vector
 *				allocation PCI to vector mapping, shared PCI
 *				interrupts.
 * 00/10/27	D. Mosberger	Document things a bit more to make them more
 *				understandable.  Clean up much of the old
 *				IOSAPIC cruft.
 * 01/07/27	J.I. Lee	PCI irq routing, Platform/Legacy interrupts
 *				and fixes for ACPI S5(SoftOff) support.
 * 02/01/23	J.I. Lee	iosapic pgm fixes for PCI irq routing from _PRT
 * 02/01/07	E. Focht	<efocht@ess.nec.de> Redirectable interrupt
 *				vectors in iosapic_set_affinity(),
 *				initializations for /proc/irq/#/smp_affinity
 * 02/04/02	P. Diefenbaugh	Cleaned up ACPI PCI IRQ routing.
 * 02/04/18	J.I. Lee	bug fix in iosapic_init_pci_irq
 * 02/04/30	J.I. Lee	bug fix in find_iosapic to fix ACPI PCI IRQ to
 *				IOSAPIC mapping error
 * 02/07/29	T. Kochi	Allocate interrupt vectors dynamically
 * 02/08/04	T. Kochi	Cleaned up terminology (irq, global system
 *				interrupt, vector, etc.)
 * 02/09/20	D. Mosberger	Simplified by taking advantage of ACPI's
 *				pci_irq code.
 * 03/02/19	B. Helgaas	Make pcat_compat system-wide, not per-IOSAPIC.
 *				Remove iosapic_address & gsi_base from
 *				external interfaces.  Rationalize
 *				__init/__devinit attributes.
 * 04/12/04	Ashok Raj	<ashok.raj@intel.com> Intel Corporation 2004
 *				Updated to work with irq migration necessary
 *				for CPU Hotplug
 */
/*
 * Here is what the interrupt logic between a PCI device and the kernel looks
 * like:
 *
 * (1) A PCI device raises one of the four interrupt pins (INTA, INTB, INTC,
 *     INTD).  The device is uniquely identified by its bus- and slot-number
 *     (the function number does not matter here because all functions share
 *     the same interrupt lines).
 *
 * (2) The motherboard routes the interrupt line to a pin on an IOSAPIC
 *     controller.  Multiple interrupt lines may have to share the same
 *     IOSAPIC pin (if they're level triggered and use the same polarity).
 *     Each interrupt line has a unique Global System Interrupt (GSI) number
 *     which can be calculated as the sum of the controller's base GSI number
 *     and the IOSAPIC pin number to which the line connects.
 *
 * (3) The IOSAPIC uses internal routing table entries (RTEs) to map each
 *     IOSAPIC pin to an IA-64 interrupt vector.  This interrupt vector is
 *     then sent to the CPU.
 *
 * (4) The kernel recognizes an interrupt as an IRQ.  The IRQ interface is
 *     used as the architecture-independent interrupt handling mechanism in
 *     Linux.  Because an IRQ is just a number, we need a mapping between
 *     IA-64 interrupt vector numbers and IRQ numbers.  On smaller systems,
 *     we use a one-to-one mapping between IA-64 vectors and IRQs.  A
 *     platform can implement platform_irq_to_vector(irq) and
 *     platform_local_vector_to_irq(vector) APIs to differentiate the mapping.
 *     Please see also arch/ia64/include/asm/hw_irq.h for those APIs.
 *
 * To sum up, there are three levels of mappings involved:
 *
 *	PCI pin -> global system interrupt (GSI) -> IA-64 vector <-> IRQ
 *
 * Note: The term "IRQ" is loosely used everywhere in the Linux kernel to
 * describe interrupts.  In this file we use "IRQ" only for Linux IRQs.
 * ISA IRQ (isa_irq) is the only exception in this source code.
 */
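/*
 * Illustrative sketch (not part of this driver's logic): the GSI in step (2)
 * is just the controller's base plus the pin, and the Linux IRQ registered
 * for that GSI can be recovered with gsi_to_irq().  The base and pin values
 * below are made-up example numbers.
 *
 *	unsigned int gsi = 16 + 3;	// gsi_base 16, IOSAPIC pin 3 => GSI 19
 *	int irq = gsi_to_irq(gsi);	// -1 if no one registered this GSI
 *	if (irq >= 0)
 *		printk("GSI %u is Linux IRQ %d, vector %d\n",
 *		       gsi, irq, irq_to_vector(irq));
 */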
#include <linux/acpi.h>
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/string.h>
#include <linux/bootmem.h>

#include <asm/delay.h>
#include <asm/hw_irq.h>
#include <asm/io.h>
#include <asm/iosapic.h>
#include <asm/machvec.h>
#include <asm/processor.h>
#include <asm/ptrace.h>
#include <asm/system.h>

#undef DEBUG_INTERRUPT_ROUTING

#ifdef DEBUG_INTERRUPT_ROUTING
#define DBG(fmt...)	printk(fmt)
#else
#define DBG(fmt...)
#endif

#define NR_PREALLOCATE_RTE_ENTRIES \
	(PAGE_SIZE / sizeof(struct iosapic_rte_info))
#define RTE_PREALLOCATED	(1)

static DEFINE_SPINLOCK(iosapic_lock);

/*
 * These tables map IA-64 vectors to the IOSAPIC pin that generates this
 * vector.
 */

#define NO_REF_RTE	0

static struct iosapic {
	char __iomem	*addr;		/* base address of IOSAPIC */
	unsigned int	gsi_base;	/* GSI base */
	unsigned short	num_rte;	/* # of RTEs on this IOSAPIC */
	int		rtes_inuse;	/* # of RTEs in use on this IOSAPIC */
#ifdef CONFIG_NUMA
	unsigned short	node;		/* numa node association via pxm */
#endif
	spinlock_t	lock;		/* lock for indirect reg access */
} iosapic_lists[NR_IOSAPICS];

struct iosapic_rte_info {
	struct list_head rte_list;	/* RTEs sharing the same vector */
	char		rte_index;	/* IOSAPIC RTE index */
	int		refcnt;		/* reference counter */
	unsigned int	flags;		/* flags */
	struct iosapic	*iosapic;
} ____cacheline_aligned;

static struct iosapic_intr_info {
	struct list_head rtes;		/* RTEs using this vector (empty =>
					 * not an IOSAPIC interrupt) */
	int		count;		/* # of registered RTEs */
	u32		low32;		/* current value of low word of
					 * Redirection table entry */
	unsigned int	dest;		/* destination CPU physical ID */
	unsigned char	dmode	: 3;	/* delivery mode (see iosapic.h) */
	unsigned char	polarity: 1;	/* interrupt polarity
					 * (see iosapic.h) */
	unsigned char	trigger	: 1;	/* trigger mode (see iosapic.h) */
} iosapic_intr_info[NR_IRQS];
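/*
 * How these structures fit together (summary added for readability, not
 * functional code): there is one iosapic_intr_info entry per Linux IRQ.  Its
 * ->rtes list collects every iosapic_rte_info whose pin is programmed with
 * that IRQ's vector, possibly spanning several of the iosapic_lists[]
 * controllers when level-triggered lines share a vector.  ->count mirrors
 * the length of that list, and ->low32/->dest cache the last values written
 * to the hardware RTEs.
 */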
static unsigned char pcat_compat __devinitdata;	/* 8259 compatibility flag */

static int iosapic_kmalloc_ok;
static LIST_HEAD(free_rte_list);

static inline void
iosapic_write(struct iosapic *iosapic, unsigned int reg, u32 val)
{
	unsigned long flags;

	spin_lock_irqsave(&iosapic->lock, flags);
	__iosapic_write(iosapic->addr, reg, val);
	spin_unlock_irqrestore(&iosapic->lock, flags);
}

/*
 * Find an IOSAPIC associated with a GSI
 */
static inline int
find_iosapic (unsigned int gsi)
{
	int i;

	for (i = 0; i < NR_IOSAPICS; i++) {
		if ((unsigned) (gsi - iosapic_lists[i].gsi_base) <
		    iosapic_lists[i].num_rte)
			return i;
	}

	return -1;
}

static inline int __gsi_to_irq(unsigned int gsi)
{
	int irq;
	struct iosapic_intr_info *info;
	struct iosapic_rte_info *rte;

	for (irq = 0; irq < NR_IRQS; irq++) {
		info = &iosapic_intr_info[irq];
		list_for_each_entry(rte, &info->rtes, rte_list)
			if (rte->iosapic->gsi_base + rte->rte_index == gsi)
				return irq;
	}
	return -1;
}

int
gsi_to_irq (unsigned int gsi)
{
	unsigned long flags;
	int irq;

	spin_lock_irqsave(&iosapic_lock, flags);
	irq = __gsi_to_irq(gsi);
	spin_unlock_irqrestore(&iosapic_lock, flags);
	return irq;
}

static struct iosapic_rte_info *find_rte(unsigned int irq, unsigned int gsi)
{
	struct iosapic_rte_info *rte;

	list_for_each_entry(rte, &iosapic_intr_info[irq].rtes, rte_list)
		if (rte->iosapic->gsi_base + rte->rte_index == gsi)
			return rte;
	return NULL;
}
static void
set_rte (unsigned int gsi, unsigned int irq, unsigned int dest, int mask)
{
	unsigned long pol, trigger, dmode;
	u32 low32, high32;
	int rte_index;
	char redir;
	struct iosapic_rte_info *rte;
	ia64_vector vector = irq_to_vector(irq);

	DBG(KERN_DEBUG"IOSAPIC: routing vector %d to 0x%x\n", vector, dest);

	rte = find_rte(irq, gsi);
	if (!rte)
		return;		/* not an IOSAPIC interrupt */

	rte_index = rte->rte_index;
	pol     = iosapic_intr_info[irq].polarity;
	trigger = iosapic_intr_info[irq].trigger;
	dmode   = iosapic_intr_info[irq].dmode;

	redir = (dmode == IOSAPIC_LOWEST_PRIORITY) ? 1 : 0;

#ifdef CONFIG_SMP
	set_irq_affinity_info(irq, (int)(dest & 0xffff), redir);
#endif

	low32 = ((pol << IOSAPIC_POLARITY_SHIFT) |
		 (trigger << IOSAPIC_TRIGGER_SHIFT) |
		 (dmode << IOSAPIC_DELIVERY_SHIFT) |
		 ((mask ? 1 : 0) << IOSAPIC_MASK_SHIFT) |
		 vector);

	/* dest contains both id and eid */
	high32 = (dest << IOSAPIC_DEST_SHIFT);

	iosapic_write(rte->iosapic, IOSAPIC_RTE_HIGH(rte_index), high32);
	iosapic_write(rte->iosapic, IOSAPIC_RTE_LOW(rte_index), low32);
	iosapic_intr_info[irq].low32 = low32;
	iosapic_intr_info[irq].dest = dest;
}

static void
nop (unsigned int irq)
{
	/* do nothing... */
}

#ifdef CONFIG_KEXEC
void
kexec_disable_iosapic(void)
{
	struct iosapic_intr_info *info;
	struct iosapic_rte_info *rte;
	ia64_vector vec;
	int irq;

	for (irq = 0; irq < NR_IRQS; irq++) {
		info = &iosapic_intr_info[irq];
		vec = irq_to_vector(irq);
		list_for_each_entry(rte, &info->rtes,
				    rte_list) {
			iosapic_write(rte->iosapic,
				      IOSAPIC_RTE_LOW(rte->rte_index),
				      IOSAPIC_MASK|vec);
			iosapic_eoi(rte->iosapic->addr, vec);
		}
	}
}
#endif
static void
mask_irq (unsigned int irq)
{
	u32 low32;
	int rte_index;
	struct iosapic_rte_info *rte;

	if (!iosapic_intr_info[irq].count)
		return;			/* not an IOSAPIC interrupt! */

	/* set only the mask bit */
	low32 = iosapic_intr_info[irq].low32 |= IOSAPIC_MASK;
	list_for_each_entry(rte, &iosapic_intr_info[irq].rtes, rte_list) {
		rte_index = rte->rte_index;
		iosapic_write(rte->iosapic, IOSAPIC_RTE_LOW(rte_index), low32);
	}
}

static void
unmask_irq (unsigned int irq)
{
	u32 low32;
	int rte_index;
	struct iosapic_rte_info *rte;

	if (!iosapic_intr_info[irq].count)
		return;			/* not an IOSAPIC interrupt! */

	low32 = iosapic_intr_info[irq].low32 &= ~IOSAPIC_MASK;
	list_for_each_entry(rte, &iosapic_intr_info[irq].rtes, rte_list) {
		rte_index = rte->rte_index;
		iosapic_write(rte->iosapic, IOSAPIC_RTE_LOW(rte_index), low32);
	}
}

static int
iosapic_set_affinity(unsigned int irq, const struct cpumask *mask)
{
#ifdef CONFIG_SMP
	u32 high32, low32;
	int cpu, dest, rte_index;
	int redir = (irq & IA64_IRQ_REDIRECTED) ? 1 : 0;
	struct iosapic_rte_info *rte;
	struct iosapic *iosapic;

	irq &= (~IA64_IRQ_REDIRECTED);

	cpu = cpumask_first_and(cpu_online_mask, mask);
	if (cpu >= nr_cpu_ids)
		return -1;

	if (irq_prepare_move(irq, cpu))
		return -1;

	dest = cpu_physical_id(cpu);

	if (!iosapic_intr_info[irq].count)
		return -1;			/* not an IOSAPIC interrupt */

	set_irq_affinity_info(irq, dest, redir);

	/* dest contains both id and eid */
	high32 = dest << IOSAPIC_DEST_SHIFT;

	low32 = iosapic_intr_info[irq].low32 & ~(7 << IOSAPIC_DELIVERY_SHIFT);
	if (redir)
		/* change delivery mode to lowest priority */
		low32 |= (IOSAPIC_LOWEST_PRIORITY << IOSAPIC_DELIVERY_SHIFT);
	else
		/* change delivery mode to fixed */
		low32 |= (IOSAPIC_FIXED << IOSAPIC_DELIVERY_SHIFT);
	low32 &= IOSAPIC_VECTOR_MASK;
	low32 |= irq_to_vector(irq);

	iosapic_intr_info[irq].low32 = low32;
	iosapic_intr_info[irq].dest = dest;
	list_for_each_entry(rte, &iosapic_intr_info[irq].rtes, rte_list) {
		iosapic = rte->iosapic;
		rte_index = rte->rte_index;
		iosapic_write(iosapic, IOSAPIC_RTE_HIGH(rte_index), high32);
		iosapic_write(iosapic, IOSAPIC_RTE_LOW(rte_index), low32);
	}
#endif
	return 0;
}
/*
 * Handlers for level-triggered interrupts.
 */

static unsigned int
iosapic_startup_level_irq (unsigned int irq)
{
	unmask_irq(irq);
	return 0;
}

static void
iosapic_end_level_irq (unsigned int irq)
{
	ia64_vector vec = irq_to_vector(irq);
	struct iosapic_rte_info *rte;
	int do_unmask_irq = 0;

	irq_complete_move(irq);
	if (unlikely(irq_desc[irq].status & IRQ_MOVE_PENDING)) {
		do_unmask_irq = 1;
		mask_irq(irq);
	}

	list_for_each_entry(rte, &iosapic_intr_info[irq].rtes, rte_list)
		iosapic_eoi(rte->iosapic->addr, vec);

	if (unlikely(do_unmask_irq)) {
		move_masked_irq(irq);
		unmask_irq(irq);
	}
}

#define iosapic_shutdown_level_irq	mask_irq
#define iosapic_enable_level_irq	unmask_irq
#define iosapic_disable_level_irq	mask_irq
#define iosapic_ack_level_irq		nop

static struct irq_chip irq_type_iosapic_level = {
	.name =		"IO-SAPIC-level",
	.startup =	iosapic_startup_level_irq,
	.shutdown =	iosapic_shutdown_level_irq,
	.enable =	iosapic_enable_level_irq,
	.disable =	iosapic_disable_level_irq,
	.ack =		iosapic_ack_level_irq,
	.end =		iosapic_end_level_irq,
	.mask =		mask_irq,
	.unmask =	unmask_irq,
	.set_affinity =	iosapic_set_affinity
};

/*
 * Handlers for edge-triggered interrupts.
 */

static unsigned int
iosapic_startup_edge_irq (unsigned int irq)
{
	unmask_irq(irq);
	/*
	 * IOSAPIC simply drops interrupts pended while the
	 * corresponding pin was masked, so we can't know if an
	 * interrupt is pending already.  Let's hope not...
	 */
	return 0;
}

static void
iosapic_ack_edge_irq (unsigned int irq)
{
	struct irq_desc *idesc = irq_desc + irq;

	irq_complete_move(irq);
	move_native_irq(irq);
	/*
	 * Once we have recorded IRQ_PENDING already, we can mask the
	 * interrupt for real.  This prevents IRQ storms from unhandled
	 * devices.
	 */
	if ((idesc->status & (IRQ_PENDING|IRQ_DISABLED)) ==
	    (IRQ_PENDING|IRQ_DISABLED))
		mask_irq(irq);
}

#define iosapic_enable_edge_irq		unmask_irq
#define iosapic_disable_edge_irq	nop
#define iosapic_end_edge_irq		nop

static struct irq_chip irq_type_iosapic_edge = {
	.name =		"IO-SAPIC-edge",
	.startup =	iosapic_startup_edge_irq,
	.shutdown =	iosapic_disable_edge_irq,
	.enable =	iosapic_enable_edge_irq,
	.disable =	iosapic_disable_edge_irq,
	.ack =		iosapic_ack_edge_irq,
	.end =		iosapic_end_edge_irq,
	.mask =		mask_irq,
	.unmask =	unmask_irq,
	.set_affinity =	iosapic_set_affinity
};
static unsigned int
iosapic_version (char __iomem *addr)
{
	/*
	 * The IOSAPIC Version Register returns a 32-bit value structured
	 * like:
	 * {
	 *	unsigned int version   : 8;
	 *	unsigned int reserved1 : 8;
	 *	unsigned int max_redir : 8;
	 *	unsigned int reserved2 : 8;
	 * }
	 */
	return __iosapic_read(addr, IOSAPIC_VERSION);
}
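/*
 * Sketch of how a caller can pick the version register apart (field layout
 * per the comment above; the local variable names are just for illustration):
 *
 *	u32 ver = iosapic_version(addr);
 *	unsigned int version   = ver & 0xff;		// implementation version
 *	unsigned int max_redir = (ver >> 16) & 0xff;	// highest RTE index
 *	unsigned int num_rte   = max_redir + 1;		// as used in iosapic_init()
 */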
static int iosapic_find_sharable_irq(unsigned long trigger, unsigned long pol)
{
	int i, irq = -ENOSPC, min_count = -1;
	struct iosapic_intr_info *info;

	/*
	 * shared vectors for edge-triggered interrupts are not
	 * supported yet
	 */
	if (trigger == IOSAPIC_EDGE)
		return -EINVAL;

	for (i = 0; i < NR_IRQS; i++) {
		info = &iosapic_intr_info[i];
		if (info->trigger == trigger && info->polarity == pol &&
		    (info->dmode == IOSAPIC_FIXED ||
		     info->dmode == IOSAPIC_LOWEST_PRIORITY) &&
		    can_request_irq(i, IRQF_SHARED)) {
			if (min_count == -1 || info->count < min_count) {
				irq = i;
				min_count = info->count;
			}
		}
	}
	return irq;
}

/*
 * If the given vector is already owned by another interrupt, assign a new
 * vector to that one and make this vector available.
 */
static void __init
iosapic_reassign_vector (int irq)
{
	int new_irq;

	if (iosapic_intr_info[irq].count) {
		new_irq = create_irq();
		if (new_irq < 0)
			panic("%s: out of interrupt vectors!\n", __func__);
		printk(KERN_INFO "Reassigning vector %d to %d\n",
		       irq_to_vector(irq), irq_to_vector(new_irq));
		memcpy(&iosapic_intr_info[new_irq], &iosapic_intr_info[irq],
		       sizeof(struct iosapic_intr_info));
		INIT_LIST_HEAD(&iosapic_intr_info[new_irq].rtes);
		list_move(iosapic_intr_info[irq].rtes.next,
			  &iosapic_intr_info[new_irq].rtes);
		memset(&iosapic_intr_info[irq], 0,
		       sizeof(struct iosapic_intr_info));
		iosapic_intr_info[irq].low32 = IOSAPIC_MASK;
		INIT_LIST_HEAD(&iosapic_intr_info[irq].rtes);
	}
}

static struct iosapic_rte_info * __init_refok iosapic_alloc_rte (void)
{
	int i;
	struct iosapic_rte_info *rte;
	int preallocated = 0;

	if (!iosapic_kmalloc_ok && list_empty(&free_rte_list)) {
		rte = alloc_bootmem(sizeof(struct iosapic_rte_info) *
				    NR_PREALLOCATE_RTE_ENTRIES);
		for (i = 0; i < NR_PREALLOCATE_RTE_ENTRIES; i++, rte++)
			list_add(&rte->rte_list, &free_rte_list);
	}

	if (!list_empty(&free_rte_list)) {
		rte = list_entry(free_rte_list.next, struct iosapic_rte_info,
				 rte_list);
		list_del(&rte->rte_list);
		preallocated++;
	} else {
		rte = kmalloc(sizeof(struct iosapic_rte_info), GFP_ATOMIC);
		if (!rte)
			return NULL;
	}

	memset(rte, 0, sizeof(struct iosapic_rte_info));
	if (preallocated)
		rte->flags |= RTE_PREALLOCATED;

	return rte;
}

static inline int irq_is_shared (int irq)
{
	return (iosapic_intr_info[irq].count > 1);
}
struct irq_chip*
ia64_native_iosapic_get_irq_chip(unsigned long trigger)
{
	if (trigger == IOSAPIC_EDGE)
		return &irq_type_iosapic_edge;
	else
		return &irq_type_iosapic_level;
}

static int
register_intr (unsigned int gsi, int irq, unsigned char delivery,
	       unsigned long polarity, unsigned long trigger)
{
	struct irq_desc *idesc;
	struct irq_chip *irq_type;
	int index;
	struct iosapic_rte_info *rte;

	index = find_iosapic(gsi);
	if (index < 0) {
		printk(KERN_WARNING "%s: No IOSAPIC for GSI %u\n",
		       __func__, gsi);
		return -ENODEV;
	}

	rte = find_rte(irq, gsi);
	if (!rte) {
		rte = iosapic_alloc_rte();
		if (!rte) {
			printk(KERN_WARNING "%s: cannot allocate memory\n",
			       __func__);
			return -ENOMEM;
		}

		rte->iosapic   = &iosapic_lists[index];
		rte->rte_index = gsi - rte->iosapic->gsi_base;
		rte->refcnt++;
		list_add_tail(&rte->rte_list, &iosapic_intr_info[irq].rtes);
		iosapic_intr_info[irq].count++;
		iosapic_lists[index].rtes_inuse++;
	}
	else if (rte->refcnt == NO_REF_RTE) {
		struct iosapic_intr_info *info = &iosapic_intr_info[irq];
		if (info->count > 0 &&
		    (info->trigger != trigger || info->polarity != polarity)){
			printk (KERN_WARNING
				"%s: cannot override the interrupt\n",
				__func__);
			return -EINVAL;
		}
		rte->refcnt++;
		iosapic_intr_info[irq].count++;
		iosapic_lists[index].rtes_inuse++;
	}

	iosapic_intr_info[irq].polarity = polarity;
	iosapic_intr_info[irq].dmode    = delivery;
	iosapic_intr_info[irq].trigger  = trigger;

	irq_type = iosapic_get_irq_chip(trigger);

	idesc = irq_desc + irq;
	if (irq_type != NULL && idesc->chip != irq_type) {
		if (idesc->chip != &no_irq_chip)
			printk(KERN_WARNING
			       "%s: changing vector %d from %s to %s\n",
			       __func__, irq_to_vector(irq),
			       idesc->chip->name, irq_type->name);
		idesc->chip = irq_type;
	}
	return 0;
}
static unsigned int
get_target_cpu (unsigned int gsi, int irq)
{
#ifdef CONFIG_SMP
	static int cpu = -1;
	extern int cpe_vector;
	cpumask_t domain = irq_to_domain(irq);

	/*
	 * In case of vector shared by multiple RTEs, all RTEs that
	 * share the vector need to use the same destination CPU.
	 */
	if (iosapic_intr_info[irq].count)
		return iosapic_intr_info[irq].dest;

	/*
	 * If the platform supports redirection via XTP, let it
	 * distribute interrupts.
	 */
	if (smp_int_redirect & SMP_IRQ_REDIRECTION)
		return cpu_physical_id(smp_processor_id());

	/*
	 * Some interrupts (ACPI SCI, for instance) are registered
	 * before the BSP is marked as online.
	 */
	if (!cpu_online(smp_processor_id()))
		return cpu_physical_id(smp_processor_id());

#ifdef CONFIG_ACPI
	if (cpe_vector > 0 && irq_to_vector(irq) == IA64_CPEP_VECTOR)
		return get_cpei_target_cpu();
#endif

#ifdef CONFIG_NUMA
	{
		int num_cpus, cpu_index, iosapic_index, numa_cpu, i = 0;
		const struct cpumask *cpu_mask;

		iosapic_index = find_iosapic(gsi);
		if (iosapic_index < 0 ||
		    iosapic_lists[iosapic_index].node == MAX_NUMNODES)
			goto skip_numa_setup;

		cpu_mask = cpumask_of_node(iosapic_lists[iosapic_index].node);
		num_cpus = 0;
		for_each_cpu_and(numa_cpu, cpu_mask, &domain) {
			if (cpu_online(numa_cpu))
				num_cpus++;
		}

		if (!num_cpus)
			goto skip_numa_setup;

		/* Use irq assignment to distribute across cpus in node */
		cpu_index = irq % num_cpus;

		for_each_cpu_and(numa_cpu, cpu_mask, &domain)
			if (cpu_online(numa_cpu) && i++ >= cpu_index)
				break;

		if (numa_cpu < nr_cpu_ids)
			return cpu_physical_id(numa_cpu);
	}
skip_numa_setup:
#endif
	/*
	 * Otherwise, round-robin interrupt vectors across all the
	 * processors.  (It'd be nice if we could be smarter in the
	 * case of NUMA.)
	 */
	do {
		if (++cpu >= nr_cpu_ids)
			cpu = 0;
	} while (!cpu_online(cpu) || !cpu_isset(cpu, domain));

	return cpu_physical_id(cpu);
#else  /* CONFIG_SMP */
	return cpu_physical_id(smp_processor_id());
#endif
}

static inline unsigned char choose_dmode(void)
{
#ifdef CONFIG_SMP
	if (smp_int_redirect & SMP_IRQ_REDIRECTION)
		return IOSAPIC_LOWEST_PRIORITY;
#endif
	return IOSAPIC_FIXED;
}
/*
 * ACPI can describe IOSAPIC interrupts via static tables and namespace
 * methods.  This provides an interface to register those interrupts and
 * program the IOSAPIC RTE.
 */
int
iosapic_register_intr (unsigned int gsi,
		       unsigned long polarity, unsigned long trigger)
{
	int irq, mask = 1, err;
	unsigned int dest;
	unsigned long flags;
	struct iosapic_rte_info *rte;
	u32 low32;
	unsigned char dmode;

	/*
	 * If this GSI has already been registered (i.e., it's a
	 * shared interrupt, or we lost a race to register it),
	 * don't touch the RTE.
	 */
	spin_lock_irqsave(&iosapic_lock, flags);
	irq = __gsi_to_irq(gsi);
	if (irq > 0) {
		rte = find_rte(irq, gsi);
		if (iosapic_intr_info[irq].count == 0) {
			assign_irq_vector(irq);
			dynamic_irq_init(irq);
		} else if (rte->refcnt != NO_REF_RTE) {
			rte->refcnt++;
			goto unlock_iosapic_lock;
		}
	} else
		irq = create_irq();

	/* If vector is running out, we try to find a sharable vector */
	if (irq < 0) {
		irq = iosapic_find_sharable_irq(trigger, polarity);
		if (irq < 0)
			goto unlock_iosapic_lock;
	}

	raw_spin_lock(&irq_desc[irq].lock);
	dest = get_target_cpu(gsi, irq);
	dmode = choose_dmode();
	err = register_intr(gsi, irq, dmode, polarity, trigger);
	if (err < 0) {
		raw_spin_unlock(&irq_desc[irq].lock);
		irq = err;
		goto unlock_iosapic_lock;
	}

	/*
	 * If the vector is shared and already unmasked for other
	 * interrupt sources, don't mask it.
	 */
	low32 = iosapic_intr_info[irq].low32;
	if (irq_is_shared(irq) && !(low32 & IOSAPIC_MASK))
		mask = 0;
	set_rte(gsi, irq, dest, mask);

	printk(KERN_INFO "GSI %u (%s, %s) -> CPU %d (0x%04x) vector %d\n",
	       gsi, (trigger == IOSAPIC_EDGE ? "edge" : "level"),
	       (polarity == IOSAPIC_POL_HIGH ? "high" : "low"),
	       cpu_logical_id(dest), dest, irq_to_vector(irq));

	raw_spin_unlock(&irq_desc[irq].lock);
 unlock_iosapic_lock:
	spin_unlock_irqrestore(&iosapic_lock, flags);
	return irq;
}
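/*
 * Usage sketch (for illustration only; the GSI, polarity and trigger values
 * below are made up): ACPI-driven code that has resolved a device interrupt
 * to a GSI would register and later release it roughly like this:
 *
 *	int irq = iosapic_register_intr(gsi, IOSAPIC_POL_LOW, IOSAPIC_LEVEL);
 *	if (irq < 0)
 *		return irq;		// no vector / no IOSAPIC for this GSI
 *	...
 *	iosapic_unregister_intr(gsi);	// must balance the register call
 */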
void
iosapic_unregister_intr (unsigned int gsi)
{
	unsigned long flags;
	int irq, index;
	struct irq_desc *idesc;
	u32 low32;
	unsigned long trigger, polarity;
	unsigned int dest;
	struct iosapic_rte_info *rte;

	/*
	 * If the irq associated with the gsi is not found,
	 * iosapic_unregister_intr() is unbalanced. We need to check
	 * this again after getting locks.
	 */
	irq = gsi_to_irq(gsi);
	if (irq < 0) {
		printk(KERN_ERR "iosapic_unregister_intr(%u) unbalanced\n",
		       gsi);
		WARN_ON(1);
		return;
	}

	spin_lock_irqsave(&iosapic_lock, flags);
	if ((rte = find_rte(irq, gsi)) == NULL) {
		printk(KERN_ERR "iosapic_unregister_intr(%u) unbalanced\n",
		       gsi);
		WARN_ON(1);
		goto out;
	}

	if (--rte->refcnt > 0)
		goto out;

	idesc = irq_desc + irq;
	rte->refcnt = NO_REF_RTE;

	/* Mask the interrupt */
	low32 = iosapic_intr_info[irq].low32 | IOSAPIC_MASK;
	iosapic_write(rte->iosapic, IOSAPIC_RTE_LOW(rte->rte_index), low32);

	iosapic_intr_info[irq].count--;
	index = find_iosapic(gsi);
	iosapic_lists[index].rtes_inuse--;
	WARN_ON(iosapic_lists[index].rtes_inuse < 0);

	trigger  = iosapic_intr_info[irq].trigger;
	polarity = iosapic_intr_info[irq].polarity;
	dest     = iosapic_intr_info[irq].dest;
	printk(KERN_INFO
	       "GSI %u (%s, %s) -> CPU %d (0x%04x) vector %d unregistered\n",
	       gsi, (trigger == IOSAPIC_EDGE ? "edge" : "level"),
	       (polarity == IOSAPIC_POL_HIGH ? "high" : "low"),
	       cpu_logical_id(dest), dest, irq_to_vector(irq));

	if (iosapic_intr_info[irq].count == 0) {
#ifdef CONFIG_SMP
		/* Clear affinity */
		cpumask_setall(idesc->affinity);
#endif
		/* Clear the interrupt information */
		iosapic_intr_info[irq].dest = 0;
		iosapic_intr_info[irq].dmode = 0;
		iosapic_intr_info[irq].polarity = 0;
		iosapic_intr_info[irq].trigger = 0;
		iosapic_intr_info[irq].low32 |= IOSAPIC_MASK;

		/* Destroy and reserve IRQ */
		destroy_and_reserve_irq(irq);
	}
 out:
	spin_unlock_irqrestore(&iosapic_lock, flags);
}
/*
 * ACPI calls this when it finds an entry for a platform interrupt.
 */
int __init
iosapic_register_platform_intr (u32 int_type, unsigned int gsi,
				int iosapic_vector, u16 eid, u16 id,
				unsigned long polarity, unsigned long trigger)
{
	static const char * const name[] = {"unknown", "PMI", "INIT", "CPEI"};
	unsigned char delivery;
	int irq, vector, mask = 0;
	unsigned int dest = ((id << 8) | eid) & 0xffff;

	switch (int_type) {
	case ACPI_INTERRUPT_PMI:
		irq = vector = iosapic_vector;
		bind_irq_vector(irq, vector, CPU_MASK_ALL);
		/*
		 * since PMI vector is alloc'd by FW(ACPI) not by kernel,
		 * we need to make sure the vector is available
		 */
		iosapic_reassign_vector(irq);
		delivery = IOSAPIC_PMI;
		break;
	case ACPI_INTERRUPT_INIT:
		irq = create_irq();
		if (irq < 0)
			panic("%s: out of interrupt vectors!\n", __func__);
		vector = irq_to_vector(irq);
		delivery = IOSAPIC_INIT;
		break;
	case ACPI_INTERRUPT_CPEI:
		irq = vector = IA64_CPE_VECTOR;
		BUG_ON(bind_irq_vector(irq, vector, CPU_MASK_ALL));
		delivery = IOSAPIC_FIXED;
		mask = 1;
		break;
	default:
		printk(KERN_ERR "%s: invalid int type 0x%x\n", __func__,
		       int_type);
		return -1;
	}

	register_intr(gsi, irq, delivery, polarity, trigger);

	printk(KERN_INFO
	       "PLATFORM int %s (0x%x): GSI %u (%s, %s) -> CPU %d (0x%04x)"
	       " vector %d\n",
	       int_type < ARRAY_SIZE(name) ? name[int_type] : "unknown",
	       int_type, gsi, (trigger == IOSAPIC_EDGE ? "edge" : "level"),
	       (polarity == IOSAPIC_POL_HIGH ? "high" : "low"),
	       cpu_logical_id(dest), dest, vector);

	set_rte(gsi, irq, dest, mask);
	return vector;
}

/*
 * ACPI calls this when it finds an entry for a legacy ISA IRQ override.
 */
void __devinit
iosapic_override_isa_irq (unsigned int isa_irq, unsigned int gsi,
			  unsigned long polarity,
			  unsigned long trigger)
{
	int vector, irq;
	unsigned int dest = cpu_physical_id(smp_processor_id());
	unsigned char dmode;

	irq = vector = isa_irq_to_vector(isa_irq);
	BUG_ON(bind_irq_vector(irq, vector, CPU_MASK_ALL));
	dmode = choose_dmode();
	register_intr(gsi, irq, dmode, polarity, trigger);

	DBG("ISA: IRQ %u -> GSI %u (%s,%s) -> CPU %d (0x%04x) vector %d\n",
	    isa_irq, gsi, trigger == IOSAPIC_EDGE ? "edge" : "level",
	    polarity == IOSAPIC_POL_HIGH ? "high" : "low",
	    cpu_logical_id(dest), dest, vector);

	set_rte(gsi, irq, dest, 1);
}
void __init
ia64_native_iosapic_pcat_compat_init(void)
{
	if (pcat_compat) {
		/*
		 * Disable the compatibility mode interrupts (8259 style),
		 * needs IN/OUT support enabled.
		 */
		printk(KERN_INFO
		       "%s: Disabling PC-AT compatible 8259 interrupts\n",
		       __func__);
		outb(0xff, 0xA1);
		outb(0xff, 0x21);
	}
}

void __init
iosapic_system_init (int system_pcat_compat)
{
	int irq;

	for (irq = 0; irq < NR_IRQS; ++irq) {
		iosapic_intr_info[irq].low32 = IOSAPIC_MASK;
		/* mark as unused */
		INIT_LIST_HEAD(&iosapic_intr_info[irq].rtes);

		iosapic_intr_info[irq].count = 0;
	}

	pcat_compat = system_pcat_compat;
	if (pcat_compat)
		iosapic_pcat_compat_init();
}

static inline int
iosapic_alloc (void)
{
	int index;

	for (index = 0; index < NR_IOSAPICS; index++)
		if (!iosapic_lists[index].addr)
			return index;

	printk(KERN_WARNING "%s: failed to allocate iosapic\n", __func__);
	return -1;
}

static inline void
iosapic_free (int index)
{
	memset(&iosapic_lists[index], 0, sizeof(iosapic_lists[0]));
}

static inline int
iosapic_check_gsi_range (unsigned int gsi_base, unsigned int ver)
{
	int index;
	unsigned int gsi_end, base, end;

	/* check gsi range */
	gsi_end = gsi_base + ((ver >> 16) & 0xff);
	for (index = 0; index < NR_IOSAPICS; index++) {
		if (!iosapic_lists[index].addr)
			continue;

		base = iosapic_lists[index].gsi_base;
		end  = base + iosapic_lists[index].num_rte - 1;

		if (gsi_end < base || end < gsi_base)
			continue;	/* OK */

		return -EBUSY;
	}
	return 0;
}
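/*
 * Worked example for the range check above (illustrative numbers): if an
 * already-registered IOSAPIC covers GSIs 16..39 (gsi_base 16, 24 RTEs), then
 * registering a new one with gsi_base 32 and max_redir 7 gives gsi_end = 39;
 * neither "39 < 16" nor "39 < 32" holds, so the ranges overlap and the check
 * returns -EBUSY.
 */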
int __devinit
iosapic_init (unsigned long phys_addr, unsigned int gsi_base)
{
	int num_rte, err, index;
	unsigned int isa_irq, ver;
	char __iomem *addr;
	unsigned long flags;

	spin_lock_irqsave(&iosapic_lock, flags);
	index = find_iosapic(gsi_base);
	if (index >= 0) {
		spin_unlock_irqrestore(&iosapic_lock, flags);
		return -EBUSY;
	}

	addr = ioremap(phys_addr, 0);
	if (addr == NULL) {
		spin_unlock_irqrestore(&iosapic_lock, flags);
		return -ENOMEM;
	}
	ver = iosapic_version(addr);
	if ((err = iosapic_check_gsi_range(gsi_base, ver))) {
		iounmap(addr);
		spin_unlock_irqrestore(&iosapic_lock, flags);
		return err;
	}

	/*
	 * The MAX_REDIR register holds the highest input pin number
	 * (starting from 0).  We add 1 so that we can use it as the
	 * number of pins (= RTEs).
	 */
	num_rte = ((ver >> 16) & 0xff) + 1;

	index = iosapic_alloc();
	iosapic_lists[index].addr = addr;
	iosapic_lists[index].gsi_base = gsi_base;
	iosapic_lists[index].num_rte = num_rte;
#ifdef CONFIG_NUMA
	iosapic_lists[index].node = MAX_NUMNODES;
#endif
	spin_lock_init(&iosapic_lists[index].lock);
	spin_unlock_irqrestore(&iosapic_lock, flags);

	if ((gsi_base == 0) && pcat_compat) {
		/*
		 * Map the legacy ISA devices into the IOSAPIC data.  Some of
		 * these may get reprogrammed later on with data from the ACPI
		 * Interrupt Source Override table.
		 */
		for (isa_irq = 0; isa_irq < 16; ++isa_irq)
			iosapic_override_isa_irq(isa_irq, isa_irq,
						 IOSAPIC_POL_HIGH,
						 IOSAPIC_EDGE);
	}
	return 0;
}
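/*
 * Sketch of a typical caller (illustrative only; the MADT parsing details
 * live in arch/ia64/kernel/acpi.c and the field names here are assumptions
 * about the ACPI I/O SAPIC table entry):
 *
 *	// for each I/O SAPIC entry found in the ACPI MADT:
 *	iosapic_init(entry->address, entry->global_irq_base);
 *	...
 *	// and on hot-removal (CONFIG_HOTPLUG), once all RTEs are unused:
 *	iosapic_remove(entry->global_irq_base);
 */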
#ifdef CONFIG_HOTPLUG
int
iosapic_remove (unsigned int gsi_base)
{
	int index, err = 0;
	unsigned long flags;

	spin_lock_irqsave(&iosapic_lock, flags);
	index = find_iosapic(gsi_base);
	if (index < 0) {
		printk(KERN_WARNING "%s: No IOSAPIC for GSI base %u\n",
		       __func__, gsi_base);
		goto out;
	}

	if (iosapic_lists[index].rtes_inuse) {
		err = -EBUSY;
		printk(KERN_WARNING "%s: IOSAPIC for GSI base %u is busy\n",
		       __func__, gsi_base);
		goto out;
	}

	iounmap(iosapic_lists[index].addr);
	iosapic_free(index);
 out:
	spin_unlock_irqrestore(&iosapic_lock, flags);
	return err;
}
#endif /* CONFIG_HOTPLUG */

#ifdef CONFIG_NUMA
void __devinit
map_iosapic_to_node(unsigned int gsi_base, int node)
{
	int index;

	index = find_iosapic(gsi_base);
	if (index < 0) {
		printk(KERN_WARNING "%s: No IOSAPIC for GSI %u\n",
		       __func__, gsi_base);
		return;
	}
	iosapic_lists[index].node = node;
	return;
}
#endif

static int __init iosapic_enable_kmalloc (void)
{
	iosapic_kmalloc_ok = 1;
	return 0;
}
core_initcall (iosapic_enable_kmalloc);