iosapic.c

/*
 * I/O SAPIC support.
 *
 * Copyright (C) 1999 Intel Corp.
 * Copyright (C) 1999 Asit Mallick <asit.k.mallick@intel.com>
 * Copyright (C) 2000-2002 J.I. Lee <jung-ik.lee@intel.com>
 * Copyright (C) 1999-2000, 2002-2003 Hewlett-Packard Co.
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 * Copyright (C) 1999 VA Linux Systems
 * Copyright (C) 1999,2000 Walt Drummond <drummond@valinux.com>
 *
 * 00/04/19	D. Mosberger	Rewritten to mirror more closely the x86 I/O
 *				APIC code.  In particular, we now have separate
 *				handlers for edge and level triggered
 *				interrupts.
 * 00/10/27	Asit Mallick, Goutham Rao <goutham.rao@intel.com> IRQ vector
 *				allocation, PCI to vector mapping, shared PCI
 *				interrupts.
 * 00/10/27	D. Mosberger	Document things a bit more to make them more
 *				understandable.  Clean up much of the old
 *				IOSAPIC cruft.
 * 01/07/27	J.I. Lee	PCI irq routing, Platform/Legacy interrupts
 *				and fixes for ACPI S5 (SoftOff) support.
 * 02/01/23	J.I. Lee	iosapic pgm fixes for PCI irq routing from _PRT
 * 02/01/07	E. Focht	<efocht@ess.nec.de> Redirectable interrupt
 *				vectors in iosapic_set_affinity(),
 *				initializations for /proc/irq/#/smp_affinity
 * 02/04/02	P. Diefenbaugh	Cleaned up ACPI PCI IRQ routing.
 * 02/04/18	J.I. Lee	bug fix in iosapic_init_pci_irq
 * 02/04/30	J.I. Lee	bug fix in find_iosapic to fix ACPI PCI IRQ to
 *				IOSAPIC mapping error
 * 02/07/29	T. Kochi	Allocate interrupt vectors dynamically
 * 02/08/04	T. Kochi	Cleaned up terminology (irq, global system
 *				interrupt, vector, etc.)
 * 02/09/20	D. Mosberger	Simplified by taking advantage of ACPI's
 *				pci_irq code.
 * 03/02/19	B. Helgaas	Make pcat_compat system-wide, not per-IOSAPIC.
 *				Remove iosapic_address & gsi_base from
 *				external interfaces.  Rationalize
 *				__init/__devinit attributes.
 * 04/12/04	Ashok Raj	<ashok.raj@intel.com> Intel Corporation 2004
 *				Updated to work with irq migration necessary
 *				for CPU Hotplug
 */
/*
 * Here is what the interrupt logic between a PCI device and the kernel looks
 * like:
 *
 * (1) A PCI device raises one of the four interrupt pins (INTA, INTB, INTC,
 *     INTD).  The device is uniquely identified by its bus and slot number
 *     (the function number does not matter here because all functions share
 *     the same interrupt lines).
 *
 * (2) The motherboard routes the interrupt line to a pin on an IOSAPIC
 *     controller.  Multiple interrupt lines may have to share the same
 *     IOSAPIC pin (if they're level triggered and use the same polarity).
 *     Each interrupt line has a unique Global System Interrupt (GSI) number
 *     which can be calculated as the sum of the controller's base GSI number
 *     and the IOSAPIC pin number to which the line connects.
 *
 * (3) The IOSAPIC uses internal routing table entries (RTEs) to map each
 *     IOSAPIC pin to an IA-64 interrupt vector.  This interrupt vector is
 *     then sent to the CPU.
 *
 * (4) The kernel recognizes an interrupt as an IRQ.  The IRQ interface is
 *     used as the architecture-independent interrupt handling mechanism in
 *     Linux.  As an IRQ is a number, we need an
 *     IA-64 interrupt vector number <-> IRQ number mapping.  On smaller
 *     systems, we use a one-to-one mapping between IA-64 vector and IRQ.  A
 *     platform can implement the platform_irq_to_vector(irq) and
 *     platform_local_vector_to_irq(vector) APIs to differentiate the mapping.
 *     Please see also arch/ia64/include/asm/hw_irq.h for those APIs.
 *
 * To sum up, there are three levels of mappings involved:
 *
 *	PCI pin -> global system interrupt (GSI) -> IA-64 vector <-> IRQ
 *
 * Note: The term "IRQ" is loosely used everywhere in the Linux kernel to
 * describe interrupts.  Now we use "IRQ" only for Linux IRQs.  ISA IRQ
 * (isa_irq) is the only exception in this source code.
 */
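
/*
 * Illustrative example of the mapping above (hypothetical numbers): a
 * level-triggered line wired to pin 3 of an IOSAPIC whose gsi_base is 16
 * gets GSI 16 + 3 = 19; iosapic_register_intr() then assigns (or shares) an
 * IA-64 vector for that GSI, and the vector is in turn tied to a Linux IRQ.
 */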

#include <linux/acpi.h>
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/smp.h>
#include <linux/string.h>
#include <linux/bootmem.h>

#include <asm/delay.h>
#include <asm/hw_irq.h>
#include <asm/io.h>
#include <asm/iosapic.h>
#include <asm/machvec.h>
#include <asm/processor.h>
#include <asm/ptrace.h>
#include <asm/system.h>

#undef DEBUG_INTERRUPT_ROUTING

#ifdef DEBUG_INTERRUPT_ROUTING
#define DBG(fmt...)	printk(fmt)
#else
#define DBG(fmt...)
#endif

#define NR_PREALLOCATE_RTE_ENTRIES \
	(PAGE_SIZE / sizeof(struct iosapic_rte_info))
#define RTE_PREALLOCATED	(1)

static DEFINE_SPINLOCK(iosapic_lock);

/*
 * These tables map IA-64 vectors to the IOSAPIC pin that generates this
 * vector.
 */

#define NO_REF_RTE	0

static struct iosapic {
	char __iomem	*addr;		/* base address of IOSAPIC */
	unsigned int	gsi_base;	/* GSI base */
	unsigned short	num_rte;	/* # of RTEs on this IOSAPIC */
	int		rtes_inuse;	/* # of RTEs in use on this IOSAPIC */
#ifdef CONFIG_NUMA
	unsigned short	node;		/* numa node association via pxm */
#endif
	spinlock_t	lock;		/* lock for indirect reg access */
} iosapic_lists[NR_IOSAPICS];

struct iosapic_rte_info {
	struct list_head rte_list;	/* RTEs sharing the same vector */
	char		rte_index;	/* IOSAPIC RTE index */
	int		refcnt;		/* reference counter */
	unsigned int	flags;		/* flags */
	struct iosapic	*iosapic;
} ____cacheline_aligned;

static struct iosapic_intr_info {
	struct list_head rtes;		/* RTEs using this vector (empty =>
					 * not an IOSAPIC interrupt) */
	int		count;		/* # of registered RTEs */
	u32		low32;		/* current value of low word of
					 * Redirection table entry */
	unsigned int	dest;		/* destination CPU physical ID */
	unsigned char	dmode	: 3;	/* delivery mode (see iosapic.h) */
	unsigned char	polarity: 1;	/* interrupt polarity
					 * (see iosapic.h) */
	unsigned char	trigger	: 1;	/* trigger mode (see iosapic.h) */
} iosapic_intr_info[NR_IRQS];

static unsigned char pcat_compat __devinitdata;	/* 8259 compatibility flag */

static int iosapic_kmalloc_ok;
static LIST_HEAD(free_rte_list);

static inline void
iosapic_write(struct iosapic *iosapic, unsigned int reg, u32 val)
{
	unsigned long flags;

	spin_lock_irqsave(&iosapic->lock, flags);
	__iosapic_write(iosapic->addr, reg, val);
	spin_unlock_irqrestore(&iosapic->lock, flags);
}

/*
 * Find an IOSAPIC associated with a GSI
 */
static inline int
find_iosapic (unsigned int gsi)
{
	int i;

	for (i = 0; i < NR_IOSAPICS; i++) {
		if ((unsigned) (gsi - iosapic_lists[i].gsi_base) <
		    iosapic_lists[i].num_rte)
			return i;
	}

	return -1;
}
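
/*
 * Translate a GSI back to the Linux IRQ it was assigned to by walking every
 * IRQ's RTE list; returns -1 if no RTE is wired to this GSI.  Called with
 * iosapic_lock held.
 */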
static inline int __gsi_to_irq(unsigned int gsi)
{
	int irq;
	struct iosapic_intr_info *info;
	struct iosapic_rte_info *rte;

	for (irq = 0; irq < NR_IRQS; irq++) {
		info = &iosapic_intr_info[irq];
		list_for_each_entry(rte, &info->rtes, rte_list)
			if (rte->iosapic->gsi_base + rte->rte_index == gsi)
				return irq;
	}
	return -1;
}

int
gsi_to_irq (unsigned int gsi)
{
	unsigned long flags;
	int irq;

	spin_lock_irqsave(&iosapic_lock, flags);
	irq = __gsi_to_irq(gsi);
	spin_unlock_irqrestore(&iosapic_lock, flags);
	return irq;
}

static struct iosapic_rte_info *find_rte(unsigned int irq, unsigned int gsi)
{
	struct iosapic_rte_info *rte;

	list_for_each_entry(rte, &iosapic_intr_info[irq].rtes, rte_list)
		if (rte->iosapic->gsi_base + rte->rte_index == gsi)
			return rte;

	return NULL;
}
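
/*
 * Program the RTE behind (gsi, irq): build the low word from polarity,
 * trigger, delivery mode, mask bit and vector, the high word from the
 * destination id/eid, write both to the IOSAPIC, and cache them in
 * iosapic_intr_info[].
 */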
static void
set_rte (unsigned int gsi, unsigned int irq, unsigned int dest, int mask)
{
	unsigned long pol, trigger, dmode;
	u32 low32, high32;
	int rte_index;
	char redir;
	struct iosapic_rte_info *rte;
	ia64_vector vector = irq_to_vector(irq);

	DBG(KERN_DEBUG"IOSAPIC: routing vector %d to 0x%x\n", vector, dest);

	rte = find_rte(irq, gsi);
	if (!rte)
		return;		/* not an IOSAPIC interrupt */

	rte_index = rte->rte_index;
	pol     = iosapic_intr_info[irq].polarity;
	trigger = iosapic_intr_info[irq].trigger;
	dmode   = iosapic_intr_info[irq].dmode;

	redir = (dmode == IOSAPIC_LOWEST_PRIORITY) ? 1 : 0;

#ifdef CONFIG_SMP
	set_irq_affinity_info(irq, (int)(dest & 0xffff), redir);
#endif

	low32 = ((pol << IOSAPIC_POLARITY_SHIFT) |
		 (trigger << IOSAPIC_TRIGGER_SHIFT) |
		 (dmode << IOSAPIC_DELIVERY_SHIFT) |
		 ((mask ? 1 : 0) << IOSAPIC_MASK_SHIFT) |
		 vector);

	/* dest contains both id and eid */
	high32 = (dest << IOSAPIC_DEST_SHIFT);

	iosapic_write(rte->iosapic, IOSAPIC_RTE_HIGH(rte_index), high32);
	iosapic_write(rte->iosapic, IOSAPIC_RTE_LOW(rte_index), low32);
	iosapic_intr_info[irq].low32 = low32;
	iosapic_intr_info[irq].dest = dest;
}

static void
nop (unsigned int irq)
{
	/* do nothing... */
}

#ifdef CONFIG_KEXEC
void
kexec_disable_iosapic(void)
{
	struct iosapic_intr_info *info;
	struct iosapic_rte_info *rte;
	ia64_vector vec;
	int irq;

	for (irq = 0; irq < NR_IRQS; irq++) {
		info = &iosapic_intr_info[irq];
		vec = irq_to_vector(irq);
		list_for_each_entry(rte, &info->rtes, rte_list) {
			iosapic_write(rte->iosapic,
				      IOSAPIC_RTE_LOW(rte->rte_index),
				      IOSAPIC_MASK|vec);
			iosapic_eoi(rte->iosapic->addr, vec);
		}
	}
}
#endif
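
/*
 * Mask/unmask an IRQ at the IOSAPIC: set or clear the mask bit in the cached
 * low word and write it back to every RTE that feeds this IRQ's vector.
 */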
static void
mask_irq (unsigned int irq)
{
	u32 low32;
	int rte_index;
	struct iosapic_rte_info *rte;

	if (!iosapic_intr_info[irq].count)
		return;			/* not an IOSAPIC interrupt! */

	/* set only the mask bit */
	low32 = iosapic_intr_info[irq].low32 |= IOSAPIC_MASK;
	list_for_each_entry(rte, &iosapic_intr_info[irq].rtes, rte_list) {
		rte_index = rte->rte_index;
		iosapic_write(rte->iosapic, IOSAPIC_RTE_LOW(rte_index), low32);
	}
}

static void
unmask_irq (unsigned int irq)
{
	u32 low32;
	int rte_index;
	struct iosapic_rte_info *rte;

	if (!iosapic_intr_info[irq].count)
		return;			/* not an IOSAPIC interrupt! */

	low32 = iosapic_intr_info[irq].low32 &= ~IOSAPIC_MASK;
	list_for_each_entry(rte, &iosapic_intr_info[irq].rtes, rte_list) {
		rte_index = rte->rte_index;
		iosapic_write(rte->iosapic, IOSAPIC_RTE_LOW(rte_index), low32);
	}
}
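
/*
 * Re-target an IRQ to the first online CPU in the requested mask: rewrite the
 * destination (high word) and the delivery mode (lowest priority when the
 * IA64_IRQ_REDIRECTED hint is set, fixed otherwise) in every RTE sharing the
 * vector.  SMP only; a no-op on UP builds.
 */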
static void
iosapic_set_affinity(unsigned int irq, const struct cpumask *mask)
{
#ifdef CONFIG_SMP
	u32 high32, low32;
	int cpu, dest, rte_index;
	int redir = (irq & IA64_IRQ_REDIRECTED) ? 1 : 0;
	struct iosapic_rte_info *rte;
	struct iosapic *iosapic;

	irq &= (~IA64_IRQ_REDIRECTED);

	cpu = cpumask_first_and(cpu_online_mask, mask);
	if (cpu >= nr_cpu_ids)
		return;

	if (irq_prepare_move(irq, cpu))
		return;

	dest = cpu_physical_id(cpu);

	if (!iosapic_intr_info[irq].count)
		return;			/* not an IOSAPIC interrupt */

	set_irq_affinity_info(irq, dest, redir);

	/* dest contains both id and eid */
	high32 = dest << IOSAPIC_DEST_SHIFT;

	low32 = iosapic_intr_info[irq].low32 & ~(7 << IOSAPIC_DELIVERY_SHIFT);
	if (redir)
		/* change delivery mode to lowest priority */
		low32 |= (IOSAPIC_LOWEST_PRIORITY << IOSAPIC_DELIVERY_SHIFT);
	else
		/* change delivery mode to fixed */
		low32 |= (IOSAPIC_FIXED << IOSAPIC_DELIVERY_SHIFT);
	low32 &= IOSAPIC_VECTOR_MASK;
	low32 |= irq_to_vector(irq);

	iosapic_intr_info[irq].low32 = low32;
	iosapic_intr_info[irq].dest = dest;
	list_for_each_entry(rte, &iosapic_intr_info[irq].rtes, rte_list) {
		iosapic = rte->iosapic;
		rte_index = rte->rte_index;
		iosapic_write(iosapic, IOSAPIC_RTE_HIGH(rte_index), high32);
		iosapic_write(iosapic, IOSAPIC_RTE_LOW(rte_index), low32);
	}
#endif
}

/*
 * Handlers for level-triggered interrupts.
 */

static unsigned int
iosapic_startup_level_irq (unsigned int irq)
{
	unmask_irq(irq);
	return 0;
}

static void
iosapic_end_level_irq (unsigned int irq)
{
	ia64_vector vec = irq_to_vector(irq);
	struct iosapic_rte_info *rte;
	int do_unmask_irq = 0;

	irq_complete_move(irq);
	if (unlikely(irq_desc[irq].status & IRQ_MOVE_PENDING)) {
		do_unmask_irq = 1;
		mask_irq(irq);
	}

	list_for_each_entry(rte, &iosapic_intr_info[irq].rtes, rte_list)
		iosapic_eoi(rte->iosapic->addr, vec);

	if (unlikely(do_unmask_irq)) {
		move_masked_irq(irq);
		unmask_irq(irq);
	}
}

#define iosapic_shutdown_level_irq	mask_irq
#define iosapic_enable_level_irq	unmask_irq
#define iosapic_disable_level_irq	mask_irq
#define iosapic_ack_level_irq		nop

static struct irq_chip irq_type_iosapic_level = {
	.name =		"IO-SAPIC-level",
	.startup =	iosapic_startup_level_irq,
	.shutdown =	iosapic_shutdown_level_irq,
	.enable =	iosapic_enable_level_irq,
	.disable =	iosapic_disable_level_irq,
	.ack =		iosapic_ack_level_irq,
	.end =		iosapic_end_level_irq,
	.mask =		mask_irq,
	.unmask =	unmask_irq,
	.set_affinity =	iosapic_set_affinity
};

/*
 * Handlers for edge-triggered interrupts.
 */

static unsigned int
iosapic_startup_edge_irq (unsigned int irq)
{
	unmask_irq(irq);
	/*
	 * IOSAPIC simply drops interrupts pended while the
	 * corresponding pin was masked, so we can't know if an
	 * interrupt is pending already.  Let's hope not...
	 */
	return 0;
}

static void
iosapic_ack_edge_irq (unsigned int irq)
{
	irq_desc_t *idesc = irq_desc + irq;

	irq_complete_move(irq);
	move_native_irq(irq);
	/*
	 * Once we have recorded IRQ_PENDING already, we can mask the
	 * interrupt for real.  This prevents IRQ storms from unhandled
	 * devices.
	 */
	if ((idesc->status & (IRQ_PENDING|IRQ_DISABLED)) ==
	    (IRQ_PENDING|IRQ_DISABLED))
		mask_irq(irq);
}

#define iosapic_enable_edge_irq		unmask_irq
#define iosapic_disable_edge_irq	nop
#define iosapic_end_edge_irq		nop

static struct irq_chip irq_type_iosapic_edge = {
	.name =		"IO-SAPIC-edge",
	.startup =	iosapic_startup_edge_irq,
	.shutdown =	iosapic_disable_edge_irq,
	.enable =	iosapic_enable_edge_irq,
	.disable =	iosapic_disable_edge_irq,
	.ack =		iosapic_ack_edge_irq,
	.end =		iosapic_end_edge_irq,
	.mask =		mask_irq,
	.unmask =	unmask_irq,
	.set_affinity =	iosapic_set_affinity
};

static unsigned int
iosapic_version (char __iomem *addr)
{
	/*
	 * The IOSAPIC Version Register returns a 32-bit structure like:
	 *	{
	 *		unsigned int version   : 8;
	 *		unsigned int reserved1 : 8;
	 *		unsigned int max_redir : 8;
	 *		unsigned int reserved2 : 8;
	 *	}
	 */
	return __iosapic_read(addr, IOSAPIC_VERSION);
}

static int iosapic_find_sharable_irq(unsigned long trigger, unsigned long pol)
{
	int i, irq = -ENOSPC, min_count = -1;
	struct iosapic_intr_info *info;

	/*
	 * shared vectors for edge-triggered interrupts are not
	 * supported yet
	 */
	if (trigger == IOSAPIC_EDGE)
		return -EINVAL;

	for (i = 0; i < NR_IRQS; i++) {
		info = &iosapic_intr_info[i];
		if (info->trigger == trigger && info->polarity == pol &&
		    (info->dmode == IOSAPIC_FIXED ||
		     info->dmode == IOSAPIC_LOWEST_PRIORITY) &&
		    can_request_irq(i, IRQF_SHARED)) {
			if (min_count == -1 || info->count < min_count) {
				irq = i;
				min_count = info->count;
			}
		}
	}
	return irq;
}

/*
 * If the given vector is already owned by another interrupt, assign a new
 * vector to that interrupt and make the requested vector available.
 */
static void __init
iosapic_reassign_vector (int irq)
{
	int new_irq;

	if (iosapic_intr_info[irq].count) {
		new_irq = create_irq();
		if (new_irq < 0)
			panic("%s: out of interrupt vectors!\n", __func__);
		printk(KERN_INFO "Reassigning vector %d to %d\n",
		       irq_to_vector(irq), irq_to_vector(new_irq));
		memcpy(&iosapic_intr_info[new_irq], &iosapic_intr_info[irq],
		       sizeof(struct iosapic_intr_info));
		INIT_LIST_HEAD(&iosapic_intr_info[new_irq].rtes);
		list_move(iosapic_intr_info[irq].rtes.next,
			  &iosapic_intr_info[new_irq].rtes);
		memset(&iosapic_intr_info[irq], 0,
		       sizeof(struct iosapic_intr_info));
		iosapic_intr_info[irq].low32 = IOSAPIC_MASK;
		INIT_LIST_HEAD(&iosapic_intr_info[irq].rtes);
	}
}
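
/*
 * Allocate an RTE descriptor.  Early in boot, before kmalloc is usable
 * (iosapic_kmalloc_ok == 0), descriptors are carved out of a bootmem block
 * and kept on free_rte_list; afterwards they come from kmalloc(GFP_ATOMIC).
 */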
static struct iosapic_rte_info * __init_refok iosapic_alloc_rte (void)
{
	int i;
	struct iosapic_rte_info *rte;
	int preallocated = 0;

	if (!iosapic_kmalloc_ok && list_empty(&free_rte_list)) {
		rte = alloc_bootmem(sizeof(struct iosapic_rte_info) *
				    NR_PREALLOCATE_RTE_ENTRIES);
		for (i = 0; i < NR_PREALLOCATE_RTE_ENTRIES; i++, rte++)
			list_add(&rte->rte_list, &free_rte_list);
	}

	if (!list_empty(&free_rte_list)) {
		rte = list_entry(free_rte_list.next, struct iosapic_rte_info,
				 rte_list);
		list_del(&rte->rte_list);
		preallocated++;
	} else {
		rte = kmalloc(sizeof(struct iosapic_rte_info), GFP_ATOMIC);
		if (!rte)
			return NULL;
	}

	memset(rte, 0, sizeof(struct iosapic_rte_info));
	if (preallocated)
		rte->flags |= RTE_PREALLOCATED;

	return rte;
}

static inline int irq_is_shared (int irq)
{
	return (iosapic_intr_info[irq].count > 1);
}

struct irq_chip*
ia64_native_iosapic_get_irq_chip(unsigned long trigger)
{
	if (trigger == IOSAPIC_EDGE)
		return &irq_type_iosapic_edge;
	else
		return &irq_type_iosapic_level;
}
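
/*
 * Low-level registration for a (gsi, irq) pair: locate the owning IOSAPIC,
 * attach a new RTE (or bump the refcount of an unreferenced one), record
 * polarity/trigger/delivery mode, and install the matching edge or level
 * irq_chip for the IRQ.
 */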
static int
register_intr (unsigned int gsi, int irq, unsigned char delivery,
	       unsigned long polarity, unsigned long trigger)
{
	irq_desc_t *idesc;
	struct hw_interrupt_type *irq_type;
	int index;
	struct iosapic_rte_info *rte;

	index = find_iosapic(gsi);
	if (index < 0) {
		printk(KERN_WARNING "%s: No IOSAPIC for GSI %u\n",
		       __func__, gsi);
		return -ENODEV;
	}

	rte = find_rte(irq, gsi);
	if (!rte) {
		rte = iosapic_alloc_rte();
		if (!rte) {
			printk(KERN_WARNING "%s: cannot allocate memory\n",
			       __func__);
			return -ENOMEM;
		}

		rte->iosapic	= &iosapic_lists[index];
		rte->rte_index	= gsi - rte->iosapic->gsi_base;
		rte->refcnt++;
		list_add_tail(&rte->rte_list, &iosapic_intr_info[irq].rtes);
		iosapic_intr_info[irq].count++;
		iosapic_lists[index].rtes_inuse++;
	}
	else if (rte->refcnt == NO_REF_RTE) {
		struct iosapic_intr_info *info = &iosapic_intr_info[irq];
		if (info->count > 0 &&
		    (info->trigger != trigger || info->polarity != polarity)){
			printk (KERN_WARNING
				"%s: cannot override the interrupt\n",
				__func__);
			return -EINVAL;
		}
		rte->refcnt++;
		iosapic_intr_info[irq].count++;
		iosapic_lists[index].rtes_inuse++;
	}

	iosapic_intr_info[irq].polarity = polarity;
	iosapic_intr_info[irq].dmode    = delivery;
	iosapic_intr_info[irq].trigger  = trigger;

	irq_type = iosapic_get_irq_chip(trigger);

	idesc = irq_desc + irq;
	if (irq_type != NULL && idesc->chip != irq_type) {
		if (idesc->chip != &no_irq_type)
			printk(KERN_WARNING
			       "%s: changing vector %d from %s to %s\n",
			       __func__, irq_to_vector(irq),
			       idesc->chip->name, irq_type->name);
		idesc->chip = irq_type;
	}
	return 0;
}
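
/*
 * Pick the destination CPU for a new RTE.  A vector that is already in use
 * keeps its current destination; otherwise the choice falls back to XTP
 * redirection, the CPEI target, a CPU on the IOSAPIC's NUMA node, or plain
 * round-robin over the online CPUs in the IRQ's domain.
 */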
static unsigned int
get_target_cpu (unsigned int gsi, int irq)
{
#ifdef CONFIG_SMP
	static int cpu = -1;
	extern int cpe_vector;
	cpumask_t domain = irq_to_domain(irq);

	/*
	 * In case of vector shared by multiple RTEs, all RTEs that
	 * share the vector need to use the same destination CPU.
	 */
	if (iosapic_intr_info[irq].count)
		return iosapic_intr_info[irq].dest;

	/*
	 * If the platform supports redirection via XTP, let it
	 * distribute interrupts.
	 */
	if (smp_int_redirect & SMP_IRQ_REDIRECTION)
		return cpu_physical_id(smp_processor_id());

	/*
	 * Some interrupts (ACPI SCI, for instance) are registered
	 * before the BSP is marked as online.
	 */
	if (!cpu_online(smp_processor_id()))
		return cpu_physical_id(smp_processor_id());

#ifdef CONFIG_ACPI
	if (cpe_vector > 0 && irq_to_vector(irq) == IA64_CPEP_VECTOR)
		return get_cpei_target_cpu();
#endif

#ifdef CONFIG_NUMA
	{
		int num_cpus, cpu_index, iosapic_index, numa_cpu, i = 0;
		const struct cpumask *cpu_mask;

		iosapic_index = find_iosapic(gsi);
		if (iosapic_index < 0 ||
		    iosapic_lists[iosapic_index].node == MAX_NUMNODES)
			goto skip_numa_setup;

		cpu_mask = cpumask_of_node(iosapic_lists[iosapic_index].node);
		num_cpus = 0;
		for_each_cpu_and(numa_cpu, cpu_mask, &domain) {
			if (cpu_online(numa_cpu))
				num_cpus++;
		}

		if (!num_cpus)
			goto skip_numa_setup;

		/* Use irq assignment to distribute across cpus in node */
		cpu_index = irq % num_cpus;

		for_each_cpu_and(numa_cpu, cpu_mask, &domain)
			if (cpu_online(numa_cpu) && i++ >= cpu_index)
				break;

		if (numa_cpu < nr_cpu_ids)
			return cpu_physical_id(numa_cpu);
	}
skip_numa_setup:
#endif
	/*
	 * Otherwise, round-robin interrupt vectors across all the
	 * processors.  (It'd be nice if we could be smarter in the
	 * case of NUMA.)
	 */
	do {
		if (++cpu >= nr_cpu_ids)
			cpu = 0;
	} while (!cpu_online(cpu) || !cpu_isset(cpu, domain));

	return cpu_physical_id(cpu);
#else  /* CONFIG_SMP */
	return cpu_physical_id(smp_processor_id());
#endif
}

static inline unsigned char choose_dmode(void)
{
#ifdef CONFIG_SMP
	if (smp_int_redirect & SMP_IRQ_REDIRECTION)
		return IOSAPIC_LOWEST_PRIORITY;
#endif
	return IOSAPIC_FIXED;
}

/*
 * ACPI can describe IOSAPIC interrupts via static tables and namespace
 * methods.  This provides an interface to register those interrupts and
 * program the IOSAPIC RTE.
 */
int
iosapic_register_intr (unsigned int gsi,
		       unsigned long polarity, unsigned long trigger)
{
	int irq, mask = 1, err;
	unsigned int dest;
	unsigned long flags;
	struct iosapic_rte_info *rte;
	u32 low32;
	unsigned char dmode;

	/*
	 * If this GSI has already been registered (i.e., it's a
	 * shared interrupt, or we lost a race to register it),
	 * don't touch the RTE.
	 */
	spin_lock_irqsave(&iosapic_lock, flags);
	irq = __gsi_to_irq(gsi);
	if (irq > 0) {
		rte = find_rte(irq, gsi);
		if(iosapic_intr_info[irq].count == 0) {
			assign_irq_vector(irq);
			dynamic_irq_init(irq);
		} else if (rte->refcnt != NO_REF_RTE) {
			rte->refcnt++;
			goto unlock_iosapic_lock;
		}
	} else
		irq = create_irq();

	/* If we are running out of vectors, try to find a sharable one */
	if (irq < 0) {
		irq = iosapic_find_sharable_irq(trigger, polarity);
		if (irq < 0)
			goto unlock_iosapic_lock;
	}

	spin_lock(&irq_desc[irq].lock);
	dest = get_target_cpu(gsi, irq);
	dmode = choose_dmode();
	err = register_intr(gsi, irq, dmode, polarity, trigger);
	if (err < 0) {
		spin_unlock(&irq_desc[irq].lock);
		irq = err;
		goto unlock_iosapic_lock;
	}

	/*
	 * If the vector is shared and already unmasked for other
	 * interrupt sources, don't mask it.
	 */
	low32 = iosapic_intr_info[irq].low32;
	if (irq_is_shared(irq) && !(low32 & IOSAPIC_MASK))
		mask = 0;
	set_rte(gsi, irq, dest, mask);

	printk(KERN_INFO "GSI %u (%s, %s) -> CPU %d (0x%04x) vector %d\n",
	       gsi, (trigger == IOSAPIC_EDGE ? "edge" : "level"),
	       (polarity == IOSAPIC_POL_HIGH ? "high" : "low"),
	       cpu_logical_id(dest), dest, irq_to_vector(irq));

	spin_unlock(&irq_desc[irq].lock);
 unlock_iosapic_lock:
	spin_unlock_irqrestore(&iosapic_lock, flags);
	return irq;
}

void
iosapic_unregister_intr (unsigned int gsi)
{
	unsigned long flags;
	int irq, index;
	irq_desc_t *idesc;
	u32 low32;
	unsigned long trigger, polarity;
	unsigned int dest;
	struct iosapic_rte_info *rte;

	/*
	 * If the irq associated with the gsi is not found,
	 * iosapic_unregister_intr() is unbalanced.  We need to check
	 * this again after getting locks.
	 */
	irq = gsi_to_irq(gsi);
	if (irq < 0) {
		printk(KERN_ERR "iosapic_unregister_intr(%u) unbalanced\n",
		       gsi);
		WARN_ON(1);
		return;
	}

	spin_lock_irqsave(&iosapic_lock, flags);
	if ((rte = find_rte(irq, gsi)) == NULL) {
		printk(KERN_ERR "iosapic_unregister_intr(%u) unbalanced\n",
		       gsi);
		WARN_ON(1);
		goto out;
	}

	if (--rte->refcnt > 0)
		goto out;

	idesc = irq_desc + irq;
	rte->refcnt = NO_REF_RTE;

	/* Mask the interrupt */
	low32 = iosapic_intr_info[irq].low32 | IOSAPIC_MASK;
	iosapic_write(rte->iosapic, IOSAPIC_RTE_LOW(rte->rte_index), low32);

	iosapic_intr_info[irq].count--;
	index = find_iosapic(gsi);
	iosapic_lists[index].rtes_inuse--;
	WARN_ON(iosapic_lists[index].rtes_inuse < 0);

	trigger	 = iosapic_intr_info[irq].trigger;
	polarity = iosapic_intr_info[irq].polarity;
	dest     = iosapic_intr_info[irq].dest;
	printk(KERN_INFO
	       "GSI %u (%s, %s) -> CPU %d (0x%04x) vector %d unregistered\n",
	       gsi, (trigger == IOSAPIC_EDGE ? "edge" : "level"),
	       (polarity == IOSAPIC_POL_HIGH ? "high" : "low"),
	       cpu_logical_id(dest), dest, irq_to_vector(irq));

	if (iosapic_intr_info[irq].count == 0) {
#ifdef CONFIG_SMP
		/* Clear affinity */
		cpumask_setall(idesc->affinity);
#endif
		/* Clear the interrupt information */
		iosapic_intr_info[irq].dest = 0;
		iosapic_intr_info[irq].dmode = 0;
		iosapic_intr_info[irq].polarity = 0;
		iosapic_intr_info[irq].trigger = 0;
		iosapic_intr_info[irq].low32 |= IOSAPIC_MASK;

		/* Destroy and reserve IRQ */
		destroy_and_reserve_irq(irq);
	}
 out:
	spin_unlock_irqrestore(&iosapic_lock, flags);
}

/*
 * ACPI calls this when it finds an entry for a platform interrupt.
 */
int __init
iosapic_register_platform_intr (u32 int_type, unsigned int gsi,
				int iosapic_vector, u16 eid, u16 id,
				unsigned long polarity, unsigned long trigger)
{
	static const char * const name[] = {"unknown", "PMI", "INIT", "CPEI"};
	unsigned char delivery;
	int irq, vector, mask = 0;
	unsigned int dest = ((id << 8) | eid) & 0xffff;

	switch (int_type) {
	case ACPI_INTERRUPT_PMI:
		irq = vector = iosapic_vector;
		bind_irq_vector(irq, vector, CPU_MASK_ALL);
		/*
		 * since the PMI vector is alloc'd by FW (ACPI), not by the
		 * kernel, we need to make sure the vector is available
		 */
		iosapic_reassign_vector(irq);
		delivery = IOSAPIC_PMI;
		break;
	case ACPI_INTERRUPT_INIT:
		irq = create_irq();
		if (irq < 0)
			panic("%s: out of interrupt vectors!\n", __func__);
		vector = irq_to_vector(irq);
		delivery = IOSAPIC_INIT;
		break;
	case ACPI_INTERRUPT_CPEI:
		irq = vector = IA64_CPE_VECTOR;
		BUG_ON(bind_irq_vector(irq, vector, CPU_MASK_ALL));
		delivery = IOSAPIC_FIXED;
		mask = 1;
		break;
	default:
		printk(KERN_ERR "%s: invalid int type 0x%x\n", __func__,
		       int_type);
		return -1;
	}

	register_intr(gsi, irq, delivery, polarity, trigger);

	printk(KERN_INFO
	       "PLATFORM int %s (0x%x): GSI %u (%s, %s) -> CPU %d (0x%04x)"
	       " vector %d\n",
	       int_type < ARRAY_SIZE(name) ? name[int_type] : "unknown",
	       int_type, gsi, (trigger == IOSAPIC_EDGE ? "edge" : "level"),
	       (polarity == IOSAPIC_POL_HIGH ? "high" : "low"),
	       cpu_logical_id(dest), dest, vector);

	set_rte(gsi, irq, dest, mask);
	return vector;
}

/*
 * ACPI calls this when it finds an entry for a legacy ISA IRQ override.
 */
void __devinit
iosapic_override_isa_irq (unsigned int isa_irq, unsigned int gsi,
			  unsigned long polarity,
			  unsigned long trigger)
{
	int vector, irq;
	unsigned int dest = cpu_physical_id(smp_processor_id());
	unsigned char dmode;

	irq = vector = isa_irq_to_vector(isa_irq);
	BUG_ON(bind_irq_vector(irq, vector, CPU_MASK_ALL));
	dmode = choose_dmode();
	register_intr(gsi, irq, dmode, polarity, trigger);

	DBG("ISA: IRQ %u -> GSI %u (%s,%s) -> CPU %d (0x%04x) vector %d\n",
	    isa_irq, gsi, trigger == IOSAPIC_EDGE ? "edge" : "level",
	    polarity == IOSAPIC_POL_HIGH ? "high" : "low",
	    cpu_logical_id(dest), dest, vector);

	set_rte(gsi, irq, dest, 1);
}

void __init
ia64_native_iosapic_pcat_compat_init(void)
{
	if (pcat_compat) {
		/*
		 * Disable the compatibility mode interrupts (8259 style),
		 * needs IN/OUT support enabled.
		 */
		printk(KERN_INFO
		       "%s: Disabling PC-AT compatible 8259 interrupts\n",
		       __func__);
		outb(0xff, 0xA1);
		outb(0xff, 0x21);
	}
}

void __init
iosapic_system_init (int system_pcat_compat)
{
	int irq;

	for (irq = 0; irq < NR_IRQS; ++irq) {
		iosapic_intr_info[irq].low32 = IOSAPIC_MASK;
		/* mark as unused */
		INIT_LIST_HEAD(&iosapic_intr_info[irq].rtes);

		iosapic_intr_info[irq].count = 0;
	}

	pcat_compat = system_pcat_compat;
	if (pcat_compat)
		iosapic_pcat_compat_init();
}

static inline int
iosapic_alloc (void)
{
	int index;

	for (index = 0; index < NR_IOSAPICS; index++)
		if (!iosapic_lists[index].addr)
			return index;

	printk(KERN_WARNING "%s: failed to allocate iosapic\n", __func__);
	return -1;
}

static inline void
iosapic_free (int index)
{
	memset(&iosapic_lists[index], 0, sizeof(iosapic_lists[0]));
}

static inline int
iosapic_check_gsi_range (unsigned int gsi_base, unsigned int ver)
{
	int index;
	unsigned int gsi_end, base, end;

	/* check gsi range */
	gsi_end = gsi_base + ((ver >> 16) & 0xff);
	for (index = 0; index < NR_IOSAPICS; index++) {
		if (!iosapic_lists[index].addr)
			continue;

		base = iosapic_lists[index].gsi_base;
		end  = base + iosapic_lists[index].num_rte - 1;

		if (gsi_end < base || end < gsi_base)
			continue;	/* OK */

		return -EBUSY;
	}
	return 0;
}
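
/*
 * Register a new IOSAPIC: map its registers, derive the number of RTEs from
 * the version register, record it in iosapic_lists[], and, on a pcat_compat
 * system, pre-program the 16 legacy ISA IRQs when this is the IOSAPIC with
 * GSI base 0.
 */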
int __devinit
iosapic_init (unsigned long phys_addr, unsigned int gsi_base)
{
	int num_rte, err, index;
	unsigned int isa_irq, ver;
	char __iomem *addr;
	unsigned long flags;

	spin_lock_irqsave(&iosapic_lock, flags);
	index = find_iosapic(gsi_base);
	if (index >= 0) {
		spin_unlock_irqrestore(&iosapic_lock, flags);
		return -EBUSY;
	}

	addr = ioremap(phys_addr, 0);
	ver = iosapic_version(addr);
	if ((err = iosapic_check_gsi_range(gsi_base, ver))) {
		iounmap(addr);
		spin_unlock_irqrestore(&iosapic_lock, flags);
		return err;
	}

	/*
	 * The MAX_REDIR register holds the highest input pin number
	 * (starting from 0).  We add 1 so that we can use it for
	 * the number of pins (= RTEs).
	 */
	num_rte = ((ver >> 16) & 0xff) + 1;

	index = iosapic_alloc();
	iosapic_lists[index].addr = addr;
	iosapic_lists[index].gsi_base = gsi_base;
	iosapic_lists[index].num_rte = num_rte;
#ifdef CONFIG_NUMA
	iosapic_lists[index].node = MAX_NUMNODES;
#endif
	spin_lock_init(&iosapic_lists[index].lock);
	spin_unlock_irqrestore(&iosapic_lock, flags);

	if ((gsi_base == 0) && pcat_compat) {
		/*
		 * Map the legacy ISA devices into the IOSAPIC data.  Some of
		 * these may get reprogrammed later on with data from the ACPI
		 * Interrupt Source Override table.
		 */
		for (isa_irq = 0; isa_irq < 16; ++isa_irq)
			iosapic_override_isa_irq(isa_irq, isa_irq,
						 IOSAPIC_POL_HIGH,
						 IOSAPIC_EDGE);
	}
	return 0;
}

#ifdef CONFIG_HOTPLUG
int
iosapic_remove (unsigned int gsi_base)
{
	int index, err = 0;
	unsigned long flags;

	spin_lock_irqsave(&iosapic_lock, flags);
	index = find_iosapic(gsi_base);
	if (index < 0) {
		printk(KERN_WARNING "%s: No IOSAPIC for GSI base %u\n",
		       __func__, gsi_base);
		goto out;
	}

	if (iosapic_lists[index].rtes_inuse) {
		err = -EBUSY;
		printk(KERN_WARNING "%s: IOSAPIC for GSI base %u is busy\n",
		       __func__, gsi_base);
		goto out;
	}

	iounmap(iosapic_lists[index].addr);
	iosapic_free(index);
 out:
	spin_unlock_irqrestore(&iosapic_lock, flags);
	return err;
}
#endif /* CONFIG_HOTPLUG */

#ifdef CONFIG_NUMA
void __devinit
map_iosapic_to_node(unsigned int gsi_base, int node)
{
	int index;

	index = find_iosapic(gsi_base);
	if (index < 0) {
		printk(KERN_WARNING "%s: No IOSAPIC for GSI %u\n",
		       __func__, gsi_base);
		return;
	}
	iosapic_lists[index].node = node;
	return;
}
#endif
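
/*
 * Once the slab allocator is up, flip iosapic_kmalloc_ok so that further RTE
 * descriptors come from kmalloc() instead of the bootmem pool (see
 * iosapic_alloc_rte()).
 */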
static int __init iosapic_enable_kmalloc (void)
{
	iosapic_kmalloc_ok = 1;
	return 0;
}
core_initcall (iosapic_enable_kmalloc);