/*
 * Xen event channels
 *
 * Xen models interrupts with abstract event channels. Because each
 * domain gets 1024 event channels, but NR_IRQS is not that large, we
 * must dynamically map irqs<->event channels. The event channels
 * interface with the rest of the kernel by defining a xen interrupt
 * chip. When an event is received, it is mapped to an irq and sent
 * through the normal interrupt processing path.
 *
 * There are four kinds of events which can be mapped to an event
 * channel:
 *
 * 1. Inter-domain notifications. This includes all the virtual
 *    device events, since they're driven by front-ends in another domain
 *    (typically dom0).
 * 2. VIRQs, typically used for timers. These are per-cpu events.
 * 3. IPIs.
 * 4. PIRQs - Hardware interrupts.
 *
 * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007
 */
#include <linux/linkage.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/bootmem.h>
#include <linux/slab.h>
#include <linux/irqnr.h>
#include <linux/pci.h>

#include <asm/desc.h>
#include <asm/ptrace.h>
#include <asm/irq.h>
#include <asm/idle.h>
#include <asm/io_apic.h>
#include <asm/sync_bitops.h>
#include <asm/xen/pci.h>
#include <asm/xen/hypercall.h>
#include <asm/xen/hypervisor.h>

#include <xen/xen.h>
#include <xen/hvm.h>
#include <xen/xen-ops.h>
#include <xen/events.h>
#include <xen/interface/xen.h>
#include <xen/interface/event_channel.h>
#include <xen/interface/hvm/hvm_op.h>
#include <xen/interface/hvm/params.h>

/*
 * This lock protects updates to the following mapping and reference-count
 * arrays. The lock does not need to be acquired to read the mapping tables.
 */
static DEFINE_SPINLOCK(irq_mapping_update_lock);

/* IRQ <-> VIRQ mapping. */
static DEFINE_PER_CPU(int [NR_VIRQS], virq_to_irq) = {[0 ... NR_VIRQS-1] = -1};

/* IRQ <-> IPI mapping */
static DEFINE_PER_CPU(int [XEN_NR_IPIS], ipi_to_irq) = {[0 ... XEN_NR_IPIS-1] = -1};

/* Interrupt types. */
enum xen_irq_type {
	IRQT_UNBOUND = 0,
	IRQT_PIRQ,
	IRQT_VIRQ,
	IRQT_IPI,
	IRQT_EVTCHN
};
/*
 * Packed IRQ information:
 * type - enum xen_irq_type
 * event channel - irq->event channel mapping
 * cpu - cpu this event channel is bound to
 * index - type-specific information:
 *    PIRQ - vector, with MSB being "needs EOI", or physical IRQ of the HVM
 *           guest, or GSI (real passthrough IRQ) of the device.
 *    VIRQ - virq number
 *    IPI - IPI vector
 *    EVTCHN -
 */
struct irq_info
{
	enum xen_irq_type type;	/* type */
	unsigned short evtchn;	/* event channel */
	unsigned short cpu;	/* cpu bound */

	union {
		unsigned short virq;
		enum ipi_vector ipi;
		struct {
			unsigned short pirq;
			unsigned short gsi;
			unsigned char vector;
			unsigned char flags;
		} pirq;
	} u;
};
#define PIRQ_NEEDS_EOI	(1 << 0)
#define PIRQ_SHAREABLE	(1 << 1)

static struct irq_info *irq_info;
static int *pirq_to_irq;

static int *evtchn_to_irq;
struct cpu_evtchn_s {
	unsigned long bits[NR_EVENT_CHANNELS/BITS_PER_LONG];
};

static __initdata struct cpu_evtchn_s init_evtchn_mask = {
	.bits[0 ... (NR_EVENT_CHANNELS/BITS_PER_LONG)-1] = ~0ul,
};
static struct cpu_evtchn_s __refdata *cpu_evtchn_mask_p = &init_evtchn_mask;

static inline unsigned long *cpu_evtchn_mask(int cpu)
{
	return cpu_evtchn_mask_p[cpu].bits;
}

/* Xen will never allocate port zero for any purpose. */
#define VALID_EVTCHN(chn)	((chn) != 0)

static struct irq_chip xen_dynamic_chip;
static struct irq_chip xen_percpu_chip;
static struct irq_chip xen_pirq_chip;

/* Constructor for packed IRQ information. */
static struct irq_info mk_unbound_info(void)
{
	return (struct irq_info) { .type = IRQT_UNBOUND };
}

static struct irq_info mk_evtchn_info(unsigned short evtchn)
{
	return (struct irq_info) { .type = IRQT_EVTCHN, .evtchn = evtchn,
			.cpu = 0 };
}

static struct irq_info mk_ipi_info(unsigned short evtchn, enum ipi_vector ipi)
{
	return (struct irq_info) { .type = IRQT_IPI, .evtchn = evtchn,
			.cpu = 0, .u.ipi = ipi };
}

static struct irq_info mk_virq_info(unsigned short evtchn, unsigned short virq)
{
	return (struct irq_info) { .type = IRQT_VIRQ, .evtchn = evtchn,
			.cpu = 0, .u.virq = virq };
}

static struct irq_info mk_pirq_info(unsigned short evtchn, unsigned short pirq,
				    unsigned short gsi, unsigned short vector)
{
	return (struct irq_info) { .type = IRQT_PIRQ, .evtchn = evtchn,
			.cpu = 0,
			.u.pirq = { .pirq = pirq, .gsi = gsi, .vector = vector } };
}

/*
 * Accessors for packed IRQ information.
 */
static struct irq_info *info_for_irq(unsigned irq)
{
	return &irq_info[irq];
}

static unsigned int evtchn_from_irq(unsigned irq)
{
	if (unlikely(WARN(irq < 0 || irq >= nr_irqs, "Invalid irq %d!\n", irq)))
		return 0;

	return info_for_irq(irq)->evtchn;
}

unsigned irq_from_evtchn(unsigned int evtchn)
{
	return evtchn_to_irq[evtchn];
}
EXPORT_SYMBOL_GPL(irq_from_evtchn);

static enum ipi_vector ipi_from_irq(unsigned irq)
{
	struct irq_info *info = info_for_irq(irq);

	BUG_ON(info == NULL);
	BUG_ON(info->type != IRQT_IPI);

	return info->u.ipi;
}

static unsigned virq_from_irq(unsigned irq)
{
	struct irq_info *info = info_for_irq(irq);

	BUG_ON(info == NULL);
	BUG_ON(info->type != IRQT_VIRQ);

	return info->u.virq;
}

static unsigned pirq_from_irq(unsigned irq)
{
	struct irq_info *info = info_for_irq(irq);

	BUG_ON(info == NULL);
	BUG_ON(info->type != IRQT_PIRQ);

	return info->u.pirq.pirq;
}

static unsigned gsi_from_irq(unsigned irq)
{
	struct irq_info *info = info_for_irq(irq);

	BUG_ON(info == NULL);
	BUG_ON(info->type != IRQT_PIRQ);

	return info->u.pirq.gsi;
}

static unsigned vector_from_irq(unsigned irq)
{
	struct irq_info *info = info_for_irq(irq);

	BUG_ON(info == NULL);
	BUG_ON(info->type != IRQT_PIRQ);

	return info->u.pirq.vector;
}

static enum xen_irq_type type_from_irq(unsigned irq)
{
	return info_for_irq(irq)->type;
}

static unsigned cpu_from_irq(unsigned irq)
{
	return info_for_irq(irq)->cpu;
}

static unsigned int cpu_from_evtchn(unsigned int evtchn)
{
	int irq = evtchn_to_irq[evtchn];
	unsigned ret = 0;

	if (irq != -1)
		ret = cpu_from_irq(irq);

	return ret;
}

static bool pirq_needs_eoi(unsigned irq)
{
	struct irq_info *info = info_for_irq(irq);

	BUG_ON(info->type != IRQT_PIRQ);

	return info->u.pirq.flags & PIRQ_NEEDS_EOI;
}

static inline unsigned long active_evtchns(unsigned int cpu,
					   struct shared_info *sh,
					   unsigned int idx)
{
	return (sh->evtchn_pending[idx] &
		cpu_evtchn_mask(cpu)[idx] &
		~sh->evtchn_mask[idx]);
}
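/*
 * Record that an event channel is delivered to @cpu: update the per-cpu
 * selection bitmasks consulted by active_evtchns(), mirror the binding
 * into the irq's affinity mask on SMP, and note the cpu in irq_info.
 */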
static void bind_evtchn_to_cpu(unsigned int chn, unsigned int cpu)
{
	int irq = evtchn_to_irq[chn];

	BUG_ON(irq == -1);
#ifdef CONFIG_SMP
	cpumask_copy(irq_to_desc(irq)->irq_data.affinity, cpumask_of(cpu));
#endif

	clear_bit(chn, cpu_evtchn_mask(cpu_from_irq(irq)));
	set_bit(chn, cpu_evtchn_mask(cpu));

	irq_info[irq].cpu = cpu;
}

static void init_evtchn_cpu_bindings(void)
{
	int i;
#ifdef CONFIG_SMP
	struct irq_desc *desc;

	/* By default all event channels notify CPU#0. */
	for_each_irq_desc(i, desc) {
		cpumask_copy(desc->irq_data.affinity, cpumask_of(0));
	}
#endif

	for_each_possible_cpu(i)
		memset(cpu_evtchn_mask(i),
		       (i == 0) ? ~0 : 0, sizeof(struct cpu_evtchn_s));
}

static inline void clear_evtchn(int port)
{
	struct shared_info *s = HYPERVISOR_shared_info;
	sync_clear_bit(port, &s->evtchn_pending[0]);
}

static inline void set_evtchn(int port)
{
	struct shared_info *s = HYPERVISOR_shared_info;
	sync_set_bit(port, &s->evtchn_pending[0]);
}

static inline int test_evtchn(int port)
{
	struct shared_info *s = HYPERVISOR_shared_info;
	return sync_test_bit(port, &s->evtchn_pending[0]);
}

/**
 * notify_remote_via_irq - send event to remote end of event channel via irq
 * @irq: irq of event channel to send event to
 *
 * Unlike notify_remote_via_evtchn(), this is safe to use across
 * save/restore. Notifications on a broken connection are silently
 * dropped.
 */
void notify_remote_via_irq(int irq)
{
	int evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn))
		notify_remote_via_evtchn(evtchn);
}
EXPORT_SYMBOL_GPL(notify_remote_via_irq);

static void mask_evtchn(int port)
{
	struct shared_info *s = HYPERVISOR_shared_info;
	sync_set_bit(port, &s->evtchn_mask[0]);
}
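/*
 * Unmask an event channel. A port bound to another CPU can only be
 * unmasked via an EVTCHNOP_unmask hypercall; the local case clears the
 * mask bit directly and, if the port was already pending, re-raises the
 * upcall so the event is not lost.
 */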
static void unmask_evtchn(int port)
{
	struct shared_info *s = HYPERVISOR_shared_info;
	unsigned int cpu = get_cpu();

	BUG_ON(!irqs_disabled());

	/* Slow path (hypercall) if this is a non-local port. */
	if (unlikely(cpu != cpu_from_evtchn(port))) {
		struct evtchn_unmask unmask = { .port = port };
		(void)HYPERVISOR_event_channel_op(EVTCHNOP_unmask, &unmask);
	} else {
		struct vcpu_info *vcpu_info = __this_cpu_read(xen_vcpu);

		sync_clear_bit(port, &s->evtchn_mask[0]);

		/*
		 * The following is basically the equivalent of
		 * 'hw_resend_irq'. Just like a real IO-APIC we 'lose
		 * the interrupt edge' if the channel is masked.
		 */
		if (sync_test_bit(port, &s->evtchn_pending[0]) &&
		    !sync_test_and_set_bit(port / BITS_PER_LONG,
					   &vcpu_info->evtchn_pending_sel))
			vcpu_info->evtchn_upcall_pending = 1;
	}

	put_cpu();
}
static int xen_allocate_irq_dynamic(void)
{
	int first = 0;
	int irq;

#ifdef CONFIG_X86_IO_APIC
	/*
	 * For an HVM guest or domain 0 which see "real" (emulated or
	 * actual respectively) GSIs we allocate dynamic IRQs
	 * e.g. those corresponding to event channels or MSIs
	 * etc. from the range above those "real" GSIs to avoid
	 * collisions.
	 */
	if (xen_initial_domain() || xen_hvm_domain())
		first = get_nr_irqs_gsi();
#endif

retry:
	irq = irq_alloc_desc_from(first, -1);

	if (irq == -ENOMEM && first > NR_IRQS_LEGACY) {
		printk(KERN_ERR "Out of dynamic IRQ space and eating into GSI space. You should increase nr_irqs\n");
		first = max(NR_IRQS_LEGACY, first - NR_IRQS_LEGACY);
		goto retry;
	}

	if (irq < 0)
		panic("No available IRQ to bind to: increase nr_irqs!\n");

	return irq;
}
static int xen_allocate_irq_gsi(unsigned gsi)
{
	int irq;

	/*
	 * A PV guest has no concept of a GSI (since it has no ACPI
	 * nor access to/knowledge of the physical APICs). Therefore
	 * all IRQs are dynamically allocated from the entire IRQ
	 * space.
	 */
	if (xen_pv_domain() && !xen_initial_domain())
		return xen_allocate_irq_dynamic();

	/* Legacy IRQ descriptors are already allocated by the arch. */
	if (gsi < NR_IRQS_LEGACY)
		return gsi;

	irq = irq_alloc_desc_at(gsi, -1);
	if (irq < 0)
		panic("Unable to allocate IRQ%d (%d)\n", gsi, irq);

	return irq;
}

static void xen_free_irq(unsigned irq)
{
	/* Legacy IRQ descriptors are managed by the arch. */
	if (irq < NR_IRQS_LEGACY)
		return;

	irq_free_desc(irq);
}
static void pirq_unmask_notify(int irq)
{
	struct physdev_eoi eoi = { .irq = pirq_from_irq(irq) };

	if (unlikely(pirq_needs_eoi(irq))) {
		int rc = HYPERVISOR_physdev_op(PHYSDEVOP_eoi, &eoi);
		WARN_ON(rc);
	}
}
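/*
 * Ask the hypervisor whether this PIRQ requires an explicit EOI
 * notification and cache the answer in the PIRQ's flags, so that
 * pirq_unmask_notify() only issues PHYSDEVOP_eoi when needed.
 */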
static void pirq_query_unmask(int irq)
{
	struct physdev_irq_status_query irq_status;
	struct irq_info *info = info_for_irq(irq);

	BUG_ON(info->type != IRQT_PIRQ);

	irq_status.irq = pirq_from_irq(irq);
	if (HYPERVISOR_physdev_op(PHYSDEVOP_irq_status_query, &irq_status))
		irq_status.flags = 0;

	info->u.pirq.flags &= ~PIRQ_NEEDS_EOI;
	if (irq_status.flags & XENIRQSTAT_needs_eoi)
		info->u.pirq.flags |= PIRQ_NEEDS_EOI;
}

static bool probing_irq(int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	return desc && desc->action == NULL;
}
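/*
 * Bind a PIRQ to an event channel (EVTCHNOP_bind_pirq), record the
 * resulting port in the lookup tables and unmask it. Used both by the
 * irq_chip ->irq_startup hook and when PIRQs are re-established after
 * a restore.
 */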
static unsigned int __startup_pirq(unsigned int irq)
{
	struct evtchn_bind_pirq bind_pirq;
	struct irq_info *info = info_for_irq(irq);
	int evtchn = evtchn_from_irq(irq);
	int rc;

	BUG_ON(info->type != IRQT_PIRQ);

	if (VALID_EVTCHN(evtchn))
		goto out;

	bind_pirq.pirq = pirq_from_irq(irq);
	/* NB. We are happy to share unless we are probing. */
	bind_pirq.flags = info->u.pirq.flags & PIRQ_SHAREABLE ?
					BIND_PIRQ__WILL_SHARE : 0;
	rc = HYPERVISOR_event_channel_op(EVTCHNOP_bind_pirq, &bind_pirq);
	if (rc != 0) {
		if (!probing_irq(irq))
			printk(KERN_INFO "Failed to obtain physical IRQ %d\n",
			       irq);
		return 0;
	}
	evtchn = bind_pirq.port;

	pirq_query_unmask(irq);

	evtchn_to_irq[evtchn] = irq;
	bind_evtchn_to_cpu(evtchn, 0);
	info->evtchn = evtchn;

out:
	unmask_evtchn(evtchn);
	pirq_unmask_notify(irq);

	return 0;
}

static unsigned int startup_pirq(struct irq_data *data)
{
	return __startup_pirq(data->irq);
}

static void shutdown_pirq(struct irq_data *data)
{
	struct evtchn_close close;
	unsigned int irq = data->irq;
	struct irq_info *info = info_for_irq(irq);
	int evtchn = evtchn_from_irq(irq);

	BUG_ON(info->type != IRQT_PIRQ);

	if (!VALID_EVTCHN(evtchn))
		return;

	mask_evtchn(evtchn);

	close.port = evtchn;
	if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close) != 0)
		BUG();

	bind_evtchn_to_cpu(evtchn, 0);
	evtchn_to_irq[evtchn] = -1;
	info->evtchn = 0;
}

static void enable_pirq(struct irq_data *data)
{
	startup_pirq(data);
}

static void disable_pirq(struct irq_data *data)
{
}

static void ack_pirq(struct irq_data *data)
{
	int evtchn = evtchn_from_irq(data->irq);

	move_native_irq(data->irq);

	if (VALID_EVTCHN(evtchn)) {
		mask_evtchn(evtchn);
		clear_evtchn(evtchn);
	}
}
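/*
 * Linear scan of the irq_info table for an existing PIRQ binding of the
 * given GSI; returns the irq, or -1 if the GSI has not been mapped yet.
 */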
static int find_irq_by_gsi(unsigned gsi)
{
	int irq;

	for (irq = 0; irq < nr_irqs; irq++) {
		struct irq_info *info = info_for_irq(irq);

		if (info == NULL || info->type != IRQT_PIRQ)
			continue;

		if (gsi_from_irq(irq) == gsi)
			return irq;
	}

	return -1;
}

int xen_allocate_pirq(unsigned gsi, int shareable, char *name)
{
	return xen_map_pirq_gsi(gsi, gsi, shareable, name);
}
/* xen_map_pirq_gsi might allocate irqs from the top down, as a
 * consequence don't assume that the irq number returned has a low value
 * or can be used as a pirq number unless you know otherwise.
 *
 * One notable exception is when xen_map_pirq_gsi is called passing a
 * hardware GSI as argument, in which case the irq number returned
 * matches the gsi number passed as second argument.
 *
 * Note: We don't assign an event channel until the irq is actually
 * started up. Return an existing irq if we've already got one for the gsi.
 */
int xen_map_pirq_gsi(unsigned pirq, unsigned gsi, int shareable, char *name)
{
	int irq = 0;
	struct physdev_irq irq_op;

	spin_lock(&irq_mapping_update_lock);

	if ((pirq > nr_irqs) || (gsi > nr_irqs)) {
		printk(KERN_WARNING "xen_map_pirq_gsi: %s %s is incorrect!\n",
			pirq > nr_irqs ? "pirq" : "",
			gsi > nr_irqs ? "gsi" : "");
		goto out;
	}

	irq = find_irq_by_gsi(gsi);
	if (irq != -1) {
		printk(KERN_INFO "xen_map_pirq_gsi: returning irq %d for gsi %u\n",
		       irq, gsi);
		goto out;	/* XXX need refcount? */
	}

	irq = xen_allocate_irq_gsi(gsi);

	set_irq_chip_and_handler_name(irq, &xen_pirq_chip,
				      handle_level_irq, name);

	irq_op.irq = irq;
	irq_op.vector = 0;

	/* Only the privileged domain can do this. For non-priv, the pcifront
	 * driver provides a PCI bus that does the call to do exactly
	 * this in the priv domain. */
	if (xen_initial_domain() &&
	    HYPERVISOR_physdev_op(PHYSDEVOP_alloc_irq_vector, &irq_op)) {
		xen_free_irq(irq);
		irq = -ENOSPC;
		goto out;
	}

	irq_info[irq] = mk_pirq_info(0, pirq, gsi, irq_op.vector);
	irq_info[irq].u.pirq.flags |= shareable ? PIRQ_SHAREABLE : 0;
	pirq_to_irq[pirq] = irq;

out:
	spin_unlock(&irq_mapping_update_lock);

	return irq;
}

#ifdef CONFIG_PCI_MSI
int xen_allocate_pirq_msi(struct pci_dev *dev, struct msi_desc *msidesc)
{
	int rc;
	struct physdev_get_free_pirq op_get_free_pirq;

	op_get_free_pirq.type = MAP_PIRQ_TYPE_MSI;
	rc = HYPERVISOR_physdev_op(PHYSDEVOP_get_free_pirq, &op_get_free_pirq);

	WARN_ONCE(rc == -ENOSYS,
		  "hypervisor does not support the PHYSDEVOP_get_free_pirq interface\n");

	return rc ? -1 : op_get_free_pirq.pirq;
}

int xen_bind_pirq_msi_to_irq(struct pci_dev *dev, struct msi_desc *msidesc,
			     int pirq, int vector, const char *name)
{
	int irq, ret;

	spin_lock(&irq_mapping_update_lock);

	irq = xen_allocate_irq_dynamic();
	if (irq == -1)
		goto out;

	set_irq_chip_and_handler_name(irq, &xen_pirq_chip,
				      handle_level_irq, name);

	irq_info[irq] = mk_pirq_info(0, pirq, 0, vector);
	pirq_to_irq[pirq] = irq;
	ret = set_irq_msi(irq, msidesc);
	if (ret < 0)
		goto error_irq;
out:
	spin_unlock(&irq_mapping_update_lock);
	return irq;
error_irq:
	spin_unlock(&irq_mapping_update_lock);
	xen_free_irq(irq);
	return -1;
}
#endif
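/*
 * Tear down a PIRQ mapping: in the initial domain the PIRQ is also
 * unmapped from the hypervisor (PHYSDEVOP_unmap_pirq); the pirq_to_irq
 * entry and the Linux irq descriptor are then released.
 */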
int xen_destroy_irq(int irq)
{
	struct irq_desc *desc;
	struct physdev_unmap_pirq unmap_irq;
	struct irq_info *info = info_for_irq(irq);
	int rc = -ENOENT;

	spin_lock(&irq_mapping_update_lock);

	desc = irq_to_desc(irq);
	if (!desc)
		goto out;

	if (xen_initial_domain()) {
		unmap_irq.pirq = info->u.pirq.pirq;
		unmap_irq.domid = DOMID_SELF;
		rc = HYPERVISOR_physdev_op(PHYSDEVOP_unmap_pirq, &unmap_irq);
		if (rc) {
			printk(KERN_WARNING "unmap irq failed %d\n", rc);
			goto out;
		}
	}
	pirq_to_irq[info->u.pirq.pirq] = -1;

	irq_info[irq] = mk_unbound_info();

	xen_free_irq(irq);

out:
	spin_unlock(&irq_mapping_update_lock);
	return rc;
}

int xen_vector_from_irq(unsigned irq)
{
	return vector_from_irq(irq);
}

int xen_gsi_from_irq(unsigned irq)
{
	return gsi_from_irq(irq);
}

int xen_irq_from_pirq(unsigned pirq)
{
	return pirq_to_irq[pirq];
}
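/*
 * Map an inter-domain event channel to an irq, allocating a new dynamic
 * irq on first use; binding an already-mapped channel simply returns
 * the existing irq.
 */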
int bind_evtchn_to_irq(unsigned int evtchn)
{
	int irq;

	spin_lock(&irq_mapping_update_lock);

	irq = evtchn_to_irq[evtchn];

	if (irq == -1) {
		irq = xen_allocate_irq_dynamic();

		set_irq_chip_and_handler_name(irq, &xen_dynamic_chip,
					      handle_fasteoi_irq, "event");

		evtchn_to_irq[evtchn] = irq;
		irq_info[irq] = mk_evtchn_info(evtchn);
	}

	spin_unlock(&irq_mapping_update_lock);

	return irq;
}
EXPORT_SYMBOL_GPL(bind_evtchn_to_irq);

static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu)
{
	struct evtchn_bind_ipi bind_ipi;
	int evtchn, irq;

	spin_lock(&irq_mapping_update_lock);

	irq = per_cpu(ipi_to_irq, cpu)[ipi];

	if (irq == -1) {
		irq = xen_allocate_irq_dynamic();
		if (irq < 0)
			goto out;

		set_irq_chip_and_handler_name(irq, &xen_percpu_chip,
					      handle_percpu_irq, "ipi");

		bind_ipi.vcpu = cpu;
		if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi,
						&bind_ipi) != 0)
			BUG();
		evtchn = bind_ipi.port;

		evtchn_to_irq[evtchn] = irq;
		irq_info[irq] = mk_ipi_info(evtchn, ipi);
		per_cpu(ipi_to_irq, cpu)[ipi] = irq;

		bind_evtchn_to_cpu(evtchn, cpu);
	}

out:
	spin_unlock(&irq_mapping_update_lock);
	return irq;
}

int bind_virq_to_irq(unsigned int virq, unsigned int cpu)
{
	struct evtchn_bind_virq bind_virq;
	int evtchn, irq;

	spin_lock(&irq_mapping_update_lock);

	irq = per_cpu(virq_to_irq, cpu)[virq];

	if (irq == -1) {
		irq = xen_allocate_irq_dynamic();

		set_irq_chip_and_handler_name(irq, &xen_percpu_chip,
					      handle_percpu_irq, "virq");

		bind_virq.virq = virq;
		bind_virq.vcpu = cpu;
		if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq,
						&bind_virq) != 0)
			BUG();
		evtchn = bind_virq.port;

		evtchn_to_irq[evtchn] = irq;
		irq_info[irq] = mk_virq_info(evtchn, virq);

		per_cpu(virq_to_irq, cpu)[virq] = irq;

		bind_evtchn_to_cpu(evtchn, cpu);
	}

	spin_unlock(&irq_mapping_update_lock);

	return irq;
}
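/*
 * Drop the binding for an irq: close the event channel, clear the
 * per-cpu VIRQ/IPI lookup entries and the evtchn_to_irq mapping, and
 * free the irq descriptor.
 */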
static void unbind_from_irq(unsigned int irq)
{
	struct evtchn_close close;
	int evtchn = evtchn_from_irq(irq);

	spin_lock(&irq_mapping_update_lock);

	if (VALID_EVTCHN(evtchn)) {
		close.port = evtchn;
		if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close) != 0)
			BUG();

		switch (type_from_irq(irq)) {
		case IRQT_VIRQ:
			per_cpu(virq_to_irq, cpu_from_evtchn(evtchn))
				[virq_from_irq(irq)] = -1;
			break;
		case IRQT_IPI:
			per_cpu(ipi_to_irq, cpu_from_evtchn(evtchn))
				[ipi_from_irq(irq)] = -1;
			break;
		default:
			break;
		}

		/* Closed ports are implicitly re-bound to VCPU0. */
		bind_evtchn_to_cpu(evtchn, 0);

		evtchn_to_irq[evtchn] = -1;
	}

	if (irq_info[irq].type != IRQT_UNBOUND) {
		irq_info[irq] = mk_unbound_info();

		xen_free_irq(irq);
	}

	spin_unlock(&irq_mapping_update_lock);
}

int bind_evtchn_to_irqhandler(unsigned int evtchn,
			      irq_handler_t handler,
			      unsigned long irqflags,
			      const char *devname, void *dev_id)
{
	unsigned int irq;
	int retval;

	irq = bind_evtchn_to_irq(evtchn);
	retval = request_irq(irq, handler, irqflags, devname, dev_id);
	if (retval != 0) {
		unbind_from_irq(irq);
		return retval;
	}

	return irq;
}
EXPORT_SYMBOL_GPL(bind_evtchn_to_irqhandler);

int bind_virq_to_irqhandler(unsigned int virq, unsigned int cpu,
			    irq_handler_t handler,
			    unsigned long irqflags, const char *devname, void *dev_id)
{
	unsigned int irq;
	int retval;

	irq = bind_virq_to_irq(virq, cpu);
	retval = request_irq(irq, handler, irqflags, devname, dev_id);
	if (retval != 0) {
		unbind_from_irq(irq);
		return retval;
	}

	return irq;
}
EXPORT_SYMBOL_GPL(bind_virq_to_irqhandler);

int bind_ipi_to_irqhandler(enum ipi_vector ipi,
			   unsigned int cpu,
			   irq_handler_t handler,
			   unsigned long irqflags,
			   const char *devname,
			   void *dev_id)
{
	int irq, retval;

	irq = bind_ipi_to_irq(ipi, cpu);
	if (irq < 0)
		return irq;

	irqflags |= IRQF_NO_SUSPEND | IRQF_FORCE_RESUME;
	retval = request_irq(irq, handler, irqflags, devname, dev_id);
	if (retval != 0) {
		unbind_from_irq(irq);
		return retval;
	}

	return irq;
}

void unbind_from_irqhandler(unsigned int irq, void *dev_id)
{
	free_irq(irq, dev_id);
	unbind_from_irq(irq);
}
EXPORT_SYMBOL_GPL(unbind_from_irqhandler);

void xen_send_IPI_one(unsigned int cpu, enum ipi_vector vector)
{
	int irq = per_cpu(ipi_to_irq, cpu)[vector];
	BUG_ON(irq < 0);
	notify_remote_via_irq(irq);
}
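/*
 * Debug interrupt handler: dumps each vcpu's upcall state together with
 * the global and per-cpu event-channel pending/mask bitmaps, which is
 * useful when events appear to be lost or stuck.
 */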
irqreturn_t xen_debug_interrupt(int irq, void *dev_id)
{
	struct shared_info *sh = HYPERVISOR_shared_info;
	int cpu = smp_processor_id();
	unsigned long *cpu_evtchn = cpu_evtchn_mask(cpu);
	int i;
	unsigned long flags;
	static DEFINE_SPINLOCK(debug_lock);
	struct vcpu_info *v;

	spin_lock_irqsave(&debug_lock, flags);

	printk("\nvcpu %d\n ", cpu);

	for_each_online_cpu(i) {
		int pending;
		v = per_cpu(xen_vcpu, i);
		pending = (get_irq_regs() && i == cpu)
			? xen_irqs_disabled(get_irq_regs())
			: v->evtchn_upcall_mask;
		printk("%d: masked=%d pending=%d event_sel %0*lx\n ", i,
		       pending, v->evtchn_upcall_pending,
		       (int)(sizeof(v->evtchn_pending_sel)*2),
		       v->evtchn_pending_sel);
	}
	v = per_cpu(xen_vcpu, cpu);

	printk("\npending:\n ");
	for (i = ARRAY_SIZE(sh->evtchn_pending)-1; i >= 0; i--)
		printk("%0*lx%s", (int)sizeof(sh->evtchn_pending[0])*2,
		       sh->evtchn_pending[i],
		       i % 8 == 0 ? "\n " : " ");
	printk("\nglobal mask:\n ");
	for (i = ARRAY_SIZE(sh->evtchn_mask)-1; i >= 0; i--)
		printk("%0*lx%s",
		       (int)(sizeof(sh->evtchn_mask[0])*2),
		       sh->evtchn_mask[i],
		       i % 8 == 0 ? "\n " : " ");

	printk("\nglobally unmasked:\n ");
	for (i = ARRAY_SIZE(sh->evtchn_mask)-1; i >= 0; i--)
		printk("%0*lx%s", (int)(sizeof(sh->evtchn_mask[0])*2),
		       sh->evtchn_pending[i] & ~sh->evtchn_mask[i],
		       i % 8 == 0 ? "\n " : " ");

	printk("\nlocal cpu%d mask:\n ", cpu);
	for (i = (NR_EVENT_CHANNELS/BITS_PER_LONG)-1; i >= 0; i--)
		printk("%0*lx%s", (int)(sizeof(cpu_evtchn[0])*2),
		       cpu_evtchn[i],
		       i % 8 == 0 ? "\n " : " ");

	printk("\nlocally unmasked:\n ");
	for (i = ARRAY_SIZE(sh->evtchn_mask)-1; i >= 0; i--) {
		unsigned long pending = sh->evtchn_pending[i]
			& ~sh->evtchn_mask[i]
			& cpu_evtchn[i];
		printk("%0*lx%s", (int)(sizeof(sh->evtchn_mask[0])*2),
		       pending, i % 8 == 0 ? "\n " : " ");
	}

	printk("\npending list:\n");
	for (i = 0; i < NR_EVENT_CHANNELS; i++) {
		if (sync_test_bit(i, sh->evtchn_pending)) {
			int word_idx = i / BITS_PER_LONG;
			printk(" %d: event %d -> irq %d%s%s%s\n",
			       cpu_from_evtchn(i), i,
			       evtchn_to_irq[i],
			       sync_test_bit(word_idx, &v->evtchn_pending_sel)
			       ? "" : " l2-clear",
			       !sync_test_bit(i, sh->evtchn_mask)
			       ? "" : " globally-masked",
			       sync_test_bit(i, cpu_evtchn)
			       ? "" : " locally-masked");
		}
	}

	spin_unlock_irqrestore(&debug_lock, flags);

	return IRQ_HANDLED;
}
static DEFINE_PER_CPU(unsigned, xed_nesting_count);

/*
 * Search the CPU's pending events bitmasks. For each one found, map
 * the event number to an irq, and feed it into do_IRQ() for
 * handling.
 *
 * Xen uses a two-level bitmap to speed searching. The first level is
 * a bitset of words which contain pending event bits. The second
 * level is a bitset of pending events themselves.
 */
static void __xen_evtchn_do_upcall(void)
{
	int cpu = get_cpu();
	struct shared_info *s = HYPERVISOR_shared_info;
	struct vcpu_info *vcpu_info = __this_cpu_read(xen_vcpu);
	unsigned count;

	do {
		unsigned long pending_words;

		vcpu_info->evtchn_upcall_pending = 0;

		if (__this_cpu_inc_return(xed_nesting_count) - 1)
			goto out;

#ifndef CONFIG_X86 /* No need for a barrier -- XCHG is a barrier on x86. */
		/* Clear master flag /before/ clearing selector flag. */
		wmb();
#endif
		pending_words = xchg(&vcpu_info->evtchn_pending_sel, 0);
		while (pending_words != 0) {
			unsigned long pending_bits;
			int word_idx = __ffs(pending_words);
			pending_words &= ~(1UL << word_idx);

			while ((pending_bits = active_evtchns(cpu, s, word_idx)) != 0) {
				int bit_idx = __ffs(pending_bits);
				int port = (word_idx * BITS_PER_LONG) + bit_idx;
				int irq = evtchn_to_irq[port];
				struct irq_desc *desc;

				mask_evtchn(port);
				clear_evtchn(port);

				if (irq != -1) {
					desc = irq_to_desc(irq);
					if (desc)
						generic_handle_irq_desc(irq, desc);
				}
			}
		}

		BUG_ON(!irqs_disabled());

		count = __this_cpu_read(xed_nesting_count);
		__this_cpu_write(xed_nesting_count, 0);
	} while (count != 1 || vcpu_info->evtchn_upcall_pending);

out:
	put_cpu();
}

void xen_evtchn_do_upcall(struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);

	exit_idle();
	irq_enter();

	__xen_evtchn_do_upcall();

	irq_exit();
	set_irq_regs(old_regs);
}

void xen_hvm_evtchn_do_upcall(void)
{
	__xen_evtchn_do_upcall();
}
EXPORT_SYMBOL_GPL(xen_hvm_evtchn_do_upcall);

/* Rebind a new event channel to an existing irq. */
void rebind_evtchn_irq(int evtchn, int irq)
{
	struct irq_info *info = info_for_irq(irq);

	/* Make sure the irq is masked, since the new event channel
	   will also be masked. */
	disable_irq(irq);

	spin_lock(&irq_mapping_update_lock);

	/* After resume the irq<->evtchn mappings are all cleared out */
	BUG_ON(evtchn_to_irq[evtchn] != -1);
	/* Expect irq to have been bound before,
	   so there should be a proper type */
	BUG_ON(info->type == IRQT_UNBOUND);

	evtchn_to_irq[evtchn] = irq;
	irq_info[irq] = mk_evtchn_info(evtchn);

	spin_unlock(&irq_mapping_update_lock);

	/* new event channels are always bound to cpu 0 */
	irq_set_affinity(irq, cpumask_of(0));

	/* Unmask the event channel. */
	enable_irq(irq);
}

/* Rebind an evtchn so that it gets delivered to a specific cpu */
static int rebind_irq_to_cpu(unsigned irq, unsigned tcpu)
{
	struct evtchn_bind_vcpu bind_vcpu;
	int evtchn = evtchn_from_irq(irq);

	/* events delivered via platform PCI interrupts are always
	 * routed to vcpu 0 */
	if (!VALID_EVTCHN(evtchn) ||
	    (xen_hvm_domain() && !xen_have_vector_callback))
		return -1;

	/* Send future instances of this interrupt to other vcpu. */
	bind_vcpu.port = evtchn;
	bind_vcpu.vcpu = tcpu;

	/*
	 * If this fails, it usually just indicates that we're dealing with a
	 * virq or IPI channel, which don't actually need to be rebound. Ignore
	 * it, but don't do the xenlinux-level rebind in that case.
	 */
	if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_vcpu, &bind_vcpu) >= 0)
		bind_evtchn_to_cpu(evtchn, tcpu);

	return 0;
}

static int set_affinity_irq(struct irq_data *data, const struct cpumask *dest,
			    bool force)
{
	unsigned tcpu = cpumask_first(dest);

	return rebind_irq_to_cpu(data->irq, tcpu);
}
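/*
 * Re-raise the event backing an irq by setting its pending bit; if the
 * port was not already masked, unmask it so the upcall fires again.
 */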
int resend_irq_on_evtchn(unsigned int irq)
{
	int masked, evtchn = evtchn_from_irq(irq);
	struct shared_info *s = HYPERVISOR_shared_info;

	if (!VALID_EVTCHN(evtchn))
		return 1;

	masked = sync_test_and_set_bit(evtchn, s->evtchn_mask);
	sync_set_bit(evtchn, s->evtchn_pending);
	if (!masked)
		unmask_evtchn(evtchn);

	return 1;
}

static void enable_dynirq(struct irq_data *data)
{
	int evtchn = evtchn_from_irq(data->irq);

	if (VALID_EVTCHN(evtchn))
		unmask_evtchn(evtchn);
}

static void disable_dynirq(struct irq_data *data)
{
	int evtchn = evtchn_from_irq(data->irq);

	if (VALID_EVTCHN(evtchn))
		mask_evtchn(evtchn);
}

static void ack_dynirq(struct irq_data *data)
{
	int evtchn = evtchn_from_irq(data->irq);

	move_masked_irq(data->irq);

	if (VALID_EVTCHN(evtchn))
		unmask_evtchn(evtchn);
}

static int retrigger_dynirq(struct irq_data *data)
{
	int evtchn = evtchn_from_irq(data->irq);
	struct shared_info *sh = HYPERVISOR_shared_info;
	int ret = 0;

	if (VALID_EVTCHN(evtchn)) {
		int masked;

		masked = sync_test_and_set_bit(evtchn, sh->evtchn_mask);
		sync_set_bit(evtchn, sh->evtchn_pending);
		if (!masked)
			unmask_evtchn(evtchn);
		ret = 1;
	}

	return ret;
}

static void restore_cpu_pirqs(void)
{
	int pirq, rc, irq, gsi;
	struct physdev_map_pirq map_irq;

	for (pirq = 0; pirq < nr_irqs; pirq++) {
		irq = pirq_to_irq[pirq];
		if (irq == -1)
			continue;

		/* save/restore of PT devices doesn't work, so at this point the
		 * only devices present are GSI based emulated devices */
		gsi = gsi_from_irq(irq);
		if (!gsi)
			continue;

		map_irq.domid = DOMID_SELF;
		map_irq.type = MAP_PIRQ_TYPE_GSI;
		map_irq.index = gsi;
		map_irq.pirq = pirq;

		rc = HYPERVISOR_physdev_op(PHYSDEVOP_map_pirq, &map_irq);
		if (rc) {
			printk(KERN_WARNING "xen map irq failed gsi=%d irq=%d pirq=%d rc=%d\n",
					gsi, irq, pirq, rc);
			irq_info[irq] = mk_unbound_info();
			pirq_to_irq[pirq] = -1;
			continue;
		}

		printk(KERN_DEBUG "xen: --> irq=%d, pirq=%d\n", irq, map_irq.pirq);

		__startup_pirq(irq);
	}
}
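/*
 * After save/restore all event-channel bindings are lost, so walk the
 * per-cpu virq_to_irq and ipi_to_irq tables, ask Xen for fresh bindings
 * and record the new ports against the existing irqs.
 */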
static void restore_cpu_virqs(unsigned int cpu)
{
	struct evtchn_bind_virq bind_virq;
	int virq, irq, evtchn;

	for (virq = 0; virq < NR_VIRQS; virq++) {
		if ((irq = per_cpu(virq_to_irq, cpu)[virq]) == -1)
			continue;

		BUG_ON(virq_from_irq(irq) != virq);

		/* Get a new binding from Xen. */
		bind_virq.virq = virq;
		bind_virq.vcpu = cpu;
		if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq,
						&bind_virq) != 0)
			BUG();
		evtchn = bind_virq.port;

		/* Record the new mapping. */
		evtchn_to_irq[evtchn] = irq;
		irq_info[irq] = mk_virq_info(evtchn, virq);
		bind_evtchn_to_cpu(evtchn, cpu);
	}
}

static void restore_cpu_ipis(unsigned int cpu)
{
	struct evtchn_bind_ipi bind_ipi;
	int ipi, irq, evtchn;

	for (ipi = 0; ipi < XEN_NR_IPIS; ipi++) {
		if ((irq = per_cpu(ipi_to_irq, cpu)[ipi]) == -1)
			continue;

		BUG_ON(ipi_from_irq(irq) != ipi);

		/* Get a new binding from Xen. */
		bind_ipi.vcpu = cpu;
		if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi,
						&bind_ipi) != 0)
			BUG();
		evtchn = bind_ipi.port;

		/* Record the new mapping. */
		evtchn_to_irq[evtchn] = irq;
		irq_info[irq] = mk_ipi_info(evtchn, ipi);
		bind_evtchn_to_cpu(evtchn, cpu);
	}
}

/* Clear an irq's pending state, in preparation for polling on it */
void xen_clear_irq_pending(int irq)
{
	int evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn))
		clear_evtchn(evtchn);
}
EXPORT_SYMBOL(xen_clear_irq_pending);

void xen_set_irq_pending(int irq)
{
	int evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn))
		set_evtchn(evtchn);
}

bool xen_test_irq_pending(int irq)
{
	int evtchn = evtchn_from_irq(irq);
	bool ret = false;

	if (VALID_EVTCHN(evtchn))
		ret = test_evtchn(evtchn);

	return ret;
}

/* Poll waiting for an irq to become pending with timeout. In the usual case,
 * the irq will be disabled so it won't deliver an interrupt. */
void xen_poll_irq_timeout(int irq, u64 timeout)
{
	evtchn_port_t evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn)) {
		struct sched_poll poll;

		poll.nr_ports = 1;
		poll.timeout = timeout;
		set_xen_guest_handle(poll.ports, &evtchn);

		if (HYPERVISOR_sched_op(SCHEDOP_poll, &poll) != 0)
			BUG();
	}
}
EXPORT_SYMBOL(xen_poll_irq_timeout);

/* Poll waiting for an irq to become pending. In the usual case, the
 * irq will be disabled so it won't deliver an interrupt. */
void xen_poll_irq(int irq)
{
	xen_poll_irq_timeout(irq, 0 /* no timeout */);
}
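/*
 * Called on resume: the pre-suspend event-channel space no longer
 * exists, so mask every channel, wipe the old irq <-> evtchn mappings
 * and rebuild the VIRQ, IPI and PIRQ bindings from scratch.
 */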
void xen_irq_resume(void)
{
	unsigned int cpu, irq, evtchn;

	init_evtchn_cpu_bindings();

	/* New event-channel space is not 'live' yet. */
	for (evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++)
		mask_evtchn(evtchn);

	/* No IRQ <-> event-channel mappings. */
	for (irq = 0; irq < nr_irqs; irq++)
		irq_info[irq].evtchn = 0; /* zap event-channel binding */

	for (evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++)
		evtchn_to_irq[evtchn] = -1;

	for_each_possible_cpu(cpu) {
		restore_cpu_virqs(cpu);
		restore_cpu_ipis(cpu);
	}

	restore_cpu_pirqs();
}

static struct irq_chip xen_dynamic_chip __read_mostly = {
	.name			= "xen-dyn",

	.irq_disable		= disable_dynirq,
	.irq_mask		= disable_dynirq,
	.irq_unmask		= enable_dynirq,

	.irq_eoi		= ack_dynirq,
	.irq_set_affinity	= set_affinity_irq,
	.irq_retrigger		= retrigger_dynirq,
};

static struct irq_chip xen_pirq_chip __read_mostly = {
	.name			= "xen-pirq",

	.irq_startup		= startup_pirq,
	.irq_shutdown		= shutdown_pirq,

	.irq_enable		= enable_pirq,
	.irq_unmask		= enable_pirq,

	.irq_disable		= disable_pirq,
	.irq_mask		= disable_pirq,

	.irq_ack		= ack_pirq,

	.irq_set_affinity	= set_affinity_irq,

	.irq_retrigger		= retrigger_dynirq,
};

static struct irq_chip xen_percpu_chip __read_mostly = {
	.name			= "xen-percpu",

	.irq_disable		= disable_dynirq,
	.irq_mask		= disable_dynirq,
	.irq_unmask		= enable_dynirq,

	.irq_ack		= ack_dynirq,
};

int xen_set_callback_via(uint64_t via)
{
	struct xen_hvm_param a;
	a.domid = DOMID_SELF;
	a.index = HVM_PARAM_CALLBACK_IRQ;
	a.value = via;
	return HYPERVISOR_hvm_op(HVMOP_set_param, &a);
}
EXPORT_SYMBOL_GPL(xen_set_callback_via);

#ifdef CONFIG_XEN_PVHVM
/* Vector callbacks are better than PCI interrupts to receive event
 * channel notifications because we can receive vector callbacks on any
 * vcpu and we don't need PCI support or APIC interactions. */
void xen_callback_vector(void)
{
	int rc;
	uint64_t callback_via;
	if (xen_have_vector_callback) {
		callback_via = HVM_CALLBACK_VECTOR(XEN_HVM_EVTCHN_CALLBACK);
		rc = xen_set_callback_via(callback_via);
		if (rc) {
			printk(KERN_ERR "Request for Xen HVM callback vector"
					" failed.\n");
			xen_have_vector_callback = 0;
			return;
		}
		printk(KERN_INFO "Xen HVM callback vector for event delivery is "
				"enabled\n");
		/* in the restore case the vector has already been allocated */
		if (!test_bit(XEN_HVM_EVTCHN_CALLBACK, used_vectors))
			alloc_intr_gate(XEN_HVM_EVTCHN_CALLBACK, xen_hvm_callback_vector);
	}
}
#else
void xen_callback_vector(void) {}
#endif

void __init xen_init_IRQ(void)
{
	int i;

	cpu_evtchn_mask_p = kcalloc(nr_cpu_ids, sizeof(struct cpu_evtchn_s),
				    GFP_KERNEL);
	irq_info = kcalloc(nr_irqs, sizeof(*irq_info), GFP_KERNEL);

	/* We are using nr_irqs as the maximum number of pirq available but
	 * that number is actually chosen by Xen and we don't know exactly
	 * what it is. Be careful choosing high pirq numbers. */
	pirq_to_irq = kcalloc(nr_irqs, sizeof(*pirq_to_irq), GFP_KERNEL);
	for (i = 0; i < nr_irqs; i++)
		pirq_to_irq[i] = -1;

	evtchn_to_irq = kcalloc(NR_EVENT_CHANNELS, sizeof(*evtchn_to_irq),
				GFP_KERNEL);
	for (i = 0; i < NR_EVENT_CHANNELS; i++)
		evtchn_to_irq[i] = -1;

	init_evtchn_cpu_bindings();

	/* No event channels are 'live' right now. */
	for (i = 0; i < NR_EVENT_CHANNELS; i++)
		mask_evtchn(i);

	if (xen_hvm_domain()) {
		xen_callback_vector();
		native_init_IRQ();
		/* pci_xen_hvm_init must be called after native_init_IRQ so that
		 * __acpi_register_gsi can point at the right function */
		pci_xen_hvm_init();
	} else {
		irq_ctx_init(smp_processor_id());
		if (xen_initial_domain())
			xen_setup_pirqs();
	}
}