/*
 * Xen event channels
 *
 * Xen models interrupts with abstract event channels. Because each
 * domain gets 1024 event channels, but NR_IRQS is not that large, we
 * must dynamically map irqs<->event channels. The event channels
 * interface with the rest of the kernel by defining a xen interrupt
 * chip. When an event is received, it is mapped to an irq and sent
 * through the normal interrupt processing path.
 *
 * There are four kinds of events which can be mapped to an event
 * channel:
 *
 * 1. Inter-domain notifications. This includes all the virtual
 *    device events, since they're driven by front-ends in another domain
 *    (typically dom0).
 * 2. VIRQs, typically used for timers. These are per-cpu events.
 * 3. IPIs.
 * 4. PIRQs - Hardware interrupts.
 *
 * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007
 */
#include <linux/linkage.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/bootmem.h>
#include <linux/slab.h>
#include <linux/irqnr.h>
#include <linux/pci.h>

#include <asm/desc.h>
#include <asm/ptrace.h>
#include <asm/irq.h>
#include <asm/idle.h>
#include <asm/io_apic.h>
#include <asm/sync_bitops.h>
#include <asm/xen/pci.h>
#include <asm/xen/hypercall.h>
#include <asm/xen/hypervisor.h>

#include <xen/xen.h>
#include <xen/hvm.h>
#include <xen/xen-ops.h>
#include <xen/events.h>
#include <xen/interface/xen.h>
#include <xen/interface/event_channel.h>
#include <xen/interface/hvm/hvm_op.h>
#include <xen/interface/hvm/params.h>
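
/*
 * Illustrative sketch (not part of the original driver): a typical
 * consumer of this layer never touches event-channel ports directly;
 * it binds through the helpers defined below and declared in
 * <xen/events.h>. The handler and function names here are
 * hypothetical.
 */
static irqreturn_t example_virq_handler(int irq, void *dev_id)
{
	/* Runs on the normal interrupt path once the event is mapped. */
	return IRQ_HANDLED;
}

static int __maybe_unused example_bind_debug_virq(void)
{
	/* Binds the per-cpu VIRQ_DEBUG on cpu 0; returns the irq or -errno. */
	return bind_virq_to_irqhandler(VIRQ_DEBUG, 0, example_virq_handler,
				       0, "example-debug", NULL);
}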
/*
 * This lock protects updates to the following mapping and reference-count
 * arrays. The lock does not need to be acquired to read the mapping tables.
 */
static DEFINE_SPINLOCK(irq_mapping_update_lock);

/* IRQ <-> VIRQ mapping. */
static DEFINE_PER_CPU(int [NR_VIRQS], virq_to_irq) = {[0 ... NR_VIRQS-1] = -1};

/* IRQ <-> IPI mapping */
static DEFINE_PER_CPU(int [XEN_NR_IPIS], ipi_to_irq) = {[0 ... XEN_NR_IPIS-1] = -1};

/* Interrupt types. */
enum xen_irq_type {
	IRQT_UNBOUND = 0,
	IRQT_PIRQ,
	IRQT_VIRQ,
	IRQT_IPI,
	IRQT_EVTCHN
};

/*
 * Packed IRQ information:
 * type - enum xen_irq_type
 * event channel - irq->event channel mapping
 * cpu - cpu this event channel is bound to
 * index - type-specific information:
 *    PIRQ - vector, with MSB being "needs EOI", or physical IRQ of the HVM
 *           guest, or GSI (real passthrough IRQ) of the device.
 *    VIRQ - virq number
 *    IPI - IPI vector
 *    EVTCHN -
 */
struct irq_info
{
	enum xen_irq_type type;	/* type */
	unsigned short evtchn;	/* event channel */
	unsigned short cpu;	/* cpu bound */

	union {
		unsigned short virq;
		enum ipi_vector ipi;
		struct {
			unsigned short pirq;
			unsigned short gsi;
			unsigned char vector;
			unsigned char flags;
		} pirq;
	} u;
};
#define PIRQ_NEEDS_EOI	(1 << 0)
#define PIRQ_SHAREABLE	(1 << 1)

static struct irq_info *irq_info;
static int *pirq_to_irq;

static int *evtchn_to_irq;
struct cpu_evtchn_s {
	unsigned long bits[NR_EVENT_CHANNELS/BITS_PER_LONG];
};

static __initdata struct cpu_evtchn_s init_evtchn_mask = {
	.bits[0 ... (NR_EVENT_CHANNELS/BITS_PER_LONG)-1] = ~0ul,
};
static struct cpu_evtchn_s *cpu_evtchn_mask_p = &init_evtchn_mask;

static inline unsigned long *cpu_evtchn_mask(int cpu)
{
	return cpu_evtchn_mask_p[cpu].bits;
}

/* Xen will never allocate port zero for any purpose. */
#define VALID_EVTCHN(chn)	((chn) != 0)

static struct irq_chip xen_dynamic_chip;
static struct irq_chip xen_percpu_chip;
static struct irq_chip xen_pirq_chip;

/* Constructor for packed IRQ information. */
static struct irq_info mk_unbound_info(void)
{
	return (struct irq_info) { .type = IRQT_UNBOUND };
}

static struct irq_info mk_evtchn_info(unsigned short evtchn)
{
	return (struct irq_info) { .type = IRQT_EVTCHN, .evtchn = evtchn,
			.cpu = 0 };
}

static struct irq_info mk_ipi_info(unsigned short evtchn, enum ipi_vector ipi)
{
	return (struct irq_info) { .type = IRQT_IPI, .evtchn = evtchn,
			.cpu = 0, .u.ipi = ipi };
}

static struct irq_info mk_virq_info(unsigned short evtchn, unsigned short virq)
{
	return (struct irq_info) { .type = IRQT_VIRQ, .evtchn = evtchn,
			.cpu = 0, .u.virq = virq };
}

static struct irq_info mk_pirq_info(unsigned short evtchn, unsigned short pirq,
				    unsigned short gsi, unsigned short vector)
{
	return (struct irq_info) { .type = IRQT_PIRQ, .evtchn = evtchn,
			.cpu = 0,
			.u.pirq = { .pirq = pirq, .gsi = gsi, .vector = vector } };
}
/*
 * Accessors for packed IRQ information.
 */
static struct irq_info *info_for_irq(unsigned irq)
{
	return &irq_info[irq];
}

static unsigned int evtchn_from_irq(unsigned irq)
{
	if (unlikely(WARN(irq < 0 || irq >= nr_irqs, "Invalid irq %d!\n", irq)))
		return 0;

	return info_for_irq(irq)->evtchn;
}

unsigned irq_from_evtchn(unsigned int evtchn)
{
	return evtchn_to_irq[evtchn];
}
EXPORT_SYMBOL_GPL(irq_from_evtchn);

static enum ipi_vector ipi_from_irq(unsigned irq)
{
	struct irq_info *info = info_for_irq(irq);

	BUG_ON(info == NULL);
	BUG_ON(info->type != IRQT_IPI);

	return info->u.ipi;
}

static unsigned virq_from_irq(unsigned irq)
{
	struct irq_info *info = info_for_irq(irq);

	BUG_ON(info == NULL);
	BUG_ON(info->type != IRQT_VIRQ);

	return info->u.virq;
}

static unsigned pirq_from_irq(unsigned irq)
{
	struct irq_info *info = info_for_irq(irq);

	BUG_ON(info == NULL);
	BUG_ON(info->type != IRQT_PIRQ);

	return info->u.pirq.pirq;
}

static unsigned gsi_from_irq(unsigned irq)
{
	struct irq_info *info = info_for_irq(irq);

	BUG_ON(info == NULL);
	BUG_ON(info->type != IRQT_PIRQ);

	return info->u.pirq.gsi;
}

static unsigned vector_from_irq(unsigned irq)
{
	struct irq_info *info = info_for_irq(irq);

	BUG_ON(info == NULL);
	BUG_ON(info->type != IRQT_PIRQ);

	return info->u.pirq.vector;
}

static enum xen_irq_type type_from_irq(unsigned irq)
{
	return info_for_irq(irq)->type;
}

static unsigned cpu_from_irq(unsigned irq)
{
	return info_for_irq(irq)->cpu;
}

static unsigned int cpu_from_evtchn(unsigned int evtchn)
{
	int irq = evtchn_to_irq[evtchn];
	unsigned ret = 0;

	if (irq != -1)
		ret = cpu_from_irq(irq);

	return ret;
}
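
/*
 * Illustrative sketch (hypothetical numbers): how the packed info
 * round-trips through the mk_*_info() constructors and the accessors
 * above. Assumes xen_init_IRQ() has run and that irq 42 is unused;
 * the real writers are the bind_*_to_irq() functions further down.
 */
static void __maybe_unused example_pack_and_unpack(void)
{
	unsigned irq = 42, evtchn = 7;

	irq_info[irq] = mk_virq_info(evtchn, VIRQ_TIMER);

	BUG_ON(type_from_irq(irq) != IRQT_VIRQ);
	BUG_ON(evtchn_from_irq(irq) != evtchn);
	BUG_ON(virq_from_irq(irq) != VIRQ_TIMER);
}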
static bool pirq_needs_eoi(unsigned irq)
{
	struct irq_info *info = info_for_irq(irq);

	BUG_ON(info->type != IRQT_PIRQ);

	return info->u.pirq.flags & PIRQ_NEEDS_EOI;
}

static inline unsigned long active_evtchns(unsigned int cpu,
					   struct shared_info *sh,
					   unsigned int idx)
{
	return (sh->evtchn_pending[idx] &
		cpu_evtchn_mask(cpu)[idx] &
		~sh->evtchn_mask[idx]);
}
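
/*
 * Worked example for one word of the triple mask above:
 * pending = 0b1011, this cpu's binding mask = 0b0011 and global
 * evtchn_mask = 0b0001 give 0b1011 & 0b0011 & ~0b0001 = 0b0010,
 * i.e. only port 1 of that word is delivered on this cpu.
 */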
static void bind_evtchn_to_cpu(unsigned int chn, unsigned int cpu)
{
	int irq = evtchn_to_irq[chn];

	BUG_ON(irq == -1);
#ifdef CONFIG_SMP
	cpumask_copy(irq_to_desc(irq)->irq_data.affinity, cpumask_of(cpu));
#endif

	clear_bit(chn, cpu_evtchn_mask(cpu_from_irq(irq)));
	set_bit(chn, cpu_evtchn_mask(cpu));

	irq_info[irq].cpu = cpu;
}

static void init_evtchn_cpu_bindings(void)
{
	int i;
#ifdef CONFIG_SMP
	struct irq_desc *desc;

	/* By default all event channels notify CPU#0. */
	for_each_irq_desc(i, desc) {
		cpumask_copy(desc->irq_data.affinity, cpumask_of(0));
	}
#endif

	for_each_possible_cpu(i)
		memset(cpu_evtchn_mask(i),
		       (i == 0) ? ~0 : 0, sizeof(struct cpu_evtchn_s));
}

static inline void clear_evtchn(int port)
{
	struct shared_info *s = HYPERVISOR_shared_info;
	sync_clear_bit(port, &s->evtchn_pending[0]);
}

static inline void set_evtchn(int port)
{
	struct shared_info *s = HYPERVISOR_shared_info;
	sync_set_bit(port, &s->evtchn_pending[0]);
}

static inline int test_evtchn(int port)
{
	struct shared_info *s = HYPERVISOR_shared_info;
	return sync_test_bit(port, &s->evtchn_pending[0]);
}

/**
 * notify_remote_via_irq - send event to remote end of event channel via irq
 * @irq: irq of event channel to send event to
 *
 * Unlike notify_remote_via_evtchn(), this is safe to use across
 * save/restore. Notifications on a broken connection are silently
 * dropped.
 */
void notify_remote_via_irq(int irq)
{
	int evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn))
		notify_remote_via_evtchn(evtchn);
}
EXPORT_SYMBOL_GPL(notify_remote_via_irq);

static void mask_evtchn(int port)
{
	struct shared_info *s = HYPERVISOR_shared_info;
	sync_set_bit(port, &s->evtchn_mask[0]);
}

static void unmask_evtchn(int port)
{
	struct shared_info *s = HYPERVISOR_shared_info;
	unsigned int cpu = get_cpu();

	BUG_ON(!irqs_disabled());

	/* Slow path (hypercall) if this is a non-local port. */
	if (unlikely(cpu != cpu_from_evtchn(port))) {
		struct evtchn_unmask unmask = { .port = port };
		(void)HYPERVISOR_event_channel_op(EVTCHNOP_unmask, &unmask);
	} else {
		struct vcpu_info *vcpu_info = __this_cpu_read(xen_vcpu);

		sync_clear_bit(port, &s->evtchn_mask[0]);

		/*
		 * The following is basically the equivalent of
		 * 'hw_resend_irq'. Just like a real IO-APIC we 'lose
		 * the interrupt edge' if the channel is masked.
		 */
		if (sync_test_bit(port, &s->evtchn_pending[0]) &&
		    !sync_test_and_set_bit(port / BITS_PER_LONG,
					   &vcpu_info->evtchn_pending_sel))
			vcpu_info->evtchn_upcall_pending = 1;
	}

	put_cpu();
}

static int xen_allocate_irq_dynamic(void)
{
	int first = 0;
	int irq;

#ifdef CONFIG_X86_IO_APIC
	/*
	 * For an HVM guest or domain 0 which see "real" (emulated or
	 * actual, respectively) GSIs we allocate dynamic IRQs
	 * e.g. those corresponding to event channels or MSIs
	 * etc. from the range above those "real" GSIs to avoid
	 * collisions.
	 */
	if (xen_initial_domain() || xen_hvm_domain())
		first = get_nr_irqs_gsi();
#endif

retry:
	irq = irq_alloc_desc_from(first, -1);

	if (irq == -ENOMEM && first > NR_IRQS_LEGACY) {
		printk(KERN_ERR "Out of dynamic IRQ space and eating into GSI space. You should increase nr_irqs\n");
		first = max(NR_IRQS_LEGACY, first - NR_IRQS_LEGACY);
		goto retry;
	}

	if (irq < 0)
		panic("No available IRQ to bind to: increase nr_irqs!\n");

	return irq;
}
static int xen_allocate_irq_gsi(unsigned gsi)
{
	int irq;

	/*
	 * A PV guest has no concept of a GSI (since it has no ACPI
	 * nor access to/knowledge of the physical APICs). Therefore
	 * all IRQs are dynamically allocated from the entire IRQ
	 * space.
	 */
	if (xen_pv_domain() && !xen_initial_domain())
		return xen_allocate_irq_dynamic();

	/* Legacy IRQ descriptors are already allocated by the arch. */
	if (gsi < NR_IRQS_LEGACY)
		return gsi;

	irq = irq_alloc_desc_at(gsi, -1);
	if (irq < 0)
		panic("Unable to allocate IRQ%d (%d)\n", gsi, irq);

	return irq;
}

static void xen_free_irq(unsigned irq)
{
	/* Legacy IRQ descriptors are managed by the arch. */
	if (irq < NR_IRQS_LEGACY)
		return;

	irq_free_desc(irq);
}

static void pirq_unmask_notify(int irq)
{
	struct physdev_eoi eoi = { .irq = pirq_from_irq(irq) };

	if (unlikely(pirq_needs_eoi(irq))) {
		int rc = HYPERVISOR_physdev_op(PHYSDEVOP_eoi, &eoi);
		WARN_ON(rc);
	}
}

static void pirq_query_unmask(int irq)
{
	struct physdev_irq_status_query irq_status;
	struct irq_info *info = info_for_irq(irq);

	BUG_ON(info->type != IRQT_PIRQ);

	irq_status.irq = pirq_from_irq(irq);
	if (HYPERVISOR_physdev_op(PHYSDEVOP_irq_status_query, &irq_status))
		irq_status.flags = 0;

	info->u.pirq.flags &= ~PIRQ_NEEDS_EOI;
	if (irq_status.flags & XENIRQSTAT_needs_eoi)
		info->u.pirq.flags |= PIRQ_NEEDS_EOI;
}

static bool probing_irq(int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	return desc && desc->action == NULL;
}

static unsigned int __startup_pirq(unsigned int irq)
{
	struct evtchn_bind_pirq bind_pirq;
	struct irq_info *info = info_for_irq(irq);
	int evtchn = evtchn_from_irq(irq);
	int rc;

	BUG_ON(info->type != IRQT_PIRQ);

	if (VALID_EVTCHN(evtchn))
		goto out;

	bind_pirq.pirq = pirq_from_irq(irq);
	/* NB. We are happy to share unless we are probing. */
	bind_pirq.flags = info->u.pirq.flags & PIRQ_SHAREABLE ?
					BIND_PIRQ__WILL_SHARE : 0;
	rc = HYPERVISOR_event_channel_op(EVTCHNOP_bind_pirq, &bind_pirq);
	if (rc != 0) {
		if (!probing_irq(irq))
			printk(KERN_INFO "Failed to obtain physical IRQ %d\n",
			       irq);
		return 0;
	}
	evtchn = bind_pirq.port;

	pirq_query_unmask(irq);

	evtchn_to_irq[evtchn] = irq;
	bind_evtchn_to_cpu(evtchn, 0);
	info->evtchn = evtchn;

out:
	unmask_evtchn(evtchn);
	pirq_unmask_notify(irq);

	return 0;
}

static unsigned int startup_pirq(struct irq_data *data)
{
	return __startup_pirq(data->irq);
}

static void shutdown_pirq(struct irq_data *data)
{
	struct evtchn_close close;
	unsigned int irq = data->irq;
	struct irq_info *info = info_for_irq(irq);
	int evtchn = evtchn_from_irq(irq);

	BUG_ON(info->type != IRQT_PIRQ);

	if (!VALID_EVTCHN(evtchn))
		return;

	mask_evtchn(evtchn);

	close.port = evtchn;
	if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close) != 0)
		BUG();

	bind_evtchn_to_cpu(evtchn, 0);
	evtchn_to_irq[evtchn] = -1;
	info->evtchn = 0;
}

static void enable_pirq(struct irq_data *data)
{
	startup_pirq(data);
}

static void disable_pirq(struct irq_data *data)
{
}

static void ack_pirq(struct irq_data *data)
{
	int evtchn = evtchn_from_irq(data->irq);

	move_native_irq(data->irq);

	if (VALID_EVTCHN(evtchn)) {
		mask_evtchn(evtchn);
		clear_evtchn(evtchn);
	}
}
static int find_irq_by_gsi(unsigned gsi)
{
	int irq;

	for (irq = 0; irq < nr_irqs; irq++) {
		struct irq_info *info = info_for_irq(irq);

		if (info == NULL || info->type != IRQT_PIRQ)
			continue;

		if (gsi_from_irq(irq) == gsi)
			return irq;
	}

	return -1;
}

int xen_allocate_pirq(unsigned gsi, int shareable, char *name)
{
	return xen_map_pirq_gsi(gsi, gsi, shareable, name);
}

/*
 * Do not make any assumptions regarding the relationship between the
 * IRQ number returned here and the Xen pirq argument.
 *
 * Note: We don't assign an event channel until the irq is actually started
 * up. Return an existing irq if we've already got one for the gsi.
 */
int xen_map_pirq_gsi(unsigned pirq, unsigned gsi, int shareable, char *name)
{
	int irq = -1;
	struct physdev_irq irq_op;

	spin_lock(&irq_mapping_update_lock);

	if ((pirq > nr_irqs) || (gsi > nr_irqs)) {
		printk(KERN_WARNING "xen_map_pirq_gsi: %s %s is incorrect!\n",
		       pirq > nr_irqs ? "pirq" : "",
		       gsi > nr_irqs ? "gsi" : "");
		goto out;
	}

	irq = find_irq_by_gsi(gsi);
	if (irq != -1) {
		printk(KERN_INFO "xen_map_pirq_gsi: returning irq %d for gsi %u\n",
		       irq, gsi);
		goto out;	/* XXX need refcount? */
	}

	irq = xen_allocate_irq_gsi(gsi);

	set_irq_chip_and_handler_name(irq, &xen_pirq_chip,
				      handle_level_irq, name);

	irq_op.irq = irq;
	irq_op.vector = 0;

	/* Only the privileged domain can do this. For non-priv, the pcifront
	 * driver provides a PCI bus that does the call to do exactly
	 * this in the priv domain. */
	if (xen_initial_domain() &&
	    HYPERVISOR_physdev_op(PHYSDEVOP_alloc_irq_vector, &irq_op)) {
		xen_free_irq(irq);
		irq = -ENOSPC;
		goto out;
	}

	irq_info[irq] = mk_pirq_info(0, pirq, gsi, irq_op.vector);
	irq_info[irq].u.pirq.flags |= shareable ? PIRQ_SHAREABLE : 0;
	pirq_to_irq[pirq] = irq;

out:
	spin_unlock(&irq_mapping_update_lock);

	return irq;
}
#ifdef CONFIG_PCI_MSI
int xen_allocate_pirq_msi(struct pci_dev *dev, struct msi_desc *msidesc)
{
	int rc;
	struct physdev_get_free_pirq op_get_free_pirq;

	op_get_free_pirq.type = MAP_PIRQ_TYPE_MSI;
	rc = HYPERVISOR_physdev_op(PHYSDEVOP_get_free_pirq, &op_get_free_pirq);

	WARN_ONCE(rc == -ENOSYS,
		  "hypervisor does not support the PHYSDEVOP_get_free_pirq interface\n");

	return rc ? -1 : op_get_free_pirq.pirq;
}

int xen_bind_pirq_msi_to_irq(struct pci_dev *dev, struct msi_desc *msidesc,
			     int pirq, int vector, const char *name)
{
	int irq, ret;

	spin_lock(&irq_mapping_update_lock);

	irq = xen_allocate_irq_dynamic();
	if (irq == -1)
		goto out;

	set_irq_chip_and_handler_name(irq, &xen_pirq_chip,
				      handle_level_irq, name);

	irq_info[irq] = mk_pirq_info(0, pirq, 0, vector);
	pirq_to_irq[pirq] = irq;
	ret = set_irq_msi(irq, msidesc);
	if (ret < 0)
		goto error_irq;
out:
	spin_unlock(&irq_mapping_update_lock);
	return irq;
error_irq:
	spin_unlock(&irq_mapping_update_lock);
	xen_free_irq(irq);
	return -1;
}
#endif
int xen_destroy_irq(int irq)
{
	struct irq_desc *desc;
	struct physdev_unmap_pirq unmap_irq;
	struct irq_info *info = info_for_irq(irq);
	int rc = -ENOENT;

	spin_lock(&irq_mapping_update_lock);

	desc = irq_to_desc(irq);
	if (!desc)
		goto out;

	if (xen_initial_domain()) {
		unmap_irq.pirq = info->u.pirq.pirq;
		unmap_irq.domid = DOMID_SELF;
		rc = HYPERVISOR_physdev_op(PHYSDEVOP_unmap_pirq, &unmap_irq);
		if (rc) {
			printk(KERN_WARNING "unmap irq failed %d\n", rc);
			goto out;
		}
	}
	pirq_to_irq[info->u.pirq.pirq] = -1;

	irq_info[irq] = mk_unbound_info();

	xen_free_irq(irq);

out:
	spin_unlock(&irq_mapping_update_lock);
	return rc;
}

int xen_vector_from_irq(unsigned irq)
{
	return vector_from_irq(irq);
}

int xen_gsi_from_irq(unsigned irq)
{
	return gsi_from_irq(irq);
}

int xen_irq_from_pirq(unsigned pirq)
{
	return pirq_to_irq[pirq];
}

int bind_evtchn_to_irq(unsigned int evtchn)
{
	int irq;

	spin_lock(&irq_mapping_update_lock);

	irq = evtchn_to_irq[evtchn];

	if (irq == -1) {
		irq = xen_allocate_irq_dynamic();

		set_irq_chip_and_handler_name(irq, &xen_dynamic_chip,
					      handle_fasteoi_irq, "event");

		evtchn_to_irq[evtchn] = irq;
		irq_info[irq] = mk_evtchn_info(evtchn);
	}

	spin_unlock(&irq_mapping_update_lock);

	return irq;
}
EXPORT_SYMBOL_GPL(bind_evtchn_to_irq);

static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu)
{
	struct evtchn_bind_ipi bind_ipi;
	int evtchn, irq;

	spin_lock(&irq_mapping_update_lock);

	irq = per_cpu(ipi_to_irq, cpu)[ipi];

	if (irq == -1) {
		irq = xen_allocate_irq_dynamic();
		if (irq < 0)
			goto out;

		set_irq_chip_and_handler_name(irq, &xen_percpu_chip,
					      handle_percpu_irq, "ipi");

		bind_ipi.vcpu = cpu;
		if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi,
						&bind_ipi) != 0)
			BUG();
		evtchn = bind_ipi.port;

		evtchn_to_irq[evtchn] = irq;
		irq_info[irq] = mk_ipi_info(evtchn, ipi);
		per_cpu(ipi_to_irq, cpu)[ipi] = irq;

		bind_evtchn_to_cpu(evtchn, cpu);
	}

out:
	spin_unlock(&irq_mapping_update_lock);
	return irq;
}

int bind_virq_to_irq(unsigned int virq, unsigned int cpu)
{
	struct evtchn_bind_virq bind_virq;
	int evtchn, irq;

	spin_lock(&irq_mapping_update_lock);

	irq = per_cpu(virq_to_irq, cpu)[virq];

	if (irq == -1) {
		irq = xen_allocate_irq_dynamic();

		set_irq_chip_and_handler_name(irq, &xen_percpu_chip,
					      handle_percpu_irq, "virq");

		bind_virq.virq = virq;
		bind_virq.vcpu = cpu;
		if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq,
						&bind_virq) != 0)
			BUG();
		evtchn = bind_virq.port;

		evtchn_to_irq[evtchn] = irq;
		irq_info[irq] = mk_virq_info(evtchn, virq);

		per_cpu(virq_to_irq, cpu)[virq] = irq;

		bind_evtchn_to_cpu(evtchn, cpu);
	}

	spin_unlock(&irq_mapping_update_lock);

	return irq;
}

static void unbind_from_irq(unsigned int irq)
{
	struct evtchn_close close;
	int evtchn = evtchn_from_irq(irq);

	spin_lock(&irq_mapping_update_lock);

	if (VALID_EVTCHN(evtchn)) {
		close.port = evtchn;
		if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close) != 0)
			BUG();

		switch (type_from_irq(irq)) {
		case IRQT_VIRQ:
			per_cpu(virq_to_irq, cpu_from_evtchn(evtchn))
				[virq_from_irq(irq)] = -1;
			break;
		case IRQT_IPI:
			per_cpu(ipi_to_irq, cpu_from_evtchn(evtchn))
				[ipi_from_irq(irq)] = -1;
			break;
		default:
			break;
		}

		/* Closed ports are implicitly re-bound to VCPU0. */
		bind_evtchn_to_cpu(evtchn, 0);

		evtchn_to_irq[evtchn] = -1;
	}

	if (irq_info[irq].type != IRQT_UNBOUND) {
		irq_info[irq] = mk_unbound_info();

		xen_free_irq(irq);
	}

	spin_unlock(&irq_mapping_update_lock);
}

int bind_evtchn_to_irqhandler(unsigned int evtchn,
			      irq_handler_t handler,
			      unsigned long irqflags,
			      const char *devname, void *dev_id)
{
	unsigned int irq;
	int retval;

	irq = bind_evtchn_to_irq(evtchn);
	retval = request_irq(irq, handler, irqflags, devname, dev_id);
	if (retval != 0) {
		unbind_from_irq(irq);
		return retval;
	}

	return irq;
}
EXPORT_SYMBOL_GPL(bind_evtchn_to_irqhandler);
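
/*
 * Illustrative sketch (hypothetical names): a front-end driver that
 * has been handed an inter-domain port, e.g. via xenstore, would wire
 * it up like this and tear it down with unbind_from_irqhandler().
 */
static irqreturn_t example_evtchn_handler(int irq, void *dev_id)
{
	return IRQ_HANDLED;
}

static int __maybe_unused example_bind_remote_port(unsigned int evtchn)
{
	return bind_evtchn_to_irqhandler(evtchn, example_evtchn_handler,
					 0, "example-frontend", NULL);
}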
int bind_virq_to_irqhandler(unsigned int virq, unsigned int cpu,
			    irq_handler_t handler,
			    unsigned long irqflags, const char *devname, void *dev_id)
{
	unsigned int irq;
	int retval;

	irq = bind_virq_to_irq(virq, cpu);
	retval = request_irq(irq, handler, irqflags, devname, dev_id);
	if (retval != 0) {
		unbind_from_irq(irq);
		return retval;
	}

	return irq;
}
EXPORT_SYMBOL_GPL(bind_virq_to_irqhandler);

int bind_ipi_to_irqhandler(enum ipi_vector ipi,
			   unsigned int cpu,
			   irq_handler_t handler,
			   unsigned long irqflags,
			   const char *devname,
			   void *dev_id)
{
	int irq, retval;

	irq = bind_ipi_to_irq(ipi, cpu);
	if (irq < 0)
		return irq;

	irqflags |= IRQF_NO_SUSPEND | IRQF_FORCE_RESUME;
	retval = request_irq(irq, handler, irqflags, devname, dev_id);
	if (retval != 0) {
		unbind_from_irq(irq);
		return retval;
	}

	return irq;
}

void unbind_from_irqhandler(unsigned int irq, void *dev_id)
{
	free_irq(irq, dev_id);
	unbind_from_irq(irq);
}
EXPORT_SYMBOL_GPL(unbind_from_irqhandler);

void xen_send_IPI_one(unsigned int cpu, enum ipi_vector vector)
{
	int irq = per_cpu(ipi_to_irq, cpu)[vector];
	BUG_ON(irq < 0);
	notify_remote_via_irq(irq);
}

irqreturn_t xen_debug_interrupt(int irq, void *dev_id)
{
	struct shared_info *sh = HYPERVISOR_shared_info;
	int cpu = smp_processor_id();
	unsigned long *cpu_evtchn = cpu_evtchn_mask(cpu);
	int i;
	unsigned long flags;
	static DEFINE_SPINLOCK(debug_lock);
	struct vcpu_info *v;

	spin_lock_irqsave(&debug_lock, flags);

	printk("\nvcpu %d\n ", cpu);

	for_each_online_cpu(i) {
		int pending;
		v = per_cpu(xen_vcpu, i);
		pending = (get_irq_regs() && i == cpu)
			? xen_irqs_disabled(get_irq_regs())
			: v->evtchn_upcall_mask;
		printk("%d: masked=%d pending=%d event_sel %0*lx\n ", i,
		       pending, v->evtchn_upcall_pending,
		       (int)(sizeof(v->evtchn_pending_sel)*2),
		       v->evtchn_pending_sel);
	}
	v = per_cpu(xen_vcpu, cpu);

	printk("\npending:\n ");
	for (i = ARRAY_SIZE(sh->evtchn_pending)-1; i >= 0; i--)
		printk("%0*lx%s", (int)sizeof(sh->evtchn_pending[0])*2,
		       sh->evtchn_pending[i],
		       i % 8 == 0 ? "\n " : " ");
	printk("\nglobal mask:\n ");
	for (i = ARRAY_SIZE(sh->evtchn_mask)-1; i >= 0; i--)
		printk("%0*lx%s",
		       (int)(sizeof(sh->evtchn_mask[0])*2),
		       sh->evtchn_mask[i],
		       i % 8 == 0 ? "\n " : " ");

	printk("\nglobally unmasked:\n ");
	for (i = ARRAY_SIZE(sh->evtchn_mask)-1; i >= 0; i--)
		printk("%0*lx%s", (int)(sizeof(sh->evtchn_mask[0])*2),
		       sh->evtchn_pending[i] & ~sh->evtchn_mask[i],
		       i % 8 == 0 ? "\n " : " ");

	printk("\nlocal cpu%d mask:\n ", cpu);
	for (i = (NR_EVENT_CHANNELS/BITS_PER_LONG)-1; i >= 0; i--)
		printk("%0*lx%s", (int)(sizeof(cpu_evtchn[0])*2),
		       cpu_evtchn[i],
		       i % 8 == 0 ? "\n " : " ");

	printk("\nlocally unmasked:\n ");
	for (i = ARRAY_SIZE(sh->evtchn_mask)-1; i >= 0; i--) {
		unsigned long pending = sh->evtchn_pending[i]
			& ~sh->evtchn_mask[i]
			& cpu_evtchn[i];
		printk("%0*lx%s", (int)(sizeof(sh->evtchn_mask[0])*2),
		       pending, i % 8 == 0 ? "\n " : " ");
	}

	printk("\npending list:\n");
	for (i = 0; i < NR_EVENT_CHANNELS; i++) {
		if (sync_test_bit(i, sh->evtchn_pending)) {
			int word_idx = i / BITS_PER_LONG;
			printk(" %d: event %d -> irq %d%s%s%s\n",
			       cpu_from_evtchn(i), i,
			       evtchn_to_irq[i],
			       sync_test_bit(word_idx, &v->evtchn_pending_sel)
			       ? "" : " l2-clear",
			       !sync_test_bit(i, sh->evtchn_mask)
			       ? "" : " globally-masked",
			       sync_test_bit(i, cpu_evtchn)
			       ? "" : " locally-masked");
		}
	}

	spin_unlock_irqrestore(&debug_lock, flags);

	return IRQ_HANDLED;
}

static DEFINE_PER_CPU(unsigned, xed_nesting_count);

/*
 * Search the CPU's pending event bitmasks. For each one found, map
 * the event number to an irq, and feed it into do_IRQ() for
 * handling.
 *
 * Xen uses a two-level bitmap to speed searching. The first level is
 * a bitset of words which contain pending event bits. The second
 * level is a bitset of pending events themselves.
 */
static void __xen_evtchn_do_upcall(void)
{
	int cpu = get_cpu();
	struct shared_info *s = HYPERVISOR_shared_info;
	struct vcpu_info *vcpu_info = __this_cpu_read(xen_vcpu);
	unsigned count;

	do {
		unsigned long pending_words;

		vcpu_info->evtchn_upcall_pending = 0;

		if (__this_cpu_inc_return(xed_nesting_count) - 1)
			goto out;

#ifndef CONFIG_X86 /* No need for a barrier -- XCHG is a barrier on x86. */
		/* Clear master flag /before/ clearing selector flag. */
		wmb();
#endif
		pending_words = xchg(&vcpu_info->evtchn_pending_sel, 0);
		while (pending_words != 0) {
			unsigned long pending_bits;
			int word_idx = __ffs(pending_words);
			pending_words &= ~(1UL << word_idx);

			while ((pending_bits = active_evtchns(cpu, s, word_idx)) != 0) {
				int bit_idx = __ffs(pending_bits);
				int port = (word_idx * BITS_PER_LONG) + bit_idx;
				int irq = evtchn_to_irq[port];
				struct irq_desc *desc;

				mask_evtchn(port);
				clear_evtchn(port);

				if (irq != -1) {
					desc = irq_to_desc(irq);
					if (desc)
						generic_handle_irq_desc(irq, desc);
				}
			}
		}

		BUG_ON(!irqs_disabled());

		count = __this_cpu_read(xed_nesting_count);
		__this_cpu_write(xed_nesting_count, 0);
	} while (count != 1 || vcpu_info->evtchn_upcall_pending);

out:
	put_cpu();
}
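
/*
 * Worked example of the two-level search above, assuming
 * BITS_PER_LONG == 64: a pending port 131 sets bit 3 of
 * s->evtchn_pending[2] and bit 2 of evtchn_pending_sel. The outer
 * loop finds word_idx = 2 from the selector, the inner loop finds
 * bit_idx = 3, and port = 2 * 64 + 3 = 131 is handed to the mapped
 * irq via generic_handle_irq_desc().
 */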
void xen_evtchn_do_upcall(struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);

	exit_idle();
	irq_enter();

	__xen_evtchn_do_upcall();

	irq_exit();
	set_irq_regs(old_regs);
}

void xen_hvm_evtchn_do_upcall(void)
{
	__xen_evtchn_do_upcall();
}
EXPORT_SYMBOL_GPL(xen_hvm_evtchn_do_upcall);

/* Rebind a new event channel to an existing irq. */
void rebind_evtchn_irq(int evtchn, int irq)
{
	struct irq_info *info = info_for_irq(irq);

	/* Make sure the irq is masked, since the new event channel
	   will also be masked. */
	disable_irq(irq);

	spin_lock(&irq_mapping_update_lock);

	/* After resume the irq<->evtchn mappings are all cleared out */
	BUG_ON(evtchn_to_irq[evtchn] != -1);
	/* Expect irq to have been bound before,
	   so there should be a proper type */
	BUG_ON(info->type == IRQT_UNBOUND);

	evtchn_to_irq[evtchn] = irq;
	irq_info[irq] = mk_evtchn_info(evtchn);

	spin_unlock(&irq_mapping_update_lock);

	/* new event channels are always bound to cpu 0 */
	irq_set_affinity(irq, cpumask_of(0));

	/* Unmask the event channel. */
	enable_irq(irq);
}

/* Rebind an evtchn so that it gets delivered to a specific cpu */
static int rebind_irq_to_cpu(unsigned irq, unsigned tcpu)
{
	struct evtchn_bind_vcpu bind_vcpu;
	int evtchn = evtchn_from_irq(irq);

	if (!VALID_EVTCHN(evtchn))
		return -1;

	/*
	 * Events delivered via platform PCI interrupts are always
	 * routed to vcpu 0 and hence cannot be rebound.
	 */
	if (xen_hvm_domain() && !xen_have_vector_callback)
		return -1;

	/* Send future instances of this interrupt to other vcpu. */
	bind_vcpu.port = evtchn;
	bind_vcpu.vcpu = tcpu;

	/*
	 * If this fails, it usually just indicates that we're dealing with a
	 * virq or IPI channel, which don't actually need to be rebound. Ignore
	 * it, but don't do the xenlinux-level rebind in that case.
	 */
	if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_vcpu, &bind_vcpu) >= 0)
		bind_evtchn_to_cpu(evtchn, tcpu);

	return 0;
}

static int set_affinity_irq(struct irq_data *data, const struct cpumask *dest,
			    bool force)
{
	unsigned tcpu = cpumask_first(dest);

	return rebind_irq_to_cpu(data->irq, tcpu);
}

int resend_irq_on_evtchn(unsigned int irq)
{
	int masked, evtchn = evtchn_from_irq(irq);
	struct shared_info *s = HYPERVISOR_shared_info;

	if (!VALID_EVTCHN(evtchn))
		return 1;

	masked = sync_test_and_set_bit(evtchn, s->evtchn_mask);
	sync_set_bit(evtchn, s->evtchn_pending);
	if (!masked)
		unmask_evtchn(evtchn);

	return 1;
}

static void enable_dynirq(struct irq_data *data)
{
	int evtchn = evtchn_from_irq(data->irq);

	if (VALID_EVTCHN(evtchn))
		unmask_evtchn(evtchn);
}

static void disable_dynirq(struct irq_data *data)
{
	int evtchn = evtchn_from_irq(data->irq);

	if (VALID_EVTCHN(evtchn))
		mask_evtchn(evtchn);
}

static void ack_dynirq(struct irq_data *data)
{
	int evtchn = evtchn_from_irq(data->irq);

	move_masked_irq(data->irq);

	if (VALID_EVTCHN(evtchn))
		unmask_evtchn(evtchn);
}

static int retrigger_dynirq(struct irq_data *data)
{
	int evtchn = evtchn_from_irq(data->irq);
	struct shared_info *sh = HYPERVISOR_shared_info;
	int ret = 0;

	if (VALID_EVTCHN(evtchn)) {
		int masked;

		masked = sync_test_and_set_bit(evtchn, sh->evtchn_mask);
		sync_set_bit(evtchn, sh->evtchn_pending);
		if (!masked)
			unmask_evtchn(evtchn);
		ret = 1;
	}

	return ret;
}

static void restore_cpu_pirqs(void)
{
	int pirq, rc, irq, gsi;
	struct physdev_map_pirq map_irq;

	for (pirq = 0; pirq < nr_irqs; pirq++) {
		irq = pirq_to_irq[pirq];
		if (irq == -1)
			continue;

		/* save/restore of PT devices doesn't work, so at this point the
		 * only devices present are GSI based emulated devices */
		gsi = gsi_from_irq(irq);
		if (!gsi)
			continue;

		map_irq.domid = DOMID_SELF;
		map_irq.type = MAP_PIRQ_TYPE_GSI;
		map_irq.index = gsi;
		map_irq.pirq = pirq;

		rc = HYPERVISOR_physdev_op(PHYSDEVOP_map_pirq, &map_irq);
		if (rc) {
			printk(KERN_WARNING "xen map irq failed gsi=%d irq=%d pirq=%d rc=%d\n",
			       gsi, irq, pirq, rc);
			irq_info[irq] = mk_unbound_info();
			pirq_to_irq[pirq] = -1;
			continue;
		}

		printk(KERN_DEBUG "xen: --> irq=%d, pirq=%d\n", irq, map_irq.pirq);

		__startup_pirq(irq);
	}
}

static void restore_cpu_virqs(unsigned int cpu)
{
	struct evtchn_bind_virq bind_virq;
	int virq, irq, evtchn;

	for (virq = 0; virq < NR_VIRQS; virq++) {
		if ((irq = per_cpu(virq_to_irq, cpu)[virq]) == -1)
			continue;

		BUG_ON(virq_from_irq(irq) != virq);

		/* Get a new binding from Xen. */
		bind_virq.virq = virq;
		bind_virq.vcpu = cpu;
		if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq,
						&bind_virq) != 0)
			BUG();
		evtchn = bind_virq.port;

		/* Record the new mapping. */
		evtchn_to_irq[evtchn] = irq;
		irq_info[irq] = mk_virq_info(evtchn, virq);
		bind_evtchn_to_cpu(evtchn, cpu);
	}
}

static void restore_cpu_ipis(unsigned int cpu)
{
	struct evtchn_bind_ipi bind_ipi;
	int ipi, irq, evtchn;

	for (ipi = 0; ipi < XEN_NR_IPIS; ipi++) {
		if ((irq = per_cpu(ipi_to_irq, cpu)[ipi]) == -1)
			continue;

		BUG_ON(ipi_from_irq(irq) != ipi);

		/* Get a new binding from Xen. */
		bind_ipi.vcpu = cpu;
		if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi,
						&bind_ipi) != 0)
			BUG();
		evtchn = bind_ipi.port;

		/* Record the new mapping. */
		evtchn_to_irq[evtchn] = irq;
		irq_info[irq] = mk_ipi_info(evtchn, ipi);
		bind_evtchn_to_cpu(evtchn, cpu);
	}
}

/* Clear an irq's pending state, in preparation for polling on it */
void xen_clear_irq_pending(int irq)
{
	int evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn))
		clear_evtchn(evtchn);
}
EXPORT_SYMBOL(xen_clear_irq_pending);

void xen_set_irq_pending(int irq)
{
	int evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn))
		set_evtchn(evtchn);
}

bool xen_test_irq_pending(int irq)
{
	int evtchn = evtchn_from_irq(irq);
	bool ret = false;

	if (VALID_EVTCHN(evtchn))
		ret = test_evtchn(evtchn);

	return ret;
}

/* Poll waiting for an irq to become pending with timeout. In the usual case,
 * the irq will be disabled so it won't deliver an interrupt. */
void xen_poll_irq_timeout(int irq, u64 timeout)
{
	evtchn_port_t evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn)) {
		struct sched_poll poll;

		poll.nr_ports = 1;
		poll.timeout = timeout;
		set_xen_guest_handle(poll.ports, &evtchn);

		if (HYPERVISOR_sched_op(SCHEDOP_poll, &poll) != 0)
			BUG();
	}
}
EXPORT_SYMBOL(xen_poll_irq_timeout);

/* Poll waiting for an irq to become pending. In the usual case, the
 * irq will be disabled so it won't deliver an interrupt. */
void xen_poll_irq(int irq)
{
	xen_poll_irq_timeout(irq, 0 /* no timeout */);
}
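
/*
 * Illustrative sketch (hypothetical condition): the intended poll
 * pattern, as used by the Xen spinlock code, clears the pending state
 * first, re-checks the wakeup condition, and only then blocks in the
 * hypervisor so a concurrent event cannot be lost.
 */
static void __maybe_unused example_wait_for_irq(int irq, bool (*done)(void))
{
	xen_clear_irq_pending(irq);
	if (done())
		return;
	/* Blocks until the event fires (timeout 0 means no timeout). */
	xen_poll_irq(irq);
}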
void xen_irq_resume(void)
{
	unsigned int cpu, irq, evtchn;

	init_evtchn_cpu_bindings();

	/* New event-channel space is not 'live' yet. */
	for (evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++)
		mask_evtchn(evtchn);

	/* No IRQ <-> event-channel mappings. */
	for (irq = 0; irq < nr_irqs; irq++)
		irq_info[irq].evtchn = 0; /* zap event-channel binding */

	for (evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++)
		evtchn_to_irq[evtchn] = -1;

	for_each_possible_cpu(cpu) {
		restore_cpu_virqs(cpu);
		restore_cpu_ipis(cpu);
	}

	restore_cpu_pirqs();
}

static struct irq_chip xen_dynamic_chip __read_mostly = {
	.name			= "xen-dyn",

	.irq_disable		= disable_dynirq,
	.irq_mask		= disable_dynirq,
	.irq_unmask		= enable_dynirq,

	.irq_eoi		= ack_dynirq,
	.irq_set_affinity	= set_affinity_irq,
	.irq_retrigger		= retrigger_dynirq,
};

static struct irq_chip xen_pirq_chip __read_mostly = {
	.name			= "xen-pirq",

	.irq_startup		= startup_pirq,
	.irq_shutdown		= shutdown_pirq,

	.irq_enable		= enable_pirq,
	.irq_unmask		= enable_pirq,

	.irq_disable		= disable_pirq,
	.irq_mask		= disable_pirq,

	.irq_ack		= ack_pirq,

	.irq_set_affinity	= set_affinity_irq,

	.irq_retrigger		= retrigger_dynirq,
};

static struct irq_chip xen_percpu_chip __read_mostly = {
	.name			= "xen-percpu",

	.irq_disable		= disable_dynirq,
	.irq_mask		= disable_dynirq,
	.irq_unmask		= enable_dynirq,

	.irq_ack		= ack_dynirq,
};

int xen_set_callback_via(uint64_t via)
{
	struct xen_hvm_param a;
	a.domid = DOMID_SELF;
	a.index = HVM_PARAM_CALLBACK_IRQ;
	a.value = via;
	return HYPERVISOR_hvm_op(HVMOP_set_param, &a);
}
EXPORT_SYMBOL_GPL(xen_set_callback_via);
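
/*
 * Worked example, assuming the usual HVM_PARAM_CALLBACK_IRQ encoding
 * with the delivery type in the top byte: HVM_CALLBACK_VECTOR(v)
 * builds a 'via' of roughly (2ULL << 56) | v, i.e. type 2 ("vector")
 * with the vector number in the low bits, which is what
 * xen_callback_vector() below passes in.
 */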
#ifdef CONFIG_XEN_PVHVM
/* Vector callbacks are better than PCI interrupts to receive event
 * channel notifications because we can receive vector callbacks on any
 * vcpu and we don't need PCI support or APIC interactions. */
void xen_callback_vector(void)
{
	int rc;
	uint64_t callback_via;
	if (xen_have_vector_callback) {
		callback_via = HVM_CALLBACK_VECTOR(XEN_HVM_EVTCHN_CALLBACK);
		rc = xen_set_callback_via(callback_via);
		if (rc) {
			printk(KERN_ERR "Request for Xen HVM callback vector"
			       " failed.\n");
			xen_have_vector_callback = 0;
			return;
		}
		printk(KERN_INFO "Xen HVM callback vector for event delivery is "
		       "enabled\n");
		/* in the restore case the vector has already been allocated */
		if (!test_bit(XEN_HVM_EVTCHN_CALLBACK, used_vectors))
			alloc_intr_gate(XEN_HVM_EVTCHN_CALLBACK, xen_hvm_callback_vector);
	}
}
#else
void xen_callback_vector(void) {}
#endif

void __init xen_init_IRQ(void)
{
	int i;

	cpu_evtchn_mask_p = kcalloc(nr_cpu_ids, sizeof(struct cpu_evtchn_s),
				    GFP_KERNEL);
	irq_info = kcalloc(nr_irqs, sizeof(*irq_info), GFP_KERNEL);

	/* We are using nr_irqs as the maximum number of pirq available but
	 * that number is actually chosen by Xen and we don't know exactly
	 * what it is. Be careful choosing high pirq numbers. */
	pirq_to_irq = kcalloc(nr_irqs, sizeof(*pirq_to_irq), GFP_KERNEL);
	for (i = 0; i < nr_irqs; i++)
		pirq_to_irq[i] = -1;

	evtchn_to_irq = kcalloc(NR_EVENT_CHANNELS, sizeof(*evtchn_to_irq),
				GFP_KERNEL);
	for (i = 0; i < NR_EVENT_CHANNELS; i++)
		evtchn_to_irq[i] = -1;

	init_evtchn_cpu_bindings();

	/* No event channels are 'live' right now. */
	for (i = 0; i < NR_EVENT_CHANNELS; i++)
		mask_evtchn(i);

	if (xen_hvm_domain()) {
		xen_callback_vector();
		native_init_IRQ();
		/* pci_xen_hvm_init must be called after native_init_IRQ so that
		 * __acpi_register_gsi can point at the right function */
		pci_xen_hvm_init();
	} else {
		irq_ctx_init(smp_processor_id());
		if (xen_initial_domain())
			xen_setup_pirqs();
	}
}