events.c

/*
 * Xen event channels
 *
 * Xen models interrupts with abstract event channels.  Because each
 * domain gets 1024 event channels, but NR_IRQS is not that large, we
 * must dynamically map irqs<->event channels.  The event channels
 * interface with the rest of the kernel by defining a xen interrupt
 * chip.  When an event is received, it is mapped to an irq and sent
 * through the normal interrupt processing path.
 *
 * There are four kinds of events which can be mapped to an event
 * channel:
 *
 * 1. Inter-domain notifications.  This includes all the virtual
 *    device events, since they're driven by front-ends in another domain
 *    (typically dom0).
 * 2. VIRQs, typically used for timers.  These are per-cpu events.
 * 3. IPIs.
 * 4. PIRQs - Hardware interrupts.
 *
 * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007
 */
#include <linux/linkage.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/bootmem.h>
#include <linux/slab.h>
#include <linux/irqnr.h>
#include <linux/pci.h>

#include <asm/desc.h>
#include <asm/ptrace.h>
#include <asm/irq.h>
#include <asm/idle.h>
#include <asm/io_apic.h>
#include <asm/sync_bitops.h>
#include <asm/xen/pci.h>
#include <asm/xen/hypercall.h>
#include <asm/xen/hypervisor.h>

#include <xen/xen.h>
#include <xen/hvm.h>
#include <xen/xen-ops.h>
#include <xen/events.h>
#include <xen/interface/xen.h>
#include <xen/interface/event_channel.h>
#include <xen/interface/hvm/hvm_op.h>
#include <xen/interface/hvm/params.h>

/*
 * This lock protects updates to the following mapping and reference-count
 * arrays.  The lock does not need to be acquired to read the mapping tables.
 */
static DEFINE_SPINLOCK(irq_mapping_update_lock);

/* IRQ <-> VIRQ mapping. */
static DEFINE_PER_CPU(int [NR_VIRQS], virq_to_irq) = {[0 ... NR_VIRQS-1] = -1};

/* IRQ <-> IPI mapping */
static DEFINE_PER_CPU(int [XEN_NR_IPIS], ipi_to_irq) = {[0 ... XEN_NR_IPIS-1] = -1};

/* Interrupt types. */
enum xen_irq_type {
        IRQT_UNBOUND = 0,
        IRQT_PIRQ,
        IRQT_VIRQ,
        IRQT_IPI,
        IRQT_EVTCHN
};

/*
 * Packed IRQ information:
 * type - enum xen_irq_type
 * event channel - irq->event channel mapping
 * cpu - cpu this event channel is bound to
 * index - type-specific information:
 *    PIRQ - vector, with MSB being "needs EOI", or physical IRQ of the HVM
 *           guest, or GSI (real passthrough IRQ) of the device.
 *    VIRQ - virq number
 *    IPI - IPI vector
 *    EVTCHN -
 */
struct irq_info
{
        enum xen_irq_type type;  /* type */
        unsigned short evtchn;   /* event channel */
        unsigned short cpu;      /* cpu bound */

        union {
                unsigned short virq;
                enum ipi_vector ipi;
                struct {
                        unsigned short pirq;
                        unsigned short gsi;
                        unsigned char vector;
                        unsigned char flags;
                } pirq;
        } u;
};
#define PIRQ_NEEDS_EOI  (1 << 0)
#define PIRQ_SHAREABLE  (1 << 1)

static struct irq_info *irq_info;
static int *pirq_to_irq;

static int *evtchn_to_irq;
struct cpu_evtchn_s {
        unsigned long bits[NR_EVENT_CHANNELS/BITS_PER_LONG];
};

static __initdata struct cpu_evtchn_s init_evtchn_mask = {
        .bits[0 ... (NR_EVENT_CHANNELS/BITS_PER_LONG)-1] = ~0ul,
};
static struct cpu_evtchn_s *cpu_evtchn_mask_p = &init_evtchn_mask;

static inline unsigned long *cpu_evtchn_mask(int cpu)
{
        return cpu_evtchn_mask_p[cpu].bits;
}
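
/*
 * Each cpu owns one bit per event channel in cpu_evtchn_mask: the bit
 * is set on exactly the cpu the channel is currently bound to, and is
 * moved by bind_evtchn_to_cpu() when the binding changes.
 */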

/* Xen will never allocate port zero for any purpose. */
#define VALID_EVTCHN(chn)       ((chn) != 0)

static struct irq_chip xen_dynamic_chip;
static struct irq_chip xen_percpu_chip;
static struct irq_chip xen_pirq_chip;

/* Constructor for packed IRQ information. */
static struct irq_info mk_unbound_info(void)
{
        return (struct irq_info) { .type = IRQT_UNBOUND };
}

static struct irq_info mk_evtchn_info(unsigned short evtchn)
{
        return (struct irq_info) { .type = IRQT_EVTCHN, .evtchn = evtchn,
                        .cpu = 0 };
}

static struct irq_info mk_ipi_info(unsigned short evtchn, enum ipi_vector ipi)
{
        return (struct irq_info) { .type = IRQT_IPI, .evtchn = evtchn,
                        .cpu = 0, .u.ipi = ipi };
}

static struct irq_info mk_virq_info(unsigned short evtchn, unsigned short virq)
{
        return (struct irq_info) { .type = IRQT_VIRQ, .evtchn = evtchn,
                        .cpu = 0, .u.virq = virq };
}

static struct irq_info mk_pirq_info(unsigned short evtchn, unsigned short pirq,
                                    unsigned short gsi, unsigned short vector)
{
        return (struct irq_info) { .type = IRQT_PIRQ, .evtchn = evtchn,
                        .cpu = 0,
                        .u.pirq = { .pirq = pirq, .gsi = gsi, .vector = vector } };
}
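
/*
 * Note that every constructor above starts the channel on cpu 0;
 * init_evtchn_cpu_bindings() and bind_evtchn_to_cpu() keep the per-cpu
 * bitmap in sync with that default until the channel is rebound.
 */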

/*
 * Accessors for packed IRQ information.
 */
static struct irq_info *info_for_irq(unsigned irq)
{
        return &irq_info[irq];
}

static unsigned int evtchn_from_irq(unsigned irq)
{
        if (unlikely(WARN(irq < 0 || irq >= nr_irqs, "Invalid irq %d!\n", irq)))
                return 0;

        return info_for_irq(irq)->evtchn;
}

unsigned irq_from_evtchn(unsigned int evtchn)
{
        return evtchn_to_irq[evtchn];
}
EXPORT_SYMBOL_GPL(irq_from_evtchn);

static enum ipi_vector ipi_from_irq(unsigned irq)
{
        struct irq_info *info = info_for_irq(irq);

        BUG_ON(info == NULL);
        BUG_ON(info->type != IRQT_IPI);

        return info->u.ipi;
}

static unsigned virq_from_irq(unsigned irq)
{
        struct irq_info *info = info_for_irq(irq);

        BUG_ON(info == NULL);
        BUG_ON(info->type != IRQT_VIRQ);

        return info->u.virq;
}

static unsigned pirq_from_irq(unsigned irq)
{
        struct irq_info *info = info_for_irq(irq);

        BUG_ON(info == NULL);
        BUG_ON(info->type != IRQT_PIRQ);

        return info->u.pirq.pirq;
}

static unsigned gsi_from_irq(unsigned irq)
{
        struct irq_info *info = info_for_irq(irq);

        BUG_ON(info == NULL);
        BUG_ON(info->type != IRQT_PIRQ);

        return info->u.pirq.gsi;
}

static unsigned vector_from_irq(unsigned irq)
{
        struct irq_info *info = info_for_irq(irq);

        BUG_ON(info == NULL);
        BUG_ON(info->type != IRQT_PIRQ);

        return info->u.pirq.vector;
}

static enum xen_irq_type type_from_irq(unsigned irq)
{
        return info_for_irq(irq)->type;
}

static unsigned cpu_from_irq(unsigned irq)
{
        return info_for_irq(irq)->cpu;
}

static unsigned int cpu_from_evtchn(unsigned int evtchn)
{
        int irq = evtchn_to_irq[evtchn];
        unsigned ret = 0;

        if (irq != -1)
                ret = cpu_from_irq(irq);

        return ret;
}

static bool pirq_needs_eoi(unsigned irq)
{
        struct irq_info *info = info_for_irq(irq);

        BUG_ON(info->type != IRQT_PIRQ);

        return info->u.pirq.flags & PIRQ_NEEDS_EOI;
}
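
/*
 * A channel is "active" on a cpu when it is pending in the shared info
 * page, bound to that cpu in cpu_evtchn_mask, and not globally masked;
 * active_evtchns() computes this one bitmap word at a time.
 */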
static inline unsigned long active_evtchns(unsigned int cpu,
                                           struct shared_info *sh,
                                           unsigned int idx)
{
        return (sh->evtchn_pending[idx] &
                cpu_evtchn_mask(cpu)[idx] &
                ~sh->evtchn_mask[idx]);
}

static void bind_evtchn_to_cpu(unsigned int chn, unsigned int cpu)
{
        int irq = evtchn_to_irq[chn];

        BUG_ON(irq == -1);
#ifdef CONFIG_SMP
        cpumask_copy(irq_to_desc(irq)->irq_data.affinity, cpumask_of(cpu));
#endif

        clear_bit(chn, cpu_evtchn_mask(cpu_from_irq(irq)));
        set_bit(chn, cpu_evtchn_mask(cpu));

        irq_info[irq].cpu = cpu;
}

static void init_evtchn_cpu_bindings(void)
{
        int i;
#ifdef CONFIG_SMP
        struct irq_desc *desc;

        /* By default all event channels notify CPU#0. */
        for_each_irq_desc(i, desc) {
                cpumask_copy(desc->irq_data.affinity, cpumask_of(0));
        }
#endif

        for_each_possible_cpu(i)
                memset(cpu_evtchn_mask(i),
                       (i == 0) ? ~0 : 0, sizeof(struct cpu_evtchn_s));
}

static inline void clear_evtchn(int port)
{
        struct shared_info *s = HYPERVISOR_shared_info;
        sync_clear_bit(port, &s->evtchn_pending[0]);
}

static inline void set_evtchn(int port)
{
        struct shared_info *s = HYPERVISOR_shared_info;
        sync_set_bit(port, &s->evtchn_pending[0]);
}

static inline int test_evtchn(int port)
{
        struct shared_info *s = HYPERVISOR_shared_info;
        return sync_test_bit(port, &s->evtchn_pending[0]);
}

/**
 * notify_remote_via_irq - send event to remote end of event channel via irq
 * @irq: irq of event channel to send event to
 *
 * Unlike notify_remote_via_evtchn(), this is safe to use across
 * save/restore. Notifications on a broken connection are silently
 * dropped.
 */
void notify_remote_via_irq(int irq)
{
        int evtchn = evtchn_from_irq(irq);

        if (VALID_EVTCHN(evtchn))
                notify_remote_via_evtchn(evtchn);
}
EXPORT_SYMBOL_GPL(notify_remote_via_irq);

static void mask_evtchn(int port)
{
        struct shared_info *s = HYPERVISOR_shared_info;
        sync_set_bit(port, &s->evtchn_mask[0]);
}
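
/*
 * Unmasking has a fast path and a slow path: a port bound to the
 * current cpu can be unmasked locally and any pending event replayed
 * by hand, whereas a port bound to another cpu needs the
 * EVTCHNOP_unmask hypercall so that Xen re-checks on the right vcpu.
 */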
static void unmask_evtchn(int port)
{
        struct shared_info *s = HYPERVISOR_shared_info;
        unsigned int cpu = get_cpu();

        BUG_ON(!irqs_disabled());

        /* Slow path (hypercall) if this is a non-local port. */
        if (unlikely(cpu != cpu_from_evtchn(port))) {
                struct evtchn_unmask unmask = { .port = port };
                (void)HYPERVISOR_event_channel_op(EVTCHNOP_unmask, &unmask);
        } else {
                struct vcpu_info *vcpu_info = __this_cpu_read(xen_vcpu);

                sync_clear_bit(port, &s->evtchn_mask[0]);

                /*
                 * The following is basically the equivalent of
                 * 'hw_resend_irq'. Just like a real IO-APIC we 'lose
                 * the interrupt edge' if the channel is masked.
                 */
                if (sync_test_bit(port, &s->evtchn_pending[0]) &&
                    !sync_test_and_set_bit(port / BITS_PER_LONG,
                                           &vcpu_info->evtchn_pending_sel))
                        vcpu_info->evtchn_upcall_pending = 1;
        }

        put_cpu();
}

static int xen_allocate_irq_dynamic(void)
{
        int first = 0;
        int irq;

#ifdef CONFIG_X86_IO_APIC
        /*
         * For an HVM guest or domain 0 which see "real" (emulated or
         * actual respectively) GSIs we allocate dynamic IRQs
         * e.g. those corresponding to event channels or MSIs
         * etc. from the range above those "real" GSIs to avoid
         * collisions.
         */
        if (xen_initial_domain() || xen_hvm_domain())
                first = get_nr_irqs_gsi();
#endif

retry:
        irq = irq_alloc_desc_from(first, -1);

        if (irq == -ENOMEM && first > NR_IRQS_LEGACY) {
                printk(KERN_ERR "Out of dynamic IRQ space and eating into GSI space. You should increase nr_irqs\n");
                first = max(NR_IRQS_LEGACY, first - NR_IRQS_LEGACY);
                goto retry;
        }

        if (irq < 0)
                panic("No available IRQ to bind to: increase nr_irqs!\n");

        return irq;
}

static int xen_allocate_irq_gsi(unsigned gsi)
{
        int irq;

        /*
         * A PV guest has no concept of a GSI (since it has no ACPI
         * nor access to/knowledge of the physical APICs). Therefore
         * all IRQs are dynamically allocated from the entire IRQ
         * space.
         */
        if (xen_pv_domain() && !xen_initial_domain())
                return xen_allocate_irq_dynamic();

        /* Legacy IRQ descriptors are already allocated by the arch. */
        if (gsi < NR_IRQS_LEGACY)
                return gsi;

        irq = irq_alloc_desc_at(gsi, -1);
        if (irq < 0)
                panic("Unable to allocate IRQ%d (%d)\n", gsi, irq);

        return irq;
}

static void xen_free_irq(unsigned irq)
{
        /* Legacy IRQ descriptors are managed by the arch. */
        if (irq < NR_IRQS_LEGACY)
                return;

        irq_free_desc(irq);
}

static void pirq_unmask_notify(int irq)
{
        struct physdev_eoi eoi = { .irq = pirq_from_irq(irq) };

        if (unlikely(pirq_needs_eoi(irq))) {
                int rc = HYPERVISOR_physdev_op(PHYSDEVOP_eoi, &eoi);
                WARN_ON(rc);
        }
}

static void pirq_query_unmask(int irq)
{
        struct physdev_irq_status_query irq_status;
        struct irq_info *info = info_for_irq(irq);

        BUG_ON(info->type != IRQT_PIRQ);

        irq_status.irq = pirq_from_irq(irq);
        if (HYPERVISOR_physdev_op(PHYSDEVOP_irq_status_query, &irq_status))
                irq_status.flags = 0;

        info->u.pirq.flags &= ~PIRQ_NEEDS_EOI;
        if (irq_status.flags & XENIRQSTAT_needs_eoi)
                info->u.pirq.flags |= PIRQ_NEEDS_EOI;
}

static bool probing_irq(int irq)
{
        struct irq_desc *desc = irq_to_desc(irq);

        return desc && desc->action == NULL;
}

static unsigned int __startup_pirq(unsigned int irq)
{
        struct evtchn_bind_pirq bind_pirq;
        struct irq_info *info = info_for_irq(irq);
        int evtchn = evtchn_from_irq(irq);
        int rc;

        BUG_ON(info->type != IRQT_PIRQ);

        if (VALID_EVTCHN(evtchn))
                goto out;

        bind_pirq.pirq = pirq_from_irq(irq);
        /* NB. We are happy to share unless we are probing. */
        bind_pirq.flags = info->u.pirq.flags & PIRQ_SHAREABLE ?
                                        BIND_PIRQ__WILL_SHARE : 0;
        rc = HYPERVISOR_event_channel_op(EVTCHNOP_bind_pirq, &bind_pirq);
        if (rc != 0) {
                if (!probing_irq(irq))
                        printk(KERN_INFO "Failed to obtain physical IRQ %d\n",
                               irq);
                return 0;
        }
        evtchn = bind_pirq.port;

        pirq_query_unmask(irq);

        evtchn_to_irq[evtchn] = irq;
        bind_evtchn_to_cpu(evtchn, 0);
        info->evtchn = evtchn;

out:
        unmask_evtchn(evtchn);
        pirq_unmask_notify(irq);

        return 0;
}

static unsigned int startup_pirq(struct irq_data *data)
{
        return __startup_pirq(data->irq);
}

static void shutdown_pirq(struct irq_data *data)
{
        struct evtchn_close close;
        unsigned int irq = data->irq;
        struct irq_info *info = info_for_irq(irq);
        int evtchn = evtchn_from_irq(irq);

        BUG_ON(info->type != IRQT_PIRQ);

        if (!VALID_EVTCHN(evtchn))
                return;

        mask_evtchn(evtchn);

        close.port = evtchn;
        if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close) != 0)
                BUG();

        bind_evtchn_to_cpu(evtchn, 0);
        evtchn_to_irq[evtchn] = -1;
        info->evtchn = 0;
}

static void enable_pirq(struct irq_data *data)
{
        startup_pirq(data);
}

static void disable_pirq(struct irq_data *data)
{
}

static void ack_pirq(struct irq_data *data)
{
        int evtchn = evtchn_from_irq(data->irq);

        move_native_irq(data->irq);

        if (VALID_EVTCHN(evtchn)) {
                mask_evtchn(evtchn);
                clear_evtchn(evtchn);
        }
}

static int find_irq_by_gsi(unsigned gsi)
{
        int irq;

        for (irq = 0; irq < nr_irqs; irq++) {
                struct irq_info *info = info_for_irq(irq);

                if (info == NULL || info->type != IRQT_PIRQ)
                        continue;

                if (gsi_from_irq(irq) == gsi)
                        return irq;
        }

        return -1;
}

int xen_allocate_pirq(unsigned gsi, int shareable, char *name)
{
        return xen_map_pirq_gsi(gsi, gsi, shareable, name);
}

/*
 * xen_map_pirq_gsi might allocate irqs from the top down, as a
 * consequence don't assume that the irq number returned has a low value
 * or can be used as a pirq number unless you know otherwise.
 *
 * One notable exception is when xen_map_pirq_gsi is called passing a
 * hardware gsi as argument, in that case the irq number returned
 * matches the gsi number passed as second argument.
 *
 * Note: We don't assign an event channel until the irq actually started
 * up.  Return an existing irq if we've already got one for the gsi.
 */
int xen_map_pirq_gsi(unsigned pirq, unsigned gsi, int shareable, char *name)
{
        int irq = 0;
        struct physdev_irq irq_op;

        spin_lock(&irq_mapping_update_lock);

        if ((pirq > nr_irqs) || (gsi > nr_irqs)) {
                printk(KERN_WARNING "xen_map_pirq_gsi: %s %s is incorrect!\n",
                       pirq > nr_irqs ? "pirq" : "",
                       gsi > nr_irqs ? "gsi" : "");
                goto out;
        }

        irq = find_irq_by_gsi(gsi);
        if (irq != -1) {
                printk(KERN_INFO "xen_map_pirq_gsi: returning irq %d for gsi %u\n",
                       irq, gsi);
                goto out;       /* XXX need refcount? */
        }

        irq = xen_allocate_irq_gsi(gsi);

        set_irq_chip_and_handler_name(irq, &xen_pirq_chip,
                                      handle_level_irq, name);

        irq_op.irq = irq;
        irq_op.vector = 0;

        /* Only the privileged domain can do this. For non-priv, the pcifront
         * driver provides a PCI bus that does the call to do exactly
         * this in the priv domain. */
        if (xen_initial_domain() &&
            HYPERVISOR_physdev_op(PHYSDEVOP_alloc_irq_vector, &irq_op)) {
                xen_free_irq(irq);
                irq = -ENOSPC;
                goto out;
        }

        irq_info[irq] = mk_pirq_info(0, pirq, gsi, irq_op.vector);
        irq_info[irq].u.pirq.flags |= shareable ? PIRQ_SHAREABLE : 0;
        pirq_to_irq[pirq] = irq;

out:
        spin_unlock(&irq_mapping_update_lock);

        return irq;
}

#ifdef CONFIG_PCI_MSI
int xen_allocate_pirq_msi(struct pci_dev *dev, struct msi_desc *msidesc)
{
        int rc;
        struct physdev_get_free_pirq op_get_free_pirq;

        op_get_free_pirq.type = MAP_PIRQ_TYPE_MSI;
        rc = HYPERVISOR_physdev_op(PHYSDEVOP_get_free_pirq, &op_get_free_pirq);

        WARN_ONCE(rc == -ENOSYS,
                  "hypervisor does not support the PHYSDEVOP_get_free_pirq interface\n");

        return rc ? -1 : op_get_free_pirq.pirq;
}

int xen_bind_pirq_msi_to_irq(struct pci_dev *dev, struct msi_desc *msidesc,
                             int pirq, int vector, const char *name)
{
        int irq, ret;

        spin_lock(&irq_mapping_update_lock);

        irq = xen_allocate_irq_dynamic();
        if (irq == -1)
                goto out;

        set_irq_chip_and_handler_name(irq, &xen_pirq_chip,
                                      handle_level_irq, name);

        irq_info[irq] = mk_pirq_info(0, pirq, 0, vector);
        pirq_to_irq[pirq] = irq;
        ret = set_irq_msi(irq, msidesc);
        if (ret < 0)
                goto error_irq;
out:
        spin_unlock(&irq_mapping_update_lock);
        return irq;
error_irq:
        spin_unlock(&irq_mapping_update_lock);
        xen_free_irq(irq);
        return -1;
}
#endif

int xen_destroy_irq(int irq)
{
        struct irq_desc *desc;
        struct physdev_unmap_pirq unmap_irq;
        struct irq_info *info = info_for_irq(irq);
        int rc = -ENOENT;

        spin_lock(&irq_mapping_update_lock);

        desc = irq_to_desc(irq);
        if (!desc)
                goto out;

        if (xen_initial_domain()) {
                unmap_irq.pirq = info->u.pirq.pirq;
                unmap_irq.domid = DOMID_SELF;
                rc = HYPERVISOR_physdev_op(PHYSDEVOP_unmap_pirq, &unmap_irq);
                if (rc) {
                        printk(KERN_WARNING "unmap irq failed %d\n", rc);
                        goto out;
                }
        }
        pirq_to_irq[info->u.pirq.pirq] = -1;

        irq_info[irq] = mk_unbound_info();

        xen_free_irq(irq);

out:
        spin_unlock(&irq_mapping_update_lock);
        return rc;
}

int xen_vector_from_irq(unsigned irq)
{
        return vector_from_irq(irq);
}

int xen_gsi_from_irq(unsigned irq)
{
        return gsi_from_irq(irq);
}

int xen_irq_from_pirq(unsigned pirq)
{
        return pirq_to_irq[pirq];
}

int bind_evtchn_to_irq(unsigned int evtchn)
{
        int irq;

        spin_lock(&irq_mapping_update_lock);

        irq = evtchn_to_irq[evtchn];

        if (irq == -1) {
                irq = xen_allocate_irq_dynamic();

                set_irq_chip_and_handler_name(irq, &xen_dynamic_chip,
                                              handle_fasteoi_irq, "event");

                evtchn_to_irq[evtchn] = irq;
                irq_info[irq] = mk_evtchn_info(evtchn);
        }

        spin_unlock(&irq_mapping_update_lock);

        return irq;
}
EXPORT_SYMBOL_GPL(bind_evtchn_to_irq);

static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu)
{
        struct evtchn_bind_ipi bind_ipi;
        int evtchn, irq;

        spin_lock(&irq_mapping_update_lock);

        irq = per_cpu(ipi_to_irq, cpu)[ipi];

        if (irq == -1) {
                irq = xen_allocate_irq_dynamic();
                if (irq < 0)
                        goto out;

                set_irq_chip_and_handler_name(irq, &xen_percpu_chip,
                                              handle_percpu_irq, "ipi");

                bind_ipi.vcpu = cpu;
                if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi,
                                                &bind_ipi) != 0)
                        BUG();
                evtchn = bind_ipi.port;

                evtchn_to_irq[evtchn] = irq;
                irq_info[irq] = mk_ipi_info(evtchn, ipi);
                per_cpu(ipi_to_irq, cpu)[ipi] = irq;

                bind_evtchn_to_cpu(evtchn, cpu);
        }

out:
        spin_unlock(&irq_mapping_update_lock);
        return irq;
}

int bind_virq_to_irq(unsigned int virq, unsigned int cpu)
{
        struct evtchn_bind_virq bind_virq;
        int evtchn, irq;

        spin_lock(&irq_mapping_update_lock);

        irq = per_cpu(virq_to_irq, cpu)[virq];

        if (irq == -1) {
                irq = xen_allocate_irq_dynamic();

                set_irq_chip_and_handler_name(irq, &xen_percpu_chip,
                                              handle_percpu_irq, "virq");

                bind_virq.virq = virq;
                bind_virq.vcpu = cpu;
                if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq,
                                                &bind_virq) != 0)
                        BUG();
                evtchn = bind_virq.port;

                evtchn_to_irq[evtchn] = irq;
                irq_info[irq] = mk_virq_info(evtchn, virq);

                per_cpu(virq_to_irq, cpu)[virq] = irq;

                bind_evtchn_to_cpu(evtchn, cpu);
        }

        spin_unlock(&irq_mapping_update_lock);

        return irq;
}

static void unbind_from_irq(unsigned int irq)
{
        struct evtchn_close close;
        int evtchn = evtchn_from_irq(irq);

        spin_lock(&irq_mapping_update_lock);

        if (VALID_EVTCHN(evtchn)) {
                close.port = evtchn;
                if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close) != 0)
                        BUG();

                switch (type_from_irq(irq)) {
                case IRQT_VIRQ:
                        per_cpu(virq_to_irq, cpu_from_evtchn(evtchn))
                                [virq_from_irq(irq)] = -1;
                        break;
                case IRQT_IPI:
                        per_cpu(ipi_to_irq, cpu_from_evtchn(evtchn))
                                [ipi_from_irq(irq)] = -1;
                        break;
                default:
                        break;
                }

                /* Closed ports are implicitly re-bound to VCPU0. */
                bind_evtchn_to_cpu(evtchn, 0);

                evtchn_to_irq[evtchn] = -1;
        }

        if (irq_info[irq].type != IRQT_UNBOUND) {
                irq_info[irq] = mk_unbound_info();

                xen_free_irq(irq);
        }

        spin_unlock(&irq_mapping_update_lock);
}

int bind_evtchn_to_irqhandler(unsigned int evtchn,
                              irq_handler_t handler,
                              unsigned long irqflags,
                              const char *devname, void *dev_id)
{
        unsigned int irq;
        int retval;

        irq = bind_evtchn_to_irq(evtchn);
        retval = request_irq(irq, handler, irqflags, devname, dev_id);
        if (retval != 0) {
                unbind_from_irq(irq);
                return retval;
        }

        return irq;
}
EXPORT_SYMBOL_GPL(bind_evtchn_to_irqhandler);
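
/*
 * Example (hypothetical caller, not code from this file): a frontend
 * driver that has been granted an event channel would typically do
 *
 *      irq = bind_evtchn_to_irqhandler(evtchn, my_handler, 0,
 *                                      "my-frontend", my_dev);
 *      if (irq < 0)
 *              return irq;
 *      ...
 *      unbind_from_irqhandler(irq, my_dev);
 *
 * where my_handler and my_dev are placeholders for the driver's own
 * irq_handler_t and context pointer.
 */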

int bind_virq_to_irqhandler(unsigned int virq, unsigned int cpu,
                            irq_handler_t handler,
                            unsigned long irqflags, const char *devname, void *dev_id)
{
        unsigned int irq;
        int retval;

        irq = bind_virq_to_irq(virq, cpu);
        retval = request_irq(irq, handler, irqflags, devname, dev_id);
        if (retval != 0) {
                unbind_from_irq(irq);
                return retval;
        }

        return irq;
}
EXPORT_SYMBOL_GPL(bind_virq_to_irqhandler);

int bind_ipi_to_irqhandler(enum ipi_vector ipi,
                           unsigned int cpu,
                           irq_handler_t handler,
                           unsigned long irqflags,
                           const char *devname,
                           void *dev_id)
{
        int irq, retval;

        irq = bind_ipi_to_irq(ipi, cpu);
        if (irq < 0)
                return irq;

        irqflags |= IRQF_NO_SUSPEND | IRQF_FORCE_RESUME;
        retval = request_irq(irq, handler, irqflags, devname, dev_id);
        if (retval != 0) {
                unbind_from_irq(irq);
                return retval;
        }

        return irq;
}

void unbind_from_irqhandler(unsigned int irq, void *dev_id)
{
        free_irq(irq, dev_id);
        unbind_from_irq(irq);
}
EXPORT_SYMBOL_GPL(unbind_from_irqhandler);

void xen_send_IPI_one(unsigned int cpu, enum ipi_vector vector)
{
        int irq = per_cpu(ipi_to_irq, cpu)[vector];
        BUG_ON(irq < 0);
        notify_remote_via_irq(irq);
}

irqreturn_t xen_debug_interrupt(int irq, void *dev_id)
{
        struct shared_info *sh = HYPERVISOR_shared_info;
        int cpu = smp_processor_id();
        unsigned long *cpu_evtchn = cpu_evtchn_mask(cpu);
        int i;
        unsigned long flags;
        static DEFINE_SPINLOCK(debug_lock);
        struct vcpu_info *v;

        spin_lock_irqsave(&debug_lock, flags);

        printk("\nvcpu %d\n ", cpu);

        for_each_online_cpu(i) {
                int pending;
                v = per_cpu(xen_vcpu, i);
                pending = (get_irq_regs() && i == cpu)
                        ? xen_irqs_disabled(get_irq_regs())
                        : v->evtchn_upcall_mask;
                printk("%d: masked=%d pending=%d event_sel %0*lx\n ", i,
                       pending, v->evtchn_upcall_pending,
                       (int)(sizeof(v->evtchn_pending_sel)*2),
                       v->evtchn_pending_sel);
        }
        v = per_cpu(xen_vcpu, cpu);

        printk("\npending:\n ");
        for (i = ARRAY_SIZE(sh->evtchn_pending)-1; i >= 0; i--)
                printk("%0*lx%s", (int)sizeof(sh->evtchn_pending[0])*2,
                       sh->evtchn_pending[i],
                       i % 8 == 0 ? "\n " : " ");
        printk("\nglobal mask:\n ");
        for (i = ARRAY_SIZE(sh->evtchn_mask)-1; i >= 0; i--)
                printk("%0*lx%s",
                       (int)(sizeof(sh->evtchn_mask[0])*2),
                       sh->evtchn_mask[i],
                       i % 8 == 0 ? "\n " : " ");

        printk("\nglobally unmasked:\n ");
        for (i = ARRAY_SIZE(sh->evtchn_mask)-1; i >= 0; i--)
                printk("%0*lx%s", (int)(sizeof(sh->evtchn_mask[0])*2),
                       sh->evtchn_pending[i] & ~sh->evtchn_mask[i],
                       i % 8 == 0 ? "\n " : " ");

        printk("\nlocal cpu%d mask:\n ", cpu);
        for (i = (NR_EVENT_CHANNELS/BITS_PER_LONG)-1; i >= 0; i--)
                printk("%0*lx%s", (int)(sizeof(cpu_evtchn[0])*2),
                       cpu_evtchn[i],
                       i % 8 == 0 ? "\n " : " ");

        printk("\nlocally unmasked:\n ");
        for (i = ARRAY_SIZE(sh->evtchn_mask)-1; i >= 0; i--) {
                unsigned long pending = sh->evtchn_pending[i]
                        & ~sh->evtchn_mask[i]
                        & cpu_evtchn[i];
                printk("%0*lx%s", (int)(sizeof(sh->evtchn_mask[0])*2),
                       pending, i % 8 == 0 ? "\n " : " ");
        }

        printk("\npending list:\n");
        for (i = 0; i < NR_EVENT_CHANNELS; i++) {
                if (sync_test_bit(i, sh->evtchn_pending)) {
                        int word_idx = i / BITS_PER_LONG;
                        printk(" %d: event %d -> irq %d%s%s%s\n",
                               cpu_from_evtchn(i), i,
                               evtchn_to_irq[i],
                               sync_test_bit(word_idx, &v->evtchn_pending_sel)
                               ? "" : " l2-clear",
                               !sync_test_bit(i, sh->evtchn_mask)
                               ? "" : " globally-masked",
                               sync_test_bit(i, cpu_evtchn)
                               ? "" : " locally-masked");
                }
        }

        spin_unlock_irqrestore(&debug_lock, flags);

        return IRQ_HANDLED;
}

static DEFINE_PER_CPU(unsigned, xed_nesting_count);

/*
 * Mask out the i least significant bits of w
 */
#define MASK_LSBS(w, i) (w & ((~0UL) << i))
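
/* For example, MASK_LSBS(0xf3UL, 2) == 0xf0UL: bits 0 and 1 are cleared. */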

/*
 * Search the CPU's pending events bitmasks.  For each one found, map
 * the event number to an irq, and feed it into do_IRQ() for
 * handling.
 *
 * Xen uses a two-level bitmap to speed searching.  The first level is
 * a bitset of words which contain pending event bits.  The second
 * level is a bitset of pending events themselves.
 */
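
/*
 * The scan below resumes one position past the word/bit handled last
 * time (last_word_idx/last_bit_idx), so a single busy port cannot
 * starve the ports numbered after it.
 */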
static void __xen_evtchn_do_upcall(void)
{
        static unsigned int last_word_idx = BITS_PER_LONG - 1;
        static unsigned int last_bit_idx = BITS_PER_LONG - 1;
        int word_idx, bit_idx;
        int cpu = get_cpu();
        struct shared_info *s = HYPERVISOR_shared_info;
        struct vcpu_info *vcpu_info = __this_cpu_read(xen_vcpu);
        unsigned count;

        do {
                unsigned long pending_words;

                vcpu_info->evtchn_upcall_pending = 0;

                if (__this_cpu_inc_return(xed_nesting_count) - 1)
                        goto out;

#ifndef CONFIG_X86 /* No need for a barrier -- XCHG is a barrier on x86. */
                /* Clear master flag /before/ clearing selector flag. */
                wmb();
#endif
                pending_words = xchg(&vcpu_info->evtchn_pending_sel, 0);

                word_idx = last_word_idx;
                bit_idx = last_bit_idx;

                while (pending_words != 0) {
                        unsigned long pending_bits;
                        unsigned long words;

                        word_idx = (word_idx + 1) % BITS_PER_LONG;
                        words = MASK_LSBS(pending_words, word_idx);

                        /*
                         * If we masked out all events, wrap around to the
                         * beginning.
                         */
                        if (words == 0) {
                                word_idx = BITS_PER_LONG - 1;
                                bit_idx = BITS_PER_LONG - 1;
                                continue;
                        }
                        word_idx = __ffs(words);

                        do {
                                unsigned long bits;
                                int port, irq;
                                struct irq_desc *desc;

                                pending_bits = active_evtchns(cpu, s, word_idx);

                                bit_idx = (bit_idx + 1) % BITS_PER_LONG;
                                bits = MASK_LSBS(pending_bits, bit_idx);

                                /* If we masked out all events, move on. */
                                if (bits == 0) {
                                        bit_idx = BITS_PER_LONG - 1;
                                        break;
                                }
                                bit_idx = __ffs(bits);

                                /* Process port. */
                                port = (word_idx * BITS_PER_LONG) + bit_idx;
                                irq = evtchn_to_irq[port];

                                mask_evtchn(port);
                                clear_evtchn(port);

                                if (irq != -1) {
                                        desc = irq_to_desc(irq);
                                        if (desc)
                                                generic_handle_irq_desc(irq, desc);
                                }

                                /*
                                 * If this is the final port processed, we'll
                                 * pick up here+1 next time.
                                 */
                                last_word_idx = word_idx;
                                last_bit_idx = bit_idx;
                        } while (bit_idx != BITS_PER_LONG - 1);

                        pending_bits = active_evtchns(cpu, s, word_idx);

                        /*
                         * We handled all ports, so we can clear the
                         * selector bit.
                         */
                        if (pending_bits == 0)
                                pending_words &= ~(1UL << word_idx);
                }

                BUG_ON(!irqs_disabled());

                count = __this_cpu_read(xed_nesting_count);
                __this_cpu_write(xed_nesting_count, 0);
        } while (count != 1 || vcpu_info->evtchn_upcall_pending);

out:
        put_cpu();
}

void xen_evtchn_do_upcall(struct pt_regs *regs)
{
        struct pt_regs *old_regs = set_irq_regs(regs);

        exit_idle();
        irq_enter();

        __xen_evtchn_do_upcall();

        irq_exit();
        set_irq_regs(old_regs);
}

void xen_hvm_evtchn_do_upcall(void)
{
        __xen_evtchn_do_upcall();
}
EXPORT_SYMBOL_GPL(xen_hvm_evtchn_do_upcall);

/* Rebind a new event channel to an existing irq. */
void rebind_evtchn_irq(int evtchn, int irq)
{
        struct irq_info *info = info_for_irq(irq);

        /* Make sure the irq is masked, since the new event channel
           will also be masked. */
        disable_irq(irq);

        spin_lock(&irq_mapping_update_lock);

        /* After resume the irq<->evtchn mappings are all cleared out */
        BUG_ON(evtchn_to_irq[evtchn] != -1);
        /* Expect irq to have been bound before,
           so there should be a proper type */
        BUG_ON(info->type == IRQT_UNBOUND);

        evtchn_to_irq[evtchn] = irq;
        irq_info[irq] = mk_evtchn_info(evtchn);

        spin_unlock(&irq_mapping_update_lock);

        /* new event channels are always bound to cpu 0 */
        irq_set_affinity(irq, cpumask_of(0));

        /* Unmask the event channel. */
        enable_irq(irq);
}

/* Rebind an evtchn so that it gets delivered to a specific cpu */
static int rebind_irq_to_cpu(unsigned irq, unsigned tcpu)
{
        struct evtchn_bind_vcpu bind_vcpu;
        int evtchn = evtchn_from_irq(irq);

        /* events delivered via platform PCI interrupts are always
         * routed to vcpu 0 */
        if (!VALID_EVTCHN(evtchn) ||
            (xen_hvm_domain() && !xen_have_vector_callback))
                return -1;

        /* Send future instances of this interrupt to other vcpu. */
        bind_vcpu.port = evtchn;
        bind_vcpu.vcpu = tcpu;

        /*
         * If this fails, it usually just indicates that we're dealing with a
         * virq or IPI channel, which don't actually need to be rebound. Ignore
         * it, but don't do the xenlinux-level rebind in that case.
         */
        if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_vcpu, &bind_vcpu) >= 0)
                bind_evtchn_to_cpu(evtchn, tcpu);

        return 0;
}

static int set_affinity_irq(struct irq_data *data, const struct cpumask *dest,
                            bool force)
{
        unsigned tcpu = cpumask_first(dest);

        return rebind_irq_to_cpu(data->irq, tcpu);
}

int resend_irq_on_evtchn(unsigned int irq)
{
        int masked, evtchn = evtchn_from_irq(irq);
        struct shared_info *s = HYPERVISOR_shared_info;

        if (!VALID_EVTCHN(evtchn))
                return 1;

        masked = sync_test_and_set_bit(evtchn, s->evtchn_mask);
        sync_set_bit(evtchn, s->evtchn_pending);
        if (!masked)
                unmask_evtchn(evtchn);

        return 1;
}
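
/*
 * Note the resend pattern above: the test_and_set of the mask bit
 * remembers whether the channel was already masked, the pending bit is
 * raised while the channel is masked, and only a channel we masked
 * ourselves is unmasked again, which replays the event through
 * unmask_evtchn().  retrigger_dynirq() below uses the same trick.
 */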

static void enable_dynirq(struct irq_data *data)
{
        int evtchn = evtchn_from_irq(data->irq);

        if (VALID_EVTCHN(evtchn))
                unmask_evtchn(evtchn);
}

static void disable_dynirq(struct irq_data *data)
{
        int evtchn = evtchn_from_irq(data->irq);

        if (VALID_EVTCHN(evtchn))
                mask_evtchn(evtchn);
}

static void ack_dynirq(struct irq_data *data)
{
        int evtchn = evtchn_from_irq(data->irq);

        move_masked_irq(data->irq);

        if (VALID_EVTCHN(evtchn))
                unmask_evtchn(evtchn);
}

static int retrigger_dynirq(struct irq_data *data)
{
        int evtchn = evtchn_from_irq(data->irq);
        struct shared_info *sh = HYPERVISOR_shared_info;
        int ret = 0;

        if (VALID_EVTCHN(evtchn)) {
                int masked;

                masked = sync_test_and_set_bit(evtchn, sh->evtchn_mask);
                sync_set_bit(evtchn, sh->evtchn_pending);
                if (!masked)
                        unmask_evtchn(evtchn);
                ret = 1;
        }

        return ret;
}

static void restore_cpu_pirqs(void)
{
        int pirq, rc, irq, gsi;
        struct physdev_map_pirq map_irq;

        for (pirq = 0; pirq < nr_irqs; pirq++) {
                irq = pirq_to_irq[pirq];
                if (irq == -1)
                        continue;

                /* save/restore of PT devices doesn't work, so at this point the
                 * only devices present are GSI based emulated devices */
                gsi = gsi_from_irq(irq);
                if (!gsi)
                        continue;

                map_irq.domid = DOMID_SELF;
                map_irq.type = MAP_PIRQ_TYPE_GSI;
                map_irq.index = gsi;
                map_irq.pirq = pirq;

                rc = HYPERVISOR_physdev_op(PHYSDEVOP_map_pirq, &map_irq);
                if (rc) {
                        printk(KERN_WARNING "xen map irq failed gsi=%d irq=%d pirq=%d rc=%d\n",
                               gsi, irq, pirq, rc);
                        irq_info[irq] = mk_unbound_info();
                        pirq_to_irq[pirq] = -1;
                        continue;
                }

                printk(KERN_DEBUG "xen: --> irq=%d, pirq=%d\n", irq, map_irq.pirq);

                __startup_pirq(irq);
        }
}

static void restore_cpu_virqs(unsigned int cpu)
{
        struct evtchn_bind_virq bind_virq;
        int virq, irq, evtchn;

        for (virq = 0; virq < NR_VIRQS; virq++) {
                if ((irq = per_cpu(virq_to_irq, cpu)[virq]) == -1)
                        continue;

                BUG_ON(virq_from_irq(irq) != virq);

                /* Get a new binding from Xen. */
                bind_virq.virq = virq;
                bind_virq.vcpu = cpu;
                if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq,
                                                &bind_virq) != 0)
                        BUG();
                evtchn = bind_virq.port;

                /* Record the new mapping. */
                evtchn_to_irq[evtchn] = irq;
                irq_info[irq] = mk_virq_info(evtchn, virq);
                bind_evtchn_to_cpu(evtchn, cpu);
        }
}

static void restore_cpu_ipis(unsigned int cpu)
{
        struct evtchn_bind_ipi bind_ipi;
        int ipi, irq, evtchn;

        for (ipi = 0; ipi < XEN_NR_IPIS; ipi++) {
                if ((irq = per_cpu(ipi_to_irq, cpu)[ipi]) == -1)
                        continue;

                BUG_ON(ipi_from_irq(irq) != ipi);

                /* Get a new binding from Xen. */
                bind_ipi.vcpu = cpu;
                if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi,
                                                &bind_ipi) != 0)
                        BUG();
                evtchn = bind_ipi.port;

                /* Record the new mapping. */
                evtchn_to_irq[evtchn] = irq;
                irq_info[irq] = mk_ipi_info(evtchn, ipi);
                bind_evtchn_to_cpu(evtchn, cpu);
        }
}

/* Clear an irq's pending state, in preparation for polling on it */
void xen_clear_irq_pending(int irq)
{
        int evtchn = evtchn_from_irq(irq);

        if (VALID_EVTCHN(evtchn))
                clear_evtchn(evtchn);
}
EXPORT_SYMBOL(xen_clear_irq_pending);

void xen_set_irq_pending(int irq)
{
        int evtchn = evtchn_from_irq(irq);

        if (VALID_EVTCHN(evtchn))
                set_evtchn(evtchn);
}

bool xen_test_irq_pending(int irq)
{
        int evtchn = evtchn_from_irq(irq);
        bool ret = false;

        if (VALID_EVTCHN(evtchn))
                ret = test_evtchn(evtchn);

        return ret;
}

/* Poll waiting for an irq to become pending with timeout.  In the usual case,
 * the irq will be disabled so it won't deliver an interrupt. */
void xen_poll_irq_timeout(int irq, u64 timeout)
{
        evtchn_port_t evtchn = evtchn_from_irq(irq);

        if (VALID_EVTCHN(evtchn)) {
                struct sched_poll poll;

                poll.nr_ports = 1;
                poll.timeout = timeout;
                set_xen_guest_handle(poll.ports, &evtchn);

                if (HYPERVISOR_sched_op(SCHEDOP_poll, &poll) != 0)
                        BUG();
        }
}
EXPORT_SYMBOL(xen_poll_irq_timeout);

/* Poll waiting for an irq to become pending.  In the usual case, the
 * irq will be disabled so it won't deliver an interrupt. */
void xen_poll_irq(int irq)
{
        xen_poll_irq_timeout(irq, 0 /* no timeout */);
}
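
/*
 * Typical caller pattern (the Xen spinlock code, for instance):
 * xen_clear_irq_pending(irq), recheck the wait condition, block in
 * xen_poll_irq(irq), then use xen_test_irq_pending(irq) to tell a
 * genuine wakeup from a spurious one.
 */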

void xen_irq_resume(void)
{
        unsigned int cpu, irq, evtchn;

        init_evtchn_cpu_bindings();

        /* New event-channel space is not 'live' yet. */
        for (evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++)
                mask_evtchn(evtchn);

        /* No IRQ <-> event-channel mappings. */
        for (irq = 0; irq < nr_irqs; irq++)
                irq_info[irq].evtchn = 0; /* zap event-channel binding */

        for (evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++)
                evtchn_to_irq[evtchn] = -1;

        for_each_possible_cpu(cpu) {
                restore_cpu_virqs(cpu);
                restore_cpu_ipis(cpu);
        }

        restore_cpu_pirqs();
}

static struct irq_chip xen_dynamic_chip __read_mostly = {
        .name                   = "xen-dyn",

        .irq_disable            = disable_dynirq,
        .irq_mask               = disable_dynirq,
        .irq_unmask             = enable_dynirq,

        .irq_eoi                = ack_dynirq,
        .irq_set_affinity       = set_affinity_irq,
        .irq_retrigger          = retrigger_dynirq,
};

static struct irq_chip xen_pirq_chip __read_mostly = {
        .name                   = "xen-pirq",

        .irq_startup            = startup_pirq,
        .irq_shutdown           = shutdown_pirq,

        .irq_enable             = enable_pirq,
        .irq_unmask             = enable_pirq,

        .irq_disable            = disable_pirq,
        .irq_mask               = disable_pirq,

        .irq_ack                = ack_pirq,

        .irq_set_affinity       = set_affinity_irq,

        .irq_retrigger          = retrigger_dynirq,
};

static struct irq_chip xen_percpu_chip __read_mostly = {
        .name                   = "xen-percpu",

        .irq_disable            = disable_dynirq,
        .irq_mask               = disable_dynirq,
        .irq_unmask             = enable_dynirq,

        .irq_ack                = ack_dynirq,
};

int xen_set_callback_via(uint64_t via)
{
        struct xen_hvm_param a;
        a.domid = DOMID_SELF;
        a.index = HVM_PARAM_CALLBACK_IRQ;
        a.value = via;
        return HYPERVISOR_hvm_op(HVMOP_set_param, &a);
}
EXPORT_SYMBOL_GPL(xen_set_callback_via);
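
/*
 * The 'via' value encodes how Xen should deliver event-channel
 * notifications to this domain; xen_callback_vector() below builds it
 * with HVM_CALLBACK_VECTOR() to request delivery on a plain vector.
 */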

#ifdef CONFIG_XEN_PVHVM
/* Vector callbacks are better than PCI interrupts to receive event
 * channel notifications because we can receive vector callbacks on any
 * vcpu and we don't need PCI support or APIC interactions. */
void xen_callback_vector(void)
{
        int rc;
        uint64_t callback_via;
        if (xen_have_vector_callback) {
                callback_via = HVM_CALLBACK_VECTOR(XEN_HVM_EVTCHN_CALLBACK);
                rc = xen_set_callback_via(callback_via);
                if (rc) {
                        printk(KERN_ERR "Request for Xen HVM callback vector"
                                        " failed.\n");
                        xen_have_vector_callback = 0;
                        return;
                }
                printk(KERN_INFO "Xen HVM callback vector for event delivery is "
                                "enabled\n");
                /* in the restore case the vector has already been allocated */
                if (!test_bit(XEN_HVM_EVTCHN_CALLBACK, used_vectors))
                        alloc_intr_gate(XEN_HVM_EVTCHN_CALLBACK, xen_hvm_callback_vector);
        }
}
#else
void xen_callback_vector(void) {}
#endif

void __init xen_init_IRQ(void)
{
        int i;

        cpu_evtchn_mask_p = kcalloc(nr_cpu_ids, sizeof(struct cpu_evtchn_s),
                                    GFP_KERNEL);
        irq_info = kcalloc(nr_irqs, sizeof(*irq_info), GFP_KERNEL);

        /* We are using nr_irqs as the maximum number of pirq available but
         * that number is actually chosen by Xen and we don't know exactly
         * what it is. Be careful choosing high pirq numbers. */
        pirq_to_irq = kcalloc(nr_irqs, sizeof(*pirq_to_irq), GFP_KERNEL);
        for (i = 0; i < nr_irqs; i++)
                pirq_to_irq[i] = -1;

        evtchn_to_irq = kcalloc(NR_EVENT_CHANNELS, sizeof(*evtchn_to_irq),
                                GFP_KERNEL);
        for (i = 0; i < NR_EVENT_CHANNELS; i++)
                evtchn_to_irq[i] = -1;

        init_evtchn_cpu_bindings();

        /* No event channels are 'live' right now. */
        for (i = 0; i < NR_EVENT_CHANNELS; i++)
                mask_evtchn(i);

        if (xen_hvm_domain()) {
                xen_callback_vector();
                native_init_IRQ();
                /* pci_xen_hvm_init must be called after native_init_IRQ so that
                 * __acpi_register_gsi can point at the right function */
                pci_xen_hvm_init();
        } else {
                irq_ctx_init(smp_processor_id());
                if (xen_initial_domain())
                        xen_setup_pirqs();
        }
}