/*
 * Xen event channels
 *
 * Xen models interrupts with abstract event channels.  Because each
 * domain gets 1024 event channels, but NR_IRQ is not that large, we
 * must dynamically map irqs<->event channels.  The event channels
 * interface with the rest of the kernel by defining a xen interrupt
 * chip.  When an event is received, it is mapped to an irq and sent
 * through the normal interrupt processing path.
 *
 * There are four kinds of events which can be mapped to an event
 * channel:
 *
 * 1. Inter-domain notifications.  This includes all the virtual
 *    device events, since they're driven by front-ends in another domain
 *    (typically dom0).
 * 2. VIRQs, typically used for timers.  These are per-cpu events.
 * 3. IPIs.
 * 4. PIRQs - Hardware interrupts.
 *
 * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007
 */
#include <linux/linkage.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/bootmem.h>
#include <linux/slab.h>
#include <linux/irqnr.h>
#include <linux/pci.h>

#include <asm/desc.h>
#include <asm/ptrace.h>
#include <asm/irq.h>
#include <asm/idle.h>
#include <asm/io_apic.h>
#include <asm/sync_bitops.h>
#include <asm/xen/pci.h>
#include <asm/xen/hypercall.h>
#include <asm/xen/hypervisor.h>

#include <xen/xen.h>
#include <xen/hvm.h>
#include <xen/xen-ops.h>
#include <xen/events.h>
#include <xen/interface/xen.h>
#include <xen/interface/event_channel.h>
#include <xen/interface/hvm/hvm_op.h>
#include <xen/interface/hvm/params.h>
/*
 * This lock protects updates to the following mapping and reference-count
 * arrays. The lock does not need to be acquired to read the mapping tables.
 */
static DEFINE_SPINLOCK(irq_mapping_update_lock);

/* IRQ <-> VIRQ mapping. */
static DEFINE_PER_CPU(int [NR_VIRQS], virq_to_irq) = {[0 ... NR_VIRQS-1] = -1};

/* IRQ <-> IPI mapping */
static DEFINE_PER_CPU(int [XEN_NR_IPIS], ipi_to_irq) = {[0 ... XEN_NR_IPIS-1] = -1};

/* Interrupt types. */
enum xen_irq_type {
	IRQT_UNBOUND = 0,
	IRQT_PIRQ,
	IRQT_VIRQ,
	IRQT_IPI,
	IRQT_EVTCHN
};
/*
 * Packed IRQ information:
 * type - enum xen_irq_type
 * event channel - irq->event channel mapping
 * cpu - cpu this event channel is bound to
 * index - type-specific information:
 *    PIRQ - vector, with MSB being "needs EOI", or physical IRQ of the HVM
 *           guest, or GSI (real passthrough IRQ) of the device.
 *    VIRQ - virq number
 *    IPI - IPI vector
 *    EVTCHN -
 */
struct irq_info {
	enum xen_irq_type type;	/* type */
	unsigned short evtchn;	/* event channel */
	unsigned short cpu;	/* cpu bound */

	union {
		unsigned short virq;
		enum ipi_vector ipi;
		struct {
			unsigned short pirq;
			unsigned short gsi;
			unsigned char vector;
			unsigned char flags;
		} pirq;
	} u;
};
#define PIRQ_NEEDS_EOI	(1 << 0)
#define PIRQ_SHAREABLE	(1 << 1)

static struct irq_info *irq_info;
static int *pirq_to_irq;

static int *evtchn_to_irq;

struct cpu_evtchn_s {
	unsigned long bits[NR_EVENT_CHANNELS/BITS_PER_LONG];
};

static __initdata struct cpu_evtchn_s init_evtchn_mask = {
	.bits[0 ... (NR_EVENT_CHANNELS/BITS_PER_LONG)-1] = ~0ul,
};
static struct cpu_evtchn_s *cpu_evtchn_mask_p = &init_evtchn_mask;

static inline unsigned long *cpu_evtchn_mask(int cpu)
{
	return cpu_evtchn_mask_p[cpu].bits;
}

/* Xen will never allocate port zero for any purpose. */
#define VALID_EVTCHN(chn)	((chn) != 0)

static struct irq_chip xen_dynamic_chip;
static struct irq_chip xen_percpu_chip;
static struct irq_chip xen_pirq_chip;
/* Constructor for packed IRQ information. */
static struct irq_info mk_unbound_info(void)
{
	return (struct irq_info) { .type = IRQT_UNBOUND };
}

static struct irq_info mk_evtchn_info(unsigned short evtchn)
{
	return (struct irq_info) { .type = IRQT_EVTCHN, .evtchn = evtchn,
			.cpu = 0 };
}

static struct irq_info mk_ipi_info(unsigned short evtchn, enum ipi_vector ipi)
{
	return (struct irq_info) { .type = IRQT_IPI, .evtchn = evtchn,
			.cpu = 0, .u.ipi = ipi };
}

static struct irq_info mk_virq_info(unsigned short evtchn, unsigned short virq)
{
	return (struct irq_info) { .type = IRQT_VIRQ, .evtchn = evtchn,
			.cpu = 0, .u.virq = virq };
}

static struct irq_info mk_pirq_info(unsigned short evtchn, unsigned short pirq,
				    unsigned short gsi, unsigned short vector)
{
	return (struct irq_info) { .type = IRQT_PIRQ, .evtchn = evtchn,
			.cpu = 0,
			.u.pirq = { .pirq = pirq, .gsi = gsi, .vector = vector } };
}
/*
 * Accessors for packed IRQ information.
 */
static struct irq_info *info_for_irq(unsigned irq)
{
	return &irq_info[irq];
}

static unsigned int evtchn_from_irq(unsigned irq)
{
	return info_for_irq(irq)->evtchn;
}

unsigned irq_from_evtchn(unsigned int evtchn)
{
	return evtchn_to_irq[evtchn];
}
EXPORT_SYMBOL_GPL(irq_from_evtchn);

static enum ipi_vector ipi_from_irq(unsigned irq)
{
	struct irq_info *info = info_for_irq(irq);

	BUG_ON(info == NULL);
	BUG_ON(info->type != IRQT_IPI);

	return info->u.ipi;
}

static unsigned virq_from_irq(unsigned irq)
{
	struct irq_info *info = info_for_irq(irq);

	BUG_ON(info == NULL);
	BUG_ON(info->type != IRQT_VIRQ);

	return info->u.virq;
}

static unsigned pirq_from_irq(unsigned irq)
{
	struct irq_info *info = info_for_irq(irq);

	BUG_ON(info == NULL);
	BUG_ON(info->type != IRQT_PIRQ);

	return info->u.pirq.pirq;
}

static unsigned gsi_from_irq(unsigned irq)
{
	struct irq_info *info = info_for_irq(irq);

	BUG_ON(info == NULL);
	BUG_ON(info->type != IRQT_PIRQ);

	return info->u.pirq.gsi;
}

static unsigned vector_from_irq(unsigned irq)
{
	struct irq_info *info = info_for_irq(irq);

	BUG_ON(info == NULL);
	BUG_ON(info->type != IRQT_PIRQ);

	return info->u.pirq.vector;
}

static enum xen_irq_type type_from_irq(unsigned irq)
{
	return info_for_irq(irq)->type;
}

static unsigned cpu_from_irq(unsigned irq)
{
	return info_for_irq(irq)->cpu;
}

static unsigned int cpu_from_evtchn(unsigned int evtchn)
{
	int irq = evtchn_to_irq[evtchn];
	unsigned ret = 0;

	if (irq != -1)
		ret = cpu_from_irq(irq);

	return ret;
}

static bool pirq_needs_eoi(unsigned irq)
{
	struct irq_info *info = info_for_irq(irq);

	BUG_ON(info->type != IRQT_PIRQ);

	return info->u.pirq.flags & PIRQ_NEEDS_EOI;
}
static inline unsigned long active_evtchns(unsigned int cpu,
					   struct shared_info *sh,
					   unsigned int idx)
{
	return (sh->evtchn_pending[idx] &
		cpu_evtchn_mask(cpu)[idx] &
		~sh->evtchn_mask[idx]);
}
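
/*
 * Illustrative sketch (not in the original source): an event in word
 * 'idx' is considered active only when all three conditions hold.
 * For example, with BITS_PER_LONG == 64, port 70 lives in word 1,
 * bit 6, so it is reported by active_evtchns(cpu, sh, 1) only if
 *
 *	sh->evtchn_pending[1] has bit 6 set   (the event is pending),
 *	cpu_evtchn_mask(cpu)[1] has bit 6 set (the port is bound to this cpu),
 *	sh->evtchn_mask[1] has bit 6 clear    (the port is not masked).
 */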
static void bind_evtchn_to_cpu(unsigned int chn, unsigned int cpu)
{
	int irq = evtchn_to_irq[chn];

	BUG_ON(irq == -1);
#ifdef CONFIG_SMP
	cpumask_copy(irq_to_desc(irq)->affinity, cpumask_of(cpu));
#endif

	clear_bit(chn, cpu_evtchn_mask(cpu_from_irq(irq)));
	set_bit(chn, cpu_evtchn_mask(cpu));

	irq_info[irq].cpu = cpu;
}

static void init_evtchn_cpu_bindings(void)
{
	int i;
#ifdef CONFIG_SMP
	struct irq_desc *desc;

	/* By default all event channels notify CPU#0. */
	for_each_irq_desc(i, desc) {
		cpumask_copy(desc->affinity, cpumask_of(0));
	}
#endif

	for_each_possible_cpu(i)
		memset(cpu_evtchn_mask(i),
		       (i == 0) ? ~0 : 0, sizeof(struct cpu_evtchn_s));
}

static inline void clear_evtchn(int port)
{
	struct shared_info *s = HYPERVISOR_shared_info;
	sync_clear_bit(port, &s->evtchn_pending[0]);
}

static inline void set_evtchn(int port)
{
	struct shared_info *s = HYPERVISOR_shared_info;
	sync_set_bit(port, &s->evtchn_pending[0]);
}

static inline int test_evtchn(int port)
{
	struct shared_info *s = HYPERVISOR_shared_info;
	return sync_test_bit(port, &s->evtchn_pending[0]);
}
/**
 * notify_remote_via_irq - send event to remote end of event channel via irq
 * @irq: irq of event channel to send event to
 *
 * Unlike notify_remote_via_evtchn(), this is safe to use across
 * save/restore. Notifications on a broken connection are silently
 * dropped.
 */
void notify_remote_via_irq(int irq)
{
	int evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn))
		notify_remote_via_evtchn(evtchn);
}
EXPORT_SYMBOL_GPL(notify_remote_via_irq);
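
/*
 * Illustrative sketch (not part of the original file): a frontend that
 * has bound an event channel to an irq can kick the backend like this;
 * 'ring_irq' is a hypothetical irq previously returned by
 * bind_evtchn_to_irqhandler().
 *
 *	[queue requests on the shared ring, then]
 *	notify_remote_via_irq(ring_irq);
 *
 * Because the lookup goes through the irq, this keeps working after a
 * save/restore has rebound the underlying event channel.
 */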
static void mask_evtchn(int port)
{
	struct shared_info *s = HYPERVISOR_shared_info;
	sync_set_bit(port, &s->evtchn_mask[0]);
}

static void unmask_evtchn(int port)
{
	struct shared_info *s = HYPERVISOR_shared_info;
	unsigned int cpu = get_cpu();

	BUG_ON(!irqs_disabled());

	/* Slow path (hypercall) if this is a non-local port. */
	if (unlikely(cpu != cpu_from_evtchn(port))) {
		struct evtchn_unmask unmask = { .port = port };
		(void)HYPERVISOR_event_channel_op(EVTCHNOP_unmask, &unmask);
	} else {
		struct vcpu_info *vcpu_info = __get_cpu_var(xen_vcpu);

		sync_clear_bit(port, &s->evtchn_mask[0]);

		/*
		 * The following is basically the equivalent of
		 * 'hw_resend_irq'. Just like a real IO-APIC we 'lose
		 * the interrupt edge' if the channel is masked.
		 */
		if (sync_test_bit(port, &s->evtchn_pending[0]) &&
		    !sync_test_and_set_bit(port / BITS_PER_LONG,
					   &vcpu_info->evtchn_pending_sel))
			vcpu_info->evtchn_upcall_pending = 1;
	}

	put_cpu();
}

static int get_nr_hw_irqs(void)
{
	int ret = 1;

#ifdef CONFIG_X86_IO_APIC
	ret = get_nr_irqs_gsi();
#endif

	return ret;
}

static int find_unbound_pirq(int type)
{
	int rc, i;
	struct physdev_get_free_pirq op_get_free_pirq;
	op_get_free_pirq.type = type;

	rc = HYPERVISOR_physdev_op(PHYSDEVOP_get_free_pirq, &op_get_free_pirq);
	if (!rc)
		return op_get_free_pirq.pirq;

	for (i = 0; i < nr_irqs; i++) {
		if (pirq_to_irq[i] < 0)
			return i;
	}
	return -1;
}
static int find_unbound_irq(void)
{
	struct irq_data *data;
	int irq, res;
	int start = get_nr_hw_irqs();

	if (start == nr_irqs)
		goto no_irqs;

	/* nr_irqs is a magic value. Must not use it. */
	for (irq = nr_irqs-1; irq > start; irq--) {
		data = irq_get_irq_data(irq);
		/* only 0->15 have init'd desc; handle irq > 16 */
		if (!data)
			break;
		if (data->chip == &no_irq_chip)
			break;
		if (data->chip != &xen_dynamic_chip)
			continue;
		if (irq_info[irq].type == IRQT_UNBOUND)
			return irq;
	}

	if (irq == start)
		goto no_irqs;

	res = irq_alloc_desc_at(irq, -1);

	if (WARN_ON(res != irq))
		return -1;

	return irq;

no_irqs:
	panic("No available IRQ to bind to: increase nr_irqs!\n");
}
static bool identity_mapped_irq(unsigned irq)
{
	/* identity map all the hardware irqs */
	return irq < get_nr_hw_irqs();
}

static void pirq_unmask_notify(int irq)
{
	struct physdev_eoi eoi = { .irq = pirq_from_irq(irq) };

	if (unlikely(pirq_needs_eoi(irq))) {
		int rc = HYPERVISOR_physdev_op(PHYSDEVOP_eoi, &eoi);
		WARN_ON(rc);
	}
}

static void pirq_query_unmask(int irq)
{
	struct physdev_irq_status_query irq_status;
	struct irq_info *info = info_for_irq(irq);

	BUG_ON(info->type != IRQT_PIRQ);

	irq_status.irq = pirq_from_irq(irq);
	if (HYPERVISOR_physdev_op(PHYSDEVOP_irq_status_query, &irq_status))
		irq_status.flags = 0;

	info->u.pirq.flags &= ~PIRQ_NEEDS_EOI;
	if (irq_status.flags & XENIRQSTAT_needs_eoi)
		info->u.pirq.flags |= PIRQ_NEEDS_EOI;
}

static bool probing_irq(int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	return desc && desc->action == NULL;
}
static unsigned int startup_pirq(unsigned int irq)
{
	struct evtchn_bind_pirq bind_pirq;
	struct irq_info *info = info_for_irq(irq);
	int evtchn = evtchn_from_irq(irq);
	int rc;

	BUG_ON(info->type != IRQT_PIRQ);

	if (VALID_EVTCHN(evtchn))
		goto out;

	bind_pirq.pirq = pirq_from_irq(irq);
	/* NB. We are happy to share unless we are probing. */
	bind_pirq.flags = info->u.pirq.flags & PIRQ_SHAREABLE ?
					BIND_PIRQ__WILL_SHARE : 0;
	rc = HYPERVISOR_event_channel_op(EVTCHNOP_bind_pirq, &bind_pirq);
	if (rc != 0) {
		if (!probing_irq(irq))
			printk(KERN_INFO "Failed to obtain physical IRQ %d\n",
			       irq);
		return 0;
	}
	evtchn = bind_pirq.port;

	pirq_query_unmask(irq);

	evtchn_to_irq[evtchn] = irq;
	bind_evtchn_to_cpu(evtchn, 0);
	info->evtchn = evtchn;

out:
	unmask_evtchn(evtchn);
	pirq_unmask_notify(irq);

	return 0;
}
static void shutdown_pirq(unsigned int irq)
{
	struct evtchn_close close;
	struct irq_info *info = info_for_irq(irq);
	int evtchn = evtchn_from_irq(irq);

	BUG_ON(info->type != IRQT_PIRQ);

	if (!VALID_EVTCHN(evtchn))
		return;

	mask_evtchn(evtchn);

	close.port = evtchn;
	if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close) != 0)
		BUG();

	bind_evtchn_to_cpu(evtchn, 0);
	evtchn_to_irq[evtchn] = -1;
	info->evtchn = 0;
}

static void enable_pirq(unsigned int irq)
{
	startup_pirq(irq);
}

static void disable_pirq(unsigned int irq)
{
}

static void ack_pirq(unsigned int irq)
{
	int evtchn = evtchn_from_irq(irq);

	move_native_irq(irq);

	if (VALID_EVTCHN(evtchn)) {
		mask_evtchn(evtchn);
		clear_evtchn(evtchn);
	}
}

static void end_pirq(unsigned int irq)
{
	int evtchn = evtchn_from_irq(irq);
	struct irq_desc *desc = irq_to_desc(irq);

	if (WARN_ON(!desc))
		return;

	if ((desc->status & (IRQ_DISABLED|IRQ_PENDING)) ==
	    (IRQ_DISABLED|IRQ_PENDING)) {
		shutdown_pirq(irq);
	} else if (VALID_EVTCHN(evtchn)) {
		unmask_evtchn(evtchn);
		pirq_unmask_notify(irq);
	}
}
static int find_irq_by_gsi(unsigned gsi)
{
	int irq;

	for (irq = 0; irq < nr_irqs; irq++) {
		struct irq_info *info = info_for_irq(irq);

		if (info == NULL || info->type != IRQT_PIRQ)
			continue;

		if (gsi_from_irq(irq) == gsi)
			return irq;
	}

	return -1;
}

int xen_allocate_pirq(unsigned gsi, int shareable, char *name)
{
	return xen_map_pirq_gsi(gsi, gsi, shareable, name);
}
/* xen_map_pirq_gsi might allocate irqs from the top down, as a
 * consequence don't assume that the irq number returned has a low value
 * or can be used as a pirq number unless you know otherwise.
 *
 * One notable exception is when xen_map_pirq_gsi is called passing a
 * hardware gsi as argument, in which case the irq number returned
 * matches the gsi number passed as second argument.
 *
 * Note: We don't assign an event channel until the irq has actually
 * been started up. Return an existing irq if we've already got one for
 * the gsi.
 */
int xen_map_pirq_gsi(unsigned pirq, unsigned gsi, int shareable, char *name)
{
	int irq = 0;
	struct physdev_irq irq_op;

	spin_lock(&irq_mapping_update_lock);

	if ((pirq > nr_irqs) || (gsi > nr_irqs)) {
		printk(KERN_WARNING "xen_map_pirq_gsi: %s %s is incorrect!\n",
			pirq > nr_irqs ? "pirq" : "",
			gsi > nr_irqs ? "gsi" : "");
		goto out;
	}

	irq = find_irq_by_gsi(gsi);
	if (irq != -1) {
		printk(KERN_INFO "xen_map_pirq_gsi: returning irq %d for gsi %u\n",
		       irq, gsi);
		goto out;	/* XXX need refcount? */
	}

	/* If we are a PV guest, we don't have GSIs (no ACPI passed).
	 * Therefore the !xen_initial_domain() check is used to take this
	 * identity-mapping branch for PV guests. */
	if (identity_mapped_irq(gsi) || (!xen_initial_domain() &&
				xen_pv_domain())) {
		irq = gsi;
		irq_alloc_desc_at(irq, -1);
	} else
		irq = find_unbound_irq();

	set_irq_chip_and_handler_name(irq, &xen_pirq_chip,
				      handle_level_irq, name);

	irq_op.irq = irq;
	irq_op.vector = 0;

	/* Only the privileged domain can do this. For non-priv, the pcifront
	 * driver provides a PCI bus that does the call to do exactly
	 * this in the priv domain. */
	if (xen_initial_domain() &&
	    HYPERVISOR_physdev_op(PHYSDEVOP_alloc_irq_vector, &irq_op)) {
		irq_free_desc(irq);
		irq = -ENOSPC;
		goto out;
	}

	irq_info[irq] = mk_pirq_info(0, pirq, gsi, irq_op.vector);
	irq_info[irq].u.pirq.flags |= shareable ? PIRQ_SHAREABLE : 0;
	pirq_to_irq[pirq] = irq;

out:
	spin_unlock(&irq_mapping_update_lock);

	return irq;
}
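
/*
 * Illustrative sketch (not part of the original file): dom0 code that
 * wants a Linux irq for a legacy device on GSI 9 could call
 *
 *	int irq = xen_allocate_pirq(9, 1, "rtc");
 *
 * where 1 marks the pirq as shareable and "rtc" is a hypothetical name.
 * The returned irq can then be used with request_irq() like any other;
 * startup_pirq() binds the event channel on first use.
 */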
#ifdef CONFIG_PCI_MSI
#include <linux/msi.h>
#include "../pci/msi.h"

void xen_allocate_pirq_msi(char *name, int *irq, int *pirq, int alloc)
{
	spin_lock(&irq_mapping_update_lock);

	if (alloc & XEN_ALLOC_IRQ) {
		*irq = find_unbound_irq();
		if (*irq == -1)
			goto out;
	}

	if (alloc & XEN_ALLOC_PIRQ) {
		*pirq = find_unbound_pirq(MAP_PIRQ_TYPE_MSI);
		if (*pirq == -1)
			goto out;
	}

	set_irq_chip_and_handler_name(*irq, &xen_pirq_chip,
				      handle_level_irq, name);

	irq_info[*irq] = mk_pirq_info(0, *pirq, 0, 0);
	pirq_to_irq[*pirq] = *irq;

out:
	spin_unlock(&irq_mapping_update_lock);
}

int xen_create_msi_irq(struct pci_dev *dev, struct msi_desc *msidesc, int type)
{
	int irq = -1;
	struct physdev_map_pirq map_irq;
	int rc;
	int pos;
	u32 table_offset, bir;

	memset(&map_irq, 0, sizeof(map_irq));
	map_irq.domid = DOMID_SELF;
	map_irq.type = MAP_PIRQ_TYPE_MSI;
	map_irq.index = -1;
	map_irq.pirq = -1;
	map_irq.bus = dev->bus->number;
	map_irq.devfn = dev->devfn;

	if (type == PCI_CAP_ID_MSIX) {
		pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);

		pci_read_config_dword(dev, msix_table_offset_reg(pos),
					&table_offset);
		bir = (u8)(table_offset & PCI_MSIX_FLAGS_BIRMASK);

		map_irq.table_base = pci_resource_start(dev, bir);
		map_irq.entry_nr = msidesc->msi_attrib.entry_nr;
	}

	spin_lock(&irq_mapping_update_lock);

	irq = find_unbound_irq();

	if (irq == -1)
		goto out;

	rc = HYPERVISOR_physdev_op(PHYSDEVOP_map_pirq, &map_irq);
	if (rc) {
		printk(KERN_WARNING "xen map irq failed %d\n", rc);

		irq_free_desc(irq);

		irq = -1;
		goto out;
	}
	irq_info[irq] = mk_pirq_info(0, map_irq.pirq, 0, map_irq.index);

	set_irq_chip_and_handler_name(irq, &xen_pirq_chip,
			handle_level_irq,
			(type == PCI_CAP_ID_MSIX) ? "msi-x" : "msi");

out:
	spin_unlock(&irq_mapping_update_lock);
	return irq;
}
#endif
int xen_destroy_irq(int irq)
{
	struct irq_desc *desc;
	struct physdev_unmap_pirq unmap_irq;
	struct irq_info *info = info_for_irq(irq);
	int rc = -ENOENT;

	spin_lock(&irq_mapping_update_lock);

	desc = irq_to_desc(irq);
	if (!desc)
		goto out;

	if (xen_initial_domain()) {
		unmap_irq.pirq = info->u.pirq.pirq;
		unmap_irq.domid = DOMID_SELF;
		rc = HYPERVISOR_physdev_op(PHYSDEVOP_unmap_pirq, &unmap_irq);
		if (rc) {
			printk(KERN_WARNING "unmap irq failed %d\n", rc);
			goto out;
		}
		pirq_to_irq[info->u.pirq.pirq] = -1;
	}
	irq_info[irq] = mk_unbound_info();

	irq_free_desc(irq);

out:
	spin_unlock(&irq_mapping_update_lock);
	return rc;
}

int xen_vector_from_irq(unsigned irq)
{
	return vector_from_irq(irq);
}

int xen_gsi_from_irq(unsigned irq)
{
	return gsi_from_irq(irq);
}

int xen_irq_from_pirq(unsigned pirq)
{
	return pirq_to_irq[pirq];
}
int bind_evtchn_to_irq(unsigned int evtchn)
{
	int irq;

	spin_lock(&irq_mapping_update_lock);

	irq = evtchn_to_irq[evtchn];

	if (irq == -1) {
		irq = find_unbound_irq();

		set_irq_chip_and_handler_name(irq, &xen_dynamic_chip,
					      handle_fasteoi_irq, "event");

		evtchn_to_irq[evtchn] = irq;
		irq_info[irq] = mk_evtchn_info(evtchn);
	}

	spin_unlock(&irq_mapping_update_lock);

	return irq;
}
EXPORT_SYMBOL_GPL(bind_evtchn_to_irq);

static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu)
{
	struct evtchn_bind_ipi bind_ipi;
	int evtchn, irq;

	spin_lock(&irq_mapping_update_lock);

	irq = per_cpu(ipi_to_irq, cpu)[ipi];

	if (irq == -1) {
		irq = find_unbound_irq();
		if (irq < 0)
			goto out;

		set_irq_chip_and_handler_name(irq, &xen_percpu_chip,
					      handle_percpu_irq, "ipi");

		bind_ipi.vcpu = cpu;
		if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi,
						&bind_ipi) != 0)
			BUG();
		evtchn = bind_ipi.port;

		evtchn_to_irq[evtchn] = irq;
		irq_info[irq] = mk_ipi_info(evtchn, ipi);
		per_cpu(ipi_to_irq, cpu)[ipi] = irq;

		bind_evtchn_to_cpu(evtchn, cpu);
	}

out:
	spin_unlock(&irq_mapping_update_lock);
	return irq;
}

int bind_virq_to_irq(unsigned int virq, unsigned int cpu)
{
	struct evtchn_bind_virq bind_virq;
	int evtchn, irq;

	spin_lock(&irq_mapping_update_lock);

	irq = per_cpu(virq_to_irq, cpu)[virq];

	if (irq == -1) {
		irq = find_unbound_irq();

		set_irq_chip_and_handler_name(irq, &xen_percpu_chip,
					      handle_percpu_irq, "virq");

		bind_virq.virq = virq;
		bind_virq.vcpu = cpu;
		if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq,
						&bind_virq) != 0)
			BUG();
		evtchn = bind_virq.port;

		evtchn_to_irq[evtchn] = irq;
		irq_info[irq] = mk_virq_info(evtchn, virq);

		per_cpu(virq_to_irq, cpu)[virq] = irq;

		bind_evtchn_to_cpu(evtchn, cpu);
	}

	spin_unlock(&irq_mapping_update_lock);

	return irq;
}
static void unbind_from_irq(unsigned int irq)
{
	struct evtchn_close close;
	int evtchn = evtchn_from_irq(irq);

	spin_lock(&irq_mapping_update_lock);

	if (VALID_EVTCHN(evtchn)) {
		close.port = evtchn;
		if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close) != 0)
			BUG();

		switch (type_from_irq(irq)) {
		case IRQT_VIRQ:
			per_cpu(virq_to_irq, cpu_from_evtchn(evtchn))
				[virq_from_irq(irq)] = -1;
			break;
		case IRQT_IPI:
			per_cpu(ipi_to_irq, cpu_from_evtchn(evtchn))
				[ipi_from_irq(irq)] = -1;
			break;
		default:
			break;
		}

		/* Closed ports are implicitly re-bound to VCPU0. */
		bind_evtchn_to_cpu(evtchn, 0);

		evtchn_to_irq[evtchn] = -1;
	}

	if (irq_info[irq].type != IRQT_UNBOUND) {
		irq_info[irq] = mk_unbound_info();

		irq_free_desc(irq);
	}

	spin_unlock(&irq_mapping_update_lock);
}
int bind_evtchn_to_irqhandler(unsigned int evtchn,
			      irq_handler_t handler,
			      unsigned long irqflags,
			      const char *devname, void *dev_id)
{
	unsigned int irq;
	int retval;

	irq = bind_evtchn_to_irq(evtchn);
	retval = request_irq(irq, handler, irqflags, devname, dev_id);
	if (retval != 0) {
		unbind_from_irq(irq);
		return retval;
	}

	return irq;
}
EXPORT_SYMBOL_GPL(bind_evtchn_to_irqhandler);
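
/*
 * Illustrative sketch (not part of the original file): a typical
 * frontend driver, holding an event-channel port obtained via xenstore,
 * binds it and installs a handler in one step.  The handler and names
 * below are hypothetical.
 *
 *	static irqreturn_t blkfront_interrupt(int irq, void *dev_id)
 *	{
 *		[consume responses from the shared ring]
 *		return IRQ_HANDLED;
 *	}
 *
 *	irq = bind_evtchn_to_irqhandler(evtchn, blkfront_interrupt,
 *					0, "blkif", info);
 *	if (irq < 0)
 *		[bail out; on success, tear down later with
 *		 unbind_from_irqhandler(irq, info)]
 */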
int bind_virq_to_irqhandler(unsigned int virq, unsigned int cpu,
			    irq_handler_t handler,
			    unsigned long irqflags, const char *devname, void *dev_id)
{
	unsigned int irq;
	int retval;

	irq = bind_virq_to_irq(virq, cpu);
	retval = request_irq(irq, handler, irqflags, devname, dev_id);
	if (retval != 0) {
		unbind_from_irq(irq);
		return retval;
	}

	return irq;
}
EXPORT_SYMBOL_GPL(bind_virq_to_irqhandler);
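
/*
 * Illustrative sketch (not part of the original file): per-cpu timer
 * setup might bind VIRQ_TIMER on each cpu; the handler name here is
 * hypothetical.
 *
 *	irq = bind_virq_to_irqhandler(VIRQ_TIMER, cpu, xen_timer_interrupt,
 *				      IRQF_PERCPU, "timer", NULL);
 *
 * Since VIRQs are per-cpu events, each cpu gets its own event channel
 * and irq for the same virq number.
 */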
int bind_ipi_to_irqhandler(enum ipi_vector ipi,
			   unsigned int cpu,
			   irq_handler_t handler,
			   unsigned long irqflags,
			   const char *devname,
			   void *dev_id)
{
	int irq, retval;

	irq = bind_ipi_to_irq(ipi, cpu);
	if (irq < 0)
		return irq;

	irqflags |= IRQF_NO_SUSPEND;
	retval = request_irq(irq, handler, irqflags, devname, dev_id);
	if (retval != 0) {
		unbind_from_irq(irq);
		return retval;
	}

	return irq;
}

void unbind_from_irqhandler(unsigned int irq, void *dev_id)
{
	free_irq(irq, dev_id);
	unbind_from_irq(irq);
}
EXPORT_SYMBOL_GPL(unbind_from_irqhandler);

void xen_send_IPI_one(unsigned int cpu, enum ipi_vector vector)
{
	int irq = per_cpu(ipi_to_irq, cpu)[vector];
	BUG_ON(irq < 0);
	notify_remote_via_irq(irq);
}
irqreturn_t xen_debug_interrupt(int irq, void *dev_id)
{
	struct shared_info *sh = HYPERVISOR_shared_info;
	int cpu = smp_processor_id();
	unsigned long *cpu_evtchn = cpu_evtchn_mask(cpu);
	int i;
	unsigned long flags;
	static DEFINE_SPINLOCK(debug_lock);
	struct vcpu_info *v;

	spin_lock_irqsave(&debug_lock, flags);

	printk("\nvcpu %d\n ", cpu);

	for_each_online_cpu(i) {
		int pending;
		v = per_cpu(xen_vcpu, i);
		pending = (get_irq_regs() && i == cpu)
			? xen_irqs_disabled(get_irq_regs())
			: v->evtchn_upcall_mask;
		printk("%d: masked=%d pending=%d event_sel %0*lx\n ", i,
		       pending, v->evtchn_upcall_pending,
		       (int)(sizeof(v->evtchn_pending_sel)*2),
		       v->evtchn_pending_sel);
	}
	v = per_cpu(xen_vcpu, cpu);

	printk("\npending:\n ");
	for (i = ARRAY_SIZE(sh->evtchn_pending)-1; i >= 0; i--)
		printk("%0*lx%s", (int)sizeof(sh->evtchn_pending[0])*2,
		       sh->evtchn_pending[i],
		       i % 8 == 0 ? "\n " : " ");
	printk("\nglobal mask:\n ");
	for (i = ARRAY_SIZE(sh->evtchn_mask)-1; i >= 0; i--)
		printk("%0*lx%s",
		       (int)(sizeof(sh->evtchn_mask[0])*2),
		       sh->evtchn_mask[i],
		       i % 8 == 0 ? "\n " : " ");

	printk("\nglobally unmasked:\n ");
	for (i = ARRAY_SIZE(sh->evtchn_mask)-1; i >= 0; i--)
		printk("%0*lx%s", (int)(sizeof(sh->evtchn_mask[0])*2),
		       sh->evtchn_pending[i] & ~sh->evtchn_mask[i],
		       i % 8 == 0 ? "\n " : " ");

	printk("\nlocal cpu%d mask:\n ", cpu);
	for (i = (NR_EVENT_CHANNELS/BITS_PER_LONG)-1; i >= 0; i--)
		printk("%0*lx%s", (int)(sizeof(cpu_evtchn[0])*2),
		       cpu_evtchn[i],
		       i % 8 == 0 ? "\n " : " ");

	printk("\nlocally unmasked:\n ");
	for (i = ARRAY_SIZE(sh->evtchn_mask)-1; i >= 0; i--) {
		unsigned long pending = sh->evtchn_pending[i]
			& ~sh->evtchn_mask[i]
			& cpu_evtchn[i];
		printk("%0*lx%s", (int)(sizeof(sh->evtchn_mask[0])*2),
		       pending, i % 8 == 0 ? "\n " : " ");
	}

	printk("\npending list:\n");
	for (i = 0; i < NR_EVENT_CHANNELS; i++) {
		if (sync_test_bit(i, sh->evtchn_pending)) {
			int word_idx = i / BITS_PER_LONG;
			printk(" %d: event %d -> irq %d%s%s%s\n",
			       cpu_from_evtchn(i), i,
			       evtchn_to_irq[i],
			       sync_test_bit(word_idx, &v->evtchn_pending_sel)
				       ? "" : " l2-clear",
			       !sync_test_bit(i, sh->evtchn_mask)
				       ? "" : " globally-masked",
			       sync_test_bit(i, cpu_evtchn)
				       ? "" : " locally-masked");
		}
	}

	spin_unlock_irqrestore(&debug_lock, flags);

	return IRQ_HANDLED;
}
static DEFINE_PER_CPU(unsigned, xed_nesting_count);

/*
 * Search the CPU's pending events bitmasks.  For each one found, map
 * the event number to an irq, and feed it into do_IRQ() for
 * handling.
 *
 * Xen uses a two-level bitmap to speed searching.  The first level is
 * a bitset of words which contain pending event bits.  The second
 * level is a bitset of pending events themselves.
 */
static void __xen_evtchn_do_upcall(void)
{
	int cpu = get_cpu();
	struct shared_info *s = HYPERVISOR_shared_info;
	struct vcpu_info *vcpu_info = __get_cpu_var(xen_vcpu);
	unsigned count;

	do {
		unsigned long pending_words;

		vcpu_info->evtchn_upcall_pending = 0;

		if (__get_cpu_var(xed_nesting_count)++)
			goto out;

#ifndef CONFIG_X86 /* No need for a barrier -- XCHG is a barrier on x86. */
		/* Clear master flag /before/ clearing selector flag. */
		wmb();
#endif
		pending_words = xchg(&vcpu_info->evtchn_pending_sel, 0);
		while (pending_words != 0) {
			unsigned long pending_bits;
			int word_idx = __ffs(pending_words);
			pending_words &= ~(1UL << word_idx);

			while ((pending_bits = active_evtchns(cpu, s, word_idx)) != 0) {
				int bit_idx = __ffs(pending_bits);
				int port = (word_idx * BITS_PER_LONG) + bit_idx;
				int irq = evtchn_to_irq[port];
				struct irq_desc *desc;

				mask_evtchn(port);
				clear_evtchn(port);

				if (irq != -1) {
					desc = irq_to_desc(irq);
					if (desc)
						generic_handle_irq_desc(irq, desc);
				}
			}
		}

		BUG_ON(!irqs_disabled());

		count = __get_cpu_var(xed_nesting_count);
		__get_cpu_var(xed_nesting_count) = 0;
	} while (count != 1 || vcpu_info->evtchn_upcall_pending);

out:
	put_cpu();
}
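
/*
 * Illustrative worked example (not part of the original file): on a
 * 64-bit build, if evtchn_pending_sel has bit 2 set, word_idx is 2 and
 * ports 128..191 are scanned.  A pending, unmasked port such as 130
 * then shows up in active_evtchns(cpu, s, 2) as bit_idx 2, giving
 *
 *	port = 2 * BITS_PER_LONG + 2 = 130
 *
 * which is masked, cleared and handed to the handler behind
 * evtchn_to_irq[130].
 */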
void xen_evtchn_do_upcall(struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);

	exit_idle();
	irq_enter();

	__xen_evtchn_do_upcall();

	irq_exit();
	set_irq_regs(old_regs);
}

void xen_hvm_evtchn_do_upcall(void)
{
	__xen_evtchn_do_upcall();
}
EXPORT_SYMBOL_GPL(xen_hvm_evtchn_do_upcall);

/* Rebind a new event channel to an existing irq. */
void rebind_evtchn_irq(int evtchn, int irq)
{
	struct irq_info *info = info_for_irq(irq);

	/* Make sure the irq is masked, since the new event channel
	   will also be masked. */
	disable_irq(irq);

	spin_lock(&irq_mapping_update_lock);

	/* After resume the irq<->evtchn mappings are all cleared out */
	BUG_ON(evtchn_to_irq[evtchn] != -1);
	/* Expect irq to have been bound before,
	   so there should be a proper type */
	BUG_ON(info->type == IRQT_UNBOUND);

	evtchn_to_irq[evtchn] = irq;
	irq_info[irq] = mk_evtchn_info(evtchn);

	spin_unlock(&irq_mapping_update_lock);

	/* new event channels are always bound to cpu 0 */
	irq_set_affinity(irq, cpumask_of(0));

	/* Unmask the event channel. */
	enable_irq(irq);
}
/* Rebind an evtchn so that it gets delivered to a specific cpu */
static int rebind_irq_to_cpu(unsigned irq, unsigned tcpu)
{
	struct evtchn_bind_vcpu bind_vcpu;
	int evtchn = evtchn_from_irq(irq);

	/* events delivered via platform PCI interrupts are always
	 * routed to vcpu 0 */
	if (!VALID_EVTCHN(evtchn) ||
		(xen_hvm_domain() && !xen_have_vector_callback))
		return -1;

	/* Send future instances of this interrupt to other vcpu. */
	bind_vcpu.port = evtchn;
	bind_vcpu.vcpu = tcpu;

	/*
	 * If this fails, it usually just indicates that we're dealing with a
	 * virq or IPI channel, which don't actually need to be rebound. Ignore
	 * it, but don't do the xenlinux-level rebind in that case.
	 */
	if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_vcpu, &bind_vcpu) >= 0)
		bind_evtchn_to_cpu(evtchn, tcpu);

	return 0;
}

static int set_affinity_irq(unsigned irq, const struct cpumask *dest)
{
	unsigned tcpu = cpumask_first(dest);

	return rebind_irq_to_cpu(irq, tcpu);
}

int resend_irq_on_evtchn(unsigned int irq)
{
	int masked, evtchn = evtchn_from_irq(irq);
	struct shared_info *s = HYPERVISOR_shared_info;

	if (!VALID_EVTCHN(evtchn))
		return 1;

	masked = sync_test_and_set_bit(evtchn, s->evtchn_mask);
	sync_set_bit(evtchn, s->evtchn_pending);
	if (!masked)
		unmask_evtchn(evtchn);

	return 1;
}
static void enable_dynirq(unsigned int irq)
{
	int evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn))
		unmask_evtchn(evtchn);
}

static void disable_dynirq(unsigned int irq)
{
	int evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn))
		mask_evtchn(evtchn);
}

static void ack_dynirq(unsigned int irq)
{
	int evtchn = evtchn_from_irq(irq);

	move_masked_irq(irq);

	if (VALID_EVTCHN(evtchn))
		unmask_evtchn(evtchn);
}

static int retrigger_dynirq(unsigned int irq)
{
	int evtchn = evtchn_from_irq(irq);
	struct shared_info *sh = HYPERVISOR_shared_info;
	int ret = 0;

	if (VALID_EVTCHN(evtchn)) {
		int masked;

		masked = sync_test_and_set_bit(evtchn, sh->evtchn_mask);
		sync_set_bit(evtchn, sh->evtchn_pending);
		if (!masked)
			unmask_evtchn(evtchn);
		ret = 1;
	}

	return ret;
}
static void restore_cpu_pirqs(void)
{
	int pirq, rc, irq, gsi;
	struct physdev_map_pirq map_irq;

	for (pirq = 0; pirq < nr_irqs; pirq++) {
		irq = pirq_to_irq[pirq];
		if (irq == -1)
			continue;

		/* save/restore of PT devices doesn't work, so at this point the
		 * only devices present are GSI based emulated devices */
		gsi = gsi_from_irq(irq);
		if (!gsi)
			continue;

		map_irq.domid = DOMID_SELF;
		map_irq.type = MAP_PIRQ_TYPE_GSI;
		map_irq.index = gsi;
		map_irq.pirq = pirq;

		rc = HYPERVISOR_physdev_op(PHYSDEVOP_map_pirq, &map_irq);
		if (rc) {
			printk(KERN_WARNING "xen map irq failed gsi=%d irq=%d pirq=%d rc=%d\n",
					gsi, irq, pirq, rc);
			irq_info[irq] = mk_unbound_info();
			pirq_to_irq[pirq] = -1;
			continue;
		}

		printk(KERN_DEBUG "xen: --> irq=%d, pirq=%d\n", irq, map_irq.pirq);

		startup_pirq(irq);
	}
}
static void restore_cpu_virqs(unsigned int cpu)
{
	struct evtchn_bind_virq bind_virq;
	int virq, irq, evtchn;

	for (virq = 0; virq < NR_VIRQS; virq++) {
		if ((irq = per_cpu(virq_to_irq, cpu)[virq]) == -1)
			continue;

		BUG_ON(virq_from_irq(irq) != virq);

		/* Get a new binding from Xen. */
		bind_virq.virq = virq;
		bind_virq.vcpu = cpu;
		if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq,
						&bind_virq) != 0)
			BUG();
		evtchn = bind_virq.port;

		/* Record the new mapping. */
		evtchn_to_irq[evtchn] = irq;
		irq_info[irq] = mk_virq_info(evtchn, virq);
		bind_evtchn_to_cpu(evtchn, cpu);
	}
}

static void restore_cpu_ipis(unsigned int cpu)
{
	struct evtchn_bind_ipi bind_ipi;
	int ipi, irq, evtchn;

	for (ipi = 0; ipi < XEN_NR_IPIS; ipi++) {
		if ((irq = per_cpu(ipi_to_irq, cpu)[ipi]) == -1)
			continue;

		BUG_ON(ipi_from_irq(irq) != ipi);

		/* Get a new binding from Xen. */
		bind_ipi.vcpu = cpu;
		if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi,
						&bind_ipi) != 0)
			BUG();
		evtchn = bind_ipi.port;

		/* Record the new mapping. */
		evtchn_to_irq[evtchn] = irq;
		irq_info[irq] = mk_ipi_info(evtchn, ipi);
		bind_evtchn_to_cpu(evtchn, cpu);
	}
}
/* Clear an irq's pending state, in preparation for polling on it */
void xen_clear_irq_pending(int irq)
{
	int evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn))
		clear_evtchn(evtchn);
}
EXPORT_SYMBOL(xen_clear_irq_pending);

void xen_set_irq_pending(int irq)
{
	int evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn))
		set_evtchn(evtchn);
}

bool xen_test_irq_pending(int irq)
{
	int evtchn = evtchn_from_irq(irq);
	bool ret = false;

	if (VALID_EVTCHN(evtchn))
		ret = test_evtchn(evtchn);

	return ret;
}

/* Poll waiting for an irq to become pending with timeout.  In the usual case,
 * the irq will be disabled so it won't deliver an interrupt. */
void xen_poll_irq_timeout(int irq, u64 timeout)
{
	evtchn_port_t evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn)) {
		struct sched_poll poll;

		poll.nr_ports = 1;
		poll.timeout = timeout;
		set_xen_guest_handle(poll.ports, &evtchn);

		if (HYPERVISOR_sched_op(SCHEDOP_poll, &poll) != 0)
			BUG();
	}
}
EXPORT_SYMBOL(xen_poll_irq_timeout);

/* Poll waiting for an irq to become pending.  In the usual case, the
 * irq will be disabled so it won't deliver an interrupt. */
void xen_poll_irq(int irq)
{
	xen_poll_irq_timeout(irq, 0 /* no timeout */);
}
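
/*
 * Illustrative sketch (not part of the original file): this is the
 * pattern used by paravirtualized spinlocks.  A vcpu that fails to take
 * a lock clears the pending state and blocks until the lock holder
 * kicks it; 'lock_irq' is a hypothetical irq bound for this purpose.
 *
 *	xen_clear_irq_pending(lock_irq);
 *	do {
 *		[re-check the lock; break out if it was acquired]
 *		xen_poll_irq(lock_irq);	[blocks until the irq is pending]
 *	} while (!xen_test_irq_pending(lock_irq));
 *
 * The irq stays disabled throughout, so polling never delivers a real
 * interrupt.
 */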
void xen_irq_resume(void)
{
	unsigned int cpu, irq, evtchn;
	struct irq_desc *desc;

	init_evtchn_cpu_bindings();

	/* New event-channel space is not 'live' yet. */
	for (evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++)
		mask_evtchn(evtchn);

	/* No IRQ <-> event-channel mappings. */
	for (irq = 0; irq < nr_irqs; irq++)
		irq_info[irq].evtchn = 0; /* zap event-channel binding */

	for (evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++)
		evtchn_to_irq[evtchn] = -1;

	for_each_possible_cpu(cpu) {
		restore_cpu_virqs(cpu);
		restore_cpu_ipis(cpu);
	}

	/*
	 * Unmask any IRQF_NO_SUSPEND IRQs which are enabled. These
	 * are not handled by the IRQ core.
	 */
	for_each_irq_desc(irq, desc) {
		if (!desc->action || !(desc->action->flags & IRQF_NO_SUSPEND))
			continue;
		if (desc->status & IRQ_DISABLED)
			continue;

		evtchn = evtchn_from_irq(irq);
		if (evtchn == -1)
			continue;

		unmask_evtchn(evtchn);
	}

	restore_cpu_pirqs();
}
static struct irq_chip xen_dynamic_chip __read_mostly = {
	.name		= "xen-dyn",

	.disable	= disable_dynirq,
	.mask		= disable_dynirq,
	.unmask		= enable_dynirq,

	.eoi		= ack_dynirq,
	.set_affinity	= set_affinity_irq,
	.retrigger	= retrigger_dynirq,
};

static struct irq_chip xen_pirq_chip __read_mostly = {
	.name		= "xen-pirq",

	.startup	= startup_pirq,
	.shutdown	= shutdown_pirq,

	.enable		= enable_pirq,
	.unmask		= enable_pirq,

	.disable	= disable_pirq,
	.mask		= disable_pirq,

	.ack		= ack_pirq,
	.end		= end_pirq,

	.set_affinity	= set_affinity_irq,
	.retrigger	= retrigger_dynirq,
};

static struct irq_chip xen_percpu_chip __read_mostly = {
	.name		= "xen-percpu",

	.disable	= disable_dynirq,
	.mask		= disable_dynirq,
	.unmask		= enable_dynirq,

	.ack		= ack_dynirq,
};
int xen_set_callback_via(uint64_t via)
{
	struct xen_hvm_param a;
	a.domid = DOMID_SELF;
	a.index = HVM_PARAM_CALLBACK_IRQ;
	a.value = via;
	return HYPERVISOR_hvm_op(HVMOP_set_param, &a);
}
EXPORT_SYMBOL_GPL(xen_set_callback_via);

#ifdef CONFIG_XEN_PVHVM
/* Vector callbacks are better than PCI interrupts to receive event
 * channel notifications because we can receive vector callbacks on any
 * vcpu and we don't need PCI support or APIC interactions. */
void xen_callback_vector(void)
{
	int rc;
	uint64_t callback_via;

	if (xen_have_vector_callback) {
		callback_via = HVM_CALLBACK_VECTOR(XEN_HVM_EVTCHN_CALLBACK);
		rc = xen_set_callback_via(callback_via);
		if (rc) {
			printk(KERN_ERR "Request for Xen HVM callback vector"
					" failed.\n");
			xen_have_vector_callback = 0;
			return;
		}
		printk(KERN_INFO "Xen HVM callback vector for event delivery is "
				"enabled\n");
		/* in the restore case the vector has already been allocated */
		if (!test_bit(XEN_HVM_EVTCHN_CALLBACK, used_vectors))
			alloc_intr_gate(XEN_HVM_EVTCHN_CALLBACK, xen_hvm_callback_vector);
	}
}
#else
void xen_callback_vector(void) {}
#endif
void __init xen_init_IRQ(void)
{
	int i;

	cpu_evtchn_mask_p = kcalloc(nr_cpu_ids, sizeof(struct cpu_evtchn_s),
				    GFP_KERNEL);
	irq_info = kcalloc(nr_irqs, sizeof(*irq_info), GFP_KERNEL);

	/* We are using nr_irqs as the maximum number of pirqs available but
	 * that number is actually chosen by Xen and we don't know exactly
	 * what it is. Be careful choosing high pirq numbers. */
	pirq_to_irq = kcalloc(nr_irqs, sizeof(*pirq_to_irq), GFP_KERNEL);
	for (i = 0; i < nr_irqs; i++)
		pirq_to_irq[i] = -1;

	evtchn_to_irq = kcalloc(NR_EVENT_CHANNELS, sizeof(*evtchn_to_irq),
				GFP_KERNEL);
	for (i = 0; i < NR_EVENT_CHANNELS; i++)
		evtchn_to_irq[i] = -1;

	init_evtchn_cpu_bindings();

	/* No event channels are 'live' right now. */
	for (i = 0; i < NR_EVENT_CHANNELS; i++)
		mask_evtchn(i);

	if (xen_hvm_domain()) {
		xen_callback_vector();
		native_init_IRQ();
		/* pci_xen_hvm_init must be called after native_init_IRQ so that
		 * __acpi_register_gsi can point at the right function */
		pci_xen_hvm_init();
	} else {
		irq_ctx_init(smp_processor_id());
		if (xen_initial_domain())
			xen_setup_pirqs();
	}
}