events.c

/*
 * Xen event channels
 *
 * Xen models interrupts with abstract event channels.  Because each
 * domain gets 1024 event channels, but NR_IRQS is not that large, we
 * must dynamically map irqs<->event channels.  The event channels
 * interface with the rest of the kernel by defining a xen interrupt
 * chip.  When an event is received, it is mapped to an irq and sent
 * through the normal interrupt processing path.
 *
 * There are four kinds of events which can be mapped to an event
 * channel:
 *
 * 1. Inter-domain notifications.  This includes all the virtual
 *    device events, since they're driven by front-ends in another domain
 *    (typically dom0).
 * 2. VIRQs, typically used for timers.  These are per-cpu events.
 * 3. IPIs.
 * 4. PIRQs - Hardware interrupts.
 *
 * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007
 */
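/*
 * Typical use by a driver (illustrative sketch only, not part of this
 * file's logic): a frontend that has learned an event-channel port from
 * its backend binds it to an irq plus handler, and later tears the
 * binding down again:
 *
 *	irq = bind_evtchn_to_irqhandler(evtchn, my_handler, 0,
 *					"my-frontend", my_dev);
 *	if (irq < 0)
 *		return irq;
 *	...
 *	unbind_from_irqhandler(irq, my_dev);
 *
 * Here my_handler, "my-frontend" and my_dev stand in for the driver's
 * own irq_handler_t, device name and cookie.
 */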
#include <linux/linkage.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/bootmem.h>
#include <linux/slab.h>
#include <linux/irqnr.h>

#include <asm/desc.h>
#include <asm/ptrace.h>
#include <asm/irq.h>
#include <asm/idle.h>
#include <asm/io_apic.h>
#include <asm/sync_bitops.h>
#include <asm/xen/hypercall.h>
#include <asm/xen/hypervisor.h>

#include <xen/xen.h>
#include <xen/hvm.h>
#include <xen/xen-ops.h>
#include <xen/events.h>
#include <xen/interface/xen.h>
#include <xen/interface/event_channel.h>
#include <xen/interface/hvm/hvm_op.h>
#include <xen/interface/hvm/params.h>
/*
 * This lock protects updates to the following mapping and reference-count
 * arrays. The lock does not need to be acquired to read the mapping tables.
 */
static DEFINE_SPINLOCK(irq_mapping_update_lock);

/* IRQ <-> VIRQ mapping. */
static DEFINE_PER_CPU(int [NR_VIRQS], virq_to_irq) = {[0 ... NR_VIRQS-1] = -1};

/* IRQ <-> IPI mapping */
static DEFINE_PER_CPU(int [XEN_NR_IPIS], ipi_to_irq) = {[0 ... XEN_NR_IPIS-1] = -1};

/* Interrupt types. */
enum xen_irq_type {
	IRQT_UNBOUND = 0,
	IRQT_PIRQ,
	IRQT_VIRQ,
	IRQT_IPI,
	IRQT_EVTCHN
};

/*
 * Packed IRQ information:
 * type - enum xen_irq_type
 * event channel - irq->event channel mapping
 * cpu - cpu this event channel is bound to
 * index - type-specific information:
 *    PIRQ - vector, with MSB being "needs EOI"
 *    VIRQ - virq number
 *    IPI - IPI vector
 *    EVTCHN -
 */
struct irq_info
{
	enum xen_irq_type type;	/* type */
	unsigned short evtchn;	/* event channel */
	unsigned short cpu;	/* cpu bound */

	union {
		unsigned short virq;
		enum ipi_vector ipi;
		struct {
			unsigned short gsi;
			unsigned char vector;
			unsigned char flags;
		} pirq;
	} u;
};
#define PIRQ_NEEDS_EOI	(1 << 0)
#define PIRQ_SHAREABLE	(1 << 1)
static struct irq_info *irq_info;

static int *evtchn_to_irq;
struct cpu_evtchn_s {
	unsigned long bits[NR_EVENT_CHANNELS/BITS_PER_LONG];
};

static __initdata struct cpu_evtchn_s init_evtchn_mask = {
	.bits[0 ... (NR_EVENT_CHANNELS/BITS_PER_LONG)-1] = ~0ul,
};
static struct cpu_evtchn_s *cpu_evtchn_mask_p = &init_evtchn_mask;

static inline unsigned long *cpu_evtchn_mask(int cpu)
{
	return cpu_evtchn_mask_p[cpu].bits;
}

/* Xen will never allocate port zero for any purpose. */
#define VALID_EVTCHN(chn)	((chn) != 0)

static struct irq_chip xen_dynamic_chip;
static struct irq_chip xen_percpu_chip;
static struct irq_chip xen_pirq_chip;
/* Constructor for packed IRQ information. */
static struct irq_info mk_unbound_info(void)
{
	return (struct irq_info) { .type = IRQT_UNBOUND };
}

static struct irq_info mk_evtchn_info(unsigned short evtchn)
{
	return (struct irq_info) { .type = IRQT_EVTCHN, .evtchn = evtchn,
			.cpu = 0 };
}

static struct irq_info mk_ipi_info(unsigned short evtchn, enum ipi_vector ipi)
{
	return (struct irq_info) { .type = IRQT_IPI, .evtchn = evtchn,
			.cpu = 0, .u.ipi = ipi };
}

static struct irq_info mk_virq_info(unsigned short evtchn, unsigned short virq)
{
	return (struct irq_info) { .type = IRQT_VIRQ, .evtchn = evtchn,
			.cpu = 0, .u.virq = virq };
}

static struct irq_info mk_pirq_info(unsigned short evtchn,
				    unsigned short gsi, unsigned short vector)
{
	return (struct irq_info) { .type = IRQT_PIRQ, .evtchn = evtchn,
			.cpu = 0, .u.pirq = { .gsi = gsi, .vector = vector } };
}
/*
 * Accessors for packed IRQ information.
 */
static struct irq_info *info_for_irq(unsigned irq)
{
	return &irq_info[irq];
}

static unsigned int evtchn_from_irq(unsigned irq)
{
	return info_for_irq(irq)->evtchn;
}

unsigned irq_from_evtchn(unsigned int evtchn)
{
	return evtchn_to_irq[evtchn];
}
EXPORT_SYMBOL_GPL(irq_from_evtchn);

static enum ipi_vector ipi_from_irq(unsigned irq)
{
	struct irq_info *info = info_for_irq(irq);

	BUG_ON(info == NULL);
	BUG_ON(info->type != IRQT_IPI);

	return info->u.ipi;
}

static unsigned virq_from_irq(unsigned irq)
{
	struct irq_info *info = info_for_irq(irq);

	BUG_ON(info == NULL);
	BUG_ON(info->type != IRQT_VIRQ);

	return info->u.virq;
}

static unsigned gsi_from_irq(unsigned irq)
{
	struct irq_info *info = info_for_irq(irq);

	BUG_ON(info == NULL);
	BUG_ON(info->type != IRQT_PIRQ);

	return info->u.pirq.gsi;
}

static unsigned vector_from_irq(unsigned irq)
{
	struct irq_info *info = info_for_irq(irq);

	BUG_ON(info == NULL);
	BUG_ON(info->type != IRQT_PIRQ);

	return info->u.pirq.vector;
}

static enum xen_irq_type type_from_irq(unsigned irq)
{
	return info_for_irq(irq)->type;
}

static unsigned cpu_from_irq(unsigned irq)
{
	return info_for_irq(irq)->cpu;
}

static unsigned int cpu_from_evtchn(unsigned int evtchn)
{
	int irq = evtchn_to_irq[evtchn];
	unsigned ret = 0;

	if (irq != -1)
		ret = cpu_from_irq(irq);

	return ret;
}
static bool pirq_needs_eoi(unsigned irq)
{
	struct irq_info *info = info_for_irq(irq);

	BUG_ON(info->type != IRQT_PIRQ);

	return info->u.pirq.flags & PIRQ_NEEDS_EOI;
}

static inline unsigned long active_evtchns(unsigned int cpu,
					   struct shared_info *sh,
					   unsigned int idx)
{
	return (sh->evtchn_pending[idx] &
		cpu_evtchn_mask(cpu)[idx] &
		~sh->evtchn_mask[idx]);
}
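/*
 * Record that event channel @chn is delivered to @cpu: point the irq
 * descriptor's affinity at that cpu and move the channel's bit between
 * the per-cpu masks consulted by active_evtchns().
 */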
static void bind_evtchn_to_cpu(unsigned int chn, unsigned int cpu)
{
	int irq = evtchn_to_irq[chn];

	BUG_ON(irq == -1);
#ifdef CONFIG_SMP
	cpumask_copy(irq_to_desc(irq)->affinity, cpumask_of(cpu));
#endif

	__clear_bit(chn, cpu_evtchn_mask(cpu_from_irq(irq)));
	__set_bit(chn, cpu_evtchn_mask(cpu));

	irq_info[irq].cpu = cpu;
}

static void init_evtchn_cpu_bindings(void)
{
#ifdef CONFIG_SMP
	struct irq_desc *desc;
	int i;

	/* By default all event channels notify CPU#0. */
	for_each_irq_desc(i, desc) {
		cpumask_copy(desc->affinity, cpumask_of(0));
	}
#endif

	memset(cpu_evtchn_mask(0), ~0, sizeof(cpu_evtchn_mask(0)));
}

static inline void clear_evtchn(int port)
{
	struct shared_info *s = HYPERVISOR_shared_info;
	sync_clear_bit(port, &s->evtchn_pending[0]);
}

static inline void set_evtchn(int port)
{
	struct shared_info *s = HYPERVISOR_shared_info;
	sync_set_bit(port, &s->evtchn_pending[0]);
}

static inline int test_evtchn(int port)
{
	struct shared_info *s = HYPERVISOR_shared_info;
	return sync_test_bit(port, &s->evtchn_pending[0]);
}
/**
 * notify_remote_via_irq - send event to remote end of event channel via irq
 * @irq: irq of event channel to send event to
 *
 * Unlike notify_remote_via_evtchn(), this is safe to use across
 * save/restore. Notifications on a broken connection are silently
 * dropped.
 */
void notify_remote_via_irq(int irq)
{
	int evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn))
		notify_remote_via_evtchn(evtchn);
}
EXPORT_SYMBOL_GPL(notify_remote_via_irq);

static void mask_evtchn(int port)
{
	struct shared_info *s = HYPERVISOR_shared_info;
	sync_set_bit(port, &s->evtchn_mask[0]);
}

static void unmask_evtchn(int port)
{
	struct shared_info *s = HYPERVISOR_shared_info;
	unsigned int cpu = get_cpu();

	BUG_ON(!irqs_disabled());

	/* Slow path (hypercall) if this is a non-local port. */
	if (unlikely(cpu != cpu_from_evtchn(port))) {
		struct evtchn_unmask unmask = { .port = port };
		(void)HYPERVISOR_event_channel_op(EVTCHNOP_unmask, &unmask);
	} else {
		struct vcpu_info *vcpu_info = __get_cpu_var(xen_vcpu);

		sync_clear_bit(port, &s->evtchn_mask[0]);

		/*
		 * The following is basically the equivalent of
		 * 'hw_resend_irq'. Just like a real IO-APIC we 'lose
		 * the interrupt edge' if the channel is masked.
		 */
		if (sync_test_bit(port, &s->evtchn_pending[0]) &&
		    !sync_test_and_set_bit(port / BITS_PER_LONG,
					   &vcpu_info->evtchn_pending_sel))
			vcpu_info->evtchn_upcall_pending = 1;
	}

	put_cpu();
}
static int get_nr_hw_irqs(void)
{
	int ret = 1;

#ifdef CONFIG_X86_IO_APIC
	ret = get_nr_irqs_gsi();
#endif

	return ret;
}
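/*
 * Find an irq we can bind an event channel to.  The search runs from
 * the top of the irq space downwards so that the low, GSI-identity-
 * mapped numbers stay free for hardware interrupts: a slot is usable if
 * it has no irq_data yet, still has no_irq_chip, or belongs to
 * xen_dynamic_chip but is currently IRQT_UNBOUND.  A descriptor is then
 * allocated at that slot if one does not already exist.
 */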
static int find_unbound_irq(void)
{
	struct irq_data *data;
	int irq, res;
	int start = get_nr_hw_irqs();

	if (start == nr_irqs)
		goto no_irqs;

	/* nr_irqs is a magic value. Must not use it.*/
	for (irq = nr_irqs-1; irq > start; irq--) {
		data = irq_get_irq_data(irq);
		/* only 0->15 have init'd desc; handle irq > 16 */
		if (!data)
			break;
		if (data->chip == &no_irq_chip)
			break;
		if (data->chip != &xen_dynamic_chip)
			continue;
		if (irq_info[irq].type == IRQT_UNBOUND)
			return irq;
	}

	if (irq == start)
		goto no_irqs;

	res = irq_alloc_desc_at(irq, 0);

	if (WARN_ON(res != irq))
		return -1;

	return irq;

no_irqs:
	panic("No available IRQ to bind to: increase nr_irqs!\n");
}
static bool identity_mapped_irq(unsigned irq)
{
	/* identity map all the hardware irqs */
	return irq < get_nr_hw_irqs();
}

static void pirq_unmask_notify(int irq)
{
	struct physdev_eoi eoi = { .irq = irq };

	if (unlikely(pirq_needs_eoi(irq))) {
		int rc = HYPERVISOR_physdev_op(PHYSDEVOP_eoi, &eoi);
		WARN_ON(rc);
	}
}

static void pirq_query_unmask(int irq)
{
	struct physdev_irq_status_query irq_status;
	struct irq_info *info = info_for_irq(irq);

	BUG_ON(info->type != IRQT_PIRQ);

	irq_status.irq = irq;
	if (HYPERVISOR_physdev_op(PHYSDEVOP_irq_status_query, &irq_status))
		irq_status.flags = 0;

	info->u.pirq.flags &= ~PIRQ_NEEDS_EOI;
	if (irq_status.flags & XENIRQSTAT_needs_eoi)
		info->u.pirq.flags |= PIRQ_NEEDS_EOI;
}

static bool probing_irq(int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	return desc && desc->action == NULL;
}
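/*
 * startup/shutdown for hardware interrupts routed through Xen: startup
 * binds the pirq to a fresh event channel with EVTCHNOP_bind_pirq
 * (requesting BIND_PIRQ__WILL_SHARE if the pirq was allocated
 * shareable), records the mapping on CPU0 and unmasks it; shutdown
 * masks the channel, closes the port and drops the mapping again.
 */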
static unsigned int startup_pirq(unsigned int irq)
{
	struct evtchn_bind_pirq bind_pirq;
	struct irq_info *info = info_for_irq(irq);
	int evtchn = evtchn_from_irq(irq);
	int rc;

	BUG_ON(info->type != IRQT_PIRQ);

	if (VALID_EVTCHN(evtchn))
		goto out;

	bind_pirq.pirq = irq;
	/* NB. We are happy to share unless we are probing. */
	bind_pirq.flags = info->u.pirq.flags & PIRQ_SHAREABLE ?
					BIND_PIRQ__WILL_SHARE : 0;
	rc = HYPERVISOR_event_channel_op(EVTCHNOP_bind_pirq, &bind_pirq);
	if (rc != 0) {
		if (!probing_irq(irq))
			printk(KERN_INFO "Failed to obtain physical IRQ %d\n",
			       irq);
		return 0;
	}
	evtchn = bind_pirq.port;

	pirq_query_unmask(irq);

	evtchn_to_irq[evtchn] = irq;
	bind_evtchn_to_cpu(evtchn, 0);
	info->evtchn = evtchn;

out:
	unmask_evtchn(evtchn);
	pirq_unmask_notify(irq);

	return 0;
}
static void shutdown_pirq(unsigned int irq)
{
	struct evtchn_close close;
	struct irq_info *info = info_for_irq(irq);
	int evtchn = evtchn_from_irq(irq);

	BUG_ON(info->type != IRQT_PIRQ);

	if (!VALID_EVTCHN(evtchn))
		return;

	mask_evtchn(evtchn);

	close.port = evtchn;
	if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close) != 0)
		BUG();

	bind_evtchn_to_cpu(evtchn, 0);
	evtchn_to_irq[evtchn] = -1;
	info->evtchn = 0;
}

static void enable_pirq(unsigned int irq)
{
	startup_pirq(irq);
}

static void disable_pirq(unsigned int irq)
{
}

static void ack_pirq(unsigned int irq)
{
	int evtchn = evtchn_from_irq(irq);

	move_native_irq(irq);

	if (VALID_EVTCHN(evtchn)) {
		mask_evtchn(evtchn);
		clear_evtchn(evtchn);
	}
}

static void end_pirq(unsigned int irq)
{
	int evtchn = evtchn_from_irq(irq);
	struct irq_desc *desc = irq_to_desc(irq);

	if (WARN_ON(!desc))
		return;

	if ((desc->status & (IRQ_DISABLED|IRQ_PENDING)) ==
	    (IRQ_DISABLED|IRQ_PENDING)) {
		shutdown_pirq(irq);
	} else if (VALID_EVTCHN(evtchn)) {
		unmask_evtchn(evtchn);
		pirq_unmask_notify(irq);
	}
}
static int find_irq_by_gsi(unsigned gsi)
{
	int irq;

	for (irq = 0; irq < nr_irqs; irq++) {
		struct irq_info *info = info_for_irq(irq);

		if (info == NULL || info->type != IRQT_PIRQ)
			continue;

		if (gsi_from_irq(irq) == gsi)
			return irq;
	}

	return -1;
}
/*
 * xen_allocate_pirq might allocate irqs from the top down, so don't
 * assume that the irq number returned has a low value or can be used
 * as a pirq number unless you know otherwise.
 *
 * One notable exception is when xen_allocate_pirq is called with a
 * hardware gsi as argument: in that case the irq number returned
 * matches the gsi number passed in.
 *
 * Note: we don't assign an event channel until the irq is actually
 * started up.  Return an existing irq if we've already got one for
 * the gsi.
 */
int xen_allocate_pirq(unsigned gsi, int shareable, char *name)
{
	int irq;
	struct physdev_irq irq_op;

	spin_lock(&irq_mapping_update_lock);

	irq = find_irq_by_gsi(gsi);
	if (irq != -1) {
		printk(KERN_INFO "xen_allocate_pirq: returning irq %d for gsi %u\n",
		       irq, gsi);
		goto out;	/* XXX need refcount? */
	}

	/* If we are a PV guest, we don't have GSIs (no ACPI passed).
	 * Therefore we also take the identity-mapped path for
	 * !xen_initial_domain(). */
	if (identity_mapped_irq(gsi) || !xen_initial_domain()) {
		irq = gsi;
		irq_alloc_desc_at(irq, 0);
	} else
		irq = find_unbound_irq();

	set_irq_chip_and_handler_name(irq, &xen_pirq_chip,
				      handle_level_irq, name);

	irq_op.irq = irq;
	irq_op.vector = 0;

	/* Only the privileged domain can do this. For non-priv, the pcifront
	 * driver provides a PCI bus that does the call to do exactly
	 * this in the priv domain. */
	if (xen_initial_domain() &&
	    HYPERVISOR_physdev_op(PHYSDEVOP_alloc_irq_vector, &irq_op)) {
		irq_free_desc(irq);
		irq = -ENOSPC;
		goto out;
	}

	irq_info[irq] = mk_pirq_info(0, gsi, irq_op.vector);
	irq_info[irq].u.pirq.flags |= shareable ? PIRQ_SHAREABLE : 0;

out:
	spin_unlock(&irq_mapping_update_lock);

	return irq;
}
int xen_destroy_irq(int irq)
{
	struct irq_desc *desc;
	int rc = -ENOENT;

	spin_lock(&irq_mapping_update_lock);

	desc = irq_to_desc(irq);
	if (!desc)
		goto out;

	irq_info[irq] = mk_unbound_info();

	irq_free_desc(irq);
	rc = 0;

out:
	spin_unlock(&irq_mapping_update_lock);
	return rc;
}

int xen_vector_from_irq(unsigned irq)
{
	return vector_from_irq(irq);
}

int xen_gsi_from_irq(unsigned irq)
{
	return gsi_from_irq(irq);
}
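/*
 * Map an inter-domain event channel onto an irq.  If the channel is
 * already bound the existing irq is returned; otherwise a dynamic irq
 * is allocated, wired to xen_dynamic_chip with the edge handler, and
 * the evtchn <-> irq tables are updated under irq_mapping_update_lock.
 */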
int bind_evtchn_to_irq(unsigned int evtchn)
{
	int irq;

	spin_lock(&irq_mapping_update_lock);

	irq = evtchn_to_irq[evtchn];

	if (irq == -1) {
		irq = find_unbound_irq();

		set_irq_chip_and_handler_name(irq, &xen_dynamic_chip,
					      handle_edge_irq, "event");

		evtchn_to_irq[evtchn] = irq;
		irq_info[irq] = mk_evtchn_info(evtchn);
	}

	spin_unlock(&irq_mapping_update_lock);

	return irq;
}
EXPORT_SYMBOL_GPL(bind_evtchn_to_irq);

static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu)
{
	struct evtchn_bind_ipi bind_ipi;
	int evtchn, irq;

	spin_lock(&irq_mapping_update_lock);

	irq = per_cpu(ipi_to_irq, cpu)[ipi];

	if (irq == -1) {
		irq = find_unbound_irq();
		if (irq < 0)
			goto out;

		set_irq_chip_and_handler_name(irq, &xen_percpu_chip,
					      handle_percpu_irq, "ipi");

		bind_ipi.vcpu = cpu;
		if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi,
						&bind_ipi) != 0)
			BUG();
		evtchn = bind_ipi.port;

		evtchn_to_irq[evtchn] = irq;
		irq_info[irq] = mk_ipi_info(evtchn, ipi);
		per_cpu(ipi_to_irq, cpu)[ipi] = irq;

		bind_evtchn_to_cpu(evtchn, cpu);
	}

out:
	spin_unlock(&irq_mapping_update_lock);
	return irq;
}
static int bind_virq_to_irq(unsigned int virq, unsigned int cpu)
{
	struct evtchn_bind_virq bind_virq;
	int evtchn, irq;

	spin_lock(&irq_mapping_update_lock);

	irq = per_cpu(virq_to_irq, cpu)[virq];

	if (irq == -1) {
		bind_virq.virq = virq;
		bind_virq.vcpu = cpu;
		if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq,
						&bind_virq) != 0)
			BUG();
		evtchn = bind_virq.port;

		irq = find_unbound_irq();

		set_irq_chip_and_handler_name(irq, &xen_percpu_chip,
					      handle_percpu_irq, "virq");

		evtchn_to_irq[evtchn] = irq;
		irq_info[irq] = mk_virq_info(evtchn, virq);

		per_cpu(virq_to_irq, cpu)[virq] = irq;

		bind_evtchn_to_cpu(evtchn, cpu);
	}

	spin_unlock(&irq_mapping_update_lock);

	return irq;
}
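/*
 * Undo any of the above bindings: close the event channel if one is
 * attached, forget the per-cpu virq/ipi slot, hand the (now closed)
 * port back to CPU0's mask and finally release the irq descriptor.
 */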
static void unbind_from_irq(unsigned int irq)
{
	struct evtchn_close close;
	int evtchn = evtchn_from_irq(irq);

	spin_lock(&irq_mapping_update_lock);

	if (VALID_EVTCHN(evtchn)) {
		close.port = evtchn;
		if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close) != 0)
			BUG();

		switch (type_from_irq(irq)) {
		case IRQT_VIRQ:
			per_cpu(virq_to_irq, cpu_from_evtchn(evtchn))
				[virq_from_irq(irq)] = -1;
			break;
		case IRQT_IPI:
			per_cpu(ipi_to_irq, cpu_from_evtchn(evtchn))
				[ipi_from_irq(irq)] = -1;
			break;
		default:
			break;
		}

		/* Closed ports are implicitly re-bound to VCPU0. */
		bind_evtchn_to_cpu(evtchn, 0);

		evtchn_to_irq[evtchn] = -1;
	}

	if (irq_info[irq].type != IRQT_UNBOUND) {
		irq_info[irq] = mk_unbound_info();

		irq_free_desc(irq);
	}

	spin_unlock(&irq_mapping_update_lock);
}
int bind_evtchn_to_irqhandler(unsigned int evtchn,
			      irq_handler_t handler,
			      unsigned long irqflags,
			      const char *devname, void *dev_id)
{
	unsigned int irq;
	int retval;

	irq = bind_evtchn_to_irq(evtchn);
	retval = request_irq(irq, handler, irqflags, devname, dev_id);
	if (retval != 0) {
		unbind_from_irq(irq);
		return retval;
	}

	return irq;
}
EXPORT_SYMBOL_GPL(bind_evtchn_to_irqhandler);

int bind_virq_to_irqhandler(unsigned int virq, unsigned int cpu,
			    irq_handler_t handler,
			    unsigned long irqflags, const char *devname, void *dev_id)
{
	unsigned int irq;
	int retval;

	irq = bind_virq_to_irq(virq, cpu);
	retval = request_irq(irq, handler, irqflags, devname, dev_id);
	if (retval != 0) {
		unbind_from_irq(irq);
		return retval;
	}

	return irq;
}
EXPORT_SYMBOL_GPL(bind_virq_to_irqhandler);

int bind_ipi_to_irqhandler(enum ipi_vector ipi,
			   unsigned int cpu,
			   irq_handler_t handler,
			   unsigned long irqflags,
			   const char *devname,
			   void *dev_id)
{
	int irq, retval;

	irq = bind_ipi_to_irq(ipi, cpu);
	if (irq < 0)
		return irq;

	irqflags |= IRQF_NO_SUSPEND;
	retval = request_irq(irq, handler, irqflags, devname, dev_id);
	if (retval != 0) {
		unbind_from_irq(irq);
		return retval;
	}

	return irq;
}
void unbind_from_irqhandler(unsigned int irq, void *dev_id)
{
	free_irq(irq, dev_id);
	unbind_from_irq(irq);
}
EXPORT_SYMBOL_GPL(unbind_from_irqhandler);

void xen_send_IPI_one(unsigned int cpu, enum ipi_vector vector)
{
	int irq = per_cpu(ipi_to_irq, cpu)[vector];
	BUG_ON(irq < 0);
	notify_remote_via_irq(irq);
}
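/*
 * Debug handler: dump each online vcpu's upcall mask/pending state and
 * selector word, the global pending and mask bitmaps, the channels that
 * are pending and unmasked, and finally every pending port together
 * with the cpu and irq it maps to.
 */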
irqreturn_t xen_debug_interrupt(int irq, void *dev_id)
{
	struct shared_info *sh = HYPERVISOR_shared_info;
	int cpu = smp_processor_id();
	int i;
	unsigned long flags;
	static DEFINE_SPINLOCK(debug_lock);

	spin_lock_irqsave(&debug_lock, flags);

	printk("vcpu %d\n ", cpu);

	for_each_online_cpu(i) {
		struct vcpu_info *v = per_cpu(xen_vcpu, i);
		printk("%d: masked=%d pending=%d event_sel %08lx\n ", i,
		       (get_irq_regs() && i == cpu) ? xen_irqs_disabled(get_irq_regs()) : v->evtchn_upcall_mask,
		       v->evtchn_upcall_pending,
		       v->evtchn_pending_sel);
	}
	printk("pending:\n ");
	for (i = ARRAY_SIZE(sh->evtchn_pending)-1; i >= 0; i--)
		printk("%08lx%s", sh->evtchn_pending[i],
		       i % 8 == 0 ? "\n " : " ");
	printk("\nmasks:\n ");
	for (i = ARRAY_SIZE(sh->evtchn_mask)-1; i >= 0; i--)
		printk("%08lx%s", sh->evtchn_mask[i],
		       i % 8 == 0 ? "\n " : " ");

	printk("\nunmasked:\n ");
	for (i = ARRAY_SIZE(sh->evtchn_mask)-1; i >= 0; i--)
		printk("%08lx%s", sh->evtchn_pending[i] & ~sh->evtchn_mask[i],
		       i % 8 == 0 ? "\n " : " ");

	printk("\npending list:\n");
	for (i = 0; i < NR_EVENT_CHANNELS; i++) {
		if (sync_test_bit(i, sh->evtchn_pending)) {
			printk(" %d: event %d -> irq %d\n",
			       cpu_from_evtchn(i), i,
			       evtchn_to_irq[i]);
		}
	}

	spin_unlock_irqrestore(&debug_lock, flags);

	return IRQ_HANDLED;
}
static DEFINE_PER_CPU(unsigned, xed_nesting_count);

/*
 * Search the CPU's pending events bitmasks.  For each one found, map
 * the event number to an irq, and feed it into do_IRQ() for
 * handling.
 *
 * Xen uses a two-level bitmap to speed searching.  The first level is
 * a bitset of words which contain pending event bits.  The second
 * level is a bitset of pending events themselves.
 */
static void __xen_evtchn_do_upcall(void)
{
	int cpu = get_cpu();
	struct shared_info *s = HYPERVISOR_shared_info;
	struct vcpu_info *vcpu_info = __get_cpu_var(xen_vcpu);
	unsigned count;

	do {
		unsigned long pending_words;

		vcpu_info->evtchn_upcall_pending = 0;

		if (__get_cpu_var(xed_nesting_count)++)
			goto out;

#ifndef CONFIG_X86 /* No need for a barrier -- XCHG is a barrier on x86. */
		/* Clear master flag /before/ clearing selector flag. */
		wmb();
#endif
		pending_words = xchg(&vcpu_info->evtchn_pending_sel, 0);
		while (pending_words != 0) {
			unsigned long pending_bits;
			int word_idx = __ffs(pending_words);
			pending_words &= ~(1UL << word_idx);

			while ((pending_bits = active_evtchns(cpu, s, word_idx)) != 0) {
				int bit_idx = __ffs(pending_bits);
				int port = (word_idx * BITS_PER_LONG) + bit_idx;
				int irq = evtchn_to_irq[port];
				struct irq_desc *desc;

				if (irq != -1) {
					desc = irq_to_desc(irq);
					if (desc)
						generic_handle_irq_desc(irq, desc);
				}
			}
		}

		BUG_ON(!irqs_disabled());

		count = __get_cpu_var(xed_nesting_count);
		__get_cpu_var(xed_nesting_count) = 0;
	} while (count != 1 || vcpu_info->evtchn_upcall_pending);

out:
	put_cpu();
}
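/*
 * Entry point for the PV upcall: run the common scan above inside the
 * usual interrupt bookkeeping (exit_idle/irq_enter/irq_exit) with the
 * saved register state installed as the irq regs.
 * xen_hvm_evtchn_do_upcall() is the same scan for HVM guests and is
 * exported for callback paths that already run in interrupt context.
 */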
void xen_evtchn_do_upcall(struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);

	exit_idle();
	irq_enter();

	__xen_evtchn_do_upcall();

	irq_exit();
	set_irq_regs(old_regs);
}

void xen_hvm_evtchn_do_upcall(void)
{
	__xen_evtchn_do_upcall();
}
EXPORT_SYMBOL_GPL(xen_hvm_evtchn_do_upcall);
/* Rebind a new event channel to an existing irq. */
void rebind_evtchn_irq(int evtchn, int irq)
{
	struct irq_info *info = info_for_irq(irq);

	/* Make sure the irq is masked, since the new event channel
	   will also be masked. */
	disable_irq(irq);

	spin_lock(&irq_mapping_update_lock);

	/* After resume the irq<->evtchn mappings are all cleared out */
	BUG_ON(evtchn_to_irq[evtchn] != -1);
	/* Expect irq to have been bound before,
	   so there should be a proper type */
	BUG_ON(info->type == IRQT_UNBOUND);

	evtchn_to_irq[evtchn] = irq;
	irq_info[irq] = mk_evtchn_info(evtchn);

	spin_unlock(&irq_mapping_update_lock);

	/* new event channels are always bound to cpu 0 */
	irq_set_affinity(irq, cpumask_of(0));

	/* Unmask the event channel. */
	enable_irq(irq);
}
/* Rebind an evtchn so that it gets delivered to a specific cpu */
static int rebind_irq_to_cpu(unsigned irq, unsigned tcpu)
{
	struct evtchn_bind_vcpu bind_vcpu;
	int evtchn = evtchn_from_irq(irq);

	/* events delivered via platform PCI interrupts are always
	 * routed to vcpu 0 */
	if (!VALID_EVTCHN(evtchn) ||
	    (xen_hvm_domain() && !xen_have_vector_callback))
		return -1;

	/* Send future instances of this interrupt to other vcpu. */
	bind_vcpu.port = evtchn;
	bind_vcpu.vcpu = tcpu;

	/*
	 * If this fails, it usually just indicates that we're dealing with a
	 * virq or IPI channel, which don't actually need to be rebound. Ignore
	 * it, but don't do the xenlinux-level rebind in that case.
	 */
	if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_vcpu, &bind_vcpu) >= 0)
		bind_evtchn_to_cpu(evtchn, tcpu);

	return 0;
}

static int set_affinity_irq(unsigned irq, const struct cpumask *dest)
{
	unsigned tcpu = cpumask_first(dest);
	return rebind_irq_to_cpu(irq, tcpu);
}
int resend_irq_on_evtchn(unsigned int irq)
{
	int masked, evtchn = evtchn_from_irq(irq);
	struct shared_info *s = HYPERVISOR_shared_info;

	if (!VALID_EVTCHN(evtchn))
		return 1;

	masked = sync_test_and_set_bit(evtchn, s->evtchn_mask);
	sync_set_bit(evtchn, s->evtchn_pending);
	if (!masked)
		unmask_evtchn(evtchn);

	return 1;
}
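/*
 * irq_chip callbacks for dynamically bound event channels: mask/unmask
 * map straight onto the channel's mask bit, ack clears the pending bit
 * (after giving a pending irq migration a chance to happen), and
 * retrigger re-raises the event much like resend_irq_on_evtchn() above.
 */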
static void enable_dynirq(unsigned int irq)
{
	int evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn))
		unmask_evtchn(evtchn);
}

static void disable_dynirq(unsigned int irq)
{
	int evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn))
		mask_evtchn(evtchn);
}

static void ack_dynirq(unsigned int irq)
{
	int evtchn = evtchn_from_irq(irq);

	move_native_irq(irq);

	if (VALID_EVTCHN(evtchn))
		clear_evtchn(evtchn);
}

static int retrigger_dynirq(unsigned int irq)
{
	int evtchn = evtchn_from_irq(irq);
	struct shared_info *sh = HYPERVISOR_shared_info;
	int ret = 0;

	if (VALID_EVTCHN(evtchn)) {
		int masked;

		masked = sync_test_and_set_bit(evtchn, sh->evtchn_mask);
		sync_set_bit(evtchn, sh->evtchn_pending);
		if (!masked)
			unmask_evtchn(evtchn);
		ret = 1;
	}

	return ret;
}
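/*
 * Resume support: a save/restore or migration invalidates every event
 * channel, so for each virq and ipi that still has an irq we ask Xen
 * for a fresh binding, record the new port and unmask it again.
 */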
static void restore_cpu_virqs(unsigned int cpu)
{
	struct evtchn_bind_virq bind_virq;
	int virq, irq, evtchn;

	for (virq = 0; virq < NR_VIRQS; virq++) {
		if ((irq = per_cpu(virq_to_irq, cpu)[virq]) == -1)
			continue;

		BUG_ON(virq_from_irq(irq) != virq);

		/* Get a new binding from Xen. */
		bind_virq.virq = virq;
		bind_virq.vcpu = cpu;
		if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq,
						&bind_virq) != 0)
			BUG();
		evtchn = bind_virq.port;

		/* Record the new mapping. */
		evtchn_to_irq[evtchn] = irq;
		irq_info[irq] = mk_virq_info(evtchn, virq);
		bind_evtchn_to_cpu(evtchn, cpu);

		/* Ready for use. */
		unmask_evtchn(evtchn);
	}
}

static void restore_cpu_ipis(unsigned int cpu)
{
	struct evtchn_bind_ipi bind_ipi;
	int ipi, irq, evtchn;

	for (ipi = 0; ipi < XEN_NR_IPIS; ipi++) {
		if ((irq = per_cpu(ipi_to_irq, cpu)[ipi]) == -1)
			continue;

		BUG_ON(ipi_from_irq(irq) != ipi);

		/* Get a new binding from Xen. */
		bind_ipi.vcpu = cpu;
		if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi,
						&bind_ipi) != 0)
			BUG();
		evtchn = bind_ipi.port;

		/* Record the new mapping. */
		evtchn_to_irq[evtchn] = irq;
		irq_info[irq] = mk_ipi_info(evtchn, ipi);
		bind_evtchn_to_cpu(evtchn, cpu);

		/* Ready for use. */
		unmask_evtchn(evtchn);
	}
}
/* Clear an irq's pending state, in preparation for polling on it */
void xen_clear_irq_pending(int irq)
{
	int evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn))
		clear_evtchn(evtchn);
}
EXPORT_SYMBOL(xen_clear_irq_pending);

void xen_set_irq_pending(int irq)
{
	int evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn))
		set_evtchn(evtchn);
}

bool xen_test_irq_pending(int irq)
{
	int evtchn = evtchn_from_irq(irq);
	bool ret = false;

	if (VALID_EVTCHN(evtchn))
		ret = test_evtchn(evtchn);

	return ret;
}

/* Poll waiting for an irq to become pending with timeout.  In the usual case,
 * the irq will be disabled so it won't deliver an interrupt. */
void xen_poll_irq_timeout(int irq, u64 timeout)
{
	evtchn_port_t evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn)) {
		struct sched_poll poll;

		poll.nr_ports = 1;
		poll.timeout = timeout;
		set_xen_guest_handle(poll.ports, &evtchn);

		if (HYPERVISOR_sched_op(SCHEDOP_poll, &poll) != 0)
			BUG();
	}
}
EXPORT_SYMBOL(xen_poll_irq_timeout);

/* Poll waiting for an irq to become pending.  In the usual case, the
 * irq will be disabled so it won't deliver an interrupt. */
void xen_poll_irq(int irq)
{
	xen_poll_irq_timeout(irq, 0 /* no timeout */);
}
void xen_irq_resume(void)
{
	unsigned int cpu, irq, evtchn;

	init_evtchn_cpu_bindings();

	/* New event-channel space is not 'live' yet. */
	for (evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++)
		mask_evtchn(evtchn);

	/* No IRQ <-> event-channel mappings. */
	for (irq = 0; irq < nr_irqs; irq++)
		irq_info[irq].evtchn = 0; /* zap event-channel binding */

	for (evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++)
		evtchn_to_irq[evtchn] = -1;

	for_each_possible_cpu(cpu) {
		restore_cpu_virqs(cpu);
		restore_cpu_ipis(cpu);
	}
}
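/*
 * The three irq_chips used above: xen-dyn for inter-domain channels
 * (edge handling), xen-pirq for hardware interrupts routed through an
 * event channel, and xen-percpu for VIRQs and IPIs, which stay on the
 * cpu they were bound to and therefore have no set_affinity hook.
 */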
static struct irq_chip xen_dynamic_chip __read_mostly = {
	.name		= "xen-dyn",

	.disable	= disable_dynirq,
	.mask		= disable_dynirq,
	.unmask		= enable_dynirq,

	.ack		= ack_dynirq,
	.set_affinity	= set_affinity_irq,
	.retrigger	= retrigger_dynirq,
};

static struct irq_chip xen_pirq_chip __read_mostly = {
	.name		= "xen-pirq",

	.startup	= startup_pirq,
	.shutdown	= shutdown_pirq,

	.enable		= enable_pirq,
	.unmask		= enable_pirq,

	.disable	= disable_pirq,
	.mask		= disable_pirq,

	.ack		= ack_pirq,
	.end		= end_pirq,

	.set_affinity	= set_affinity_irq,

	.retrigger	= retrigger_dynirq,
};

static struct irq_chip xen_percpu_chip __read_mostly = {
	.name		= "xen-percpu",

	.disable	= disable_dynirq,
	.mask		= disable_dynirq,
	.unmask		= enable_dynirq,

	.ack		= ack_dynirq,
};
int xen_set_callback_via(uint64_t via)
{
	struct xen_hvm_param a;
	a.domid = DOMID_SELF;
	a.index = HVM_PARAM_CALLBACK_IRQ;
	a.value = via;
	return HYPERVISOR_hvm_op(HVMOP_set_param, &a);
}
EXPORT_SYMBOL_GPL(xen_set_callback_via);

#ifdef CONFIG_XEN_PVHVM
/* Vector callbacks are better than PCI interrupts to receive event
 * channel notifications because we can receive vector callbacks on any
 * vcpu and we don't need PCI support or APIC interactions. */
void xen_callback_vector(void)
{
	int rc;
	uint64_t callback_via;

	if (xen_have_vector_callback) {
		callback_via = HVM_CALLBACK_VECTOR(XEN_HVM_EVTCHN_CALLBACK);
		rc = xen_set_callback_via(callback_via);
		if (rc) {
			printk(KERN_ERR "Request for Xen HVM callback vector"
					" failed.\n");
			xen_have_vector_callback = 0;
			return;
		}
		printk(KERN_INFO "Xen HVM callback vector for event delivery is "
				"enabled\n");
		/* in the restore case the vector has already been allocated */
		if (!test_bit(XEN_HVM_EVTCHN_CALLBACK, used_vectors))
			alloc_intr_gate(XEN_HVM_EVTCHN_CALLBACK, xen_hvm_callback_vector);
	}
}
#else
void xen_callback_vector(void) {}
#endif
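/*
 * Boot-time setup: allocate the per-cpu channel masks and the
 * evtchn <-> irq tables, mask every channel, then for an HVM guest
 * install the callback vector and run the native interrupt setup, or
 * for a PV guest set up the boot cpu's irq context.
 */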
void __init xen_init_IRQ(void)
{
	int i;

	cpu_evtchn_mask_p = kcalloc(nr_cpu_ids, sizeof(struct cpu_evtchn_s),
				    GFP_KERNEL);
	irq_info = kcalloc(nr_irqs, sizeof(*irq_info), GFP_KERNEL);

	evtchn_to_irq = kcalloc(NR_EVENT_CHANNELS, sizeof(*evtchn_to_irq),
				GFP_KERNEL);
	for (i = 0; i < NR_EVENT_CHANNELS; i++)
		evtchn_to_irq[i] = -1;

	init_evtchn_cpu_bindings();

	/* No event channels are 'live' right now. */
	for (i = 0; i < NR_EVENT_CHANNELS; i++)
		mask_evtchn(i);

	if (xen_hvm_domain()) {
		xen_callback_vector();
		native_init_IRQ();
	} else {
		irq_ctx_init(smp_processor_id());
	}
}