irq_xen.c 12 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436
  1. /******************************************************************************
  2. * arch/ia64/xen/irq_xen.c
  3. *
  4. * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp>
  5. * VA Linux Systems Japan K.K.
  6. *
  7. * This program is free software; you can redistribute it and/or modify
  8. * it under the terms of the GNU General Public License as published by
  9. * the Free Software Foundation; either version 2 of the License, or
  10. * (at your option) any later version.
  11. *
  12. * This program is distributed in the hope that it will be useful,
  13. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  14. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  15. * GNU General Public License for more details.
  16. *
  17. * You should have received a copy of the GNU General Public License
  18. * along with this program; if not, write to the Free Software
  19. * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
  20. *
  21. */
  22. #include <linux/cpu.h>
  23. #include <xen/interface/xen.h>
  24. #include <xen/interface/callback.h>
  25. #include <xen/events.h>
  26. #include <asm/xen/privop.h>
  27. #include "irq_xen.h"
  28. /***************************************************************************
  29. * pv_irq_ops
  30. * irq operations
  31. */
  32. static int
  33. xen_assign_irq_vector(int irq)
  34. {
  35. struct physdev_irq irq_op;
  36. irq_op.irq = irq;
  37. if (HYPERVISOR_physdev_op(PHYSDEVOP_alloc_irq_vector, &irq_op))
  38. return -ENOSPC;
  39. return irq_op.vector;
  40. }
  41. static void
  42. xen_free_irq_vector(int vector)
  43. {
  44. struct physdev_irq irq_op;
  45. if (vector < IA64_FIRST_DEVICE_VECTOR ||
  46. vector > IA64_LAST_DEVICE_VECTOR)
  47. return;
  48. irq_op.vector = vector;
  49. if (HYPERVISOR_physdev_op(PHYSDEVOP_free_irq_vector, &irq_op))
  50. printk(KERN_WARNING "%s: xen_free_irq_vecotr fail vector=%d\n",
  51. __func__, vector);
  52. }
/* Per-cpu irq numbers handed back by the evtchn layer; -1 = not bound. */
static DEFINE_PER_CPU(int, xen_timer_irq) = -1;
static DEFINE_PER_CPU(int, xen_ipi_irq) = -1;
static DEFINE_PER_CPU(int, xen_resched_irq) = -1;
static DEFINE_PER_CPU(int, xen_cmc_irq) = -1;
static DEFINE_PER_CPU(int, xen_cmcp_irq) = -1;
static DEFINE_PER_CPU(int, xen_cpep_irq) = -1;

/* Per-cpu buffers for the "<action->name><cpu>" names shown in the
 * irq bindings (filled by __xen_register_percpu_irq()). */
#define NAME_SIZE 15
static DEFINE_PER_CPU(char[NAME_SIZE], xen_timer_name);
static DEFINE_PER_CPU(char[NAME_SIZE], xen_ipi_name);
static DEFINE_PER_CPU(char[NAME_SIZE], xen_resched_name);
static DEFINE_PER_CPU(char[NAME_SIZE], xen_cmc_name);
static DEFINE_PER_CPU(char[NAME_SIZE], xen_cmcp_name);
static DEFINE_PER_CPU(char[NAME_SIZE], xen_cpep_name);
#undef NAME_SIZE
/* One percpu irq registration cached by the BSP for later replay. */
struct saved_irq {
	unsigned int irq;		/* ia64 vector number */
	struct irqaction *action;	/* handler/flags/name to bind with */
};
/* 16 should be far optimistic value, since only several percpu irqs
 * are registered early.
 */
#define MAX_LATE_IRQ 16
static struct saved_irq saved_percpu_irqs[MAX_LATE_IRQ];
static unsigned short late_irq_cnt;	/* entries cached before slab was ready */
static unsigned short saved_irq_cnt;	/* total entries cached on the BSP */
static int xen_slab_ready;		/* set by xen_bind_early_percpu_irq() */
#ifdef CONFIG_SMP
/* Dummy stub. Though we may check XEN_RESCHEDULE_VECTOR before __do_IRQ,
 * it ends up to issue several memory accesses upon percpu data and
 * thus adds unnecessary traffic to other paths.
 */
static irqreturn_t
xen_dummy_handler(int irq, void *dev_id)
{
	return IRQ_HANDLED;
}

/* IPI work is done by handle_IPI(). */
static struct irqaction xen_ipi_irqaction = {
	.handler = handle_IPI,
	.flags = IRQF_DISABLED,
	.name = "IPI"
};

/* For resched/tlb-flush the wakeup itself is all that is needed,
 * so the handlers are dummies. */
static struct irqaction xen_resched_irqaction = {
	.handler = xen_dummy_handler,
	.flags = IRQF_DISABLED,
	.name = "resched"
};

static struct irqaction xen_tlb_irqaction = {
	.handler = xen_dummy_handler,
	.flags = IRQF_DISABLED,
	.name = "tlb_flush"
};
#endif
  105. /*
  106. * This is xen version percpu irq registration, which needs bind
  107. * to xen specific evtchn sub-system. One trick here is that xen
  108. * evtchn binding interface depends on kmalloc because related
  109. * port needs to be freed at device/cpu down. So we cache the
  110. * registration on BSP before slab is ready and then deal them
  111. * at later point. For rest instances happening after slab ready,
  112. * we hook them to xen evtchn immediately.
  113. *
  114. * FIXME: MCA is not supported by far, and thus "nomca" boot param is
  115. * required.
  116. */
/*
 * Bind one percpu irq (ia64 vector @vec) for @cpu to the Xen evtchn
 * layer with the handler/flags taken from @action.
 *
 * Before slab is ready the evtchn interfaces cannot be used, so the
 * binding is skipped; with @save set, BSP registrations are cached in
 * saved_percpu_irqs[] for later replay (see the comment block above).
 */
static void
__xen_register_percpu_irq(unsigned int cpu, unsigned int vec,
			  struct irqaction *action, int save)
{
	int irq = 0;

	if (xen_slab_ready) {
		switch (vec) {
		case IA64_TIMER_VECTOR:
			/* Each binding is named "<action name><cpu>". */
			snprintf(per_cpu(xen_timer_name, cpu),
				 sizeof(per_cpu(xen_timer_name, cpu)),
				 "%s%d", action->name, cpu);
			irq = bind_virq_to_irqhandler(VIRQ_ITC, cpu,
				action->handler, action->flags,
				per_cpu(xen_timer_name, cpu), action->dev_id);
			per_cpu(xen_timer_irq, cpu) = irq;
			break;
		case IA64_IPI_RESCHEDULE:
			snprintf(per_cpu(xen_resched_name, cpu),
				 sizeof(per_cpu(xen_resched_name, cpu)),
				 "%s%d", action->name, cpu);
			irq = bind_ipi_to_irqhandler(XEN_RESCHEDULE_VECTOR, cpu,
				action->handler, action->flags,
				per_cpu(xen_resched_name, cpu), action->dev_id);
			per_cpu(xen_resched_irq, cpu) = irq;
			break;
		case IA64_IPI_VECTOR:
			snprintf(per_cpu(xen_ipi_name, cpu),
				 sizeof(per_cpu(xen_ipi_name, cpu)),
				 "%s%d", action->name, cpu);
			irq = bind_ipi_to_irqhandler(XEN_IPI_VECTOR, cpu,
				action->handler, action->flags,
				per_cpu(xen_ipi_name, cpu), action->dev_id);
			per_cpu(xen_ipi_irq, cpu) = irq;
			break;
		case IA64_CMC_VECTOR:
			snprintf(per_cpu(xen_cmc_name, cpu),
				 sizeof(per_cpu(xen_cmc_name, cpu)),
				 "%s%d", action->name, cpu);
			irq = bind_virq_to_irqhandler(VIRQ_MCA_CMC, cpu,
						      action->handler,
						      action->flags,
						      per_cpu(xen_cmc_name, cpu),
						      action->dev_id);
			per_cpu(xen_cmc_irq, cpu) = irq;
			break;
		case IA64_CMCP_VECTOR:
			snprintf(per_cpu(xen_cmcp_name, cpu),
				 sizeof(per_cpu(xen_cmcp_name, cpu)),
				 "%s%d", action->name, cpu);
			irq = bind_ipi_to_irqhandler(XEN_CMCP_VECTOR, cpu,
						     action->handler,
						     action->flags,
						     per_cpu(xen_cmcp_name, cpu),
						     action->dev_id);
			per_cpu(xen_cmcp_irq, cpu) = irq;
			break;
		case IA64_CPEP_VECTOR:
			snprintf(per_cpu(xen_cpep_name, cpu),
				 sizeof(per_cpu(xen_cpep_name, cpu)),
				 "%s%d", action->name, cpu);
			irq = bind_ipi_to_irqhandler(XEN_CPEP_VECTOR, cpu,
						     action->handler,
						     action->flags,
						     per_cpu(xen_cpep_name, cpu),
						     action->dev_id);
			per_cpu(xen_cpep_irq, cpu) = irq;
			break;
		case IA64_CPE_VECTOR:
		case IA64_MCA_RENDEZ_VECTOR:
		case IA64_PERFMON_VECTOR:
		case IA64_MCA_WAKEUP_VECTOR:
		case IA64_SPURIOUS_INT_VECTOR:
			/* No need to complain, these aren't supported. */
			break;
		default:
			printk(KERN_WARNING "Percpu irq %d is unsupported "
			       "by xen!\n", vec);
			break;
		}
		/* A negative irq here means a bind_*_to_irqhandler() failure. */
		BUG_ON(irq < 0);

		if (irq > 0) {
			/*
			 * Mark percpu. Without this, migrate_irqs() will
			 * mark the interrupt for migrations and trigger it
			 * on cpu hotplug.
			 */
			irq_set_status_flags(irq, IRQ_PER_CPU);
		}
	}

	/* For BSP, we cache registered percpu irqs, and then re-walk
	 * them when initializing APs
	 */
	if (!cpu && save) {
		BUG_ON(saved_irq_cnt == MAX_LATE_IRQ);
		saved_percpu_irqs[saved_irq_cnt].irq = vec;
		saved_percpu_irqs[saved_irq_cnt].action = action;
		saved_irq_cnt++;
		if (!xen_slab_ready)
			late_irq_cnt++;
	}
}
  218. static void
  219. xen_register_percpu_irq(ia64_vector vec, struct irqaction *action)
  220. {
  221. __xen_register_percpu_irq(smp_processor_id(), vec, action, 1);
  222. }
  223. static void
  224. xen_bind_early_percpu_irq(void)
  225. {
  226. int i;
  227. xen_slab_ready = 1;
  228. /* There's no race when accessing this cached array, since only
  229. * BSP will face with such step shortly
  230. */
  231. for (i = 0; i < late_irq_cnt; i++)
  232. __xen_register_percpu_irq(smp_processor_id(),
  233. saved_percpu_irqs[i].irq,
  234. saved_percpu_irqs[i].action, 0);
  235. }
  236. /* FIXME: There's no obvious point to check whether slab is ready. So
  237. * a hack is used here by utilizing a late time hook.
  238. */
  239. #ifdef CONFIG_HOTPLUG_CPU
  240. static int __devinit
  241. unbind_evtchn_callback(struct notifier_block *nfb,
  242. unsigned long action, void *hcpu)
  243. {
  244. unsigned int cpu = (unsigned long)hcpu;
  245. if (action == CPU_DEAD) {
  246. /* Unregister evtchn. */
  247. if (per_cpu(xen_cpep_irq, cpu) >= 0) {
  248. unbind_from_irqhandler(per_cpu(xen_cpep_irq, cpu),
  249. NULL);
  250. per_cpu(xen_cpep_irq, cpu) = -1;
  251. }
  252. if (per_cpu(xen_cmcp_irq, cpu) >= 0) {
  253. unbind_from_irqhandler(per_cpu(xen_cmcp_irq, cpu),
  254. NULL);
  255. per_cpu(xen_cmcp_irq, cpu) = -1;
  256. }
  257. if (per_cpu(xen_cmc_irq, cpu) >= 0) {
  258. unbind_from_irqhandler(per_cpu(xen_cmc_irq, cpu), NULL);
  259. per_cpu(xen_cmc_irq, cpu) = -1;
  260. }
  261. if (per_cpu(xen_ipi_irq, cpu) >= 0) {
  262. unbind_from_irqhandler(per_cpu(xen_ipi_irq, cpu), NULL);
  263. per_cpu(xen_ipi_irq, cpu) = -1;
  264. }
  265. if (per_cpu(xen_resched_irq, cpu) >= 0) {
  266. unbind_from_irqhandler(per_cpu(xen_resched_irq, cpu),
  267. NULL);
  268. per_cpu(xen_resched_irq, cpu) = -1;
  269. }
  270. if (per_cpu(xen_timer_irq, cpu) >= 0) {
  271. unbind_from_irqhandler(per_cpu(xen_timer_irq, cpu),
  272. NULL);
  273. per_cpu(xen_timer_irq, cpu) = -1;
  274. }
  275. }
  276. return NOTIFY_OK;
  277. }
  278. static struct notifier_block unbind_evtchn_notifier = {
  279. .notifier_call = unbind_evtchn_callback,
  280. .priority = 0
  281. };
  282. #endif
  283. void xen_smp_intr_init_early(unsigned int cpu)
  284. {
  285. #ifdef CONFIG_SMP
  286. unsigned int i;
  287. for (i = 0; i < saved_irq_cnt; i++)
  288. __xen_register_percpu_irq(cpu, saved_percpu_irqs[i].irq,
  289. saved_percpu_irqs[i].action, 0);
  290. #endif
  291. }
  292. void xen_smp_intr_init(void)
  293. {
  294. #ifdef CONFIG_SMP
  295. unsigned int cpu = smp_processor_id();
  296. struct callback_register event = {
  297. .type = CALLBACKTYPE_event,
  298. .address = { .ip = (unsigned long)&xen_event_callback },
  299. };
  300. if (cpu == 0) {
  301. /* Initialization was already done for boot cpu. */
  302. #ifdef CONFIG_HOTPLUG_CPU
  303. /* Register the notifier only once. */
  304. register_cpu_notifier(&unbind_evtchn_notifier);
  305. #endif
  306. return;
  307. }
  308. /* This should be piggyback when setup vcpu guest context */
  309. BUG_ON(HYPERVISOR_callback_op(CALLBACKOP_register, &event));
  310. #endif /* CONFIG_SMP */
  311. }
/*
 * Boot-time irq setup: bring up the evtchn layer, register the boot
 * cpu's event-callback entry point with the hypervisor, and defer
 * binding of the cached percpu irqs to late_time_init (slab is not
 * ready yet at this point -- see the FIXME above).
 */
void __init
xen_irq_init(void)
{
	struct callback_register event = {
		.type = CALLBACKTYPE_event,
		.address = { .ip = (unsigned long)&xen_event_callback },
	};

	xen_init_IRQ();
	BUG_ON(HYPERVISOR_callback_op(CALLBACKOP_register, &event));
	late_time_init = xen_bind_early_percpu_irq;
}
/*
 * Platform ipi hook: translate an ia64 ipi @vector into the matching
 * Xen event notification for @cpu. @delivery_mode and @redirect are
 * accepted for interface compatibility and not used here.
 */
void
xen_platform_send_ipi(int cpu, int vector, int delivery_mode, int redirect)
{
#ifdef CONFIG_SMP
	/* TODO: we need to call vcpu_up here */
	if (unlikely(vector == ap_wakeup_vector)) {
		/* XXX
		 * This should be in __cpu_up(cpu) in ia64 smpboot.c
		 * like x86. But don't want to modify it,
		 * keep it untouched.
		 */
		xen_smp_intr_init_early(cpu);

		xen_send_ipi(cpu, vector);
		/* vcpu_prepare_and_up(cpu); */
		return;
	}
#endif

	switch (vector) {
	case IA64_IPI_VECTOR:
		xen_send_IPI_one(cpu, XEN_IPI_VECTOR);
		break;
	case IA64_IPI_RESCHEDULE:
		xen_send_IPI_one(cpu, XEN_RESCHEDULE_VECTOR);
		break;
	case IA64_CMCP_VECTOR:
		xen_send_IPI_one(cpu, XEN_CMCP_VECTOR);
		break;
	case IA64_CPEP_VECTOR:
		xen_send_IPI_one(cpu, XEN_CPEP_VECTOR);
		break;
	case IA64_TIMER_VECTOR: {
		/* this is used only once by check_sal_cache_flush()
		   at boot time */
		static int used = 0;

		/* First timer ipi goes through; later ones fall through
		 * to the unsupported-type warning below. */
		if (!used) {
			xen_send_ipi(cpu, IA64_TIMER_VECTOR);
			used = 1;
			break;
		}
		/* fallthrough */
	}
	default:
		printk(KERN_WARNING "Unsupported IPI type 0x%x\n",
		       vector);
		notify_remote_via_irq(0); /* defaults to 0 irq */
		break;
	}
}
/*
 * pv_irq_ops hook: register the SMP percpu interrupts. handle_IPI
 * does the real ipi work; resched/tlb-flush use dummy handlers since
 * the wakeup event itself is sufficient (see xen_dummy_handler).
 */
static void __init
xen_register_ipi(void)
{
#ifdef CONFIG_SMP
	register_percpu_irq(IA64_IPI_VECTOR, &xen_ipi_irqaction);
	register_percpu_irq(IA64_IPI_RESCHEDULE, &xen_resched_irqaction);
	register_percpu_irq(IA64_IPI_LOCAL_TLB_FLUSH, &xen_tlb_irqaction);
#endif
}
  380. static void
  381. xen_resend_irq(unsigned int vector)
  382. {
  383. (void)resend_irq_on_evtchn(vector);
  384. }
/* Paravirt irq operation table installed for Xen during early boot. */
const struct pv_irq_ops xen_irq_ops __initdata = {
	.register_ipi = xen_register_ipi,
	.assign_irq_vector = xen_assign_irq_vector,
	.free_irq_vector = xen_free_irq_vector,
	.register_percpu_irq = xen_register_percpu_irq,
	.resend_irq = xen_resend_irq,
};