irq_xen.c

/******************************************************************************
 * arch/ia64/xen/irq_xen.c
 *
 * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp>
 *                    VA Linux Systems Japan K.K.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 */
#include <linux/cpu.h>

#include <xen/interface/xen.h>
#include <xen/interface/callback.h>
#include <xen/events.h>

#include <asm/xen/privop.h>

#include "irq_xen.h"
/***************************************************************************
 * pv_irq_ops
 * irq operations
 */
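
/*
 * Ask the hypervisor to allocate an interrupt vector for @irq via the
 * PHYSDEVOP_alloc_irq_vector physdev op.  Returns the vector on success,
 * or -ENOSPC if the hypercall fails.
 */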
static int
xen_assign_irq_vector(int irq)
{
        struct physdev_irq irq_op;

        irq_op.irq = irq;
        if (HYPERVISOR_physdev_op(PHYSDEVOP_alloc_irq_vector, &irq_op))
                return -ENOSPC;

        return irq_op.vector;
}
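
/*
 * Return a previously allocated vector to the hypervisor via
 * PHYSDEVOP_free_irq_vector.  Vectors outside the device-vector range
 * are silently ignored.
 */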
static void
xen_free_irq_vector(int vector)
{
        struct physdev_irq irq_op;

        if (vector < IA64_FIRST_DEVICE_VECTOR ||
            vector > IA64_LAST_DEVICE_VECTOR)
                return;

        irq_op.vector = vector;
        if (HYPERVISOR_physdev_op(PHYSDEVOP_free_irq_vector, &irq_op))
                printk(KERN_WARNING "%s: xen_free_irq_vector fail vector=%d\n",
                       __func__, vector);
}
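
/*
 * Per-CPU caches of the irq numbers bound for each percpu vector,
 * -1 while unbound.  The CPU-hotplug notifier below uses these to
 * unbind the corresponding event channels when a CPU goes down.
 */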
static DEFINE_PER_CPU(int, timer_irq) = -1;
static DEFINE_PER_CPU(int, ipi_irq) = -1;
static DEFINE_PER_CPU(int, resched_irq) = -1;
static DEFINE_PER_CPU(int, cmc_irq) = -1;
static DEFINE_PER_CPU(int, cmcp_irq) = -1;
static DEFINE_PER_CPU(int, cpep_irq) = -1;

#define NAME_SIZE       15
static DEFINE_PER_CPU(char[NAME_SIZE], timer_name);
static DEFINE_PER_CPU(char[NAME_SIZE], ipi_name);
static DEFINE_PER_CPU(char[NAME_SIZE], resched_name);
static DEFINE_PER_CPU(char[NAME_SIZE], cmc_name);
static DEFINE_PER_CPU(char[NAME_SIZE], cmcp_name);
static DEFINE_PER_CPU(char[NAME_SIZE], cpep_name);
#undef NAME_SIZE
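
/*
 * One cached early registration: the ia64 vector number and the
 * irqaction passed in, so the binding can be replayed later.
 */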
struct saved_irq {
        unsigned int irq;
        struct irqaction *action;
};

/*
 * 16 should be a generous upper bound, since only a handful of percpu
 * irqs are registered early.
 */
#define MAX_LATE_IRQ    16
static struct saved_irq saved_percpu_irqs[MAX_LATE_IRQ];
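
/*
 * late_irq_cnt counts registrations made before the slab allocator was
 * ready (these are bound later by xen_bind_early_percpu_irq());
 * saved_irq_cnt counts all cached registrations, which are replayed on
 * the APs; xen_slab_ready flags that kmalloc, and hence evtchn binding,
 * is usable.
 */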
static unsigned short late_irq_cnt;
static unsigned short saved_irq_cnt;
static int xen_slab_ready;
#ifdef CONFIG_SMP
/*
 * Dummy stub.  Though we may check XEN_RESCHEDULE_VECTOR before __do_IRQ,
 * that would end up issuing several memory accesses on percpu data and
 * thus add unnecessary traffic to other paths.
 */
static irqreturn_t
xen_dummy_handler(int irq, void *dev_id)
{
        return IRQ_HANDLED;
}

static struct irqaction xen_ipi_irqaction = {
        .handler =      handle_IPI,
        .flags =        IRQF_DISABLED,
        .name =         "IPI"
};

static struct irqaction xen_resched_irqaction = {
        .handler =      xen_dummy_handler,
        .flags =        IRQF_DISABLED,
        .name =         "resched"
};

static struct irqaction xen_tlb_irqaction = {
        .handler =      xen_dummy_handler,
        .flags =        IRQF_DISABLED,
        .name =         "tlb_flush"
};
#endif
/*
 * This is the Xen flavour of percpu irq registration, which needs to
 * bind to the Xen-specific evtchn subsystem.  One trick here is that
 * the Xen evtchn binding interface depends on kmalloc, because the
 * related port has to be freed on device/CPU teardown.  So we cache
 * registrations made on the BSP before slab is ready and deal with
 * them later; registrations that happen after slab is ready are hooked
 * up to the Xen evtchn immediately.
 *
 * FIXME: MCA is not supported so far, and thus the "nomca" boot param
 * is required.
 */
static void
__xen_register_percpu_irq(unsigned int cpu, unsigned int vec,
                          struct irqaction *action, int save)
{
        irq_desc_t *desc;
        int irq = 0;

        if (xen_slab_ready) {
                switch (vec) {
                case IA64_TIMER_VECTOR:
                        snprintf(per_cpu(timer_name, cpu),
                                 sizeof(per_cpu(timer_name, cpu)),
                                 "%s%d", action->name, cpu);
                        irq = bind_virq_to_irqhandler(VIRQ_ITC, cpu,
                                action->handler, action->flags,
                                per_cpu(timer_name, cpu), action->dev_id);
                        per_cpu(timer_irq, cpu) = irq;
                        break;
                case IA64_IPI_RESCHEDULE:
                        snprintf(per_cpu(resched_name, cpu),
                                 sizeof(per_cpu(resched_name, cpu)),
                                 "%s%d", action->name, cpu);
                        irq = bind_ipi_to_irqhandler(XEN_RESCHEDULE_VECTOR, cpu,
                                action->handler, action->flags,
                                per_cpu(resched_name, cpu), action->dev_id);
                        per_cpu(resched_irq, cpu) = irq;
                        break;
                case IA64_IPI_VECTOR:
                        snprintf(per_cpu(ipi_name, cpu),
                                 sizeof(per_cpu(ipi_name, cpu)),
                                 "%s%d", action->name, cpu);
                        irq = bind_ipi_to_irqhandler(XEN_IPI_VECTOR, cpu,
                                action->handler, action->flags,
                                per_cpu(ipi_name, cpu), action->dev_id);
                        per_cpu(ipi_irq, cpu) = irq;
                        break;
                case IA64_CMC_VECTOR:
                        snprintf(per_cpu(cmc_name, cpu),
                                 sizeof(per_cpu(cmc_name, cpu)),
                                 "%s%d", action->name, cpu);
                        irq = bind_virq_to_irqhandler(VIRQ_MCA_CMC, cpu,
                                                      action->handler,
                                                      action->flags,
                                                      per_cpu(cmc_name, cpu),
                                                      action->dev_id);
                        per_cpu(cmc_irq, cpu) = irq;
                        break;
                case IA64_CMCP_VECTOR:
                        snprintf(per_cpu(cmcp_name, cpu),
                                 sizeof(per_cpu(cmcp_name, cpu)),
                                 "%s%d", action->name, cpu);
                        irq = bind_ipi_to_irqhandler(XEN_CMCP_VECTOR, cpu,
                                                     action->handler,
                                                     action->flags,
                                                     per_cpu(cmcp_name, cpu),
                                                     action->dev_id);
                        per_cpu(cmcp_irq, cpu) = irq;
                        break;
                case IA64_CPEP_VECTOR:
                        snprintf(per_cpu(cpep_name, cpu),
                                 sizeof(per_cpu(cpep_name, cpu)),
                                 "%s%d", action->name, cpu);
                        irq = bind_ipi_to_irqhandler(XEN_CPEP_VECTOR, cpu,
                                                     action->handler,
                                                     action->flags,
                                                     per_cpu(cpep_name, cpu),
                                                     action->dev_id);
                        per_cpu(cpep_irq, cpu) = irq;
                        break;
                case IA64_CPE_VECTOR:
                case IA64_MCA_RENDEZ_VECTOR:
                case IA64_PERFMON_VECTOR:
                case IA64_MCA_WAKEUP_VECTOR:
                case IA64_SPURIOUS_INT_VECTOR:
                        /* No need to complain, these aren't supported. */
                        break;
                default:
                        printk(KERN_WARNING "Percpu irq %d is unsupported "
                               "by xen!\n", vec);
                        break;
                }
                BUG_ON(irq < 0);

                if (irq > 0) {
                        /*
                         * Mark the irq as per-CPU.  Without this,
                         * migrate_irqs() would mark it for migration and
                         * trigger it on CPU hotplug.
                         */
                        desc = irq_desc + irq;
                        desc->status |= IRQ_PER_CPU;
                }
        }

        /*
         * For the BSP, we cache registered percpu irqs and then re-walk
         * them when initializing the APs.
         */
        if (!cpu && save) {
                BUG_ON(saved_irq_cnt == MAX_LATE_IRQ);
                saved_percpu_irqs[saved_irq_cnt].irq = vec;
                saved_percpu_irqs[saved_irq_cnt].action = action;
                saved_irq_cnt++;
                if (!xen_slab_ready)
                        late_irq_cnt++;
        }
}
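
/*
 * pv_irq_ops hook: register a percpu irq on the current CPU and save it
 * so the same binding can be replayed on the other CPUs later.
 */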
static void
xen_register_percpu_irq(ia64_vector vec, struct irqaction *action)
{
        __xen_register_percpu_irq(smp_processor_id(), vec, action, 1);
}
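
/*
 * Run via late_time_init once kmalloc is usable: mark the slab as ready
 * and bind the percpu irqs that were cached before that point.
 */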
static void
xen_bind_early_percpu_irq(void)
{
        int i;

        xen_slab_ready = 1;
        /*
         * There's no race when accessing this cached array, since only
         * the BSP goes through this step, shortly after boot.
         */
        for (i = 0; i < late_irq_cnt; i++)
                __xen_register_percpu_irq(smp_processor_id(),
                                          saved_percpu_irqs[i].irq,
                                          saved_percpu_irqs[i].action, 0);
}
/*
 * FIXME: There's no obvious hook for checking whether slab is ready, so
 * we resort to the hack of reusing the late_time_init hook above.
 */
#ifdef CONFIG_HOTPLUG_CPU
static int __devinit
unbind_evtchn_callback(struct notifier_block *nfb,
                       unsigned long action, void *hcpu)
{
        unsigned int cpu = (unsigned long)hcpu;

        if (action == CPU_DEAD) {
                /* Unregister evtchn. */
                if (per_cpu(cpep_irq, cpu) >= 0) {
                        unbind_from_irqhandler(per_cpu(cpep_irq, cpu), NULL);
                        per_cpu(cpep_irq, cpu) = -1;
                }
                if (per_cpu(cmcp_irq, cpu) >= 0) {
                        unbind_from_irqhandler(per_cpu(cmcp_irq, cpu), NULL);
                        per_cpu(cmcp_irq, cpu) = -1;
                }
                if (per_cpu(cmc_irq, cpu) >= 0) {
                        unbind_from_irqhandler(per_cpu(cmc_irq, cpu), NULL);
                        per_cpu(cmc_irq, cpu) = -1;
                }
                if (per_cpu(ipi_irq, cpu) >= 0) {
                        unbind_from_irqhandler(per_cpu(ipi_irq, cpu), NULL);
                        per_cpu(ipi_irq, cpu) = -1;
                }
                if (per_cpu(resched_irq, cpu) >= 0) {
                        unbind_from_irqhandler(per_cpu(resched_irq, cpu),
                                               NULL);
                        per_cpu(resched_irq, cpu) = -1;
                }
                if (per_cpu(timer_irq, cpu) >= 0) {
                        unbind_from_irqhandler(per_cpu(timer_irq, cpu), NULL);
                        per_cpu(timer_irq, cpu) = -1;
                }
        }
        return NOTIFY_OK;
}

static struct notifier_block unbind_evtchn_notifier = {
        .notifier_call = unbind_evtchn_callback,
        .priority = 0
};
#endif
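
/*
 * Replay all cached percpu irq registrations on @cpu.  Called from
 * xen_platform_send_ipi() below when an AP is woken up.
 */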
void xen_smp_intr_init_early(unsigned int cpu)
{
#ifdef CONFIG_SMP
        unsigned int i;

        for (i = 0; i < saved_irq_cnt; i++)
                __xen_register_percpu_irq(cpu, saved_percpu_irqs[i].irq,
                                          saved_percpu_irqs[i].action, 0);
#endif
}
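
/*
 * Register the event-channel callback for the current CPU.  On the boot
 * CPU the callback was already registered in xen_irq_init(), so only the
 * CPU-hotplug notifier is installed there.
 */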
void xen_smp_intr_init(void)
{
#ifdef CONFIG_SMP
        unsigned int cpu = smp_processor_id();
        struct callback_register event = {
                .type = CALLBACKTYPE_event,
                .address = { .ip = (unsigned long)&xen_event_callback },
        };

        if (cpu == 0) {
                /* Initialization was already done for the boot cpu. */
#ifdef CONFIG_HOTPLUG_CPU
                /* Register the notifier only once. */
                register_cpu_notifier(&unbind_evtchn_notifier);
#endif
                return;
        }

        /* This should be piggybacked onto the vcpu guest-context setup. */
        BUG_ON(HYPERVISOR_callback_op(CALLBACKOP_register, &event));
#endif /* CONFIG_SMP */
}
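
/*
 * Early irq setup: initialize the evtchn layer, register the event
 * callback for the boot CPU, and defer binding of the early percpu irqs
 * to late_time_init, when kmalloc is available.
 */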
void __init
xen_irq_init(void)
{
        struct callback_register event = {
                .type = CALLBACKTYPE_event,
                .address = { .ip = (unsigned long)&xen_event_callback },
        };

        xen_init_IRQ();
        BUG_ON(HYPERVISOR_callback_op(CALLBACKOP_register, &event));
        late_time_init = xen_bind_early_percpu_irq;
}
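
/*
 * Translate an ia64 IPI vector into the corresponding Xen event-channel
 * notification for @cpu.  The AP wakeup vector additionally triggers the
 * early percpu-irq setup on the target CPU.
 */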
void
xen_platform_send_ipi(int cpu, int vector, int delivery_mode, int redirect)
{
#ifdef CONFIG_SMP
        /* TODO: we need to call vcpu_up here */
        if (unlikely(vector == ap_wakeup_vector)) {
                /*
                 * XXX
                 * This should live in __cpu_up(cpu) in ia64 smpboot.c,
                 * like on x86, but we don't want to modify that file,
                 * so it is kept untouched.
                 */
                xen_smp_intr_init_early(cpu);

                xen_send_ipi(cpu, vector);
                /* vcpu_prepare_and_up(cpu); */
                return;
        }
#endif

        switch (vector) {
        case IA64_IPI_VECTOR:
                xen_send_IPI_one(cpu, XEN_IPI_VECTOR);
                break;
        case IA64_IPI_RESCHEDULE:
                xen_send_IPI_one(cpu, XEN_RESCHEDULE_VECTOR);
                break;
        case IA64_CMCP_VECTOR:
                xen_send_IPI_one(cpu, XEN_CMCP_VECTOR);
                break;
        case IA64_CPEP_VECTOR:
                xen_send_IPI_one(cpu, XEN_CPEP_VECTOR);
                break;
        case IA64_TIMER_VECTOR: {
                /*
                 * This is used only once, by check_sal_cache_flush()
                 * at boot time.
                 */
                static int used = 0;
                if (!used) {
                        xen_send_ipi(cpu, IA64_TIMER_VECTOR);
                        used = 1;
                        break;
                }
                /* fallthrough */
        }
        default:
                printk(KERN_WARNING "Unsupported IPI type 0x%x\n",
                       vector);
                notify_remote_via_irq(0); /* defaults to irq 0 */
                break;
        }
}
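
/*
 * Register the SMP IPI handlers as percpu irqs.  The reschedule and
 * TLB-flush vectors are wired to the dummy handler defined above.
 */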
static void __init
xen_register_ipi(void)
{
#ifdef CONFIG_SMP
        register_percpu_irq(IA64_IPI_VECTOR, &xen_ipi_irqaction);
        register_percpu_irq(IA64_IPI_RESCHEDULE, &xen_resched_irqaction);
        register_percpu_irq(IA64_IPI_LOCAL_TLB_FLUSH, &xen_tlb_irqaction);
#endif
}
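
/*
 * pv_irq_ops hook: resend an irq through its Xen event channel.
 */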
static void
xen_resend_irq(unsigned int vector)
{
        (void)resend_irq_on_evtchn(vector);
}
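
/*
 * Xen implementation of the pv_irq_ops interface.
 */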
const struct pv_irq_ops xen_irq_ops __initdata = {
        .register_ipi = xen_register_ipi,
        .assign_irq_vector = xen_assign_irq_vector,
        .free_irq_vector = xen_free_irq_vector,
        .register_percpu_irq = xen_register_percpu_irq,
        .resend_irq = xen_resend_irq,
};