/*
 * Xen SMP support
 *
 * This file implements the Xen versions of smp_ops.  SMP under Xen is
 * very straightforward.  Bringing a CPU up is simply a matter of
 * loading its initial context and setting it running.
 *
 * IPIs are handled through the Xen event mechanism.
 *
 * Because virtual CPUs can be scheduled onto any real CPU, there's no
 * useful topology information for the kernel to make use of.  As a
 * result, all CPUs are treated as if they're single-core and
 * single-threaded.
 */
#include <linux/sched.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/irq_work.h>
#include <linux/tick.h>

#include <asm/paravirt.h>
#include <asm/desc.h>
#include <asm/pgtable.h>
#include <asm/cpu.h>

#include <xen/interface/xen.h>
#include <xen/interface/vcpu.h>

#include <asm/xen/interface.h>
#include <asm/xen/hypercall.h>

#include <xen/xen.h>
#include <xen/page.h>
#include <xen/events.h>

#include <xen/hvc-console.h>
#include "xen-ops.h"
#include "mmu.h"

cpumask_var_t xen_cpu_initialized_map;

struct xen_common_irq {
        int irq;
        char *name;
};
static DEFINE_PER_CPU(struct xen_common_irq, xen_resched_irq) = { .irq = -1 };
static DEFINE_PER_CPU(struct xen_common_irq, xen_callfunc_irq) = { .irq = -1 };
static DEFINE_PER_CPU(struct xen_common_irq, xen_callfuncsingle_irq) = { .irq = -1 };
static DEFINE_PER_CPU(struct xen_common_irq, xen_irq_work) = { .irq = -1 };
static DEFINE_PER_CPU(struct xen_common_irq, xen_debug_irq) = { .irq = -1 };

static irqreturn_t xen_call_function_interrupt(int irq, void *dev_id);
static irqreturn_t xen_call_function_single_interrupt(int irq, void *dev_id);
static irqreturn_t xen_irq_work_interrupt(int irq, void *dev_id);
/*
 * Reschedule call back.
 */
static irqreturn_t xen_reschedule_interrupt(int irq, void *dev_id)
{
        inc_irq_stat(irq_resched_count);
        scheduler_ipi();

        return IRQ_HANDLED;
}
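
/*
 * First C code run on a newly booted vCPU: finish per-CPU setup and
 * mark the CPU online before enabling interrupts.  The vCPU starts
 * here directly because cpu_initialize_context() below points the new
 * context's eip at cpu_bringup_and_idle().
 */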
static void cpu_bringup(void)
{
        int cpu;

        cpu_init();
        touch_softlockup_watchdog();
        preempt_disable();

        xen_enable_sysenter();
        xen_enable_syscall();

        cpu = smp_processor_id();
        smp_store_cpu_info(cpu);
        cpu_data(cpu).x86_max_cores = 1;
        set_cpu_sibling_map(cpu);

        xen_setup_cpu_clockevents();

        notify_cpu_starting(cpu);

        set_cpu_online(cpu, true);

        this_cpu_write(cpu_state, CPU_ONLINE);

        wmb();

        /* We can take interrupts now: we're officially "up". */
        local_irq_enable();

        wmb();                  /* make sure everything is out */
}

static void cpu_bringup_and_idle(void)
{
        cpu_bringup();
        cpu_startup_entry(CPUHP_ONLINE);
}
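
/*
 * Undo xen_smp_intr_init(): unbind each per-CPU IPI event channel that
 * was set up and free the kasprintf()-allocated name that was handed to
 * the irq core.  The .irq >= 0 guards make this safe to call on a
 * partially initialized CPU, which is why xen_smp_intr_init() uses it
 * as its error path.
 */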
static void xen_smp_intr_free(unsigned int cpu)
{
        if (per_cpu(xen_resched_irq, cpu).irq >= 0) {
                unbind_from_irqhandler(per_cpu(xen_resched_irq, cpu).irq, NULL);
                per_cpu(xen_resched_irq, cpu).irq = -1;
                kfree(per_cpu(xen_resched_irq, cpu).name);
                per_cpu(xen_resched_irq, cpu).name = NULL;
        }
        if (per_cpu(xen_callfunc_irq, cpu).irq >= 0) {
                unbind_from_irqhandler(per_cpu(xen_callfunc_irq, cpu).irq, NULL);
                per_cpu(xen_callfunc_irq, cpu).irq = -1;
                kfree(per_cpu(xen_callfunc_irq, cpu).name);
                per_cpu(xen_callfunc_irq, cpu).name = NULL;
        }
        if (per_cpu(xen_debug_irq, cpu).irq >= 0) {
                unbind_from_irqhandler(per_cpu(xen_debug_irq, cpu).irq, NULL);
                per_cpu(xen_debug_irq, cpu).irq = -1;
                kfree(per_cpu(xen_debug_irq, cpu).name);
                per_cpu(xen_debug_irq, cpu).name = NULL;
        }
        if (per_cpu(xen_callfuncsingle_irq, cpu).irq >= 0) {
                unbind_from_irqhandler(per_cpu(xen_callfuncsingle_irq, cpu).irq,
                                       NULL);
                per_cpu(xen_callfuncsingle_irq, cpu).irq = -1;
                kfree(per_cpu(xen_callfuncsingle_irq, cpu).name);
                per_cpu(xen_callfuncsingle_irq, cpu).name = NULL;
        }
        if (xen_hvm_domain())
                return;

        if (per_cpu(xen_irq_work, cpu).irq >= 0) {
                unbind_from_irqhandler(per_cpu(xen_irq_work, cpu).irq, NULL);
                per_cpu(xen_irq_work, cpu).irq = -1;
                kfree(per_cpu(xen_irq_work, cpu).name);
                per_cpu(xen_irq_work, cpu).name = NULL;
        }
}
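
/*
 * Bind each IPI "vector" this file uses to a per-CPU Xen event channel.
 * bind_ipi_to_irqhandler()/bind_virq_to_irqhandler() return the Linux
 * irq number on success; both the irq and its name are stashed per-CPU
 * so xen_smp_intr_free() can tear them down again.
 */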
static int xen_smp_intr_init(unsigned int cpu)
{
        int rc;
        char *resched_name, *callfunc_name, *debug_name;

        resched_name = kasprintf(GFP_KERNEL, "resched%d", cpu);
        rc = bind_ipi_to_irqhandler(XEN_RESCHEDULE_VECTOR,
                                    cpu,
                                    xen_reschedule_interrupt,
                                    IRQF_DISABLED|IRQF_PERCPU|IRQF_NOBALANCING,
                                    resched_name,
                                    NULL);
        if (rc < 0)
                goto fail;
        per_cpu(xen_resched_irq, cpu).irq = rc;
        per_cpu(xen_resched_irq, cpu).name = resched_name;

        callfunc_name = kasprintf(GFP_KERNEL, "callfunc%d", cpu);
        rc = bind_ipi_to_irqhandler(XEN_CALL_FUNCTION_VECTOR,
                                    cpu,
                                    xen_call_function_interrupt,
                                    IRQF_DISABLED|IRQF_PERCPU|IRQF_NOBALANCING,
                                    callfunc_name,
                                    NULL);
        if (rc < 0)
                goto fail;
        per_cpu(xen_callfunc_irq, cpu).irq = rc;
        per_cpu(xen_callfunc_irq, cpu).name = callfunc_name;

        debug_name = kasprintf(GFP_KERNEL, "debug%d", cpu);
        rc = bind_virq_to_irqhandler(VIRQ_DEBUG, cpu, xen_debug_interrupt,
                                     IRQF_DISABLED | IRQF_PERCPU | IRQF_NOBALANCING,
                                     debug_name, NULL);
        if (rc < 0)
                goto fail;
        per_cpu(xen_debug_irq, cpu).irq = rc;
        per_cpu(xen_debug_irq, cpu).name = debug_name;

        callfunc_name = kasprintf(GFP_KERNEL, "callfuncsingle%d", cpu);
        rc = bind_ipi_to_irqhandler(XEN_CALL_FUNCTION_SINGLE_VECTOR,
                                    cpu,
                                    xen_call_function_single_interrupt,
                                    IRQF_DISABLED|IRQF_PERCPU|IRQF_NOBALANCING,
                                    callfunc_name,
                                    NULL);
        if (rc < 0)
                goto fail;
        per_cpu(xen_callfuncsingle_irq, cpu).irq = rc;
        per_cpu(xen_callfuncsingle_irq, cpu).name = callfunc_name;

        /*
         * The IRQ worker on PVHVM goes through the native path and uses the
         * IPI mechanism.
         */
        if (xen_hvm_domain())
                return 0;

        callfunc_name = kasprintf(GFP_KERNEL, "irqwork%d", cpu);
        rc = bind_ipi_to_irqhandler(XEN_IRQ_WORK_VECTOR,
                                    cpu,
                                    xen_irq_work_interrupt,
                                    IRQF_DISABLED|IRQF_PERCPU|IRQF_NOBALANCING,
                                    callfunc_name,
                                    NULL);
        if (rc < 0)
                goto fail;
        per_cpu(xen_irq_work, cpu).irq = rc;
        per_cpu(xen_irq_work, cpu).name = callfunc_name;

        return 0;

 fail:
        xen_smp_intr_free(cpu);
        return rc;
}
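
/*
 * For a domU, probe which vCPUs the hypervisor provides (VCPUOP_is_up
 * fails with a negative value for a nonexistent vCPU) and mark each
 * one possible.  Dom0 is handled by xen_filter_cpu_maps() below.
 */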
static void __init xen_fill_possible_map(void)
{
        int i, rc;

        if (xen_initial_domain())
                return;

        for (i = 0; i < nr_cpu_ids; i++) {
                rc = HYPERVISOR_vcpu_op(VCPUOP_is_up, i, NULL);
                if (rc >= 0) {
                        num_processors++;
                        set_cpu_possible(i, true);
                }
        }
}

static void __init xen_filter_cpu_maps(void)
{
        int i, rc;
        unsigned int subtract = 0;

        if (!xen_initial_domain())
                return;

        num_processors = 0;
        disabled_cpus = 0;
        for (i = 0; i < nr_cpu_ids; i++) {
                rc = HYPERVISOR_vcpu_op(VCPUOP_is_up, i, NULL);
                if (rc >= 0) {
                        num_processors++;
                        set_cpu_possible(i, true);
                } else {
                        set_cpu_possible(i, false);
                        set_cpu_present(i, false);
                        subtract++;
                }
        }
#ifdef CONFIG_HOTPLUG_CPU
        /*
         * This is akin to using 'nr_cpus' on the Linux command line, which
         * is OK because with 'dom0_max_vcpus=X' we can have at most X vCPUs
         * while nr_cpu_ids may be greater than X.  That is normally not a
         * problem, except when CPU hotplugging is involved: then there might
         * be more than X CPUs in the guest, which will not work as there is
         * no hypercall to expand the maximum number of vCPUs an already
         * running guest has.  So cap nr_cpu_ids at X.
         */
        if (subtract)
                nr_cpu_ids = nr_cpu_ids - subtract;
#endif
}

static void __init xen_smp_prepare_boot_cpu(void)
{
        BUG_ON(smp_processor_id() != 0);
        native_smp_prepare_boot_cpu();

        if (xen_pv_domain()) {
                /* We've switched to the "real" per-cpu gdt, so make sure the
                   old memory can be recycled */
                make_lowmem_page_readwrite(xen_initial_gdt);

#ifdef CONFIG_X86_32
                /*
                 * Xen starts us with XEN_FLAT_RING1_DS, but linux code
                 * expects __USER_DS
                 */
                loadsegment(ds, __USER_DS);
                loadsegment(es, __USER_DS);
#endif
                xen_filter_cpu_maps();
                xen_setup_vcpu_info_placement();
        }
        /*
         * The alternative logic (which patches the unlock/lock) runs before
         * the SMP bootup code is activated.  Hence we need to set this up
         * before the core kernel is patched.  Otherwise we will have only
         * modules patched but not core code.
         */
        xen_init_spinlocks();
}
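
/*
 * PV counterpart of native_smp_prepare_cpus(): set up the boot CPU's
 * sibling maps, IPIs, and spinlock state, then clamp the possible map
 * to max_cpus (e.g. as set by a "maxcpus=" boot parameter).
 */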
static void __init xen_smp_prepare_cpus(unsigned int max_cpus)
{
        unsigned cpu;
        unsigned int i;

        if (skip_ioapic_setup) {
                char *m = (max_cpus == 0) ?
                        "The nosmp parameter is incompatible with Xen; " \
                        "use Xen dom0_max_vcpus=1 parameter" :
                        "The noapic parameter is incompatible with Xen";

                xen_raw_printk(m);
                panic(m);
        }
        xen_init_lock_cpu(0);

        smp_store_boot_cpu_info();
        cpu_data(0).x86_max_cores = 1;

        for_each_possible_cpu(i) {
                zalloc_cpumask_var(&per_cpu(cpu_sibling_map, i), GFP_KERNEL);
                zalloc_cpumask_var(&per_cpu(cpu_core_map, i), GFP_KERNEL);
                zalloc_cpumask_var(&per_cpu(cpu_llc_shared_map, i), GFP_KERNEL);
        }
        set_cpu_sibling_map(0);

        if (xen_smp_intr_init(0))
                BUG();

        if (!alloc_cpumask_var(&xen_cpu_initialized_map, GFP_KERNEL))
                panic("could not allocate xen_cpu_initialized_map\n");

        cpumask_copy(xen_cpu_initialized_map, cpumask_of(0));

        /* Restrict the possible_map according to max_cpus. */
        while ((num_possible_cpus() > 1) && (num_possible_cpus() > max_cpus)) {
                for (cpu = nr_cpu_ids - 1; !cpu_possible(cpu); cpu--)
                        continue;
                set_cpu_possible(cpu, false);
        }

        for_each_possible_cpu(cpu)
                set_cpu_present(cpu, true);
}
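
/*
 * Build the initial register and descriptor state for a new vCPU and
 * hand it to Xen with VCPUOP_initialise.  This replaces the native
 * trampoline: there is no real-mode startup, the hypervisor simply
 * starts the context at cpu_bringup_and_idle() once the vCPU is upped.
 */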
static int
cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
{
        struct vcpu_guest_context *ctxt;
        struct desc_struct *gdt;
        unsigned long gdt_mfn;

        if (cpumask_test_and_set_cpu(cpu, xen_cpu_initialized_map))
                return 0;

        ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
        if (ctxt == NULL)
                return -ENOMEM;

        gdt = get_cpu_gdt_table(cpu);

        ctxt->flags = VGCF_IN_KERNEL;
        ctxt->user_regs.ss = __KERNEL_DS;
#ifdef CONFIG_X86_32
        ctxt->user_regs.fs = __KERNEL_PERCPU;
        ctxt->user_regs.gs = __KERNEL_STACK_CANARY;
#else
        ctxt->gs_base_kernel = per_cpu_offset(cpu);
#endif
        ctxt->user_regs.eip = (unsigned long)cpu_bringup_and_idle;

        memset(&ctxt->fpu_ctxt, 0, sizeof(ctxt->fpu_ctxt));

        {
                ctxt->user_regs.eflags = 0x1000; /* IOPL_RING1 */
                ctxt->user_regs.ds = __USER_DS;
                ctxt->user_regs.es = __USER_DS;

                xen_copy_trap_info(ctxt->trap_ctxt);

                ctxt->ldt_ents = 0;

                BUG_ON((unsigned long)gdt & ~PAGE_MASK);

                gdt_mfn = arbitrary_virt_to_mfn(gdt);
                make_lowmem_page_readonly(gdt);
                make_lowmem_page_readonly(mfn_to_virt(gdt_mfn));

                ctxt->gdt_frames[0] = gdt_mfn;
                ctxt->gdt_ents      = GDT_ENTRIES;

                ctxt->kernel_ss = __KERNEL_DS;
                ctxt->kernel_sp = idle->thread.sp0;

#ifdef CONFIG_X86_32
                ctxt->event_callback_cs     = __KERNEL_CS;
                ctxt->failsafe_callback_cs  = __KERNEL_CS;
#endif
                ctxt->event_callback_eip    =
                                        (unsigned long)xen_hypervisor_callback;
                ctxt->failsafe_callback_eip =
                                        (unsigned long)xen_failsafe_callback;
        }
        ctxt->user_regs.cs = __KERNEL_CS;
        ctxt->user_regs.esp = idle->thread.sp0 - sizeof(struct pt_regs);

        per_cpu(xen_cr3, cpu) = __pa(swapper_pg_dir);
        ctxt->ctrlreg[3] = xen_pfn_to_cr3(virt_to_mfn(swapper_pg_dir));

        if (HYPERVISOR_vcpu_op(VCPUOP_initialise, cpu, ctxt))
                BUG();

        kfree(ctxt);
        return 0;
}
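
/*
 * Bring a secondary CPU up: prepare its stack and per-CPU data, load
 * its initial context, then issue VCPUOP_up and spin (yielding to the
 * hypervisor) until cpu_bringup() marks it CPU_ONLINE.
 */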
static int xen_cpu_up(unsigned int cpu, struct task_struct *idle)
{
        int rc;

        per_cpu(current_task, cpu) = idle;
#ifdef CONFIG_X86_32
        irq_ctx_init(cpu);
#else
        clear_tsk_thread_flag(idle, TIF_FORK);
        per_cpu(kernel_stack, cpu) =
                (unsigned long)task_stack_page(idle) -
                KERNEL_STACK_OFFSET + THREAD_SIZE;
#endif
        xen_setup_runstate_info(cpu);
        xen_setup_timer(cpu);
        xen_init_lock_cpu(cpu);

        per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;

        /* make sure interrupts start blocked */
        per_cpu(xen_vcpu, cpu)->evtchn_upcall_mask = 1;

        rc = cpu_initialize_context(cpu, idle);
        if (rc)
                return rc;

        if (num_online_cpus() == 1)
                /* Just in case we booted with a single CPU. */
                alternatives_enable_smp();

        rc = xen_smp_intr_init(cpu);
        if (rc)
                return rc;

        rc = HYPERVISOR_vcpu_op(VCPUOP_up, cpu, NULL);
        BUG_ON(rc);

        while (per_cpu(cpu_state, cpu) != CPU_ONLINE) {
                HYPERVISOR_sched_op(SCHEDOP_yield, NULL);
                barrier();
        }

        return 0;
}

static void xen_smp_cpus_done(unsigned int max_cpus)
{
}

#ifdef CONFIG_HOTPLUG_CPU
static int xen_cpu_disable(void)
{
        unsigned int cpu = smp_processor_id();
        if (cpu == 0)
                return -EBUSY;

        cpu_disable_common();

        load_cr3(swapper_pg_dir);
        return 0;
}

static void xen_cpu_die(unsigned int cpu)
{
        while (xen_pv_domain() && HYPERVISOR_vcpu_op(VCPUOP_is_up, cpu, NULL)) {
                current->state = TASK_UNINTERRUPTIBLE;
                schedule_timeout(HZ/10);
        }
        xen_smp_intr_free(cpu);
        xen_uninit_lock_cpu(cpu);
        xen_teardown_timer(cpu);
}

static void xen_play_dead(void) /* used only with HOTPLUG_CPU */
{
        play_dead_common();
        HYPERVISOR_vcpu_op(VCPUOP_down, smp_processor_id(), NULL);
        cpu_bringup();
        /*
         * commit 4b0c0f294 (tick: Cleanup NOHZ per cpu data on cpu down)
         * clears certain data that the cpu_idle loop (which called us
         * and that we return from) expects.  The only way to get that
         * data back is to call:
         */
        tick_nohz_idle_enter();
}

#else /* !CONFIG_HOTPLUG_CPU */
static int xen_cpu_disable(void)
{
        return -ENOSYS;
}

static void xen_cpu_die(unsigned int cpu)
{
        BUG();
}

static void xen_play_dead(void)
{
        BUG();
}

#endif
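
/*
 * Shutdown/panic path: each remote CPU runs stop_self() to take itself
 * down with VCPUOP_down, after first switching to swapper_pg_dir so no
 * task page tables stay pinned.
 */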
static void stop_self(void *v)
{
        int cpu = smp_processor_id();

        /* make sure we're not pinning something down */
        load_cr3(swapper_pg_dir);
        /* should set up a minimal gdt */

        set_cpu_online(cpu, false);

        HYPERVISOR_vcpu_op(VCPUOP_down, cpu, NULL);
        BUG();
}

static void xen_stop_other_cpus(int wait)
{
        smp_call_function(stop_self, NULL, wait);
}

static void xen_smp_send_reschedule(int cpu)
{
        xen_send_IPI_one(cpu, XEN_RESCHEDULE_VECTOR);
}

static void __xen_send_IPI_mask(const struct cpumask *mask,
                                int vector)
{
        unsigned cpu;

        for_each_cpu_and(cpu, mask, cpu_online_mask)
                xen_send_IPI_one(cpu, vector);
}

static void xen_smp_send_call_function_ipi(const struct cpumask *mask)
{
        int cpu;

        __xen_send_IPI_mask(mask, XEN_CALL_FUNCTION_VECTOR);

        /* Make sure other vcpus get a chance to run if they need to. */
        for_each_cpu(cpu, mask) {
                if (xen_vcpu_stolen(cpu)) {
                        HYPERVISOR_sched_op(SCHEDOP_yield, NULL);
                        break;
                }
        }
}

static void xen_smp_send_call_function_single_ipi(int cpu)
{
        __xen_send_IPI_mask(cpumask_of(cpu),
                            XEN_CALL_FUNCTION_SINGLE_VECTOR);
}
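
/*
 * Translate a native x86 IPI vector number into the corresponding
 * XEN_*_VECTOR event-channel IPI, or -1 if there is no mapping.
 */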
static inline int xen_map_vector(int vector)
{
        int xen_vector;

        switch (vector) {
        case RESCHEDULE_VECTOR:
                xen_vector = XEN_RESCHEDULE_VECTOR;
                break;
        case CALL_FUNCTION_VECTOR:
                xen_vector = XEN_CALL_FUNCTION_VECTOR;
                break;
        case CALL_FUNCTION_SINGLE_VECTOR:
                xen_vector = XEN_CALL_FUNCTION_SINGLE_VECTOR;
                break;
        case IRQ_WORK_VECTOR:
                xen_vector = XEN_IRQ_WORK_VECTOR;
                break;
#ifdef CONFIG_X86_64
        case NMI_VECTOR:
        case APIC_DM_NMI: /* Some use that instead of NMI_VECTOR */
                xen_vector = XEN_NMI_VECTOR;
                break;
#endif
        default:
                xen_vector = -1;
                printk(KERN_ERR "xen: vector 0x%x is not implemented\n",
                        vector);
        }

        return xen_vector;
}

void xen_send_IPI_mask(const struct cpumask *mask,
                       int vector)
{
        int xen_vector = xen_map_vector(vector);

        if (xen_vector >= 0)
                __xen_send_IPI_mask(mask, xen_vector);
}

void xen_send_IPI_all(int vector)
{
        int xen_vector = xen_map_vector(vector);

        if (xen_vector >= 0)
                __xen_send_IPI_mask(cpu_online_mask, xen_vector);
}

void xen_send_IPI_self(int vector)
{
        int xen_vector = xen_map_vector(vector);

        if (xen_vector >= 0)
                xen_send_IPI_one(smp_processor_id(), xen_vector);
}

void xen_send_IPI_mask_allbutself(const struct cpumask *mask,
                                  int vector)
{
        unsigned cpu;
        unsigned int this_cpu = smp_processor_id();
        int xen_vector = xen_map_vector(vector);

        if (!(num_online_cpus() > 1) || (xen_vector < 0))
                return;

        for_each_cpu_and(cpu, mask, cpu_online_mask) {
                if (this_cpu == cpu)
                        continue;

                xen_send_IPI_one(cpu, xen_vector);
        }
}

void xen_send_IPI_allbutself(int vector)
{
        xen_send_IPI_mask_allbutself(cpu_online_mask, vector);
}

static irqreturn_t xen_call_function_interrupt(int irq, void *dev_id)
{
        irq_enter();
        generic_smp_call_function_interrupt();
        inc_irq_stat(irq_call_count);
        irq_exit();

        return IRQ_HANDLED;
}

static irqreturn_t xen_call_function_single_interrupt(int irq, void *dev_id)
{
        irq_enter();
        generic_smp_call_function_single_interrupt();
        inc_irq_stat(irq_call_count);
        irq_exit();

        return IRQ_HANDLED;
}

static irqreturn_t xen_irq_work_interrupt(int irq, void *dev_id)
{
        irq_enter();
        irq_work_run();
        inc_irq_stat(apic_irq_work_irqs);
        irq_exit();

        return IRQ_HANDLED;
}
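
/*
 * PV smp_ops table, installed wholesale by xen_smp_init().  The HVM
 * variant below instead patches individual entries over the native
 * smp_ops.
 */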
static const struct smp_ops xen_smp_ops __initconst = {
        .smp_prepare_boot_cpu = xen_smp_prepare_boot_cpu,
        .smp_prepare_cpus = xen_smp_prepare_cpus,
        .smp_cpus_done = xen_smp_cpus_done,

        .cpu_up = xen_cpu_up,
        .cpu_die = xen_cpu_die,
        .cpu_disable = xen_cpu_disable,
        .play_dead = xen_play_dead,

        .stop_other_cpus = xen_stop_other_cpus,
        .smp_send_reschedule = xen_smp_send_reschedule,

        .send_call_func_ipi = xen_smp_send_call_function_ipi,
        .send_call_func_single_ipi = xen_smp_send_call_function_single_ipi,
};

void __init xen_smp_init(void)
{
        smp_ops = xen_smp_ops;
        xen_fill_possible_map();
}

static void __init xen_hvm_smp_prepare_cpus(unsigned int max_cpus)
{
        native_smp_prepare_cpus(max_cpus);
        WARN_ON(xen_smp_intr_init(0));

        xen_init_lock_cpu(0);
}

static int xen_hvm_cpu_up(unsigned int cpu, struct task_struct *tidle)
{
        int rc;
        /*
         * xen_smp_intr_init() needs to run before native_cpu_up()
         * so that IPI vectors are set up on the booting CPU before
         * it is marked online in native_cpu_up().
         */
        rc = xen_smp_intr_init(cpu);
        WARN_ON(rc);
        if (!rc)
                rc = native_cpu_up(cpu, tidle);

        /*
         * We must initialize the slowpath CPU kicker _after_ the native
         * path has executed.  If we initialized it before, none of the
         * unlocker IPI kicks would reach the booting CPU, as the booting
         * CPU had not yet set itself 'online' in cpu_online_mask.  That
         * mask is checked when IPIs are sent (on HVM at least).
         */
        xen_init_lock_cpu(cpu);
        return rc;
}

static void xen_hvm_cpu_die(unsigned int cpu)
{
        xen_cpu_die(cpu);
        native_cpu_die(cpu);
}
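
/*
 * For PVHVM guests, override only the smp_ops entries where Xen event
 * channels replace the native mechanism.  This requires the vector
 * callback for event delivery; without it we stay fully native.
 */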
void __init xen_hvm_smp_init(void)
{
        if (!xen_have_vector_callback)
                return;
        smp_ops.smp_prepare_cpus = xen_hvm_smp_prepare_cpus;
        smp_ops.smp_send_reschedule = xen_smp_send_reschedule;
        smp_ops.cpu_up = xen_hvm_cpu_up;
        smp_ops.cpu_die = xen_hvm_cpu_die;
        smp_ops.send_call_func_ipi = xen_smp_send_call_function_ipi;
        smp_ops.send_call_func_single_ipi = xen_smp_send_call_function_single_ipi;
        smp_ops.smp_prepare_boot_cpu = xen_smp_prepare_boot_cpu;
}