/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright IBM Corp. 2007
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 *          Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/hrtimer.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <asm/cputable.h>
#include <asm/uaccess.h>
#include <asm/kvm_ppc.h>
#include <asm/tlbflush.h>
#include <asm/cputhreads.h>
#include "timing.h"
#include "../mm/mmu_decl.h"

#define CREATE_TRACE_POINTS
#include "trace.h"

int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
{
#ifndef CONFIG_KVM_BOOK3S_64_HV
        return !(v->arch.shared->msr & MSR_WE) ||
               !!(v->arch.pending_exceptions);
#else
        return !(v->arch.ceded) || !!(v->arch.pending_exceptions);
#endif
}

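/*
 * Handle a paravirtual hypercall from the guest.  By convention (see the
 * register usage below) the call number arrives in r11 and up to four
 * parameters in r3-r6; the primary return value is placed in r3 by the
 * caller from this function's return value, and a second return value
 * goes back in r4.
 */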
int kvmppc_kvm_pv(struct kvm_vcpu *vcpu)
{
        int nr = kvmppc_get_gpr(vcpu, 11);
        int r;
        unsigned long __maybe_unused param1 = kvmppc_get_gpr(vcpu, 3);
        unsigned long __maybe_unused param2 = kvmppc_get_gpr(vcpu, 4);
        unsigned long __maybe_unused param3 = kvmppc_get_gpr(vcpu, 5);
        unsigned long __maybe_unused param4 = kvmppc_get_gpr(vcpu, 6);
        unsigned long r2 = 0;

        if (!(vcpu->arch.shared->msr & MSR_SF)) {
                /* 32 bit mode */
                param1 &= 0xffffffff;
                param2 &= 0xffffffff;
                param3 &= 0xffffffff;
                param4 &= 0xffffffff;
        }

        switch (nr) {
        case HC_VENDOR_KVM | KVM_HC_PPC_MAP_MAGIC_PAGE:
        {
                vcpu->arch.magic_page_pa = param1;
                vcpu->arch.magic_page_ea = param2;

                r2 = KVM_MAGIC_FEAT_SR;

                r = HC_EV_SUCCESS;
                break;
        }
        case HC_VENDOR_KVM | KVM_HC_FEATURES:
                r = HC_EV_SUCCESS;
#if defined(CONFIG_PPC_BOOK3S) || defined(CONFIG_KVM_E500)
                /* XXX Missing magic page on 44x */
                r2 |= (1 << KVM_FEATURE_MAGIC_PAGE);
#endif
                /* Second return value is in r4 */
                break;
        default:
                r = HC_EV_UNIMPLEMENTED;
                break;
        }

        kvmppc_set_gpr(vcpu, 4, r2);

        return r;
}

int kvmppc_sanity_check(struct kvm_vcpu *vcpu)
{
        int r = false;

        /* We have to know what CPU to virtualize */
        if (!vcpu->arch.pvr)
                goto out;

        /* PAPR only works with book3s_64 */
        if ((vcpu->arch.cpu_type != KVM_CPU_3S_64) && vcpu->arch.papr_enabled)
                goto out;

#ifdef CONFIG_KVM_BOOK3S_64_HV
        /* HV KVM can only do PAPR mode for now */
        if (!vcpu->arch.papr_enabled)
                goto out;
#endif

        r = true;

out:
        vcpu->arch.sane = r;

        return r ? 0 : -EINVAL;
}

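/*
 * Emulate the instruction the guest faulted on.  The RESUME_*_NV return
 * codes ask the exit handler to reload non-volatile registers before
 * continuing, since emulation may have modified them; RESUME_HOST exits
 * to userspace, while RESUME_GUEST reenters the guest directly.
 */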
int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
        enum emulation_result er;
        int r;

        er = kvmppc_emulate_instruction(run, vcpu);
        switch (er) {
        case EMULATE_DONE:
                /* Future optimization: only reload non-volatiles if they were
                 * actually modified. */
                r = RESUME_GUEST_NV;
                break;
        case EMULATE_DO_MMIO:
                run->exit_reason = KVM_EXIT_MMIO;
                /* We must reload nonvolatiles because "update" load/store
                 * instructions modify register state. */
                /* Future optimization: only reload non-volatiles if they were
                 * actually modified. */
                r = RESUME_HOST_NV;
                break;
        case EMULATE_FAIL:
                /* XXX Deliver Program interrupt to guest. */
                printk(KERN_EMERG "%s: emulation failed (%08x)\n", __func__,
                       kvmppc_get_last_inst(vcpu));
                r = RESUME_HOST;
                break;
        default:
                BUG();
        }

        return r;
}

int kvm_arch_hardware_enable(void *garbage)
{
        return 0;
}

void kvm_arch_hardware_disable(void *garbage)
{
}

int kvm_arch_hardware_setup(void)
{
        return 0;
}

void kvm_arch_hardware_unsetup(void)
{
}

void kvm_arch_check_processor_compat(void *rtn)
{
        *(int *)rtn = kvmppc_core_check_processor_compat();
}

int kvm_arch_init_vm(struct kvm *kvm)
{
        return kvmppc_core_init_vm(kvm);
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
        unsigned int i;
        struct kvm_vcpu *vcpu;

        kvm_for_each_vcpu(i, vcpu, kvm)
                kvm_arch_vcpu_free(vcpu);

        mutex_lock(&kvm->lock);
        for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
                kvm->vcpus[i] = NULL;

        atomic_set(&kvm->online_vcpus, 0);

        kvmppc_core_destroy_vm(kvm);

        mutex_unlock(&kvm->lock);
}

void kvm_arch_sync_events(struct kvm *kvm)
{
}

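/*
 * Report which optional KVM capabilities this build supports; the answer
 * depends on the CPU family (Book E vs. Book3S) and on whether this is
 * the Book3S HV flavour of KVM.
 */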
int kvm_dev_ioctl_check_extension(long ext)
{
        int r;

        switch (ext) {
#ifdef CONFIG_BOOKE
        case KVM_CAP_PPC_BOOKE_SREGS:
#else
        case KVM_CAP_PPC_SEGSTATE:
        case KVM_CAP_PPC_HIOR:
        case KVM_CAP_PPC_PAPR:
#endif
        case KVM_CAP_PPC_UNSET_IRQ:
        case KVM_CAP_PPC_IRQ_LEVEL:
        case KVM_CAP_ENABLE_CAP:
                r = 1;
                break;
#ifndef CONFIG_KVM_BOOK3S_64_HV
        case KVM_CAP_PPC_PAIRED_SINGLES:
        case KVM_CAP_PPC_OSI:
        case KVM_CAP_PPC_GET_PVINFO:
                r = 1;
                break;
        case KVM_CAP_COALESCED_MMIO:
                r = KVM_COALESCED_MMIO_PAGE_OFFSET;
                break;
#endif
#ifdef CONFIG_KVM_BOOK3S_64_HV
        case KVM_CAP_SPAPR_TCE:
                r = 1;
                break;
        case KVM_CAP_PPC_SMT:
                r = threads_per_core;
                break;
        case KVM_CAP_PPC_RMA:
                r = 1;
                /* PPC970 requires an RMA */
                if (cpu_has_feature(CPU_FTR_ARCH_201))
                        r = 2;
                break;
#endif
        default:
                r = 0;
                break;
        }
        return r;
}

long kvm_arch_dev_ioctl(struct file *filp,
                        unsigned int ioctl, unsigned long arg)
{
        return -EINVAL;
}

int kvm_arch_prepare_memory_region(struct kvm *kvm,
                                   struct kvm_memory_slot *memslot,
                                   struct kvm_memory_slot old,
                                   struct kvm_userspace_memory_region *mem,
                                   int user_alloc)
{
        return kvmppc_core_prepare_memory_region(kvm, mem);
}

void kvm_arch_commit_memory_region(struct kvm *kvm,
                                   struct kvm_userspace_memory_region *mem,
                                   struct kvm_memory_slot old,
                                   int user_alloc)
{
        kvmppc_core_commit_memory_region(kvm, mem);
}

void kvm_arch_flush_shadow(struct kvm *kvm)
{
}

struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
{
        struct kvm_vcpu *vcpu;

        vcpu = kvmppc_core_vcpu_create(kvm, id);
        if (!IS_ERR(vcpu))
                kvmppc_create_vcpu_debugfs(vcpu, id);
        return vcpu;
}

void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
{
        /* Make sure we're not using the vcpu anymore */
        hrtimer_cancel(&vcpu->arch.dec_timer);
        tasklet_kill(&vcpu->arch.tasklet);

        kvmppc_remove_vcpu_debugfs(vcpu);
        kvmppc_core_vcpu_free(vcpu);
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
        kvm_arch_vcpu_free(vcpu);
}

int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
        return kvmppc_core_pending_dec(vcpu);
}

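/*
 * Tasklet half of the decrementer emulation: runs in softirq context
 * after the hrtimer below fires, queues a decrementer exception for the
 * vcpu, and wakes the vcpu thread if it is sleeping in halted state.
 */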
static void kvmppc_decrementer_func(unsigned long data)
{
        struct kvm_vcpu *vcpu = (struct kvm_vcpu *)data;

        kvmppc_core_queue_dec(vcpu);

        if (waitqueue_active(&vcpu->wq)) {
                wake_up_interruptible(&vcpu->wq);
                vcpu->stat.halt_wakeup++;
        }
}

/*
 * low level hrtimer wake routine. Because this runs in hardirq context
 * we schedule a tasklet to do the real work.
 */
enum hrtimer_restart kvmppc_decrementer_wakeup(struct hrtimer *timer)
{
        struct kvm_vcpu *vcpu;

        vcpu = container_of(timer, struct kvm_vcpu, arch.dec_timer);
        tasklet_schedule(&vcpu->arch.tasklet);

        return HRTIMER_NORESTART;
}

int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
        hrtimer_init(&vcpu->arch.dec_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
        tasklet_init(&vcpu->arch.tasklet, kvmppc_decrementer_func, (ulong)vcpu);
        vcpu->arch.dec_timer.function = kvmppc_decrementer_wakeup;
        vcpu->arch.dec_expires = ~(u64)0;

#ifdef CONFIG_KVM_EXIT_TIMING
        mutex_init(&vcpu->arch.exit_timing_lock);
#endif

        return 0;
}

void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
        kvmppc_mmu_destroy(vcpu);
}

void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
#ifdef CONFIG_BOOKE
        /*
         * vrsave (formerly usprg0) isn't used by Linux, but may
         * be used by the guest.
         *
         * On non-booke this is associated with Altivec and
         * is handled by code in book3s.c.
         */
        mtspr(SPRN_VRSAVE, vcpu->arch.vrsave);
#endif
        kvmppc_core_vcpu_load(vcpu, cpu);
        vcpu->cpu = smp_processor_id();
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
        kvmppc_core_vcpu_put(vcpu);
#ifdef CONFIG_BOOKE
        vcpu->arch.vrsave = mfspr(SPRN_VRSAVE);
#endif
        vcpu->cpu = -1;
}

int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
                                        struct kvm_guest_debug *dbg)
{
        return -EINVAL;
}

static void kvmppc_complete_dcr_load(struct kvm_vcpu *vcpu,
                                     struct kvm_run *run)
{
        kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, run->dcr.data);
}

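/*
 * Called when the vcpu thread reenters the kernel after a KVM_EXIT_MMIO
 * exit: userspace has placed the emulated load result in run->mmio.data,
 * and we transfer it into the register recorded in vcpu->arch.io_gpr,
 * byteswapping and sign-extending as requested at emulation time.
 */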
static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu,
                                      struct kvm_run *run)
{
        u64 uninitialized_var(gpr);

        if (run->mmio.len > sizeof(gpr)) {
                printk(KERN_ERR "bad MMIO length: %d\n", run->mmio.len);
                return;
        }

        if (vcpu->arch.mmio_is_bigendian) {
                switch (run->mmio.len) {
                case 8: gpr = *(u64 *)run->mmio.data; break;
                case 4: gpr = *(u32 *)run->mmio.data; break;
                case 2: gpr = *(u16 *)run->mmio.data; break;
                case 1: gpr = *(u8 *)run->mmio.data; break;
                }
        } else {
                /* Convert BE data from userland back to LE. */
                switch (run->mmio.len) {
                case 4: gpr = ld_le32((u32 *)run->mmio.data); break;
                case 2: gpr = ld_le16((u16 *)run->mmio.data); break;
                case 1: gpr = *(u8 *)run->mmio.data; break;
                }
        }

        if (vcpu->arch.mmio_sign_extend) {
                switch (run->mmio.len) {
#ifdef CONFIG_PPC64
                case 4:
                        gpr = (s64)(s32)gpr;
                        break;
#endif
                case 2:
                        gpr = (s64)(s16)gpr;
                        break;
                case 1:
                        gpr = (s64)(s8)gpr;
                        break;
                }
        }

        switch (vcpu->arch.io_gpr & KVM_REG_EXT_MASK) {
        case KVM_REG_GPR:
                kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr);
                break;
        case KVM_REG_FPR:
                vcpu->arch.fpr[vcpu->arch.io_gpr & KVM_REG_MASK] = gpr;
                break;
#ifdef CONFIG_PPC_BOOK3S
        case KVM_REG_QPR:
                vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_REG_MASK] = gpr;
                break;
        case KVM_REG_FQPR:
                vcpu->arch.fpr[vcpu->arch.io_gpr & KVM_REG_MASK] = gpr;
                vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_REG_MASK] = gpr;
                break;
#endif
        default:
                BUG();
        }
}

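/*
 * Record an emulated MMIO load and hand it to userspace.  The
 * EMULATE_DO_MMIO return value tells the caller to exit to userspace
 * with KVM_EXIT_MMIO; kvmppc_complete_mmio_load() above finishes the
 * load when the vcpu is run again.
 */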
int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
                       unsigned int rt, unsigned int bytes, int is_bigendian)
{
        if (bytes > sizeof(run->mmio.data)) {
                printk(KERN_ERR "%s: bad MMIO length: %d\n", __func__,
                       bytes);
        }

        run->mmio.phys_addr = vcpu->arch.paddr_accessed;
        run->mmio.len = bytes;
        run->mmio.is_write = 0;

        vcpu->arch.io_gpr = rt;
        vcpu->arch.mmio_is_bigendian = is_bigendian;
        vcpu->mmio_needed = 1;
        vcpu->mmio_is_write = 0;
        vcpu->arch.mmio_sign_extend = 0;

        return EMULATE_DO_MMIO;
}

/* Same as above, but sign extends */
int kvmppc_handle_loads(struct kvm_run *run, struct kvm_vcpu *vcpu,
                        unsigned int rt, unsigned int bytes, int is_bigendian)
{
        int r;

        r = kvmppc_handle_load(run, vcpu, rt, bytes, is_bigendian);
        vcpu->arch.mmio_sign_extend = 1;

        return r;
}

int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
                        u64 val, unsigned int bytes, int is_bigendian)
{
        void *data = run->mmio.data;

        if (bytes > sizeof(run->mmio.data)) {
                printk(KERN_ERR "%s: bad MMIO length: %d\n", __func__,
                       bytes);
        }

        run->mmio.phys_addr = vcpu->arch.paddr_accessed;
        run->mmio.len = bytes;
        run->mmio.is_write = 1;
        vcpu->mmio_needed = 1;
        vcpu->mmio_is_write = 1;

        /* Store the value at the lowest bytes in 'data'. */
        if (is_bigendian) {
                switch (bytes) {
                case 8: *(u64 *)data = val; break;
                case 4: *(u32 *)data = val; break;
                case 2: *(u16 *)data = val; break;
                case 1: *(u8 *)data = val; break;
                }
        } else {
                /* Store LE value into 'data'. */
                switch (bytes) {
                case 4: st_le32(data, val); break;
                case 2: st_le16(data, val); break;
                case 1: *(u8 *)data = val; break;
                }
        }

        return EMULATE_DO_MMIO;
}

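/*
 * Main entry point for the KVM_RUN ioctl.  Any completion work left over
 * from the previous exit to userspace (MMIO or DCR load results, OSI or
 * PAPR hypercall return values) is folded back into the vcpu state
 * before the guest is reentered.
 */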
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
        int r;
        sigset_t sigsaved;

        if (vcpu->sigset_active)
                sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

        if (vcpu->mmio_needed) {
                if (!vcpu->mmio_is_write)
                        kvmppc_complete_mmio_load(vcpu, run);
                vcpu->mmio_needed = 0;
        } else if (vcpu->arch.dcr_needed) {
                if (!vcpu->arch.dcr_is_write)
                        kvmppc_complete_dcr_load(vcpu, run);
                vcpu->arch.dcr_needed = 0;
        } else if (vcpu->arch.osi_needed) {
                u64 *gprs = run->osi.gprs;
                int i;

                for (i = 0; i < 32; i++)
                        kvmppc_set_gpr(vcpu, i, gprs[i]);
                vcpu->arch.osi_needed = 0;
        } else if (vcpu->arch.hcall_needed) {
                int i;

                kvmppc_set_gpr(vcpu, 3, run->papr_hcall.ret);
                for (i = 0; i < 9; ++i)
                        kvmppc_set_gpr(vcpu, 4 + i, run->papr_hcall.args[i]);
                vcpu->arch.hcall_needed = 0;
        }

        kvmppc_core_deliver_interrupts(vcpu);

        r = kvmppc_vcpu_run(run, vcpu);

        if (vcpu->sigset_active)
                sigprocmask(SIG_SETMASK, &sigsaved, NULL);

        return r;
}

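/*
 * Inject or retract an external interrupt on behalf of userspace
 * (KVM_INTERRUPT ioctl).  A sleeping vcpu is woken; a running one is
 * kicked with a reschedule IPI so it notices the new interrupt promptly.
 */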
int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq)
{
        if (irq->irq == KVM_INTERRUPT_UNSET)
                kvmppc_core_dequeue_external(vcpu, irq);
        else
                kvmppc_core_queue_external(vcpu, irq);

        if (waitqueue_active(&vcpu->wq)) {
                wake_up_interruptible(&vcpu->wq);
                vcpu->stat.halt_wakeup++;
        } else if (vcpu->cpu != -1) {
                smp_send_reschedule(vcpu->cpu);
        }

        return 0;
}

static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
                                     struct kvm_enable_cap *cap)
{
        int r;

        if (cap->flags)
                return -EINVAL;

        switch (cap->cap) {
        case KVM_CAP_PPC_OSI:
                r = 0;
                vcpu->arch.osi_enabled = true;
                break;
        case KVM_CAP_PPC_PAPR:
                r = 0;
                vcpu->arch.papr_enabled = true;
                break;
        default:
                r = -EINVAL;
                break;
        }

        if (!r)
                r = kvmppc_sanity_check(vcpu);

        return r;
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
                                    struct kvm_mp_state *mp_state)
{
        return -EINVAL;
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
                                    struct kvm_mp_state *mp_state)
{
        return -EINVAL;
}

long kvm_arch_vcpu_ioctl(struct file *filp,
                         unsigned int ioctl, unsigned long arg)
{
        struct kvm_vcpu *vcpu = filp->private_data;
        void __user *argp = (void __user *)arg;
        long r;

        switch (ioctl) {
        case KVM_INTERRUPT: {
                struct kvm_interrupt irq;
                r = -EFAULT;
                if (copy_from_user(&irq, argp, sizeof(irq)))
                        goto out;
                r = kvm_vcpu_ioctl_interrupt(vcpu, &irq);
                goto out;
        }
        case KVM_ENABLE_CAP:
        {
                struct kvm_enable_cap cap;
                r = -EFAULT;
                if (copy_from_user(&cap, argp, sizeof(cap)))
                        goto out;
                r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
                break;
        }
        default:
                r = -EINVAL;
        }

out:
        return r;
}

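/*
 * Fill in the instruction sequence a guest should use to issue a KVM
 * hypercall; userspace retrieves it via KVM_PPC_GET_PVINFO.  As a worked
 * example: assuming KVM_SC_MAGIC_R0 is 0x4b564d21 ("KVM!", its value in
 * kvm_para.h at the time of writing), the four encoded instructions come
 * out as 0x3c004b56 (lis), 0x60004d21 (ori), 0x44000002 (sc) and
 * 0x60000000 (nop).
 */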
static int kvm_vm_ioctl_get_pvinfo(struct kvm_ppc_pvinfo *pvinfo)
{
        u32 inst_lis = 0x3c000000;
        u32 inst_ori = 0x60000000;
        u32 inst_nop = 0x60000000;
        u32 inst_sc = 0x44000002;
        u32 inst_imm_mask = 0xffff;

        /*
         * The hypercall to get into KVM from within guest context is as
         * follows:
         *
         *    lis r0, KVM_SC_MAGIC_R0@h
         *    ori r0, r0, KVM_SC_MAGIC_R0@l
         *    sc
         *    nop
         */
        pvinfo->hcall[0] = inst_lis | ((KVM_SC_MAGIC_R0 >> 16) & inst_imm_mask);
        pvinfo->hcall[1] = inst_ori | (KVM_SC_MAGIC_R0 & inst_imm_mask);
        pvinfo->hcall[2] = inst_sc;
        pvinfo->hcall[3] = inst_nop;

        return 0;
}

long kvm_arch_vm_ioctl(struct file *filp,
                       unsigned int ioctl, unsigned long arg)
{
        void __user *argp = (void __user *)arg;
        long r;

        switch (ioctl) {
        case KVM_PPC_GET_PVINFO: {
                struct kvm_ppc_pvinfo pvinfo;
                memset(&pvinfo, 0, sizeof(pvinfo));
                r = kvm_vm_ioctl_get_pvinfo(&pvinfo);
                if (copy_to_user(argp, &pvinfo, sizeof(pvinfo))) {
                        r = -EFAULT;
                        goto out;
                }
                break;
        }
#ifdef CONFIG_KVM_BOOK3S_64_HV
        case KVM_CREATE_SPAPR_TCE: {
                struct kvm_create_spapr_tce create_tce;
                struct kvm *kvm = filp->private_data;

                r = -EFAULT;
                if (copy_from_user(&create_tce, argp, sizeof(create_tce)))
                        goto out;
                r = kvm_vm_ioctl_create_spapr_tce(kvm, &create_tce);
                goto out;
        }
        case KVM_ALLOCATE_RMA: {
                struct kvm *kvm = filp->private_data;
                struct kvm_allocate_rma rma;

                r = kvm_vm_ioctl_allocate_rma(kvm, &rma);
                if (r >= 0 && copy_to_user(argp, &rma, sizeof(rma)))
                        r = -EFAULT;
                break;
        }
#endif /* CONFIG_KVM_BOOK3S_64_HV */
        default:
                r = -ENOTTY;
        }

out:
        return r;
}

int kvm_arch_init(void *opaque)
{
        return 0;
}

void kvm_arch_exit(void)
{
}