powerpc.c

/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright IBM Corp. 2007
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 *          Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/hrtimer.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <asm/cputable.h>
#include <asm/uaccess.h>
#include <asm/kvm_ppc.h>
#include <asm/tlbflush.h>
#include "timing.h"
#include "../mm/mmu_decl.h"

#define CREATE_TRACE_POINTS
#include "trace.h"

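/*
 * A vcpu is considered runnable unless the guest has set MSR_WE
 * (wait state enable), or an exception is pending for it.
 */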
int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
{
	return !(v->arch.shared->msr & MSR_WE) ||
	       !!(v->arch.pending_exceptions);
}

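/*
 * Handle a KVM paravirtual hypercall: the hypercall number arrives in
 * r11 and up to four arguments in r3-r6; when the guest runs in 32-bit
 * mode (MSR_SF clear) the arguments are truncated to 32 bits.  The
 * second return value is passed back to the guest in r4.
 */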
int kvmppc_kvm_pv(struct kvm_vcpu *vcpu)
{
	int nr = kvmppc_get_gpr(vcpu, 11);
	int r;
	unsigned long __maybe_unused param1 = kvmppc_get_gpr(vcpu, 3);
	unsigned long __maybe_unused param2 = kvmppc_get_gpr(vcpu, 4);
	unsigned long __maybe_unused param3 = kvmppc_get_gpr(vcpu, 5);
	unsigned long __maybe_unused param4 = kvmppc_get_gpr(vcpu, 6);
	unsigned long r2 = 0;

	if (!(vcpu->arch.shared->msr & MSR_SF)) {
		/* 32 bit mode */
		param1 &= 0xffffffff;
		param2 &= 0xffffffff;
		param3 &= 0xffffffff;
		param4 &= 0xffffffff;
	}

	switch (nr) {
	case HC_VENDOR_KVM | KVM_HC_PPC_MAP_MAGIC_PAGE:
	{
		vcpu->arch.magic_page_pa = param1;
		vcpu->arch.magic_page_ea = param2;

		r2 = KVM_MAGIC_FEAT_SR;

		r = HC_EV_SUCCESS;
		break;
	}
	case HC_VENDOR_KVM | KVM_HC_FEATURES:
		r = HC_EV_SUCCESS;
#if defined(CONFIG_PPC_BOOK3S) /* XXX Missing magic page on BookE */
		r2 |= (1 << KVM_FEATURE_MAGIC_PAGE);
#endif
		/* Second return value is in r4 */
		break;
	default:
		r = HC_EV_UNIMPLEMENTED;
		break;
	}

	kvmppc_set_gpr(vcpu, 4, r2);

	return r;
}

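/*
 * Emulate one guest instruction and translate the emulation result
 * into a resume action: stay in the guest on success, bounce out to
 * userspace for MMIO, or give up (and log the instruction) when
 * emulation fails.
 */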
int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	enum emulation_result er;
	int r;

	er = kvmppc_emulate_instruction(run, vcpu);
	switch (er) {
	case EMULATE_DONE:
		/* Future optimization: only reload non-volatiles if they were
		 * actually modified. */
		r = RESUME_GUEST_NV;
		break;
	case EMULATE_DO_MMIO:
		run->exit_reason = KVM_EXIT_MMIO;
		/* We must reload nonvolatiles because "update" load/store
		 * instructions modify register state. */
		/* Future optimization: only reload non-volatiles if they were
		 * actually modified. */
		r = RESUME_HOST_NV;
		break;
	case EMULATE_FAIL:
		/* XXX Deliver Program interrupt to guest. */
		printk(KERN_EMERG "%s: emulation failed (%08x)\n", __func__,
		       kvmppc_get_last_inst(vcpu));
		r = RESUME_HOST;
		break;
	default:
		BUG();
	}

	return r;
}

int kvm_arch_hardware_enable(void *garbage)
{
	return 0;
}

void kvm_arch_hardware_disable(void *garbage)
{
}

int kvm_arch_hardware_setup(void)
{
	return 0;
}

void kvm_arch_hardware_unsetup(void)
{
}

void kvm_arch_check_processor_compat(void *rtn)
{
	*(int *)rtn = kvmppc_core_check_processor_compat();
}

int kvm_arch_init_vm(struct kvm *kvm)
{
	return 0;
}

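/*
 * Tear down the VM: free every vcpu first, then clear the vcpu table
 * entries and reset the online vcpu count under kvm->lock.
 */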
void kvm_arch_destroy_vm(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_arch_vcpu_free(vcpu);

	mutex_lock(&kvm->lock);
	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
		kvm->vcpus[i] = NULL;

	atomic_set(&kvm->online_vcpus, 0);
	mutex_unlock(&kvm->lock);
}

void kvm_arch_sync_events(struct kvm *kvm)
{
}

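/*
 * Report which KVM capabilities this architecture supports: boolean
 * capabilities return 1, KVM_CAP_COALESCED_MMIO returns the page
 * offset of the coalesced MMIO ring, and everything else returns 0.
 */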
int kvm_dev_ioctl_check_extension(long ext)
{
	int r;

	switch (ext) {
	case KVM_CAP_PPC_SEGSTATE:
	case KVM_CAP_PPC_PAIRED_SINGLES:
	case KVM_CAP_PPC_UNSET_IRQ:
	case KVM_CAP_PPC_IRQ_LEVEL:
	case KVM_CAP_ENABLE_CAP:
	case KVM_CAP_PPC_OSI:
	case KVM_CAP_PPC_GET_PVINFO:
		r = 1;
		break;
	case KVM_CAP_COALESCED_MMIO:
		r = KVM_COALESCED_MMIO_PAGE_OFFSET;
		break;
	default:
		r = 0;
		break;
	}

	return r;
}

long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg)
{
	return -EINVAL;
}

int kvm_arch_prepare_memory_region(struct kvm *kvm,
				   struct kvm_memory_slot *memslot,
				   struct kvm_memory_slot old,
				   struct kvm_userspace_memory_region *mem,
				   int user_alloc)
{
	return 0;
}

void kvm_arch_commit_memory_region(struct kvm *kvm,
				   struct kvm_userspace_memory_region *mem,
				   struct kvm_memory_slot old,
				   int user_alloc)
{
	return;
}

void kvm_arch_flush_shadow(struct kvm *kvm)
{
}

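/*
 * Create a vcpu via the core (Book3S or BookE) backend and attach the
 * per-vcpu debugfs entries on success.
 */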
struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
{
	struct kvm_vcpu *vcpu;
	vcpu = kvmppc_core_vcpu_create(kvm, id);
	if (!IS_ERR(vcpu))
		kvmppc_create_vcpu_debugfs(vcpu, id);
	return vcpu;
}

void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
{
	/* Make sure we're not using the vcpu anymore */
	hrtimer_cancel(&vcpu->arch.dec_timer);
	tasklet_kill(&vcpu->arch.tasklet);

	kvmppc_remove_vcpu_debugfs(vcpu);
	kvmppc_core_vcpu_free(vcpu);
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	kvm_arch_vcpu_free(vcpu);
}

int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
	return kvmppc_core_pending_dec(vcpu);
}

static void kvmppc_decrementer_func(unsigned long data)
{
	struct kvm_vcpu *vcpu = (struct kvm_vcpu *)data;

	kvmppc_core_queue_dec(vcpu);

	if (waitqueue_active(&vcpu->wq)) {
		wake_up_interruptible(&vcpu->wq);
		vcpu->stat.halt_wakeup++;
	}
}

/*
 * low level hrtimer wake routine. Because this runs in hardirq context
 * we schedule a tasklet to do the real work.
 */
enum hrtimer_restart kvmppc_decrementer_wakeup(struct hrtimer *timer)
{
	struct kvm_vcpu *vcpu;

	vcpu = container_of(timer, struct kvm_vcpu, arch.dec_timer);
	tasklet_schedule(&vcpu->arch.tasklet);

	return HRTIMER_NORESTART;
}

int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
	hrtimer_init(&vcpu->arch.dec_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
	tasklet_init(&vcpu->arch.tasklet, kvmppc_decrementer_func, (ulong)vcpu);
	vcpu->arch.dec_timer.function = kvmppc_decrementer_wakeup;

#ifdef CONFIG_KVM_EXIT_TIMING
	mutex_init(&vcpu->arch.exit_timing_lock);
#endif

	return 0;
}

void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	kvmppc_mmu_destroy(vcpu);
}

void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
#ifdef CONFIG_BOOKE
	/*
	 * vrsave (formerly usprg0) isn't used by Linux, but may
	 * be used by the guest.
	 *
	 * On non-booke this is associated with Altivec and
	 * is handled by code in book3s.c.
	 */
	mtspr(SPRN_VRSAVE, vcpu->arch.vrsave);
#endif
	kvmppc_core_vcpu_load(vcpu, cpu);
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	kvmppc_core_vcpu_put(vcpu);
#ifdef CONFIG_BOOKE
	vcpu->arch.vrsave = mfspr(SPRN_VRSAVE);
#endif
}

int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	return -EINVAL;
}

static void kvmppc_complete_dcr_load(struct kvm_vcpu *vcpu,
				     struct kvm_run *run)
{
	kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, run->dcr.data);
}

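/*
 * Finish an MMIO load once userspace has filled in run->mmio.data:
 * pull the value out at the recorded width, byte-swap it if the guest
 * access was little-endian, optionally sign-extend, and write the
 * result into the GPR/FPR/QPR the emulated instruction targeted.
 */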
static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu,
				      struct kvm_run *run)
{
	u64 uninitialized_var(gpr);

	if (run->mmio.len > sizeof(gpr)) {
		printk(KERN_ERR "bad MMIO length: %d\n", run->mmio.len);
		return;
	}

	if (vcpu->arch.mmio_is_bigendian) {
		switch (run->mmio.len) {
		case 8: gpr = *(u64 *)run->mmio.data; break;
		case 4: gpr = *(u32 *)run->mmio.data; break;
		case 2: gpr = *(u16 *)run->mmio.data; break;
		case 1: gpr = *(u8 *)run->mmio.data; break;
		}
	} else {
		/* Convert BE data from userland back to LE. */
		switch (run->mmio.len) {
		case 4: gpr = ld_le32((u32 *)run->mmio.data); break;
		case 2: gpr = ld_le16((u16 *)run->mmio.data); break;
		case 1: gpr = *(u8 *)run->mmio.data; break;
		}
	}

	if (vcpu->arch.mmio_sign_extend) {
		switch (run->mmio.len) {
#ifdef CONFIG_PPC64
		case 4:
			gpr = (s64)(s32)gpr;
			break;
#endif
		case 2:
			gpr = (s64)(s16)gpr;
			break;
		case 1:
			gpr = (s64)(s8)gpr;
			break;
		}
	}

	kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr);

	switch (vcpu->arch.io_gpr & KVM_REG_EXT_MASK) {
	case KVM_REG_GPR:
		kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr);
		break;
	case KVM_REG_FPR:
		vcpu->arch.fpr[vcpu->arch.io_gpr & KVM_REG_MASK] = gpr;
		break;
#ifdef CONFIG_PPC_BOOK3S
	case KVM_REG_QPR:
		vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_REG_MASK] = gpr;
		break;
	case KVM_REG_FQPR:
		vcpu->arch.fpr[vcpu->arch.io_gpr & KVM_REG_MASK] = gpr;
		vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_REG_MASK] = gpr;
		break;
#endif
	default:
		BUG();
	}
}

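/*
 * Set up an MMIO load for userspace to complete: record the physical
 * address, width, endianness and destination register, then return
 * EMULATE_DO_MMIO so the caller exits to userspace with KVM_EXIT_MMIO.
 */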
int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
		       unsigned int rt, unsigned int bytes, int is_bigendian)
{
	if (bytes > sizeof(run->mmio.data)) {
		printk(KERN_ERR "%s: bad MMIO length: %d\n", __func__,
		       run->mmio.len);
	}

	run->mmio.phys_addr = vcpu->arch.paddr_accessed;
	run->mmio.len = bytes;
	run->mmio.is_write = 0;

	vcpu->arch.io_gpr = rt;
	vcpu->arch.mmio_is_bigendian = is_bigendian;
	vcpu->mmio_needed = 1;
	vcpu->mmio_is_write = 0;
	vcpu->arch.mmio_sign_extend = 0;

	return EMULATE_DO_MMIO;
}

/* Same as above, but sign extends */
int kvmppc_handle_loads(struct kvm_run *run, struct kvm_vcpu *vcpu,
			unsigned int rt, unsigned int bytes, int is_bigendian)
{
	int r;

	r = kvmppc_handle_load(run, vcpu, rt, bytes, is_bigendian);
	vcpu->arch.mmio_sign_extend = 1;

	return r;
}

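/*
 * Set up an MMIO store: stash the value in run->mmio.data in the
 * width and byte order of the original access, then hand the request
 * to userspace via EMULATE_DO_MMIO.
 */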
int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
			u64 val, unsigned int bytes, int is_bigendian)
{
	void *data = run->mmio.data;

	if (bytes > sizeof(run->mmio.data)) {
		printk(KERN_ERR "%s: bad MMIO length: %d\n", __func__,
		       run->mmio.len);
	}

	run->mmio.phys_addr = vcpu->arch.paddr_accessed;
	run->mmio.len = bytes;
	run->mmio.is_write = 1;
	vcpu->mmio_needed = 1;
	vcpu->mmio_is_write = 1;

	/* Store the value at the lowest bytes in 'data'. */
	if (is_bigendian) {
		switch (bytes) {
		case 8: *(u64 *)data = val; break;
		case 4: *(u32 *)data = val; break;
		case 2: *(u16 *)data = val; break;
		case 1: *(u8 *)data = val; break;
		}
	} else {
		/* Store LE value into 'data'. */
		switch (bytes) {
		case 4: st_le32(data, val); break;
		case 2: st_le16(data, val); break;
		case 1: *(u8 *)data = val; break;
		}
	}

	return EMULATE_DO_MMIO;
}

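/*
 * Main vcpu run loop: finish whatever MMIO/DCR/OSI exit userspace just
 * completed, deliver pending interrupts, then enter the guest with the
 * vcpu's signal mask installed and host interrupts disabled.
 */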
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	int r;
	sigset_t sigsaved;

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

	if (vcpu->mmio_needed) {
		if (!vcpu->mmio_is_write)
			kvmppc_complete_mmio_load(vcpu, run);
		vcpu->mmio_needed = 0;
	} else if (vcpu->arch.dcr_needed) {
		if (!vcpu->arch.dcr_is_write)
			kvmppc_complete_dcr_load(vcpu, run);
		vcpu->arch.dcr_needed = 0;
	} else if (vcpu->arch.osi_needed) {
		u64 *gprs = run->osi.gprs;
		int i;

		for (i = 0; i < 32; i++)
			kvmppc_set_gpr(vcpu, i, gprs[i]);
		vcpu->arch.osi_needed = 0;
	}

	kvmppc_core_deliver_interrupts(vcpu);

	local_irq_disable();
	kvm_guest_enter();
	r = __kvmppc_vcpu_run(run, vcpu);
	kvm_guest_exit();
	local_irq_enable();

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

	return r;
}

int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq)
{
	if (irq->irq == KVM_INTERRUPT_UNSET)
		kvmppc_core_dequeue_external(vcpu, irq);
	else
		kvmppc_core_queue_external(vcpu, irq);

	if (waitqueue_active(&vcpu->wq)) {
		wake_up_interruptible(&vcpu->wq);
		vcpu->stat.halt_wakeup++;
	}

	return 0;
}

static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
				     struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
	case KVM_CAP_PPC_OSI:
		r = 0;
		vcpu->arch.osi_enabled = true;
		break;
	default:
		r = -EINVAL;
		break;
	}

	return r;
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -EINVAL;
}

long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	long r;

	switch (ioctl) {
	case KVM_INTERRUPT: {
		struct kvm_interrupt irq;
		r = -EFAULT;
		if (copy_from_user(&irq, argp, sizeof(irq)))
			goto out;
		r = kvm_vcpu_ioctl_interrupt(vcpu, &irq);
		goto out;
	}
	case KVM_ENABLE_CAP:
	{
		struct kvm_enable_cap cap;
		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			goto out;
		r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
		break;
	}
	default:
		r = -EINVAL;
	}

out:
	return r;
}

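/*
 * Fill in the hypercall sequence (lis/ori/sc/nop) that a guest should
 * use to enter KVM, encoding the halves of KVM_SC_MAGIC_R0 into the
 * lis and ori immediates.
 */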
static int kvm_vm_ioctl_get_pvinfo(struct kvm_ppc_pvinfo *pvinfo)
{
	u32 inst_lis = 0x3c000000;
	u32 inst_ori = 0x60000000;
	u32 inst_nop = 0x60000000;
	u32 inst_sc = 0x44000002;
	u32 inst_imm_mask = 0xffff;

	/*
	 * The hypercall to get into KVM from within guest context is as
	 * follows:
	 *
	 *    lis r0, r0, KVM_SC_MAGIC_R0@h
	 *    ori r0, KVM_SC_MAGIC_R0@l
	 *    sc
	 *    nop
	 */
	pvinfo->hcall[0] = inst_lis | ((KVM_SC_MAGIC_R0 >> 16) & inst_imm_mask);
	pvinfo->hcall[1] = inst_ori | (KVM_SC_MAGIC_R0 & inst_imm_mask);
	pvinfo->hcall[2] = inst_sc;
	pvinfo->hcall[3] = inst_nop;

	return 0;
}

long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg)
{
	void __user *argp = (void __user *)arg;
	long r;

	switch (ioctl) {
	case KVM_PPC_GET_PVINFO: {
		struct kvm_ppc_pvinfo pvinfo;
		memset(&pvinfo, 0, sizeof(pvinfo));
		r = kvm_vm_ioctl_get_pvinfo(&pvinfo);
		if (copy_to_user(argp, &pvinfo, sizeof(pvinfo))) {
			r = -EFAULT;
			goto out;
		}
		break;
	}
	default:
		r = -ENOTTY;
	}

out:
	return r;
}

int kvm_arch_init(void *opaque)
{
	return 0;
}

void kvm_arch_exit(void)
{
}