/*
 * Copyright (C) 2009. SUSE Linux Products GmbH. All rights reserved.
 *
 * Authors:
 *    Alexander Graf <agraf@suse.de>
 *    Kevin Wolf <mail@kevin-wolf.de>
 *    Paul Mackerras <paulus@samba.org>
 *
 * Description:
 * Functions relating to running KVM on Book 3S processors where
 * we don't have access to hypervisor mode, and we run the guest
 * in problem state (user mode).
 *
 * This file is derived from arch/powerpc/kvm/44x.c,
 * by Hollis Blanchard <hollisb@us.ibm.com>.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 */
#include <linux/kvm_host.h>
#include <linux/export.h>
#include <linux/err.h>
#include <linux/slab.h>

#include <asm/reg.h>
#include <asm/cputable.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/mmu_context.h>
#include <asm/switch_to.h>
#include <linux/gfp.h>
#include <linux/sched.h>
#include <linux/vmalloc.h>
#include <linux/highmem.h>

#include "trace.h"
/* #define EXIT_DEBUG */
/* #define DEBUG_EXT */

static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr,
			     ulong msr);
/* Some compatibility defines */
#ifdef CONFIG_PPC_BOOK3S_32
#define MSR_USER32 MSR_USER
#define MSR_USER64 MSR_USER
#define HW_PAGE_SIZE PAGE_SIZE
#define __hard_irq_disable local_irq_disable
#define __hard_irq_enable local_irq_enable
#endif
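
/*
 * Load the vcpu's shadow state onto the current CPU.  On Book3S_64 the
 * shadow SLB and shadow vcpu are copied into the PACA so that the
 * low-level entry code can reach them; on Book3S_32 the shadow vcpu
 * lives in the current thread struct instead.
 */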
void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
#ifdef CONFIG_PPC_BOOK3S_64
	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
	memcpy(svcpu->slb, to_book3s(vcpu)->slb_shadow, sizeof(svcpu->slb));
	memcpy(&get_paca()->shadow_vcpu, to_book3s(vcpu)->shadow_vcpu,
	       sizeof(get_paca()->shadow_vcpu));
	svcpu->slb_max = to_book3s(vcpu)->slb_shadow_max;
	svcpu_put(svcpu);
#endif

#ifdef CONFIG_PPC_BOOK3S_32
	current->thread.kvm_shadow_vcpu = to_book3s(vcpu)->shadow_vcpu;
#endif
}
void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_PPC_BOOK3S_64
	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
	memcpy(to_book3s(vcpu)->slb_shadow, svcpu->slb, sizeof(svcpu->slb));
	memcpy(to_book3s(vcpu)->shadow_vcpu, &get_paca()->shadow_vcpu,
	       sizeof(get_paca()->shadow_vcpu));
	to_book3s(vcpu)->slb_shadow_max = svcpu->slb_max;
	svcpu_put(svcpu);
#endif

	kvmppc_giveup_ext(vcpu, MSR_FP);
	kvmppc_giveup_ext(vcpu, MSR_VEC);
	kvmppc_giveup_ext(vcpu, MSR_VSX);
}
void kvmppc_core_check_requests(struct kvm_vcpu *vcpu)
{
	/* We misuse TLB_FLUSH to indicate that we want to clear
	   all shadow cache entries */
	if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu))
		kvmppc_mmu_pte_flush(vcpu, 0, 0);
}
/************* MMU Notifiers *************/

int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
{
	trace_kvm_unmap_hva(hva);

	/*
	 * Flush all shadow tlb entries everywhere. This is slow, but
	 * we are 100% sure that we catch the to-be-unmapped page.
	 */
	kvm_flush_remote_tlbs(kvm);

	return 0;
}

int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end)
{
	/* kvm_unmap_hva flushes everything anyway */
	kvm_unmap_hva(kvm, start);

	return 0;
}

int kvm_age_hva(struct kvm *kvm, unsigned long hva)
{
	/* XXX could be more clever ;) */
	return 0;
}

int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
{
	/* XXX could be more clever ;) */
	return 0;
}

void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
{
	/* The page will get remapped properly on its next fault */
	kvm_unmap_hva(kvm, hva);
}
/*****************************************/
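
/*
 * Compute the MSR the shadow (host-side) context actually runs with:
 * the guest-controlled bits are taken from the shared MSR, the bits
 * the host needs (machine check, translation, problem state, external
 * interrupts) are forced on, and any extensions the guest currently
 * owns are passed through.
 */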
static void kvmppc_recalc_shadow_msr(struct kvm_vcpu *vcpu)
{
	ulong smsr = vcpu->arch.shared->msr;

	/* Guest MSR values */
	smsr &= MSR_FE0 | MSR_FE1 | MSR_SF | MSR_SE | MSR_BE | MSR_DE;
	/* Process MSR values */
	smsr |= MSR_ME | MSR_RI | MSR_IR | MSR_DR | MSR_PR | MSR_EE;
	/* External providers the guest reserved */
	smsr |= (vcpu->arch.shared->msr & vcpu->arch.guest_owned_ext);
	/* 64-bit Process MSR values */
#ifdef CONFIG_PPC_BOOK3S_64
	smsr |= MSR_ISF | MSR_HV;
#endif
	vcpu->arch.shadow_msr = smsr;
}
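
/*
 * Handle a guest write to the MSR.  Besides updating the shared page
 * and recalculating the shadow MSR, this blocks the vcpu when the
 * guest sets MSR_POW with no exceptions pending, remaps segments when
 * the translation mode changes, and preloads the FPU if the guest has
 * it enabled.
 */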
void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 msr)
{
	ulong old_msr = vcpu->arch.shared->msr;

#ifdef EXIT_DEBUG
	printk(KERN_INFO "KVM: Set MSR to 0x%llx\n", msr);
#endif

	msr &= to_book3s(vcpu)->msr_mask;
	vcpu->arch.shared->msr = msr;
	kvmppc_recalc_shadow_msr(vcpu);

	if (msr & MSR_POW) {
		if (!vcpu->arch.pending_exceptions) {
			kvm_vcpu_block(vcpu);
			clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
			vcpu->stat.halt_wakeup++;

			/* Unset POW bit after we woke up */
			msr &= ~MSR_POW;
			vcpu->arch.shared->msr = msr;
		}
	}

	if ((vcpu->arch.shared->msr & (MSR_PR|MSR_IR|MSR_DR)) !=
		   (old_msr & (MSR_PR|MSR_IR|MSR_DR))) {
		kvmppc_mmu_flush_segments(vcpu);
		kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu));

		/* Preload magic page segment when in kernel mode */
		if (!(msr & MSR_PR) && vcpu->arch.magic_page_pa) {
			struct kvm_vcpu_arch *a = &vcpu->arch;

			if (msr & MSR_DR)
				kvmppc_mmu_map_segment(vcpu, a->magic_page_ea);
			else
				kvmppc_mmu_map_segment(vcpu, a->magic_page_pa);
		}
	}

	/*
	 * When switching from 32 to 64-bit, we may have a stale 32-bit
	 * magic page around, and we need to flush it. Typically the 32-bit
	 * magic page will be instantiated when calling into RTAS. Note: We
	 * assume that such a transition only happens while in kernel mode,
	 * i.e., we never transition from user 32-bit to kernel 64-bit with
	 * a 32-bit magic page around.
	 */
	if (vcpu->arch.magic_page_pa &&
	    !(old_msr & MSR_PR) && !(old_msr & MSR_SF) && (msr & MSR_SF)) {
		/* going from RTAS to normal kernel code */
		kvmppc_mmu_pte_flush(vcpu, (uint32_t)vcpu->arch.magic_page_pa,
				     ~0xFFFUL);
	}

	/* Preload FPU if it's enabled */
	if (vcpu->arch.shared->msr & MSR_FP)
		kvmppc_handle_ext(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, MSR_FP);
}
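
/*
 * Set the processor version the guest sees.  This selects the 32- or
 * 64-bit MMU backend, the default HIOR and MSR mask, and a handful of
 * per-CPU quirks: DCBZ32 handling on 970, MSR_FEx masking on Cell,
 * and native paired singles on Gekko/Broadway-class hosts.
 */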
void kvmppc_set_pvr(struct kvm_vcpu *vcpu, u32 pvr)
{
	u32 host_pvr;

	vcpu->arch.hflags &= ~BOOK3S_HFLAG_SLB;
	vcpu->arch.pvr = pvr;
#ifdef CONFIG_PPC_BOOK3S_64
	if ((pvr >= 0x330000) && (pvr < 0x70330000)) {
		kvmppc_mmu_book3s_64_init(vcpu);
		if (!to_book3s(vcpu)->hior_explicit)
			to_book3s(vcpu)->hior = 0xfff00000;
		to_book3s(vcpu)->msr_mask = 0xffffffffffffffffULL;
		vcpu->arch.cpu_type = KVM_CPU_3S_64;
	} else
#endif
	{
		kvmppc_mmu_book3s_32_init(vcpu);
		if (!to_book3s(vcpu)->hior_explicit)
			to_book3s(vcpu)->hior = 0;
		to_book3s(vcpu)->msr_mask = 0xffffffffULL;
		vcpu->arch.cpu_type = KVM_CPU_3S_32;
	}

	kvmppc_sanity_check(vcpu);

	/* If we are in hypervisor level on 970, we can tell the CPU to
	 * treat DCBZ as a 32-byte store */
	vcpu->arch.hflags &= ~BOOK3S_HFLAG_DCBZ32;
	if (vcpu->arch.mmu.is_dcbz32(vcpu) && (mfmsr() & MSR_HV) &&
	    !strcmp(cur_cpu_spec->platform, "ppc970"))
		vcpu->arch.hflags |= BOOK3S_HFLAG_DCBZ32;

	/* Cell performs badly if MSR_FEx are set. So let's hope nobody
	   really needs them in a VM on Cell and force-disable them. */
	if (!strcmp(cur_cpu_spec->platform, "ppc-cell-be"))
		to_book3s(vcpu)->msr_mask &= ~(MSR_FE0 | MSR_FE1);

#ifdef CONFIG_PPC_BOOK3S_32
	/* 32-bit Book3S always has a 32-byte dcbz */
	vcpu->arch.hflags |= BOOK3S_HFLAG_DCBZ32;
#endif

	/* On some CPUs we can execute paired single operations natively */
	asm ("mfpvr %0" : "=r"(host_pvr));
	switch (host_pvr) {
	case 0x00080200:	/* lonestar 2.0 */
	case 0x00088202:	/* lonestar 2.2 */
	case 0x70000100:	/* gekko 1.0 */
	case 0x00080100:	/* gekko 2.0 */
	case 0x00083203:	/* gekko 2.3a */
	case 0x00083213:	/* gekko 2.3b */
	case 0x00083204:	/* gekko 2.4 */
	case 0x00083214:	/* gekko 2.4e (8SE) - retail HW2 */
	case 0x00087200:	/* broadway */
		vcpu->arch.hflags |= BOOK3S_HFLAG_NATIVE_PS;
		/* Enable HID2.PSE - in case we need it later */
		mtspr(SPRN_HID2_GEKKO, mfspr(SPRN_HID2_GEKKO) | (1 << 29));
	}
}
/* Book3s_32 CPUs always have a 32-byte cache line size, which Linux assumes.
 * To make Book3s_32 Linux work on Book3s_64, we have to make sure we trap dcbz
 * to emulate a 32-byte dcbz length.
 *
 * The Book3s_64 inventors also realized this case and implemented a special
 * bit in the HID5 register, which is a hypervisor resource. Thus we can't use
 * it.
 *
 * My approach here is to patch the dcbz instruction on executing pages.
 */
static void kvmppc_patch_dcbz(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte)
{
	struct page *hpage;
	u64 hpage_offset;
	u32 *page;
	int i;

	hpage = gfn_to_page(vcpu->kvm, pte->raddr >> PAGE_SHIFT);
	if (is_error_page(hpage))
		return;

	hpage_offset = pte->raddr & ~PAGE_MASK;
	hpage_offset &= ~0xFFFULL;
	hpage_offset /= 4;

	get_page(hpage);
	page = kmap_atomic(hpage);

	/* patch dcbz into a reserved instruction, so we trap */
	for (i = hpage_offset; i < hpage_offset + (HW_PAGE_SIZE / 4); i++)
		if ((page[i] & 0xff0007ff) == INS_DCBZ)
			page[i] &= 0xfffffff7;

	kunmap_atomic(page);
	put_page(hpage);
}
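
/*
 * A gfn is visible to the guest if it is backed by a memslot, or if it
 * is the guest's magic (paravirt) page.
 */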
static int kvmppc_visible_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
{
	ulong mp_pa = vcpu->arch.magic_page_pa;

	if (!(vcpu->arch.shared->msr & MSR_SF))
		mp_pa = (uint32_t)mp_pa;

	if (unlikely(mp_pa) &&
	    unlikely((mp_pa & KVM_PAM) >> PAGE_SHIFT == gfn)) {
		return 1;
	}

	return kvm_is_visible_gfn(vcpu->kvm, gfn);
}
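
/*
 * Resolve a guest page fault: translate the effective address through
 * the virtual MMU (or treat it as real mode), reflect genuine guest
 * faults back as interrupts, map not-yet-shadowed pages on the host,
 * and hand anything that isn't RAM to the MMIO emulation path.
 */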
int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu,
			    ulong eaddr, int vec)
{
	bool data = (vec == BOOK3S_INTERRUPT_DATA_STORAGE);
	int r = RESUME_GUEST;
	int relocated;
	int page_found = 0;
	struct kvmppc_pte pte;
	bool is_mmio = false;
	bool dr = (vcpu->arch.shared->msr & MSR_DR) ? true : false;
	bool ir = (vcpu->arch.shared->msr & MSR_IR) ? true : false;
	u64 vsid;

	relocated = data ? dr : ir;

	/* Resolve real address if translation turned on */
	if (relocated) {
		page_found = vcpu->arch.mmu.xlate(vcpu, eaddr, &pte, data);
	} else {
		pte.may_execute = true;
		pte.may_read = true;
		pte.may_write = true;
		pte.raddr = eaddr & KVM_PAM;
		pte.eaddr = eaddr;
		pte.vpage = eaddr >> 12;
	}

	switch (vcpu->arch.shared->msr & (MSR_DR|MSR_IR)) {
	case 0:
		pte.vpage |= ((u64)VSID_REAL << (SID_SHIFT - 12));
		break;
	case MSR_DR:
	case MSR_IR:
		vcpu->arch.mmu.esid_to_vsid(vcpu, eaddr >> SID_SHIFT, &vsid);

		if ((vcpu->arch.shared->msr & (MSR_DR|MSR_IR)) == MSR_DR)
			pte.vpage |= ((u64)VSID_REAL_DR << (SID_SHIFT - 12));
		else
			pte.vpage |= ((u64)VSID_REAL_IR << (SID_SHIFT - 12));
		pte.vpage |= vsid;

		if (vsid == -1)
			page_found = -EINVAL;
		break;
	}

	if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
	   (!(vcpu->arch.hflags & BOOK3S_HFLAG_DCBZ32))) {
		/*
		 * If we do the dcbz hack, we have to NX on every execution,
		 * so we can patch the executing code. This renders our guest
		 * NX-less.
		 */
		pte.may_execute = !data;
	}

	if (page_found == -ENOENT) {
		/* Page not found in guest PTE entries */
		struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
		vcpu->arch.shared->dar = kvmppc_get_fault_dar(vcpu);
		vcpu->arch.shared->dsisr = svcpu->fault_dsisr;
		vcpu->arch.shared->msr |=
			(svcpu->shadow_srr1 & 0x00000000f8000000ULL);
		svcpu_put(svcpu);
		kvmppc_book3s_queue_irqprio(vcpu, vec);
	} else if (page_found == -EPERM) {
		/* Storage protection */
		struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
		vcpu->arch.shared->dar = kvmppc_get_fault_dar(vcpu);
		vcpu->arch.shared->dsisr = svcpu->fault_dsisr & ~DSISR_NOHPTE;
		vcpu->arch.shared->dsisr |= DSISR_PROTFAULT;
		vcpu->arch.shared->msr |=
			svcpu->shadow_srr1 & 0x00000000f8000000ULL;
		svcpu_put(svcpu);
		kvmppc_book3s_queue_irqprio(vcpu, vec);
	} else if (page_found == -EINVAL) {
		/* Page not found in guest SLB */
		vcpu->arch.shared->dar = kvmppc_get_fault_dar(vcpu);
		kvmppc_book3s_queue_irqprio(vcpu, vec + 0x80);
	} else if (!is_mmio &&
		   kvmppc_visible_gfn(vcpu, pte.raddr >> PAGE_SHIFT)) {
		/* The guest's PTE is not mapped yet. Map on the host */
		kvmppc_mmu_map_page(vcpu, &pte);
		if (data)
			vcpu->stat.sp_storage++;
		else if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
			(!(vcpu->arch.hflags & BOOK3S_HFLAG_DCBZ32)))
			kvmppc_patch_dcbz(vcpu, &pte);
	} else {
		/* MMIO */
		vcpu->stat.mmio_exits++;
		vcpu->arch.paddr_accessed = pte.raddr;
		vcpu->arch.vaddr_accessed = pte.eaddr;
		r = kvmppc_emulate_mmio(run, vcpu);
		if (r == RESUME_HOST_NV)
			r = RESUME_HOST;
	}

	return r;
}
static inline int get_fpr_index(int i)
{
#ifdef CONFIG_VSX
	i *= 2;
#endif
	return i;
}
/* Give up external provider (FPU, Altivec, VSX) */
void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr)
{
	struct thread_struct *t = &current->thread;
	u64 *vcpu_fpr = vcpu->arch.fpr;
#ifdef CONFIG_VSX
	u64 *vcpu_vsx = vcpu->arch.vsr;
#endif
	u64 *thread_fpr = (u64 *)t->fpr;
	int i;

	if (!(vcpu->arch.guest_owned_ext & msr))
		return;

#ifdef DEBUG_EXT
	printk(KERN_INFO "Giving up ext 0x%lx\n", msr);
#endif

	switch (msr) {
	case MSR_FP:
		giveup_fpu(current);
		for (i = 0; i < ARRAY_SIZE(vcpu->arch.fpr); i++)
			vcpu_fpr[i] = thread_fpr[get_fpr_index(i)];

		vcpu->arch.fpscr = t->fpscr.val;
		break;
	case MSR_VEC:
#ifdef CONFIG_ALTIVEC
		giveup_altivec(current);
		memcpy(vcpu->arch.vr, t->vr, sizeof(vcpu->arch.vr));
		vcpu->arch.vscr = t->vscr;
#endif
		break;
	case MSR_VSX:
#ifdef CONFIG_VSX
		__giveup_vsx(current);
		for (i = 0; i < ARRAY_SIZE(vcpu->arch.vsr); i++)
			vcpu_vsx[i] = thread_fpr[get_fpr_index(i) + 1];
#endif
		break;
	default:
		BUG();
	}

	vcpu->arch.guest_owned_ext &= ~msr;
	current->thread.regs->msr &= ~msr;
	kvmppc_recalc_shadow_msr(vcpu);
}
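
/*
 * Fetch the faulting instruction from guest memory.  If the load
 * misses, synthesize an instruction storage interrupt for the guest
 * instead and ask the caller to retry.
 */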
static int kvmppc_read_inst(struct kvm_vcpu *vcpu)
{
	ulong srr0 = kvmppc_get_pc(vcpu);
	u32 last_inst = kvmppc_get_last_inst(vcpu);
	int ret;

	ret = kvmppc_ld(vcpu, &srr0, sizeof(u32), &last_inst, false);
	if (ret == -ENOENT) {
		ulong msr = vcpu->arch.shared->msr;

		msr = kvmppc_set_field(msr, 33, 33, 1);
		msr = kvmppc_set_field(msr, 34, 36, 0);
		vcpu->arch.shared->msr = kvmppc_set_field(msr, 42, 47, 0);
		kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_INST_STORAGE);
		return EMULATE_AGAIN;
	}

	return EMULATE_DONE;
}
static int kvmppc_check_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr)
{
	/* Need to do paired single emulation? */
	if (!(vcpu->arch.hflags & BOOK3S_HFLAG_PAIRED_SINGLE))
		return EMULATE_DONE;

	/* Read out the instruction */
	if (kvmppc_read_inst(vcpu) == EMULATE_DONE)
		/* Need to emulate */
		return EMULATE_FAIL;

	return EMULATE_AGAIN;
}
/* Handle external providers (FPU, Altivec, VSX) */
static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr,
			     ulong msr)
{
	struct thread_struct *t = &current->thread;
	u64 *vcpu_fpr = vcpu->arch.fpr;
#ifdef CONFIG_VSX
	u64 *vcpu_vsx = vcpu->arch.vsr;
#endif
	u64 *thread_fpr = (u64 *)t->fpr;
	int i;

	/* When we have paired singles, we emulate in software */
	if (vcpu->arch.hflags & BOOK3S_HFLAG_PAIRED_SINGLE)
		return RESUME_GUEST;

	if (!(vcpu->arch.shared->msr & msr)) {
		kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
		return RESUME_GUEST;
	}

	/* We already own the ext */
	if (vcpu->arch.guest_owned_ext & msr) {
		return RESUME_GUEST;
	}

#ifdef DEBUG_EXT
	printk(KERN_INFO "Loading up ext 0x%lx\n", msr);
#endif

	current->thread.regs->msr |= msr;

	switch (msr) {
	case MSR_FP:
		for (i = 0; i < ARRAY_SIZE(vcpu->arch.fpr); i++)
			thread_fpr[get_fpr_index(i)] = vcpu_fpr[i];

		t->fpscr.val = vcpu->arch.fpscr;
		t->fpexc_mode = 0;
		kvmppc_load_up_fpu();
		break;
	case MSR_VEC:
#ifdef CONFIG_ALTIVEC
		memcpy(t->vr, vcpu->arch.vr, sizeof(vcpu->arch.vr));
		t->vscr = vcpu->arch.vscr;
		t->vrsave = -1;
		kvmppc_load_up_altivec();
#endif
		break;
	case MSR_VSX:
#ifdef CONFIG_VSX
		for (i = 0; i < ARRAY_SIZE(vcpu->arch.vsr); i++)
			thread_fpr[get_fpr_index(i) + 1] = vcpu_vsx[i];
		kvmppc_load_up_vsx();
#endif
		break;
	default:
		BUG();
	}

	vcpu->arch.guest_owned_ext |= msr;
	kvmppc_recalc_shadow_msr(vcpu);

	return RESUME_GUEST;
}
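
/*
 * Main exit dispatcher: called on every guest exit with interrupts
 * hard-disabled.  Interrupts are re-enabled here, the exit reason is
 * dispatched, and on the way back into the guest pending host work is
 * checked via kvmppc_prepare_to_enter().
 */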
int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
		       unsigned int exit_nr)
{
	int r = RESUME_HOST;

	vcpu->stat.sum_exits++;

	run->exit_reason = KVM_EXIT_UNKNOWN;
	run->ready_for_interrupt_injection = 1;

	/* We get here with MSR.EE=0, so enable it to be a nice citizen */
	__hard_irq_enable();

	trace_kvm_exit(exit_nr, vcpu);
	kvm_guest_exit();
	preempt_enable();

	switch (exit_nr) {
	case BOOK3S_INTERRUPT_INST_STORAGE:
	{
		struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
		ulong shadow_srr1 = svcpu->shadow_srr1;
		vcpu->stat.pf_instruc++;

#ifdef CONFIG_PPC_BOOK3S_32
		/* We set segments as unused segments when invalidating them. So
		 * treat the respective fault as segment fault. */
		if (svcpu->sr[kvmppc_get_pc(vcpu) >> SID_SHIFT] == SR_INVALID) {
			kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu));
			r = RESUME_GUEST;
			svcpu_put(svcpu);
			break;
		}
#endif
		svcpu_put(svcpu);

		/* only care about PTEG not found errors, but leave NX alone */
		if (shadow_srr1 & 0x40000000) {
			r = kvmppc_handle_pagefault(run, vcpu, kvmppc_get_pc(vcpu), exit_nr);
			vcpu->stat.sp_instruc++;
		} else if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
			  (!(vcpu->arch.hflags & BOOK3S_HFLAG_DCBZ32))) {
			/*
			 * XXX If we do the dcbz hack we use the NX bit to flush & patch the page,
			 *     so we can't use the NX bit inside the guest. Let's cross our fingers
			 *     that no guest that needs the dcbz hack does NX.
			 */
			kvmppc_mmu_pte_flush(vcpu, kvmppc_get_pc(vcpu), ~0xFFFUL);
			r = RESUME_GUEST;
		} else {
			vcpu->arch.shared->msr |= shadow_srr1 & 0x58000000;
			kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
			r = RESUME_GUEST;
		}
		break;
	}
	case BOOK3S_INTERRUPT_DATA_STORAGE:
	{
		ulong dar = kvmppc_get_fault_dar(vcpu);
		struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
		u32 fault_dsisr = svcpu->fault_dsisr;
		vcpu->stat.pf_storage++;

#ifdef CONFIG_PPC_BOOK3S_32
		/* We set segments as unused segments when invalidating them. So
		 * treat the respective fault as segment fault. */
		if ((svcpu->sr[dar >> SID_SHIFT]) == SR_INVALID) {
			kvmppc_mmu_map_segment(vcpu, dar);
			r = RESUME_GUEST;
			svcpu_put(svcpu);
			break;
		}
#endif
		svcpu_put(svcpu);

		/* The only case we need to handle is missing shadow PTEs */
		if (fault_dsisr & DSISR_NOHPTE) {
			r = kvmppc_handle_pagefault(run, vcpu, dar, exit_nr);
		} else {
			vcpu->arch.shared->dar = dar;
			vcpu->arch.shared->dsisr = fault_dsisr;
			kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
			r = RESUME_GUEST;
		}
		break;
	}
	case BOOK3S_INTERRUPT_DATA_SEGMENT:
		if (kvmppc_mmu_map_segment(vcpu, kvmppc_get_fault_dar(vcpu)) < 0) {
			vcpu->arch.shared->dar = kvmppc_get_fault_dar(vcpu);
			kvmppc_book3s_queue_irqprio(vcpu,
				BOOK3S_INTERRUPT_DATA_SEGMENT);
		}
		r = RESUME_GUEST;
		break;
	case BOOK3S_INTERRUPT_INST_SEGMENT:
		if (kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu)) < 0) {
			kvmppc_book3s_queue_irqprio(vcpu,
				BOOK3S_INTERRUPT_INST_SEGMENT);
		}
		r = RESUME_GUEST;
		break;
	/* We're good on these - the host merely wanted to get our attention */
	case BOOK3S_INTERRUPT_DECREMENTER:
	case BOOK3S_INTERRUPT_HV_DECREMENTER:
		vcpu->stat.dec_exits++;
		r = RESUME_GUEST;
		break;
	case BOOK3S_INTERRUPT_EXTERNAL:
	case BOOK3S_INTERRUPT_EXTERNAL_LEVEL:
	case BOOK3S_INTERRUPT_EXTERNAL_HV:
		vcpu->stat.ext_intr_exits++;
		r = RESUME_GUEST;
		break;
	case BOOK3S_INTERRUPT_PERFMON:
		r = RESUME_GUEST;
		break;
	case BOOK3S_INTERRUPT_PROGRAM:
	case BOOK3S_INTERRUPT_H_EMUL_ASSIST:
	{
		enum emulation_result er;
		struct kvmppc_book3s_shadow_vcpu *svcpu;
		ulong flags;

program_interrupt:
		svcpu = svcpu_get(vcpu);
		flags = svcpu->shadow_srr1 & 0x1f0000ull;
		svcpu_put(svcpu);

		if (vcpu->arch.shared->msr & MSR_PR) {
#ifdef EXIT_DEBUG
			printk(KERN_INFO "Userspace triggered 0x700 exception at 0x%lx (0x%x)\n", kvmppc_get_pc(vcpu), kvmppc_get_last_inst(vcpu));
#endif
			if ((kvmppc_get_last_inst(vcpu) & 0xff0007ff) !=
			    (INS_DCBZ & 0xfffffff7)) {
				kvmppc_core_queue_program(vcpu, flags);
				r = RESUME_GUEST;
				break;
			}
		}

		vcpu->stat.emulated_inst_exits++;
		er = kvmppc_emulate_instruction(run, vcpu);
		switch (er) {
		case EMULATE_DONE:
			r = RESUME_GUEST_NV;
			break;
		case EMULATE_AGAIN:
			r = RESUME_GUEST;
			break;
		case EMULATE_FAIL:
			printk(KERN_CRIT "%s: emulation at %lx failed (%08x)\n",
			       __func__, kvmppc_get_pc(vcpu), kvmppc_get_last_inst(vcpu));
			kvmppc_core_queue_program(vcpu, flags);
			r = RESUME_GUEST;
			break;
		case EMULATE_DO_MMIO:
			run->exit_reason = KVM_EXIT_MMIO;
			r = RESUME_HOST_NV;
			break;
		default:
			BUG();
		}
		break;
	}
	case BOOK3S_INTERRUPT_SYSCALL:
		if (vcpu->arch.papr_enabled &&
		    (kvmppc_get_last_inst(vcpu) == 0x44000022) &&
		    !(vcpu->arch.shared->msr & MSR_PR)) {
			/* SC 1 papr hypercalls */
			ulong cmd = kvmppc_get_gpr(vcpu, 3);
			int i;

#ifdef CONFIG_KVM_BOOK3S_64_PR
			if (kvmppc_h_pr(vcpu, cmd) == EMULATE_DONE) {
				r = RESUME_GUEST;
				break;
			}
#endif

			run->papr_hcall.nr = cmd;
			for (i = 0; i < 9; ++i) {
				ulong gpr = kvmppc_get_gpr(vcpu, 4 + i);
				run->papr_hcall.args[i] = gpr;
			}
			run->exit_reason = KVM_EXIT_PAPR_HCALL;
			vcpu->arch.hcall_needed = 1;
			r = RESUME_HOST;
		} else if (vcpu->arch.osi_enabled &&
		    (((u32)kvmppc_get_gpr(vcpu, 3)) == OSI_SC_MAGIC_R3) &&
		    (((u32)kvmppc_get_gpr(vcpu, 4)) == OSI_SC_MAGIC_R4)) {
			/* MOL hypercalls */
			u64 *gprs = run->osi.gprs;
			int i;

			run->exit_reason = KVM_EXIT_OSI;
			for (i = 0; i < 32; i++)
				gprs[i] = kvmppc_get_gpr(vcpu, i);
			vcpu->arch.osi_needed = 1;
			r = RESUME_HOST_NV;
		} else if (!(vcpu->arch.shared->msr & MSR_PR) &&
		    (((u32)kvmppc_get_gpr(vcpu, 0)) == KVM_SC_MAGIC_R0)) {
			/* KVM PV hypercalls */
			kvmppc_set_gpr(vcpu, 3, kvmppc_kvm_pv(vcpu));
			r = RESUME_GUEST;
		} else {
			/* Guest syscalls */
			vcpu->stat.syscall_exits++;
			kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
			r = RESUME_GUEST;
		}
		break;
	case BOOK3S_INTERRUPT_FP_UNAVAIL:
	case BOOK3S_INTERRUPT_ALTIVEC:
	case BOOK3S_INTERRUPT_VSX:
	{
		int ext_msr = 0;

		switch (exit_nr) {
		case BOOK3S_INTERRUPT_FP_UNAVAIL: ext_msr = MSR_FP;  break;
		case BOOK3S_INTERRUPT_ALTIVEC:    ext_msr = MSR_VEC; break;
		case BOOK3S_INTERRUPT_VSX:        ext_msr = MSR_VSX; break;
		}

		switch (kvmppc_check_ext(vcpu, exit_nr)) {
		case EMULATE_DONE:
			/* everything ok - let's enable the ext */
			r = kvmppc_handle_ext(vcpu, exit_nr, ext_msr);
			break;
		case EMULATE_FAIL:
			/* we need to emulate this instruction */
			goto program_interrupt;
			break;
		default:
			/* nothing to worry about - go again */
			break;
		}
		break;
	}
	case BOOK3S_INTERRUPT_ALIGNMENT:
		if (kvmppc_read_inst(vcpu) == EMULATE_DONE) {
			vcpu->arch.shared->dsisr = kvmppc_alignment_dsisr(vcpu,
				kvmppc_get_last_inst(vcpu));
			vcpu->arch.shared->dar = kvmppc_alignment_dar(vcpu,
				kvmppc_get_last_inst(vcpu));
			kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
		}
		r = RESUME_GUEST;
		break;
	case BOOK3S_INTERRUPT_MACHINE_CHECK:
	case BOOK3S_INTERRUPT_TRACE:
		kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
		r = RESUME_GUEST;
		break;
	default:
	{
		struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
		ulong shadow_srr1 = svcpu->shadow_srr1;
		svcpu_put(svcpu);
		/* Ugh - bork here! What did we get? */
		printk(KERN_EMERG "exit_nr=0x%x | pc=0x%lx | msr=0x%lx\n",
			exit_nr, kvmppc_get_pc(vcpu), shadow_srr1);
		r = RESUME_HOST;
		BUG();
		break;
	}
	}

	preempt_disable();
	if (!(r & RESUME_HOST)) {
		/* To avoid clobbering exit_reason, only check for signals if
		 * we aren't already exiting to userspace for some other
		 * reason. */

		/*
		 * Interrupts could be timers for the guest which we have to
		 * inject again, so let's postpone them until we're in the guest
		 * and if we really did time things so badly, then we just exit
		 * again due to a host external interrupt.
		 */
		__hard_irq_disable();
		if (kvmppc_prepare_to_enter(vcpu)) {
			/* local_irq_enable(); */
			run->exit_reason = KVM_EXIT_INTR;
			r = -EINTR;
		} else {
			/* Going back to guest */
			kvm_guest_enter();
		}
	}

	trace_kvm_book3s_reenter(r, vcpu);

	return r;
}
int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
	int i;

	sregs->pvr = vcpu->arch.pvr;

	sregs->u.s.sdr1 = to_book3s(vcpu)->sdr1;
	if (vcpu->arch.hflags & BOOK3S_HFLAG_SLB) {
		for (i = 0; i < 64; i++) {
			sregs->u.s.ppc64.slb[i].slbe = vcpu->arch.slb[i].orige | i;
			sregs->u.s.ppc64.slb[i].slbv = vcpu->arch.slb[i].origv;
		}
	} else {
		for (i = 0; i < 16; i++)
			sregs->u.s.ppc32.sr[i] = vcpu->arch.shared->sr[i];

		for (i = 0; i < 8; i++) {
			sregs->u.s.ppc32.ibat[i] = vcpu3s->ibat[i].raw;
			sregs->u.s.ppc32.dbat[i] = vcpu3s->dbat[i].raw;
		}
	}

	return 0;
}
int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
	int i;

	kvmppc_set_pvr(vcpu, sregs->pvr);

	vcpu3s->sdr1 = sregs->u.s.sdr1;
	if (vcpu->arch.hflags & BOOK3S_HFLAG_SLB) {
		for (i = 0; i < 64; i++) {
			vcpu->arch.mmu.slbmte(vcpu, sregs->u.s.ppc64.slb[i].slbv,
					      sregs->u.s.ppc64.slb[i].slbe);
		}
	} else {
		for (i = 0; i < 16; i++) {
			vcpu->arch.mmu.mtsrin(vcpu, i, sregs->u.s.ppc32.sr[i]);
		}
		for (i = 0; i < 8; i++) {
			kvmppc_set_bat(vcpu, &(vcpu3s->ibat[i]), false,
				       (u32)sregs->u.s.ppc32.ibat[i]);
			kvmppc_set_bat(vcpu, &(vcpu3s->ibat[i]), true,
				       (u32)(sregs->u.s.ppc32.ibat[i] >> 32));
			kvmppc_set_bat(vcpu, &(vcpu3s->dbat[i]), false,
				       (u32)sregs->u.s.ppc32.dbat[i]);
			kvmppc_set_bat(vcpu, &(vcpu3s->dbat[i]), true,
				       (u32)(sregs->u.s.ppc32.dbat[i] >> 32));
		}
	}

	/* Flush the MMU after messing with the segments */
	kvmppc_mmu_pte_flush(vcpu, 0, 0);

	return 0;
}
int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
{
	int r = -EINVAL;

	switch (reg->id) {
	case KVM_REG_PPC_HIOR:
		r = copy_to_user((u64 __user *)(long)reg->addr,
				 &to_book3s(vcpu)->hior, sizeof(u64));
		break;
	default:
		break;
	}

	return r;
}

int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
{
	int r = -EINVAL;

	switch (reg->id) {
	case KVM_REG_PPC_HIOR:
		r = copy_from_user(&to_book3s(vcpu)->hior,
				   (u64 __user *)(long)reg->addr, sizeof(u64));
		if (!r)
			to_book3s(vcpu)->hior_explicit = true;
		break;
	default:
		break;
	}

	return r;
}
int kvmppc_core_check_processor_compat(void)
{
	return 0;
}
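
/*
 * Allocate and initialize a vcpu: the book3s container, the shadow
 * vcpu, and the shared page (of which only the last 4k are handed to
 * the guest), then pick a default PVR and set up the shadow MMU.
 */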
struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
{
	struct kvmppc_vcpu_book3s *vcpu_book3s;
	struct kvm_vcpu *vcpu;
	int err = -ENOMEM;
	unsigned long p;

	vcpu_book3s = vzalloc(sizeof(struct kvmppc_vcpu_book3s));
	if (!vcpu_book3s)
		goto out;

	vcpu_book3s->shadow_vcpu = (struct kvmppc_book3s_shadow_vcpu *)
		kzalloc(sizeof(*vcpu_book3s->shadow_vcpu), GFP_KERNEL);
	if (!vcpu_book3s->shadow_vcpu)
		goto free_vcpu;

	vcpu = &vcpu_book3s->vcpu;
	err = kvm_vcpu_init(vcpu, kvm, id);
	if (err)
		goto free_shadow_vcpu;

	p = __get_free_page(GFP_KERNEL|__GFP_ZERO);
	if (!p) {
		err = -ENOMEM;
		goto uninit_vcpu;
	}
	/* the real shared page fills the last 4k of our page */
	vcpu->arch.shared = (void *)(p + PAGE_SIZE - 4096);

#ifdef CONFIG_PPC_BOOK3S_64
	/* default to book3s_64 (970fx) */
	vcpu->arch.pvr = 0x3C0301;
#else
	/* default to book3s_32 (750) */
	vcpu->arch.pvr = 0x84202;
#endif
	kvmppc_set_pvr(vcpu, vcpu->arch.pvr);
	vcpu->arch.slb_nr = 64;

	vcpu->arch.shadow_msr = MSR_USER64;

	err = kvmppc_mmu_init(vcpu);
	if (err < 0)
		goto uninit_vcpu;

	return vcpu;

uninit_vcpu:
	kvm_vcpu_uninit(vcpu);
free_shadow_vcpu:
	kfree(vcpu_book3s->shadow_vcpu);
free_vcpu:
	vfree(vcpu_book3s);
out:
	return ERR_PTR(err);
}
void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);

	free_page((unsigned long)vcpu->arch.shared & PAGE_MASK);
	kvm_vcpu_uninit(vcpu);
	kfree(vcpu_book3s->shadow_vcpu);
	vfree(vcpu_book3s);
}
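
/*
 * Run a vcpu: save the host's FPU/Altivec/VSX state on the stack,
 * enter the guest via __kvmppc_vcpu_run(), then reclaim whatever state
 * the guest was using and restore the host state afterwards.
 */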
int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
{
	int ret;
	double fpr[32][TS_FPRWIDTH];
	unsigned int fpscr;
	int fpexc_mode;
#ifdef CONFIG_ALTIVEC
	vector128 vr[32];
	vector128 vscr;
	unsigned long uninitialized_var(vrsave);
	int used_vr;
#endif
#ifdef CONFIG_VSX
	int used_vsr;
#endif
	ulong ext_msr;

	preempt_disable();

	/* Check if we can run the vcpu at all */
	if (!vcpu->arch.sane) {
		kvm_run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = -EINVAL;
		goto out;
	}

	/*
	 * Interrupts could be timers for the guest which we have to inject
	 * again, so let's postpone them until we're in the guest and if we
	 * really did time things so badly, then we just exit again due to
	 * a host external interrupt.
	 */
	__hard_irq_disable();
	if (kvmppc_prepare_to_enter(vcpu)) {
		__hard_irq_enable();
		kvm_run->exit_reason = KVM_EXIT_INTR;
		ret = -EINTR;
		goto out;
	}

	/* Save FPU state in stack */
	if (current->thread.regs->msr & MSR_FP)
		giveup_fpu(current);
	memcpy(fpr, current->thread.fpr, sizeof(current->thread.fpr));
	fpscr = current->thread.fpscr.val;
	fpexc_mode = current->thread.fpexc_mode;

#ifdef CONFIG_ALTIVEC
	/* Save Altivec state in stack */
	used_vr = current->thread.used_vr;
	if (used_vr) {
		if (current->thread.regs->msr & MSR_VEC)
			giveup_altivec(current);
		memcpy(vr, current->thread.vr, sizeof(current->thread.vr));
		vscr = current->thread.vscr;
		vrsave = current->thread.vrsave;
	}
#endif

#ifdef CONFIG_VSX
	/* Save VSX state in stack */
	used_vsr = current->thread.used_vsr;
	if (used_vsr && (current->thread.regs->msr & MSR_VSX))
		__giveup_vsx(current);
#endif

	/* Remember the MSR with disabled extensions */
	ext_msr = current->thread.regs->msr;

	/* Preload FPU if it's enabled */
	if (vcpu->arch.shared->msr & MSR_FP)
		kvmppc_handle_ext(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, MSR_FP);

	kvm_guest_enter();

	ret = __kvmppc_vcpu_run(kvm_run, vcpu);

	/* No need for kvm_guest_exit. It's done in handle_exit.
	   We also get here with interrupts enabled. */

	current->thread.regs->msr = ext_msr;

	/* Make sure we save the guest FPU/Altivec/VSX state */
	kvmppc_giveup_ext(vcpu, MSR_FP);
	kvmppc_giveup_ext(vcpu, MSR_VEC);
	kvmppc_giveup_ext(vcpu, MSR_VSX);

	/* Restore FPU state from stack */
	memcpy(current->thread.fpr, fpr, sizeof(current->thread.fpr));
	current->thread.fpscr.val = fpscr;
	current->thread.fpexc_mode = fpexc_mode;

#ifdef CONFIG_ALTIVEC
	/* Restore Altivec state from stack */
	if (used_vr && current->thread.used_vr) {
		memcpy(current->thread.vr, vr, sizeof(current->thread.vr));
		current->thread.vscr = vscr;
		current->thread.vrsave = vrsave;
	}
	current->thread.used_vr = used_vr;
#endif

#ifdef CONFIG_VSX
	current->thread.used_vsr = used_vsr;
#endif

out:
	vcpu->mode = OUTSIDE_GUEST_MODE;
	preempt_enable();
	return ret;
}
/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
			       struct kvm_dirty_log *log)
{
	struct kvm_memory_slot *memslot;
	struct kvm_vcpu *vcpu;
	ulong ga, ga_end;
	int is_dirty = 0;
	int r;
	unsigned long n;

	mutex_lock(&kvm->slots_lock);

	r = kvm_get_dirty_log(kvm, log, &is_dirty);
	if (r)
		goto out;

	/* If nothing is dirty, don't bother messing with page tables. */
	if (is_dirty) {
		memslot = id_to_memslot(kvm->memslots, log->slot);

		ga = memslot->base_gfn << PAGE_SHIFT;
		ga_end = ga + (memslot->npages << PAGE_SHIFT);

		kvm_for_each_vcpu(n, vcpu, kvm)
			kvmppc_mmu_pte_pflush(vcpu, ga, ga_end);

		n = kvm_dirty_bitmap_bytes(memslot);
		memset(memslot->dirty_bitmap, 0, n);
	}

	r = 0;
out:
	mutex_unlock(&kvm->slots_lock);
	return r;
}
#ifdef CONFIG_PPC64
int kvm_vm_ioctl_get_smmu_info(struct kvm *kvm, struct kvm_ppc_smmu_info *info)
{
	/* No flags */
	info->flags = 0;

	/* SLB is always 64 entries */
	info->slb_size = 64;

	/* Standard 4k base page size segment */
	info->sps[0].page_shift = 12;
	info->sps[0].slb_enc = 0;
	info->sps[0].enc[0].page_shift = 12;
	info->sps[0].enc[0].pte_enc = 0;

	/* Standard 16M large page size segment */
	info->sps[1].page_shift = 24;
	info->sps[1].slb_enc = SLB_VSID_L;
	info->sps[1].enc[0].page_shift = 24;
	info->sps[1].enc[0].pte_enc = 0;

	return 0;
}
#endif /* CONFIG_PPC64 */
int kvmppc_core_prepare_memory_region(struct kvm *kvm,
				      struct kvm_userspace_memory_region *mem)
{
	return 0;
}

void kvmppc_core_commit_memory_region(struct kvm *kvm,
				      struct kvm_userspace_memory_region *mem)
{
}

int kvmppc_core_init_vm(struct kvm *kvm)
{
#ifdef CONFIG_PPC64
	INIT_LIST_HEAD(&kvm->arch.spapr_tce_tables);
#endif

	return 0;
}

void kvmppc_core_destroy_vm(struct kvm *kvm)
{
#ifdef CONFIG_PPC64
	WARN_ON(!list_empty(&kvm->arch.spapr_tce_tables));
#endif
}
static int kvmppc_book3s_init(void)
{
	int r;

	r = kvm_init(NULL, sizeof(struct kvmppc_vcpu_book3s), 0,
		     THIS_MODULE);

	if (r)
		return r;

	r = kvmppc_mmu_hpte_sysinit();

	return r;
}

static void kvmppc_book3s_exit(void)
{
	kvmppc_mmu_hpte_sysexit();
	kvm_exit();
}

module_init(kvmppc_book3s_init);
module_exit(kvmppc_book3s_exit);