/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * derived from drivers/kvm/kvm_main.c
 *
 * Copyright (C) 2006 Qumranet, Inc.
 *
 * Authors:
 *   Avi Kivity   <avi@qumranet.com>
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#include "kvm.h"
#include "x86.h"
#include "segment_descriptor.h"
#include "irq.h"

#include <linux/kvm.h>
#include <linux/fs.h>
#include <linux/vmalloc.h>
#include <linux/module.h>

#include <asm/uaccess.h>

#define MAX_IO_MSRS 256
#define CR0_RESERVED_BITS \
	(~(unsigned long)(X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS \
			  | X86_CR0_ET | X86_CR0_NE | X86_CR0_WP | X86_CR0_AM \
			  | X86_CR0_NW | X86_CR0_CD | X86_CR0_PG))
#define CR4_RESERVED_BITS \
	(~(unsigned long)(X86_CR4_VME | X86_CR4_PVI | X86_CR4_TSD | X86_CR4_DE\
			  | X86_CR4_PSE | X86_CR4_PAE | X86_CR4_MCE \
			  | X86_CR4_PGE | X86_CR4_PCE | X86_CR4_OSFXSR \
			  | X86_CR4_OSXMMEXCPT | X86_CR4_VMXE))

#define CR8_RESERVED_BITS (~(unsigned long)X86_CR8_TPR)
#define EFER_RESERVED_BITS 0xfffffffffffff2fe

#define STAT_OFFSET(x) offsetof(struct kvm_vcpu, stat.x)

struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "pf_fixed", STAT_OFFSET(pf_fixed) },
	{ "pf_guest", STAT_OFFSET(pf_guest) },
	{ "tlb_flush", STAT_OFFSET(tlb_flush) },
	{ "invlpg", STAT_OFFSET(invlpg) },
	{ "exits", STAT_OFFSET(exits) },
	{ "io_exits", STAT_OFFSET(io_exits) },
	{ "mmio_exits", STAT_OFFSET(mmio_exits) },
	{ "signal_exits", STAT_OFFSET(signal_exits) },
	{ "irq_window", STAT_OFFSET(irq_window_exits) },
	{ "halt_exits", STAT_OFFSET(halt_exits) },
	{ "halt_wakeup", STAT_OFFSET(halt_wakeup) },
	{ "request_irq", STAT_OFFSET(request_irq_exits) },
	{ "irq_exits", STAT_OFFSET(irq_exits) },
	{ "light_exits", STAT_OFFSET(light_exits) },
	{ "efer_reload", STAT_OFFSET(efer_reload) },
	{ NULL }
};
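
/*
 * Read the base address out of a host segment descriptor: walk the host
 * GDT (or the LDT when bit 2 of the selector is set) and assemble the
 * base from its scattered low/mid/high fields.
 */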
unsigned long segment_base(u16 selector)
{
	struct descriptor_table gdt;
	struct segment_descriptor *d;
	unsigned long table_base;
	unsigned long v;

	if (selector == 0)
		return 0;

	asm("sgdt %0" : "=m"(gdt));
	table_base = gdt.base;

	if (selector & 4) {		/* from ldt */
		u16 ldt_selector;

		asm("sldt %0" : "=g"(ldt_selector));
		table_base = segment_base(ldt_selector);
	}
	d = (struct segment_descriptor *)(table_base + (selector & ~7));
	v = d->base_low | ((unsigned long)d->base_mid << 16) |
		((unsigned long)d->base_high << 24);
#ifdef CONFIG_X86_64
	if (d->system == 0 && (d->type == 2 || d->type == 9 || d->type == 11))
		v |= ((unsigned long) \
		     ((struct segment_descriptor_64 *)d)->base_higher) << 32;
#endif
	return v;
}
EXPORT_SYMBOL_GPL(segment_base);

u64 kvm_get_apic_base(struct kvm_vcpu *vcpu)
{
	if (irqchip_in_kernel(vcpu->kvm))
		return vcpu->apic_base;
	else
		return vcpu->apic_base;
}
EXPORT_SYMBOL_GPL(kvm_get_apic_base);

void kvm_set_apic_base(struct kvm_vcpu *vcpu, u64 data)
{
	/* TODO: reserve bits check */
	if (irqchip_in_kernel(vcpu->kvm))
		kvm_lapic_set_base(vcpu, data);
	else
		vcpu->apic_base = data;
}
EXPORT_SYMBOL_GPL(kvm_set_apic_base);

static void inject_gp(struct kvm_vcpu *vcpu)
{
	kvm_x86_ops->inject_gp(vcpu, 0);
}

/*
 * Load the pae pdptrs.  Return true if they are all valid.
 */
int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3)
{
	gfn_t pdpt_gfn = cr3 >> PAGE_SHIFT;
	unsigned offset = ((cr3 & (PAGE_SIZE-1)) >> 5) << 2;
	int i;
	int ret;
	u64 pdpte[ARRAY_SIZE(vcpu->pdptrs)];

	mutex_lock(&vcpu->kvm->lock);
	ret = kvm_read_guest_page(vcpu->kvm, pdpt_gfn, pdpte,
				  offset * sizeof(u64), sizeof(pdpte));
	if (ret < 0) {
		ret = 0;
		goto out;
	}
	for (i = 0; i < ARRAY_SIZE(pdpte); ++i) {
		if ((pdpte[i] & 1) && (pdpte[i] & 0xfffffff0000001e6ull)) {
			ret = 0;
			goto out;
		}
	}
	ret = 1;

	memcpy(vcpu->pdptrs, pdpte, sizeof(vcpu->pdptrs));
out:
	mutex_unlock(&vcpu->kvm->lock);

	return ret;
}
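
/*
 * Emulate a guest write to CR0: reject combinations a real CPU would
 * fault on (reserved bits, NW without CD, PG without PE, entering long
 * mode without PAE or with CS.L set), then hand the value to the vendor
 * backend and rebuild the shadow MMU context.
 */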
void set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
{
	if (cr0 & CR0_RESERVED_BITS) {
		printk(KERN_DEBUG "set_cr0: 0x%lx #GP, reserved bits 0x%lx\n",
		       cr0, vcpu->cr0);
		inject_gp(vcpu);
		return;
	}

	if ((cr0 & X86_CR0_NW) && !(cr0 & X86_CR0_CD)) {
		printk(KERN_DEBUG "set_cr0: #GP, CD == 0 && NW == 1\n");
		inject_gp(vcpu);
		return;
	}

	if ((cr0 & X86_CR0_PG) && !(cr0 & X86_CR0_PE)) {
		printk(KERN_DEBUG "set_cr0: #GP, set PG flag "
		       "and a clear PE flag\n");
		inject_gp(vcpu);
		return;
	}

	if (!is_paging(vcpu) && (cr0 & X86_CR0_PG)) {
#ifdef CONFIG_X86_64
		if ((vcpu->shadow_efer & EFER_LME)) {
			int cs_db, cs_l;

			if (!is_pae(vcpu)) {
				printk(KERN_DEBUG "set_cr0: #GP, start paging "
				       "in long mode while PAE is disabled\n");
				inject_gp(vcpu);
				return;
			}
			kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
			if (cs_l) {
				printk(KERN_DEBUG "set_cr0: #GP, start paging "
				       "in long mode while CS.L == 1\n");
				inject_gp(vcpu);
				return;
			}
		} else
#endif
		if (is_pae(vcpu) && !load_pdptrs(vcpu, vcpu->cr3)) {
			printk(KERN_DEBUG "set_cr0: #GP, pdptrs "
			       "reserved bits\n");
			inject_gp(vcpu);
			return;
		}
	}

	kvm_x86_ops->set_cr0(vcpu, cr0);
	vcpu->cr0 = cr0;

	mutex_lock(&vcpu->kvm->lock);
	kvm_mmu_reset_context(vcpu);
	mutex_unlock(&vcpu->kvm->lock);
	return;
}
EXPORT_SYMBOL_GPL(set_cr0);

void lmsw(struct kvm_vcpu *vcpu, unsigned long msw)
{
	set_cr0(vcpu, (vcpu->cr0 & ~0x0ful) | (msw & 0x0f));
}
EXPORT_SYMBOL_GPL(lmsw);

void set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
{
	if (cr4 & CR4_RESERVED_BITS) {
		printk(KERN_DEBUG "set_cr4: #GP, reserved bits\n");
		inject_gp(vcpu);
		return;
	}

	if (is_long_mode(vcpu)) {
		if (!(cr4 & X86_CR4_PAE)) {
			printk(KERN_DEBUG "set_cr4: #GP, clearing PAE while "
			       "in long mode\n");
			inject_gp(vcpu);
			return;
		}
	} else if (is_paging(vcpu) && !is_pae(vcpu) && (cr4 & X86_CR4_PAE)
		   && !load_pdptrs(vcpu, vcpu->cr3)) {
		printk(KERN_DEBUG "set_cr4: #GP, pdptrs reserved bits\n");
		inject_gp(vcpu);
		return;
	}

	if (cr4 & X86_CR4_VMXE) {
		printk(KERN_DEBUG "set_cr4: #GP, setting VMXE\n");
		inject_gp(vcpu);
		return;
	}
	kvm_x86_ops->set_cr4(vcpu, cr4);
	vcpu->cr4 = cr4;
	mutex_lock(&vcpu->kvm->lock);
	kvm_mmu_reset_context(vcpu);
	mutex_unlock(&vcpu->kvm->lock);
}
EXPORT_SYMBOL_GPL(set_cr4);
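
/*
 * Emulate a guest CR3 load: validate reserved bits (and, for PAE, the
 * PDPTEs), then point the shadow MMU at the new root if the address
 * actually maps to a memslot.
 */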
void set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
{
	if (is_long_mode(vcpu)) {
		if (cr3 & CR3_L_MODE_RESERVED_BITS) {
			printk(KERN_DEBUG "set_cr3: #GP, reserved bits\n");
			inject_gp(vcpu);
			return;
		}
	} else {
		if (is_pae(vcpu)) {
			if (cr3 & CR3_PAE_RESERVED_BITS) {
				printk(KERN_DEBUG
				       "set_cr3: #GP, reserved bits\n");
				inject_gp(vcpu);
				return;
			}
			if (is_paging(vcpu) && !load_pdptrs(vcpu, cr3)) {
				printk(KERN_DEBUG "set_cr3: #GP, pdptrs "
				       "reserved bits\n");
				inject_gp(vcpu);
				return;
			}
		}
		/*
		 * We don't check reserved bits in nonpae mode, because
		 * this isn't enforced, and VMware depends on this.
		 */
	}

	mutex_lock(&vcpu->kvm->lock);
	/*
	 * Does the new cr3 value map to physical memory? (Note, we
	 * catch an invalid cr3 even in real-mode, because it would
	 * cause trouble later on when we turn on paging anyway.)
	 *
	 * A real CPU would silently accept an invalid cr3 and would
	 * attempt to use it - with largely undefined (and often hard
	 * to debug) behavior on the guest side.
	 */
	if (unlikely(!gfn_to_memslot(vcpu->kvm, cr3 >> PAGE_SHIFT)))
		inject_gp(vcpu);
	else {
		vcpu->cr3 = cr3;
		vcpu->mmu.new_cr3(vcpu);
	}
	mutex_unlock(&vcpu->kvm->lock);
}
EXPORT_SYMBOL_GPL(set_cr3);
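
/*
 * CR8 mirrors the local APIC TPR.  With an in-kernel irqchip the value
 * lives in the emulated lapic; otherwise it is kept in the vcpu and
 * maintained by userspace.
 */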
void set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8)
{
	if (cr8 & CR8_RESERVED_BITS) {
		printk(KERN_DEBUG "set_cr8: #GP, reserved bits 0x%lx\n", cr8);
		inject_gp(vcpu);
		return;
	}
	if (irqchip_in_kernel(vcpu->kvm))
		kvm_lapic_set_tpr(vcpu, cr8);
	else
		vcpu->cr8 = cr8;
}
EXPORT_SYMBOL_GPL(set_cr8);

unsigned long get_cr8(struct kvm_vcpu *vcpu)
{
	if (irqchip_in_kernel(vcpu->kvm))
		return kvm_lapic_get_cr8(vcpu);
	else
		return vcpu->cr8;
}
EXPORT_SYMBOL_GPL(get_cr8);

/*
 * List of msr numbers which we expose to userspace through KVM_GET_MSRS
 * and KVM_SET_MSRS, and KVM_GET_MSR_INDEX_LIST.
 *
 * This list is modified at module load time to reflect the
 * capabilities of the host cpu.
 */
static u32 msrs_to_save[] = {
	MSR_IA32_SYSENTER_CS, MSR_IA32_SYSENTER_ESP, MSR_IA32_SYSENTER_EIP,
	MSR_K6_STAR,
#ifdef CONFIG_X86_64
	MSR_CSTAR, MSR_KERNEL_GS_BASE, MSR_SYSCALL_MASK, MSR_LSTAR,
#endif
	MSR_IA32_TIME_STAMP_COUNTER,
};

static unsigned num_msrs_to_save;

static u32 emulated_msrs[] = {
	MSR_IA32_MISC_ENABLE,
};

#ifdef CONFIG_X86_64
static void set_efer(struct kvm_vcpu *vcpu, u64 efer)
{
	if (efer & EFER_RESERVED_BITS) {
		printk(KERN_DEBUG "set_efer: 0x%llx #GP, reserved bits\n",
		       efer);
		inject_gp(vcpu);
		return;
	}

	if (is_paging(vcpu)
	    && (vcpu->shadow_efer & EFER_LME) != (efer & EFER_LME)) {
		printk(KERN_DEBUG "set_efer: #GP, change LME while paging\n");
		inject_gp(vcpu);
		return;
	}

	kvm_x86_ops->set_efer(vcpu, efer);

	efer &= ~EFER_LMA;
	efer |= vcpu->shadow_efer & EFER_LMA;

	vcpu->shadow_efer = efer;
}
#endif

/*
 * Writes msr value into the appropriate "register".
 * Returns 0 on success, non-0 otherwise.
 * Assumes vcpu_load() was already called.
 */
int kvm_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
{
	return kvm_x86_ops->set_msr(vcpu, msr_index, data);
}

/*
 * Adapt set_msr() to msr_io()'s calling convention
 */
static int do_set_msr(struct kvm_vcpu *vcpu, unsigned index, u64 *data)
{
	return kvm_set_msr(vcpu, index, *data);
}

int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)
{
	switch (msr) {
#ifdef CONFIG_X86_64
	case MSR_EFER:
		set_efer(vcpu, data);
		break;
#endif
	case MSR_IA32_MC0_STATUS:
		pr_unimpl(vcpu, "%s: MSR_IA32_MC0_STATUS 0x%llx, nop\n",
			  __FUNCTION__, data);
		break;
	case MSR_IA32_MCG_STATUS:
		pr_unimpl(vcpu, "%s: MSR_IA32_MCG_STATUS 0x%llx, nop\n",
			  __FUNCTION__, data);
		break;
	case MSR_IA32_UCODE_REV:
	case MSR_IA32_UCODE_WRITE:
	case 0x200 ... 0x2ff: /* MTRRs */
		break;
	case MSR_IA32_APICBASE:
		kvm_set_apic_base(vcpu, data);
		break;
	case MSR_IA32_MISC_ENABLE:
		vcpu->ia32_misc_enable_msr = data;
		break;
	default:
		pr_unimpl(vcpu, "unhandled wrmsr: 0x%x\n", msr);
		return 1;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_set_msr_common);
/*
 * Reads an msr value (of 'msr_index') into 'pdata'.
 * Returns 0 on success, non-0 otherwise.
 * Assumes vcpu_load() was already called.
 */
int kvm_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
{
	return kvm_x86_ops->get_msr(vcpu, msr_index, pdata);
}

int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
{
	u64 data;

	switch (msr) {
	case 0xc0010010: /* SYSCFG */
	case 0xc0010015: /* HWCR */
	case MSR_IA32_PLATFORM_ID:
	case MSR_IA32_P5_MC_ADDR:
	case MSR_IA32_P5_MC_TYPE:
	case MSR_IA32_MC0_CTL:
	case MSR_IA32_MCG_STATUS:
	case MSR_IA32_MCG_CAP:
	case MSR_IA32_MC0_MISC:
	case MSR_IA32_MC0_MISC+4:
	case MSR_IA32_MC0_MISC+8:
	case MSR_IA32_MC0_MISC+12:
	case MSR_IA32_MC0_MISC+16:
	case MSR_IA32_UCODE_REV:
	case MSR_IA32_PERF_STATUS:
	case MSR_IA32_EBL_CR_POWERON:
		/* MTRR registers */
	case 0xfe:
	case 0x200 ... 0x2ff:
		data = 0;
		break;
	case 0xcd: /* fsb frequency */
		data = 3;
		break;
	case MSR_IA32_APICBASE:
		data = kvm_get_apic_base(vcpu);
		break;
	case MSR_IA32_MISC_ENABLE:
		data = vcpu->ia32_misc_enable_msr;
		break;
#ifdef CONFIG_X86_64
	case MSR_EFER:
		data = vcpu->shadow_efer;
		break;
#endif
	default:
		pr_unimpl(vcpu, "unhandled rdmsr: 0x%x\n", msr);
		return 1;
	}
	*pdata = data;
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_get_msr_common);

/*
 * Read or write a bunch of msrs. All parameters are kernel addresses.
 *
 * @return number of msrs set successfully.
 */
static int __msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs *msrs,
		    struct kvm_msr_entry *entries,
		    int (*do_msr)(struct kvm_vcpu *vcpu,
				  unsigned index, u64 *data))
{
	int i;

	vcpu_load(vcpu);

	for (i = 0; i < msrs->nmsrs; ++i)
		if (do_msr(vcpu, entries[i].index, &entries[i].data))
			break;

	vcpu_put(vcpu);

	return i;
}

/*
 * Read or write a bunch of msrs. Parameters are user addresses.
 *
 * @return number of msrs set successfully.
 */
static int msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs __user *user_msrs,
		  int (*do_msr)(struct kvm_vcpu *vcpu,
				unsigned index, u64 *data),
		  int writeback)
{
	struct kvm_msrs msrs;
	struct kvm_msr_entry *entries;
	int r, n;
	unsigned size;

	r = -EFAULT;
	if (copy_from_user(&msrs, user_msrs, sizeof msrs))
		goto out;

	r = -E2BIG;
	if (msrs.nmsrs >= MAX_IO_MSRS)
		goto out;

	r = -ENOMEM;
	size = sizeof(struct kvm_msr_entry) * msrs.nmsrs;
	entries = vmalloc(size);
	if (!entries)
		goto out;

	r = -EFAULT;
	if (copy_from_user(entries, user_msrs->entries, size))
		goto out_free;

	r = n = __msr_io(vcpu, &msrs, entries, do_msr);
	if (r < 0)
		goto out_free;

	r = -EFAULT;
	if (writeback && copy_to_user(user_msrs->entries, entries, size))
		goto out_free;

	r = n;

out_free:
	vfree(entries);
out:
	return r;
}
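
/*
 * Arch-specific ioctls on the /dev/kvm device file.  Currently only
 * KVM_GET_MSR_INDEX_LIST is handled: it reports the MSRs that may be
 * passed to KVM_GET_MSRS/KVM_SET_MSRS.  An illustrative (not from this
 * file) userspace probe would be:
 *
 *	struct kvm_msr_list list = { .nmsrs = 0 };
 *	ioctl(kvm_fd, KVM_GET_MSR_INDEX_LIST, &list);
 *	// returns -E2BIG with list.nmsrs set to the required count
 */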
long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg)
{
	void __user *argp = (void __user *)arg;
	long r;

	switch (ioctl) {
	case KVM_GET_MSR_INDEX_LIST: {
		struct kvm_msr_list __user *user_msr_list = argp;
		struct kvm_msr_list msr_list;
		unsigned n;

		r = -EFAULT;
		if (copy_from_user(&msr_list, user_msr_list, sizeof msr_list))
			goto out;
		n = msr_list.nmsrs;
		msr_list.nmsrs = num_msrs_to_save + ARRAY_SIZE(emulated_msrs);
		if (copy_to_user(user_msr_list, &msr_list, sizeof msr_list))
			goto out;
		r = -E2BIG;
		if (n < num_msrs_to_save)
			goto out;
		r = -EFAULT;
		if (copy_to_user(user_msr_list->indices, &msrs_to_save,
				 num_msrs_to_save * sizeof(u32)))
			goto out;
		if (copy_to_user(user_msr_list->indices
				 + num_msrs_to_save * sizeof(u32),
				 &emulated_msrs,
				 ARRAY_SIZE(emulated_msrs) * sizeof(u32)))
			goto out;
		r = 0;
		break;
	}
	default:
		r = -EINVAL;
	}
out:
	return r;
}

void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	kvm_x86_ops->vcpu_load(vcpu, cpu);
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	kvm_x86_ops->vcpu_put(vcpu);
}
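
/*
 * If the host does not have EFER.NX enabled, hide the NX bit (bit 20 of
 * CPUID leaf 0x80000001 EDX) from the guest so it does not try to use a
 * page-table feature the hardware would reject.
 */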
static void cpuid_fix_nx_cap(struct kvm_vcpu *vcpu)
{
	u64 efer;
	int i;
	struct kvm_cpuid_entry *e, *entry;

	rdmsrl(MSR_EFER, efer);
	entry = NULL;
	for (i = 0; i < vcpu->cpuid_nent; ++i) {
		e = &vcpu->cpuid_entries[i];
		if (e->function == 0x80000001) {
			entry = e;
			break;
		}
	}
	if (entry && (entry->edx & (1 << 20)) && !(efer & EFER_NX)) {
		entry->edx &= ~(1 << 20);
		printk(KERN_INFO "kvm: guest NX capability removed\n");
	}
}

static int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu,
				    struct kvm_cpuid *cpuid,
				    struct kvm_cpuid_entry __user *entries)
{
	int r;

	r = -E2BIG;
	if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
		goto out;
	r = -EFAULT;
	if (copy_from_user(&vcpu->cpuid_entries, entries,
			   cpuid->nent * sizeof(struct kvm_cpuid_entry)))
		goto out;
	vcpu->cpuid_nent = cpuid->nent;
	cpuid_fix_nx_cap(vcpu);
	return 0;

out:
	return r;
}

static int kvm_vcpu_ioctl_get_lapic(struct kvm_vcpu *vcpu,
				    struct kvm_lapic_state *s)
{
	vcpu_load(vcpu);
	memcpy(s->regs, vcpu->apic->regs, sizeof *s);
	vcpu_put(vcpu);

	return 0;
}

static int kvm_vcpu_ioctl_set_lapic(struct kvm_vcpu *vcpu,
				    struct kvm_lapic_state *s)
{
	vcpu_load(vcpu);
	memcpy(vcpu->apic->regs, s->regs, sizeof *s);
	kvm_apic_post_state_restore(vcpu);
	vcpu_put(vcpu);

	return 0;
}
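
/*
 * Arch-specific per-vcpu ioctls: local APIC state save/restore, CPUID
 * table setup, and batched MSR reads and writes.
 */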
long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	int r;

	switch (ioctl) {
	case KVM_GET_LAPIC: {
		struct kvm_lapic_state lapic;

		memset(&lapic, 0, sizeof lapic);
		r = kvm_vcpu_ioctl_get_lapic(vcpu, &lapic);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, &lapic, sizeof lapic))
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_LAPIC: {
		struct kvm_lapic_state lapic;

		r = -EFAULT;
		if (copy_from_user(&lapic, argp, sizeof lapic))
			goto out;
		r = kvm_vcpu_ioctl_set_lapic(vcpu, &lapic);
		if (r)
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_CPUID: {
		struct kvm_cpuid __user *cpuid_arg = argp;
		struct kvm_cpuid cpuid;

		r = -EFAULT;
		if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
			goto out;
		r = kvm_vcpu_ioctl_set_cpuid(vcpu, &cpuid, cpuid_arg->entries);
		if (r)
			goto out;
		break;
	}
	case KVM_GET_MSRS:
		r = msr_io(vcpu, argp, kvm_get_msr, 1);
		break;
	case KVM_SET_MSRS:
		r = msr_io(vcpu, argp, do_set_msr, 0);
		break;
	default:
		r = -EINVAL;
	}
out:
	return r;
}
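
/*
 * Remember a guest-physical address the vendor backend can use for its
 * private TSS pages.  The address must leave room for three pages below
 * the 4G boundary; the actual setup is delegated to kvm_x86_ops.
 */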
static int kvm_vm_ioctl_set_tss_addr(struct kvm *kvm, unsigned long addr)
{
	int ret;

	if (addr > (unsigned int)(-3 * PAGE_SIZE))
		return -1;
	ret = kvm_x86_ops->set_tss_addr(kvm, addr);
	return ret;
}

static int kvm_vm_ioctl_set_nr_mmu_pages(struct kvm *kvm,
					 u32 kvm_nr_mmu_pages)
{
	if (kvm_nr_mmu_pages < KVM_MIN_ALLOC_MMU_PAGES)
		return -EINVAL;

	mutex_lock(&kvm->lock);

	kvm_mmu_change_mmu_pages(kvm, kvm_nr_mmu_pages);
	kvm->n_requested_mmu_pages = kvm_nr_mmu_pages;

	mutex_unlock(&kvm->lock);
	return 0;
}

static int kvm_vm_ioctl_get_nr_mmu_pages(struct kvm *kvm)
{
	return kvm->n_alloc_mmu_pages;
}

/*
 * Set a new alias region.  Aliases map a portion of physical memory into
 * another portion.  This is useful for memory windows, for example the PC
 * VGA region.
 */
static int kvm_vm_ioctl_set_memory_alias(struct kvm *kvm,
					 struct kvm_memory_alias *alias)
{
	int r, n;
	struct kvm_mem_alias *p;

	r = -EINVAL;
	/* General sanity checks */
	if (alias->memory_size & (PAGE_SIZE - 1))
		goto out;
	if (alias->guest_phys_addr & (PAGE_SIZE - 1))
		goto out;
	if (alias->slot >= KVM_ALIAS_SLOTS)
		goto out;
	if (alias->guest_phys_addr + alias->memory_size
	    < alias->guest_phys_addr)
		goto out;
	if (alias->target_phys_addr + alias->memory_size
	    < alias->target_phys_addr)
		goto out;

	mutex_lock(&kvm->lock);

	p = &kvm->aliases[alias->slot];
	p->base_gfn = alias->guest_phys_addr >> PAGE_SHIFT;
	p->npages = alias->memory_size >> PAGE_SHIFT;
	p->target_gfn = alias->target_phys_addr >> PAGE_SHIFT;

	for (n = KVM_ALIAS_SLOTS; n > 0; --n)
		if (kvm->aliases[n - 1].npages)
			break;
	kvm->naliases = n;

	kvm_mmu_zap_all(kvm);

	mutex_unlock(&kvm->lock);

	return 0;

out:
	return r;
}
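
/*
 * Copy the in-kernel PIC/IOAPIC register state to or from a
 * struct kvm_irqchip, selected by chip_id (PIC master, PIC slave or
 * IOAPIC).  A set also re-evaluates pending PIC interrupts.
 */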
static int kvm_vm_ioctl_get_irqchip(struct kvm *kvm, struct kvm_irqchip *chip)
{
	int r;

	r = 0;
	switch (chip->chip_id) {
	case KVM_IRQCHIP_PIC_MASTER:
		memcpy(&chip->chip.pic,
		       &pic_irqchip(kvm)->pics[0],
		       sizeof(struct kvm_pic_state));
		break;
	case KVM_IRQCHIP_PIC_SLAVE:
		memcpy(&chip->chip.pic,
		       &pic_irqchip(kvm)->pics[1],
		       sizeof(struct kvm_pic_state));
		break;
	case KVM_IRQCHIP_IOAPIC:
		memcpy(&chip->chip.ioapic,
		       ioapic_irqchip(kvm),
		       sizeof(struct kvm_ioapic_state));
		break;
	default:
		r = -EINVAL;
		break;
	}
	return r;
}

static int kvm_vm_ioctl_set_irqchip(struct kvm *kvm, struct kvm_irqchip *chip)
{
	int r;

	r = 0;
	switch (chip->chip_id) {
	case KVM_IRQCHIP_PIC_MASTER:
		memcpy(&pic_irqchip(kvm)->pics[0],
		       &chip->chip.pic,
		       sizeof(struct kvm_pic_state));
		break;
	case KVM_IRQCHIP_PIC_SLAVE:
		memcpy(&pic_irqchip(kvm)->pics[1],
		       &chip->chip.pic,
		       sizeof(struct kvm_pic_state));
		break;
	case KVM_IRQCHIP_IOAPIC:
		memcpy(ioapic_irqchip(kvm),
		       &chip->chip.ioapic,
		       sizeof(struct kvm_ioapic_state));
		break;
	default:
		r = -EINVAL;
		break;
	}
	kvm_pic_update_irq(pic_irqchip(kvm));
	return r;
}
long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm = filp->private_data;
	void __user *argp = (void __user *)arg;
	int r = -EINVAL;

	switch (ioctl) {
	case KVM_SET_TSS_ADDR:
		r = kvm_vm_ioctl_set_tss_addr(kvm, arg);
		if (r < 0)
			goto out;
		break;
	case KVM_SET_MEMORY_REGION: {
		struct kvm_memory_region kvm_mem;
		struct kvm_userspace_memory_region kvm_userspace_mem;

		r = -EFAULT;
		if (copy_from_user(&kvm_mem, argp, sizeof kvm_mem))
			goto out;
		kvm_userspace_mem.slot = kvm_mem.slot;
		kvm_userspace_mem.flags = kvm_mem.flags;
		kvm_userspace_mem.guest_phys_addr = kvm_mem.guest_phys_addr;
		kvm_userspace_mem.memory_size = kvm_mem.memory_size;
		r = kvm_vm_ioctl_set_memory_region(kvm, &kvm_userspace_mem, 0);
		if (r)
			goto out;
		break;
	}
	case KVM_SET_NR_MMU_PAGES:
		r = kvm_vm_ioctl_set_nr_mmu_pages(kvm, arg);
		if (r)
			goto out;
		break;
	case KVM_GET_NR_MMU_PAGES:
		r = kvm_vm_ioctl_get_nr_mmu_pages(kvm);
		break;
	case KVM_SET_MEMORY_ALIAS: {
		struct kvm_memory_alias alias;

		r = -EFAULT;
		if (copy_from_user(&alias, argp, sizeof alias))
			goto out;
		r = kvm_vm_ioctl_set_memory_alias(kvm, &alias);
		if (r)
			goto out;
		break;
	}
	case KVM_CREATE_IRQCHIP:
		r = -ENOMEM;
		kvm->vpic = kvm_create_pic(kvm);
		if (kvm->vpic) {
			r = kvm_ioapic_init(kvm);
			if (r) {
				kfree(kvm->vpic);
				kvm->vpic = NULL;
				goto out;
			}
		} else
			goto out;
		break;
	case KVM_IRQ_LINE: {
		struct kvm_irq_level irq_event;

		r = -EFAULT;
		if (copy_from_user(&irq_event, argp, sizeof irq_event))
			goto out;
		if (irqchip_in_kernel(kvm)) {
			mutex_lock(&kvm->lock);
			if (irq_event.irq < 16)
				kvm_pic_set_irq(pic_irqchip(kvm),
						irq_event.irq,
						irq_event.level);
			kvm_ioapic_set_irq(kvm->vioapic,
					   irq_event.irq,
					   irq_event.level);
			mutex_unlock(&kvm->lock);
			r = 0;
		}
		break;
	}
	case KVM_GET_IRQCHIP: {
		/* 0: PIC master, 1: PIC slave, 2: IOAPIC */
		struct kvm_irqchip chip;

		r = -EFAULT;
		if (copy_from_user(&chip, argp, sizeof chip))
			goto out;
		r = -ENXIO;
		if (!irqchip_in_kernel(kvm))
			goto out;
		r = kvm_vm_ioctl_get_irqchip(kvm, &chip);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, &chip, sizeof chip))
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_IRQCHIP: {
		/* 0: PIC master, 1: PIC slave, 2: IOAPIC */
		struct kvm_irqchip chip;

		r = -EFAULT;
		if (copy_from_user(&chip, argp, sizeof chip))
			goto out;
		r = -ENXIO;
		if (!irqchip_in_kernel(kvm))
			goto out;
		r = kvm_vm_ioctl_set_irqchip(kvm, &chip);
		if (r)
			goto out;
		r = 0;
		break;
	}
	default:
		;
	}
out:
	return r;
}
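
/*
 * Trim msrs_to_save down to the MSRs the host actually implements, by
 * probing each one with rdmsr_safe() at module init time.
 */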
static __init void kvm_init_msr_list(void)
{
	u32 dummy[2];
	unsigned i, j;

	for (i = j = 0; i < ARRAY_SIZE(msrs_to_save); i++) {
		if (rdmsr_safe(msrs_to_save[i], &dummy[0], &dummy[1]) < 0)
			continue;
		if (j < i)
			msrs_to_save[j] = msrs_to_save[i];
		j++;
	}
	num_msrs_to_save = j;
}

/*
 * Only the apic needs a per-vcpu MMIO device hook, so take a shortcut for now.
 */
static struct kvm_io_device *vcpu_find_pervcpu_dev(struct kvm_vcpu *vcpu,
						   gpa_t addr)
{
	struct kvm_io_device *dev;

	if (vcpu->apic) {
		dev = &vcpu->apic->dev;
		if (dev->in_range(dev, addr))
			return dev;
	}
	return NULL;
}

static struct kvm_io_device *vcpu_find_mmio_dev(struct kvm_vcpu *vcpu,
						gpa_t addr)
{
	struct kvm_io_device *dev;

	dev = vcpu_find_pervcpu_dev(vcpu, addr);
	if (dev == NULL)
		dev = kvm_io_bus_find_dev(&vcpu->kvm->mmio_bus, addr);
	return dev;
}
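
/*
 * Read guest memory through the current guest page tables for the
 * instruction emulator, splitting the access at page boundaries and
 * propagating a fault if a virtual address is unmapped.
 */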
int emulator_read_std(unsigned long addr,
		      void *val,
		      unsigned int bytes,
		      struct kvm_vcpu *vcpu)
{
	void *data = val;

	while (bytes) {
		gpa_t gpa = vcpu->mmu.gva_to_gpa(vcpu, addr);
		unsigned offset = addr & (PAGE_SIZE-1);
		unsigned tocopy = min(bytes, (unsigned)PAGE_SIZE - offset);
		int ret;

		if (gpa == UNMAPPED_GVA)
			return X86EMUL_PROPAGATE_FAULT;
		ret = kvm_read_guest(vcpu->kvm, gpa, data, tocopy);
		if (ret < 0)
			return X86EMUL_UNHANDLEABLE;

		bytes -= tocopy;
		data += tocopy;
		addr += tocopy;
	}

	return X86EMUL_CONTINUE;
}
EXPORT_SYMBOL_GPL(emulator_read_std);

static int emulator_write_std(unsigned long addr,
			      const void *val,
			      unsigned int bytes,
			      struct kvm_vcpu *vcpu)
{
	pr_unimpl(vcpu, "emulator_write_std: addr %lx n %d\n", addr, bytes);
	return X86EMUL_UNHANDLEABLE;
}

static int emulator_read_emulated(unsigned long addr,
				  void *val,
				  unsigned int bytes,
				  struct kvm_vcpu *vcpu)
{
	struct kvm_io_device *mmio_dev;
	gpa_t gpa;

	if (vcpu->mmio_read_completed) {
		memcpy(val, vcpu->mmio_data, bytes);
		vcpu->mmio_read_completed = 0;
		return X86EMUL_CONTINUE;
	}

	gpa = vcpu->mmu.gva_to_gpa(vcpu, addr);

	/* For APIC access vmexit */
	if ((gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE)
		goto mmio;

	if (emulator_read_std(addr, val, bytes, vcpu)
			== X86EMUL_CONTINUE)
		return X86EMUL_CONTINUE;
	if (gpa == UNMAPPED_GVA)
		return X86EMUL_PROPAGATE_FAULT;

mmio:
	/*
	 * Is this MMIO handled locally?
	 */
	mmio_dev = vcpu_find_mmio_dev(vcpu, gpa);
	if (mmio_dev) {
		kvm_iodevice_read(mmio_dev, gpa, bytes, val);
		return X86EMUL_CONTINUE;
	}

	vcpu->mmio_needed = 1;
	vcpu->mmio_phys_addr = gpa;
	vcpu->mmio_size = bytes;
	vcpu->mmio_is_write = 0;

	return X86EMUL_UNHANDLEABLE;
}

static int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
			       const void *val, int bytes)
{
	int ret;

	ret = kvm_write_guest(vcpu->kvm, gpa, val, bytes);
	if (ret < 0)
		return 0;
	kvm_mmu_pte_write(vcpu, gpa, val, bytes);
	return 1;
}

static int emulator_write_emulated_onepage(unsigned long addr,
					   const void *val,
					   unsigned int bytes,
					   struct kvm_vcpu *vcpu)
{
	struct kvm_io_device *mmio_dev;
	gpa_t gpa = vcpu->mmu.gva_to_gpa(vcpu, addr);

	if (gpa == UNMAPPED_GVA) {
		kvm_x86_ops->inject_page_fault(vcpu, addr, 2);
		return X86EMUL_PROPAGATE_FAULT;
	}

	/* For APIC access vmexit */
	if ((gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE)
		goto mmio;

	if (emulator_write_phys(vcpu, gpa, val, bytes))
		return X86EMUL_CONTINUE;

mmio:
	/*
	 * Is this MMIO handled locally?
	 */
	mmio_dev = vcpu_find_mmio_dev(vcpu, gpa);
	if (mmio_dev) {
		kvm_iodevice_write(mmio_dev, gpa, bytes, val);
		return X86EMUL_CONTINUE;
	}

	vcpu->mmio_needed = 1;
	vcpu->mmio_phys_addr = gpa;
	vcpu->mmio_size = bytes;
	vcpu->mmio_is_write = 1;
	memcpy(vcpu->mmio_data, val, bytes);

	return X86EMUL_CONTINUE;
}
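
/*
 * Emulated guest write: if the access straddles a page boundary, split
 * it so that each chunk is handled within a single page mapping.
 */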
int emulator_write_emulated(unsigned long addr,
			    const void *val,
			    unsigned int bytes,
			    struct kvm_vcpu *vcpu)
{
	/* Crossing a page boundary? */
	if (((addr + bytes - 1) ^ addr) & PAGE_MASK) {
		int rc, now;

		now = -addr & ~PAGE_MASK;
		rc = emulator_write_emulated_onepage(addr, val, now, vcpu);
		if (rc != X86EMUL_CONTINUE)
			return rc;
		addr += now;
		val += now;
		bytes -= now;
	}
	return emulator_write_emulated_onepage(addr, val, bytes, vcpu);
}
EXPORT_SYMBOL_GPL(emulator_write_emulated);

static int emulator_cmpxchg_emulated(unsigned long addr,
				     const void *old,
				     const void *new,
				     unsigned int bytes,
				     struct kvm_vcpu *vcpu)
{
	static int reported;

	if (!reported) {
		reported = 1;
		printk(KERN_WARNING "kvm: emulating exchange as write\n");
	}
	return emulator_write_emulated(addr, new, bytes, vcpu);
}

static unsigned long get_segment_base(struct kvm_vcpu *vcpu, int seg)
{
	return kvm_x86_ops->get_segment_base(vcpu, seg);
}

int emulate_invlpg(struct kvm_vcpu *vcpu, gva_t address)
{
	return X86EMUL_CONTINUE;
}

int emulate_clts(struct kvm_vcpu *vcpu)
{
	kvm_x86_ops->set_cr0(vcpu, vcpu->cr0 & ~X86_CR0_TS);
	return X86EMUL_CONTINUE;
}

int emulator_get_dr(struct x86_emulate_ctxt *ctxt, int dr, unsigned long *dest)
{
	struct kvm_vcpu *vcpu = ctxt->vcpu;

	switch (dr) {
	case 0 ... 3:
		*dest = kvm_x86_ops->get_dr(vcpu, dr);
		return X86EMUL_CONTINUE;
	default:
		pr_unimpl(vcpu, "%s: unexpected dr %u\n", __FUNCTION__, dr);
		return X86EMUL_UNHANDLEABLE;
	}
}

int emulator_set_dr(struct x86_emulate_ctxt *ctxt, int dr, unsigned long value)
{
	unsigned long mask = (ctxt->mode == X86EMUL_MODE_PROT64) ? ~0ULL : ~0U;
	int exception;

	kvm_x86_ops->set_dr(ctxt->vcpu, dr, value & mask, &exception);
	if (exception) {
		/* FIXME: better handling */
		return X86EMUL_UNHANDLEABLE;
	}
	return X86EMUL_CONTINUE;
}

void kvm_report_emulation_failure(struct kvm_vcpu *vcpu, const char *context)
{
	static int reported;
	u8 opcodes[4];
	unsigned long rip = vcpu->rip;
	unsigned long rip_linear;

	rip_linear = rip + get_segment_base(vcpu, VCPU_SREG_CS);

	if (reported)
		return;

	emulator_read_std(rip_linear, (void *)opcodes, 4, vcpu);

	printk(KERN_ERR "emulation failed (%s) rip %lx %02x %02x %02x %02x\n",
	       context, rip, opcodes[0], opcodes[1], opcodes[2], opcodes[3]);
	reported = 1;
}
EXPORT_SYMBOL_GPL(kvm_report_emulation_failure);

struct x86_emulate_ops emulate_ops = {
	.read_std            = emulator_read_std,
	.write_std           = emulator_write_std,
	.read_emulated       = emulator_read_emulated,
	.write_emulated      = emulator_write_emulated,
	.cmpxchg_emulated    = emulator_cmpxchg_emulated,
};
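
/*
 * Top-level entry point of the instruction emulator: optionally decode
 * the faulting instruction, run it against emulate_ops, and decide
 * whether emulation finished, failed, or must be completed by userspace
 * as an MMIO exit.
 */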
int emulate_instruction(struct kvm_vcpu *vcpu,
			struct kvm_run *run,
			unsigned long cr2,
			u16 error_code,
			int no_decode)
{
	int r;

	vcpu->mmio_fault_cr2 = cr2;
	kvm_x86_ops->cache_regs(vcpu);

	vcpu->mmio_is_write = 0;
	vcpu->pio.string = 0;

	if (!no_decode) {
		int cs_db, cs_l;
		kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);

		vcpu->emulate_ctxt.vcpu = vcpu;
		vcpu->emulate_ctxt.eflags = kvm_x86_ops->get_rflags(vcpu);
		vcpu->emulate_ctxt.cr2 = cr2;
		vcpu->emulate_ctxt.mode =
			(vcpu->emulate_ctxt.eflags & X86_EFLAGS_VM)
			? X86EMUL_MODE_REAL : cs_l
			? X86EMUL_MODE_PROT64 : cs_db
			? X86EMUL_MODE_PROT32 : X86EMUL_MODE_PROT16;

		if (vcpu->emulate_ctxt.mode == X86EMUL_MODE_PROT64) {
			vcpu->emulate_ctxt.cs_base = 0;
			vcpu->emulate_ctxt.ds_base = 0;
			vcpu->emulate_ctxt.es_base = 0;
			vcpu->emulate_ctxt.ss_base = 0;
		} else {
			vcpu->emulate_ctxt.cs_base =
					get_segment_base(vcpu, VCPU_SREG_CS);
			vcpu->emulate_ctxt.ds_base =
					get_segment_base(vcpu, VCPU_SREG_DS);
			vcpu->emulate_ctxt.es_base =
					get_segment_base(vcpu, VCPU_SREG_ES);
			vcpu->emulate_ctxt.ss_base =
					get_segment_base(vcpu, VCPU_SREG_SS);
		}

		vcpu->emulate_ctxt.gs_base =
				get_segment_base(vcpu, VCPU_SREG_GS);
		vcpu->emulate_ctxt.fs_base =
				get_segment_base(vcpu, VCPU_SREG_FS);

		r = x86_decode_insn(&vcpu->emulate_ctxt, &emulate_ops);
		if (r) {
			if (kvm_mmu_unprotect_page_virt(vcpu, cr2))
				return EMULATE_DONE;
			return EMULATE_FAIL;
		}
	}

	r = x86_emulate_insn(&vcpu->emulate_ctxt, &emulate_ops);

	if (vcpu->pio.string)
		return EMULATE_DO_MMIO;

	if ((r || vcpu->mmio_is_write) && run) {
		run->exit_reason = KVM_EXIT_MMIO;
		run->mmio.phys_addr = vcpu->mmio_phys_addr;
		memcpy(run->mmio.data, vcpu->mmio_data, 8);
		run->mmio.len = vcpu->mmio_size;
		run->mmio.is_write = vcpu->mmio_is_write;
	}

	if (r) {
		if (kvm_mmu_unprotect_page_virt(vcpu, cr2))
			return EMULATE_DONE;
		if (!vcpu->mmio_needed) {
			kvm_report_emulation_failure(vcpu, "mmio");
			return EMULATE_FAIL;
		}
		return EMULATE_DO_MMIO;
	}

	kvm_x86_ops->decache_regs(vcpu);
	kvm_x86_ops->set_rflags(vcpu, vcpu->emulate_ctxt.eflags);

	if (vcpu->mmio_is_write) {
		vcpu->mmio_needed = 0;
		return EMULATE_DO_MMIO;
	}

	return EMULATE_DONE;
}
EXPORT_SYMBOL_GPL(emulate_instruction);

static void free_pio_guest_pages(struct kvm_vcpu *vcpu)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(vcpu->pio.guest_pages); ++i)
		if (vcpu->pio.guest_pages[i]) {
			kvm_release_page(vcpu->pio.guest_pages[i]);
			vcpu->pio.guest_pages[i] = NULL;
		}
}

static int pio_copy_data(struct kvm_vcpu *vcpu)
{
	void *p = vcpu->pio_data;
	void *q;
	unsigned bytes;
	int nr_pages = vcpu->pio.guest_pages[1] ? 2 : 1;

	q = vmap(vcpu->pio.guest_pages, nr_pages, VM_READ|VM_WRITE,
		 PAGE_KERNEL);
	if (!q) {
		free_pio_guest_pages(vcpu);
		return -ENOMEM;
	}
	q += vcpu->pio.guest_page_offset;
	bytes = vcpu->pio.size * vcpu->pio.cur_count;
	if (vcpu->pio.in)
		memcpy(q, p, bytes);
	else
		memcpy(p, q, bytes);
	q -= vcpu->pio.guest_page_offset;
	vunmap(q);
	free_pio_guest_pages(vcpu);
	return 0;
}
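
/*
 * Finish a port I/O operation after the data is available: copy the
 * result into RAX (or the guest buffer for string I/O) and advance
 * RSI/RDI/RCX according to the direction and repeat count.
 */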
int complete_pio(struct kvm_vcpu *vcpu)
{
	struct kvm_pio_request *io = &vcpu->pio;
	long delta;
	int r;

	kvm_x86_ops->cache_regs(vcpu);

	if (!io->string) {
		if (io->in)
			memcpy(&vcpu->regs[VCPU_REGS_RAX], vcpu->pio_data,
			       io->size);
	} else {
		if (io->in) {
			r = pio_copy_data(vcpu);
			if (r) {
				kvm_x86_ops->cache_regs(vcpu);
				return r;
			}
		}

		delta = 1;
		if (io->rep) {
			delta *= io->cur_count;
			/*
			 * The size of the register should really depend on
			 * current address size.
			 */
			vcpu->regs[VCPU_REGS_RCX] -= delta;
		}
		if (io->down)
			delta = -delta;
		delta *= io->size;
		if (io->in)
			vcpu->regs[VCPU_REGS_RDI] += delta;
		else
			vcpu->regs[VCPU_REGS_RSI] += delta;
	}

	kvm_x86_ops->decache_regs(vcpu);

	io->count -= io->cur_count;
	io->cur_count = 0;

	return 0;
}

static void kernel_pio(struct kvm_io_device *pio_dev,
		       struct kvm_vcpu *vcpu,
		       void *pd)
{
	/* TODO: String I/O for in kernel device */

	mutex_lock(&vcpu->kvm->lock);
	if (vcpu->pio.in)
		kvm_iodevice_read(pio_dev, vcpu->pio.port,
				  vcpu->pio.size,
				  pd);
	else
		kvm_iodevice_write(pio_dev, vcpu->pio.port,
				   vcpu->pio.size,
				   pd);
	mutex_unlock(&vcpu->kvm->lock);
}

static void pio_string_write(struct kvm_io_device *pio_dev,
			     struct kvm_vcpu *vcpu)
{
	struct kvm_pio_request *io = &vcpu->pio;
	void *pd = vcpu->pio_data;
	int i;

	mutex_lock(&vcpu->kvm->lock);
	for (i = 0; i < io->cur_count; i++) {
		kvm_iodevice_write(pio_dev, io->port,
				   io->size,
				   pd);
		pd += io->size;
	}
	mutex_unlock(&vcpu->kvm->lock);
}

static struct kvm_io_device *vcpu_find_pio_dev(struct kvm_vcpu *vcpu,
					       gpa_t addr)
{
	return kvm_io_bus_find_dev(&vcpu->kvm->pio_bus, addr);
}
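
/*
 * Emulate a single (non-string) IN/OUT: fill in the kvm_run I/O exit
 * fields and, if a registered in-kernel PIO device claims the port,
 * complete the access right away; otherwise let userspace handle the
 * exit.
 */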
int kvm_emulate_pio(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
		    int size, unsigned port)
{
	struct kvm_io_device *pio_dev;

	vcpu->run->exit_reason = KVM_EXIT_IO;
	vcpu->run->io.direction = in ? KVM_EXIT_IO_IN : KVM_EXIT_IO_OUT;
	vcpu->run->io.size = vcpu->pio.size = size;
	vcpu->run->io.data_offset = KVM_PIO_PAGE_OFFSET * PAGE_SIZE;
	vcpu->run->io.count = vcpu->pio.count = vcpu->pio.cur_count = 1;
	vcpu->run->io.port = vcpu->pio.port = port;
	vcpu->pio.in = in;
	vcpu->pio.string = 0;
	vcpu->pio.down = 0;
	vcpu->pio.guest_page_offset = 0;
	vcpu->pio.rep = 0;

	kvm_x86_ops->cache_regs(vcpu);
	memcpy(vcpu->pio_data, &vcpu->regs[VCPU_REGS_RAX], 4);
	kvm_x86_ops->decache_regs(vcpu);

	kvm_x86_ops->skip_emulated_instruction(vcpu);

	pio_dev = vcpu_find_pio_dev(vcpu, port);
	if (pio_dev) {
		kernel_pio(pio_dev, vcpu, vcpu->pio_data);
		complete_pio(vcpu);
		return 1;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_emulate_pio);

int kvm_emulate_pio_string(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
			   int size, unsigned long count, int down,
			   gva_t address, int rep, unsigned port)
{
	unsigned now, in_page;
	int i, ret = 0;
	int nr_pages = 1;
	struct page *page;
	struct kvm_io_device *pio_dev;

	vcpu->run->exit_reason = KVM_EXIT_IO;
	vcpu->run->io.direction = in ? KVM_EXIT_IO_IN : KVM_EXIT_IO_OUT;
	vcpu->run->io.size = vcpu->pio.size = size;
	vcpu->run->io.data_offset = KVM_PIO_PAGE_OFFSET * PAGE_SIZE;
	vcpu->run->io.count = vcpu->pio.count = vcpu->pio.cur_count = count;
	vcpu->run->io.port = vcpu->pio.port = port;
	vcpu->pio.in = in;
	vcpu->pio.string = 1;
	vcpu->pio.down = down;
	vcpu->pio.guest_page_offset = offset_in_page(address);
	vcpu->pio.rep = rep;

	if (!count) {
		kvm_x86_ops->skip_emulated_instruction(vcpu);
		return 1;
	}

	if (!down)
		in_page = PAGE_SIZE - offset_in_page(address);
	else
		in_page = offset_in_page(address) + size;
	now = min(count, (unsigned long)in_page / size);
	if (!now) {
		/*
		 * String I/O straddles page boundary.  Pin two guest pages
		 * so that we satisfy atomicity constraints.  Do just one
		 * transaction to avoid complexity.
		 */
		nr_pages = 2;
		now = 1;
	}
	if (down) {
		/*
		 * String I/O in reverse.  Yuck.  Kill the guest, fix later.
		 */
		pr_unimpl(vcpu, "guest string pio down\n");
		inject_gp(vcpu);
		return 1;
	}
	vcpu->run->io.count = now;
	vcpu->pio.cur_count = now;

	if (vcpu->pio.cur_count == vcpu->pio.count)
		kvm_x86_ops->skip_emulated_instruction(vcpu);

	for (i = 0; i < nr_pages; ++i) {
		mutex_lock(&vcpu->kvm->lock);
		page = gva_to_page(vcpu, address + i * PAGE_SIZE);
		vcpu->pio.guest_pages[i] = page;
		mutex_unlock(&vcpu->kvm->lock);
		if (!page) {
			inject_gp(vcpu);
			free_pio_guest_pages(vcpu);
			return 1;
		}
	}

	pio_dev = vcpu_find_pio_dev(vcpu, port);
	if (!vcpu->pio.in) {
		/* string PIO write */
		ret = pio_copy_data(vcpu);
		if (ret >= 0 && pio_dev) {
			pio_string_write(pio_dev, vcpu);
			complete_pio(vcpu);
			if (vcpu->pio.count == 0)
				ret = 1;
		}
	} else if (pio_dev)
		pr_unimpl(vcpu, "no string pio read support yet, "
			  "port %x size %d count %ld\n",
			  port, size, count);

	return ret;
}
EXPORT_SYMBOL_GPL(kvm_emulate_pio_string);

__init void kvm_arch_init(void)
{
	kvm_init_msr_list();
}

int kvm_emulate_halt(struct kvm_vcpu *vcpu)
{
	++vcpu->stat.halt_exits;
	if (irqchip_in_kernel(vcpu->kvm)) {
		vcpu->mp_state = VCPU_MP_STATE_HALTED;
		kvm_vcpu_block(vcpu);
		if (vcpu->mp_state != VCPU_MP_STATE_RUNNABLE)
			return -EINTR;
		return 1;
	} else {
		vcpu->run->exit_reason = KVM_EXIT_HLT;
		return 0;
	}
}
EXPORT_SYMBOL_GPL(kvm_emulate_halt);
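
/*
 * Dispatch a guest hypercall: the call number is taken from RAX and the
 * arguments from RBX, RCX, RDX and RSI (truncated to 32 bits outside
 * long mode).  No calls are implemented yet, so every number returns
 * -KVM_ENOSYS in RAX.
 */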
int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
{
	unsigned long nr, a0, a1, a2, a3, ret;

	kvm_x86_ops->cache_regs(vcpu);

	nr = vcpu->regs[VCPU_REGS_RAX];
	a0 = vcpu->regs[VCPU_REGS_RBX];
	a1 = vcpu->regs[VCPU_REGS_RCX];
	a2 = vcpu->regs[VCPU_REGS_RDX];
	a3 = vcpu->regs[VCPU_REGS_RSI];

	if (!is_long_mode(vcpu)) {
		nr &= 0xFFFFFFFF;
		a0 &= 0xFFFFFFFF;
		a1 &= 0xFFFFFFFF;
		a2 &= 0xFFFFFFFF;
		a3 &= 0xFFFFFFFF;
	}

	switch (nr) {
	default:
		ret = -KVM_ENOSYS;
		break;
	}
	vcpu->regs[VCPU_REGS_RAX] = ret;
	kvm_x86_ops->decache_regs(vcpu);
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_emulate_hypercall);

int kvm_fix_hypercall(struct kvm_vcpu *vcpu)
{
	char instruction[3];
	int ret = 0;

	mutex_lock(&vcpu->kvm->lock);

	/*
	 * Blow out the MMU to ensure that no other VCPU has an active mapping
	 * to ensure that the updated hypercall appears atomically across all
	 * VCPUs.
	 */
	kvm_mmu_zap_all(vcpu->kvm);

	kvm_x86_ops->cache_regs(vcpu);
	kvm_x86_ops->patch_hypercall(vcpu, instruction);
	if (emulator_write_emulated(vcpu->rip, instruction, 3, vcpu)
	    != X86EMUL_CONTINUE)
		ret = -EFAULT;

	mutex_unlock(&vcpu->kvm->lock);

	return ret;
}

static u64 mk_cr_64(u64 curr_cr, u32 new_val)
{
	return (curr_cr & ~((1ULL << 32) - 1)) | new_val;
}

void realmode_lgdt(struct kvm_vcpu *vcpu, u16 limit, unsigned long base)
{
	struct descriptor_table dt = { limit, base };

	kvm_x86_ops->set_gdt(vcpu, &dt);
}

void realmode_lidt(struct kvm_vcpu *vcpu, u16 limit, unsigned long base)
{
	struct descriptor_table dt = { limit, base };

	kvm_x86_ops->set_idt(vcpu, &dt);
}

void realmode_lmsw(struct kvm_vcpu *vcpu, unsigned long msw,
		   unsigned long *rflags)
{
	lmsw(vcpu, msw);
	*rflags = kvm_x86_ops->get_rflags(vcpu);
}

unsigned long realmode_get_cr(struct kvm_vcpu *vcpu, int cr)
{
	kvm_x86_ops->decache_cr4_guest_bits(vcpu);
	switch (cr) {
	case 0:
		return vcpu->cr0;
	case 2:
		return vcpu->cr2;
	case 3:
		return vcpu->cr3;
	case 4:
		return vcpu->cr4;
	default:
		vcpu_printf(vcpu, "%s: unexpected cr %u\n", __FUNCTION__, cr);
		return 0;
	}
}

void realmode_set_cr(struct kvm_vcpu *vcpu, int cr, unsigned long val,
		     unsigned long *rflags)
{
	switch (cr) {
	case 0:
		set_cr0(vcpu, mk_cr_64(vcpu->cr0, val));
		*rflags = kvm_x86_ops->get_rflags(vcpu);
		break;
	case 2:
		vcpu->cr2 = val;
		break;
	case 3:
		set_cr3(vcpu, val);
		break;
	case 4:
		set_cr4(vcpu, mk_cr_64(vcpu->cr4, val));
		break;
	default:
		vcpu_printf(vcpu, "%s: unexpected cr %u\n", __FUNCTION__, cr);
	}
}
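
/*
 * Emulate the CPUID instruction from the per-vcpu cpuid table supplied
 * by userspace.  When there is no exact match for the requested leaf,
 * fall back to the highest leaf in the same class (basic or extended),
 * roughly mirroring hardware behaviour for out-of-range leaves.
 */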
void kvm_emulate_cpuid(struct kvm_vcpu *vcpu)
{
	int i;
	u32 function;
	struct kvm_cpuid_entry *e, *best;

	kvm_x86_ops->cache_regs(vcpu);
	function = vcpu->regs[VCPU_REGS_RAX];
	vcpu->regs[VCPU_REGS_RAX] = 0;
	vcpu->regs[VCPU_REGS_RBX] = 0;
	vcpu->regs[VCPU_REGS_RCX] = 0;
	vcpu->regs[VCPU_REGS_RDX] = 0;
	best = NULL;
	for (i = 0; i < vcpu->cpuid_nent; ++i) {
		e = &vcpu->cpuid_entries[i];
		if (e->function == function) {
			best = e;
			break;
		}
		/*
		 * Both basic or both extended?
		 */
		if (((e->function ^ function) & 0x80000000) == 0)
			if (!best || e->function > best->function)
				best = e;
	}
	if (best) {
		vcpu->regs[VCPU_REGS_RAX] = best->eax;
		vcpu->regs[VCPU_REGS_RBX] = best->ebx;
		vcpu->regs[VCPU_REGS_RCX] = best->ecx;
		vcpu->regs[VCPU_REGS_RDX] = best->edx;
	}
	kvm_x86_ops->decache_regs(vcpu);
	kvm_x86_ops->skip_emulated_instruction(vcpu);
}
EXPORT_SYMBOL_GPL(kvm_emulate_cpuid);

/*
 * fxsave fpu state.  Taken from x86_64/processor.h.  To be killed when
 * we have asm/x86/processor.h
 */
struct fxsave {
	u16 cwd;
	u16 swd;
	u16 twd;
	u16 fop;
	u64 rip;
	u64 rdp;
	u32 mxcsr;
	u32 mxcsr_mask;
	u32 st_space[32];	/* 8*16 bytes for each FP-reg = 128 bytes */
#ifdef CONFIG_X86_64
	u32 xmm_space[64];	/* 16*16 bytes for each XMM-reg = 256 bytes */
#else
	u32 xmm_space[32];	/* 8*16 bytes for each XMM-reg = 128 bytes */
#endif
};

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	struct fxsave *fxsave = (struct fxsave *)&vcpu->guest_fx_image;

	vcpu_load(vcpu);

	memcpy(fpu->fpr, fxsave->st_space, 128);
	fpu->fcw = fxsave->cwd;
	fpu->fsw = fxsave->swd;
	fpu->ftwx = fxsave->twd;
	fpu->last_opcode = fxsave->fop;
	fpu->last_ip = fxsave->rip;
	fpu->last_dp = fxsave->rdp;
	memcpy(fpu->xmm, fxsave->xmm_space, sizeof fxsave->xmm_space);

	vcpu_put(vcpu);

	return 0;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	struct fxsave *fxsave = (struct fxsave *)&vcpu->guest_fx_image;

	vcpu_load(vcpu);

	memcpy(fxsave->st_space, fpu->fpr, 128);
	fxsave->cwd = fpu->fcw;
	fxsave->swd = fpu->fsw;
	fxsave->twd = fpu->ftwx;
	fxsave->fop = fpu->last_opcode;
	fxsave->rip = fpu->last_ip;
	fxsave->rdp = fpu->last_dp;
	memcpy(fxsave->xmm_space, fpu->xmm, sizeof fxsave->xmm_space);

	vcpu_put(vcpu);

	return 0;
}
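
/*
 * Initialize the guest FPU image: reset the host FPU, capture that clean
 * state as the guest's initial image, then restore the host state.  The
 * guest image starts with MXCSR at its reset value (0x1f80).
 */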
void fx_init(struct kvm_vcpu *vcpu)
{
	unsigned after_mxcsr_mask;

	/* Initialize guest FPU by resetting ours and saving into guest's */
	preempt_disable();
	fx_save(&vcpu->host_fx_image);
	fpu_init();
	fx_save(&vcpu->guest_fx_image);
	fx_restore(&vcpu->host_fx_image);
	preempt_enable();

	vcpu->cr0 |= X86_CR0_ET;
	after_mxcsr_mask = offsetof(struct i387_fxsave_struct, st_space);
	vcpu->guest_fx_image.mxcsr = 0x1f80;
	memset((void *)&vcpu->guest_fx_image + after_mxcsr_mask,
	       0, sizeof(struct i387_fxsave_struct) - after_mxcsr_mask);
}
EXPORT_SYMBOL_GPL(fx_init);

void kvm_load_guest_fpu(struct kvm_vcpu *vcpu)
{
	if (!vcpu->fpu_active || vcpu->guest_fpu_loaded)
		return;

	vcpu->guest_fpu_loaded = 1;
	fx_save(&vcpu->host_fx_image);
	fx_restore(&vcpu->guest_fx_image);
}
EXPORT_SYMBOL_GPL(kvm_load_guest_fpu);

void kvm_put_guest_fpu(struct kvm_vcpu *vcpu)
{
	if (!vcpu->guest_fpu_loaded)
		return;

	vcpu->guest_fpu_loaded = 0;
	fx_save(&vcpu->guest_fx_image);
	fx_restore(&vcpu->host_fx_image);
}
EXPORT_SYMBOL_GPL(kvm_put_guest_fpu);