vmx.c

/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This module enables machines with Intel VT-x extensions to run virtual
 * machines without emulation or binary translation.
 *
 * Copyright (C) 2006 Qumranet, Inc.
 *
 * Authors:
 *   Avi Kivity   <avi@qumranet.com>
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 */
#include "kvm.h"
#include "vmx.h"
#include "kvm_vmx.h"
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <asm/io.h>
#include <asm/desc.h>
#include "segment_descriptor.h"

MODULE_AUTHOR("Qumranet");
MODULE_LICENSE("GPL");
  28. static DEFINE_PER_CPU(struct vmcs *, vmxarea);
  29. static DEFINE_PER_CPU(struct vmcs *, current_vmcs);
  30. #ifdef CONFIG_X86_64
  31. #define HOST_IS_64 1
  32. #else
  33. #define HOST_IS_64 0
  34. #endif
  35. static struct vmcs_descriptor {
  36. int size;
  37. int order;
  38. u32 revision_id;
  39. } vmcs_descriptor;
  40. #define VMX_SEGMENT_FIELD(seg) \
  41. [VCPU_SREG_##seg] = { \
  42. .selector = GUEST_##seg##_SELECTOR, \
  43. .base = GUEST_##seg##_BASE, \
  44. .limit = GUEST_##seg##_LIMIT, \
  45. .ar_bytes = GUEST_##seg##_AR_BYTES, \
  46. }
  47. static struct kvm_vmx_segment_field {
  48. unsigned selector;
  49. unsigned base;
  50. unsigned limit;
  51. unsigned ar_bytes;
  52. } kvm_vmx_segment_fields[] = {
  53. VMX_SEGMENT_FIELD(CS),
  54. VMX_SEGMENT_FIELD(DS),
  55. VMX_SEGMENT_FIELD(ES),
  56. VMX_SEGMENT_FIELD(FS),
  57. VMX_SEGMENT_FIELD(GS),
  58. VMX_SEGMENT_FIELD(SS),
  59. VMX_SEGMENT_FIELD(TR),
  60. VMX_SEGMENT_FIELD(LDTR),
  61. };
  62. static const u32 vmx_msr_index[] = {
  63. #ifdef CONFIG_X86_64
  64. MSR_SYSCALL_MASK, MSR_LSTAR, MSR_CSTAR, MSR_KERNEL_GS_BASE,
  65. #endif
  66. MSR_EFER, MSR_K6_STAR,
  67. };
  68. #define NR_VMX_MSR (sizeof(vmx_msr_index) / sizeof(*vmx_msr_index))
  69. static inline int is_page_fault(u32 intr_info)
  70. {
  71. return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK |
  72. INTR_INFO_VALID_MASK)) ==
  73. (INTR_TYPE_EXCEPTION | PF_VECTOR | INTR_INFO_VALID_MASK);
  74. }
  75. static inline int is_external_interrupt(u32 intr_info)
  76. {
  77. return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VALID_MASK))
  78. == (INTR_TYPE_EXT_INTR | INTR_INFO_VALID_MASK);
  79. }
static struct vmx_msr_entry *find_msr_entry(struct kvm_vcpu *vcpu, u32 msr)
{
	int i;

	for (i = 0; i < vcpu->nmsrs; ++i)
		if (vcpu->guest_msrs[i].index == msr)
			return &vcpu->guest_msrs[i];
	return NULL;	/* not a tracked MSR */
}
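/*
 * Flush the VMCS back to memory with VMCLEAR so it can safely be loaded
 * on another CPU (or freed).  Logs an error if the instruction fails.
 */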
  88. static void vmcs_clear(struct vmcs *vmcs)
  89. {
  90. u64 phys_addr = __pa(vmcs);
  91. u8 error;
  92. asm volatile (ASM_VMX_VMCLEAR_RAX "; setna %0"
  93. : "=g"(error) : "a"(&phys_addr), "m"(phys_addr)
  94. : "cc", "memory");
  95. if (error)
  96. printk(KERN_ERR "kvm: vmclear fail: %p/%llx\n",
  97. vmcs, phys_addr);
  98. }
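/*
 * Runs (via smp_call_function) on the CPU that last ran this vcpu:
 * clears the vcpu's VMCS there and drops the cached current_vmcs pointer
 * so the VMCS can be loaded on a different CPU.
 */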
  99. static void __vcpu_clear(void *arg)
  100. {
  101. struct kvm_vcpu *vcpu = arg;
  102. int cpu = raw_smp_processor_id();
  103. if (vcpu->cpu == cpu)
  104. vmcs_clear(vcpu->vmcs);
  105. if (per_cpu(current_vmcs, cpu) == vcpu->vmcs)
  106. per_cpu(current_vmcs, cpu) = NULL;
  107. }
  108. static unsigned long vmcs_readl(unsigned long field)
  109. {
  110. unsigned long value;
  111. asm volatile (ASM_VMX_VMREAD_RDX_RAX
  112. : "=a"(value) : "d"(field) : "cc");
  113. return value;
  114. }
  115. static u16 vmcs_read16(unsigned long field)
  116. {
  117. return vmcs_readl(field);
  118. }
  119. static u32 vmcs_read32(unsigned long field)
  120. {
  121. return vmcs_readl(field);
  122. }
  123. static u64 vmcs_read64(unsigned long field)
  124. {
  125. #ifdef CONFIG_X86_64
  126. return vmcs_readl(field);
  127. #else
  128. return vmcs_readl(field) | ((u64)vmcs_readl(field+1) << 32);
  129. #endif
  130. }
  131. static void vmcs_writel(unsigned long field, unsigned long value)
  132. {
  133. u8 error;
  134. asm volatile (ASM_VMX_VMWRITE_RAX_RDX "; setna %0"
  135. : "=q"(error) : "a"(value), "d"(field) : "cc" );
  136. if (error)
  137. printk(KERN_ERR "vmwrite error: reg %lx value %lx (err %d)\n",
  138. field, value, vmcs_read32(VM_INSTRUCTION_ERROR));
  139. }
  140. static void vmcs_write16(unsigned long field, u16 value)
  141. {
  142. vmcs_writel(field, value);
  143. }
  144. static void vmcs_write32(unsigned long field, u32 value)
  145. {
  146. vmcs_writel(field, value);
  147. }
  148. static void vmcs_write64(unsigned long field, u64 value)
  149. {
  150. #ifdef CONFIG_X86_64
  151. vmcs_writel(field, value);
  152. #else
  153. vmcs_writel(field, value);
  154. asm volatile ("");
  155. vmcs_writel(field+1, value >> 32);
  156. #endif
  157. }
  158. /*
  159. * Switches to specified vcpu, until a matching vcpu_put(), but assumes
  160. * vcpu mutex is already taken.
  161. */
  162. static struct kvm_vcpu *vmx_vcpu_load(struct kvm_vcpu *vcpu)
  163. {
  164. u64 phys_addr = __pa(vcpu->vmcs);
  165. int cpu;
  166. cpu = get_cpu();
  167. if (vcpu->cpu != cpu) {
  168. smp_call_function(__vcpu_clear, vcpu, 0, 1);
  169. vcpu->launched = 0;
  170. }
  171. if (per_cpu(current_vmcs, cpu) != vcpu->vmcs) {
  172. u8 error;
  173. per_cpu(current_vmcs, cpu) = vcpu->vmcs;
  174. asm volatile (ASM_VMX_VMPTRLD_RAX "; setna %0"
  175. : "=g"(error) : "a"(&phys_addr), "m"(phys_addr)
  176. : "cc");
  177. if (error)
  178. printk(KERN_ERR "kvm: vmptrld %p/%llx fail\n",
  179. vcpu->vmcs, phys_addr);
  180. }
  181. if (vcpu->cpu != cpu) {
  182. struct descriptor_table dt;
  183. unsigned long sysenter_esp;
  184. vcpu->cpu = cpu;
  185. /*
  186. * Linux uses per-cpu TSS and GDT, so set these when switching
  187. * processors.
  188. */
  189. vmcs_writel(HOST_TR_BASE, read_tr_base()); /* 22.2.4 */
  190. get_gdt(&dt);
  191. vmcs_writel(HOST_GDTR_BASE, dt.base); /* 22.2.4 */
  192. rdmsrl(MSR_IA32_SYSENTER_ESP, sysenter_esp);
  193. vmcs_writel(HOST_IA32_SYSENTER_ESP, sysenter_esp); /* 22.2.3 */
  194. }
  195. return vcpu;
  196. }
  197. static void vmx_vcpu_put(struct kvm_vcpu *vcpu)
  198. {
  199. put_cpu();
  200. }
  201. static unsigned long vmx_get_rflags(struct kvm_vcpu *vcpu)
  202. {
  203. return vmcs_readl(GUEST_RFLAGS);
  204. }
  205. static void vmx_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
  206. {
  207. vmcs_writel(GUEST_RFLAGS, rflags);
  208. }
  209. static void skip_emulated_instruction(struct kvm_vcpu *vcpu)
  210. {
  211. unsigned long rip;
  212. u32 interruptibility;
  213. rip = vmcs_readl(GUEST_RIP);
  214. rip += vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
  215. vmcs_writel(GUEST_RIP, rip);
  216. /*
  217. * We emulated an instruction, so temporary interrupt blocking
  218. * should be removed, if set.
  219. */
  220. interruptibility = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO);
  221. if (interruptibility & 3)
  222. vmcs_write32(GUEST_INTERRUPTIBILITY_INFO,
  223. interruptibility & ~3);
  224. }
  225. static void vmx_inject_gp(struct kvm_vcpu *vcpu, unsigned error_code)
  226. {
  227. printk(KERN_DEBUG "inject_general_protection: rip 0x%lx\n",
  228. vmcs_readl(GUEST_RIP));
  229. vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE, error_code);
  230. vmcs_write32(VM_ENTRY_INTR_INFO_FIELD,
  231. GP_VECTOR |
  232. INTR_TYPE_EXCEPTION |
  233. INTR_INFO_DELIEVER_CODE_MASK |
  234. INTR_INFO_VALID_MASK);
  235. }
  236. /*
  237. * reads and returns guest's timestamp counter "register"
  238. * guest_tsc = host_tsc + tsc_offset -- 21.3
  239. */
  240. static u64 guest_read_tsc(void)
  241. {
  242. u64 host_tsc, tsc_offset;
  243. rdtscll(host_tsc);
  244. tsc_offset = vmcs_read64(TSC_OFFSET);
  245. return host_tsc + tsc_offset;
  246. }
  247. /*
  248. * writes 'guest_tsc' into guest's timestamp counter "register"
  249. * guest_tsc = host_tsc + tsc_offset ==> tsc_offset = guest_tsc - host_tsc
  250. */
  251. static void guest_write_tsc(u64 guest_tsc)
  252. {
  253. u64 host_tsc;
  254. rdtscll(host_tsc);
  255. vmcs_write64(TSC_OFFSET, guest_tsc - host_tsc);
  256. }
  257. static void reload_tss(void)
  258. {
  259. #ifndef CONFIG_X86_64
  260. /*
  261. * VT restores TR but not its size. Useless.
  262. */
  263. struct descriptor_table gdt;
  264. struct segment_descriptor *descs;
  265. get_gdt(&gdt);
  266. descs = (void *)gdt.base;
  267. descs[GDT_ENTRY_TSS].type = 9; /* available TSS */
  268. load_TR_desc();
  269. #endif
  270. }
  271. /*
  272. * Reads an msr value (of 'msr_index') into 'pdata'.
  273. * Returns 0 on success, non-0 otherwise.
  274. * Assumes vcpu_load() was already called.
  275. */
  276. static int vmx_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
  277. {
  278. u64 data;
  279. struct vmx_msr_entry *msr;
  280. if (!pdata) {
  281. printk(KERN_ERR "BUG: get_msr called with NULL pdata\n");
  282. return -EINVAL;
  283. }
  284. switch (msr_index) {
  285. #ifdef CONFIG_X86_64
  286. case MSR_FS_BASE:
  287. data = vmcs_readl(GUEST_FS_BASE);
  288. break;
  289. case MSR_GS_BASE:
  290. data = vmcs_readl(GUEST_GS_BASE);
  291. break;
  292. case MSR_EFER:
  293. return kvm_get_msr_common(vcpu, msr_index, pdata);
  294. #endif
  295. case MSR_IA32_TIME_STAMP_COUNTER:
  296. data = guest_read_tsc();
  297. break;
  298. case MSR_IA32_SYSENTER_CS:
  299. data = vmcs_read32(GUEST_SYSENTER_CS);
  300. break;
  301. case MSR_IA32_SYSENTER_EIP:
  302. data = vmcs_read32(GUEST_SYSENTER_EIP);
  303. break;
  304. case MSR_IA32_SYSENTER_ESP:
  305. data = vmcs_read32(GUEST_SYSENTER_ESP);
  306. break;
  307. default:
  308. msr = find_msr_entry(vcpu, msr_index);
  309. if (msr) {
  310. data = msr->data;
  311. break;
  312. }
  313. return kvm_get_msr_common(vcpu, msr_index, pdata);
  314. }
  315. *pdata = data;
  316. return 0;
  317. }
/*
 * Writes msr value into the appropriate "register".
 * Returns 0 on success, non-0 otherwise.
 * Assumes vcpu_load() was already called.
 */
  323. static int vmx_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
  324. {
  325. struct vmx_msr_entry *msr;
  326. switch (msr_index) {
  327. #ifdef CONFIG_X86_64
  328. case MSR_EFER:
  329. return kvm_set_msr_common(vcpu, msr_index, data);
  330. case MSR_FS_BASE:
  331. vmcs_writel(GUEST_FS_BASE, data);
  332. break;
  333. case MSR_GS_BASE:
  334. vmcs_writel(GUEST_GS_BASE, data);
  335. break;
  336. #endif
  337. case MSR_IA32_SYSENTER_CS:
  338. vmcs_write32(GUEST_SYSENTER_CS, data);
  339. break;
  340. case MSR_IA32_SYSENTER_EIP:
  341. vmcs_write32(GUEST_SYSENTER_EIP, data);
  342. break;
  343. case MSR_IA32_SYSENTER_ESP:
  344. vmcs_write32(GUEST_SYSENTER_ESP, data);
  345. break;
  346. case MSR_IA32_TIME_STAMP_COUNTER: {
  347. guest_write_tsc(data);
  348. break;
  349. }
	default:
		msr = find_msr_entry(vcpu, msr_index);
		if (msr) {
			msr->data = data;
			break;
		}
		/* Not a tracked MSR; defer to the common handler. */
		return kvm_set_msr_common(vcpu, msr_index, data);
	}
  360. return 0;
  361. }
  362. /*
  363. * Sync the rsp and rip registers into the vcpu structure. This allows
  364. * registers to be accessed by indexing vcpu->regs.
  365. */
  366. static void vcpu_load_rsp_rip(struct kvm_vcpu *vcpu)
  367. {
  368. vcpu->regs[VCPU_REGS_RSP] = vmcs_readl(GUEST_RSP);
  369. vcpu->rip = vmcs_readl(GUEST_RIP);
  370. }
  371. /*
  372. * Syncs rsp and rip back into the vmcs. Should be called after possible
  373. * modification.
  374. */
  375. static void vcpu_put_rsp_rip(struct kvm_vcpu *vcpu)
  376. {
  377. vmcs_writel(GUEST_RSP, vcpu->regs[VCPU_REGS_RSP]);
  378. vmcs_writel(GUEST_RIP, vcpu->rip);
  379. }
  380. static int set_guest_debug(struct kvm_vcpu *vcpu, struct kvm_debug_guest *dbg)
  381. {
  382. unsigned long dr7 = 0x400;
  383. u32 exception_bitmap;
  384. int old_singlestep;
  385. exception_bitmap = vmcs_read32(EXCEPTION_BITMAP);
  386. old_singlestep = vcpu->guest_debug.singlestep;
  387. vcpu->guest_debug.enabled = dbg->enabled;
  388. if (vcpu->guest_debug.enabled) {
  389. int i;
  390. dr7 |= 0x200; /* exact */
  391. for (i = 0; i < 4; ++i) {
  392. if (!dbg->breakpoints[i].enabled)
  393. continue;
  394. vcpu->guest_debug.bp[i] = dbg->breakpoints[i].address;
  395. dr7 |= 2 << (i*2); /* global enable */
  396. dr7 |= 0 << (i*4+16); /* execution breakpoint */
  397. }
  398. exception_bitmap |= (1u << 1); /* Trap debug exceptions */
  399. vcpu->guest_debug.singlestep = dbg->singlestep;
  400. } else {
  401. exception_bitmap &= ~(1u << 1); /* Ignore debug exceptions */
  402. vcpu->guest_debug.singlestep = 0;
  403. }
  404. if (old_singlestep && !vcpu->guest_debug.singlestep) {
  405. unsigned long flags;
  406. flags = vmcs_readl(GUEST_RFLAGS);
  407. flags &= ~(X86_EFLAGS_TF | X86_EFLAGS_RF);
  408. vmcs_writel(GUEST_RFLAGS, flags);
  409. }
  410. vmcs_write32(EXCEPTION_BITMAP, exception_bitmap);
  411. vmcs_writel(GUEST_DR7, dr7);
  412. return 0;
  413. }
  414. static __init int cpu_has_kvm_support(void)
  415. {
  416. unsigned long ecx = cpuid_ecx(1);
  417. return test_bit(5, &ecx); /* CPUID.1:ECX.VMX[bit 5] -> VT */
  418. }
  419. static __init int vmx_disabled_by_bios(void)
  420. {
  421. u64 msr;
  422. rdmsrl(MSR_IA32_FEATURE_CONTROL, msr);
  423. return (msr & 5) == 1; /* locked but not enabled */
  424. }
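/*
 * Per-cpu VMX enable: set (and lock) the enable bits in
 * IA32_FEATURE_CONTROL if the BIOS has not done so, turn on CR4.VMXE,
 * and execute VMXON with this CPU's vmxarea region.
 */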
  425. static __init void hardware_enable(void *garbage)
  426. {
  427. int cpu = raw_smp_processor_id();
  428. u64 phys_addr = __pa(per_cpu(vmxarea, cpu));
  429. u64 old;
  430. rdmsrl(MSR_IA32_FEATURE_CONTROL, old);
  431. if ((old & 5) != 5)
  432. /* enable and lock */
  433. wrmsrl(MSR_IA32_FEATURE_CONTROL, old | 5);
  434. write_cr4(read_cr4() | CR4_VMXE); /* FIXME: not cpu hotplug safe */
  435. asm volatile (ASM_VMX_VMXON_RAX : : "a"(&phys_addr), "m"(phys_addr)
  436. : "memory", "cc");
  437. }
  438. static void hardware_disable(void *garbage)
  439. {
  440. asm volatile (ASM_VMX_VMXOFF : : : "cc");
  441. }
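/*
 * Read IA32_VMX_BASIC to learn the VMCS region size and revision id;
 * both are needed when allocating and initializing VMCS pages.
 */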
  442. static __init void setup_vmcs_descriptor(void)
  443. {
  444. u32 vmx_msr_low, vmx_msr_high;
  445. rdmsr(MSR_IA32_VMX_BASIC, vmx_msr_low, vmx_msr_high);
  446. vmcs_descriptor.size = vmx_msr_high & 0x1fff;
  447. vmcs_descriptor.order = get_order(vmcs_descriptor.size);
  448. vmcs_descriptor.revision_id = vmx_msr_low;
  449. }
  450. static struct vmcs *alloc_vmcs_cpu(int cpu)
  451. {
  452. int node = cpu_to_node(cpu);
  453. struct page *pages;
  454. struct vmcs *vmcs;
  455. pages = alloc_pages_node(node, GFP_KERNEL, vmcs_descriptor.order);
  456. if (!pages)
  457. return NULL;
  458. vmcs = page_address(pages);
  459. memset(vmcs, 0, vmcs_descriptor.size);
  460. vmcs->revision_id = vmcs_descriptor.revision_id; /* vmcs revision id */
  461. return vmcs;
  462. }
  463. static struct vmcs *alloc_vmcs(void)
  464. {
  465. return alloc_vmcs_cpu(raw_smp_processor_id());
  466. }
  467. static void free_vmcs(struct vmcs *vmcs)
  468. {
  469. free_pages((unsigned long)vmcs, vmcs_descriptor.order);
  470. }
  471. static __exit void free_kvm_area(void)
  472. {
  473. int cpu;
  474. for_each_online_cpu(cpu)
  475. free_vmcs(per_cpu(vmxarea, cpu));
  476. }
  477. extern struct vmcs *alloc_vmcs_cpu(int cpu);
  478. static __init int alloc_kvm_area(void)
  479. {
  480. int cpu;
  481. for_each_online_cpu(cpu) {
  482. struct vmcs *vmcs;
  483. vmcs = alloc_vmcs_cpu(cpu);
  484. if (!vmcs) {
  485. free_kvm_area();
  486. return -ENOMEM;
  487. }
  488. per_cpu(vmxarea, cpu) = vmcs;
  489. }
  490. return 0;
  491. }
  492. static __init int hardware_setup(void)
  493. {
  494. setup_vmcs_descriptor();
  495. return alloc_kvm_area();
  496. }
  497. static __exit void hardware_unsetup(void)
  498. {
  499. free_kvm_area();
  500. }
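/*
 * In emulated real mode every exception is intercepted so it can be
 * fixed up or emulated; otherwise only page faults are trapped, which
 * the MMU code handles.
 */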
  501. static void update_exception_bitmap(struct kvm_vcpu *vcpu)
  502. {
  503. if (vcpu->rmode.active)
  504. vmcs_write32(EXCEPTION_BITMAP, ~0);
  505. else
  506. vmcs_write32(EXCEPTION_BITMAP, 1 << PF_VECTOR);
  507. }
  508. static void fix_pmode_dataseg(int seg, struct kvm_save_segment *save)
  509. {
  510. struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];
  511. if (vmcs_readl(sf->base) == save->base) {
  512. vmcs_write16(sf->selector, save->selector);
  513. vmcs_writel(sf->base, save->base);
  514. vmcs_write32(sf->limit, save->limit);
  515. vmcs_write32(sf->ar_bytes, save->ar);
  516. } else {
  517. u32 dpl = (vmcs_read16(sf->selector) & SELECTOR_RPL_MASK)
  518. << AR_DPL_SHIFT;
  519. vmcs_write32(sf->ar_bytes, 0x93 | dpl);
  520. }
  521. }
  522. static void enter_pmode(struct kvm_vcpu *vcpu)
  523. {
  524. unsigned long flags;
  525. vcpu->rmode.active = 0;
  526. vmcs_writel(GUEST_TR_BASE, vcpu->rmode.tr.base);
  527. vmcs_write32(GUEST_TR_LIMIT, vcpu->rmode.tr.limit);
  528. vmcs_write32(GUEST_TR_AR_BYTES, vcpu->rmode.tr.ar);
  529. flags = vmcs_readl(GUEST_RFLAGS);
  530. flags &= ~(IOPL_MASK | X86_EFLAGS_VM);
  531. flags |= (vcpu->rmode.save_iopl << IOPL_SHIFT);
  532. vmcs_writel(GUEST_RFLAGS, flags);
  533. vmcs_writel(GUEST_CR4, (vmcs_readl(GUEST_CR4) & ~CR4_VME_MASK) |
  534. (vmcs_readl(CR4_READ_SHADOW) & CR4_VME_MASK));
  535. update_exception_bitmap(vcpu);
  536. fix_pmode_dataseg(VCPU_SREG_ES, &vcpu->rmode.es);
  537. fix_pmode_dataseg(VCPU_SREG_DS, &vcpu->rmode.ds);
  538. fix_pmode_dataseg(VCPU_SREG_GS, &vcpu->rmode.gs);
  539. fix_pmode_dataseg(VCPU_SREG_FS, &vcpu->rmode.fs);
  540. vmcs_write16(GUEST_SS_SELECTOR, 0);
  541. vmcs_write32(GUEST_SS_AR_BYTES, 0x93);
  542. vmcs_write16(GUEST_CS_SELECTOR,
  543. vmcs_read16(GUEST_CS_SELECTOR) & ~SELECTOR_RPL_MASK);
  544. vmcs_write32(GUEST_CS_AR_BYTES, 0x9b);
  545. }
  546. static int rmode_tss_base(struct kvm* kvm)
  547. {
  548. gfn_t base_gfn = kvm->memslots[0].base_gfn + kvm->memslots[0].npages - 3;
  549. return base_gfn << PAGE_SHIFT;
  550. }
  551. static void fix_rmode_seg(int seg, struct kvm_save_segment *save)
  552. {
  553. struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];
  554. save->selector = vmcs_read16(sf->selector);
  555. save->base = vmcs_readl(sf->base);
  556. save->limit = vmcs_read32(sf->limit);
  557. save->ar = vmcs_read32(sf->ar_bytes);
  558. vmcs_write16(sf->selector, vmcs_readl(sf->base) >> 4);
  559. vmcs_write32(sf->limit, 0xffff);
  560. vmcs_write32(sf->ar_bytes, 0xf3);
  561. }
  562. static void enter_rmode(struct kvm_vcpu *vcpu)
  563. {
  564. unsigned long flags;
  565. vcpu->rmode.active = 1;
  566. vcpu->rmode.tr.base = vmcs_readl(GUEST_TR_BASE);
  567. vmcs_writel(GUEST_TR_BASE, rmode_tss_base(vcpu->kvm));
  568. vcpu->rmode.tr.limit = vmcs_read32(GUEST_TR_LIMIT);
  569. vmcs_write32(GUEST_TR_LIMIT, RMODE_TSS_SIZE - 1);
  570. vcpu->rmode.tr.ar = vmcs_read32(GUEST_TR_AR_BYTES);
  571. vmcs_write32(GUEST_TR_AR_BYTES, 0x008b);
  572. flags = vmcs_readl(GUEST_RFLAGS);
  573. vcpu->rmode.save_iopl = (flags & IOPL_MASK) >> IOPL_SHIFT;
  574. flags |= IOPL_MASK | X86_EFLAGS_VM;
  575. vmcs_writel(GUEST_RFLAGS, flags);
  576. vmcs_writel(GUEST_CR4, vmcs_readl(GUEST_CR4) | CR4_VME_MASK);
  577. update_exception_bitmap(vcpu);
  578. vmcs_write16(GUEST_SS_SELECTOR, vmcs_readl(GUEST_SS_BASE) >> 4);
  579. vmcs_write32(GUEST_SS_LIMIT, 0xffff);
  580. vmcs_write32(GUEST_SS_AR_BYTES, 0xf3);
  581. vmcs_write32(GUEST_CS_AR_BYTES, 0xf3);
  582. vmcs_write32(GUEST_CS_LIMIT, 0xffff);
  583. vmcs_write16(GUEST_CS_SELECTOR, vmcs_readl(GUEST_CS_BASE) >> 4);
  584. fix_rmode_seg(VCPU_SREG_ES, &vcpu->rmode.es);
  585. fix_rmode_seg(VCPU_SREG_DS, &vcpu->rmode.ds);
  586. fix_rmode_seg(VCPU_SREG_GS, &vcpu->rmode.gs);
  587. fix_rmode_seg(VCPU_SREG_FS, &vcpu->rmode.fs);
  588. }
  589. #ifdef CONFIG_X86_64
  590. static void enter_lmode(struct kvm_vcpu *vcpu)
  591. {
  592. u32 guest_tr_ar;
  593. guest_tr_ar = vmcs_read32(GUEST_TR_AR_BYTES);
  594. if ((guest_tr_ar & AR_TYPE_MASK) != AR_TYPE_BUSY_64_TSS) {
  595. printk(KERN_DEBUG "%s: tss fixup for long mode. \n",
  596. __FUNCTION__);
  597. vmcs_write32(GUEST_TR_AR_BYTES,
  598. (guest_tr_ar & ~AR_TYPE_MASK)
  599. | AR_TYPE_BUSY_64_TSS);
  600. }
  601. vcpu->shadow_efer |= EFER_LMA;
  602. find_msr_entry(vcpu, MSR_EFER)->data |= EFER_LMA | EFER_LME;
  603. vmcs_write32(VM_ENTRY_CONTROLS,
  604. vmcs_read32(VM_ENTRY_CONTROLS)
  605. | VM_ENTRY_CONTROLS_IA32E_MASK);
  606. }
  607. static void exit_lmode(struct kvm_vcpu *vcpu)
  608. {
  609. vcpu->shadow_efer &= ~EFER_LMA;
  610. vmcs_write32(VM_ENTRY_CONTROLS,
  611. vmcs_read32(VM_ENTRY_CONTROLS)
  612. & ~VM_ENTRY_CONTROLS_IA32E_MASK);
  613. }
  614. #endif
  615. static void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
  616. {
  617. if (vcpu->rmode.active && (cr0 & CR0_PE_MASK))
  618. enter_pmode(vcpu);
  619. if (!vcpu->rmode.active && !(cr0 & CR0_PE_MASK))
  620. enter_rmode(vcpu);
  621. #ifdef CONFIG_X86_64
  622. if (vcpu->shadow_efer & EFER_LME) {
  623. if (!is_paging(vcpu) && (cr0 & CR0_PG_MASK))
  624. enter_lmode(vcpu);
  625. if (is_paging(vcpu) && !(cr0 & CR0_PG_MASK))
  626. exit_lmode(vcpu);
  627. }
  628. #endif
  629. vmcs_writel(CR0_READ_SHADOW, cr0);
  630. vmcs_writel(GUEST_CR0,
  631. (cr0 & ~KVM_GUEST_CR0_MASK) | KVM_VM_CR0_ALWAYS_ON);
  632. vcpu->cr0 = cr0;
  633. }
  634. /*
  635. * Used when restoring the VM to avoid corrupting segment registers
  636. */
  637. static void vmx_set_cr0_no_modeswitch(struct kvm_vcpu *vcpu, unsigned long cr0)
  638. {
  639. vcpu->rmode.active = ((cr0 & CR0_PE_MASK) == 0);
  640. update_exception_bitmap(vcpu);
  641. vmcs_writel(CR0_READ_SHADOW, cr0);
  642. vmcs_writel(GUEST_CR0,
  643. (cr0 & ~KVM_GUEST_CR0_MASK) | KVM_VM_CR0_ALWAYS_ON);
  644. vcpu->cr0 = cr0;
  645. }
  646. static void vmx_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
  647. {
  648. vmcs_writel(GUEST_CR3, cr3);
  649. }
  650. static void vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
  651. {
  652. vmcs_writel(CR4_READ_SHADOW, cr4);
  653. vmcs_writel(GUEST_CR4, cr4 | (vcpu->rmode.active ?
  654. KVM_RMODE_VM_CR4_ALWAYS_ON : KVM_PMODE_VM_CR4_ALWAYS_ON));
  655. vcpu->cr4 = cr4;
  656. }
  657. #ifdef CONFIG_X86_64
  658. static void vmx_set_efer(struct kvm_vcpu *vcpu, u64 efer)
  659. {
  660. struct vmx_msr_entry *msr = find_msr_entry(vcpu, MSR_EFER);
  661. vcpu->shadow_efer = efer;
  662. if (efer & EFER_LMA) {
  663. vmcs_write32(VM_ENTRY_CONTROLS,
  664. vmcs_read32(VM_ENTRY_CONTROLS) |
  665. VM_ENTRY_CONTROLS_IA32E_MASK);
  666. msr->data = efer;
  667. } else {
  668. vmcs_write32(VM_ENTRY_CONTROLS,
  669. vmcs_read32(VM_ENTRY_CONTROLS) &
  670. ~VM_ENTRY_CONTROLS_IA32E_MASK);
  671. msr->data = efer & ~EFER_LME;
  672. }
  673. }
  674. #endif
  675. static u64 vmx_get_segment_base(struct kvm_vcpu *vcpu, int seg)
  676. {
  677. struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];
  678. return vmcs_readl(sf->base);
  679. }
  680. static void vmx_get_segment(struct kvm_vcpu *vcpu,
  681. struct kvm_segment *var, int seg)
  682. {
  683. struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];
  684. u32 ar;
  685. var->base = vmcs_readl(sf->base);
  686. var->limit = vmcs_read32(sf->limit);
  687. var->selector = vmcs_read16(sf->selector);
  688. ar = vmcs_read32(sf->ar_bytes);
  689. if (ar & AR_UNUSABLE_MASK)
  690. ar = 0;
  691. var->type = ar & 15;
  692. var->s = (ar >> 4) & 1;
  693. var->dpl = (ar >> 5) & 3;
  694. var->present = (ar >> 7) & 1;
  695. var->avl = (ar >> 12) & 1;
  696. var->l = (ar >> 13) & 1;
  697. var->db = (ar >> 14) & 1;
  698. var->g = (ar >> 15) & 1;
  699. var->unusable = (ar >> 16) & 1;
  700. }
  701. static void vmx_set_segment(struct kvm_vcpu *vcpu,
  702. struct kvm_segment *var, int seg)
  703. {
  704. struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];
  705. u32 ar;
  706. vmcs_writel(sf->base, var->base);
  707. vmcs_write32(sf->limit, var->limit);
  708. vmcs_write16(sf->selector, var->selector);
  709. if (var->unusable)
  710. ar = 1 << 16;
  711. else {
  712. ar = var->type & 15;
  713. ar |= (var->s & 1) << 4;
  714. ar |= (var->dpl & 3) << 5;
  715. ar |= (var->present & 1) << 7;
  716. ar |= (var->avl & 1) << 12;
  717. ar |= (var->l & 1) << 13;
  718. ar |= (var->db & 1) << 14;
  719. ar |= (var->g & 1) << 15;
  720. }
  721. if (ar == 0) /* a 0 value means unusable */
  722. ar = AR_UNUSABLE_MASK;
  723. vmcs_write32(sf->ar_bytes, ar);
  724. }
  725. static void vmx_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l)
  726. {
  727. u32 ar = vmcs_read32(GUEST_CS_AR_BYTES);
  728. *db = (ar >> 14) & 1;
  729. *l = (ar >> 13) & 1;
  730. }
  731. static void vmx_get_idt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
  732. {
  733. dt->limit = vmcs_read32(GUEST_IDTR_LIMIT);
  734. dt->base = vmcs_readl(GUEST_IDTR_BASE);
  735. }
  736. static void vmx_set_idt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
  737. {
  738. vmcs_write32(GUEST_IDTR_LIMIT, dt->limit);
  739. vmcs_writel(GUEST_IDTR_BASE, dt->base);
  740. }
  741. static void vmx_get_gdt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
  742. {
  743. dt->limit = vmcs_read32(GUEST_GDTR_LIMIT);
  744. dt->base = vmcs_readl(GUEST_GDTR_BASE);
  745. }
  746. static void vmx_set_gdt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
  747. {
  748. vmcs_write32(GUEST_GDTR_LIMIT, dt->limit);
  749. vmcs_writel(GUEST_GDTR_BASE, dt->base);
  750. }
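/*
 * Build the three-page TSS (with I/O bitmap) in the last pages of guest
 * memory slot 0, used by vm86-based real-mode emulation: zero the pages,
 * set the I/O map base at offset 0x66, and terminate the bitmap with a
 * 0xff byte.
 */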
  751. static int init_rmode_tss(struct kvm* kvm)
  752. {
  753. struct page *p1, *p2, *p3;
  754. gfn_t fn = rmode_tss_base(kvm) >> PAGE_SHIFT;
  755. char *page;
  756. p1 = _gfn_to_page(kvm, fn++);
  757. p2 = _gfn_to_page(kvm, fn++);
  758. p3 = _gfn_to_page(kvm, fn);
  759. if (!p1 || !p2 || !p3) {
  760. kvm_printf(kvm,"%s: gfn_to_page failed\n", __FUNCTION__);
  761. return 0;
  762. }
  763. page = kmap_atomic(p1, KM_USER0);
  764. memset(page, 0, PAGE_SIZE);
  765. *(u16*)(page + 0x66) = TSS_BASE_SIZE + TSS_REDIRECTION_SIZE;
  766. kunmap_atomic(page, KM_USER0);
  767. page = kmap_atomic(p2, KM_USER0);
  768. memset(page, 0, PAGE_SIZE);
  769. kunmap_atomic(page, KM_USER0);
  770. page = kmap_atomic(p3, KM_USER0);
  771. memset(page, 0, PAGE_SIZE);
  772. *(page + RMODE_TSS_SIZE - 2 * PAGE_SIZE - 1) = ~0;
  773. kunmap_atomic(page, KM_USER0);
  774. return 1;
  775. }
  776. static void vmcs_write32_fixedbits(u32 msr, u32 vmcs_field, u32 val)
  777. {
  778. u32 msr_high, msr_low;
  779. rdmsr(msr, msr_low, msr_high);
  780. val &= msr_high;
  781. val |= msr_low;
  782. vmcs_write32(vmcs_field, val);
  783. }
  784. static void seg_setup(int seg)
  785. {
  786. struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];
  787. vmcs_write16(sf->selector, 0);
  788. vmcs_writel(sf->base, 0);
  789. vmcs_write32(sf->limit, 0xffff);
  790. vmcs_write32(sf->ar_bytes, 0x93);
  791. }
  792. /*
  793. * Sets up the vmcs for emulated real mode.
  794. */
  795. static int vmx_vcpu_setup(struct kvm_vcpu *vcpu)
  796. {
  797. u32 host_sysenter_cs;
  798. u32 junk;
  799. unsigned long a;
  800. struct descriptor_table dt;
  801. int i;
  802. int ret = 0;
  803. int nr_good_msrs;
  804. extern asmlinkage void kvm_vmx_return(void);
  805. if (!init_rmode_tss(vcpu->kvm)) {
  806. ret = -ENOMEM;
  807. goto out;
  808. }
  809. memset(vcpu->regs, 0, sizeof(vcpu->regs));
  810. vcpu->regs[VCPU_REGS_RDX] = get_rdx_init_val();
  811. vcpu->cr8 = 0;
  812. vcpu->apic_base = 0xfee00000 |
  813. /*for vcpu 0*/ MSR_IA32_APICBASE_BSP |
  814. MSR_IA32_APICBASE_ENABLE;
  815. fx_init(vcpu);
  816. /*
  817. * GUEST_CS_BASE should really be 0xffff0000, but VT vm86 mode
  818. * insists on having GUEST_CS_BASE == GUEST_CS_SELECTOR << 4. Sigh.
  819. */
  820. vmcs_write16(GUEST_CS_SELECTOR, 0xf000);
  821. vmcs_writel(GUEST_CS_BASE, 0x000f0000);
  822. vmcs_write32(GUEST_CS_LIMIT, 0xffff);
  823. vmcs_write32(GUEST_CS_AR_BYTES, 0x9b);
  824. seg_setup(VCPU_SREG_DS);
  825. seg_setup(VCPU_SREG_ES);
  826. seg_setup(VCPU_SREG_FS);
  827. seg_setup(VCPU_SREG_GS);
  828. seg_setup(VCPU_SREG_SS);
  829. vmcs_write16(GUEST_TR_SELECTOR, 0);
  830. vmcs_writel(GUEST_TR_BASE, 0);
  831. vmcs_write32(GUEST_TR_LIMIT, 0xffff);
  832. vmcs_write32(GUEST_TR_AR_BYTES, 0x008b);
  833. vmcs_write16(GUEST_LDTR_SELECTOR, 0);
  834. vmcs_writel(GUEST_LDTR_BASE, 0);
  835. vmcs_write32(GUEST_LDTR_LIMIT, 0xffff);
  836. vmcs_write32(GUEST_LDTR_AR_BYTES, 0x00082);
  837. vmcs_write32(GUEST_SYSENTER_CS, 0);
  838. vmcs_writel(GUEST_SYSENTER_ESP, 0);
  839. vmcs_writel(GUEST_SYSENTER_EIP, 0);
  840. vmcs_writel(GUEST_RFLAGS, 0x02);
  841. vmcs_writel(GUEST_RIP, 0xfff0);
  842. vmcs_writel(GUEST_RSP, 0);
  843. vmcs_writel(GUEST_CR3, 0);
  844. //todo: dr0 = dr1 = dr2 = dr3 = 0; dr6 = 0xffff0ff0
  845. vmcs_writel(GUEST_DR7, 0x400);
  846. vmcs_writel(GUEST_GDTR_BASE, 0);
  847. vmcs_write32(GUEST_GDTR_LIMIT, 0xffff);
  848. vmcs_writel(GUEST_IDTR_BASE, 0);
  849. vmcs_write32(GUEST_IDTR_LIMIT, 0xffff);
  850. vmcs_write32(GUEST_ACTIVITY_STATE, 0);
  851. vmcs_write32(GUEST_INTERRUPTIBILITY_INFO, 0);
  852. vmcs_write32(GUEST_PENDING_DBG_EXCEPTIONS, 0);
  853. /* I/O */
  854. vmcs_write64(IO_BITMAP_A, 0);
  855. vmcs_write64(IO_BITMAP_B, 0);
  856. guest_write_tsc(0);
  857. vmcs_write64(VMCS_LINK_POINTER, -1ull); /* 22.3.1.5 */
  858. /* Special registers */
  859. vmcs_write64(GUEST_IA32_DEBUGCTL, 0);
  860. /* Control */
  861. vmcs_write32_fixedbits(MSR_IA32_VMX_PINBASED_CTLS,
  862. PIN_BASED_VM_EXEC_CONTROL,
  863. PIN_BASED_EXT_INTR_MASK /* 20.6.1 */
  864. | PIN_BASED_NMI_EXITING /* 20.6.1 */
  865. );
  866. vmcs_write32_fixedbits(MSR_IA32_VMX_PROCBASED_CTLS,
  867. CPU_BASED_VM_EXEC_CONTROL,
  868. CPU_BASED_HLT_EXITING /* 20.6.2 */
  869. | CPU_BASED_CR8_LOAD_EXITING /* 20.6.2 */
  870. | CPU_BASED_CR8_STORE_EXITING /* 20.6.2 */
  871. | CPU_BASED_UNCOND_IO_EXITING /* 20.6.2 */
  872. | CPU_BASED_INVDPG_EXITING
  873. | CPU_BASED_MOV_DR_EXITING
  874. | CPU_BASED_USE_TSC_OFFSETING /* 21.3 */
  875. );
  876. vmcs_write32(EXCEPTION_BITMAP, 1 << PF_VECTOR);
  877. vmcs_write32(PAGE_FAULT_ERROR_CODE_MASK, 0);
  878. vmcs_write32(PAGE_FAULT_ERROR_CODE_MATCH, 0);
  879. vmcs_write32(CR3_TARGET_COUNT, 0); /* 22.2.1 */
  880. vmcs_writel(HOST_CR0, read_cr0()); /* 22.2.3 */
  881. vmcs_writel(HOST_CR4, read_cr4()); /* 22.2.3, 22.2.5 */
  882. vmcs_writel(HOST_CR3, read_cr3()); /* 22.2.3 FIXME: shadow tables */
  883. vmcs_write16(HOST_CS_SELECTOR, __KERNEL_CS); /* 22.2.4 */
  884. vmcs_write16(HOST_DS_SELECTOR, __KERNEL_DS); /* 22.2.4 */
  885. vmcs_write16(HOST_ES_SELECTOR, __KERNEL_DS); /* 22.2.4 */
  886. vmcs_write16(HOST_FS_SELECTOR, read_fs()); /* 22.2.4 */
  887. vmcs_write16(HOST_GS_SELECTOR, read_gs()); /* 22.2.4 */
  888. vmcs_write16(HOST_SS_SELECTOR, __KERNEL_DS); /* 22.2.4 */
  889. #ifdef CONFIG_X86_64
  890. rdmsrl(MSR_FS_BASE, a);
  891. vmcs_writel(HOST_FS_BASE, a); /* 22.2.4 */
  892. rdmsrl(MSR_GS_BASE, a);
  893. vmcs_writel(HOST_GS_BASE, a); /* 22.2.4 */
  894. #else
  895. vmcs_writel(HOST_FS_BASE, 0); /* 22.2.4 */
  896. vmcs_writel(HOST_GS_BASE, 0); /* 22.2.4 */
  897. #endif
  898. vmcs_write16(HOST_TR_SELECTOR, GDT_ENTRY_TSS*8); /* 22.2.4 */
  899. get_idt(&dt);
  900. vmcs_writel(HOST_IDTR_BASE, dt.base); /* 22.2.4 */
  901. vmcs_writel(HOST_RIP, (unsigned long)kvm_vmx_return); /* 22.2.5 */
  902. rdmsr(MSR_IA32_SYSENTER_CS, host_sysenter_cs, junk);
  903. vmcs_write32(HOST_IA32_SYSENTER_CS, host_sysenter_cs);
  904. rdmsrl(MSR_IA32_SYSENTER_ESP, a);
  905. vmcs_writel(HOST_IA32_SYSENTER_ESP, a); /* 22.2.3 */
  906. rdmsrl(MSR_IA32_SYSENTER_EIP, a);
  907. vmcs_writel(HOST_IA32_SYSENTER_EIP, a); /* 22.2.3 */
  908. for (i = 0; i < NR_VMX_MSR; ++i) {
  909. u32 index = vmx_msr_index[i];
  910. u32 data_low, data_high;
  911. u64 data;
  912. int j = vcpu->nmsrs;
  913. if (rdmsr_safe(index, &data_low, &data_high) < 0)
  914. continue;
  915. data = data_low | ((u64)data_high << 32);
  916. vcpu->host_msrs[j].index = index;
  917. vcpu->host_msrs[j].reserved = 0;
  918. vcpu->host_msrs[j].data = data;
  919. vcpu->guest_msrs[j] = vcpu->host_msrs[j];
  920. ++vcpu->nmsrs;
  921. }
  922. printk(KERN_DEBUG "kvm: msrs: %d\n", vcpu->nmsrs);
  923. nr_good_msrs = vcpu->nmsrs - NR_BAD_MSRS;
  924. vmcs_writel(VM_ENTRY_MSR_LOAD_ADDR,
  925. virt_to_phys(vcpu->guest_msrs + NR_BAD_MSRS));
  926. vmcs_writel(VM_EXIT_MSR_STORE_ADDR,
  927. virt_to_phys(vcpu->guest_msrs + NR_BAD_MSRS));
  928. vmcs_writel(VM_EXIT_MSR_LOAD_ADDR,
  929. virt_to_phys(vcpu->host_msrs + NR_BAD_MSRS));
	vmcs_write32_fixedbits(MSR_IA32_VMX_EXIT_CTLS, VM_EXIT_CONTROLS,
			       (HOST_IS_64 << 9));  /* 22.2.1, 20.7.1 */
  932. vmcs_write32(VM_EXIT_MSR_STORE_COUNT, nr_good_msrs); /* 22.2.2 */
  933. vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, nr_good_msrs); /* 22.2.2 */
  934. vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, nr_good_msrs); /* 22.2.2 */
  935. /* 22.2.1, 20.8.1 */
  936. vmcs_write32_fixedbits(MSR_IA32_VMX_ENTRY_CTLS,
  937. VM_ENTRY_CONTROLS, 0);
  938. vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, 0); /* 22.2.1 */
  939. #ifdef CONFIG_X86_64
  940. vmcs_writel(VIRTUAL_APIC_PAGE_ADDR, 0);
  941. vmcs_writel(TPR_THRESHOLD, 0);
  942. #endif
  943. vmcs_writel(CR0_GUEST_HOST_MASK, KVM_GUEST_CR0_MASK);
  944. vmcs_writel(CR4_GUEST_HOST_MASK, KVM_GUEST_CR4_MASK);
  945. vcpu->cr0 = 0x60000010;
  946. vmx_set_cr0(vcpu, vcpu->cr0); // enter rmode
  947. vmx_set_cr4(vcpu, 0);
  948. #ifdef CONFIG_X86_64
  949. vmx_set_efer(vcpu, 0);
  950. #endif
  951. return 0;
  952. out:
  953. return ret;
  954. }
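/*
 * Inject an interrupt into a real-mode guest by doing what the CPU
 * would: look up the vector in the IVT, push FLAGS, CS and IP on the
 * guest stack, clear IF/TF/AC, and redirect CS:IP to the handler.
 */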
  955. static void inject_rmode_irq(struct kvm_vcpu *vcpu, int irq)
  956. {
  957. u16 ent[2];
  958. u16 cs;
  959. u16 ip;
  960. unsigned long flags;
  961. unsigned long ss_base = vmcs_readl(GUEST_SS_BASE);
  962. u16 sp = vmcs_readl(GUEST_RSP);
  963. u32 ss_limit = vmcs_read32(GUEST_SS_LIMIT);
  964. if (sp > ss_limit || sp - 6 > sp) {
  965. vcpu_printf(vcpu, "%s: #SS, rsp 0x%lx ss 0x%lx limit 0x%x\n",
  966. __FUNCTION__,
  967. vmcs_readl(GUEST_RSP),
  968. vmcs_readl(GUEST_SS_BASE),
  969. vmcs_read32(GUEST_SS_LIMIT));
  970. return;
  971. }
  972. if (kvm_read_guest(vcpu, irq * sizeof(ent), sizeof(ent), &ent) !=
  973. sizeof(ent)) {
  974. vcpu_printf(vcpu, "%s: read guest err\n", __FUNCTION__);
  975. return;
  976. }
  977. flags = vmcs_readl(GUEST_RFLAGS);
  978. cs = vmcs_readl(GUEST_CS_BASE) >> 4;
  979. ip = vmcs_readl(GUEST_RIP);
  980. if (kvm_write_guest(vcpu, ss_base + sp - 2, 2, &flags) != 2 ||
  981. kvm_write_guest(vcpu, ss_base + sp - 4, 2, &cs) != 2 ||
  982. kvm_write_guest(vcpu, ss_base + sp - 6, 2, &ip) != 2) {
  983. vcpu_printf(vcpu, "%s: write guest err\n", __FUNCTION__);
  984. return;
  985. }
  986. vmcs_writel(GUEST_RFLAGS, flags &
  987. ~( X86_EFLAGS_IF | X86_EFLAGS_AC | X86_EFLAGS_TF));
  988. vmcs_write16(GUEST_CS_SELECTOR, ent[1]) ;
  989. vmcs_writel(GUEST_CS_BASE, ent[1] << 4);
  990. vmcs_writel(GUEST_RIP, ent[0]);
  991. vmcs_writel(GUEST_RSP, (vmcs_readl(GUEST_RSP) & ~0xffff) | (sp - 6));
  992. }
  993. static void kvm_do_inject_irq(struct kvm_vcpu *vcpu)
  994. {
  995. int word_index = __ffs(vcpu->irq_summary);
  996. int bit_index = __ffs(vcpu->irq_pending[word_index]);
  997. int irq = word_index * BITS_PER_LONG + bit_index;
  998. clear_bit(bit_index, &vcpu->irq_pending[word_index]);
  999. if (!vcpu->irq_pending[word_index])
  1000. clear_bit(word_index, &vcpu->irq_summary);
  1001. if (vcpu->rmode.active) {
  1002. inject_rmode_irq(vcpu, irq);
  1003. return;
  1004. }
  1005. vmcs_write32(VM_ENTRY_INTR_INFO_FIELD,
  1006. irq | INTR_TYPE_EXT_INTR | INTR_INFO_VALID_MASK);
  1007. }
  1008. static void kvm_try_inject_irq(struct kvm_vcpu *vcpu)
  1009. {
  1010. if ((vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_IF)
  1011. && (vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & 3) == 0)
  1012. /*
  1013. * Interrupts enabled, and not blocked by sti or mov ss. Good.
  1014. */
  1015. kvm_do_inject_irq(vcpu);
  1016. else
  1017. /*
  1018. * Interrupts blocked. Wait for unblock.
  1019. */
  1020. vmcs_write32(CPU_BASED_VM_EXEC_CONTROL,
  1021. vmcs_read32(CPU_BASED_VM_EXEC_CONTROL)
  1022. | CPU_BASED_VIRTUAL_INTR_PENDING);
  1023. }
  1024. static void kvm_guest_debug_pre(struct kvm_vcpu *vcpu)
  1025. {
  1026. struct kvm_guest_debug *dbg = &vcpu->guest_debug;
  1027. set_debugreg(dbg->bp[0], 0);
  1028. set_debugreg(dbg->bp[1], 1);
  1029. set_debugreg(dbg->bp[2], 2);
  1030. set_debugreg(dbg->bp[3], 3);
  1031. if (dbg->singlestep) {
  1032. unsigned long flags;
  1033. flags = vmcs_readl(GUEST_RFLAGS);
  1034. flags |= X86_EFLAGS_TF | X86_EFLAGS_RF;
  1035. vmcs_writel(GUEST_RFLAGS, flags);
  1036. }
  1037. }
  1038. static int handle_rmode_exception(struct kvm_vcpu *vcpu,
  1039. int vec, u32 err_code)
  1040. {
  1041. if (!vcpu->rmode.active)
  1042. return 0;
  1043. if (vec == GP_VECTOR && err_code == 0)
  1044. if (emulate_instruction(vcpu, NULL, 0, 0) == EMULATE_DONE)
  1045. return 1;
  1046. return 0;
  1047. }
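/*
 * Exception/NMI exit handler: reflect NMIs back to the host, resolve
 * guest page faults through the MMU (or the emulator for MMIO), give
 * real-mode #GPs to the instruction emulator, and hand anything left
 * over (including debug exceptions) to userspace.
 */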
  1048. static int handle_exception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
  1049. {
  1050. u32 intr_info, error_code;
  1051. unsigned long cr2, rip;
  1052. u32 vect_info;
  1053. enum emulation_result er;
  1054. vect_info = vmcs_read32(IDT_VECTORING_INFO_FIELD);
  1055. intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
  1056. if ((vect_info & VECTORING_INFO_VALID_MASK) &&
  1057. !is_page_fault(intr_info)) {
  1058. printk(KERN_ERR "%s: unexpected, vectoring info 0x%x "
  1059. "intr info 0x%x\n", __FUNCTION__, vect_info, intr_info);
  1060. }
  1061. if (is_external_interrupt(vect_info)) {
  1062. int irq = vect_info & VECTORING_INFO_VECTOR_MASK;
  1063. set_bit(irq, vcpu->irq_pending);
  1064. set_bit(irq / BITS_PER_LONG, &vcpu->irq_summary);
  1065. }
  1066. if ((intr_info & INTR_INFO_INTR_TYPE_MASK) == 0x200) { /* nmi */
  1067. asm ("int $2");
  1068. return 1;
  1069. }
  1070. error_code = 0;
  1071. rip = vmcs_readl(GUEST_RIP);
  1072. if (intr_info & INTR_INFO_DELIEVER_CODE_MASK)
  1073. error_code = vmcs_read32(VM_EXIT_INTR_ERROR_CODE);
  1074. if (is_page_fault(intr_info)) {
  1075. cr2 = vmcs_readl(EXIT_QUALIFICATION);
  1076. spin_lock(&vcpu->kvm->lock);
  1077. if (!vcpu->mmu.page_fault(vcpu, cr2, error_code)) {
  1078. spin_unlock(&vcpu->kvm->lock);
  1079. return 1;
  1080. }
  1081. er = emulate_instruction(vcpu, kvm_run, cr2, error_code);
  1082. spin_unlock(&vcpu->kvm->lock);
  1083. switch (er) {
  1084. case EMULATE_DONE:
  1085. return 1;
  1086. case EMULATE_DO_MMIO:
  1087. ++kvm_stat.mmio_exits;
  1088. kvm_run->exit_reason = KVM_EXIT_MMIO;
  1089. return 0;
  1090. case EMULATE_FAIL:
  1091. vcpu_printf(vcpu, "%s: emulate fail\n", __FUNCTION__);
  1092. break;
  1093. default:
  1094. BUG();
  1095. }
  1096. }
  1097. if (vcpu->rmode.active &&
  1098. handle_rmode_exception(vcpu, intr_info & INTR_INFO_VECTOR_MASK,
  1099. error_code))
  1100. return 1;
  1101. if ((intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK)) == (INTR_TYPE_EXCEPTION | 1)) {
  1102. kvm_run->exit_reason = KVM_EXIT_DEBUG;
  1103. return 0;
  1104. }
  1105. kvm_run->exit_reason = KVM_EXIT_EXCEPTION;
  1106. kvm_run->ex.exception = intr_info & INTR_INFO_VECTOR_MASK;
  1107. kvm_run->ex.error_code = error_code;
  1108. return 0;
  1109. }
  1110. static int handle_external_interrupt(struct kvm_vcpu *vcpu,
  1111. struct kvm_run *kvm_run)
  1112. {
  1113. ++kvm_stat.irq_exits;
  1114. return 1;
  1115. }
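/*
 * For a string I/O exit, recover the repetition count from RCX: scan
 * the instruction's prefix bytes to find the effective address size and
 * mask RCX down to that width.  Returns 1 on success with *count set.
 */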
  1116. static int get_io_count(struct kvm_vcpu *vcpu, u64 *count)
  1117. {
  1118. u64 inst;
  1119. gva_t rip;
  1120. int countr_size;
  1121. int i, n;
  1122. if ((vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_VM)) {
  1123. countr_size = 2;
  1124. } else {
  1125. u32 cs_ar = vmcs_read32(GUEST_CS_AR_BYTES);
  1126. countr_size = (cs_ar & AR_L_MASK) ? 8:
  1127. (cs_ar & AR_DB_MASK) ? 4: 2;
  1128. }
  1129. rip = vmcs_readl(GUEST_RIP);
  1130. if (countr_size != 8)
  1131. rip += vmcs_readl(GUEST_CS_BASE);
  1132. n = kvm_read_guest(vcpu, rip, sizeof(inst), &inst);
  1133. for (i = 0; i < n; i++) {
  1134. switch (((u8*)&inst)[i]) {
  1135. case 0xf0:
  1136. case 0xf2:
  1137. case 0xf3:
  1138. case 0x2e:
  1139. case 0x36:
  1140. case 0x3e:
  1141. case 0x26:
  1142. case 0x64:
  1143. case 0x65:
  1144. case 0x66:
  1145. break;
  1146. case 0x67:
  1147. countr_size = (countr_size == 2) ? 4: (countr_size >> 1);
  1148. default:
  1149. goto done;
  1150. }
  1151. }
  1152. return 0;
  1153. done:
  1154. countr_size *= 8;
  1155. *count = vcpu->regs[VCPU_REGS_RCX] & (~0ULL >> (64 - countr_size));
  1156. return 1;
  1157. }
  1158. static int handle_io(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
  1159. {
  1160. u64 exit_qualification;
  1161. ++kvm_stat.io_exits;
  1162. exit_qualification = vmcs_read64(EXIT_QUALIFICATION);
  1163. kvm_run->exit_reason = KVM_EXIT_IO;
  1164. if (exit_qualification & 8)
  1165. kvm_run->io.direction = KVM_EXIT_IO_IN;
  1166. else
  1167. kvm_run->io.direction = KVM_EXIT_IO_OUT;
  1168. kvm_run->io.size = (exit_qualification & 7) + 1;
  1169. kvm_run->io.string = (exit_qualification & 16) != 0;
  1170. kvm_run->io.string_down
  1171. = (vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_DF) != 0;
  1172. kvm_run->io.rep = (exit_qualification & 32) != 0;
  1173. kvm_run->io.port = exit_qualification >> 16;
  1174. if (kvm_run->io.string) {
  1175. if (!get_io_count(vcpu, &kvm_run->io.count))
  1176. return 1;
  1177. kvm_run->io.address = vmcs_readl(GUEST_LINEAR_ADDRESS);
  1178. } else
  1179. kvm_run->io.value = vcpu->regs[VCPU_REGS_RAX]; /* rax */
  1180. return 0;
  1181. }
  1182. static int handle_invlpg(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
  1183. {
  1184. u64 address = vmcs_read64(EXIT_QUALIFICATION);
  1185. int instruction_length = vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
  1186. spin_lock(&vcpu->kvm->lock);
  1187. vcpu->mmu.inval_page(vcpu, address);
  1188. spin_unlock(&vcpu->kvm->lock);
  1189. vmcs_writel(GUEST_RIP, vmcs_readl(GUEST_RIP) + instruction_length);
  1190. return 1;
  1191. }
  1192. static int handle_cr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
  1193. {
  1194. u64 exit_qualification;
  1195. int cr;
  1196. int reg;
  1197. exit_qualification = vmcs_read64(EXIT_QUALIFICATION);
  1198. cr = exit_qualification & 15;
  1199. reg = (exit_qualification >> 8) & 15;
  1200. switch ((exit_qualification >> 4) & 3) {
  1201. case 0: /* mov to cr */
  1202. switch (cr) {
  1203. case 0:
  1204. vcpu_load_rsp_rip(vcpu);
  1205. set_cr0(vcpu, vcpu->regs[reg]);
  1206. skip_emulated_instruction(vcpu);
  1207. return 1;
  1208. case 3:
  1209. vcpu_load_rsp_rip(vcpu);
  1210. set_cr3(vcpu, vcpu->regs[reg]);
  1211. skip_emulated_instruction(vcpu);
  1212. return 1;
  1213. case 4:
  1214. vcpu_load_rsp_rip(vcpu);
  1215. set_cr4(vcpu, vcpu->regs[reg]);
  1216. skip_emulated_instruction(vcpu);
  1217. return 1;
  1218. case 8:
  1219. vcpu_load_rsp_rip(vcpu);
  1220. set_cr8(vcpu, vcpu->regs[reg]);
  1221. skip_emulated_instruction(vcpu);
  1222. return 1;
  1223. };
  1224. break;
  1225. case 1: /*mov from cr*/
  1226. switch (cr) {
  1227. case 3:
  1228. vcpu_load_rsp_rip(vcpu);
  1229. vcpu->regs[reg] = vcpu->cr3;
  1230. vcpu_put_rsp_rip(vcpu);
  1231. skip_emulated_instruction(vcpu);
  1232. return 1;
  1233. case 8:
  1234. printk(KERN_DEBUG "handle_cr: read CR8 "
  1235. "cpu erratum AA15\n");
  1236. vcpu_load_rsp_rip(vcpu);
  1237. vcpu->regs[reg] = vcpu->cr8;
  1238. vcpu_put_rsp_rip(vcpu);
  1239. skip_emulated_instruction(vcpu);
  1240. return 1;
  1241. }
  1242. break;
  1243. case 3: /* lmsw */
  1244. lmsw(vcpu, (exit_qualification >> LMSW_SOURCE_DATA_SHIFT) & 0x0f);
  1245. skip_emulated_instruction(vcpu);
  1246. return 1;
  1247. default:
  1248. break;
  1249. }
  1250. kvm_run->exit_reason = 0;
  1251. printk(KERN_ERR "kvm: unhandled control register: op %d cr %d\n",
  1252. (int)(exit_qualification >> 4) & 3, cr);
  1253. return 0;
  1254. }
  1255. static int handle_dr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
  1256. {
  1257. u64 exit_qualification;
  1258. unsigned long val;
  1259. int dr, reg;
  1260. /*
  1261. * FIXME: this code assumes the host is debugging the guest.
  1262. * need to deal with guest debugging itself too.
  1263. */
  1264. exit_qualification = vmcs_read64(EXIT_QUALIFICATION);
  1265. dr = exit_qualification & 7;
  1266. reg = (exit_qualification >> 8) & 15;
  1267. vcpu_load_rsp_rip(vcpu);
  1268. if (exit_qualification & 16) {
  1269. /* mov from dr */
  1270. switch (dr) {
  1271. case 6:
  1272. val = 0xffff0ff0;
  1273. break;
  1274. case 7:
  1275. val = 0x400;
  1276. break;
  1277. default:
  1278. val = 0;
  1279. }
  1280. vcpu->regs[reg] = val;
  1281. } else {
  1282. /* mov to dr */
  1283. }
  1284. vcpu_put_rsp_rip(vcpu);
  1285. skip_emulated_instruction(vcpu);
  1286. return 1;
  1287. }
  1288. static int handle_cpuid(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
  1289. {
  1290. kvm_run->exit_reason = KVM_EXIT_CPUID;
  1291. return 0;
  1292. }
  1293. static int handle_rdmsr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
  1294. {
  1295. u32 ecx = vcpu->regs[VCPU_REGS_RCX];
  1296. u64 data;
  1297. if (vmx_get_msr(vcpu, ecx, &data)) {
  1298. vmx_inject_gp(vcpu, 0);
  1299. return 1;
  1300. }
  1301. /* FIXME: handling of bits 32:63 of rax, rdx */
  1302. vcpu->regs[VCPU_REGS_RAX] = data & -1u;
  1303. vcpu->regs[VCPU_REGS_RDX] = (data >> 32) & -1u;
  1304. skip_emulated_instruction(vcpu);
  1305. return 1;
  1306. }
  1307. static int handle_wrmsr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
  1308. {
  1309. u32 ecx = vcpu->regs[VCPU_REGS_RCX];
  1310. u64 data = (vcpu->regs[VCPU_REGS_RAX] & -1u)
  1311. | ((u64)(vcpu->regs[VCPU_REGS_RDX] & -1u) << 32);
  1312. if (vmx_set_msr(vcpu, ecx, data) != 0) {
  1313. vmx_inject_gp(vcpu, 0);
  1314. return 1;
  1315. }
  1316. skip_emulated_instruction(vcpu);
  1317. return 1;
  1318. }
  1319. static int handle_interrupt_window(struct kvm_vcpu *vcpu,
  1320. struct kvm_run *kvm_run)
  1321. {
  1322. /* Turn off interrupt window reporting. */
  1323. vmcs_write32(CPU_BASED_VM_EXEC_CONTROL,
  1324. vmcs_read32(CPU_BASED_VM_EXEC_CONTROL)
  1325. & ~CPU_BASED_VIRTUAL_INTR_PENDING);
  1326. return 1;
  1327. }
  1328. static int handle_halt(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
  1329. {
  1330. skip_emulated_instruction(vcpu);
  1331. if (vcpu->irq_summary && (vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_IF))
  1332. return 1;
  1333. kvm_run->exit_reason = KVM_EXIT_HLT;
  1334. return 0;
  1335. }
  1336. /*
  1337. * The exit handlers return 1 if the exit was handled fully and guest execution
  1338. * may resume. Otherwise they set the kvm_run parameter to indicate what needs
  1339. * to be done to userspace and return 0.
  1340. */
  1341. static int (*kvm_vmx_exit_handlers[])(struct kvm_vcpu *vcpu,
  1342. struct kvm_run *kvm_run) = {
  1343. [EXIT_REASON_EXCEPTION_NMI] = handle_exception,
  1344. [EXIT_REASON_EXTERNAL_INTERRUPT] = handle_external_interrupt,
  1345. [EXIT_REASON_IO_INSTRUCTION] = handle_io,
  1346. [EXIT_REASON_INVLPG] = handle_invlpg,
  1347. [EXIT_REASON_CR_ACCESS] = handle_cr,
  1348. [EXIT_REASON_DR_ACCESS] = handle_dr,
  1349. [EXIT_REASON_CPUID] = handle_cpuid,
  1350. [EXIT_REASON_MSR_READ] = handle_rdmsr,
  1351. [EXIT_REASON_MSR_WRITE] = handle_wrmsr,
  1352. [EXIT_REASON_PENDING_INTERRUPT] = handle_interrupt_window,
  1353. [EXIT_REASON_HLT] = handle_halt,
  1354. };
  1355. static const int kvm_vmx_max_exit_handlers =
  1356. sizeof(kvm_vmx_exit_handlers) / sizeof(*kvm_vmx_exit_handlers);
  1357. /*
  1358. * The guest has exited. See if we can fix it or if we need userspace
  1359. * assistance.
  1360. */
  1361. static int kvm_handle_exit(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
  1362. {
  1363. u32 vectoring_info = vmcs_read32(IDT_VECTORING_INFO_FIELD);
  1364. u32 exit_reason = vmcs_read32(VM_EXIT_REASON);
  1365. if ( (vectoring_info & VECTORING_INFO_VALID_MASK) &&
  1366. exit_reason != EXIT_REASON_EXCEPTION_NMI )
  1367. printk(KERN_WARNING "%s: unexpected, valid vectoring info and "
  1368. "exit reason is 0x%x\n", __FUNCTION__, exit_reason);
  1369. kvm_run->instruction_length = vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
  1370. if (exit_reason < kvm_vmx_max_exit_handlers
  1371. && kvm_vmx_exit_handlers[exit_reason])
  1372. return kvm_vmx_exit_handlers[exit_reason](vcpu, kvm_run);
  1373. else {
  1374. kvm_run->exit_reason = KVM_EXIT_UNKNOWN;
  1375. kvm_run->hw.hardware_exit_reason = exit_reason;
  1376. }
  1377. return 0;
  1378. }
  1379. static int vmx_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
  1380. {
  1381. u8 fail;
  1382. u16 fs_sel, gs_sel, ldt_sel;
  1383. int fs_gs_ldt_reload_needed;
  1384. again:
  1385. /*
  1386. * Set host fs and gs selectors. Unfortunately, 22.2.3 does not
  1387. * allow segment selectors with cpl > 0 or ti == 1.
  1388. */
  1389. fs_sel = read_fs();
  1390. gs_sel = read_gs();
  1391. ldt_sel = read_ldt();
  1392. fs_gs_ldt_reload_needed = (fs_sel & 7) | (gs_sel & 7) | ldt_sel;
        if (!fs_gs_ldt_reload_needed) {
                vmcs_write16(HOST_FS_SELECTOR, fs_sel);
                vmcs_write16(HOST_GS_SELECTOR, gs_sel);
        } else {
                vmcs_write16(HOST_FS_SELECTOR, 0);
                vmcs_write16(HOST_GS_SELECTOR, 0);
        }

#ifdef CONFIG_X86_64
        vmcs_writel(HOST_FS_BASE, read_msr(MSR_FS_BASE));
        vmcs_writel(HOST_GS_BASE, read_msr(MSR_GS_BASE));
#else
        vmcs_writel(HOST_FS_BASE, segment_base(fs_sel));
        vmcs_writel(HOST_GS_BASE, segment_base(gs_sel));
#endif

        if (vcpu->irq_summary &&
            !(vmcs_read32(VM_ENTRY_INTR_INFO_FIELD) & INTR_INFO_VALID_MASK))
                kvm_try_inject_irq(vcpu);

        if (vcpu->guest_debug.enabled)
                kvm_guest_debug_pre(vcpu);

        fx_save(vcpu->host_fx_image);
        fx_restore(vcpu->guest_fx_image);
        save_msrs(vcpu->host_msrs, vcpu->nmsrs);
        load_msrs(vcpu->guest_msrs, NR_BAD_MSRS);
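        /*
         * At this point the host FPU state and the first NR_BAD_MSRS MSRs
         * have been swapped for the guest's; the mirror-image restore
         * happens right after the VM exit below.  Presumably the remaining
         * MSRs are switched elsewhere (or by the hardware itself).
         */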
        asm (
                /* Store host registers */
                "pushf \n\t"
#ifdef CONFIG_X86_64
                "push %%rax; push %%rbx; push %%rdx;"
                "push %%rsi; push %%rdi; push %%rbp;"
                "push %%r8;  push %%r9;  push %%r10; push %%r11;"
                "push %%r12; push %%r13; push %%r14; push %%r15;"
                "push %%rcx \n\t"
                ASM_VMX_VMWRITE_RSP_RDX "\n\t"
#else
                "pusha; push %%ecx \n\t"
                ASM_VMX_VMWRITE_RSP_RDX "\n\t"
#endif
                /* Check if vmlaunch or vmresume is needed */
                "cmp $0, %1 \n\t"
                /* Load guest registers.  Don't clobber flags. */
#ifdef CONFIG_X86_64
                "mov %c[cr2](%3), %%rax \n\t"
                "mov %%rax, %%cr2 \n\t"
                "mov %c[rax](%3), %%rax \n\t"
                "mov %c[rbx](%3), %%rbx \n\t"
                "mov %c[rdx](%3), %%rdx \n\t"
                "mov %c[rsi](%3), %%rsi \n\t"
                "mov %c[rdi](%3), %%rdi \n\t"
                "mov %c[rbp](%3), %%rbp \n\t"
                "mov %c[r8](%3),  %%r8  \n\t"
                "mov %c[r9](%3),  %%r9  \n\t"
                "mov %c[r10](%3), %%r10 \n\t"
                "mov %c[r11](%3), %%r11 \n\t"
                "mov %c[r12](%3), %%r12 \n\t"
                "mov %c[r13](%3), %%r13 \n\t"
                "mov %c[r14](%3), %%r14 \n\t"
                "mov %c[r15](%3), %%r15 \n\t"
                "mov %c[rcx](%3), %%rcx \n\t" /* kills %3 (rcx) */
#else
                "mov %c[cr2](%3), %%eax \n\t"
                "mov %%eax, %%cr2 \n\t"
                "mov %c[rax](%3), %%eax \n\t"
                "mov %c[rbx](%3), %%ebx \n\t"
                "mov %c[rdx](%3), %%edx \n\t"
                "mov %c[rsi](%3), %%esi \n\t"
                "mov %c[rdi](%3), %%edi \n\t"
                "mov %c[rbp](%3), %%ebp \n\t"
                "mov %c[rcx](%3), %%ecx \n\t" /* kills %3 (ecx) */
#endif
                /* Enter guest mode */
                "jne launched \n\t"
                ASM_VMX_VMLAUNCH "\n\t"
                "jmp kvm_vmx_return \n\t"
                "launched: " ASM_VMX_VMRESUME "\n\t"
                ".globl kvm_vmx_return \n\t"
                "kvm_vmx_return: "
                /* Save guest registers, load host registers, keep flags */
#ifdef CONFIG_X86_64
                "xchg %3, 0(%%rsp) \n\t"
                "mov %%rax, %c[rax](%3) \n\t"
                "mov %%rbx, %c[rbx](%3) \n\t"
                "pushq 0(%%rsp); popq %c[rcx](%3) \n\t"
                "mov %%rdx, %c[rdx](%3) \n\t"
                "mov %%rsi, %c[rsi](%3) \n\t"
                "mov %%rdi, %c[rdi](%3) \n\t"
                "mov %%rbp, %c[rbp](%3) \n\t"
                "mov %%r8,  %c[r8](%3) \n\t"
                "mov %%r9,  %c[r9](%3) \n\t"
                "mov %%r10, %c[r10](%3) \n\t"
                "mov %%r11, %c[r11](%3) \n\t"
                "mov %%r12, %c[r12](%3) \n\t"
                "mov %%r13, %c[r13](%3) \n\t"
                "mov %%r14, %c[r14](%3) \n\t"
                "mov %%r15, %c[r15](%3) \n\t"
                "mov %%cr2, %%rax \n\t"
                "mov %%rax, %c[cr2](%3) \n\t"
                "mov 0(%%rsp), %3 \n\t"

                "pop %%rcx; pop %%r15; pop %%r14; pop %%r13; pop %%r12;"
                "pop %%r11; pop %%r10; pop %%r9; pop %%r8;"
                "pop %%rbp; pop %%rdi; pop %%rsi;"
                "pop %%rdx; pop %%rbx; pop %%rax \n\t"
#else
                "xchg %3, 0(%%esp) \n\t"
                "mov %%eax, %c[rax](%3) \n\t"
                "mov %%ebx, %c[rbx](%3) \n\t"
                "pushl 0(%%esp); popl %c[rcx](%3) \n\t"
                "mov %%edx, %c[rdx](%3) \n\t"
                "mov %%esi, %c[rsi](%3) \n\t"
                "mov %%edi, %c[rdi](%3) \n\t"
                "mov %%ebp, %c[rbp](%3) \n\t"
                "mov %%cr2, %%eax \n\t"
                "mov %%eax, %c[cr2](%3) \n\t"
                "mov 0(%%esp), %3 \n\t"

                "pop %%ecx; popa \n\t"
#endif
  1508. "setbe %0 \n\t"
  1509. "popf \n\t"
  1510. : "=g" (fail)
  1511. : "r"(vcpu->launched), "d"((unsigned long)HOST_RSP),
  1512. "c"(vcpu),
  1513. [rax]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_RAX])),
  1514. [rbx]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_RBX])),
  1515. [rcx]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_RCX])),
  1516. [rdx]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_RDX])),
  1517. [rsi]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_RSI])),
  1518. [rdi]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_RDI])),
  1519. [rbp]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_RBP])),
  1520. #ifdef CONFIG_X86_64
  1521. [r8 ]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R8 ])),
  1522. [r9 ]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R9 ])),
  1523. [r10]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R10])),
  1524. [r11]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R11])),
  1525. [r12]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R12])),
  1526. [r13]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R13])),
  1527. [r14]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R14])),
  1528. [r15]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R15])),
  1529. #endif
  1530. [cr2]"i"(offsetof(struct kvm_vcpu, cr2))
  1531. : "cc", "memory" );
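
        /*
         * "fail" is derived from the flags left by VMLAUNCH/VMRESUME: an
         * entry that fails without ever reaching the guest reports
         * VMfailInvalid/VMfailValid through CF/ZF, which "setbe" folds into
         * a single byte.  The error number itself is fetched from
         * VM_INSTRUCTION_ERROR below when fail is set.
         */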

        ++kvm_stat.exits;

        save_msrs(vcpu->guest_msrs, NR_BAD_MSRS);
        load_msrs(vcpu->host_msrs, NR_BAD_MSRS);

        fx_save(vcpu->guest_fx_image);
        fx_restore(vcpu->host_fx_image);

#ifndef CONFIG_X86_64
        asm ("mov %0, %%ds; mov %0, %%es" : : "r"(__USER_DS));
#endif

        kvm_run->exit_type = 0;
        if (fail) {
                kvm_run->exit_type = KVM_EXIT_TYPE_FAIL_ENTRY;
                kvm_run->exit_reason = vmcs_read32(VM_INSTRUCTION_ERROR);
        } else {
                if (fs_gs_ldt_reload_needed) {
                        load_ldt(ldt_sel);
                        load_fs(fs_sel);
                        /*
                         * If we have to reload gs, we must take care to
                         * preserve our gs base.
                         */
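                        /*
                         * (On x86_64, writing %gs clobbers the GS base that
                         * the kernel uses for per-CPU data, so the MSR is
                         * restored from the VMCS host-state area with
                         * interrupts off to keep an interrupt handler from
                         * running on a stale base.)
                         */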
                        local_irq_disable();
                        load_gs(gs_sel);
#ifdef CONFIG_X86_64
                        wrmsrl(MSR_GS_BASE, vmcs_readl(HOST_GS_BASE));
#endif
                        local_irq_enable();

                        reload_tss();
                }
                vcpu->launched = 1;
                kvm_run->exit_type = KVM_EXIT_TYPE_VM_EXIT;
                if (kvm_handle_exit(kvm_run, vcpu)) {
                        /* Give the scheduler a chance to reschedule. */
                        if (signal_pending(current)) {
                                ++kvm_stat.signal_exits;
                                return -EINTR;
                        }
                        kvm_resched(vcpu);
                        goto again;
                }
        }
        return 0;
}

static void vmx_flush_tlb(struct kvm_vcpu *vcpu)
{
        vmcs_writel(GUEST_CR3, vmcs_readl(GUEST_CR3));
}

static void vmx_inject_page_fault(struct kvm_vcpu *vcpu,
                                  unsigned long addr,
                                  u32 err_code)
{
        u32 vect_info = vmcs_read32(IDT_VECTORING_INFO_FIELD);

        ++kvm_stat.pf_guest;

        if (is_page_fault(vect_info)) {
                printk(KERN_DEBUG "inject_page_fault: "
                       "double fault 0x%lx @ 0x%lx\n",
                       addr, vmcs_readl(GUEST_RIP));
                vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE, 0);
                vmcs_write32(VM_ENTRY_INTR_INFO_FIELD,
                             DF_VECTOR |
                             INTR_TYPE_EXCEPTION |
                             INTR_INFO_DELIEVER_CODE_MASK |
                             INTR_INFO_VALID_MASK);
                return;
        }
        vcpu->cr2 = addr;
        vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE, err_code);
        vmcs_write32(VM_ENTRY_INTR_INFO_FIELD,
                     PF_VECTOR |
                     INTR_TYPE_EXCEPTION |
                     INTR_INFO_DELIEVER_CODE_MASK |
                     INTR_INFO_VALID_MASK);
}

static void vmx_free_vmcs(struct kvm_vcpu *vcpu)
{
        if (vcpu->vmcs) {
                on_each_cpu(__vcpu_clear, vcpu, 0, 1);
                free_vmcs(vcpu->vmcs);
                vcpu->vmcs = NULL;
        }
}

static void vmx_free_vcpu(struct kvm_vcpu *vcpu)
{
        vmx_free_vmcs(vcpu);
}

static int vmx_create_vcpu(struct kvm_vcpu *vcpu)
{
        struct vmcs *vmcs;

        vcpu->guest_msrs = kmalloc(PAGE_SIZE, GFP_KERNEL);
        if (!vcpu->guest_msrs)
                return -ENOMEM;

        vcpu->host_msrs = kmalloc(PAGE_SIZE, GFP_KERNEL);
        if (!vcpu->host_msrs)
                goto out_free_guest_msrs;

        vmcs = alloc_vmcs();
        if (!vmcs)
                goto out_free_msrs;

        vmcs_clear(vmcs);
        vcpu->vmcs = vmcs;
        vcpu->launched = 0;
        return 0;

out_free_msrs:
        kfree(vcpu->host_msrs);
        vcpu->host_msrs = NULL;

out_free_guest_msrs:
        kfree(vcpu->guest_msrs);
        vcpu->guest_msrs = NULL;

        return -ENOMEM;
}

static struct kvm_arch_ops vmx_arch_ops = {
        .cpu_has_kvm_support = cpu_has_kvm_support,
        .disabled_by_bios = vmx_disabled_by_bios,
        .hardware_setup = hardware_setup,
        .hardware_unsetup = hardware_unsetup,
        .hardware_enable = hardware_enable,
        .hardware_disable = hardware_disable,

        .vcpu_create = vmx_create_vcpu,
        .vcpu_free = vmx_free_vcpu,
        .vcpu_load = vmx_vcpu_load,
        .vcpu_put = vmx_vcpu_put,

        .set_guest_debug = set_guest_debug,
        .get_msr = vmx_get_msr,
        .set_msr = vmx_set_msr,
        .get_segment_base = vmx_get_segment_base,
        .get_segment = vmx_get_segment,
        .set_segment = vmx_set_segment,
        .get_cs_db_l_bits = vmx_get_cs_db_l_bits,
        .set_cr0 = vmx_set_cr0,
        .set_cr0_no_modeswitch = vmx_set_cr0_no_modeswitch,
        .set_cr3 = vmx_set_cr3,
        .set_cr4 = vmx_set_cr4,
#ifdef CONFIG_X86_64
        .set_efer = vmx_set_efer,
#endif
        .get_idt = vmx_get_idt,
        .set_idt = vmx_set_idt,
        .get_gdt = vmx_get_gdt,
        .set_gdt = vmx_set_gdt,
        .cache_regs = vcpu_load_rsp_rip,
        .decache_regs = vcpu_put_rsp_rip,
        .get_rflags = vmx_get_rflags,
        .set_rflags = vmx_set_rflags,

        .tlb_flush = vmx_flush_tlb,
        .inject_page_fault = vmx_inject_page_fault,
        .inject_gp = vmx_inject_gp,

        .run = vmx_vcpu_run,
        .skip_emulated_instruction = skip_emulated_instruction,
        .vcpu_setup = vmx_vcpu_setup,
};

static int __init vmx_init(void)
{
        return kvm_init_arch(&vmx_arch_ops, THIS_MODULE);
}

static void __exit vmx_exit(void)
{
        kvm_exit_arch();
}

module_init(vmx_init)
module_exit(vmx_exit)