svm.c
/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * AMD SVM support
 *
 * Copyright (C) 2006 Qumranet, Inc.
 *
 * Authors:
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Avi Kivity   <avi@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#include "kvm_svm.h"
#include "x86_emulate.h"
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/vmalloc.h>
#include <linux/highmem.h>
#include <linux/profile.h>
#include <linux/sched.h>
#include <asm/desc.h>

MODULE_AUTHOR("Qumranet");
MODULE_LICENSE("GPL");

#define IOPM_ALLOC_ORDER 2
#define MSRPM_ALLOC_ORDER 1

#define DB_VECTOR 1
#define UD_VECTOR 6
#define GP_VECTOR 13

#define DR7_GD_MASK (1 << 13)
#define DR6_BD_MASK (1 << 13)

#define SEG_TYPE_LDT 2
#define SEG_TYPE_BUSY_TSS16 3

#define KVM_EFER_LMA (1 << 10)
#define KVM_EFER_LME (1 << 8)

#define SVM_FEATURE_NPT  (1 << 0)
#define SVM_FEATURE_LBRV (1 << 1)
#define SVM_FEATURE_SVML (1 << 2)

static inline struct vcpu_svm *to_svm(struct kvm_vcpu *vcpu)
{
	return container_of(vcpu, struct vcpu_svm, vcpu);
}

unsigned long iopm_base;
unsigned long msrpm_base;

struct kvm_ldttss_desc {
	u16 limit0;
	u16 base0;
	unsigned base1 : 8, type : 5, dpl : 2, p : 1;
	unsigned limit1 : 4, zero0 : 3, g : 1, base2 : 8;
	u32 base3;
	u32 zero1;
} __attribute__((packed));

struct svm_cpu_data {
	int cpu;

	u64 asid_generation;
	u32 max_asid;
	u32 next_asid;
	struct kvm_ldttss_desc *tss_desc;

	struct page *save_area;
};

static DEFINE_PER_CPU(struct svm_cpu_data *, svm_data);
static uint32_t svm_features;

struct svm_init_data {
	int cpu;
	int r;
};

static u32 msrpm_ranges[] = {0, 0xc0000000, 0xc0010000};

#define NUM_MSR_MAPS ARRAY_SIZE(msrpm_ranges)
#define MSRS_RANGE_SIZE 2048
#define MSRS_IN_RANGE (MSRS_RANGE_SIZE * 8 / 2)
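/*
 * Each MSR in the permission map takes two bits (one read-intercept
 * bit, one write-intercept bit), so a 2048-byte range covers
 * 2048 * 8 / 2 = 8192 MSRs.
 */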

#define MAX_INST_SIZE 15

static inline u32 svm_has(u32 feat)
{
	return svm_features & feat;
}
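
/*
 * Returns the current address size in bytes: 2 outside of protected
 * mode (real mode or vm86), otherwise 8 for a long-mode code segment,
 * 4 for a 32-bit segment (D/B set) and 2 for a 16-bit one.
 */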
static unsigned get_addr_size(struct kvm_vcpu *vcpu)
{
	struct vmcb_save_area *sa = &to_svm(vcpu)->vmcb->save;
	u16 cs_attrib;

	if (!(sa->cr0 & X86_CR0_PE) || (sa->rflags & X86_EFLAGS_VM))
		return 2;

	cs_attrib = sa->cs.attrib;

	return (cs_attrib & SVM_SELECTOR_L_MASK) ? 8 :
		(cs_attrib & SVM_SELECTOR_DB_MASK) ? 4 : 2;
}

static inline u8 pop_irq(struct kvm_vcpu *vcpu)
{
	int word_index = __ffs(vcpu->irq_summary);
	int bit_index = __ffs(vcpu->irq_pending[word_index]);
	int irq = word_index * BITS_PER_LONG + bit_index;

	clear_bit(bit_index, &vcpu->irq_pending[word_index]);
	if (!vcpu->irq_pending[word_index])
		clear_bit(word_index, &vcpu->irq_summary);
	return irq;
}

static inline void push_irq(struct kvm_vcpu *vcpu, u8 irq)
{
	set_bit(irq, vcpu->irq_pending);
	set_bit(irq / BITS_PER_LONG, &vcpu->irq_summary);
}

static inline void clgi(void)
{
	asm volatile (SVM_CLGI);
}

static inline void stgi(void)
{
	asm volatile (SVM_STGI);
}

static inline void invlpga(unsigned long addr, u32 asid)
{
	asm volatile (SVM_INVLPGA :: "a"(addr), "c"(asid));
}

static inline unsigned long kvm_read_cr2(void)
{
	unsigned long cr2;

	asm volatile ("mov %%cr2, %0" : "=r" (cr2));
	return cr2;
}

static inline void kvm_write_cr2(unsigned long val)
{
	asm volatile ("mov %0, %%cr2" :: "r" (val));
}

static inline unsigned long read_dr6(void)
{
	unsigned long dr6;

	asm volatile ("mov %%dr6, %0" : "=r" (dr6));
	return dr6;
}

static inline void write_dr6(unsigned long val)
{
	asm volatile ("mov %0, %%dr6" :: "r" (val));
}

static inline unsigned long read_dr7(void)
{
	unsigned long dr7;

	asm volatile ("mov %%dr7, %0" : "=r" (dr7));
	return dr7;
}

static inline void write_dr7(unsigned long val)
{
	asm volatile ("mov %0, %%dr7" :: "r" (val));
}

static inline void force_new_asid(struct kvm_vcpu *vcpu)
{
	to_svm(vcpu)->asid_generation--;
}

static inline void flush_guest_tlb(struct kvm_vcpu *vcpu)
{
	force_new_asid(vcpu);
}

static void svm_set_efer(struct kvm_vcpu *vcpu, u64 efer)
{
	if (!(efer & KVM_EFER_LMA))
		efer &= ~KVM_EFER_LME;

	to_svm(vcpu)->vmcb->save.efer = efer | MSR_EFER_SVME_MASK;
	vcpu->shadow_efer = efer;
}

static void svm_inject_gp(struct kvm_vcpu *vcpu, unsigned error_code)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	svm->vmcb->control.event_inj =	SVM_EVTINJ_VALID |
					SVM_EVTINJ_VALID_ERR |
					SVM_EVTINJ_TYPE_EXEPT |
					GP_VECTOR;
	svm->vmcb->control.event_inj_err = error_code;
}

static void inject_ud(struct kvm_vcpu *vcpu)
{
	to_svm(vcpu)->vmcb->control.event_inj = SVM_EVTINJ_VALID |
						SVM_EVTINJ_TYPE_EXEPT |
						UD_VECTOR;
}

static int is_page_fault(uint32_t info)
{
	info &= SVM_EVTINJ_VEC_MASK | SVM_EVTINJ_TYPE_MASK | SVM_EVTINJ_VALID;
	return info == (PF_VECTOR | SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_EXEPT);
}

static int is_external_interrupt(u32 info)
{
	info &= SVM_EVTINJ_TYPE_MASK | SVM_EVTINJ_VALID;
	return info == (SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_INTR);
}

static void skip_emulated_instruction(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	if (!svm->next_rip) {
		printk(KERN_DEBUG "%s: NOP\n", __FUNCTION__);
		return;
	}
	if (svm->next_rip - svm->vmcb->save.rip > MAX_INST_SIZE) {
		printk(KERN_ERR "%s: ip 0x%llx next 0x%llx\n",
		       __FUNCTION__,
		       svm->vmcb->save.rip,
		       svm->next_rip);
	}

	vcpu->rip = svm->vmcb->save.rip = svm->next_rip;
	svm->vmcb->control.int_state &= ~SVM_INTERRUPT_SHADOW_MASK;

	vcpu->interrupt_window_open = 1;
}

static int has_svm(void)
{
	uint32_t eax, ebx, ecx, edx;

	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD) {
		printk(KERN_INFO "has_svm: not amd\n");
		return 0;
	}

	cpuid(0x80000000, &eax, &ebx, &ecx, &edx);
	if (eax < SVM_CPUID_FUNC) {
		printk(KERN_INFO "has_svm: can't execute cpuid_8000000a\n");
		return 0;
	}

	cpuid(0x80000001, &eax, &ebx, &ecx, &edx);
	if (!(ecx & (1 << SVM_CPUID_FEATURE_SHIFT))) {
		printk(KERN_DEBUG "has_svm: svm not available\n");
		return 0;
	}
	return 1;
}

static void svm_hardware_disable(void *garbage)
{
	struct svm_cpu_data *svm_data
		= per_cpu(svm_data, raw_smp_processor_id());

	if (svm_data) {
		uint64_t efer;

		wrmsrl(MSR_VM_HSAVE_PA, 0);
		rdmsrl(MSR_EFER, efer);
		wrmsrl(MSR_EFER, efer & ~MSR_EFER_SVME_MASK);
		per_cpu(svm_data, raw_smp_processor_id()) = NULL;
		__free_page(svm_data->save_area);
		kfree(svm_data);
	}
}

static void svm_hardware_enable(void *garbage)
{
	struct svm_cpu_data *svm_data;
	uint64_t efer;
#ifdef CONFIG_X86_64
	struct desc_ptr gdt_descr;
#else
	struct Xgt_desc_struct gdt_descr;
#endif
	struct desc_struct *gdt;
	int me = raw_smp_processor_id();

	if (!has_svm()) {
		printk(KERN_ERR "svm_cpu_init: err EOPNOTSUPP on %d\n", me);
		return;
	}
	svm_data = per_cpu(svm_data, me);

	if (!svm_data) {
		printk(KERN_ERR "svm_cpu_init: svm_data is NULL on %d\n",
		       me);
		return;
	}

	svm_data->asid_generation = 1;
	svm_data->max_asid = cpuid_ebx(SVM_CPUID_FUNC) - 1;
	svm_data->next_asid = svm_data->max_asid + 1;
	svm_features = cpuid_edx(SVM_CPUID_FUNC);

	asm volatile ("sgdt %0" : "=m"(gdt_descr));
	gdt = (struct desc_struct *)gdt_descr.address;
	svm_data->tss_desc = (struct kvm_ldttss_desc *)(gdt + GDT_ENTRY_TSS);

	rdmsrl(MSR_EFER, efer);
	wrmsrl(MSR_EFER, efer | MSR_EFER_SVME_MASK);

	wrmsrl(MSR_VM_HSAVE_PA,
	       page_to_pfn(svm_data->save_area) << PAGE_SHIFT);
}

static int svm_cpu_init(int cpu)
{
	struct svm_cpu_data *svm_data;
	int r;

	svm_data = kzalloc(sizeof(struct svm_cpu_data), GFP_KERNEL);
	if (!svm_data)
		return -ENOMEM;
	svm_data->cpu = cpu;
	svm_data->save_area = alloc_page(GFP_KERNEL);
	r = -ENOMEM;
	if (!svm_data->save_area)
		goto err_1;

	per_cpu(svm_data, cpu) = svm_data;

	return 0;

err_1:
	kfree(svm_data);
	return r;
}
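
/*
 * Set the read/write intercept bits for @msr in the permission map.
 * Worked example: for MSR_K6_STAR (0xc0000000, first MSR of the second
 * range), msr_offset is (1 * 8192 + 0) * 2 = 16384 bits, i.e. bit 0 of
 * the u32 at index 16384 / 32 = 512 into the map.
 */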
static int set_msr_interception(u32 *msrpm, unsigned msr,
				int read, int write)
{
	int i;

	for (i = 0; i < NUM_MSR_MAPS; i++) {
		if (msr >= msrpm_ranges[i] &&
		    msr < msrpm_ranges[i] + MSRS_IN_RANGE) {
			u32 msr_offset = (i * MSRS_IN_RANGE + msr -
					  msrpm_ranges[i]) * 2;

			u32 *base = msrpm + (msr_offset / 32);
			u32 msr_shift = msr_offset % 32;
			u32 mask = ((write) ? 0 : 2) | ((read) ? 0 : 1);
			*base = (*base & ~(0x3 << msr_shift)) |
				(mask << msr_shift);
			return 1;
		}
	}
	printk(KERN_DEBUG "%s: not found 0x%x\n", __FUNCTION__, msr);
	return 0;
}

static __init int svm_hardware_setup(void)
{
	int cpu;
	struct page *iopm_pages;
	struct page *msrpm_pages;
	void *iopm_va, *msrpm_va;
	int r;

	kvm_emulator_want_group7_invlpg();

	iopm_pages = alloc_pages(GFP_KERNEL, IOPM_ALLOC_ORDER);

	if (!iopm_pages)
		return -ENOMEM;

	iopm_va = page_address(iopm_pages);
	memset(iopm_va, 0xff, PAGE_SIZE * (1 << IOPM_ALLOC_ORDER));
	clear_bit(0x80, iopm_va); /* allow direct access to PC debug port */
	iopm_base = page_to_pfn(iopm_pages) << PAGE_SHIFT;

	msrpm_pages = alloc_pages(GFP_KERNEL, MSRPM_ALLOC_ORDER);

	r = -ENOMEM;
	if (!msrpm_pages)
		goto err_1;

	msrpm_va = page_address(msrpm_pages);
	memset(msrpm_va, 0xff, PAGE_SIZE * (1 << MSRPM_ALLOC_ORDER));
	msrpm_base = page_to_pfn(msrpm_pages) << PAGE_SHIFT;

#ifdef CONFIG_X86_64
	set_msr_interception(msrpm_va, MSR_GS_BASE, 1, 1);
	set_msr_interception(msrpm_va, MSR_FS_BASE, 1, 1);
	set_msr_interception(msrpm_va, MSR_KERNEL_GS_BASE, 1, 1);
	set_msr_interception(msrpm_va, MSR_LSTAR, 1, 1);
	set_msr_interception(msrpm_va, MSR_CSTAR, 1, 1);
	set_msr_interception(msrpm_va, MSR_SYSCALL_MASK, 1, 1);
#endif
	set_msr_interception(msrpm_va, MSR_K6_STAR, 1, 1);
	set_msr_interception(msrpm_va, MSR_IA32_SYSENTER_CS, 1, 1);
	set_msr_interception(msrpm_va, MSR_IA32_SYSENTER_ESP, 1, 1);
	set_msr_interception(msrpm_va, MSR_IA32_SYSENTER_EIP, 1, 1);

	for_each_online_cpu(cpu) {
		r = svm_cpu_init(cpu);
		if (r)
			goto err_2;
	}
	return 0;

err_2:
	__free_pages(msrpm_pages, MSRPM_ALLOC_ORDER);
	msrpm_base = 0;
err_1:
	__free_pages(iopm_pages, IOPM_ALLOC_ORDER);
	iopm_base = 0;
	return r;
}

static __exit void svm_hardware_unsetup(void)
{
	__free_pages(pfn_to_page(msrpm_base >> PAGE_SHIFT), MSRPM_ALLOC_ORDER);
	__free_pages(pfn_to_page(iopm_base >> PAGE_SHIFT), IOPM_ALLOC_ORDER);
	iopm_base = msrpm_base = 0;
}

static void init_seg(struct vmcb_seg *seg)
{
	seg->selector = 0;
	seg->attrib = SVM_SELECTOR_P_MASK | SVM_SELECTOR_S_MASK |
		SVM_SELECTOR_WRITE_MASK; /* Read/Write Data Segment */
	seg->limit = 0xffff;
	seg->base = 0;
}

static void init_sys_seg(struct vmcb_seg *seg, uint32_t type)
{
	seg->selector = 0;
	seg->attrib = SVM_SELECTOR_P_MASK | type;
	seg->limit = 0xffff;
	seg->base = 0;
}

static void init_vmcb(struct vmcb *vmcb)
{
	struct vmcb_control_area *control = &vmcb->control;
	struct vmcb_save_area *save = &vmcb->save;

	control->intercept_cr_read =	INTERCEPT_CR0_MASK |
					INTERCEPT_CR3_MASK |
					INTERCEPT_CR4_MASK;

	control->intercept_cr_write =	INTERCEPT_CR0_MASK |
					INTERCEPT_CR3_MASK |
					INTERCEPT_CR4_MASK;

	control->intercept_dr_read =	INTERCEPT_DR0_MASK |
					INTERCEPT_DR1_MASK |
					INTERCEPT_DR2_MASK |
					INTERCEPT_DR3_MASK;

	control->intercept_dr_write =	INTERCEPT_DR0_MASK |
					INTERCEPT_DR1_MASK |
					INTERCEPT_DR2_MASK |
					INTERCEPT_DR3_MASK |
					INTERCEPT_DR5_MASK |
					INTERCEPT_DR7_MASK;

	control->intercept_exceptions = 1 << PF_VECTOR;

	control->intercept =	(1ULL << INTERCEPT_INTR) |
				(1ULL << INTERCEPT_NMI) |
				(1ULL << INTERCEPT_SMI) |
		/*
		 * selective cr0 intercept bug?
		 *	0:   0f 22 d8		mov    %eax,%cr3
		 *	3:   0f 20 c0		mov    %cr0,%eax
		 *	6:   0d 00 00 00 80	or     $0x80000000,%eax
		 *	b:   0f 22 c0		mov    %eax,%cr0
		 * set cr3 -> interception
		 * get cr0 -> interception
		 * set cr0 -> no interception
		 */
		/*		(1ULL << INTERCEPT_SELECTIVE_CR0) | */
				(1ULL << INTERCEPT_CPUID) |
				(1ULL << INTERCEPT_HLT) |
				(1ULL << INTERCEPT_INVLPGA) |
				(1ULL << INTERCEPT_IOIO_PROT) |
				(1ULL << INTERCEPT_MSR_PROT) |
				(1ULL << INTERCEPT_TASK_SWITCH) |
				(1ULL << INTERCEPT_SHUTDOWN) |
				(1ULL << INTERCEPT_VMRUN) |
				(1ULL << INTERCEPT_VMMCALL) |
				(1ULL << INTERCEPT_VMLOAD) |
				(1ULL << INTERCEPT_VMSAVE) |
				(1ULL << INTERCEPT_STGI) |
				(1ULL << INTERCEPT_CLGI) |
				(1ULL << INTERCEPT_SKINIT) |
				(1ULL << INTERCEPT_MONITOR) |
				(1ULL << INTERCEPT_MWAIT);

	control->iopm_base_pa = iopm_base;
	control->msrpm_base_pa = msrpm_base;
	control->tsc_offset = 0;
	control->int_ctl = V_INTR_MASKING_MASK;

	init_seg(&save->es);
	init_seg(&save->ss);
	init_seg(&save->ds);
	init_seg(&save->fs);
	init_seg(&save->gs);

	save->cs.selector = 0xf000;
	/* Executable/Readable Code Segment */
	save->cs.attrib = SVM_SELECTOR_READ_MASK | SVM_SELECTOR_P_MASK |
		SVM_SELECTOR_S_MASK | SVM_SELECTOR_CODE_MASK;
	save->cs.limit = 0xffff;
	/*
	 * cs.base should really be 0xffff0000, but vmx can't handle that, so
	 * be consistent with it.
	 *
	 * Replace when we have real mode working for vmx.
	 */
	save->cs.base = 0xf0000;

	save->gdtr.limit = 0xffff;
	save->idtr.limit = 0xffff;

	init_sys_seg(&save->ldtr, SEG_TYPE_LDT);
	init_sys_seg(&save->tr, SEG_TYPE_BUSY_TSS16);

	save->efer = MSR_EFER_SVME_MASK;

	save->dr6 = 0xffff0ff0;
	save->dr7 = 0x400;
	save->rflags = 2;
	save->rip = 0x0000fff0;
	/*
	 * CR0 at CPU reset should be 0x60000010 (cache disabled); we
	 * enable the CPU cache by default instead.  The proper way is
	 * to enable the cache in the BIOS.
	 */
	save->cr0 = 0x00000010 | X86_CR0_PG | X86_CR0_WP;
	save->cr4 = X86_CR4_PAE;
	/* rdx = ?? */
}

static struct kvm_vcpu *svm_create_vcpu(struct kvm *kvm, unsigned int id)
{
	struct vcpu_svm *svm;
	struct page *page;
	int err;

	svm = kzalloc(sizeof *svm, GFP_KERNEL);
	if (!svm) {
		err = -ENOMEM;
		goto out;
	}

	err = kvm_vcpu_init(&svm->vcpu, kvm, id);
	if (err)
		goto free_svm;

	page = alloc_page(GFP_KERNEL);
	if (!page) {
		err = -ENOMEM;
		goto uninit;
	}

	svm->vmcb = page_address(page);
	clear_page(svm->vmcb);
	svm->vmcb_pa = page_to_pfn(page) << PAGE_SHIFT;
	svm->asid_generation = 0;
	memset(svm->db_regs, 0, sizeof(svm->db_regs));
	init_vmcb(svm->vmcb);

	fx_init(&svm->vcpu);
	svm->vcpu.fpu_active = 1;
	svm->vcpu.apic_base = 0xfee00000 | MSR_IA32_APICBASE_ENABLE;
	if (svm->vcpu.vcpu_id == 0)
		svm->vcpu.apic_base |= MSR_IA32_APICBASE_BSP;

	return &svm->vcpu;

uninit:
	kvm_vcpu_uninit(&svm->vcpu);
free_svm:
	kfree(svm);
out:
	return ERR_PTR(err);
}

static void svm_free_vcpu(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	__free_page(pfn_to_page(svm->vmcb_pa >> PAGE_SHIFT));
	kvm_vcpu_uninit(vcpu);
	kfree(svm);
}

static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	int i;

	if (unlikely(cpu != vcpu->cpu)) {
		u64 tsc_this, delta;

		/*
		 * Make sure that the guest sees a monotonically
		 * increasing TSC.
		 */
		rdtscll(tsc_this);
		delta = vcpu->host_tsc - tsc_this;
		svm->vmcb->control.tsc_offset += delta;
		vcpu->cpu = cpu;
	}

	for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++)
		rdmsrl(host_save_user_msrs[i], svm->host_user_msrs[i]);
}

static void svm_vcpu_put(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	int i;

	for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++)
		wrmsrl(host_save_user_msrs[i], svm->host_user_msrs[i]);

	rdtscll(vcpu->host_tsc);
}

static void svm_vcpu_decache(struct kvm_vcpu *vcpu)
{
}

static void svm_cache_regs(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	vcpu->regs[VCPU_REGS_RAX] = svm->vmcb->save.rax;
	vcpu->regs[VCPU_REGS_RSP] = svm->vmcb->save.rsp;
	vcpu->rip = svm->vmcb->save.rip;
}

static void svm_decache_regs(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	svm->vmcb->save.rax = vcpu->regs[VCPU_REGS_RAX];
	svm->vmcb->save.rsp = vcpu->regs[VCPU_REGS_RSP];
	svm->vmcb->save.rip = vcpu->rip;
}

static unsigned long svm_get_rflags(struct kvm_vcpu *vcpu)
{
	return to_svm(vcpu)->vmcb->save.rflags;
}

static void svm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
{
	to_svm(vcpu)->vmcb->save.rflags = rflags;
}

static struct vmcb_seg *svm_seg(struct kvm_vcpu *vcpu, int seg)
{
	struct vmcb_save_area *save = &to_svm(vcpu)->vmcb->save;

	switch (seg) {
	case VCPU_SREG_CS: return &save->cs;
	case VCPU_SREG_DS: return &save->ds;
	case VCPU_SREG_ES: return &save->es;
	case VCPU_SREG_FS: return &save->fs;
	case VCPU_SREG_GS: return &save->gs;
	case VCPU_SREG_SS: return &save->ss;
	case VCPU_SREG_TR: return &save->tr;
	case VCPU_SREG_LDTR: return &save->ldtr;
	}
	BUG();
	return NULL;
}

static u64 svm_get_segment_base(struct kvm_vcpu *vcpu, int seg)
{
	struct vmcb_seg *s = svm_seg(vcpu, seg);

	return s->base;
}

static void svm_get_segment(struct kvm_vcpu *vcpu,
			    struct kvm_segment *var, int seg)
{
	struct vmcb_seg *s = svm_seg(vcpu, seg);

	var->base = s->base;
	var->limit = s->limit;
	var->selector = s->selector;
	var->type = s->attrib & SVM_SELECTOR_TYPE_MASK;
	var->s = (s->attrib >> SVM_SELECTOR_S_SHIFT) & 1;
	var->dpl = (s->attrib >> SVM_SELECTOR_DPL_SHIFT) & 3;
	var->present = (s->attrib >> SVM_SELECTOR_P_SHIFT) & 1;
	var->avl = (s->attrib >> SVM_SELECTOR_AVL_SHIFT) & 1;
	var->l = (s->attrib >> SVM_SELECTOR_L_SHIFT) & 1;
	var->db = (s->attrib >> SVM_SELECTOR_DB_SHIFT) & 1;
	var->g = (s->attrib >> SVM_SELECTOR_G_SHIFT) & 1;
	var->unusable = !var->present;
}

static void svm_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l)
{
	struct vmcb_seg *s = svm_seg(vcpu, VCPU_SREG_CS);

	*db = (s->attrib >> SVM_SELECTOR_DB_SHIFT) & 1;
	*l = (s->attrib >> SVM_SELECTOR_L_SHIFT) & 1;
}

static void svm_get_idt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	dt->limit = svm->vmcb->save.idtr.limit;
	dt->base = svm->vmcb->save.idtr.base;
}

static void svm_set_idt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	svm->vmcb->save.idtr.limit = dt->limit;
	svm->vmcb->save.idtr.base = dt->base;
}

static void svm_get_gdt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	dt->limit = svm->vmcb->save.gdtr.limit;
	dt->base = svm->vmcb->save.gdtr.base;
}

static void svm_set_gdt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	svm->vmcb->save.gdtr.limit = dt->limit;
	svm->vmcb->save.gdtr.base = dt->base;
}

static void svm_decache_cr4_guest_bits(struct kvm_vcpu *vcpu)
{
}

static void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
{
	struct vcpu_svm *svm = to_svm(vcpu);

#ifdef CONFIG_X86_64
	if (vcpu->shadow_efer & KVM_EFER_LME) {
		if (!is_paging(vcpu) && (cr0 & X86_CR0_PG)) {
			vcpu->shadow_efer |= KVM_EFER_LMA;
			svm->vmcb->save.efer |= KVM_EFER_LMA | KVM_EFER_LME;
		}

		if (is_paging(vcpu) && !(cr0 & X86_CR0_PG)) {
			vcpu->shadow_efer &= ~KVM_EFER_LMA;
			svm->vmcb->save.efer &= ~(KVM_EFER_LMA | KVM_EFER_LME);
		}
	}
#endif
	if ((vcpu->cr0 & X86_CR0_TS) && !(cr0 & X86_CR0_TS)) {
		svm->vmcb->control.intercept_exceptions &= ~(1 << NM_VECTOR);
		vcpu->fpu_active = 1;
	}

	vcpu->cr0 = cr0;
	cr0 |= X86_CR0_PG | X86_CR0_WP;
	cr0 &= ~(X86_CR0_CD | X86_CR0_NW);
	svm->vmcb->save.cr0 = cr0;
}

static void svm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
{
	vcpu->cr4 = cr4;
	to_svm(vcpu)->vmcb->save.cr4 = cr4 | X86_CR4_PAE;
}

static void svm_set_segment(struct kvm_vcpu *vcpu,
			    struct kvm_segment *var, int seg)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	struct vmcb_seg *s = svm_seg(vcpu, seg);

	s->base = var->base;
	s->limit = var->limit;
	s->selector = var->selector;
	if (var->unusable)
		s->attrib = 0;
	else {
		s->attrib = (var->type & SVM_SELECTOR_TYPE_MASK);
		s->attrib |= (var->s & 1) << SVM_SELECTOR_S_SHIFT;
		s->attrib |= (var->dpl & 3) << SVM_SELECTOR_DPL_SHIFT;
		s->attrib |= (var->present & 1) << SVM_SELECTOR_P_SHIFT;
		s->attrib |= (var->avl & 1) << SVM_SELECTOR_AVL_SHIFT;
		s->attrib |= (var->l & 1) << SVM_SELECTOR_L_SHIFT;
		s->attrib |= (var->db & 1) << SVM_SELECTOR_DB_SHIFT;
		s->attrib |= (var->g & 1) << SVM_SELECTOR_G_SHIFT;
	}
	if (seg == VCPU_SREG_CS)
		svm->vmcb->save.cpl
			= (svm->vmcb->save.cs.attrib
			   >> SVM_SELECTOR_DPL_SHIFT) & 3;
}

/* FIXME:

	svm(vcpu)->vmcb->control.int_ctl &= ~V_TPR_MASK;
	svm(vcpu)->vmcb->control.int_ctl |= (sregs->cr8 & V_TPR_MASK);

*/

static int svm_guest_debug(struct kvm_vcpu *vcpu, struct kvm_debug_guest *dbg)
{
	return -EOPNOTSUPP;
}

static void load_host_msrs(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_X86_64
	wrmsrl(MSR_GS_BASE, to_svm(vcpu)->host_gs_base);
#endif
}

static void save_host_msrs(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_X86_64
	rdmsrl(MSR_GS_BASE, to_svm(vcpu)->host_gs_base);
#endif
}
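
/*
 * Hand out the next free ASID for this vcpu.  When the per-cpu pool is
 * exhausted, bump the generation counter, flush all ASIDs and start
 * handing them out again from 1; vcpus whose cached generation no
 * longer matches pick up a fresh ASID in pre_svm_run().
 */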
static void new_asid(struct kvm_vcpu *vcpu, struct svm_cpu_data *svm_data)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	if (svm_data->next_asid > svm_data->max_asid) {
		++svm_data->asid_generation;
		svm_data->next_asid = 1;
		svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ALL_ASID;
	}

	vcpu->cpu = svm_data->cpu;
	svm->asid_generation = svm_data->asid_generation;
	svm->vmcb->control.asid = svm_data->next_asid++;
}

static void svm_invlpg(struct kvm_vcpu *vcpu, gva_t address)
{
	invlpga(address, to_svm(vcpu)->vmcb->control.asid); /* is needed? */
}

static unsigned long svm_get_dr(struct kvm_vcpu *vcpu, int dr)
{
	return to_svm(vcpu)->db_regs[dr];
}
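
/*
 * If DR7.GD (general detect) is set, any debug-register access raises
 * #DB with DR6.BD, so report that back instead of writing anything.
 */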
static void svm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long value,
		       int *exception)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	*exception = 0;

	if (svm->vmcb->save.dr7 & DR7_GD_MASK) {
		svm->vmcb->save.dr7 &= ~DR7_GD_MASK;
		svm->vmcb->save.dr6 |= DR6_BD_MASK;
		*exception = DB_VECTOR;
		return;
	}

	switch (dr) {
	case 0 ... 3:
		svm->db_regs[dr] = value;
		return;
	case 4 ... 5:
		if (vcpu->cr4 & X86_CR4_DE) {
			*exception = UD_VECTOR;
			return;
		}
	case 7: {
		if (value & ~((1ULL << 32) - 1)) {
			*exception = GP_VECTOR;
			return;
		}
		svm->vmcb->save.dr7 = value;
		return;
	}
	default:
		printk(KERN_DEBUG "%s: unexpected dr %u\n",
		       __FUNCTION__, dr);
		*exception = UD_VECTOR;
		return;
	}
}

static int pf_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	u32 exit_int_info = svm->vmcb->control.exit_int_info;
	u64 fault_address;
	u32 error_code;
	enum emulation_result er;
	int r;

	if (is_external_interrupt(exit_int_info))
		push_irq(vcpu, exit_int_info & SVM_EVTINJ_VEC_MASK);

	spin_lock(&vcpu->kvm->lock);

	fault_address = svm->vmcb->control.exit_info_2;
	error_code = svm->vmcb->control.exit_info_1;
	r = kvm_mmu_page_fault(vcpu, fault_address, error_code);
	if (r < 0) {
		spin_unlock(&vcpu->kvm->lock);
		return r;
	}
	if (!r) {
		spin_unlock(&vcpu->kvm->lock);
		return 1;
	}
	er = emulate_instruction(vcpu, kvm_run, fault_address, error_code);
	spin_unlock(&vcpu->kvm->lock);

	switch (er) {
	case EMULATE_DONE:
		return 1;
	case EMULATE_DO_MMIO:
		++vcpu->stat.mmio_exits;
		return 0;
	case EMULATE_FAIL:
		vcpu_printf(vcpu, "%s: emulate fail\n", __FUNCTION__);
		break;
	default:
		BUG();
	}

	kvm_run->exit_reason = KVM_EXIT_UNKNOWN;
	return 0;
}

static int nm_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	svm->vmcb->control.intercept_exceptions &= ~(1 << NM_VECTOR);
	if (!(vcpu->cr0 & X86_CR0_TS))
		svm->vmcb->save.cr0 &= ~X86_CR0_TS;
	vcpu->fpu_active = 1;

	return 1;
}

static int shutdown_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	/*
	 * VMCB is undefined after a SHUTDOWN intercept
	 * so reinitialize it.
	 */
	clear_page(svm->vmcb);
	init_vmcb(svm->vmcb);

	kvm_run->exit_reason = KVM_EXIT_SHUTDOWN;
	return 0;
}
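
/*
 * Scan the bytes of the faulting I/O instruction for segment-override
 * and address-size prefixes, so the string operand below can be
 * resolved against the right segment and address mask.
 */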
static int io_get_override(struct kvm_vcpu *vcpu,
			   struct vmcb_seg **seg,
			   int *addr_override)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	u8 inst[MAX_INST_SIZE];
	unsigned ins_length;
	gva_t rip;
	int i;

	rip = svm->vmcb->save.rip;
	ins_length = svm->next_rip - rip;
	rip += svm->vmcb->save.cs.base;

	if (ins_length > MAX_INST_SIZE)
		printk(KERN_DEBUG
		       "%s: inst length err, cs base 0x%llx rip 0x%llx "
		       "next rip 0x%llx ins_length %u\n",
		       __FUNCTION__,
		       svm->vmcb->save.cs.base,
		       svm->vmcb->save.rip,
		       svm->vmcb->control.exit_info_2,
		       ins_length);

	if (kvm_read_guest(vcpu, rip, ins_length, inst) != ins_length)
		/* #PF */
		return 0;

	*addr_override = 0;
	*seg = NULL;
	for (i = 0; i < ins_length; i++)
		switch (inst[i]) {
		case 0xf0:
		case 0xf2:
		case 0xf3:
		case 0x66:
			continue;
		case 0x67:
			*addr_override = 1;
			continue;
		case 0x2e:
			*seg = &svm->vmcb->save.cs;
			continue;
		case 0x36:
			*seg = &svm->vmcb->save.ss;
			continue;
		case 0x3e:
			*seg = &svm->vmcb->save.ds;
			continue;
		case 0x26:
			*seg = &svm->vmcb->save.es;
			continue;
		case 0x64:
			*seg = &svm->vmcb->save.fs;
			continue;
		case 0x65:
			*seg = &svm->vmcb->save.gs;
			continue;
		default:
			return 1;
		}
	printk(KERN_DEBUG "%s: unexpected\n", __FUNCTION__);
	return 0;
}
static unsigned long io_address(struct kvm_vcpu *vcpu, int ins, gva_t *address)
{
	unsigned long addr_mask;
	unsigned long *reg;
	struct vmcb_seg *seg;
	int addr_override;
	struct vcpu_svm *svm = to_svm(vcpu);
	struct vmcb_save_area *save_area = &svm->vmcb->save;
	u16 cs_attrib = save_area->cs.attrib;
	unsigned addr_size = get_addr_size(vcpu);

	if (!io_get_override(vcpu, &seg, &addr_override))
		return 0;

	if (addr_override)
		addr_size = (addr_size == 2) ? 4 : (addr_size >> 1);

	if (ins) {
		reg = &vcpu->regs[VCPU_REGS_RDI];
		seg = &svm->vmcb->save.es;
	} else {
		reg = &vcpu->regs[VCPU_REGS_RSI];
		seg = (seg) ? seg : &svm->vmcb->save.ds;
	}

	addr_mask = ~0ULL >> (64 - (addr_size * 8));

	if ((cs_attrib & SVM_SELECTOR_L_MASK) &&
	    !(svm->vmcb->save.rflags & X86_EFLAGS_VM)) {
		*address = (*reg & addr_mask);
		return addr_mask;
	}

	if (!(seg->attrib & SVM_SELECTOR_P_MASK)) {
		svm_inject_gp(vcpu, 0);
		return 0;
	}

	*address = (*reg & addr_mask) + seg->base;
	return addr_mask;
}
static int io_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	u32 io_info = svm->vmcb->control.exit_info_1; /* address size bug? */
	int size, down, in, string, rep;
	unsigned port;
	unsigned long count;
	gva_t address = 0;

	++vcpu->stat.io_exits;

	svm->next_rip = svm->vmcb->control.exit_info_2;

	in = (io_info & SVM_IOIO_TYPE_MASK) != 0;
	port = io_info >> 16;
	size = (io_info & SVM_IOIO_SIZE_MASK) >> SVM_IOIO_SIZE_SHIFT;
	string = (io_info & SVM_IOIO_STR_MASK) != 0;
	rep = (io_info & SVM_IOIO_REP_MASK) != 0;
	count = 1;
	down = (svm->vmcb->save.rflags & X86_EFLAGS_DF) != 0;

	if (string) {
		unsigned addr_mask;

		addr_mask = io_address(vcpu, in, &address);
		if (!addr_mask) {
			printk(KERN_DEBUG "%s: get io address failed\n",
			       __FUNCTION__);
			return 1;
		}

		if (rep)
			count = vcpu->regs[VCPU_REGS_RCX] & addr_mask;
	}
	return kvm_setup_pio(vcpu, kvm_run, in, size, count, string, down,
			     address, rep, port);
}
static int nop_on_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	return 1;
}

static int halt_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	svm->next_rip = svm->vmcb->save.rip + 1;
	skip_emulated_instruction(vcpu);
	return kvm_emulate_halt(vcpu);
}

static int vmmcall_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	svm->next_rip = svm->vmcb->save.rip + 3;
	skip_emulated_instruction(vcpu);
	return kvm_hypercall(vcpu, kvm_run);
}

static int invalid_op_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	inject_ud(vcpu);
	return 1;
}
static int task_switch_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	printk(KERN_DEBUG "%s: task switch is unsupported\n", __FUNCTION__);
	kvm_run->exit_reason = KVM_EXIT_UNKNOWN;
	return 0;
}
static int cpuid_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	svm->next_rip = svm->vmcb->save.rip + 2;
	kvm_emulate_cpuid(vcpu);
	return 1;
}

static int emulate_on_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	if (emulate_instruction(vcpu, NULL, 0, 0) != EMULATE_DONE)
		printk(KERN_ERR "%s: failed\n", __FUNCTION__);
	return 1;
}

static int svm_get_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 *data)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	switch (ecx) {
	case MSR_IA32_TIME_STAMP_COUNTER: {
		u64 tsc;

		rdtscll(tsc);
		*data = svm->vmcb->control.tsc_offset + tsc;
		break;
	}
	case MSR_K6_STAR:
		*data = svm->vmcb->save.star;
		break;
#ifdef CONFIG_X86_64
	case MSR_LSTAR:
		*data = svm->vmcb->save.lstar;
		break;
	case MSR_CSTAR:
		*data = svm->vmcb->save.cstar;
		break;
	case MSR_KERNEL_GS_BASE:
		*data = svm->vmcb->save.kernel_gs_base;
		break;
	case MSR_SYSCALL_MASK:
		*data = svm->vmcb->save.sfmask;
		break;
#endif
	case MSR_IA32_SYSENTER_CS:
		*data = svm->vmcb->save.sysenter_cs;
		break;
	case MSR_IA32_SYSENTER_EIP:
		*data = svm->vmcb->save.sysenter_eip;
		break;
	case MSR_IA32_SYSENTER_ESP:
		*data = svm->vmcb->save.sysenter_esp;
		break;
	default:
		return kvm_get_msr_common(vcpu, ecx, data);
	}
	return 0;
}

static int rdmsr_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	u32 ecx = vcpu->regs[VCPU_REGS_RCX];
	u64 data;

	if (svm_get_msr(vcpu, ecx, &data))
		svm_inject_gp(vcpu, 0);
	else {
		svm->vmcb->save.rax = data & 0xffffffff;
		vcpu->regs[VCPU_REGS_RDX] = data >> 32;
		svm->next_rip = svm->vmcb->save.rip + 2;
		skip_emulated_instruction(vcpu);
	}
	return 1;
}

static int svm_set_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 data)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	switch (ecx) {
	case MSR_IA32_TIME_STAMP_COUNTER: {
		u64 tsc;

		rdtscll(tsc);
		svm->vmcb->control.tsc_offset = data - tsc;
		break;
	}
	case MSR_K6_STAR:
		svm->vmcb->save.star = data;
		break;
#ifdef CONFIG_X86_64
	case MSR_LSTAR:
		svm->vmcb->save.lstar = data;
		break;
	case MSR_CSTAR:
		svm->vmcb->save.cstar = data;
		break;
	case MSR_KERNEL_GS_BASE:
		svm->vmcb->save.kernel_gs_base = data;
		break;
	case MSR_SYSCALL_MASK:
		svm->vmcb->save.sfmask = data;
		break;
#endif
	case MSR_IA32_SYSENTER_CS:
		svm->vmcb->save.sysenter_cs = data;
		break;
	case MSR_IA32_SYSENTER_EIP:
		svm->vmcb->save.sysenter_eip = data;
		break;
	case MSR_IA32_SYSENTER_ESP:
		svm->vmcb->save.sysenter_esp = data;
		break;
	default:
		return kvm_set_msr_common(vcpu, ecx, data);
	}
	return 0;
}

static int wrmsr_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	u32 ecx = vcpu->regs[VCPU_REGS_RCX];
	u64 data = (svm->vmcb->save.rax & -1u)
		| ((u64)(vcpu->regs[VCPU_REGS_RDX] & -1u) << 32);

	svm->next_rip = svm->vmcb->save.rip + 2;
	if (svm_set_msr(vcpu, ecx, data))
		svm_inject_gp(vcpu, 0);
	else
		skip_emulated_instruction(vcpu);
	return 1;
}

static int msr_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	if (to_svm(vcpu)->vmcb->control.exit_info_1)
		return wrmsr_interception(vcpu, kvm_run);
	else
		return rdmsr_interception(vcpu, kvm_run);
}

static int interrupt_window_interception(struct kvm_vcpu *vcpu,
					 struct kvm_run *kvm_run)
{
	/*
	 * If userspace is waiting to inject interrupts, exit as soon as
	 * possible.
	 */
	if (kvm_run->request_interrupt_window &&
	    !vcpu->irq_summary) {
		++vcpu->stat.irq_window_exits;
		kvm_run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN;
		return 0;
	}

	return 1;
}
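
/*
 * Dispatch table indexed by the VMCB exit code; handle_exit() falls
 * back to KVM_EXIT_UNKNOWN for any exit code without a handler here.
 */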
static int (*svm_exit_handlers[])(struct kvm_vcpu *vcpu,
				  struct kvm_run *kvm_run) = {
	[SVM_EXIT_READ_CR0]			= emulate_on_interception,
	[SVM_EXIT_READ_CR3]			= emulate_on_interception,
	[SVM_EXIT_READ_CR4]			= emulate_on_interception,
	/* for now: */
	[SVM_EXIT_WRITE_CR0]			= emulate_on_interception,
	[SVM_EXIT_WRITE_CR3]			= emulate_on_interception,
	[SVM_EXIT_WRITE_CR4]			= emulate_on_interception,
	[SVM_EXIT_READ_DR0]			= emulate_on_interception,
	[SVM_EXIT_READ_DR1]			= emulate_on_interception,
	[SVM_EXIT_READ_DR2]			= emulate_on_interception,
	[SVM_EXIT_READ_DR3]			= emulate_on_interception,
	[SVM_EXIT_WRITE_DR0]			= emulate_on_interception,
	[SVM_EXIT_WRITE_DR1]			= emulate_on_interception,
	[SVM_EXIT_WRITE_DR2]			= emulate_on_interception,
	[SVM_EXIT_WRITE_DR3]			= emulate_on_interception,
	[SVM_EXIT_WRITE_DR5]			= emulate_on_interception,
	[SVM_EXIT_WRITE_DR7]			= emulate_on_interception,
	[SVM_EXIT_EXCP_BASE + PF_VECTOR]	= pf_interception,
	[SVM_EXIT_EXCP_BASE + NM_VECTOR]	= nm_interception,
	[SVM_EXIT_INTR]				= nop_on_interception,
	[SVM_EXIT_NMI]				= nop_on_interception,
	[SVM_EXIT_SMI]				= nop_on_interception,
	[SVM_EXIT_INIT]				= nop_on_interception,
	[SVM_EXIT_VINTR]			= interrupt_window_interception,
	/* [SVM_EXIT_CR0_SEL_WRITE]		= emulate_on_interception, */
	[SVM_EXIT_CPUID]			= cpuid_interception,
	[SVM_EXIT_HLT]				= halt_interception,
	[SVM_EXIT_INVLPG]			= emulate_on_interception,
	[SVM_EXIT_INVLPGA]			= invalid_op_interception,
	[SVM_EXIT_IOIO]				= io_interception,
	[SVM_EXIT_MSR]				= msr_interception,
	[SVM_EXIT_TASK_SWITCH]			= task_switch_interception,
	[SVM_EXIT_SHUTDOWN]			= shutdown_interception,
	[SVM_EXIT_VMRUN]			= invalid_op_interception,
	[SVM_EXIT_VMMCALL]			= vmmcall_interception,
	[SVM_EXIT_VMLOAD]			= invalid_op_interception,
	[SVM_EXIT_VMSAVE]			= invalid_op_interception,
	[SVM_EXIT_STGI]				= invalid_op_interception,
	[SVM_EXIT_CLGI]				= invalid_op_interception,
	[SVM_EXIT_SKINIT]			= invalid_op_interception,
	[SVM_EXIT_MONITOR]			= invalid_op_interception,
	[SVM_EXIT_MWAIT]			= invalid_op_interception,
};
static int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	u32 exit_code = svm->vmcb->control.exit_code;

	if (is_external_interrupt(svm->vmcb->control.exit_int_info) &&
	    exit_code != SVM_EXIT_EXCP_BASE + PF_VECTOR)
		printk(KERN_ERR "%s: unexpected exit_int_info 0x%x "
		       "exit_code 0x%x\n",
		       __FUNCTION__, svm->vmcb->control.exit_int_info,
		       exit_code);

	if (exit_code >= ARRAY_SIZE(svm_exit_handlers)
	    || !svm_exit_handlers[exit_code]) {
		kvm_run->exit_reason = KVM_EXIT_UNKNOWN;
		kvm_run->hw.hardware_exit_reason = exit_code;
		return 0;
	}

	return svm_exit_handlers[exit_code](vcpu, kvm_run);
}
static void reload_tss(struct kvm_vcpu *vcpu)
{
	int cpu = raw_smp_processor_id();
	struct svm_cpu_data *svm_data = per_cpu(svm_data, cpu);

	svm_data->tss_desc->type = 9; /* available 32/64-bit TSS */
	load_TR_desc();
}

static void pre_svm_run(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	int cpu = raw_smp_processor_id();
	struct svm_cpu_data *svm_data = per_cpu(svm_data, cpu);

	svm->vmcb->control.tlb_ctl = TLB_CONTROL_DO_NOTHING;
	if (vcpu->cpu != cpu ||
	    svm->asid_generation != svm_data->asid_generation)
		new_asid(vcpu, svm_data);
}

static inline void kvm_do_inject_irq(struct kvm_vcpu *vcpu)
{
	struct vmcb_control_area *control;

	control = &to_svm(vcpu)->vmcb->control;
	control->int_vector = pop_irq(vcpu);
	control->int_ctl &= ~V_INTR_PRIO_MASK;
	control->int_ctl |= V_IRQ_MASK |
		((/*control->int_vector >> 4*/ 0xf) << V_INTR_PRIO_SHIFT);
}

static void kvm_reput_irq(struct kvm_vcpu *vcpu)
{
	struct vmcb_control_area *control = &to_svm(vcpu)->vmcb->control;

	if (control->int_ctl & V_IRQ_MASK) {
		control->int_ctl &= ~V_IRQ_MASK;
		push_irq(vcpu, control->int_vector);
	}

	vcpu->interrupt_window_open =
		!(control->int_state & SVM_INTERRUPT_SHADOW_MASK);
}

static void do_interrupt_requests(struct kvm_vcpu *vcpu,
				  struct kvm_run *kvm_run)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	struct vmcb_control_area *control = &svm->vmcb->control;

	vcpu->interrupt_window_open =
		(!(control->int_state & SVM_INTERRUPT_SHADOW_MASK) &&
		 (svm->vmcb->save.rflags & X86_EFLAGS_IF));

	if (vcpu->interrupt_window_open && vcpu->irq_summary)
		/*
		 * If interrupts enabled, and not blocked by sti or mov ss. Good.
		 */
		kvm_do_inject_irq(vcpu);

	/*
	 * Interrupts blocked.  Wait for unblock.
	 */
	if (!vcpu->interrupt_window_open &&
	    (vcpu->irq_summary || kvm_run->request_interrupt_window)) {
		control->intercept |= 1ULL << INTERCEPT_VINTR;
	} else
		control->intercept &= ~(1ULL << INTERCEPT_VINTR);
}

static void post_kvm_run_save(struct kvm_vcpu *vcpu,
			      struct kvm_run *kvm_run)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	kvm_run->ready_for_interrupt_injection = (vcpu->interrupt_window_open &&
						  vcpu->irq_summary == 0);
	kvm_run->if_flag = (svm->vmcb->save.rflags & X86_EFLAGS_IF) != 0;
	kvm_run->cr8 = vcpu->cr8;
	kvm_run->apic_base = vcpu->apic_base;
}

/*
 * Check if userspace requested an interrupt window, and that the
 * interrupt window is open.
 *
 * No need to exit to userspace if we already have an interrupt queued.
 */
static int dm_request_for_irq_injection(struct kvm_vcpu *vcpu,
					struct kvm_run *kvm_run)
{
	return (!vcpu->irq_summary &&
		kvm_run->request_interrupt_window &&
		vcpu->interrupt_window_open &&
		(to_svm(vcpu)->vmcb->save.rflags & X86_EFLAGS_IF));
}

static void save_db_regs(unsigned long *db_regs)
{
	asm volatile ("mov %%dr0, %0" : "=r"(db_regs[0]));
	asm volatile ("mov %%dr1, %0" : "=r"(db_regs[1]));
	asm volatile ("mov %%dr2, %0" : "=r"(db_regs[2]));
	asm volatile ("mov %%dr3, %0" : "=r"(db_regs[3]));
}

static void load_db_regs(unsigned long *db_regs)
{
	asm volatile ("mov %0, %%dr0" : : "r"(db_regs[0]));
	asm volatile ("mov %0, %%dr1" : : "r"(db_regs[1]));
	asm volatile ("mov %0, %%dr2" : : "r"(db_regs[2]));
	asm volatile ("mov %0, %%dr3" : : "r"(db_regs[3]));
}

static void svm_flush_tlb(struct kvm_vcpu *vcpu)
{
	force_new_asid(vcpu);
}

static int svm_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	u16 fs_selector;
	u16 gs_selector;
	u16 ldt_selector;
	int r;

again:
	r = kvm_mmu_reload(vcpu);
	if (unlikely(r))
		return r;

	if (!vcpu->mmio_read_completed)
		do_interrupt_requests(vcpu, kvm_run);

	clgi();

	vcpu->guest_mode = 1;
	if (vcpu->requests)
		if (test_and_clear_bit(KVM_TLB_FLUSH, &vcpu->requests))
			svm_flush_tlb(vcpu);

	pre_svm_run(vcpu);

	save_host_msrs(vcpu);
	fs_selector = read_fs();
	gs_selector = read_gs();
	ldt_selector = read_ldt();
	svm->host_cr2 = kvm_read_cr2();
	svm->host_dr6 = read_dr6();
	svm->host_dr7 = read_dr7();
	svm->vmcb->save.cr2 = vcpu->cr2;

	if (svm->vmcb->save.dr7 & 0xff) {
		write_dr7(0);
		save_db_regs(svm->host_db_regs);
		load_db_regs(svm->db_regs);
	}

	if (vcpu->fpu_active) {
		fx_save(vcpu->host_fx_image);
		fx_restore(vcpu->guest_fx_image);
	}
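
	/*
	 * Load the guest's general-purpose registers from the vcpu
	 * struct, VMLOAD/VMRUN/VMSAVE with RAX pointing at the VMCB,
	 * then store the guest registers back.  Guest RAX/RSP/RIP live
	 * in the VMCB save area and are handled by the hardware.
	 */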
	asm volatile (
#ifdef CONFIG_X86_64
		"push %%rbx; push %%rcx; push %%rdx;"
		"push %%rsi; push %%rdi; push %%rbp;"
		"push %%r8;  push %%r9;  push %%r10; push %%r11;"
		"push %%r12; push %%r13; push %%r14; push %%r15;"
#else
		"push %%ebx; push %%ecx; push %%edx;"
		"push %%esi; push %%edi; push %%ebp;"
#endif

#ifdef CONFIG_X86_64
		"mov %c[rbx](%[svm]), %%rbx \n\t"
		"mov %c[rcx](%[svm]), %%rcx \n\t"
		"mov %c[rdx](%[svm]), %%rdx \n\t"
		"mov %c[rsi](%[svm]), %%rsi \n\t"
		"mov %c[rdi](%[svm]), %%rdi \n\t"
		"mov %c[rbp](%[svm]), %%rbp \n\t"
		"mov %c[r8](%[svm]), %%r8 \n\t"
		"mov %c[r9](%[svm]), %%r9 \n\t"
		"mov %c[r10](%[svm]), %%r10 \n\t"
		"mov %c[r11](%[svm]), %%r11 \n\t"
		"mov %c[r12](%[svm]), %%r12 \n\t"
		"mov %c[r13](%[svm]), %%r13 \n\t"
		"mov %c[r14](%[svm]), %%r14 \n\t"
		"mov %c[r15](%[svm]), %%r15 \n\t"
#else
		"mov %c[rbx](%[svm]), %%ebx \n\t"
		"mov %c[rcx](%[svm]), %%ecx \n\t"
		"mov %c[rdx](%[svm]), %%edx \n\t"
		"mov %c[rsi](%[svm]), %%esi \n\t"
		"mov %c[rdi](%[svm]), %%edi \n\t"
		"mov %c[rbp](%[svm]), %%ebp \n\t"
#endif

#ifdef CONFIG_X86_64
		/* Enter guest mode */
		"push %%rax \n\t"
		"mov %c[vmcb](%[svm]), %%rax \n\t"
		SVM_VMLOAD "\n\t"
		SVM_VMRUN "\n\t"
		SVM_VMSAVE "\n\t"
		"pop %%rax \n\t"
#else
		/* Enter guest mode */
		"push %%eax \n\t"
		"mov %c[vmcb](%[svm]), %%eax \n\t"
		SVM_VMLOAD "\n\t"
		SVM_VMRUN "\n\t"
		SVM_VMSAVE "\n\t"
		"pop %%eax \n\t"
#endif

		/* Save guest registers, load host registers */
#ifdef CONFIG_X86_64
		"mov %%rbx, %c[rbx](%[svm]) \n\t"
		"mov %%rcx, %c[rcx](%[svm]) \n\t"
		"mov %%rdx, %c[rdx](%[svm]) \n\t"
		"mov %%rsi, %c[rsi](%[svm]) \n\t"
		"mov %%rdi, %c[rdi](%[svm]) \n\t"
		"mov %%rbp, %c[rbp](%[svm]) \n\t"
		"mov %%r8, %c[r8](%[svm]) \n\t"
		"mov %%r9, %c[r9](%[svm]) \n\t"
		"mov %%r10, %c[r10](%[svm]) \n\t"
		"mov %%r11, %c[r11](%[svm]) \n\t"
		"mov %%r12, %c[r12](%[svm]) \n\t"
		"mov %%r13, %c[r13](%[svm]) \n\t"
		"mov %%r14, %c[r14](%[svm]) \n\t"
		"mov %%r15, %c[r15](%[svm]) \n\t"

		"pop %%r15; pop %%r14; pop %%r13; pop %%r12;"
		"pop %%r11; pop %%r10; pop %%r9; pop %%r8;"
		"pop %%rbp; pop %%rdi; pop %%rsi;"
		"pop %%rdx; pop %%rcx; pop %%rbx; \n\t"
#else
		"mov %%ebx, %c[rbx](%[svm]) \n\t"
		"mov %%ecx, %c[rcx](%[svm]) \n\t"
		"mov %%edx, %c[rdx](%[svm]) \n\t"
		"mov %%esi, %c[rsi](%[svm]) \n\t"
		"mov %%edi, %c[rdi](%[svm]) \n\t"
		"mov %%ebp, %c[rbp](%[svm]) \n\t"

		"pop %%ebp; pop %%edi; pop %%esi;"
		"pop %%edx; pop %%ecx; pop %%ebx; \n\t"
#endif
		:
		: [svm]"a"(svm),
		  [vmcb]"i"(offsetof(struct vcpu_svm, vmcb_pa)),
		  [rbx]"i"(offsetof(struct vcpu_svm, vcpu.regs[VCPU_REGS_RBX])),
		  [rcx]"i"(offsetof(struct vcpu_svm, vcpu.regs[VCPU_REGS_RCX])),
		  [rdx]"i"(offsetof(struct vcpu_svm, vcpu.regs[VCPU_REGS_RDX])),
		  [rsi]"i"(offsetof(struct vcpu_svm, vcpu.regs[VCPU_REGS_RSI])),
		  [rdi]"i"(offsetof(struct vcpu_svm, vcpu.regs[VCPU_REGS_RDI])),
		  [rbp]"i"(offsetof(struct vcpu_svm, vcpu.regs[VCPU_REGS_RBP]))
#ifdef CONFIG_X86_64
		  , [r8]"i"(offsetof(struct vcpu_svm, vcpu.regs[VCPU_REGS_R8])),
		  [r9]"i"(offsetof(struct vcpu_svm, vcpu.regs[VCPU_REGS_R9])),
		  [r10]"i"(offsetof(struct vcpu_svm, vcpu.regs[VCPU_REGS_R10])),
		  [r11]"i"(offsetof(struct vcpu_svm, vcpu.regs[VCPU_REGS_R11])),
		  [r12]"i"(offsetof(struct vcpu_svm, vcpu.regs[VCPU_REGS_R12])),
		  [r13]"i"(offsetof(struct vcpu_svm, vcpu.regs[VCPU_REGS_R13])),
		  [r14]"i"(offsetof(struct vcpu_svm, vcpu.regs[VCPU_REGS_R14])),
		  [r15]"i"(offsetof(struct vcpu_svm, vcpu.regs[VCPU_REGS_R15]))
#endif
		: "cc", "memory");
	vcpu->guest_mode = 0;

	if (vcpu->fpu_active) {
		fx_save(vcpu->guest_fx_image);
		fx_restore(vcpu->host_fx_image);
	}

	if ((svm->vmcb->save.dr7 & 0xff))
		load_db_regs(svm->host_db_regs);

	vcpu->cr2 = svm->vmcb->save.cr2;

	write_dr6(svm->host_dr6);
	write_dr7(svm->host_dr7);
	kvm_write_cr2(svm->host_cr2);

	load_fs(fs_selector);
	load_gs(gs_selector);
	load_ldt(ldt_selector);
	load_host_msrs(vcpu);

	reload_tss(vcpu);

	/*
	 * Profile KVM exit RIPs:
	 */
	if (unlikely(prof_on == KVM_PROFILING))
		profile_hit(KVM_PROFILING,
			    (void *)(unsigned long)svm->vmcb->save.rip);

	stgi();

	kvm_reput_irq(vcpu);

	svm->next_rip = 0;

	if (svm->vmcb->control.exit_code == SVM_EXIT_ERR) {
		kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY;
		kvm_run->fail_entry.hardware_entry_failure_reason
			= svm->vmcb->control.exit_code;
		post_kvm_run_save(vcpu, kvm_run);
		return 0;
	}

	r = handle_exit(vcpu, kvm_run);
	if (r > 0) {
		if (signal_pending(current)) {
			++vcpu->stat.signal_exits;
			post_kvm_run_save(vcpu, kvm_run);
			kvm_run->exit_reason = KVM_EXIT_INTR;
			return -EINTR;
		}

		if (dm_request_for_irq_injection(vcpu, kvm_run)) {
			++vcpu->stat.request_irq_exits;
			post_kvm_run_save(vcpu, kvm_run);
			kvm_run->exit_reason = KVM_EXIT_INTR;
			return -EINTR;
		}
		kvm_resched(vcpu);
		goto again;
	}
	post_kvm_run_save(vcpu, kvm_run);
	return r;
}

static void svm_set_cr3(struct kvm_vcpu *vcpu, unsigned long root)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	svm->vmcb->save.cr3 = root;
	force_new_asid(vcpu);

	if (vcpu->fpu_active) {
		svm->vmcb->control.intercept_exceptions |= (1 << NM_VECTOR);
		svm->vmcb->save.cr0 |= X86_CR0_TS;
		vcpu->fpu_active = 0;
	}
}
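
/*
 * If a page fault is raised while one is already being delivered
 * (exit_int_info still holds a #PF event), promote it to a double
 * fault instead of queueing a second #PF.
 */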
static void svm_inject_page_fault(struct kvm_vcpu *vcpu,
				  unsigned long addr,
				  uint32_t err_code)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	uint32_t exit_int_info = svm->vmcb->control.exit_int_info;

	++vcpu->stat.pf_guest;

	if (is_page_fault(exit_int_info)) {
		svm->vmcb->control.event_inj_err = 0;
		svm->vmcb->control.event_inj =	SVM_EVTINJ_VALID |
						SVM_EVTINJ_VALID_ERR |
						SVM_EVTINJ_TYPE_EXEPT |
						DF_VECTOR;
		return;
	}
	vcpu->cr2 = addr;
	svm->vmcb->save.cr2 = addr;
	svm->vmcb->control.event_inj =	SVM_EVTINJ_VALID |
					SVM_EVTINJ_VALID_ERR |
					SVM_EVTINJ_TYPE_EXEPT |
					PF_VECTOR;
	svm->vmcb->control.event_inj_err = err_code;
}

static int is_disabled(void)
{
	u64 vm_cr;

	rdmsrl(MSR_VM_CR, vm_cr);
	if (vm_cr & (1 << SVM_VM_CR_SVM_DISABLE))
		return 1;

	return 0;
}
static void
svm_patch_hypercall(struct kvm_vcpu *vcpu, unsigned char *hypercall)
{
	/*
	 * Patch in the VMMCALL instruction:
	 */
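	/* 0f 01 d9 encodes vmmcall; the trailing c3 is a ret back to the caller */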
	hypercall[0] = 0x0f;
	hypercall[1] = 0x01;
	hypercall[2] = 0xd9;
	hypercall[3] = 0xc3;
}

static struct kvm_arch_ops svm_arch_ops = {
	.cpu_has_kvm_support = has_svm,
	.disabled_by_bios = is_disabled,
	.hardware_setup = svm_hardware_setup,
	.hardware_unsetup = svm_hardware_unsetup,
	.hardware_enable = svm_hardware_enable,
	.hardware_disable = svm_hardware_disable,
	.vcpu_create = svm_create_vcpu,
	.vcpu_free = svm_free_vcpu,
	.vcpu_load = svm_vcpu_load,
	.vcpu_put = svm_vcpu_put,
	.vcpu_decache = svm_vcpu_decache,
	.set_guest_debug = svm_guest_debug,
	.get_msr = svm_get_msr,
	.set_msr = svm_set_msr,
	.get_segment_base = svm_get_segment_base,
	.get_segment = svm_get_segment,
	.set_segment = svm_set_segment,
	.get_cs_db_l_bits = svm_get_cs_db_l_bits,
	.decache_cr4_guest_bits = svm_decache_cr4_guest_bits,
	.set_cr0 = svm_set_cr0,
	.set_cr3 = svm_set_cr3,
	.set_cr4 = svm_set_cr4,
	.set_efer = svm_set_efer,
	.get_idt = svm_get_idt,
	.set_idt = svm_set_idt,
	.get_gdt = svm_get_gdt,
	.set_gdt = svm_set_gdt,
	.get_dr = svm_get_dr,
	.set_dr = svm_set_dr,
	.cache_regs = svm_cache_regs,
	.decache_regs = svm_decache_regs,
	.get_rflags = svm_get_rflags,
	.set_rflags = svm_set_rflags,

	.invlpg = svm_invlpg,
	.tlb_flush = svm_flush_tlb,
	.inject_page_fault = svm_inject_page_fault,

	.inject_gp = svm_inject_gp,

	.run = svm_vcpu_run,
	.skip_emulated_instruction = skip_emulated_instruction,
	.patch_hypercall = svm_patch_hypercall,
};

static int __init svm_init(void)
{
	return kvm_init_arch(&svm_arch_ops, THIS_MODULE);
}

static void __exit svm_exit(void)
{
	kvm_exit_arch();
}

module_init(svm_init)
module_exit(svm_exit)