svm.c

/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * AMD SVM support
 *
 * Copyright (C) 2006 Qumranet, Inc.
 *
 * Authors:
 *   Yaniv Kamay <yaniv@qumranet.com>
 *   Avi Kivity <avi@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 *
 */
#include <linux/kvm_host.h>

#include "irq.h"
#include "mmu.h"
#include "kvm_cache_regs.h"
#include "x86.h"

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/vmalloc.h>
#include <linux/highmem.h>
#include <linux/sched.h>
#include <linux/ftrace_event.h>

#include <asm/desc.h>
#include <asm/virtext.h>

#include "trace.h"

#define __ex(x) __kvm_handle_fault_on_reboot(x)

MODULE_AUTHOR("Qumranet");
MODULE_LICENSE("GPL");

#define IOPM_ALLOC_ORDER 2
#define MSRPM_ALLOC_ORDER 1

#define SEG_TYPE_LDT 2
#define SEG_TYPE_BUSY_TSS16 3

#define SVM_FEATURE_NPT  (1 << 0)
#define SVM_FEATURE_LBRV (1 << 1)
#define SVM_FEATURE_SVML (1 << 2)

#define NESTED_EXIT_HOST     0 /* Exit handled on host level */
#define NESTED_EXIT_DONE     1 /* Exit caused nested vmexit */
#define NESTED_EXIT_CONTINUE 2 /* Further checks needed */

#define DEBUGCTL_RESERVED_BITS (~(0x3fULL))

/* Turn on to get debugging output */
/* #define NESTED_DEBUG */

#ifdef NESTED_DEBUG
#define nsvm_printk(fmt, args...) printk(KERN_INFO fmt, ## args)
#else
#define nsvm_printk(fmt, args...) do {} while (0)
#endif

static const u32 host_save_user_msrs[] = {
#ifdef CONFIG_X86_64
        MSR_STAR, MSR_LSTAR, MSR_CSTAR, MSR_SYSCALL_MASK, MSR_KERNEL_GS_BASE,
        MSR_FS_BASE,
#endif
        MSR_IA32_SYSENTER_CS, MSR_IA32_SYSENTER_ESP, MSR_IA32_SYSENTER_EIP,
};

#define NR_HOST_SAVE_USER_MSRS ARRAY_SIZE(host_save_user_msrs)

struct kvm_vcpu;

struct nested_state {
        struct vmcb *hsave;
        u64 hsave_msr;
        u64 vmcb;

        /* These are the merged vectors */
        u32 *msrpm;

        /* gpa pointers to the real vectors */
        u64 vmcb_msrpm;

        /* A VMEXIT is required but not yet emulated */
        bool exit_required;

        /* cache for intercepts of the guest */
        u16 intercept_cr_read;
        u16 intercept_cr_write;
        u16 intercept_dr_read;
        u16 intercept_dr_write;
        u32 intercept_exceptions;
        u64 intercept;
};

struct vcpu_svm {
        struct kvm_vcpu vcpu;
        struct vmcb *vmcb;
        unsigned long vmcb_pa;
        struct svm_cpu_data *svm_data;
        uint64_t asid_generation;
        uint64_t sysenter_esp;
        uint64_t sysenter_eip;
        u64 next_rip;
        u64 host_user_msrs[NR_HOST_SAVE_USER_MSRS];
        u64 host_gs_base;
        u32 *msrpm;
        struct nested_state nested;
};

/* enable NPT for AMD64 and X86 with PAE */
#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
static bool npt_enabled = true;
#else
static bool npt_enabled = false;
#endif

static int npt = 1;
module_param(npt, int, S_IRUGO);

static int nested = 1;
module_param(nested, int, S_IRUGO);

static void svm_flush_tlb(struct kvm_vcpu *vcpu);
static void svm_complete_interrupts(struct vcpu_svm *svm);

static int nested_svm_exit_handled(struct vcpu_svm *svm);
static int nested_svm_vmexit(struct vcpu_svm *svm);
static int nested_svm_check_exception(struct vcpu_svm *svm, unsigned nr,
                                      bool has_error_code, u32 error_code);

static inline struct vcpu_svm *to_svm(struct kvm_vcpu *vcpu)
{
        return container_of(vcpu, struct vcpu_svm, vcpu);
}

static inline bool is_nested(struct vcpu_svm *svm)
{
        return svm->nested.vmcb;
}
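
/*
 * GIF (Global Interrupt Flag) helpers: KVM tracks the guest's virtual GIF
 * in vcpu->arch.hflags rather than reading it back from hardware.
 */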
static inline void enable_gif(struct vcpu_svm *svm)
{
        svm->vcpu.arch.hflags |= HF_GIF_MASK;
}

static inline void disable_gif(struct vcpu_svm *svm)
{
        svm->vcpu.arch.hflags &= ~HF_GIF_MASK;
}

static inline bool gif_set(struct vcpu_svm *svm)
{
        return !!(svm->vcpu.arch.hflags & HF_GIF_MASK);
}

static unsigned long iopm_base;

struct kvm_ldttss_desc {
        u16 limit0;
        u16 base0;
        unsigned base1 : 8, type : 5, dpl : 2, p : 1;
        unsigned limit1 : 4, zero0 : 3, g : 1, base2 : 8;
        u32 base3;
        u32 zero1;
} __attribute__((packed));

struct svm_cpu_data {
        int cpu;
        u64 asid_generation;
        u32 max_asid;
        u32 next_asid;
        struct kvm_ldttss_desc *tss_desc;
        struct page *save_area;
};

static DEFINE_PER_CPU(struct svm_cpu_data *, svm_data);
static uint32_t svm_features;

struct svm_init_data {
        int cpu;
        int r;
};

static u32 msrpm_ranges[] = {0, 0xc0000000, 0xc0010000};

#define NUM_MSR_MAPS ARRAY_SIZE(msrpm_ranges)
#define MSRS_RANGE_SIZE 2048
#define MSRS_IN_RANGE (MSRS_RANGE_SIZE * 8 / 2)

#define MAX_INST_SIZE 15

static inline u32 svm_has(u32 feat)
{
        return svm_features & feat;
}
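
/*
 * Thin wrappers around the SVM instructions: CLGI clears the global
 * interrupt flag, STGI sets it, and INVLPGA invalidates the TLB entry for
 * a guest virtual address in the given ASID.
 */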
static inline void clgi(void)
{
        asm volatile (__ex(SVM_CLGI));
}

static inline void stgi(void)
{
        asm volatile (__ex(SVM_STGI));
}

static inline void invlpga(unsigned long addr, u32 asid)
{
        asm volatile (__ex(SVM_INVLPGA) :: "a"(addr), "c"(asid));
}

static inline void force_new_asid(struct kvm_vcpu *vcpu)
{
        to_svm(vcpu)->asid_generation--;
}

static inline void flush_guest_tlb(struct kvm_vcpu *vcpu)
{
        force_new_asid(vcpu);
}
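
/*
 * EFER_SVME must stay set in the guest's EFER while it runs under SVM, so
 * it is OR'ed back in no matter what value the guest wrote.
 */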
static void svm_set_efer(struct kvm_vcpu *vcpu, u64 efer)
{
        if (!npt_enabled && !(efer & EFER_LMA))
                efer &= ~EFER_LME;

        to_svm(vcpu)->vmcb->save.efer = efer | EFER_SVME;
        vcpu->arch.shadow_efer = efer;
}

static void svm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr,
                                bool has_error_code, u32 error_code)
{
        struct vcpu_svm *svm = to_svm(vcpu);

        /*
         * If we are within a nested VM we'd better #VMEXIT and let the
         * guest handle the exception.
         */
        if (nested_svm_check_exception(svm, nr, has_error_code, error_code))
                return;

        svm->vmcb->control.event_inj = nr
                | SVM_EVTINJ_VALID
                | (has_error_code ? SVM_EVTINJ_VALID_ERR : 0)
                | SVM_EVTINJ_TYPE_EXEPT;
        svm->vmcb->control.event_inj_err = error_code;
}

static int is_external_interrupt(u32 info)
{
        info &= SVM_EVTINJ_TYPE_MASK | SVM_EVTINJ_VALID;
        return info == (SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_INTR);
}

static u32 svm_get_interrupt_shadow(struct kvm_vcpu *vcpu, int mask)
{
        struct vcpu_svm *svm = to_svm(vcpu);
        u32 ret = 0;

        if (svm->vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK)
                ret |= X86_SHADOW_INT_STI | X86_SHADOW_INT_MOV_SS;
        return ret & mask;
}

static void svm_set_interrupt_shadow(struct kvm_vcpu *vcpu, int mask)
{
        struct vcpu_svm *svm = to_svm(vcpu);

        if (mask == 0)
                svm->vmcb->control.int_state &= ~SVM_INTERRUPT_SHADOW_MASK;
        else
                svm->vmcb->control.int_state |= SVM_INTERRUPT_SHADOW_MASK;
}
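
/*
 * Advance the guest RIP past the instruction that caused the intercept.
 * next_rip is filled in by the exit handlers; if it is not known, fall
 * back to the instruction emulator with EMULTYPE_SKIP.
 */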
static void skip_emulated_instruction(struct kvm_vcpu *vcpu)
{
        struct vcpu_svm *svm = to_svm(vcpu);

        if (!svm->next_rip) {
                if (emulate_instruction(vcpu, 0, 0, EMULTYPE_SKIP) !=
                                EMULATE_DONE)
                        printk(KERN_DEBUG "%s: NOP\n", __func__);
                return;
        }
        if (svm->next_rip - kvm_rip_read(vcpu) > MAX_INST_SIZE)
                printk(KERN_ERR "%s: ip 0x%lx next 0x%llx\n",
                       __func__, kvm_rip_read(vcpu), svm->next_rip);

        kvm_rip_write(vcpu, svm->next_rip);
        svm_set_interrupt_shadow(vcpu, 0);
}

static int has_svm(void)
{
        const char *msg;

        if (!cpu_has_svm(&msg)) {
                printk(KERN_INFO "has_svm: %s\n", msg);
                return 0;
        }

        return 1;
}

static void svm_hardware_disable(void *garbage)
{
        cpu_svm_disable();
}
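
/*
 * Per-CPU enable: refuse if EFER.SVME is already set, then turn it on and
 * point MSR_VM_HSAVE_PA at this CPU's host save area page.
 */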
static int svm_hardware_enable(void *garbage)
{
        struct svm_cpu_data *svm_data;
        uint64_t efer;
        struct descriptor_table gdt_descr;
        struct desc_struct *gdt;
        int me = raw_smp_processor_id();

        rdmsrl(MSR_EFER, efer);
        if (efer & EFER_SVME)
                return -EBUSY;

        if (!has_svm()) {
                printk(KERN_ERR "svm_hardware_enable: err EOPNOTSUPP on %d\n",
                       me);
                return -EINVAL;
        }
        svm_data = per_cpu(svm_data, me);

        if (!svm_data) {
                printk(KERN_ERR "svm_hardware_enable: svm_data is NULL on %d\n",
                       me);
                return -EINVAL;
        }

        svm_data->asid_generation = 1;
        svm_data->max_asid = cpuid_ebx(SVM_CPUID_FUNC) - 1;
        svm_data->next_asid = svm_data->max_asid + 1;

        kvm_get_gdt(&gdt_descr);
        gdt = (struct desc_struct *)gdt_descr.base;
        svm_data->tss_desc = (struct kvm_ldttss_desc *)(gdt + GDT_ENTRY_TSS);

        wrmsrl(MSR_EFER, efer | EFER_SVME);

        wrmsrl(MSR_VM_HSAVE_PA,
               page_to_pfn(svm_data->save_area) << PAGE_SHIFT);

        return 0;
}

static void svm_cpu_uninit(int cpu)
{
        struct svm_cpu_data *svm_data
                = per_cpu(svm_data, raw_smp_processor_id());

        if (!svm_data)
                return;

        per_cpu(svm_data, raw_smp_processor_id()) = NULL;
        __free_page(svm_data->save_area);
        kfree(svm_data);
}

static int svm_cpu_init(int cpu)
{
        struct svm_cpu_data *svm_data;
        int r;

        svm_data = kzalloc(sizeof(struct svm_cpu_data), GFP_KERNEL);
        if (!svm_data)
                return -ENOMEM;
        svm_data->cpu = cpu;
        svm_data->save_area = alloc_page(GFP_KERNEL);
        r = -ENOMEM;
        if (!svm_data->save_area)
                goto err_1;

        per_cpu(svm_data, cpu) = svm_data;

        return 0;

err_1:
        kfree(svm_data);
        return r;
}
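
/*
 * The MSR permission map uses two bits per MSR (bit 0: intercept reads,
 * bit 1: intercept writes), packed into one 2 KB block per MSR range. For
 * example, the n-th MSR of a range occupies bits 2*n and 2*n+1 of that
 * range's block; a value of 0 in both bits means the access is not
 * intercepted.
 */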
static void set_msr_interception(u32 *msrpm, unsigned msr,
                                 int read, int write)
{
        int i;

        for (i = 0; i < NUM_MSR_MAPS; i++) {
                if (msr >= msrpm_ranges[i] &&
                    msr < msrpm_ranges[i] + MSRS_IN_RANGE) {
                        u32 msr_offset = (i * MSRS_IN_RANGE + msr -
                                          msrpm_ranges[i]) * 2;

                        u32 *base = msrpm + (msr_offset / 32);
                        u32 msr_shift = msr_offset % 32;
                        u32 mask = ((write) ? 0 : 2) | ((read) ? 0 : 1);
                        *base = (*base & ~(0x3 << msr_shift)) |
                                (mask << msr_shift);
                        return;
                }
        }
        BUG();
}

static void svm_vcpu_init_msrpm(u32 *msrpm)
{
        memset(msrpm, 0xff, PAGE_SIZE * (1 << MSRPM_ALLOC_ORDER));

#ifdef CONFIG_X86_64
        set_msr_interception(msrpm, MSR_GS_BASE, 1, 1);
        set_msr_interception(msrpm, MSR_FS_BASE, 1, 1);
        set_msr_interception(msrpm, MSR_KERNEL_GS_BASE, 1, 1);
        set_msr_interception(msrpm, MSR_LSTAR, 1, 1);
        set_msr_interception(msrpm, MSR_CSTAR, 1, 1);
        set_msr_interception(msrpm, MSR_SYSCALL_MASK, 1, 1);
#endif
        set_msr_interception(msrpm, MSR_K6_STAR, 1, 1);
        set_msr_interception(msrpm, MSR_IA32_SYSENTER_CS, 1, 1);
}

static void svm_enable_lbrv(struct vcpu_svm *svm)
{
        u32 *msrpm = svm->msrpm;

        svm->vmcb->control.lbr_ctl = 1;
        set_msr_interception(msrpm, MSR_IA32_LASTBRANCHFROMIP, 1, 1);
        set_msr_interception(msrpm, MSR_IA32_LASTBRANCHTOIP, 1, 1);
        set_msr_interception(msrpm, MSR_IA32_LASTINTFROMIP, 1, 1);
        set_msr_interception(msrpm, MSR_IA32_LASTINTTOIP, 1, 1);
}

static void svm_disable_lbrv(struct vcpu_svm *svm)
{
        u32 *msrpm = svm->msrpm;

        svm->vmcb->control.lbr_ctl = 0;
        set_msr_interception(msrpm, MSR_IA32_LASTBRANCHFROMIP, 0, 0);
        set_msr_interception(msrpm, MSR_IA32_LASTBRANCHTOIP, 0, 0);
        set_msr_interception(msrpm, MSR_IA32_LASTINTFROMIP, 0, 0);
        set_msr_interception(msrpm, MSR_IA32_LASTINTTOIP, 0, 0);
}
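
/*
 * Module-wide setup: allocate the I/O permission map with every port
 * intercepted, advertise the optional EFER bits we can handle, set up the
 * per-CPU svm_cpu_data, and decide whether nested paging can be used.
 */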
static __init int svm_hardware_setup(void)
{
        int cpu;
        struct page *iopm_pages;
        void *iopm_va;
        int r;

        iopm_pages = alloc_pages(GFP_KERNEL, IOPM_ALLOC_ORDER);

        if (!iopm_pages)
                return -ENOMEM;

        iopm_va = page_address(iopm_pages);
        memset(iopm_va, 0xff, PAGE_SIZE * (1 << IOPM_ALLOC_ORDER));
        iopm_base = page_to_pfn(iopm_pages) << PAGE_SHIFT;

        if (boot_cpu_has(X86_FEATURE_NX))
                kvm_enable_efer_bits(EFER_NX);

        if (boot_cpu_has(X86_FEATURE_FXSR_OPT))
                kvm_enable_efer_bits(EFER_FFXSR);

        if (nested) {
                printk(KERN_INFO "kvm: Nested Virtualization enabled\n");
                kvm_enable_efer_bits(EFER_SVME);
        }

        for_each_possible_cpu(cpu) {
                r = svm_cpu_init(cpu);
                if (r)
                        goto err;
        }

        svm_features = cpuid_edx(SVM_CPUID_FUNC);

        if (!svm_has(SVM_FEATURE_NPT))
                npt_enabled = false;

        if (npt_enabled && !npt) {
                printk(KERN_INFO "kvm: Nested Paging disabled\n");
                npt_enabled = false;
        }

        if (npt_enabled) {
                printk(KERN_INFO "kvm: Nested Paging enabled\n");
                kvm_enable_tdp();
        } else
                kvm_disable_tdp();

        return 0;

err:
        __free_pages(iopm_pages, IOPM_ALLOC_ORDER);
        iopm_base = 0;
        return r;
}

static __exit void svm_hardware_unsetup(void)
{
        int cpu;

        for_each_possible_cpu(cpu)
                svm_cpu_uninit(cpu);

        __free_pages(pfn_to_page(iopm_base >> PAGE_SHIFT), IOPM_ALLOC_ORDER);
        iopm_base = 0;
}

static void init_seg(struct vmcb_seg *seg)
{
        seg->selector = 0;
        seg->attrib = SVM_SELECTOR_P_MASK | SVM_SELECTOR_S_MASK |
                      SVM_SELECTOR_WRITE_MASK; /* Read/Write Data Segment */
        seg->limit = 0xffff;
        seg->base = 0;
}

static void init_sys_seg(struct vmcb_seg *seg, uint32_t type)
{
        seg->selector = 0;
        seg->attrib = SVM_SELECTOR_P_MASK | type;
        seg->limit = 0xffff;
        seg->base = 0;
}
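
/*
 * Set up a fresh VMCB with the default intercepts and the architectural
 * reset state of the vcpu (real mode, CS base 0xf0000, RIP 0xfff0).
 */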
static void init_vmcb(struct vcpu_svm *svm)
{
        struct vmcb_control_area *control = &svm->vmcb->control;
        struct vmcb_save_area *save = &svm->vmcb->save;

        control->intercept_cr_read = INTERCEPT_CR0_MASK |
                                     INTERCEPT_CR3_MASK |
                                     INTERCEPT_CR4_MASK;

        control->intercept_cr_write = INTERCEPT_CR0_MASK |
                                      INTERCEPT_CR3_MASK |
                                      INTERCEPT_CR4_MASK |
                                      INTERCEPT_CR8_MASK;

        control->intercept_dr_read = INTERCEPT_DR0_MASK |
                                     INTERCEPT_DR1_MASK |
                                     INTERCEPT_DR2_MASK |
                                     INTERCEPT_DR3_MASK;

        control->intercept_dr_write = INTERCEPT_DR0_MASK |
                                      INTERCEPT_DR1_MASK |
                                      INTERCEPT_DR2_MASK |
                                      INTERCEPT_DR3_MASK |
                                      INTERCEPT_DR5_MASK |
                                      INTERCEPT_DR7_MASK;

        control->intercept_exceptions = (1 << PF_VECTOR) |
                                        (1 << UD_VECTOR) |
                                        (1 << MC_VECTOR);

        control->intercept = (1ULL << INTERCEPT_INTR) |
                             (1ULL << INTERCEPT_NMI) |
                             (1ULL << INTERCEPT_SMI) |
                             (1ULL << INTERCEPT_CPUID) |
                             (1ULL << INTERCEPT_INVD) |
                             (1ULL << INTERCEPT_HLT) |
                             (1ULL << INTERCEPT_INVLPG) |
                             (1ULL << INTERCEPT_INVLPGA) |
                             (1ULL << INTERCEPT_IOIO_PROT) |
                             (1ULL << INTERCEPT_MSR_PROT) |
                             (1ULL << INTERCEPT_TASK_SWITCH) |
                             (1ULL << INTERCEPT_SHUTDOWN) |
                             (1ULL << INTERCEPT_VMRUN) |
                             (1ULL << INTERCEPT_VMMCALL) |
                             (1ULL << INTERCEPT_VMLOAD) |
                             (1ULL << INTERCEPT_VMSAVE) |
                             (1ULL << INTERCEPT_STGI) |
                             (1ULL << INTERCEPT_CLGI) |
                             (1ULL << INTERCEPT_SKINIT) |
                             (1ULL << INTERCEPT_WBINVD) |
                             (1ULL << INTERCEPT_MONITOR) |
                             (1ULL << INTERCEPT_MWAIT);

        control->iopm_base_pa = iopm_base;
        control->msrpm_base_pa = __pa(svm->msrpm);
        control->tsc_offset = 0;
        control->int_ctl = V_INTR_MASKING_MASK;

        init_seg(&save->es);
        init_seg(&save->ss);
        init_seg(&save->ds);
        init_seg(&save->fs);
        init_seg(&save->gs);

        save->cs.selector = 0xf000;
        /* Executable/Readable Code Segment */
        save->cs.attrib = SVM_SELECTOR_READ_MASK | SVM_SELECTOR_P_MASK |
                          SVM_SELECTOR_S_MASK | SVM_SELECTOR_CODE_MASK;
        save->cs.limit = 0xffff;
        /*
         * cs.base should really be 0xffff0000, but vmx can't handle that, so
         * be consistent with it.
         *
         * Replace when we have real mode working for vmx.
         */
        save->cs.base = 0xf0000;

        save->gdtr.limit = 0xffff;
        save->idtr.limit = 0xffff;

        init_sys_seg(&save->ldtr, SEG_TYPE_LDT);
        init_sys_seg(&save->tr, SEG_TYPE_BUSY_TSS16);

        save->efer = EFER_SVME;
        save->dr6 = 0xffff0ff0;
        save->dr7 = 0x400;
        save->rflags = 2;
        save->rip = 0x0000fff0;
        svm->vcpu.arch.regs[VCPU_REGS_RIP] = save->rip;

        /*
         * The CR0 value at CPU reset should be 0x60000010; we enable the
         * CPU cache by default. The proper way is to enable the cache
         * from the BIOS.
         */
        save->cr0 = 0x00000010 | X86_CR0_PG | X86_CR0_WP;
        save->cr4 = X86_CR4_PAE;
        /* rdx = ?? */

        if (npt_enabled) {
                /* Setup VMCB for Nested Paging */
                control->nested_ctl = 1;
                control->intercept &= ~((1ULL << INTERCEPT_TASK_SWITCH) |
                                        (1ULL << INTERCEPT_INVLPG));
                control->intercept_exceptions &= ~(1 << PF_VECTOR);
                control->intercept_cr_read &= ~(INTERCEPT_CR0_MASK |
                                                INTERCEPT_CR3_MASK);
                control->intercept_cr_write &= ~(INTERCEPT_CR0_MASK |
                                                 INTERCEPT_CR3_MASK);
                save->g_pat = 0x0007040600070406ULL;
                /* enable caching because the QEMU BIOS doesn't enable it */
                save->cr0 = X86_CR0_ET;
                save->cr3 = 0;
                save->cr4 = 0;
        }
        force_new_asid(&svm->vcpu);

        svm->nested.vmcb = 0;
        svm->vcpu.arch.hflags = 0;

        enable_gif(svm);
}

static int svm_vcpu_reset(struct kvm_vcpu *vcpu)
{
        struct vcpu_svm *svm = to_svm(vcpu);

        init_vmcb(svm);

        if (!kvm_vcpu_is_bsp(vcpu)) {
                kvm_rip_write(vcpu, 0);
                svm->vmcb->save.cs.base = svm->vcpu.arch.sipi_vector << 12;
                svm->vmcb->save.cs.selector = svm->vcpu.arch.sipi_vector << 8;
        }
        vcpu->arch.regs_avail = ~0;
        vcpu->arch.regs_dirty = ~0;

        return 0;
}

static struct kvm_vcpu *svm_create_vcpu(struct kvm *kvm, unsigned int id)
{
        struct vcpu_svm *svm;
        struct page *page;
        struct page *msrpm_pages;
        struct page *hsave_page;
        struct page *nested_msrpm_pages;
        int err;

        svm = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
        if (!svm) {
                err = -ENOMEM;
                goto out;
        }

        err = kvm_vcpu_init(&svm->vcpu, kvm, id);
        if (err)
                goto free_svm;

        page = alloc_page(GFP_KERNEL);
        if (!page) {
                err = -ENOMEM;
                goto uninit;
        }

        err = -ENOMEM;
        msrpm_pages = alloc_pages(GFP_KERNEL, MSRPM_ALLOC_ORDER);
        if (!msrpm_pages)
                goto uninit;

        nested_msrpm_pages = alloc_pages(GFP_KERNEL, MSRPM_ALLOC_ORDER);
        if (!nested_msrpm_pages)
                goto uninit;

        svm->msrpm = page_address(msrpm_pages);
        svm_vcpu_init_msrpm(svm->msrpm);

        hsave_page = alloc_page(GFP_KERNEL);
        if (!hsave_page)
                goto uninit;
        svm->nested.hsave = page_address(hsave_page);

        svm->nested.msrpm = page_address(nested_msrpm_pages);

        svm->vmcb = page_address(page);
        clear_page(svm->vmcb);
        svm->vmcb_pa = page_to_pfn(page) << PAGE_SHIFT;
        svm->asid_generation = 0;
        init_vmcb(svm);

        fx_init(&svm->vcpu);
        svm->vcpu.fpu_active = 1;
        svm->vcpu.arch.apic_base = 0xfee00000 | MSR_IA32_APICBASE_ENABLE;
        if (kvm_vcpu_is_bsp(&svm->vcpu))
                svm->vcpu.arch.apic_base |= MSR_IA32_APICBASE_BSP;

        return &svm->vcpu;

uninit:
        kvm_vcpu_uninit(&svm->vcpu);
free_svm:
        kmem_cache_free(kvm_vcpu_cache, svm);
out:
        return ERR_PTR(err);
}

static void svm_free_vcpu(struct kvm_vcpu *vcpu)
{
        struct vcpu_svm *svm = to_svm(vcpu);

        __free_page(pfn_to_page(svm->vmcb_pa >> PAGE_SHIFT));
        __free_pages(virt_to_page(svm->msrpm), MSRPM_ALLOC_ORDER);
        __free_page(virt_to_page(svm->nested.hsave));
        __free_pages(virt_to_page(svm->nested.msrpm), MSRPM_ALLOC_ORDER);
        kvm_vcpu_uninit(vcpu);
        kmem_cache_free(kvm_vcpu_cache, svm);
}

static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
        struct vcpu_svm *svm = to_svm(vcpu);
        int i;

        if (unlikely(cpu != vcpu->cpu)) {
                u64 delta;

                /*
                 * Make sure that the guest sees a monotonically
                 * increasing TSC.
                 */
                delta = vcpu->arch.host_tsc - native_read_tsc();
                svm->vmcb->control.tsc_offset += delta;
                if (is_nested(svm))
                        svm->nested.hsave->control.tsc_offset += delta;
                vcpu->cpu = cpu;
                kvm_migrate_timers(vcpu);
                svm->asid_generation = 0;
        }

        for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++)
                rdmsrl(host_save_user_msrs[i], svm->host_user_msrs[i]);
}

static void svm_vcpu_put(struct kvm_vcpu *vcpu)
{
        struct vcpu_svm *svm = to_svm(vcpu);
        int i;

        ++vcpu->stat.host_state_reload;
        for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++)
                wrmsrl(host_save_user_msrs[i], svm->host_user_msrs[i]);

        vcpu->arch.host_tsc = native_read_tsc();
}

static unsigned long svm_get_rflags(struct kvm_vcpu *vcpu)
{
        return to_svm(vcpu)->vmcb->save.rflags;
}

static void svm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
{
        to_svm(vcpu)->vmcb->save.rflags = rflags;
}

static void svm_cache_reg(struct kvm_vcpu *vcpu, enum kvm_reg reg)
{
        switch (reg) {
        case VCPU_EXREG_PDPTR:
                BUG_ON(!npt_enabled);
                load_pdptrs(vcpu, vcpu->arch.cr3);
                break;
        default:
                BUG();
        }
}

static void svm_set_vintr(struct vcpu_svm *svm)
{
        svm->vmcb->control.intercept |= 1ULL << INTERCEPT_VINTR;
}

static void svm_clear_vintr(struct vcpu_svm *svm)
{
        svm->vmcb->control.intercept &= ~(1ULL << INTERCEPT_VINTR);
}

static struct vmcb_seg *svm_seg(struct kvm_vcpu *vcpu, int seg)
{
        struct vmcb_save_area *save = &to_svm(vcpu)->vmcb->save;

        switch (seg) {
        case VCPU_SREG_CS: return &save->cs;
        case VCPU_SREG_DS: return &save->ds;
        case VCPU_SREG_ES: return &save->es;
        case VCPU_SREG_FS: return &save->fs;
        case VCPU_SREG_GS: return &save->gs;
        case VCPU_SREG_SS: return &save->ss;
        case VCPU_SREG_TR: return &save->tr;
        case VCPU_SREG_LDTR: return &save->ldtr;
        }
        BUG();
        return NULL;
}

static u64 svm_get_segment_base(struct kvm_vcpu *vcpu, int seg)
{
        struct vmcb_seg *s = svm_seg(vcpu, seg);

        return s->base;
}

static void svm_get_segment(struct kvm_vcpu *vcpu,
                            struct kvm_segment *var, int seg)
{
        struct vmcb_seg *s = svm_seg(vcpu, seg);

        var->base = s->base;
        var->limit = s->limit;
        var->selector = s->selector;
        var->type = s->attrib & SVM_SELECTOR_TYPE_MASK;
        var->s = (s->attrib >> SVM_SELECTOR_S_SHIFT) & 1;
        var->dpl = (s->attrib >> SVM_SELECTOR_DPL_SHIFT) & 3;
        var->present = (s->attrib >> SVM_SELECTOR_P_SHIFT) & 1;
        var->avl = (s->attrib >> SVM_SELECTOR_AVL_SHIFT) & 1;
        var->l = (s->attrib >> SVM_SELECTOR_L_SHIFT) & 1;
        var->db = (s->attrib >> SVM_SELECTOR_DB_SHIFT) & 1;
        var->g = (s->attrib >> SVM_SELECTOR_G_SHIFT) & 1;

        /*
         * AMD's VMCB does not have an explicit unusable field, so emulate it
         * for cross-vendor migration purposes as "not present".
         */
        var->unusable = !var->present || (var->type == 0);

        switch (seg) {
        case VCPU_SREG_CS:
                /*
                 * SVM always stores 0 for the 'G' bit in the CS selector in
                 * the VMCB on a VMEXIT. This hurts cross-vendor migration:
                 * Intel's VMENTRY has a check on the 'G' bit.
                 */
                var->g = s->limit > 0xfffff;
                break;
        case VCPU_SREG_TR:
                /*
                 * Work around a bug where the busy flag in the tr selector
                 * isn't exposed.
                 */
                var->type |= 0x2;
                break;
        case VCPU_SREG_DS:
        case VCPU_SREG_ES:
        case VCPU_SREG_FS:
        case VCPU_SREG_GS:
                /*
                 * The accessed bit must always be set in the segment
                 * descriptor cache: even if it is cleared in the
                 * descriptor, the cached bit stays 1. Since Intel checks
                 * this, set it here to support cross-vendor migration.
                 */
                if (!var->unusable)
                        var->type |= 0x1;
                break;
        case VCPU_SREG_SS:
                /*
                 * On AMD CPUs the DB bit in the segment descriptor is
                 * sometimes left as 1 even though the whole segment has
                 * been made unusable. Clear it here to pass an Intel VMX
                 * entry check when cross-vendor migrating.
                 */
                if (var->unusable)
                        var->db = 0;
                break;
        }
}

static int svm_get_cpl(struct kvm_vcpu *vcpu)
{
        struct vmcb_save_area *save = &to_svm(vcpu)->vmcb->save;

        return save->cpl;
}

static void svm_get_idt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
{
        struct vcpu_svm *svm = to_svm(vcpu);

        dt->limit = svm->vmcb->save.idtr.limit;
        dt->base = svm->vmcb->save.idtr.base;
}

static void svm_set_idt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
{
        struct vcpu_svm *svm = to_svm(vcpu);

        svm->vmcb->save.idtr.limit = dt->limit;
        svm->vmcb->save.idtr.base = dt->base;
}

static void svm_get_gdt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
{
        struct vcpu_svm *svm = to_svm(vcpu);

        dt->limit = svm->vmcb->save.gdtr.limit;
        dt->base = svm->vmcb->save.gdtr.base;
}

static void svm_set_gdt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
{
        struct vcpu_svm *svm = to_svm(vcpu);

        svm->vmcb->save.gdtr.limit = dt->limit;
        svm->vmcb->save.gdtr.base = dt->base;
}

static void svm_decache_cr4_guest_bits(struct kvm_vcpu *vcpu)
{
}

static void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
{
        struct vcpu_svm *svm = to_svm(vcpu);

#ifdef CONFIG_X86_64
        if (vcpu->arch.shadow_efer & EFER_LME) {
                if (!is_paging(vcpu) && (cr0 & X86_CR0_PG)) {
                        vcpu->arch.shadow_efer |= EFER_LMA;
                        svm->vmcb->save.efer |= EFER_LMA | EFER_LME;
                }

                if (is_paging(vcpu) && !(cr0 & X86_CR0_PG)) {
                        vcpu->arch.shadow_efer &= ~EFER_LMA;
                        svm->vmcb->save.efer &= ~(EFER_LMA | EFER_LME);
                }
        }
#endif
        if (npt_enabled)
                goto set;

        if ((vcpu->arch.cr0 & X86_CR0_TS) && !(cr0 & X86_CR0_TS)) {
                svm->vmcb->control.intercept_exceptions &= ~(1 << NM_VECTOR);
                vcpu->fpu_active = 1;
        }

        vcpu->arch.cr0 = cr0;
        cr0 |= X86_CR0_PG | X86_CR0_WP;
        if (!vcpu->fpu_active) {
                svm->vmcb->control.intercept_exceptions |= (1 << NM_VECTOR);
                cr0 |= X86_CR0_TS;
        }
set:
        /*
         * re-enable caching here because the QEMU BIOS
         * does not do it - this results in some delay at
         * reboot
         */
        cr0 &= ~(X86_CR0_CD | X86_CR0_NW);
        svm->vmcb->save.cr0 = cr0;
}

static void svm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
{
        unsigned long host_cr4_mce = read_cr4() & X86_CR4_MCE;
        unsigned long old_cr4 = to_svm(vcpu)->vmcb->save.cr4;

        if (npt_enabled && ((old_cr4 ^ cr4) & X86_CR4_PGE))
                force_new_asid(vcpu);

        vcpu->arch.cr4 = cr4;
        if (!npt_enabled)
                cr4 |= X86_CR4_PAE;
        cr4 |= host_cr4_mce;
        to_svm(vcpu)->vmcb->save.cr4 = cr4;
}

static void svm_set_segment(struct kvm_vcpu *vcpu,
                            struct kvm_segment *var, int seg)
{
        struct vcpu_svm *svm = to_svm(vcpu);
        struct vmcb_seg *s = svm_seg(vcpu, seg);

        s->base = var->base;
        s->limit = var->limit;
        s->selector = var->selector;
        if (var->unusable)
                s->attrib = 0;
        else {
                s->attrib = (var->type & SVM_SELECTOR_TYPE_MASK);
                s->attrib |= (var->s & 1) << SVM_SELECTOR_S_SHIFT;
                s->attrib |= (var->dpl & 3) << SVM_SELECTOR_DPL_SHIFT;
                s->attrib |= (var->present & 1) << SVM_SELECTOR_P_SHIFT;
                s->attrib |= (var->avl & 1) << SVM_SELECTOR_AVL_SHIFT;
                s->attrib |= (var->l & 1) << SVM_SELECTOR_L_SHIFT;
                s->attrib |= (var->db & 1) << SVM_SELECTOR_DB_SHIFT;
                s->attrib |= (var->g & 1) << SVM_SELECTOR_G_SHIFT;
        }
        if (seg == VCPU_SREG_CS)
                svm->vmcb->save.cpl
                        = (svm->vmcb->save.cs.attrib
                           >> SVM_SELECTOR_DPL_SHIFT) & 3;
}
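
/*
 * Reprogram the #DB/#BP exception intercepts according to the current
 * guest-debug settings and the single-step state.
 */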
static void update_db_intercept(struct kvm_vcpu *vcpu)
{
        struct vcpu_svm *svm = to_svm(vcpu);

        svm->vmcb->control.intercept_exceptions &=
                ~((1 << DB_VECTOR) | (1 << BP_VECTOR));

        if (vcpu->arch.singlestep)
                svm->vmcb->control.intercept_exceptions |= (1 << DB_VECTOR);

        if (vcpu->guest_debug & KVM_GUESTDBG_ENABLE) {
                if (vcpu->guest_debug &
                    (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP))
                        svm->vmcb->control.intercept_exceptions |=
                                1 << DB_VECTOR;
                if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP)
                        svm->vmcb->control.intercept_exceptions |=
                                1 << BP_VECTOR;
        } else
                vcpu->guest_debug = 0;
}

static void svm_guest_debug(struct kvm_vcpu *vcpu, struct kvm_guest_debug *dbg)
{
        struct vcpu_svm *svm = to_svm(vcpu);

        if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)
                svm->vmcb->save.dr7 = dbg->arch.debugreg[7];
        else
                svm->vmcb->save.dr7 = vcpu->arch.dr7;

        update_db_intercept(vcpu);
}

static void load_host_msrs(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_X86_64
        wrmsrl(MSR_GS_BASE, to_svm(vcpu)->host_gs_base);
#endif
}

static void save_host_msrs(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_X86_64
        rdmsrl(MSR_GS_BASE, to_svm(vcpu)->host_gs_base);
#endif
}
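
/*
 * Hand out the next ASID for this CPU; when the ASID space is exhausted,
 * bump the generation and request a full TLB flush on the next VMRUN.
 */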
static void new_asid(struct vcpu_svm *svm, struct svm_cpu_data *svm_data)
{
        if (svm_data->next_asid > svm_data->max_asid) {
                ++svm_data->asid_generation;
                svm_data->next_asid = 1;
                svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ALL_ASID;
        }

        svm->asid_generation = svm_data->asid_generation;
        svm->vmcb->control.asid = svm_data->next_asid++;
}

static unsigned long svm_get_dr(struct kvm_vcpu *vcpu, int dr)
{
        struct vcpu_svm *svm = to_svm(vcpu);
        unsigned long val;

        switch (dr) {
        case 0 ... 3:
                val = vcpu->arch.db[dr];
                break;
        case 6:
                if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)
                        val = vcpu->arch.dr6;
                else
                        val = svm->vmcb->save.dr6;
                break;
        case 7:
                if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)
                        val = vcpu->arch.dr7;
                else
                        val = svm->vmcb->save.dr7;
                break;
        default:
                val = 0;
        }

        return val;
}

static void svm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long value,
                       int *exception)
{
        struct vcpu_svm *svm = to_svm(vcpu);

        *exception = 0;

        switch (dr) {
        case 0 ... 3:
                vcpu->arch.db[dr] = value;
                if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP))
                        vcpu->arch.eff_db[dr] = value;
                return;
        case 4 ... 5:
                if (vcpu->arch.cr4 & X86_CR4_DE)
                        *exception = UD_VECTOR;
                return;
        case 6:
                if (value & 0xffffffff00000000ULL) {
                        *exception = GP_VECTOR;
                        return;
                }
                vcpu->arch.dr6 = (value & DR6_VOLATILE) | DR6_FIXED_1;
                return;
        case 7:
                if (value & 0xffffffff00000000ULL) {
                        *exception = GP_VECTOR;
                        return;
                }
                vcpu->arch.dr7 = (value & DR7_VOLATILE) | DR7_FIXED_1;
                if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)) {
                        svm->vmcb->save.dr7 = vcpu->arch.dr7;
                        vcpu->arch.switch_db_regs = (value & DR7_BP_EN_MASK);
                }
                return;
        default:
                /* FIXME: Possible case? */
                printk(KERN_DEBUG "%s: unexpected dr %u\n",
                       __func__, dr);
                *exception = UD_VECTOR;
                return;
        }
}

static int pf_interception(struct vcpu_svm *svm)
{
        u64 fault_address;
        u32 error_code;

        fault_address = svm->vmcb->control.exit_info_2;
        error_code = svm->vmcb->control.exit_info_1;

        trace_kvm_page_fault(fault_address, error_code);
        if (!npt_enabled && kvm_event_needs_reinjection(&svm->vcpu))
                kvm_mmu_unprotect_page_virt(&svm->vcpu, fault_address);
        return kvm_mmu_page_fault(&svm->vcpu, fault_address, error_code);
}

static int db_interception(struct vcpu_svm *svm)
{
        struct kvm_run *kvm_run = svm->vcpu.run;

        if (!(svm->vcpu.guest_debug &
              (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP)) &&
            !svm->vcpu.arch.singlestep) {
                kvm_queue_exception(&svm->vcpu, DB_VECTOR);
                return 1;
        }

        if (svm->vcpu.arch.singlestep) {
                svm->vcpu.arch.singlestep = false;
                if (!(svm->vcpu.guest_debug & KVM_GUESTDBG_SINGLESTEP))
                        svm->vmcb->save.rflags &=
                                ~(X86_EFLAGS_TF | X86_EFLAGS_RF);
                update_db_intercept(&svm->vcpu);
        }

        if (svm->vcpu.guest_debug &
            (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP)) {
                kvm_run->exit_reason = KVM_EXIT_DEBUG;
                kvm_run->debug.arch.pc =
                        svm->vmcb->save.cs.base + svm->vmcb->save.rip;
                kvm_run->debug.arch.exception = DB_VECTOR;
                return 0;
        }

        return 1;
}

static int bp_interception(struct vcpu_svm *svm)
{
        struct kvm_run *kvm_run = svm->vcpu.run;

        kvm_run->exit_reason = KVM_EXIT_DEBUG;
        kvm_run->debug.arch.pc = svm->vmcb->save.cs.base + svm->vmcb->save.rip;
        kvm_run->debug.arch.exception = BP_VECTOR;
        return 0;
}

static int ud_interception(struct vcpu_svm *svm)
{
        int er;

        er = emulate_instruction(&svm->vcpu, 0, 0, EMULTYPE_TRAP_UD);
        if (er != EMULATE_DONE)
                kvm_queue_exception(&svm->vcpu, UD_VECTOR);
        return 1;
}

static int nm_interception(struct vcpu_svm *svm)
{
        svm->vmcb->control.intercept_exceptions &= ~(1 << NM_VECTOR);
        if (!(svm->vcpu.arch.cr0 & X86_CR0_TS))
                svm->vmcb->save.cr0 &= ~X86_CR0_TS;
        svm->vcpu.fpu_active = 1;

        return 1;
}

static int mc_interception(struct vcpu_svm *svm)
{
        /*
         * On an #MC intercept the MCE handler is not called automatically in
         * the host. So do it by hand here.
         */
        asm volatile (
                "int $0x12\n");
        /* not sure if we ever come back to this point */

        return 1;
}

static int shutdown_interception(struct vcpu_svm *svm)
{
        struct kvm_run *kvm_run = svm->vcpu.run;

        /*
         * VMCB is undefined after a SHUTDOWN intercept
         * so reinitialize it.
         */
        clear_page(svm->vmcb);
        init_vmcb(svm);

        kvm_run->exit_reason = KVM_EXIT_SHUTDOWN;
        return 0;
}
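
/*
 * Decode an IOIO intercept: exit_info_1 carries the string/direction/size
 * bits and the port number in its upper 16 bits, exit_info_2 the next RIP.
 */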
static int io_interception(struct vcpu_svm *svm)
{
        u32 io_info = svm->vmcb->control.exit_info_1; /* address size bug? */
        int size, in, string;
        unsigned port;

        ++svm->vcpu.stat.io_exits;

        svm->next_rip = svm->vmcb->control.exit_info_2;

        string = (io_info & SVM_IOIO_STR_MASK) != 0;

        if (string) {
                if (emulate_instruction(&svm->vcpu,
                                        0, 0, 0) == EMULATE_DO_MMIO)
                        return 0;
                return 1;
        }

        in = (io_info & SVM_IOIO_TYPE_MASK) != 0;
        port = io_info >> 16;
        size = (io_info & SVM_IOIO_SIZE_MASK) >> SVM_IOIO_SIZE_SHIFT;

        skip_emulated_instruction(&svm->vcpu);
        return kvm_emulate_pio(&svm->vcpu, in, size, port);
}

static int nmi_interception(struct vcpu_svm *svm)
{
        return 1;
}

static int intr_interception(struct vcpu_svm *svm)
{
        ++svm->vcpu.stat.irq_exits;
        return 1;
}

static int nop_on_interception(struct vcpu_svm *svm)
{
        return 1;
}

static int halt_interception(struct vcpu_svm *svm)
{
        svm->next_rip = kvm_rip_read(&svm->vcpu) + 1;
        skip_emulated_instruction(&svm->vcpu);
        return kvm_emulate_halt(&svm->vcpu);
}

static int vmmcall_interception(struct vcpu_svm *svm)
{
        svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
        skip_emulated_instruction(&svm->vcpu);
        kvm_emulate_hypercall(&svm->vcpu);
        return 1;
}

static int nested_svm_check_permissions(struct vcpu_svm *svm)
{
        if (!(svm->vcpu.arch.shadow_efer & EFER_SVME)
            || !is_paging(&svm->vcpu)) {
                kvm_queue_exception(&svm->vcpu, UD_VECTOR);
                return 1;
        }

        if (svm->vmcb->save.cpl) {
                kvm_inject_gp(&svm->vcpu, 0);
                return 1;
        }

        return 0;
}

static int nested_svm_check_exception(struct vcpu_svm *svm, unsigned nr,
                                      bool has_error_code, u32 error_code)
{
        if (!is_nested(svm))
                return 0;

        svm->vmcb->control.exit_code = SVM_EXIT_EXCP_BASE + nr;
        svm->vmcb->control.exit_code_hi = 0;
        svm->vmcb->control.exit_info_1 = error_code;
        svm->vmcb->control.exit_info_2 = svm->vcpu.arch.cr2;

        return nested_svm_exit_handled(svm);
}

static inline int nested_svm_intr(struct vcpu_svm *svm)
{
        if (!is_nested(svm))
                return 0;

        if (!(svm->vcpu.arch.hflags & HF_VINTR_MASK))
                return 0;

        if (!(svm->vcpu.arch.hflags & HF_HIF_MASK))
                return 0;

        svm->vmcb->control.exit_code = SVM_EXIT_INTR;

        if (svm->nested.intercept & 1ULL) {
                /*
                 * The #vmexit can't be emulated here directly because this
                 * code path runs with irqs and preemption disabled. A
                 * #vmexit emulation might sleep. Only signal request for
                 * the #vmexit here.
                 */
                svm->nested.exit_required = true;
                nsvm_printk("VMexit -> INTR\n");
                return 1;
        }

        return 0;
}
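
/*
 * Map a guest physical page of the nested (L1) hypervisor's state, e.g.
 * its VMCB or MSR permission map, via kmap_atomic. A failed lookup injects
 * #GP into the guest.
 */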
static void *nested_svm_map(struct vcpu_svm *svm, u64 gpa, enum km_type idx)
{
        struct page *page;

        page = gfn_to_page(svm->vcpu.kvm, gpa >> PAGE_SHIFT);
        if (is_error_page(page))
                goto error;

        return kmap_atomic(page, idx);

error:
        kvm_release_page_clean(page);
        kvm_inject_gp(&svm->vcpu, 0);

        return NULL;
}

static void nested_svm_unmap(void *addr, enum km_type idx)
{
        struct page *page;

        if (!addr)
                return;

        page = kmap_atomic_to_page(addr);

        kunmap_atomic(addr, idx);
        kvm_release_page_dirty(page);
}
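
/*
 * Consult the nested hypervisor's MSR permission map for the MSR in RCX.
 * Each MSR has two bits in the map; exit_info_1 bit 0 selects the read (0)
 * or write (1) bit. The 0xc0000000 and 0xc0010000 ranges sit at MSR
 * offsets 8192 and 16384 within the map, so the bit offset is computed
 * relative to those blocks.
 */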
static bool nested_svm_exit_handled_msr(struct vcpu_svm *svm)
{
        u32 param = svm->vmcb->control.exit_info_1 & 1;
        u32 msr = svm->vcpu.arch.regs[VCPU_REGS_RCX];
        bool ret = false;
        u32 t0, t1;
        u8 *msrpm;

        if (!(svm->nested.intercept & (1ULL << INTERCEPT_MSR_PROT)))
                return false;

        msrpm = nested_svm_map(svm, svm->nested.vmcb_msrpm, KM_USER0);

        if (!msrpm)
                goto out;

        switch (msr) {
        case 0 ... 0x1fff:
                t0 = (msr * 2) % 8;
                t1 = msr / 8;
                break;
        case 0xc0000000 ... 0xc0001fff:
                t0 = (8192 + msr - 0xc0000000) * 2;
                t1 = (t0 / 8);
                t0 %= 8;
                break;
        case 0xc0010000 ... 0xc0011fff:
                t0 = (16384 + msr - 0xc0010000) * 2;
                t1 = (t0 / 8);
                t0 %= 8;
                break;
        default:
                ret = true;
                goto out;
        }

        ret = msrpm[t1] & ((1 << param) << t0);

out:
        nested_svm_unmap(msrpm, KM_USER0);

        return ret;
}

static int nested_svm_exit_special(struct vcpu_svm *svm)
{
        u32 exit_code = svm->vmcb->control.exit_code;

        switch (exit_code) {
        case SVM_EXIT_INTR:
        case SVM_EXIT_NMI:
                return NESTED_EXIT_HOST;
        /* For now we are always handling NPFs when using them */
        case SVM_EXIT_NPF:
                if (npt_enabled)
                        return NESTED_EXIT_HOST;
                break;
        /* When we're shadowing, trap PFs */
        case SVM_EXIT_EXCP_BASE + PF_VECTOR:
                if (!npt_enabled)
                        return NESTED_EXIT_HOST;
                break;
        default:
                break;
        }

        return NESTED_EXIT_CONTINUE;
}

/*
 * If this function returns true, this #vmexit was already handled
 */
static int nested_svm_exit_handled(struct vcpu_svm *svm)
{
        u32 exit_code = svm->vmcb->control.exit_code;
        int vmexit = NESTED_EXIT_HOST;

        switch (exit_code) {
        case SVM_EXIT_MSR:
                vmexit = nested_svm_exit_handled_msr(svm);
                break;
        case SVM_EXIT_READ_CR0 ... SVM_EXIT_READ_CR8: {
                u32 cr_bits = 1 << (exit_code - SVM_EXIT_READ_CR0);
                if (svm->nested.intercept_cr_read & cr_bits)
                        vmexit = NESTED_EXIT_DONE;
                break;
        }
        case SVM_EXIT_WRITE_CR0 ... SVM_EXIT_WRITE_CR8: {
                u32 cr_bits = 1 << (exit_code - SVM_EXIT_WRITE_CR0);
                if (svm->nested.intercept_cr_write & cr_bits)
                        vmexit = NESTED_EXIT_DONE;
                break;
        }
        case SVM_EXIT_READ_DR0 ... SVM_EXIT_READ_DR7: {
                u32 dr_bits = 1 << (exit_code - SVM_EXIT_READ_DR0);
                if (svm->nested.intercept_dr_read & dr_bits)
                        vmexit = NESTED_EXIT_DONE;
                break;
        }
        case SVM_EXIT_WRITE_DR0 ... SVM_EXIT_WRITE_DR7: {
                u32 dr_bits = 1 << (exit_code - SVM_EXIT_WRITE_DR0);
                if (svm->nested.intercept_dr_write & dr_bits)
                        vmexit = NESTED_EXIT_DONE;
                break;
        }
        case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 0x1f: {
                u32 excp_bits = 1 << (exit_code - SVM_EXIT_EXCP_BASE);
                if (svm->nested.intercept_exceptions & excp_bits)
                        vmexit = NESTED_EXIT_DONE;
                break;
        }
        default: {
                u64 exit_bits = 1ULL << (exit_code - SVM_EXIT_INTR);
                nsvm_printk("exit code: 0x%x\n", exit_code);
                if (svm->nested.intercept & exit_bits)
                        vmexit = NESTED_EXIT_DONE;
        }
        }

        if (vmexit == NESTED_EXIT_DONE) {
                nsvm_printk("#VMEXIT reason=%04x\n", exit_code);
                nested_svm_vmexit(svm);
        }

        return vmexit;
}

static inline void copy_vmcb_control_area(struct vmcb *dst_vmcb, struct vmcb *from_vmcb)
{
        struct vmcb_control_area *dst = &dst_vmcb->control;
        struct vmcb_control_area *from = &from_vmcb->control;

        dst->intercept_cr_read = from->intercept_cr_read;
        dst->intercept_cr_write = from->intercept_cr_write;
        dst->intercept_dr_read = from->intercept_dr_read;
        dst->intercept_dr_write = from->intercept_dr_write;
        dst->intercept_exceptions = from->intercept_exceptions;
        dst->intercept = from->intercept;
        dst->iopm_base_pa = from->iopm_base_pa;
        dst->msrpm_base_pa = from->msrpm_base_pa;
        dst->tsc_offset = from->tsc_offset;
        dst->asid = from->asid;
        dst->tlb_ctl = from->tlb_ctl;
        dst->int_ctl = from->int_ctl;
        dst->int_vector = from->int_vector;
        dst->int_state = from->int_state;
        dst->exit_code = from->exit_code;
        dst->exit_code_hi = from->exit_code_hi;
        dst->exit_info_1 = from->exit_info_1;
        dst->exit_info_2 = from->exit_info_2;
        dst->exit_int_info = from->exit_int_info;
        dst->exit_int_info_err = from->exit_int_info_err;
        dst->nested_ctl = from->nested_ctl;
        dst->event_inj = from->event_inj;
        dst->event_inj_err = from->event_inj_err;
        dst->nested_cr3 = from->nested_cr3;
        dst->lbr_ctl = from->lbr_ctl;
}
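
/*
 * Emulate #VMEXIT to the nested (L1) hypervisor: copy the current VMCB
 * state back into the guest's nested VMCB, restore the host state saved
 * in hsave at VMRUN time, and leave nested mode.
 */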
static int nested_svm_vmexit(struct vcpu_svm *svm)
{
	struct vmcb *nested_vmcb;
	struct vmcb *hsave = svm->nested.hsave;
	struct vmcb *vmcb = svm->vmcb;

	nested_vmcb = nested_svm_map(svm, svm->nested.vmcb, KM_USER0);
	if (!nested_vmcb)
		return 1;

	/* Give the current vmcb to the guest */
	disable_gif(svm);

	nested_vmcb->save.es = vmcb->save.es;
	nested_vmcb->save.cs = vmcb->save.cs;
	nested_vmcb->save.ss = vmcb->save.ss;
	nested_vmcb->save.ds = vmcb->save.ds;
	nested_vmcb->save.gdtr = vmcb->save.gdtr;
	nested_vmcb->save.idtr = vmcb->save.idtr;
	if (npt_enabled)
		nested_vmcb->save.cr3 = vmcb->save.cr3;
	nested_vmcb->save.cr2 = vmcb->save.cr2;
	nested_vmcb->save.rflags = vmcb->save.rflags;
	nested_vmcb->save.rip = vmcb->save.rip;
	nested_vmcb->save.rsp = vmcb->save.rsp;
	nested_vmcb->save.rax = vmcb->save.rax;
	nested_vmcb->save.dr7 = vmcb->save.dr7;
	nested_vmcb->save.dr6 = vmcb->save.dr6;
	nested_vmcb->save.cpl = vmcb->save.cpl;

	nested_vmcb->control.int_ctl = vmcb->control.int_ctl;
	nested_vmcb->control.int_vector = vmcb->control.int_vector;
	nested_vmcb->control.int_state = vmcb->control.int_state;
	nested_vmcb->control.exit_code = vmcb->control.exit_code;
	nested_vmcb->control.exit_code_hi = vmcb->control.exit_code_hi;
	nested_vmcb->control.exit_info_1 = vmcb->control.exit_info_1;
	nested_vmcb->control.exit_info_2 = vmcb->control.exit_info_2;
	nested_vmcb->control.exit_int_info = vmcb->control.exit_int_info;
	nested_vmcb->control.exit_int_info_err = vmcb->control.exit_int_info_err;

	/*
	 * If we emulate a VMRUN/#VMEXIT in the same host #vmexit cycle we have
	 * to make sure that we do not lose injected events. So check event_inj
	 * here and copy it to exit_int_info if it is valid.
	 * exit_int_info and event_inj can't both be valid because the case
	 * below only happens on a VMRUN instruction intercept which has
	 * no valid exit_int_info set.
	 */
	if (vmcb->control.event_inj & SVM_EVTINJ_VALID) {
		struct vmcb_control_area *nc = &nested_vmcb->control;

		nc->exit_int_info = vmcb->control.event_inj;
		nc->exit_int_info_err = vmcb->control.event_inj_err;
	}

	nested_vmcb->control.tlb_ctl = 0;
	nested_vmcb->control.event_inj = 0;
	nested_vmcb->control.event_inj_err = 0;

	/* We always set V_INTR_MASKING and remember the old value in hflags */
	if (!(svm->vcpu.arch.hflags & HF_VINTR_MASK))
		nested_vmcb->control.int_ctl &= ~V_INTR_MASKING_MASK;

	/* Restore the original control entries */
	copy_vmcb_control_area(vmcb, hsave);

	/* Kill any pending exceptions */
	if (svm->vcpu.arch.exception.pending == true)
		nsvm_printk("WARNING: Pending Exception\n");

	kvm_clear_exception_queue(&svm->vcpu);
	kvm_clear_interrupt_queue(&svm->vcpu);

	/* Restore selected save entries */
	svm->vmcb->save.es = hsave->save.es;
	svm->vmcb->save.cs = hsave->save.cs;
	svm->vmcb->save.ss = hsave->save.ss;
	svm->vmcb->save.ds = hsave->save.ds;
	svm->vmcb->save.gdtr = hsave->save.gdtr;
	svm->vmcb->save.idtr = hsave->save.idtr;
	svm->vmcb->save.rflags = hsave->save.rflags;
	svm_set_efer(&svm->vcpu, hsave->save.efer);
	svm_set_cr0(&svm->vcpu, hsave->save.cr0 | X86_CR0_PE);
	svm_set_cr4(&svm->vcpu, hsave->save.cr4);
	if (npt_enabled) {
		svm->vmcb->save.cr3 = hsave->save.cr3;
		svm->vcpu.arch.cr3 = hsave->save.cr3;
	} else {
		kvm_set_cr3(&svm->vcpu, hsave->save.cr3);
	}
	kvm_register_write(&svm->vcpu, VCPU_REGS_RAX, hsave->save.rax);
	kvm_register_write(&svm->vcpu, VCPU_REGS_RSP, hsave->save.rsp);
	kvm_register_write(&svm->vcpu, VCPU_REGS_RIP, hsave->save.rip);
	svm->vmcb->save.dr7 = 0;
	svm->vmcb->save.cpl = 0;
	svm->vmcb->control.exit_int_info = 0;

	/* Exit nested SVM mode */
	svm->nested.vmcb = 0;

	nested_svm_unmap(nested_vmcb, KM_USER0);

	kvm_mmu_reset_context(&svm->vcpu);
	kvm_mmu_load(&svm->vcpu);

	return 0;
}
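
/*
 * OR the nested guest's MSR permission bitmap into our own, so an MSR
 * access is intercepted whenever either KVM or the nested hypervisor
 * wants to intercept it.
 */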
static bool nested_svm_vmrun_msrpm(struct vcpu_svm *svm)
{
	u32 *nested_msrpm;
	int i;

	nested_msrpm = nested_svm_map(svm, svm->nested.vmcb_msrpm, KM_USER0);
	if (!nested_msrpm)
		return false;

	for (i = 0; i < PAGE_SIZE * (1 << MSRPM_ALLOC_ORDER) / 4; i++)
		svm->nested.msrpm[i] = svm->msrpm[i] | nested_msrpm[i];

	svm->vmcb->control.msrpm_base_pa = __pa(svm->nested.msrpm);

	nested_svm_unmap(nested_msrpm, KM_USER0);

	return true;
}
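
/*
 * Emulate VMRUN: save the current (L1) state into hsave, load the nested
 * (L2) guest state from the VMCB pointed to by RAX, and OR the nested
 * intercepts into our own before entering the nested guest.
 */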
static bool nested_svm_vmrun(struct vcpu_svm *svm)
{
	struct vmcb *nested_vmcb;
	struct vmcb *hsave = svm->nested.hsave;
	struct vmcb *vmcb = svm->vmcb;

	nested_vmcb = nested_svm_map(svm, svm->vmcb->save.rax, KM_USER0);
	if (!nested_vmcb)
		return false;

	/* nested_vmcb is our indicator if nested SVM is activated */
	svm->nested.vmcb = svm->vmcb->save.rax;

	trace_kvm_nested_vmrun(svm->vmcb->save.rip - 3, svm->nested.vmcb,
			       nested_vmcb->save.rip,
			       nested_vmcb->control.int_ctl,
			       nested_vmcb->control.event_inj,
			       nested_vmcb->control.nested_ctl);

	/* Clear internal status */
	kvm_clear_exception_queue(&svm->vcpu);
	kvm_clear_interrupt_queue(&svm->vcpu);

	/*
	 * Save the old vmcb, so we don't need to pick what we save, but can
	 * restore everything when a VMEXIT occurs
	 */
	hsave->save.es = vmcb->save.es;
	hsave->save.cs = vmcb->save.cs;
	hsave->save.ss = vmcb->save.ss;
	hsave->save.ds = vmcb->save.ds;
	hsave->save.gdtr = vmcb->save.gdtr;
	hsave->save.idtr = vmcb->save.idtr;
	hsave->save.efer = svm->vcpu.arch.shadow_efer;
	hsave->save.cr0 = svm->vcpu.arch.cr0;
	hsave->save.cr4 = svm->vcpu.arch.cr4;
	hsave->save.rflags = vmcb->save.rflags;
	hsave->save.rip = svm->next_rip;
	hsave->save.rsp = vmcb->save.rsp;
	hsave->save.rax = vmcb->save.rax;
	if (npt_enabled)
		hsave->save.cr3 = vmcb->save.cr3;
	else
		hsave->save.cr3 = svm->vcpu.arch.cr3;

	copy_vmcb_control_area(hsave, vmcb);

	if (svm->vmcb->save.rflags & X86_EFLAGS_IF)
		svm->vcpu.arch.hflags |= HF_HIF_MASK;
	else
		svm->vcpu.arch.hflags &= ~HF_HIF_MASK;

	/* Load the nested guest state */
	svm->vmcb->save.es = nested_vmcb->save.es;
	svm->vmcb->save.cs = nested_vmcb->save.cs;
	svm->vmcb->save.ss = nested_vmcb->save.ss;
	svm->vmcb->save.ds = nested_vmcb->save.ds;
	svm->vmcb->save.gdtr = nested_vmcb->save.gdtr;
	svm->vmcb->save.idtr = nested_vmcb->save.idtr;
	svm->vmcb->save.rflags = nested_vmcb->save.rflags;
	svm_set_efer(&svm->vcpu, nested_vmcb->save.efer);
	svm_set_cr0(&svm->vcpu, nested_vmcb->save.cr0);
	svm_set_cr4(&svm->vcpu, nested_vmcb->save.cr4);
	if (npt_enabled) {
		svm->vmcb->save.cr3 = nested_vmcb->save.cr3;
		svm->vcpu.arch.cr3 = nested_vmcb->save.cr3;
	} else {
		kvm_set_cr3(&svm->vcpu, nested_vmcb->save.cr3);
		kvm_mmu_reset_context(&svm->vcpu);
	}
	svm->vmcb->save.cr2 = svm->vcpu.arch.cr2 = nested_vmcb->save.cr2;
	kvm_register_write(&svm->vcpu, VCPU_REGS_RAX, nested_vmcb->save.rax);
	kvm_register_write(&svm->vcpu, VCPU_REGS_RSP, nested_vmcb->save.rsp);
	kvm_register_write(&svm->vcpu, VCPU_REGS_RIP, nested_vmcb->save.rip);

	/* In case we don't even reach vcpu_run, the fields are not updated */
	svm->vmcb->save.rax = nested_vmcb->save.rax;
	svm->vmcb->save.rsp = nested_vmcb->save.rsp;
	svm->vmcb->save.rip = nested_vmcb->save.rip;
	svm->vmcb->save.dr7 = nested_vmcb->save.dr7;
	svm->vmcb->save.dr6 = nested_vmcb->save.dr6;
	svm->vmcb->save.cpl = nested_vmcb->save.cpl;

	/*
	 * We don't want a nested guest to be more powerful than the guest, so
	 * all intercepts are ORed
	 */
	svm->vmcb->control.intercept_cr_read |=
		nested_vmcb->control.intercept_cr_read;
	svm->vmcb->control.intercept_cr_write |=
		nested_vmcb->control.intercept_cr_write;
	svm->vmcb->control.intercept_dr_read |=
		nested_vmcb->control.intercept_dr_read;
	svm->vmcb->control.intercept_dr_write |=
		nested_vmcb->control.intercept_dr_write;
	svm->vmcb->control.intercept_exceptions |=
		nested_vmcb->control.intercept_exceptions;

	svm->vmcb->control.intercept |= nested_vmcb->control.intercept;

	svm->nested.vmcb_msrpm = nested_vmcb->control.msrpm_base_pa;

	/* cache intercepts */
	svm->nested.intercept_cr_read = nested_vmcb->control.intercept_cr_read;
	svm->nested.intercept_cr_write = nested_vmcb->control.intercept_cr_write;
	svm->nested.intercept_dr_read = nested_vmcb->control.intercept_dr_read;
	svm->nested.intercept_dr_write = nested_vmcb->control.intercept_dr_write;
	svm->nested.intercept_exceptions = nested_vmcb->control.intercept_exceptions;
	svm->nested.intercept = nested_vmcb->control.intercept;

	force_new_asid(&svm->vcpu);
	svm->vmcb->control.int_ctl = nested_vmcb->control.int_ctl | V_INTR_MASKING_MASK;
	if (nested_vmcb->control.int_ctl & V_IRQ_MASK) {
		nsvm_printk("nSVM Injecting Interrupt: 0x%x\n",
			    nested_vmcb->control.int_ctl);
	}
	if (nested_vmcb->control.int_ctl & V_INTR_MASKING_MASK)
		svm->vcpu.arch.hflags |= HF_VINTR_MASK;
	else
		svm->vcpu.arch.hflags &= ~HF_VINTR_MASK;

	nsvm_printk("nSVM exit_int_info: 0x%x | int_state: 0x%x\n",
		    nested_vmcb->control.exit_int_info,
		    nested_vmcb->control.int_state);

	svm->vmcb->control.int_vector = nested_vmcb->control.int_vector;
	svm->vmcb->control.int_state = nested_vmcb->control.int_state;
	svm->vmcb->control.tsc_offset += nested_vmcb->control.tsc_offset;
	if (nested_vmcb->control.event_inj & SVM_EVTINJ_VALID)
		nsvm_printk("Injecting Event: 0x%x\n",
			    nested_vmcb->control.event_inj);
	svm->vmcb->control.event_inj = nested_vmcb->control.event_inj;
	svm->vmcb->control.event_inj_err = nested_vmcb->control.event_inj_err;

	nested_svm_unmap(nested_vmcb, KM_USER0);

	enable_gif(svm);

	return true;
}
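
/*
 * Copy the VMCB state that the VMLOAD/VMSAVE instructions handle: FS, GS,
 * TR, LDTR, the syscall/sysenter MSR state and KERNEL_GS_BASE.
 */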
static void nested_svm_vmloadsave(struct vmcb *from_vmcb, struct vmcb *to_vmcb)
{
	to_vmcb->save.fs = from_vmcb->save.fs;
	to_vmcb->save.gs = from_vmcb->save.gs;
	to_vmcb->save.tr = from_vmcb->save.tr;
	to_vmcb->save.ldtr = from_vmcb->save.ldtr;
	to_vmcb->save.kernel_gs_base = from_vmcb->save.kernel_gs_base;
	to_vmcb->save.star = from_vmcb->save.star;
	to_vmcb->save.lstar = from_vmcb->save.lstar;
	to_vmcb->save.cstar = from_vmcb->save.cstar;
	to_vmcb->save.sfmask = from_vmcb->save.sfmask;
	to_vmcb->save.sysenter_cs = from_vmcb->save.sysenter_cs;
	to_vmcb->save.sysenter_esp = from_vmcb->save.sysenter_esp;
	to_vmcb->save.sysenter_eip = from_vmcb->save.sysenter_eip;
}
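
/*
 * VMLOAD and VMSAVE are emulated by copying the affected state between the
 * guest's VMCB (addressed by RAX) and the VMCB we run the guest with.
 */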
static int vmload_interception(struct vcpu_svm *svm)
{
	struct vmcb *nested_vmcb;

	if (nested_svm_check_permissions(svm))
		return 1;

	svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
	skip_emulated_instruction(&svm->vcpu);

	nested_vmcb = nested_svm_map(svm, svm->vmcb->save.rax, KM_USER0);
	if (!nested_vmcb)
		return 1;

	nested_svm_vmloadsave(nested_vmcb, svm->vmcb);
	nested_svm_unmap(nested_vmcb, KM_USER0);

	return 1;
}

static int vmsave_interception(struct vcpu_svm *svm)
{
	struct vmcb *nested_vmcb;

	if (nested_svm_check_permissions(svm))
		return 1;

	svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
	skip_emulated_instruction(&svm->vcpu);

	nested_vmcb = nested_svm_map(svm, svm->vmcb->save.rax, KM_USER0);
	if (!nested_vmcb)
		return 1;

	nested_svm_vmloadsave(svm->vmcb, nested_vmcb);
	nested_svm_unmap(nested_vmcb, KM_USER0);

	return 1;
}

static int vmrun_interception(struct vcpu_svm *svm)
{
	nsvm_printk("VMrun\n");

	if (nested_svm_check_permissions(svm))
		return 1;

	svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
	skip_emulated_instruction(&svm->vcpu);

	if (!nested_svm_vmrun(svm))
		return 1;

	if (!nested_svm_vmrun_msrpm(svm))
		goto failed;

	return 1;

failed:

	svm->vmcb->control.exit_code = SVM_EXIT_ERR;
	svm->vmcb->control.exit_code_hi = 0;
	svm->vmcb->control.exit_info_1 = 0;
	svm->vmcb->control.exit_info_2 = 0;

	nested_svm_vmexit(svm);

	return 1;
}

static int stgi_interception(struct vcpu_svm *svm)
{
	if (nested_svm_check_permissions(svm))
		return 1;

	svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
	skip_emulated_instruction(&svm->vcpu);

	enable_gif(svm);

	return 1;
}

static int clgi_interception(struct vcpu_svm *svm)
{
	if (nested_svm_check_permissions(svm))
		return 1;

	svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
	skip_emulated_instruction(&svm->vcpu);

	disable_gif(svm);

	/* After a CLGI no interrupts should come */
	svm_clear_vintr(svm);
	svm->vmcb->control.int_ctl &= ~V_IRQ_MASK;

	return 1;
}

static int invlpga_interception(struct vcpu_svm *svm)
{
	struct kvm_vcpu *vcpu = &svm->vcpu;

	nsvm_printk("INVLPGA\n");

	/* Let's treat INVLPGA the same as INVLPG (can be optimized!) */
	kvm_mmu_invlpg(vcpu, vcpu->arch.regs[VCPU_REGS_RAX]);

	svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
	skip_emulated_instruction(&svm->vcpu);

	return 1;
}

static int invalid_op_interception(struct vcpu_svm *svm)
{
	kvm_queue_exception(&svm->vcpu, UD_VECTOR);
	return 1;
}
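
/*
 * Decode the cause of the task switch from exit_info_2 and exit_int_info.
 * If the switch was triggered by an event coming through the IDT, drop the
 * event that was re-queued at exit time, then hand the switch to the common
 * KVM task-switch emulation.
 */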
static int task_switch_interception(struct vcpu_svm *svm)
{
	u16 tss_selector;
	int reason;
	int int_type = svm->vmcb->control.exit_int_info &
		SVM_EXITINTINFO_TYPE_MASK;
	int int_vec = svm->vmcb->control.exit_int_info & SVM_EVTINJ_VEC_MASK;
	uint32_t type =
		svm->vmcb->control.exit_int_info & SVM_EXITINTINFO_TYPE_MASK;
	uint32_t idt_v =
		svm->vmcb->control.exit_int_info & SVM_EXITINTINFO_VALID;

	tss_selector = (u16)svm->vmcb->control.exit_info_1;

	if (svm->vmcb->control.exit_info_2 &
	    (1ULL << SVM_EXITINFOSHIFT_TS_REASON_IRET))
		reason = TASK_SWITCH_IRET;
	else if (svm->vmcb->control.exit_info_2 &
		 (1ULL << SVM_EXITINFOSHIFT_TS_REASON_JMP))
		reason = TASK_SWITCH_JMP;
	else if (idt_v)
		reason = TASK_SWITCH_GATE;
	else
		reason = TASK_SWITCH_CALL;

	if (reason == TASK_SWITCH_GATE) {
		switch (type) {
		case SVM_EXITINTINFO_TYPE_NMI:
			svm->vcpu.arch.nmi_injected = false;
			break;
		case SVM_EXITINTINFO_TYPE_EXEPT:
			kvm_clear_exception_queue(&svm->vcpu);
			break;
		case SVM_EXITINTINFO_TYPE_INTR:
			kvm_clear_interrupt_queue(&svm->vcpu);
			break;
		default:
			break;
		}
	}

	if (reason != TASK_SWITCH_GATE ||
	    int_type == SVM_EXITINTINFO_TYPE_SOFT ||
	    (int_type == SVM_EXITINTINFO_TYPE_EXEPT &&
	     (int_vec == OF_VECTOR || int_vec == BP_VECTOR)))
		skip_emulated_instruction(&svm->vcpu);

	return kvm_task_switch(&svm->vcpu, tss_selector, reason);
}

static int cpuid_interception(struct vcpu_svm *svm)
{
	svm->next_rip = kvm_rip_read(&svm->vcpu) + 2;
	kvm_emulate_cpuid(&svm->vcpu);
	return 1;
}

static int iret_interception(struct vcpu_svm *svm)
{
	++svm->vcpu.stat.nmi_window_exits;
	svm->vmcb->control.intercept &= ~(1UL << INTERCEPT_IRET);
	svm->vcpu.arch.hflags |= HF_IRET_MASK;
	return 1;
}

static int invlpg_interception(struct vcpu_svm *svm)
{
	if (emulate_instruction(&svm->vcpu, 0, 0, 0) != EMULATE_DONE)
		pr_unimpl(&svm->vcpu, "%s: failed\n", __func__);
	return 1;
}

static int emulate_on_interception(struct vcpu_svm *svm)
{
	if (emulate_instruction(&svm->vcpu, 0, 0, 0) != EMULATE_DONE)
		pr_unimpl(&svm->vcpu, "%s: failed\n", __func__);
	return 1;
}

static int cr8_write_interception(struct vcpu_svm *svm)
{
	struct kvm_run *kvm_run = svm->vcpu.run;
	u8 cr8_prev = kvm_get_cr8(&svm->vcpu);

	/* instruction emulation calls kvm_set_cr8() */
	emulate_instruction(&svm->vcpu, 0, 0, 0);
	if (irqchip_in_kernel(svm->vcpu.kvm)) {
		svm->vmcb->control.intercept_cr_write &= ~INTERCEPT_CR8_MASK;
		return 1;
	}
	if (cr8_prev <= kvm_get_cr8(&svm->vcpu))
		return 1;
	kvm_run->exit_reason = KVM_EXIT_SET_TPR;
	return 0;
}
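
/*
 * Read an MSR on behalf of the guest. Values kept in the VMCB are returned
 * from there; everything else is handled by kvm_get_msr_common().
 */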
static int svm_get_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 *data)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	switch (ecx) {
	case MSR_IA32_TSC: {
		u64 tsc_offset;

		if (is_nested(svm))
			tsc_offset = svm->nested.hsave->control.tsc_offset;
		else
			tsc_offset = svm->vmcb->control.tsc_offset;

		*data = tsc_offset + native_read_tsc();
		break;
	}
	case MSR_K6_STAR:
		*data = svm->vmcb->save.star;
		break;
#ifdef CONFIG_X86_64
	case MSR_LSTAR:
		*data = svm->vmcb->save.lstar;
		break;
	case MSR_CSTAR:
		*data = svm->vmcb->save.cstar;
		break;
	case MSR_KERNEL_GS_BASE:
		*data = svm->vmcb->save.kernel_gs_base;
		break;
	case MSR_SYSCALL_MASK:
		*data = svm->vmcb->save.sfmask;
		break;
#endif
	case MSR_IA32_SYSENTER_CS:
		*data = svm->vmcb->save.sysenter_cs;
		break;
	case MSR_IA32_SYSENTER_EIP:
		*data = svm->sysenter_eip;
		break;
	case MSR_IA32_SYSENTER_ESP:
		*data = svm->sysenter_esp;
		break;
	/*
	 * Nobody will change the following 5 values in the VMCB so we can
	 * safely return them on rdmsr. They will always be 0 until LBRV is
	 * implemented.
	 */
	case MSR_IA32_DEBUGCTLMSR:
		*data = svm->vmcb->save.dbgctl;
		break;
	case MSR_IA32_LASTBRANCHFROMIP:
		*data = svm->vmcb->save.br_from;
		break;
	case MSR_IA32_LASTBRANCHTOIP:
		*data = svm->vmcb->save.br_to;
		break;
	case MSR_IA32_LASTINTFROMIP:
		*data = svm->vmcb->save.last_excp_from;
		break;
	case MSR_IA32_LASTINTTOIP:
		*data = svm->vmcb->save.last_excp_to;
		break;
	case MSR_VM_HSAVE_PA:
		*data = svm->nested.hsave_msr;
		break;
	case MSR_VM_CR:
		*data = 0;
		break;
	case MSR_IA32_UCODE_REV:
		*data = 0x01000065;
		break;
	default:
		return kvm_get_msr_common(vcpu, ecx, data);
	}
	return 0;
}

static int rdmsr_interception(struct vcpu_svm *svm)
{
	u32 ecx = svm->vcpu.arch.regs[VCPU_REGS_RCX];
	u64 data;

	if (svm_get_msr(&svm->vcpu, ecx, &data))
		kvm_inject_gp(&svm->vcpu, 0);
	else {
		trace_kvm_msr_read(ecx, data);

		svm->vcpu.arch.regs[VCPU_REGS_RAX] = data & 0xffffffff;
		svm->vcpu.arch.regs[VCPU_REGS_RDX] = data >> 32;
		svm->next_rip = kvm_rip_read(&svm->vcpu) + 2;
		skip_emulated_instruction(&svm->vcpu);
	}
	return 1;
}
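
/*
 * Write an MSR on behalf of the guest. MSRs that live in the VMCB are
 * updated there; everything else is handled by kvm_set_msr_common().
 */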
static int svm_set_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 data)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	switch (ecx) {
	case MSR_IA32_TSC: {
		u64 tsc_offset = data - native_read_tsc();
		u64 g_tsc_offset = 0;

		if (is_nested(svm)) {
			g_tsc_offset = svm->vmcb->control.tsc_offset -
				       svm->nested.hsave->control.tsc_offset;
			svm->nested.hsave->control.tsc_offset = tsc_offset;
		}

		svm->vmcb->control.tsc_offset = tsc_offset + g_tsc_offset;

		break;
	}
	case MSR_K6_STAR:
		svm->vmcb->save.star = data;
		break;
#ifdef CONFIG_X86_64
	case MSR_LSTAR:
		svm->vmcb->save.lstar = data;
		break;
	case MSR_CSTAR:
		svm->vmcb->save.cstar = data;
		break;
	case MSR_KERNEL_GS_BASE:
		svm->vmcb->save.kernel_gs_base = data;
		break;
	case MSR_SYSCALL_MASK:
		svm->vmcb->save.sfmask = data;
		break;
#endif
	case MSR_IA32_SYSENTER_CS:
		svm->vmcb->save.sysenter_cs = data;
		break;
	case MSR_IA32_SYSENTER_EIP:
		svm->sysenter_eip = data;
		svm->vmcb->save.sysenter_eip = data;
		break;
	case MSR_IA32_SYSENTER_ESP:
		svm->sysenter_esp = data;
		svm->vmcb->save.sysenter_esp = data;
		break;
	case MSR_IA32_DEBUGCTLMSR:
		if (!svm_has(SVM_FEATURE_LBRV)) {
			pr_unimpl(vcpu, "%s: MSR_IA32_DEBUGCTL 0x%llx, nop\n",
				  __func__, data);
			break;
		}
		if (data & DEBUGCTL_RESERVED_BITS)
			return 1;

		svm->vmcb->save.dbgctl = data;
		if (data & (1ULL << 0))
			svm_enable_lbrv(svm);
		else
			svm_disable_lbrv(svm);
		break;
	case MSR_VM_HSAVE_PA:
		svm->nested.hsave_msr = data;
		break;
	case MSR_VM_CR:
	case MSR_VM_IGNNE:
		pr_unimpl(vcpu, "unimplemented wrmsr: 0x%x data 0x%llx\n", ecx, data);
		break;
	default:
		return kvm_set_msr_common(vcpu, ecx, data);
	}
	return 0;
}

static int wrmsr_interception(struct vcpu_svm *svm)
{
	u32 ecx = svm->vcpu.arch.regs[VCPU_REGS_RCX];
	u64 data = (svm->vcpu.arch.regs[VCPU_REGS_RAX] & -1u)
		| ((u64)(svm->vcpu.arch.regs[VCPU_REGS_RDX] & -1u) << 32);

	trace_kvm_msr_write(ecx, data);

	svm->next_rip = kvm_rip_read(&svm->vcpu) + 2;
	if (svm_set_msr(&svm->vcpu, ecx, data))
		kvm_inject_gp(&svm->vcpu, 0);
	else
		skip_emulated_instruction(&svm->vcpu);
	return 1;
}

static int msr_interception(struct vcpu_svm *svm)
{
	if (svm->vmcb->control.exit_info_1)
		return wrmsr_interception(svm);
	else
		return rdmsr_interception(svm);
}

static int interrupt_window_interception(struct vcpu_svm *svm)
{
	struct kvm_run *kvm_run = svm->vcpu.run;

	svm_clear_vintr(svm);
	svm->vmcb->control.int_ctl &= ~V_IRQ_MASK;
	/*
	 * If the user space waits to inject interrupts, exit as soon as
	 * possible
	 */
	if (!irqchip_in_kernel(svm->vcpu.kvm) &&
	    kvm_run->request_interrupt_window &&
	    !kvm_cpu_has_interrupt(&svm->vcpu)) {
		++svm->vcpu.stat.irq_window_exits;
		kvm_run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN;
		return 0;
	}

	return 1;
}
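
/* Exit handler dispatch table, indexed by the SVM exit code. */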
static int (*svm_exit_handlers[])(struct vcpu_svm *svm) = {
	[SVM_EXIT_READ_CR0] = emulate_on_interception,
	[SVM_EXIT_READ_CR3] = emulate_on_interception,
	[SVM_EXIT_READ_CR4] = emulate_on_interception,
	[SVM_EXIT_READ_CR8] = emulate_on_interception,
	/* for now: */
	[SVM_EXIT_WRITE_CR0] = emulate_on_interception,
	[SVM_EXIT_WRITE_CR3] = emulate_on_interception,
	[SVM_EXIT_WRITE_CR4] = emulate_on_interception,
	[SVM_EXIT_WRITE_CR8] = cr8_write_interception,
	[SVM_EXIT_READ_DR0] = emulate_on_interception,
	[SVM_EXIT_READ_DR1] = emulate_on_interception,
	[SVM_EXIT_READ_DR2] = emulate_on_interception,
	[SVM_EXIT_READ_DR3] = emulate_on_interception,
	[SVM_EXIT_WRITE_DR0] = emulate_on_interception,
	[SVM_EXIT_WRITE_DR1] = emulate_on_interception,
	[SVM_EXIT_WRITE_DR2] = emulate_on_interception,
	[SVM_EXIT_WRITE_DR3] = emulate_on_interception,
	[SVM_EXIT_WRITE_DR5] = emulate_on_interception,
	[SVM_EXIT_WRITE_DR7] = emulate_on_interception,
	[SVM_EXIT_EXCP_BASE + DB_VECTOR] = db_interception,
	[SVM_EXIT_EXCP_BASE + BP_VECTOR] = bp_interception,
	[SVM_EXIT_EXCP_BASE + UD_VECTOR] = ud_interception,
	[SVM_EXIT_EXCP_BASE + PF_VECTOR] = pf_interception,
	[SVM_EXIT_EXCP_BASE + NM_VECTOR] = nm_interception,
	[SVM_EXIT_EXCP_BASE + MC_VECTOR] = mc_interception,
	[SVM_EXIT_INTR] = intr_interception,
	[SVM_EXIT_NMI] = nmi_interception,
	[SVM_EXIT_SMI] = nop_on_interception,
	[SVM_EXIT_INIT] = nop_on_interception,
	[SVM_EXIT_VINTR] = interrupt_window_interception,
	/* [SVM_EXIT_CR0_SEL_WRITE] = emulate_on_interception, */
	[SVM_EXIT_CPUID] = cpuid_interception,
	[SVM_EXIT_IRET] = iret_interception,
	[SVM_EXIT_INVD] = emulate_on_interception,
	[SVM_EXIT_HLT] = halt_interception,
	[SVM_EXIT_INVLPG] = invlpg_interception,
	[SVM_EXIT_INVLPGA] = invlpga_interception,
	[SVM_EXIT_IOIO] = io_interception,
	[SVM_EXIT_MSR] = msr_interception,
	[SVM_EXIT_TASK_SWITCH] = task_switch_interception,
	[SVM_EXIT_SHUTDOWN] = shutdown_interception,
	[SVM_EXIT_VMRUN] = vmrun_interception,
	[SVM_EXIT_VMMCALL] = vmmcall_interception,
	[SVM_EXIT_VMLOAD] = vmload_interception,
	[SVM_EXIT_VMSAVE] = vmsave_interception,
	[SVM_EXIT_STGI] = stgi_interception,
	[SVM_EXIT_CLGI] = clgi_interception,
	[SVM_EXIT_SKINIT] = invalid_op_interception,
	[SVM_EXIT_WBINVD] = emulate_on_interception,
	[SVM_EXIT_MONITOR] = invalid_op_interception,
	[SVM_EXIT_MWAIT] = invalid_op_interception,
	[SVM_EXIT_NPF] = pf_interception,
};
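
/*
 * Top-level exit handler: handle any pending nested #VMEXIT first, reflect
 * the exit to a nested hypervisor when it is intercepted there, sync CR0/CR3
 * when NPT is active, and then dispatch to the handler registered for this
 * exit code.
 */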
static int handle_exit(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	struct kvm_run *kvm_run = vcpu->run;
	u32 exit_code = svm->vmcb->control.exit_code;

	trace_kvm_exit(exit_code, svm->vmcb->save.rip);

	if (unlikely(svm->nested.exit_required)) {
		nested_svm_vmexit(svm);
		svm->nested.exit_required = false;

		return 1;
	}

	if (is_nested(svm)) {
		int vmexit;

		trace_kvm_nested_vmexit(svm->vmcb->save.rip, exit_code,
					svm->vmcb->control.exit_info_1,
					svm->vmcb->control.exit_info_2,
					svm->vmcb->control.exit_int_info,
					svm->vmcb->control.exit_int_info_err);

		nsvm_printk("nested handle_exit: 0x%x | 0x%lx | 0x%lx | 0x%lx\n",
			    exit_code, svm->vmcb->control.exit_info_1,
			    svm->vmcb->control.exit_info_2, svm->vmcb->save.rip);

		vmexit = nested_svm_exit_special(svm);

		if (vmexit == NESTED_EXIT_CONTINUE)
			vmexit = nested_svm_exit_handled(svm);

		if (vmexit == NESTED_EXIT_DONE)
			return 1;
	}

	svm_complete_interrupts(svm);

	if (npt_enabled) {
		int mmu_reload = 0;

		if ((vcpu->arch.cr0 ^ svm->vmcb->save.cr0) & X86_CR0_PG) {
			svm_set_cr0(vcpu, svm->vmcb->save.cr0);
			mmu_reload = 1;
		}
		vcpu->arch.cr0 = svm->vmcb->save.cr0;
		vcpu->arch.cr3 = svm->vmcb->save.cr3;
		if (mmu_reload) {
			kvm_mmu_reset_context(vcpu);
			kvm_mmu_load(vcpu);
		}
	}

	if (svm->vmcb->control.exit_code == SVM_EXIT_ERR) {
		kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY;
		kvm_run->fail_entry.hardware_entry_failure_reason
			= svm->vmcb->control.exit_code;
		return 0;
	}

	if (is_external_interrupt(svm->vmcb->control.exit_int_info) &&
	    exit_code != SVM_EXIT_EXCP_BASE + PF_VECTOR &&
	    exit_code != SVM_EXIT_NPF && exit_code != SVM_EXIT_TASK_SWITCH)
		printk(KERN_ERR "%s: unexpected exit_int_info 0x%x "
		       "exit_code 0x%x\n",
		       __func__, svm->vmcb->control.exit_int_info,
		       exit_code);

	if (exit_code >= ARRAY_SIZE(svm_exit_handlers)
	    || !svm_exit_handlers[exit_code]) {
		kvm_run->exit_reason = KVM_EXIT_UNKNOWN;
		kvm_run->hw.hardware_exit_reason = exit_code;
		return 0;
	}

	return svm_exit_handlers[exit_code](svm);
}

static void reload_tss(struct kvm_vcpu *vcpu)
{
	int cpu = raw_smp_processor_id();

	struct svm_cpu_data *svm_data = per_cpu(svm_data, cpu);
	svm_data->tss_desc->type = 9; /* available 32/64-bit TSS */
	load_TR_desc();
}

static void pre_svm_run(struct vcpu_svm *svm)
{
	int cpu = raw_smp_processor_id();

	struct svm_cpu_data *svm_data = per_cpu(svm_data, cpu);

	svm->vmcb->control.tlb_ctl = TLB_CONTROL_DO_NOTHING;
	/* FIXME: handle wraparound of asid_generation */
	if (svm->asid_generation != svm_data->asid_generation)
		new_asid(svm, svm_data);
}

static void svm_inject_nmi(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	svm->vmcb->control.event_inj = SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_NMI;
	vcpu->arch.hflags |= HF_NMI_MASK;
	svm->vmcb->control.intercept |= (1UL << INTERCEPT_IRET);
	++vcpu->stat.nmi_injections;
}

static inline void svm_inject_irq(struct vcpu_svm *svm, int irq)
{
	struct vmcb_control_area *control;

	trace_kvm_inj_virq(irq);

	++svm->vcpu.stat.irq_injections;
	control = &svm->vmcb->control;
	control->int_vector = irq;
	control->int_ctl &= ~V_INTR_PRIO_MASK;
	control->int_ctl |= V_IRQ_MASK |
		((/*control->int_vector >> 4*/ 0xf) << V_INTR_PRIO_SHIFT);
}

static void svm_set_irq(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	BUG_ON(!(gif_set(svm)));

	svm->vmcb->control.event_inj = vcpu->arch.interrupt.nr |
		SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_INTR;
}

static void update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	if (irr == -1)
		return;

	if (tpr >= irr)
		svm->vmcb->control.intercept_cr_write |= INTERCEPT_CR8_MASK;
}

static int svm_nmi_allowed(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	struct vmcb *vmcb = svm->vmcb;
	return !(vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK) &&
		!(svm->vcpu.arch.hflags & HF_NMI_MASK);
}

static int svm_interrupt_allowed(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	struct vmcb *vmcb = svm->vmcb;
	int ret;

	if (!gif_set(svm) ||
	    (vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK))
		return 0;

	ret = !!(vmcb->save.rflags & X86_EFLAGS_IF);

	if (is_nested(svm))
		return ret && !(svm->vcpu.arch.hflags & HF_VINTR_MASK);

	return ret;
}

static void enable_irq_window(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	nsvm_printk("Trying to open IRQ window\n");

	nested_svm_intr(svm);

	/*
	 * In case GIF=0 we can't rely on the CPU to tell us when GIF becomes
	 * 1, because that's a separate STGI/VMRUN intercept. The next time we
	 * get that intercept, this function will be called again though and
	 * we'll get the vintr intercept.
	 */
	if (gif_set(svm)) {
		svm_set_vintr(svm);
		svm_inject_irq(svm, 0x0);
	}
}

static void enable_nmi_window(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	if ((svm->vcpu.arch.hflags & (HF_NMI_MASK | HF_IRET_MASK))
	    == HF_NMI_MASK)
		return; /* IRET will cause a vm exit */

	/*
	 * Something prevents the NMI from being injected. Single step over the
	 * possible problem (IRET or exception injection or interrupt shadow).
	 */
	vcpu->arch.singlestep = true;
	svm->vmcb->save.rflags |= (X86_EFLAGS_TF | X86_EFLAGS_RF);
	update_db_intercept(vcpu);
}

static int svm_set_tss_addr(struct kvm *kvm, unsigned int addr)
{
	return 0;
}

static void svm_flush_tlb(struct kvm_vcpu *vcpu)
{
	force_new_asid(vcpu);
}

static void svm_prepare_guest_switch(struct kvm_vcpu *vcpu)
{
}

static inline void sync_cr8_to_lapic(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	if (!(svm->vmcb->control.intercept_cr_write & INTERCEPT_CR8_MASK)) {
		int cr8 = svm->vmcb->control.int_ctl & V_TPR_MASK;
		kvm_set_cr8(vcpu, cr8);
	}
}

static inline void sync_lapic_to_cr8(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	u64 cr8;

	cr8 = kvm_get_cr8(vcpu);
	svm->vmcb->control.int_ctl &= ~V_TPR_MASK;
	svm->vmcb->control.int_ctl |= cr8 & V_TPR_MASK;
}
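
/*
 * Re-queue an event that was in flight when the #VMEXIT occurred (as
 * recorded in exit_int_info) so it is re-injected on the next guest entry.
 */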
static void svm_complete_interrupts(struct vcpu_svm *svm)
{
	u8 vector;
	int type;
	u32 exitintinfo = svm->vmcb->control.exit_int_info;

	if (svm->vcpu.arch.hflags & HF_IRET_MASK)
		svm->vcpu.arch.hflags &= ~(HF_NMI_MASK | HF_IRET_MASK);

	svm->vcpu.arch.nmi_injected = false;
	kvm_clear_exception_queue(&svm->vcpu);
	kvm_clear_interrupt_queue(&svm->vcpu);

	if (!(exitintinfo & SVM_EXITINTINFO_VALID))
		return;

	vector = exitintinfo & SVM_EXITINTINFO_VEC_MASK;
	type = exitintinfo & SVM_EXITINTINFO_TYPE_MASK;

	switch (type) {
	case SVM_EXITINTINFO_TYPE_NMI:
		svm->vcpu.arch.nmi_injected = true;
		break;
	case SVM_EXITINTINFO_TYPE_EXEPT:
		/*
		 * In case of a software exception do not reinject an exception
		 * vector, but re-execute the instruction instead.
		 */
		if (is_nested(svm))
			break;
		if (kvm_exception_is_soft(vector))
			break;
		if (exitintinfo & SVM_EXITINTINFO_VALID_ERR) {
			u32 err = svm->vmcb->control.exit_int_info_err;
			kvm_queue_exception_e(&svm->vcpu, vector, err);
		} else
			kvm_queue_exception(&svm->vcpu, vector);
		break;
	case SVM_EXITINTINFO_TYPE_INTR:
		kvm_queue_interrupt(&svm->vcpu, vector, false);
		break;
	default:
		break;
	}
}

#ifdef CONFIG_X86_64
#define R "r"
#else
#define R "e"
#endif
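
/*
 * Enter the guest: load the guest GPRs, execute VMLOAD/VMRUN/VMSAVE, and
 * save the guest register state back into the vcpu after the exit.
 */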
static void svm_vcpu_run(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	u16 fs_selector;
	u16 gs_selector;
	u16 ldt_selector;

	/*
	 * A vmexit emulation is required before the vcpu can be executed
	 * again.
	 */
	if (unlikely(svm->nested.exit_required))
		return;

	svm->vmcb->save.rax = vcpu->arch.regs[VCPU_REGS_RAX];
	svm->vmcb->save.rsp = vcpu->arch.regs[VCPU_REGS_RSP];
	svm->vmcb->save.rip = vcpu->arch.regs[VCPU_REGS_RIP];

	pre_svm_run(svm);

	sync_lapic_to_cr8(vcpu);

	save_host_msrs(vcpu);
	fs_selector = kvm_read_fs();
	gs_selector = kvm_read_gs();
	ldt_selector = kvm_read_ldt();
	svm->vmcb->save.cr2 = vcpu->arch.cr2;
	/* required for live migration with NPT */
	if (npt_enabled)
		svm->vmcb->save.cr3 = vcpu->arch.cr3;

	clgi();

	local_irq_enable();

	asm volatile (
		"push %%"R"bp; \n\t"
		"mov %c[rbx](%[svm]), %%"R"bx \n\t"
		"mov %c[rcx](%[svm]), %%"R"cx \n\t"
		"mov %c[rdx](%[svm]), %%"R"dx \n\t"
		"mov %c[rsi](%[svm]), %%"R"si \n\t"
		"mov %c[rdi](%[svm]), %%"R"di \n\t"
		"mov %c[rbp](%[svm]), %%"R"bp \n\t"
#ifdef CONFIG_X86_64
		"mov %c[r8](%[svm]), %%r8 \n\t"
		"mov %c[r9](%[svm]), %%r9 \n\t"
		"mov %c[r10](%[svm]), %%r10 \n\t"
		"mov %c[r11](%[svm]), %%r11 \n\t"
		"mov %c[r12](%[svm]), %%r12 \n\t"
		"mov %c[r13](%[svm]), %%r13 \n\t"
		"mov %c[r14](%[svm]), %%r14 \n\t"
		"mov %c[r15](%[svm]), %%r15 \n\t"
#endif

		/* Enter guest mode */
		"push %%"R"ax \n\t"
		"mov %c[vmcb](%[svm]), %%"R"ax \n\t"
		__ex(SVM_VMLOAD) "\n\t"
		__ex(SVM_VMRUN) "\n\t"
		__ex(SVM_VMSAVE) "\n\t"
		"pop %%"R"ax \n\t"

		/* Save guest registers, load host registers */
		"mov %%"R"bx, %c[rbx](%[svm]) \n\t"
		"mov %%"R"cx, %c[rcx](%[svm]) \n\t"
		"mov %%"R"dx, %c[rdx](%[svm]) \n\t"
		"mov %%"R"si, %c[rsi](%[svm]) \n\t"
		"mov %%"R"di, %c[rdi](%[svm]) \n\t"
		"mov %%"R"bp, %c[rbp](%[svm]) \n\t"
#ifdef CONFIG_X86_64
		"mov %%r8, %c[r8](%[svm]) \n\t"
		"mov %%r9, %c[r9](%[svm]) \n\t"
		"mov %%r10, %c[r10](%[svm]) \n\t"
		"mov %%r11, %c[r11](%[svm]) \n\t"
		"mov %%r12, %c[r12](%[svm]) \n\t"
		"mov %%r13, %c[r13](%[svm]) \n\t"
		"mov %%r14, %c[r14](%[svm]) \n\t"
		"mov %%r15, %c[r15](%[svm]) \n\t"
#endif
		"pop %%"R"bp"
		:
		: [svm]"a"(svm),
		  [vmcb]"i"(offsetof(struct vcpu_svm, vmcb_pa)),
		  [rbx]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RBX])),
		  [rcx]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RCX])),
		  [rdx]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RDX])),
		  [rsi]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RSI])),
		  [rdi]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RDI])),
		  [rbp]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RBP]))
#ifdef CONFIG_X86_64
		, [r8]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R8])),
		  [r9]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R9])),
		  [r10]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R10])),
		  [r11]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R11])),
		  [r12]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R12])),
		  [r13]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R13])),
		  [r14]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R14])),
		  [r15]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R15]))
#endif
		: "cc", "memory"
		, R"bx", R"cx", R"dx", R"si", R"di"
#ifdef CONFIG_X86_64
		, "r8", "r9", "r10", "r11" , "r12", "r13", "r14", "r15"
#endif
		);

	vcpu->arch.cr2 = svm->vmcb->save.cr2;
	vcpu->arch.regs[VCPU_REGS_RAX] = svm->vmcb->save.rax;
	vcpu->arch.regs[VCPU_REGS_RSP] = svm->vmcb->save.rsp;
	vcpu->arch.regs[VCPU_REGS_RIP] = svm->vmcb->save.rip;

	kvm_load_fs(fs_selector);
	kvm_load_gs(gs_selector);
	kvm_load_ldt(ldt_selector);
	load_host_msrs(vcpu);

	reload_tss(vcpu);

	local_irq_disable();

	stgi();

	sync_cr8_to_lapic(vcpu);

	svm->next_rip = 0;

	if (npt_enabled) {
		vcpu->arch.regs_avail &= ~(1 << VCPU_EXREG_PDPTR);
		vcpu->arch.regs_dirty &= ~(1 << VCPU_EXREG_PDPTR);
	}
}

#undef R

static void svm_set_cr3(struct kvm_vcpu *vcpu, unsigned long root)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	if (npt_enabled) {
		svm->vmcb->control.nested_cr3 = root;
		force_new_asid(vcpu);
		return;
	}

	svm->vmcb->save.cr3 = root;
	force_new_asid(vcpu);

	if (vcpu->fpu_active) {
		svm->vmcb->control.intercept_exceptions |= (1 << NM_VECTOR);
		svm->vmcb->save.cr0 |= X86_CR0_TS;
		vcpu->fpu_active = 0;
	}
}

static int is_disabled(void)
{
	u64 vm_cr;

	rdmsrl(MSR_VM_CR, vm_cr);
	if (vm_cr & (1 << SVM_VM_CR_SVM_DISABLE))
		return 1;

	return 0;
}

static void
svm_patch_hypercall(struct kvm_vcpu *vcpu, unsigned char *hypercall)
{
	/*
	 * Patch in the VMMCALL instruction:
	 */
	hypercall[0] = 0x0f;
	hypercall[1] = 0x01;
	hypercall[2] = 0xd9;
}

static void svm_check_processor_compat(void *rtn)
{
	*(int *)rtn = 0;
}

static bool svm_cpu_has_accelerated_tpr(void)
{
	return false;
}

static int get_npt_level(void)
{
#ifdef CONFIG_X86_64
	return PT64_ROOT_LEVEL;
#else
	return PT32E_ROOT_LEVEL;
#endif
}

static u64 svm_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio)
{
	return 0;
}

static const struct trace_print_flags svm_exit_reasons_str[] = {
	{ SVM_EXIT_READ_CR0, "read_cr0" },
	{ SVM_EXIT_READ_CR3, "read_cr3" },
	{ SVM_EXIT_READ_CR4, "read_cr4" },
	{ SVM_EXIT_READ_CR8, "read_cr8" },
	{ SVM_EXIT_WRITE_CR0, "write_cr0" },
	{ SVM_EXIT_WRITE_CR3, "write_cr3" },
	{ SVM_EXIT_WRITE_CR4, "write_cr4" },
	{ SVM_EXIT_WRITE_CR8, "write_cr8" },
	{ SVM_EXIT_READ_DR0, "read_dr0" },
	{ SVM_EXIT_READ_DR1, "read_dr1" },
	{ SVM_EXIT_READ_DR2, "read_dr2" },
	{ SVM_EXIT_READ_DR3, "read_dr3" },
	{ SVM_EXIT_WRITE_DR0, "write_dr0" },
	{ SVM_EXIT_WRITE_DR1, "write_dr1" },
	{ SVM_EXIT_WRITE_DR2, "write_dr2" },
	{ SVM_EXIT_WRITE_DR3, "write_dr3" },
	{ SVM_EXIT_WRITE_DR5, "write_dr5" },
	{ SVM_EXIT_WRITE_DR7, "write_dr7" },
	{ SVM_EXIT_EXCP_BASE + DB_VECTOR, "DB excp" },
	{ SVM_EXIT_EXCP_BASE + BP_VECTOR, "BP excp" },
	{ SVM_EXIT_EXCP_BASE + UD_VECTOR, "UD excp" },
	{ SVM_EXIT_EXCP_BASE + PF_VECTOR, "PF excp" },
	{ SVM_EXIT_EXCP_BASE + NM_VECTOR, "NM excp" },
	{ SVM_EXIT_EXCP_BASE + MC_VECTOR, "MC excp" },
	{ SVM_EXIT_INTR, "interrupt" },
	{ SVM_EXIT_NMI, "nmi" },
	{ SVM_EXIT_SMI, "smi" },
	{ SVM_EXIT_INIT, "init" },
	{ SVM_EXIT_VINTR, "vintr" },
	{ SVM_EXIT_CPUID, "cpuid" },
	{ SVM_EXIT_INVD, "invd" },
	{ SVM_EXIT_HLT, "hlt" },
	{ SVM_EXIT_INVLPG, "invlpg" },
	{ SVM_EXIT_INVLPGA, "invlpga" },
	{ SVM_EXIT_IOIO, "io" },
	{ SVM_EXIT_MSR, "msr" },
	{ SVM_EXIT_TASK_SWITCH, "task_switch" },
	{ SVM_EXIT_SHUTDOWN, "shutdown" },
	{ SVM_EXIT_VMRUN, "vmrun" },
	{ SVM_EXIT_VMMCALL, "hypercall" },
	{ SVM_EXIT_VMLOAD, "vmload" },
	{ SVM_EXIT_VMSAVE, "vmsave" },
	{ SVM_EXIT_STGI, "stgi" },
	{ SVM_EXIT_CLGI, "clgi" },
	{ SVM_EXIT_SKINIT, "skinit" },
	{ SVM_EXIT_WBINVD, "wbinvd" },
	{ SVM_EXIT_MONITOR, "monitor" },
	{ SVM_EXIT_MWAIT, "mwait" },
	{ SVM_EXIT_NPF, "npf" },
	{ -1, NULL }
};

static bool svm_gb_page_enable(void)
{
	return true;
}
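
/* kvm_x86_ops callbacks implemented by the SVM backend. */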
static struct kvm_x86_ops svm_x86_ops = {
	.cpu_has_kvm_support = has_svm,
	.disabled_by_bios = is_disabled,
	.hardware_setup = svm_hardware_setup,
	.hardware_unsetup = svm_hardware_unsetup,
	.check_processor_compatibility = svm_check_processor_compat,
	.hardware_enable = svm_hardware_enable,
	.hardware_disable = svm_hardware_disable,
	.cpu_has_accelerated_tpr = svm_cpu_has_accelerated_tpr,

	.vcpu_create = svm_create_vcpu,
	.vcpu_free = svm_free_vcpu,
	.vcpu_reset = svm_vcpu_reset,

	.prepare_guest_switch = svm_prepare_guest_switch,
	.vcpu_load = svm_vcpu_load,
	.vcpu_put = svm_vcpu_put,

	.set_guest_debug = svm_guest_debug,
	.get_msr = svm_get_msr,
	.set_msr = svm_set_msr,
	.get_segment_base = svm_get_segment_base,
	.get_segment = svm_get_segment,
	.set_segment = svm_set_segment,
	.get_cpl = svm_get_cpl,
	.get_cs_db_l_bits = kvm_get_cs_db_l_bits,
	.decache_cr4_guest_bits = svm_decache_cr4_guest_bits,
	.set_cr0 = svm_set_cr0,
	.set_cr3 = svm_set_cr3,
	.set_cr4 = svm_set_cr4,
	.set_efer = svm_set_efer,
	.get_idt = svm_get_idt,
	.set_idt = svm_set_idt,
	.get_gdt = svm_get_gdt,
	.set_gdt = svm_set_gdt,
	.get_dr = svm_get_dr,
	.set_dr = svm_set_dr,
	.cache_reg = svm_cache_reg,
	.get_rflags = svm_get_rflags,
	.set_rflags = svm_set_rflags,

	.tlb_flush = svm_flush_tlb,

	.run = svm_vcpu_run,
	.handle_exit = handle_exit,
	.skip_emulated_instruction = skip_emulated_instruction,
	.set_interrupt_shadow = svm_set_interrupt_shadow,
	.get_interrupt_shadow = svm_get_interrupt_shadow,
	.patch_hypercall = svm_patch_hypercall,
	.set_irq = svm_set_irq,
	.set_nmi = svm_inject_nmi,
	.queue_exception = svm_queue_exception,
	.interrupt_allowed = svm_interrupt_allowed,
	.nmi_allowed = svm_nmi_allowed,
	.enable_nmi_window = enable_nmi_window,
	.enable_irq_window = enable_irq_window,
	.update_cr8_intercept = update_cr8_intercept,

	.set_tss_addr = svm_set_tss_addr,
	.get_tdp_level = get_npt_level,
	.get_mt_mask = svm_get_mt_mask,

	.exit_reasons_str = svm_exit_reasons_str,
	.gb_page_enable = svm_gb_page_enable,
};

static int __init svm_init(void)
{
	return kvm_init(&svm_x86_ops, sizeof(struct vcpu_svm),
			THIS_MODULE);
}

static void __exit svm_exit(void)
{
	kvm_exit();
}

module_init(svm_init)
module_exit(svm_exit)