svm.c

  1. /*
  2. * Kernel-based Virtual Machine driver for Linux
  3. *
  4. * AMD SVM support
  5. *
  6. * Copyright (C) 2006 Qumranet, Inc.
  7. *
  8. * Authors:
  9. * Yaniv Kamay <yaniv@qumranet.com>
  10. * Avi Kivity <avi@qumranet.com>
  11. *
  12. * This work is licensed under the terms of the GNU GPL, version 2. See
  13. * the COPYING file in the top-level directory.
  14. *
  15. */
  16. #include <linux/kvm_host.h>
  17. #include "irq.h"
  18. #include "mmu.h"
  19. #include "kvm_cache_regs.h"
  20. #include "x86.h"
  21. #include <linux/module.h>
  22. #include <linux/kernel.h>
  23. #include <linux/vmalloc.h>
  24. #include <linux/highmem.h>
  25. #include <linux/sched.h>
  26. #include <linux/ftrace_event.h>
  27. #include <linux/slab.h>
  28. #include <asm/desc.h>
  29. #include <asm/virtext.h>
  30. #include "trace.h"
  31. #define __ex(x) __kvm_handle_fault_on_reboot(x)
  32. MODULE_AUTHOR("Qumranet");
  33. MODULE_LICENSE("GPL");
  34. #define IOPM_ALLOC_ORDER 2
  35. #define MSRPM_ALLOC_ORDER 1
  36. #define SEG_TYPE_LDT 2
  37. #define SEG_TYPE_BUSY_TSS16 3
  38. #define SVM_FEATURE_NPT (1 << 0)
  39. #define SVM_FEATURE_LBRV (1 << 1)
  40. #define SVM_FEATURE_SVML (1 << 2)
  41. #define SVM_FEATURE_NRIP (1 << 3)
  42. #define SVM_FEATURE_PAUSE_FILTER (1 << 10)
  43. #define NESTED_EXIT_HOST 0 /* Exit handled on host level */
  44. #define NESTED_EXIT_DONE 1 /* Exit caused nested vmexit */
  45. #define NESTED_EXIT_CONTINUE 2 /* Further checks needed */
  46. #define DEBUGCTL_RESERVED_BITS (~(0x3fULL))
  47. static const u32 host_save_user_msrs[] = {
  48. #ifdef CONFIG_X86_64
  49. MSR_STAR, MSR_LSTAR, MSR_CSTAR, MSR_SYSCALL_MASK, MSR_KERNEL_GS_BASE,
  50. MSR_FS_BASE,
  51. #endif
  52. MSR_IA32_SYSENTER_CS, MSR_IA32_SYSENTER_ESP, MSR_IA32_SYSENTER_EIP,
  53. };
  54. #define NR_HOST_SAVE_USER_MSRS ARRAY_SIZE(host_save_user_msrs)
  55. struct kvm_vcpu;
  56. struct nested_state {
  57. struct vmcb *hsave;
  58. u64 hsave_msr;
  59. u64 vm_cr_msr;
  60. u64 vmcb;
  61. /* These are the merged vectors */
  62. u32 *msrpm;
  63. /* gpa pointers to the real vectors */
  64. u64 vmcb_msrpm;
  65. /* A VMEXIT is required but not yet emulated */
  66. bool exit_required;
  67. /* cache for intercepts of the guest */
  68. u16 intercept_cr_read;
  69. u16 intercept_cr_write;
  70. u16 intercept_dr_read;
  71. u16 intercept_dr_write;
  72. u32 intercept_exceptions;
  73. u64 intercept;
  74. };
  75. struct vcpu_svm {
  76. struct kvm_vcpu vcpu;
  77. struct vmcb *vmcb;
  78. unsigned long vmcb_pa;
  79. struct svm_cpu_data *svm_data;
  80. uint64_t asid_generation;
  81. uint64_t sysenter_esp;
  82. uint64_t sysenter_eip;
  83. u64 next_rip;
  84. u64 host_user_msrs[NR_HOST_SAVE_USER_MSRS];
  85. u64 host_gs_base;
  86. u32 *msrpm;
  87. struct nested_state nested;
  88. bool nmi_singlestep;
  89. unsigned int3_injected;
  90. unsigned long int3_rip;
  91. };
  92. /* enable NPT for AMD64 and X86 with PAE */
  93. #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
  94. static bool npt_enabled = true;
  95. #else
  96. static bool npt_enabled;
  97. #endif
  98. static int npt = 1;
  99. module_param(npt, int, S_IRUGO);
  100. static int nested = 1;
  101. module_param(nested, int, S_IRUGO);
  102. static void svm_flush_tlb(struct kvm_vcpu *vcpu);
  103. static void svm_complete_interrupts(struct vcpu_svm *svm);
  104. static int nested_svm_exit_handled(struct vcpu_svm *svm);
  105. static int nested_svm_intercept(struct vcpu_svm *svm);
  106. static int nested_svm_vmexit(struct vcpu_svm *svm);
  107. static int nested_svm_check_exception(struct vcpu_svm *svm, unsigned nr,
  108. bool has_error_code, u32 error_code);
  109. static inline struct vcpu_svm *to_svm(struct kvm_vcpu *vcpu)
  110. {
  111. return container_of(vcpu, struct vcpu_svm, vcpu);
  112. }
  113. static inline bool is_nested(struct vcpu_svm *svm)
  114. {
  115. return svm->nested.vmcb;
  116. }
  117. static inline void enable_gif(struct vcpu_svm *svm)
  118. {
  119. svm->vcpu.arch.hflags |= HF_GIF_MASK;
  120. }
  121. static inline void disable_gif(struct vcpu_svm *svm)
  122. {
  123. svm->vcpu.arch.hflags &= ~HF_GIF_MASK;
  124. }
  125. static inline bool gif_set(struct vcpu_svm *svm)
  126. {
  127. return !!(svm->vcpu.arch.hflags & HF_GIF_MASK);
  128. }
  129. static unsigned long iopm_base;
  130. struct kvm_ldttss_desc {
  131. u16 limit0;
  132. u16 base0;
  133. unsigned base1:8, type:5, dpl:2, p:1;
  134. unsigned limit1:4, zero0:3, g:1, base2:8;
  135. u32 base3;
  136. u32 zero1;
  137. } __attribute__((packed));
  138. struct svm_cpu_data {
  139. int cpu;
  140. u64 asid_generation;
  141. u32 max_asid;
  142. u32 next_asid;
  143. struct kvm_ldttss_desc *tss_desc;
  144. struct page *save_area;
  145. };
  146. static DEFINE_PER_CPU(struct svm_cpu_data *, svm_data);
  147. static uint32_t svm_features;
  148. struct svm_init_data {
  149. int cpu;
  150. int r;
  151. };
  152. static u32 msrpm_ranges[] = {0, 0xc0000000, 0xc0010000};
  153. #define NUM_MSR_MAPS ARRAY_SIZE(msrpm_ranges)
  154. #define MSRS_RANGE_SIZE 2048
  155. #define MSRS_IN_RANGE (MSRS_RANGE_SIZE * 8 / 2)
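/*
 * The MSR permission map is split into 2 KB ranges, one per base address in
 * msrpm_ranges[].  Each MSR takes two bits (read and write intercept), so a
 * range covers MSRS_IN_RANGE (8192) MSRs.
 */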
  156. #define MAX_INST_SIZE 15
  157. static inline u32 svm_has(u32 feat)
  158. {
  159. return svm_features & feat;
  160. }
  161. static inline void clgi(void)
  162. {
  163. asm volatile (__ex(SVM_CLGI));
  164. }
  165. static inline void stgi(void)
  166. {
  167. asm volatile (__ex(SVM_STGI));
  168. }
  169. static inline void invlpga(unsigned long addr, u32 asid)
  170. {
  171. asm volatile (__ex(SVM_INVLPGA) : : "a"(addr), "c"(asid));
  172. }
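/*
 * Invalidate the vCPU's ASID generation so that a fresh ASID is allocated
 * before the next VMRUN; switching ASIDs makes the old guest TLB entries
 * unreachable, which is how the guest TLB gets flushed.
 */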
  173. static inline void force_new_asid(struct kvm_vcpu *vcpu)
  174. {
  175. to_svm(vcpu)->asid_generation--;
  176. }
  177. static inline void flush_guest_tlb(struct kvm_vcpu *vcpu)
  178. {
  179. force_new_asid(vcpu);
  180. }
  181. static void svm_set_efer(struct kvm_vcpu *vcpu, u64 efer)
  182. {
  183. if (!npt_enabled && !(efer & EFER_LMA))
  184. efer &= ~EFER_LME;
  185. to_svm(vcpu)->vmcb->save.efer = efer | EFER_SVME;
  186. vcpu->arch.efer = efer;
  187. }
  188. static int is_external_interrupt(u32 info)
  189. {
  190. info &= SVM_EVTINJ_TYPE_MASK | SVM_EVTINJ_VALID;
  191. return info == (SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_INTR);
  192. }
  193. static u32 svm_get_interrupt_shadow(struct kvm_vcpu *vcpu, int mask)
  194. {
  195. struct vcpu_svm *svm = to_svm(vcpu);
  196. u32 ret = 0;
  197. if (svm->vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK)
  198. ret |= KVM_X86_SHADOW_INT_STI | KVM_X86_SHADOW_INT_MOV_SS;
  199. return ret & mask;
  200. }
  201. static void svm_set_interrupt_shadow(struct kvm_vcpu *vcpu, int mask)
  202. {
  203. struct vcpu_svm *svm = to_svm(vcpu);
  204. if (mask == 0)
  205. svm->vmcb->control.int_state &= ~SVM_INTERRUPT_SHADOW_MASK;
  206. else
  207. svm->vmcb->control.int_state |= SVM_INTERRUPT_SHADOW_MASK;
  208. }
  209. static void skip_emulated_instruction(struct kvm_vcpu *vcpu)
  210. {
  211. struct vcpu_svm *svm = to_svm(vcpu);
  212. if (!svm->next_rip) {
  213. if (emulate_instruction(vcpu, 0, 0, EMULTYPE_SKIP) !=
  214. EMULATE_DONE)
  215. printk(KERN_DEBUG "%s: NOP\n", __func__);
  216. return;
  217. }
  218. if (svm->next_rip - kvm_rip_read(vcpu) > MAX_INST_SIZE)
  219. printk(KERN_ERR "%s: ip 0x%lx next 0x%llx\n",
  220. __func__, kvm_rip_read(vcpu), svm->next_rip);
  221. kvm_rip_write(vcpu, svm->next_rip);
  222. svm_set_interrupt_shadow(vcpu, 0);
  223. }
  224. static void svm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr,
  225. bool has_error_code, u32 error_code)
  226. {
  227. struct vcpu_svm *svm = to_svm(vcpu);
  228. /*
  229. * If we are within a nested VM we'd better #VMEXIT and let the guest
  230. * handle the exception
  231. */
  232. if (nested_svm_check_exception(svm, nr, has_error_code, error_code))
  233. return;
  234. if (nr == BP_VECTOR && !svm_has(SVM_FEATURE_NRIP)) {
  235. unsigned long rip, old_rip = kvm_rip_read(&svm->vcpu);
  236. /*
  237. * For guest debugging where we have to reinject #BP if some
  238. * INT3 is guest-owned:
  239. * Emulate nRIP by moving RIP forward. Will fail if injection
  240. * raises a fault that is not intercepted. Still better than
  241. * failing in all cases.
  242. */
  243. skip_emulated_instruction(&svm->vcpu);
  244. rip = kvm_rip_read(&svm->vcpu);
  245. svm->int3_rip = rip + svm->vmcb->save.cs.base;
  246. svm->int3_injected = rip - old_rip;
  247. }
  248. svm->vmcb->control.event_inj = nr
  249. | SVM_EVTINJ_VALID
  250. | (has_error_code ? SVM_EVTINJ_VALID_ERR : 0)
  251. | SVM_EVTINJ_TYPE_EXEPT;
  252. svm->vmcb->control.event_inj_err = error_code;
  253. }
  254. static int has_svm(void)
  255. {
  256. const char *msg;
  257. if (!cpu_has_svm(&msg)) {
  258. printk(KERN_INFO "has_svm: %s\n", msg);
  259. return 0;
  260. }
  261. return 1;
  262. }
  263. static void svm_hardware_disable(void *garbage)
  264. {
  265. cpu_svm_disable();
  266. }
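/*
 * Per-CPU hardware enable: verify SVM support, set up this CPU's ASID
 * bookkeeping and TSS descriptor pointer, turn on EFER.SVME and point
 * MSR_VM_HSAVE_PA at the per-CPU host save area.
 */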
  267. static int svm_hardware_enable(void *garbage)
  268. {
  269. struct svm_cpu_data *sd;
  270. uint64_t efer;
  271. struct desc_ptr gdt_descr;
  272. struct desc_struct *gdt;
  273. int me = raw_smp_processor_id();
  274. rdmsrl(MSR_EFER, efer);
  275. if (efer & EFER_SVME)
  276. return -EBUSY;
  277. if (!has_svm()) {
  278. printk(KERN_ERR "svm_hardware_enable: err EOPNOTSUPP on %d\n",
  279. me);
  280. return -EINVAL;
  281. }
  282. sd = per_cpu(svm_data, me);
  283. if (!sd) {
  284. printk(KERN_ERR "svm_hardware_enable: svm_data is NULL on %d\n",
  285. me);
  286. return -EINVAL;
  287. }
  288. sd->asid_generation = 1;
  289. sd->max_asid = cpuid_ebx(SVM_CPUID_FUNC) - 1;
  290. sd->next_asid = sd->max_asid + 1;
  291. kvm_get_gdt(&gdt_descr);
  292. gdt = (struct desc_struct *)gdt_descr.address;
  293. sd->tss_desc = (struct kvm_ldttss_desc *)(gdt + GDT_ENTRY_TSS);
  294. wrmsrl(MSR_EFER, efer | EFER_SVME);
  295. wrmsrl(MSR_VM_HSAVE_PA, page_to_pfn(sd->save_area) << PAGE_SHIFT);
  296. return 0;
  297. }
  298. static void svm_cpu_uninit(int cpu)
  299. {
  300. struct svm_cpu_data *sd = per_cpu(svm_data, raw_smp_processor_id());
  301. if (!sd)
  302. return;
  303. per_cpu(svm_data, raw_smp_processor_id()) = NULL;
  304. __free_page(sd->save_area);
  305. kfree(sd);
  306. }
  307. static int svm_cpu_init(int cpu)
  308. {
  309. struct svm_cpu_data *sd;
  310. int r;
  311. sd = kzalloc(sizeof(struct svm_cpu_data), GFP_KERNEL);
  312. if (!sd)
  313. return -ENOMEM;
  314. sd->cpu = cpu;
  315. sd->save_area = alloc_page(GFP_KERNEL);
  316. r = -ENOMEM;
  317. if (!sd->save_area)
  318. goto err_1;
  319. per_cpu(svm_data, cpu) = sd;
  320. return 0;
  321. err_1:
  322. kfree(sd);
  323. return r;
  324. }
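/*
 * Program the two permission bits for one MSR in the permission map: the low
 * bit of the pair intercepts reads, the high bit intercepts writes.  Passing
 * read/write == 1 clears the respective bit, i.e. the access is passed
 * through to the guest without a #VMEXIT.
 */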
  325. static void set_msr_interception(u32 *msrpm, unsigned msr,
  326. int read, int write)
  327. {
  328. int i;
  329. for (i = 0; i < NUM_MSR_MAPS; i++) {
  330. if (msr >= msrpm_ranges[i] &&
  331. msr < msrpm_ranges[i] + MSRS_IN_RANGE) {
  332. u32 msr_offset = (i * MSRS_IN_RANGE + msr -
  333. msrpm_ranges[i]) * 2;
  334. u32 *base = msrpm + (msr_offset / 32);
  335. u32 msr_shift = msr_offset % 32;
  336. u32 mask = ((write) ? 0 : 2) | ((read) ? 0 : 1);
  337. *base = (*base & ~(0x3 << msr_shift)) |
  338. (mask << msr_shift);
  339. return;
  340. }
  341. }
  342. BUG();
  343. }
  344. static void svm_vcpu_init_msrpm(u32 *msrpm)
  345. {
  346. memset(msrpm, 0xff, PAGE_SIZE * (1 << MSRPM_ALLOC_ORDER));
  347. #ifdef CONFIG_X86_64
  348. set_msr_interception(msrpm, MSR_GS_BASE, 1, 1);
  349. set_msr_interception(msrpm, MSR_FS_BASE, 1, 1);
  350. set_msr_interception(msrpm, MSR_KERNEL_GS_BASE, 1, 1);
  351. set_msr_interception(msrpm, MSR_LSTAR, 1, 1);
  352. set_msr_interception(msrpm, MSR_CSTAR, 1, 1);
  353. set_msr_interception(msrpm, MSR_SYSCALL_MASK, 1, 1);
  354. #endif
  355. set_msr_interception(msrpm, MSR_K6_STAR, 1, 1);
  356. set_msr_interception(msrpm, MSR_IA32_SYSENTER_CS, 1, 1);
  357. }
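/*
 * LBR virtualization: have the hardware save/restore the last-branch record
 * MSRs and let the guest access them without interception.
 */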
  358. static void svm_enable_lbrv(struct vcpu_svm *svm)
  359. {
  360. u32 *msrpm = svm->msrpm;
  361. svm->vmcb->control.lbr_ctl = 1;
  362. set_msr_interception(msrpm, MSR_IA32_LASTBRANCHFROMIP, 1, 1);
  363. set_msr_interception(msrpm, MSR_IA32_LASTBRANCHTOIP, 1, 1);
  364. set_msr_interception(msrpm, MSR_IA32_LASTINTFROMIP, 1, 1);
  365. set_msr_interception(msrpm, MSR_IA32_LASTINTTOIP, 1, 1);
  366. }
  367. static void svm_disable_lbrv(struct vcpu_svm *svm)
  368. {
  369. u32 *msrpm = svm->msrpm;
  370. svm->vmcb->control.lbr_ctl = 0;
  371. set_msr_interception(msrpm, MSR_IA32_LASTBRANCHFROMIP, 0, 0);
  372. set_msr_interception(msrpm, MSR_IA32_LASTBRANCHTOIP, 0, 0);
  373. set_msr_interception(msrpm, MSR_IA32_LASTINTFROMIP, 0, 0);
  374. set_msr_interception(msrpm, MSR_IA32_LASTINTTOIP, 0, 0);
  375. }
  376. static __init int svm_hardware_setup(void)
  377. {
  378. int cpu;
  379. struct page *iopm_pages;
  380. void *iopm_va;
  381. int r;
  382. iopm_pages = alloc_pages(GFP_KERNEL, IOPM_ALLOC_ORDER);
  383. if (!iopm_pages)
  384. return -ENOMEM;
  385. iopm_va = page_address(iopm_pages);
  386. memset(iopm_va, 0xff, PAGE_SIZE * (1 << IOPM_ALLOC_ORDER));
  387. iopm_base = page_to_pfn(iopm_pages) << PAGE_SHIFT;
  388. if (boot_cpu_has(X86_FEATURE_NX))
  389. kvm_enable_efer_bits(EFER_NX);
  390. if (boot_cpu_has(X86_FEATURE_FXSR_OPT))
  391. kvm_enable_efer_bits(EFER_FFXSR);
  392. if (nested) {
  393. printk(KERN_INFO "kvm: Nested Virtualization enabled\n");
  394. kvm_enable_efer_bits(EFER_SVME);
  395. }
  396. for_each_possible_cpu(cpu) {
  397. r = svm_cpu_init(cpu);
  398. if (r)
  399. goto err;
  400. }
  401. svm_features = cpuid_edx(SVM_CPUID_FUNC);
  402. if (!svm_has(SVM_FEATURE_NPT))
  403. npt_enabled = false;
  404. if (npt_enabled && !npt) {
  405. printk(KERN_INFO "kvm: Nested Paging disabled\n");
  406. npt_enabled = false;
  407. }
  408. if (npt_enabled) {
  409. printk(KERN_INFO "kvm: Nested Paging enabled\n");
  410. kvm_enable_tdp();
  411. } else
  412. kvm_disable_tdp();
  413. return 0;
  414. err:
  415. __free_pages(iopm_pages, IOPM_ALLOC_ORDER);
  416. iopm_base = 0;
  417. return r;
  418. }
  419. static __exit void svm_hardware_unsetup(void)
  420. {
  421. int cpu;
  422. for_each_possible_cpu(cpu)
  423. svm_cpu_uninit(cpu);
  424. __free_pages(pfn_to_page(iopm_base >> PAGE_SHIFT), IOPM_ALLOC_ORDER);
  425. iopm_base = 0;
  426. }
  427. static void init_seg(struct vmcb_seg *seg)
  428. {
  429. seg->selector = 0;
  430. seg->attrib = SVM_SELECTOR_P_MASK | SVM_SELECTOR_S_MASK |
  431. SVM_SELECTOR_WRITE_MASK; /* Read/Write Data Segment */
  432. seg->limit = 0xffff;
  433. seg->base = 0;
  434. }
  435. static void init_sys_seg(struct vmcb_seg *seg, uint32_t type)
  436. {
  437. seg->selector = 0;
  438. seg->attrib = SVM_SELECTOR_P_MASK | type;
  439. seg->limit = 0xffff;
  440. seg->base = 0;
  441. }
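/*
 * Build the initial VMCB: default intercepts, I/O and MSR permission map
 * addresses, and a save area matching the x86 reset state.
 */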
  442. static void init_vmcb(struct vcpu_svm *svm)
  443. {
  444. struct vmcb_control_area *control = &svm->vmcb->control;
  445. struct vmcb_save_area *save = &svm->vmcb->save;
  446. svm->vcpu.fpu_active = 1;
  447. control->intercept_cr_read = INTERCEPT_CR0_MASK |
  448. INTERCEPT_CR3_MASK |
  449. INTERCEPT_CR4_MASK;
  450. control->intercept_cr_write = INTERCEPT_CR0_MASK |
  451. INTERCEPT_CR3_MASK |
  452. INTERCEPT_CR4_MASK |
  453. INTERCEPT_CR8_MASK;
  454. control->intercept_dr_read = INTERCEPT_DR0_MASK |
  455. INTERCEPT_DR1_MASK |
  456. INTERCEPT_DR2_MASK |
  457. INTERCEPT_DR3_MASK |
  458. INTERCEPT_DR4_MASK |
  459. INTERCEPT_DR5_MASK |
  460. INTERCEPT_DR6_MASK |
  461. INTERCEPT_DR7_MASK;
  462. control->intercept_dr_write = INTERCEPT_DR0_MASK |
  463. INTERCEPT_DR1_MASK |
  464. INTERCEPT_DR2_MASK |
  465. INTERCEPT_DR3_MASK |
  466. INTERCEPT_DR4_MASK |
  467. INTERCEPT_DR5_MASK |
  468. INTERCEPT_DR6_MASK |
  469. INTERCEPT_DR7_MASK;
  470. control->intercept_exceptions = (1 << PF_VECTOR) |
  471. (1 << UD_VECTOR) |
  472. (1 << MC_VECTOR);
  473. control->intercept = (1ULL << INTERCEPT_INTR) |
  474. (1ULL << INTERCEPT_NMI) |
  475. (1ULL << INTERCEPT_SMI) |
  476. (1ULL << INTERCEPT_SELECTIVE_CR0) |
  477. (1ULL << INTERCEPT_CPUID) |
  478. (1ULL << INTERCEPT_INVD) |
  479. (1ULL << INTERCEPT_HLT) |
  480. (1ULL << INTERCEPT_INVLPG) |
  481. (1ULL << INTERCEPT_INVLPGA) |
  482. (1ULL << INTERCEPT_IOIO_PROT) |
  483. (1ULL << INTERCEPT_MSR_PROT) |
  484. (1ULL << INTERCEPT_TASK_SWITCH) |
  485. (1ULL << INTERCEPT_SHUTDOWN) |
  486. (1ULL << INTERCEPT_VMRUN) |
  487. (1ULL << INTERCEPT_VMMCALL) |
  488. (1ULL << INTERCEPT_VMLOAD) |
  489. (1ULL << INTERCEPT_VMSAVE) |
  490. (1ULL << INTERCEPT_STGI) |
  491. (1ULL << INTERCEPT_CLGI) |
  492. (1ULL << INTERCEPT_SKINIT) |
  493. (1ULL << INTERCEPT_WBINVD) |
  494. (1ULL << INTERCEPT_MONITOR) |
  495. (1ULL << INTERCEPT_MWAIT);
  496. control->iopm_base_pa = iopm_base;
  497. control->msrpm_base_pa = __pa(svm->msrpm);
  498. control->tsc_offset = 0;
  499. control->int_ctl = V_INTR_MASKING_MASK;
  500. init_seg(&save->es);
  501. init_seg(&save->ss);
  502. init_seg(&save->ds);
  503. init_seg(&save->fs);
  504. init_seg(&save->gs);
  505. save->cs.selector = 0xf000;
  506. /* Executable/Readable Code Segment */
  507. save->cs.attrib = SVM_SELECTOR_READ_MASK | SVM_SELECTOR_P_MASK |
  508. SVM_SELECTOR_S_MASK | SVM_SELECTOR_CODE_MASK;
  509. save->cs.limit = 0xffff;
  510. /*
  511. * cs.base should really be 0xffff0000, but vmx can't handle that, so
  512. * be consistent with it.
  513. *
  514. * Replace when we have real mode working for vmx.
  515. */
  516. save->cs.base = 0xf0000;
  517. save->gdtr.limit = 0xffff;
  518. save->idtr.limit = 0xffff;
  519. init_sys_seg(&save->ldtr, SEG_TYPE_LDT);
  520. init_sys_seg(&save->tr, SEG_TYPE_BUSY_TSS16);
  521. save->efer = EFER_SVME;
  522. save->dr6 = 0xffff0ff0;
  523. save->dr7 = 0x400;
  524. save->rflags = 2;
  525. save->rip = 0x0000fff0;
  526. svm->vcpu.arch.regs[VCPU_REGS_RIP] = save->rip;
  527. /*
  528. * This is the guest-visible cr0 value.
  529. * svm_set_cr0() sets PG and WP and clears NW and CD on save->cr0.
  530. */
  531. svm->vcpu.arch.cr0 = X86_CR0_NW | X86_CR0_CD | X86_CR0_ET;
  532. kvm_set_cr0(&svm->vcpu, svm->vcpu.arch.cr0);
  533. save->cr4 = X86_CR4_PAE;
  534. /* rdx = ?? */
  535. if (npt_enabled) {
  536. /* Setup VMCB for Nested Paging */
  537. control->nested_ctl = 1;
  538. control->intercept &= ~((1ULL << INTERCEPT_TASK_SWITCH) |
  539. (1ULL << INTERCEPT_INVLPG));
  540. control->intercept_exceptions &= ~(1 << PF_VECTOR);
  541. control->intercept_cr_read &= ~INTERCEPT_CR3_MASK;
  542. control->intercept_cr_write &= ~INTERCEPT_CR3_MASK;
  543. save->g_pat = 0x0007040600070406ULL;
  544. save->cr3 = 0;
  545. save->cr4 = 0;
  546. }
  547. force_new_asid(&svm->vcpu);
  548. svm->nested.vmcb = 0;
  549. svm->vcpu.arch.hflags = 0;
  550. if (svm_has(SVM_FEATURE_PAUSE_FILTER)) {
  551. control->pause_filter_count = 3000;
  552. control->intercept |= (1ULL << INTERCEPT_PAUSE);
  553. }
  554. enable_gif(svm);
  555. }
  556. static int svm_vcpu_reset(struct kvm_vcpu *vcpu)
  557. {
  558. struct vcpu_svm *svm = to_svm(vcpu);
  559. init_vmcb(svm);
  560. if (!kvm_vcpu_is_bsp(vcpu)) {
  561. kvm_rip_write(vcpu, 0);
  562. svm->vmcb->save.cs.base = svm->vcpu.arch.sipi_vector << 12;
  563. svm->vmcb->save.cs.selector = svm->vcpu.arch.sipi_vector << 8;
  564. }
  565. vcpu->arch.regs_avail = ~0;
  566. vcpu->arch.regs_dirty = ~0;
  567. return 0;
  568. }
  569. static struct kvm_vcpu *svm_create_vcpu(struct kvm *kvm, unsigned int id)
  570. {
  571. struct vcpu_svm *svm;
  572. struct page *page;
  573. struct page *msrpm_pages;
  574. struct page *hsave_page;
  575. struct page *nested_msrpm_pages;
  576. int err;
  577. svm = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
  578. if (!svm) {
  579. err = -ENOMEM;
  580. goto out;
  581. }
  582. err = kvm_vcpu_init(&svm->vcpu, kvm, id);
  583. if (err)
  584. goto free_svm;
  585. err = -ENOMEM;
  586. page = alloc_page(GFP_KERNEL);
  587. if (!page)
  588. goto uninit;
  589. msrpm_pages = alloc_pages(GFP_KERNEL, MSRPM_ALLOC_ORDER);
  590. if (!msrpm_pages)
  591. goto free_page1;
  592. nested_msrpm_pages = alloc_pages(GFP_KERNEL, MSRPM_ALLOC_ORDER);
  593. if (!nested_msrpm_pages)
  594. goto free_page2;
  595. hsave_page = alloc_page(GFP_KERNEL);
  596. if (!hsave_page)
  597. goto free_page3;
  598. svm->nested.hsave = page_address(hsave_page);
  599. svm->msrpm = page_address(msrpm_pages);
  600. svm_vcpu_init_msrpm(svm->msrpm);
  601. svm->nested.msrpm = page_address(nested_msrpm_pages);
  602. svm->vmcb = page_address(page);
  603. clear_page(svm->vmcb);
  604. svm->vmcb_pa = page_to_pfn(page) << PAGE_SHIFT;
  605. svm->asid_generation = 0;
  606. init_vmcb(svm);
  607. fx_init(&svm->vcpu);
  608. svm->vcpu.arch.apic_base = 0xfee00000 | MSR_IA32_APICBASE_ENABLE;
  609. if (kvm_vcpu_is_bsp(&svm->vcpu))
  610. svm->vcpu.arch.apic_base |= MSR_IA32_APICBASE_BSP;
  611. return &svm->vcpu;
  612. free_page3:
  613. __free_pages(nested_msrpm_pages, MSRPM_ALLOC_ORDER);
  614. free_page2:
  615. __free_pages(msrpm_pages, MSRPM_ALLOC_ORDER);
  616. free_page1:
  617. __free_page(page);
  618. uninit:
  619. kvm_vcpu_uninit(&svm->vcpu);
  620. free_svm:
  621. kmem_cache_free(kvm_vcpu_cache, svm);
  622. out:
  623. return ERR_PTR(err);
  624. }
  625. static void svm_free_vcpu(struct kvm_vcpu *vcpu)
  626. {
  627. struct vcpu_svm *svm = to_svm(vcpu);
  628. __free_page(pfn_to_page(svm->vmcb_pa >> PAGE_SHIFT));
  629. __free_pages(virt_to_page(svm->msrpm), MSRPM_ALLOC_ORDER);
  630. __free_page(virt_to_page(svm->nested.hsave));
  631. __free_pages(virt_to_page(svm->nested.msrpm), MSRPM_ALLOC_ORDER);
  632. kvm_vcpu_uninit(vcpu);
  633. kmem_cache_free(kvm_vcpu_cache, svm);
  634. }
  635. static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
  636. {
  637. struct vcpu_svm *svm = to_svm(vcpu);
  638. int i;
  639. if (unlikely(cpu != vcpu->cpu)) {
  640. u64 delta;
  641. if (check_tsc_unstable()) {
  642. /*
  643. * Make sure that the guest sees a monotonically
  644. * increasing TSC.
  645. */
  646. delta = vcpu->arch.host_tsc - native_read_tsc();
  647. svm->vmcb->control.tsc_offset += delta;
  648. if (is_nested(svm))
  649. svm->nested.hsave->control.tsc_offset += delta;
  650. }
  651. vcpu->cpu = cpu;
  652. kvm_migrate_timers(vcpu);
  653. svm->asid_generation = 0;
  654. }
  655. for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++)
  656. rdmsrl(host_save_user_msrs[i], svm->host_user_msrs[i]);
  657. }
  658. static void svm_vcpu_put(struct kvm_vcpu *vcpu)
  659. {
  660. struct vcpu_svm *svm = to_svm(vcpu);
  661. int i;
  662. ++vcpu->stat.host_state_reload;
  663. for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++)
  664. wrmsrl(host_save_user_msrs[i], svm->host_user_msrs[i]);
  665. vcpu->arch.host_tsc = native_read_tsc();
  666. }
  667. static unsigned long svm_get_rflags(struct kvm_vcpu *vcpu)
  668. {
  669. return to_svm(vcpu)->vmcb->save.rflags;
  670. }
  671. static void svm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
  672. {
  673. to_svm(vcpu)->vmcb->save.rflags = rflags;
  674. }
  675. static void svm_cache_reg(struct kvm_vcpu *vcpu, enum kvm_reg reg)
  676. {
  677. switch (reg) {
  678. case VCPU_EXREG_PDPTR:
  679. BUG_ON(!npt_enabled);
  680. load_pdptrs(vcpu, vcpu->arch.cr3);
  681. break;
  682. default:
  683. BUG();
  684. }
  685. }
  686. static void svm_set_vintr(struct vcpu_svm *svm)
  687. {
  688. svm->vmcb->control.intercept |= 1ULL << INTERCEPT_VINTR;
  689. }
  690. static void svm_clear_vintr(struct vcpu_svm *svm)
  691. {
  692. svm->vmcb->control.intercept &= ~(1ULL << INTERCEPT_VINTR);
  693. }
  694. static struct vmcb_seg *svm_seg(struct kvm_vcpu *vcpu, int seg)
  695. {
  696. struct vmcb_save_area *save = &to_svm(vcpu)->vmcb->save;
  697. switch (seg) {
  698. case VCPU_SREG_CS: return &save->cs;
  699. case VCPU_SREG_DS: return &save->ds;
  700. case VCPU_SREG_ES: return &save->es;
  701. case VCPU_SREG_FS: return &save->fs;
  702. case VCPU_SREG_GS: return &save->gs;
  703. case VCPU_SREG_SS: return &save->ss;
  704. case VCPU_SREG_TR: return &save->tr;
  705. case VCPU_SREG_LDTR: return &save->ldtr;
  706. }
  707. BUG();
  708. return NULL;
  709. }
  710. static u64 svm_get_segment_base(struct kvm_vcpu *vcpu, int seg)
  711. {
  712. struct vmcb_seg *s = svm_seg(vcpu, seg);
  713. return s->base;
  714. }
  715. static void svm_get_segment(struct kvm_vcpu *vcpu,
  716. struct kvm_segment *var, int seg)
  717. {
  718. struct vmcb_seg *s = svm_seg(vcpu, seg);
  719. var->base = s->base;
  720. var->limit = s->limit;
  721. var->selector = s->selector;
  722. var->type = s->attrib & SVM_SELECTOR_TYPE_MASK;
  723. var->s = (s->attrib >> SVM_SELECTOR_S_SHIFT) & 1;
  724. var->dpl = (s->attrib >> SVM_SELECTOR_DPL_SHIFT) & 3;
  725. var->present = (s->attrib >> SVM_SELECTOR_P_SHIFT) & 1;
  726. var->avl = (s->attrib >> SVM_SELECTOR_AVL_SHIFT) & 1;
  727. var->l = (s->attrib >> SVM_SELECTOR_L_SHIFT) & 1;
  728. var->db = (s->attrib >> SVM_SELECTOR_DB_SHIFT) & 1;
  729. var->g = (s->attrib >> SVM_SELECTOR_G_SHIFT) & 1;
  730. /*
  731. * AMD's VMCB does not have an explicit unusable field, so emulate it
  732. * for cross vendor migration purposes by "not present"
  733. */
  734. var->unusable = !var->present || (var->type == 0);
  735. switch (seg) {
  736. case VCPU_SREG_CS:
  737. /*
  738. * SVM always stores 0 for the 'G' bit in the CS selector in
  739. * the VMCB on a VMEXIT. This hurts cross-vendor migration:
  740. * Intel's VMENTRY has a check on the 'G' bit.
  741. */
  742. var->g = s->limit > 0xfffff;
  743. break;
  744. case VCPU_SREG_TR:
  745. /*
  746. * Work around a bug where the busy flag in the tr selector
  747. * isn't exposed
  748. */
  749. var->type |= 0x2;
  750. break;
  751. case VCPU_SREG_DS:
  752. case VCPU_SREG_ES:
  753. case VCPU_SREG_FS:
  754. case VCPU_SREG_GS:
  755. /*
  756. * The accessed bit must always be set in the segment
  757. * descriptor cache; although it can be cleared in the
  758. * descriptor itself, the cached bit always remains 1. Since
  759. * Intel has a check on this, set it here to support
  760. * cross-vendor migration.
  761. */
  762. if (!var->unusable)
  763. var->type |= 0x1;
  764. break;
  765. case VCPU_SREG_SS:
  766. /*
  767. * On AMD CPUs sometimes the DB bit in the segment
  768. * descriptor is left as 1, although the whole segment has
  769. * been made unusable. Clear it here to pass an Intel VMX
  770. * entry check when cross vendor migrating.
  771. */
  772. if (var->unusable)
  773. var->db = 0;
  774. break;
  775. }
  776. }
  777. static int svm_get_cpl(struct kvm_vcpu *vcpu)
  778. {
  779. struct vmcb_save_area *save = &to_svm(vcpu)->vmcb->save;
  780. return save->cpl;
  781. }
  782. static void svm_get_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
  783. {
  784. struct vcpu_svm *svm = to_svm(vcpu);
  785. dt->size = svm->vmcb->save.idtr.limit;
  786. dt->address = svm->vmcb->save.idtr.base;
  787. }
  788. static void svm_set_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
  789. {
  790. struct vcpu_svm *svm = to_svm(vcpu);
  791. svm->vmcb->save.idtr.limit = dt->size;
  792. svm->vmcb->save.idtr.base = dt->address;
  793. }
  794. static void svm_get_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
  795. {
  796. struct vcpu_svm *svm = to_svm(vcpu);
  797. dt->size = svm->vmcb->save.gdtr.limit;
  798. dt->address = svm->vmcb->save.gdtr.base;
  799. }
  800. static void svm_set_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
  801. {
  802. struct vcpu_svm *svm = to_svm(vcpu);
  803. svm->vmcb->save.gdtr.limit = dt->size;
  804. svm->vmcb->save.gdtr.base = dt->address;
  805. }
  806. static void svm_decache_cr0_guest_bits(struct kvm_vcpu *vcpu)
  807. {
  808. }
  809. static void svm_decache_cr4_guest_bits(struct kvm_vcpu *vcpu)
  810. {
  811. }
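/*
 * Intercept CR0 accesses only while the guest-visible CR0 differs from the
 * value in the VMCB or the FPU is inactive; when running nested, keep the
 * L1 hypervisor's own CR0 intercepts merged in.
 */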
  812. static void update_cr0_intercept(struct vcpu_svm *svm)
  813. {
  814. struct vmcb *vmcb = svm->vmcb;
  815. ulong gcr0 = svm->vcpu.arch.cr0;
  816. u64 *hcr0 = &svm->vmcb->save.cr0;
  817. if (!svm->vcpu.fpu_active)
  818. *hcr0 |= SVM_CR0_SELECTIVE_MASK;
  819. else
  820. *hcr0 = (*hcr0 & ~SVM_CR0_SELECTIVE_MASK)
  821. | (gcr0 & SVM_CR0_SELECTIVE_MASK);
  822. if (gcr0 == *hcr0 && svm->vcpu.fpu_active) {
  823. vmcb->control.intercept_cr_read &= ~INTERCEPT_CR0_MASK;
  824. vmcb->control.intercept_cr_write &= ~INTERCEPT_CR0_MASK;
  825. if (is_nested(svm)) {
  826. struct vmcb *hsave = svm->nested.hsave;
  827. hsave->control.intercept_cr_read &= ~INTERCEPT_CR0_MASK;
  828. hsave->control.intercept_cr_write &= ~INTERCEPT_CR0_MASK;
  829. vmcb->control.intercept_cr_read |= svm->nested.intercept_cr_read;
  830. vmcb->control.intercept_cr_write |= svm->nested.intercept_cr_write;
  831. }
  832. } else {
  833. svm->vmcb->control.intercept_cr_read |= INTERCEPT_CR0_MASK;
  834. svm->vmcb->control.intercept_cr_write |= INTERCEPT_CR0_MASK;
  835. if (is_nested(svm)) {
  836. struct vmcb *hsave = svm->nested.hsave;
  837. hsave->control.intercept_cr_read |= INTERCEPT_CR0_MASK;
  838. hsave->control.intercept_cr_write |= INTERCEPT_CR0_MASK;
  839. }
  840. }
  841. }
  842. static void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
  843. {
  844. struct vcpu_svm *svm = to_svm(vcpu);
  845. if (is_nested(svm)) {
  846. /*
  847. * We are here because we run in nested mode: the host kvm
  848. * intercepts cr0 writes, but the L1 hypervisor does not.
  849. * The L1 hypervisor may, however, intercept selective cr0
  850. * writes; this needs to be checked here.
  851. */
  852. unsigned long old, new;
  853. /* Remove bits that would trigger a real cr0 write intercept */
  854. old = vcpu->arch.cr0 & SVM_CR0_SELECTIVE_MASK;
  855. new = cr0 & SVM_CR0_SELECTIVE_MASK;
  856. if (old == new) {
  857. /* cr0 write with ts and mp unchanged */
  858. svm->vmcb->control.exit_code = SVM_EXIT_CR0_SEL_WRITE;
  859. if (nested_svm_exit_handled(svm) == NESTED_EXIT_DONE)
  860. return;
  861. }
  862. }
  863. #ifdef CONFIG_X86_64
  864. if (vcpu->arch.efer & EFER_LME) {
  865. if (!is_paging(vcpu) && (cr0 & X86_CR0_PG)) {
  866. vcpu->arch.efer |= EFER_LMA;
  867. svm->vmcb->save.efer |= EFER_LMA | EFER_LME;
  868. }
  869. if (is_paging(vcpu) && !(cr0 & X86_CR0_PG)) {
  870. vcpu->arch.efer &= ~EFER_LMA;
  871. svm->vmcb->save.efer &= ~(EFER_LMA | EFER_LME);
  872. }
  873. }
  874. #endif
  875. vcpu->arch.cr0 = cr0;
  876. if (!npt_enabled)
  877. cr0 |= X86_CR0_PG | X86_CR0_WP;
  878. if (!vcpu->fpu_active)
  879. cr0 |= X86_CR0_TS;
  880. /*
  881. * re-enable caching here because the QEMU bios
  882. * does not do it - this results in some delay at
  883. * reboot
  884. */
  885. cr0 &= ~(X86_CR0_CD | X86_CR0_NW);
  886. svm->vmcb->save.cr0 = cr0;
  887. update_cr0_intercept(svm);
  888. }
  889. static void svm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
  890. {
  891. unsigned long host_cr4_mce = read_cr4() & X86_CR4_MCE;
  892. unsigned long old_cr4 = to_svm(vcpu)->vmcb->save.cr4;
  893. if (npt_enabled && ((old_cr4 ^ cr4) & X86_CR4_PGE))
  894. force_new_asid(vcpu);
  895. vcpu->arch.cr4 = cr4;
  896. if (!npt_enabled)
  897. cr4 |= X86_CR4_PAE;
  898. cr4 |= host_cr4_mce;
  899. to_svm(vcpu)->vmcb->save.cr4 = cr4;
  900. }
  901. static void svm_set_segment(struct kvm_vcpu *vcpu,
  902. struct kvm_segment *var, int seg)
  903. {
  904. struct vcpu_svm *svm = to_svm(vcpu);
  905. struct vmcb_seg *s = svm_seg(vcpu, seg);
  906. s->base = var->base;
  907. s->limit = var->limit;
  908. s->selector = var->selector;
  909. if (var->unusable)
  910. s->attrib = 0;
  911. else {
  912. s->attrib = (var->type & SVM_SELECTOR_TYPE_MASK);
  913. s->attrib |= (var->s & 1) << SVM_SELECTOR_S_SHIFT;
  914. s->attrib |= (var->dpl & 3) << SVM_SELECTOR_DPL_SHIFT;
  915. s->attrib |= (var->present & 1) << SVM_SELECTOR_P_SHIFT;
  916. s->attrib |= (var->avl & 1) << SVM_SELECTOR_AVL_SHIFT;
  917. s->attrib |= (var->l & 1) << SVM_SELECTOR_L_SHIFT;
  918. s->attrib |= (var->db & 1) << SVM_SELECTOR_DB_SHIFT;
  919. s->attrib |= (var->g & 1) << SVM_SELECTOR_G_SHIFT;
  920. }
  921. if (seg == VCPU_SREG_CS)
  922. svm->vmcb->save.cpl
  923. = (svm->vmcb->save.cs.attrib
  924. >> SVM_SELECTOR_DPL_SHIFT) & 3;
  925. }
  926. static void update_db_intercept(struct kvm_vcpu *vcpu)
  927. {
  928. struct vcpu_svm *svm = to_svm(vcpu);
  929. svm->vmcb->control.intercept_exceptions &=
  930. ~((1 << DB_VECTOR) | (1 << BP_VECTOR));
  931. if (svm->nmi_singlestep)
  932. svm->vmcb->control.intercept_exceptions |= (1 << DB_VECTOR);
  933. if (vcpu->guest_debug & KVM_GUESTDBG_ENABLE) {
  934. if (vcpu->guest_debug &
  935. (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP))
  936. svm->vmcb->control.intercept_exceptions |=
  937. 1 << DB_VECTOR;
  938. if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP)
  939. svm->vmcb->control.intercept_exceptions |=
  940. 1 << BP_VECTOR;
  941. } else
  942. vcpu->guest_debug = 0;
  943. }
  944. static void svm_guest_debug(struct kvm_vcpu *vcpu, struct kvm_guest_debug *dbg)
  945. {
  946. struct vcpu_svm *svm = to_svm(vcpu);
  947. if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)
  948. svm->vmcb->save.dr7 = dbg->arch.debugreg[7];
  949. else
  950. svm->vmcb->save.dr7 = vcpu->arch.dr7;
  951. update_db_intercept(vcpu);
  952. }
  953. static void load_host_msrs(struct kvm_vcpu *vcpu)
  954. {
  955. #ifdef CONFIG_X86_64
  956. wrmsrl(MSR_GS_BASE, to_svm(vcpu)->host_gs_base);
  957. #endif
  958. }
  959. static void save_host_msrs(struct kvm_vcpu *vcpu)
  960. {
  961. #ifdef CONFIG_X86_64
  962. rdmsrl(MSR_GS_BASE, to_svm(vcpu)->host_gs_base);
  963. #endif
  964. }
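/*
 * Hand out the next ASID on this CPU.  Once the ASID space is exhausted,
 * start a new generation at ASID 1 and request a flush of all ASIDs from
 * the hardware.
 */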
  965. static void new_asid(struct vcpu_svm *svm, struct svm_cpu_data *sd)
  966. {
  967. if (sd->next_asid > sd->max_asid) {
  968. ++sd->asid_generation;
  969. sd->next_asid = 1;
  970. svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ALL_ASID;
  971. }
  972. svm->asid_generation = sd->asid_generation;
  973. svm->vmcb->control.asid = sd->next_asid++;
  974. }
  975. static int svm_get_dr(struct kvm_vcpu *vcpu, int dr, unsigned long *dest)
  976. {
  977. struct vcpu_svm *svm = to_svm(vcpu);
  978. switch (dr) {
  979. case 0 ... 3:
  980. *dest = vcpu->arch.db[dr];
  981. break;
  982. case 4:
  983. if (kvm_read_cr4_bits(vcpu, X86_CR4_DE))
  984. return EMULATE_FAIL; /* will re-inject UD */
  985. /* fall through */
  986. case 6:
  987. if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)
  988. *dest = vcpu->arch.dr6;
  989. else
  990. *dest = svm->vmcb->save.dr6;
  991. break;
  992. case 5:
  993. if (kvm_read_cr4_bits(vcpu, X86_CR4_DE))
  994. return EMULATE_FAIL; /* will re-inject UD */
  995. /* fall through */
  996. case 7:
  997. if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)
  998. *dest = vcpu->arch.dr7;
  999. else
  1000. *dest = svm->vmcb->save.dr7;
  1001. break;
  1002. }
  1003. return EMULATE_DONE;
  1004. }
  1005. static int svm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long value)
  1006. {
  1007. struct vcpu_svm *svm = to_svm(vcpu);
  1008. switch (dr) {
  1009. case 0 ... 3:
  1010. vcpu->arch.db[dr] = value;
  1011. if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP))
  1012. vcpu->arch.eff_db[dr] = value;
  1013. break;
  1014. case 4:
  1015. if (kvm_read_cr4_bits(vcpu, X86_CR4_DE))
  1016. return EMULATE_FAIL; /* will re-inject UD */
  1017. /* fall through */
  1018. case 6:
  1019. vcpu->arch.dr6 = (value & DR6_VOLATILE) | DR6_FIXED_1;
  1020. break;
  1021. case 5:
  1022. if (kvm_read_cr4_bits(vcpu, X86_CR4_DE))
  1023. return EMULATE_FAIL; /* will re-inject UD */
  1024. /* fall through */
  1025. case 7:
  1026. vcpu->arch.dr7 = (value & DR7_VOLATILE) | DR7_FIXED_1;
  1027. if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)) {
  1028. svm->vmcb->save.dr7 = vcpu->arch.dr7;
  1029. vcpu->arch.switch_db_regs = (value & DR7_BP_EN_MASK);
  1030. }
  1031. break;
  1032. }
  1033. return EMULATE_DONE;
  1034. }
  1035. static int pf_interception(struct vcpu_svm *svm)
  1036. {
  1037. u64 fault_address;
  1038. u32 error_code;
  1039. fault_address = svm->vmcb->control.exit_info_2;
  1040. error_code = svm->vmcb->control.exit_info_1;
  1041. trace_kvm_page_fault(fault_address, error_code);
  1042. if (!npt_enabled && kvm_event_needs_reinjection(&svm->vcpu))
  1043. kvm_mmu_unprotect_page_virt(&svm->vcpu, fault_address);
  1044. return kvm_mmu_page_fault(&svm->vcpu, fault_address, error_code);
  1045. }
  1046. static int db_interception(struct vcpu_svm *svm)
  1047. {
  1048. struct kvm_run *kvm_run = svm->vcpu.run;
  1049. if (!(svm->vcpu.guest_debug &
  1050. (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP)) &&
  1051. !svm->nmi_singlestep) {
  1052. kvm_queue_exception(&svm->vcpu, DB_VECTOR);
  1053. return 1;
  1054. }
  1055. if (svm->nmi_singlestep) {
  1056. svm->nmi_singlestep = false;
  1057. if (!(svm->vcpu.guest_debug & KVM_GUESTDBG_SINGLESTEP))
  1058. svm->vmcb->save.rflags &=
  1059. ~(X86_EFLAGS_TF | X86_EFLAGS_RF);
  1060. update_db_intercept(&svm->vcpu);
  1061. }
  1062. if (svm->vcpu.guest_debug &
  1063. (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP)) {
  1064. kvm_run->exit_reason = KVM_EXIT_DEBUG;
  1065. kvm_run->debug.arch.pc =
  1066. svm->vmcb->save.cs.base + svm->vmcb->save.rip;
  1067. kvm_run->debug.arch.exception = DB_VECTOR;
  1068. return 0;
  1069. }
  1070. return 1;
  1071. }
  1072. static int bp_interception(struct vcpu_svm *svm)
  1073. {
  1074. struct kvm_run *kvm_run = svm->vcpu.run;
  1075. kvm_run->exit_reason = KVM_EXIT_DEBUG;
  1076. kvm_run->debug.arch.pc = svm->vmcb->save.cs.base + svm->vmcb->save.rip;
  1077. kvm_run->debug.arch.exception = BP_VECTOR;
  1078. return 0;
  1079. }
  1080. static int ud_interception(struct vcpu_svm *svm)
  1081. {
  1082. int er;
  1083. er = emulate_instruction(&svm->vcpu, 0, 0, EMULTYPE_TRAP_UD);
  1084. if (er != EMULATE_DONE)
  1085. kvm_queue_exception(&svm->vcpu, UD_VECTOR);
  1086. return 1;
  1087. }
  1088. static void svm_fpu_activate(struct kvm_vcpu *vcpu)
  1089. {
  1090. struct vcpu_svm *svm = to_svm(vcpu);
  1091. u32 excp;
  1092. if (is_nested(svm)) {
  1093. u32 h_excp, n_excp;
  1094. h_excp = svm->nested.hsave->control.intercept_exceptions;
  1095. n_excp = svm->nested.intercept_exceptions;
  1096. h_excp &= ~(1 << NM_VECTOR);
  1097. excp = h_excp | n_excp;
  1098. } else {
  1099. excp = svm->vmcb->control.intercept_exceptions;
  1100. excp &= ~(1 << NM_VECTOR);
  1101. }
  1102. svm->vmcb->control.intercept_exceptions = excp;
  1103. svm->vcpu.fpu_active = 1;
  1104. update_cr0_intercept(svm);
  1105. }
  1106. static int nm_interception(struct vcpu_svm *svm)
  1107. {
  1108. svm_fpu_activate(&svm->vcpu);
  1109. return 1;
  1110. }
  1111. static int mc_interception(struct vcpu_svm *svm)
  1112. {
  1113. /*
  1114. * On an #MC intercept the MCE handler is not called automatically in
  1115. * the host. So do it by hand here.
  1116. */
  1117. asm volatile (
  1118. "int $0x12\n");
  1119. /* not sure if we ever come back to this point */
  1120. return 1;
  1121. }
  1122. static int shutdown_interception(struct vcpu_svm *svm)
  1123. {
  1124. struct kvm_run *kvm_run = svm->vcpu.run;
  1125. /*
  1126. * VMCB is undefined after a SHUTDOWN intercept
  1127. * so reinitialize it.
  1128. */
  1129. clear_page(svm->vmcb);
  1130. init_vmcb(svm);
  1131. kvm_run->exit_reason = KVM_EXIT_SHUTDOWN;
  1132. return 0;
  1133. }
  1134. static int io_interception(struct vcpu_svm *svm)
  1135. {
  1136. u32 io_info = svm->vmcb->control.exit_info_1; /* address size bug? */
  1137. int size, in, string;
  1138. unsigned port;
  1139. ++svm->vcpu.stat.io_exits;
  1140. svm->next_rip = svm->vmcb->control.exit_info_2;
  1141. string = (io_info & SVM_IOIO_STR_MASK) != 0;
  1142. if (string) {
  1143. if (emulate_instruction(&svm->vcpu,
  1144. 0, 0, 0) == EMULATE_DO_MMIO)
  1145. return 0;
  1146. return 1;
  1147. }
  1148. in = (io_info & SVM_IOIO_TYPE_MASK) != 0;
  1149. port = io_info >> 16;
  1150. size = (io_info & SVM_IOIO_SIZE_MASK) >> SVM_IOIO_SIZE_SHIFT;
  1151. skip_emulated_instruction(&svm->vcpu);
  1152. return kvm_emulate_pio(&svm->vcpu, in, size, port);
  1153. }
  1154. static int nmi_interception(struct vcpu_svm *svm)
  1155. {
  1156. return 1;
  1157. }
  1158. static int intr_interception(struct vcpu_svm *svm)
  1159. {
  1160. ++svm->vcpu.stat.irq_exits;
  1161. return 1;
  1162. }
  1163. static int nop_on_interception(struct vcpu_svm *svm)
  1164. {
  1165. return 1;
  1166. }
  1167. static int halt_interception(struct vcpu_svm *svm)
  1168. {
  1169. svm->next_rip = kvm_rip_read(&svm->vcpu) + 1;
  1170. skip_emulated_instruction(&svm->vcpu);
  1171. return kvm_emulate_halt(&svm->vcpu);
  1172. }
  1173. static int vmmcall_interception(struct vcpu_svm *svm)
  1174. {
  1175. svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
  1176. skip_emulated_instruction(&svm->vcpu);
  1177. kvm_emulate_hypercall(&svm->vcpu);
  1178. return 1;
  1179. }
  1180. static int nested_svm_check_permissions(struct vcpu_svm *svm)
  1181. {
  1182. if (!(svm->vcpu.arch.efer & EFER_SVME)
  1183. || !is_paging(&svm->vcpu)) {
  1184. kvm_queue_exception(&svm->vcpu, UD_VECTOR);
  1185. return 1;
  1186. }
  1187. if (svm->vmcb->save.cpl) {
  1188. kvm_inject_gp(&svm->vcpu, 0);
  1189. return 1;
  1190. }
  1191. return 0;
  1192. }
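/*
 * If the L1 hypervisor intercepts this exception vector, record the pending
 * #VMEXIT instead of injecting the exception into the nested guest.
 */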
  1193. static int nested_svm_check_exception(struct vcpu_svm *svm, unsigned nr,
  1194. bool has_error_code, u32 error_code)
  1195. {
  1196. int vmexit;
  1197. if (!is_nested(svm))
  1198. return 0;
  1199. svm->vmcb->control.exit_code = SVM_EXIT_EXCP_BASE + nr;
  1200. svm->vmcb->control.exit_code_hi = 0;
  1201. svm->vmcb->control.exit_info_1 = error_code;
  1202. svm->vmcb->control.exit_info_2 = svm->vcpu.arch.cr2;
  1203. vmexit = nested_svm_intercept(svm);
  1204. if (vmexit == NESTED_EXIT_DONE)
  1205. svm->nested.exit_required = true;
  1206. return vmexit;
  1207. }
  1208. /* This function returns true if it is safe to enable the irq window */
  1209. static inline bool nested_svm_intr(struct vcpu_svm *svm)
  1210. {
  1211. if (!is_nested(svm))
  1212. return true;
  1213. if (!(svm->vcpu.arch.hflags & HF_VINTR_MASK))
  1214. return true;
  1215. if (!(svm->vcpu.arch.hflags & HF_HIF_MASK))
  1216. return false;
  1217. svm->vmcb->control.exit_code = SVM_EXIT_INTR;
  1218. svm->vmcb->control.exit_info_1 = 0;
  1219. svm->vmcb->control.exit_info_2 = 0;
  1220. if (svm->nested.intercept & 1ULL) {
  1221. /*
  1222. * The #vmexit can't be emulated here directly because this
  1223. * code path runs with irqs and preemption disabled. A
  1224. * #vmexit emulation might sleep. Only signal request for
  1225. * the #vmexit here.
  1226. */
  1227. svm->nested.exit_required = true;
  1228. trace_kvm_nested_intr_vmexit(svm->vmcb->save.rip);
  1229. return false;
  1230. }
  1231. return true;
  1232. }
  1233. /* This function returns true if it is safe to enable the nmi window */
  1234. static inline bool nested_svm_nmi(struct vcpu_svm *svm)
  1235. {
  1236. if (!is_nested(svm))
  1237. return true;
  1238. if (!(svm->nested.intercept & (1ULL << INTERCEPT_NMI)))
  1239. return true;
  1240. svm->vmcb->control.exit_code = SVM_EXIT_NMI;
  1241. svm->nested.exit_required = true;
  1242. return false;
  1243. }
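/*
 * Map a guest page (e.g. the nested VMCB or its MSR bitmap) into the host
 * address space; may sleep and must be paired with nested_svm_unmap().
 */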
  1244. static void *nested_svm_map(struct vcpu_svm *svm, u64 gpa, struct page **_page)
  1245. {
  1246. struct page *page;
  1247. might_sleep();
  1248. page = gfn_to_page(svm->vcpu.kvm, gpa >> PAGE_SHIFT);
  1249. if (is_error_page(page))
  1250. goto error;
  1251. *_page = page;
  1252. return kmap(page);
  1253. error:
  1254. kvm_release_page_clean(page);
  1255. kvm_inject_gp(&svm->vcpu, 0);
  1256. return NULL;
  1257. }
  1258. static void nested_svm_unmap(struct page *page)
  1259. {
  1260. kunmap(page);
  1261. kvm_release_page_dirty(page);
  1262. }
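/*
 * Consult the L1 guest's MSR permission map to decide whether this MSR
 * access has to be reflected to the L1 hypervisor as a #VMEXIT.
 */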
  1263. static bool nested_svm_exit_handled_msr(struct vcpu_svm *svm)
  1264. {
  1265. u32 param = svm->vmcb->control.exit_info_1 & 1;
  1266. u32 msr = svm->vcpu.arch.regs[VCPU_REGS_RCX];
  1267. bool ret = false;
  1268. u32 t0, t1;
  1269. u8 val;
  1270. if (!(svm->nested.intercept & (1ULL << INTERCEPT_MSR_PROT)))
  1271. return false;
  1272. switch (msr) {
  1273. case 0 ... 0x1fff:
  1274. t0 = (msr * 2) % 8;
  1275. t1 = msr / 8;
  1276. break;
  1277. case 0xc0000000 ... 0xc0001fff:
  1278. t0 = (8192 + msr - 0xc0000000) * 2;
  1279. t1 = (t0 / 8);
  1280. t0 %= 8;
  1281. break;
  1282. case 0xc0010000 ... 0xc0011fff:
  1283. t0 = (16384 + msr - 0xc0010000) * 2;
  1284. t1 = (t0 / 8);
  1285. t0 %= 8;
  1286. break;
  1287. default:
  1288. ret = true;
  1289. goto out;
  1290. }
  1291. if (!kvm_read_guest(svm->vcpu.kvm, svm->nested.vmcb_msrpm + t1, &val, 1))
  1292. ret = val & ((1 << param) << t0);
  1293. out:
  1294. return ret;
  1295. }
  1296. static int nested_svm_exit_special(struct vcpu_svm *svm)
  1297. {
  1298. u32 exit_code = svm->vmcb->control.exit_code;
  1299. switch (exit_code) {
  1300. case SVM_EXIT_INTR:
  1301. case SVM_EXIT_NMI:
  1302. return NESTED_EXIT_HOST;
  1303. case SVM_EXIT_NPF:
  1304. /* For now we are always handling NPFs when using them */
  1305. if (npt_enabled)
  1306. return NESTED_EXIT_HOST;
  1307. break;
  1308. case SVM_EXIT_EXCP_BASE + PF_VECTOR:
  1309. /* When we're shadowing, trap PFs */
  1310. if (!npt_enabled)
  1311. return NESTED_EXIT_HOST;
  1312. break;
  1313. case SVM_EXIT_EXCP_BASE + NM_VECTOR:
  1314. nm_interception(svm);
  1315. break;
  1316. default:
  1317. break;
  1318. }
  1319. return NESTED_EXIT_CONTINUE;
  1320. }
  1321. /*
1322. * If this returns NESTED_EXIT_DONE, the nested guest intercepts this #vmexit and it has to be reflected to it
  1323. */
  1324. static int nested_svm_intercept(struct vcpu_svm *svm)
  1325. {
  1326. u32 exit_code = svm->vmcb->control.exit_code;
  1327. int vmexit = NESTED_EXIT_HOST;
  1328. switch (exit_code) {
  1329. case SVM_EXIT_MSR:
  1330. vmexit = nested_svm_exit_handled_msr(svm);
  1331. break;
  1332. case SVM_EXIT_READ_CR0 ... SVM_EXIT_READ_CR8: {
  1333. u32 cr_bits = 1 << (exit_code - SVM_EXIT_READ_CR0);
  1334. if (svm->nested.intercept_cr_read & cr_bits)
  1335. vmexit = NESTED_EXIT_DONE;
  1336. break;
  1337. }
  1338. case SVM_EXIT_WRITE_CR0 ... SVM_EXIT_WRITE_CR8: {
  1339. u32 cr_bits = 1 << (exit_code - SVM_EXIT_WRITE_CR0);
  1340. if (svm->nested.intercept_cr_write & cr_bits)
  1341. vmexit = NESTED_EXIT_DONE;
  1342. break;
  1343. }
  1344. case SVM_EXIT_READ_DR0 ... SVM_EXIT_READ_DR7: {
  1345. u32 dr_bits = 1 << (exit_code - SVM_EXIT_READ_DR0);
  1346. if (svm->nested.intercept_dr_read & dr_bits)
  1347. vmexit = NESTED_EXIT_DONE;
  1348. break;
  1349. }
  1350. case SVM_EXIT_WRITE_DR0 ... SVM_EXIT_WRITE_DR7: {
  1351. u32 dr_bits = 1 << (exit_code - SVM_EXIT_WRITE_DR0);
  1352. if (svm->nested.intercept_dr_write & dr_bits)
  1353. vmexit = NESTED_EXIT_DONE;
  1354. break;
  1355. }
  1356. case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 0x1f: {
  1357. u32 excp_bits = 1 << (exit_code - SVM_EXIT_EXCP_BASE);
  1358. if (svm->nested.intercept_exceptions & excp_bits)
  1359. vmexit = NESTED_EXIT_DONE;
  1360. break;
  1361. }
  1362. default: {
  1363. u64 exit_bits = 1ULL << (exit_code - SVM_EXIT_INTR);
  1364. if (svm->nested.intercept & exit_bits)
  1365. vmexit = NESTED_EXIT_DONE;
  1366. }
  1367. }
  1368. return vmexit;
  1369. }
  1370. static int nested_svm_exit_handled(struct vcpu_svm *svm)
  1371. {
  1372. int vmexit;
  1373. vmexit = nested_svm_intercept(svm);
  1374. if (vmexit == NESTED_EXIT_DONE)
  1375. nested_svm_vmexit(svm);
  1376. return vmexit;
  1377. }
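/*
 * Copy all control-area fields that the nested SVM code cares about from
 * one VMCB to another (used to stash and restore the host controls).
 */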
  1378. static inline void copy_vmcb_control_area(struct vmcb *dst_vmcb, struct vmcb *from_vmcb)
  1379. {
  1380. struct vmcb_control_area *dst = &dst_vmcb->control;
  1381. struct vmcb_control_area *from = &from_vmcb->control;
  1382. dst->intercept_cr_read = from->intercept_cr_read;
  1383. dst->intercept_cr_write = from->intercept_cr_write;
  1384. dst->intercept_dr_read = from->intercept_dr_read;
  1385. dst->intercept_dr_write = from->intercept_dr_write;
  1386. dst->intercept_exceptions = from->intercept_exceptions;
  1387. dst->intercept = from->intercept;
  1388. dst->iopm_base_pa = from->iopm_base_pa;
  1389. dst->msrpm_base_pa = from->msrpm_base_pa;
  1390. dst->tsc_offset = from->tsc_offset;
  1391. dst->asid = from->asid;
  1392. dst->tlb_ctl = from->tlb_ctl;
  1393. dst->int_ctl = from->int_ctl;
  1394. dst->int_vector = from->int_vector;
  1395. dst->int_state = from->int_state;
  1396. dst->exit_code = from->exit_code;
  1397. dst->exit_code_hi = from->exit_code_hi;
  1398. dst->exit_info_1 = from->exit_info_1;
  1399. dst->exit_info_2 = from->exit_info_2;
  1400. dst->exit_int_info = from->exit_int_info;
  1401. dst->exit_int_info_err = from->exit_int_info_err;
  1402. dst->nested_ctl = from->nested_ctl;
  1403. dst->event_inj = from->event_inj;
  1404. dst->event_inj_err = from->event_inj_err;
  1405. dst->nested_cr3 = from->nested_cr3;
  1406. dst->lbr_ctl = from->lbr_ctl;
  1407. }
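/*
 * Emulate a #VMEXIT from the nested guest back to the guest hypervisor:
 * copy the current guest state and exit information into the nested VMCB
 * the guest provided, then restore the state that was stashed in the
 * hsave area when VMRUN was emulated.
 */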
  1408. static int nested_svm_vmexit(struct vcpu_svm *svm)
  1409. {
  1410. struct vmcb *nested_vmcb;
  1411. struct vmcb *hsave = svm->nested.hsave;
  1412. struct vmcb *vmcb = svm->vmcb;
  1413. struct page *page;
  1414. trace_kvm_nested_vmexit_inject(vmcb->control.exit_code,
  1415. vmcb->control.exit_info_1,
  1416. vmcb->control.exit_info_2,
  1417. vmcb->control.exit_int_info,
  1418. vmcb->control.exit_int_info_err);
  1419. nested_vmcb = nested_svm_map(svm, svm->nested.vmcb, &page);
  1420. if (!nested_vmcb)
  1421. return 1;
  1422. /* Exit nested SVM mode */
  1423. svm->nested.vmcb = 0;
  1424. /* Give the current vmcb to the guest */
  1425. disable_gif(svm);
  1426. nested_vmcb->save.es = vmcb->save.es;
  1427. nested_vmcb->save.cs = vmcb->save.cs;
  1428. nested_vmcb->save.ss = vmcb->save.ss;
  1429. nested_vmcb->save.ds = vmcb->save.ds;
  1430. nested_vmcb->save.gdtr = vmcb->save.gdtr;
  1431. nested_vmcb->save.idtr = vmcb->save.idtr;
  1432. nested_vmcb->save.cr0 = kvm_read_cr0(&svm->vcpu);
  1433. if (npt_enabled)
  1434. nested_vmcb->save.cr3 = vmcb->save.cr3;
  1435. else
  1436. nested_vmcb->save.cr3 = svm->vcpu.arch.cr3;
  1437. nested_vmcb->save.cr2 = vmcb->save.cr2;
  1438. nested_vmcb->save.cr4 = svm->vcpu.arch.cr4;
  1439. nested_vmcb->save.rflags = vmcb->save.rflags;
  1440. nested_vmcb->save.rip = vmcb->save.rip;
  1441. nested_vmcb->save.rsp = vmcb->save.rsp;
  1442. nested_vmcb->save.rax = vmcb->save.rax;
  1443. nested_vmcb->save.dr7 = vmcb->save.dr7;
  1444. nested_vmcb->save.dr6 = vmcb->save.dr6;
  1445. nested_vmcb->save.cpl = vmcb->save.cpl;
  1446. nested_vmcb->control.int_ctl = vmcb->control.int_ctl;
  1447. nested_vmcb->control.int_vector = vmcb->control.int_vector;
  1448. nested_vmcb->control.int_state = vmcb->control.int_state;
  1449. nested_vmcb->control.exit_code = vmcb->control.exit_code;
  1450. nested_vmcb->control.exit_code_hi = vmcb->control.exit_code_hi;
  1451. nested_vmcb->control.exit_info_1 = vmcb->control.exit_info_1;
  1452. nested_vmcb->control.exit_info_2 = vmcb->control.exit_info_2;
  1453. nested_vmcb->control.exit_int_info = vmcb->control.exit_int_info;
  1454. nested_vmcb->control.exit_int_info_err = vmcb->control.exit_int_info_err;
  1455. /*
  1456. * If we emulate a VMRUN/#VMEXIT in the same host #vmexit cycle we have
  1457. * to make sure that we do not lose injected events. So check event_inj
  1458. * here and copy it to exit_int_info if it is valid.
1459. * Exit_int_info and event_inj can't both be valid because the case
  1460. * below only happens on a VMRUN instruction intercept which has
  1461. * no valid exit_int_info set.
  1462. */
  1463. if (vmcb->control.event_inj & SVM_EVTINJ_VALID) {
  1464. struct vmcb_control_area *nc = &nested_vmcb->control;
  1465. nc->exit_int_info = vmcb->control.event_inj;
  1466. nc->exit_int_info_err = vmcb->control.event_inj_err;
  1467. }
  1468. nested_vmcb->control.tlb_ctl = 0;
  1469. nested_vmcb->control.event_inj = 0;
  1470. nested_vmcb->control.event_inj_err = 0;
  1471. /* We always set V_INTR_MASKING and remember the old value in hflags */
  1472. if (!(svm->vcpu.arch.hflags & HF_VINTR_MASK))
  1473. nested_vmcb->control.int_ctl &= ~V_INTR_MASKING_MASK;
  1474. /* Restore the original control entries */
  1475. copy_vmcb_control_area(vmcb, hsave);
  1476. kvm_clear_exception_queue(&svm->vcpu);
  1477. kvm_clear_interrupt_queue(&svm->vcpu);
  1478. /* Restore selected save entries */
  1479. svm->vmcb->save.es = hsave->save.es;
  1480. svm->vmcb->save.cs = hsave->save.cs;
  1481. svm->vmcb->save.ss = hsave->save.ss;
  1482. svm->vmcb->save.ds = hsave->save.ds;
  1483. svm->vmcb->save.gdtr = hsave->save.gdtr;
  1484. svm->vmcb->save.idtr = hsave->save.idtr;
  1485. svm->vmcb->save.rflags = hsave->save.rflags;
  1486. svm_set_efer(&svm->vcpu, hsave->save.efer);
  1487. svm_set_cr0(&svm->vcpu, hsave->save.cr0 | X86_CR0_PE);
  1488. svm_set_cr4(&svm->vcpu, hsave->save.cr4);
  1489. if (npt_enabled) {
  1490. svm->vmcb->save.cr3 = hsave->save.cr3;
  1491. svm->vcpu.arch.cr3 = hsave->save.cr3;
  1492. } else {
  1493. kvm_set_cr3(&svm->vcpu, hsave->save.cr3);
  1494. }
  1495. kvm_register_write(&svm->vcpu, VCPU_REGS_RAX, hsave->save.rax);
  1496. kvm_register_write(&svm->vcpu, VCPU_REGS_RSP, hsave->save.rsp);
  1497. kvm_register_write(&svm->vcpu, VCPU_REGS_RIP, hsave->save.rip);
  1498. svm->vmcb->save.dr7 = 0;
  1499. svm->vmcb->save.cpl = 0;
  1500. svm->vmcb->control.exit_int_info = 0;
  1501. nested_svm_unmap(page);
  1502. kvm_mmu_reset_context(&svm->vcpu);
  1503. kvm_mmu_load(&svm->vcpu);
  1504. return 0;
  1505. }
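/*
 * Merge the nested guest's MSR permission map with our own: a bit set in
 * either bitmap stays set, so the nested guest can never gain MSR access
 * that the host map already intercepts.
 */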
  1506. static bool nested_svm_vmrun_msrpm(struct vcpu_svm *svm)
  1507. {
  1508. u32 *nested_msrpm;
  1509. struct page *page;
  1510. int i;
  1511. nested_msrpm = nested_svm_map(svm, svm->nested.vmcb_msrpm, &page);
  1512. if (!nested_msrpm)
  1513. return false;
  1514. for (i = 0; i < PAGE_SIZE * (1 << MSRPM_ALLOC_ORDER) / 4; i++)
  1515. svm->nested.msrpm[i] = svm->msrpm[i] | nested_msrpm[i];
  1516. svm->vmcb->control.msrpm_base_pa = __pa(svm->nested.msrpm);
  1517. nested_svm_unmap(page);
  1518. return true;
  1519. }
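/*
 * Emulate VMRUN: map the nested VMCB addressed by RAX, stash the current
 * guest state in the hsave area, load the nested guest state into the
 * active VMCB and merge the nested intercept masks so the nested guest
 * cannot bypass intercepts set by its hypervisor.
 */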
  1520. static bool nested_svm_vmrun(struct vcpu_svm *svm)
  1521. {
  1522. struct vmcb *nested_vmcb;
  1523. struct vmcb *hsave = svm->nested.hsave;
  1524. struct vmcb *vmcb = svm->vmcb;
  1525. struct page *page;
  1526. u64 vmcb_gpa;
  1527. vmcb_gpa = svm->vmcb->save.rax;
  1528. nested_vmcb = nested_svm_map(svm, svm->vmcb->save.rax, &page);
  1529. if (!nested_vmcb)
  1530. return false;
  1531. trace_kvm_nested_vmrun(svm->vmcb->save.rip - 3, vmcb_gpa,
  1532. nested_vmcb->save.rip,
  1533. nested_vmcb->control.int_ctl,
  1534. nested_vmcb->control.event_inj,
  1535. nested_vmcb->control.nested_ctl);
  1536. trace_kvm_nested_intercepts(nested_vmcb->control.intercept_cr_read,
  1537. nested_vmcb->control.intercept_cr_write,
  1538. nested_vmcb->control.intercept_exceptions,
  1539. nested_vmcb->control.intercept);
  1540. /* Clear internal status */
  1541. kvm_clear_exception_queue(&svm->vcpu);
  1542. kvm_clear_interrupt_queue(&svm->vcpu);
  1543. /*
  1544. * Save the old vmcb, so we don't need to pick what we save, but can
  1545. * restore everything when a VMEXIT occurs
  1546. */
  1547. hsave->save.es = vmcb->save.es;
  1548. hsave->save.cs = vmcb->save.cs;
  1549. hsave->save.ss = vmcb->save.ss;
  1550. hsave->save.ds = vmcb->save.ds;
  1551. hsave->save.gdtr = vmcb->save.gdtr;
  1552. hsave->save.idtr = vmcb->save.idtr;
  1553. hsave->save.efer = svm->vcpu.arch.efer;
  1554. hsave->save.cr0 = kvm_read_cr0(&svm->vcpu);
  1555. hsave->save.cr4 = svm->vcpu.arch.cr4;
  1556. hsave->save.rflags = vmcb->save.rflags;
  1557. hsave->save.rip = svm->next_rip;
  1558. hsave->save.rsp = vmcb->save.rsp;
  1559. hsave->save.rax = vmcb->save.rax;
  1560. if (npt_enabled)
  1561. hsave->save.cr3 = vmcb->save.cr3;
  1562. else
  1563. hsave->save.cr3 = svm->vcpu.arch.cr3;
  1564. copy_vmcb_control_area(hsave, vmcb);
  1565. if (svm->vmcb->save.rflags & X86_EFLAGS_IF)
  1566. svm->vcpu.arch.hflags |= HF_HIF_MASK;
  1567. else
  1568. svm->vcpu.arch.hflags &= ~HF_HIF_MASK;
  1569. /* Load the nested guest state */
  1570. svm->vmcb->save.es = nested_vmcb->save.es;
  1571. svm->vmcb->save.cs = nested_vmcb->save.cs;
  1572. svm->vmcb->save.ss = nested_vmcb->save.ss;
  1573. svm->vmcb->save.ds = nested_vmcb->save.ds;
  1574. svm->vmcb->save.gdtr = nested_vmcb->save.gdtr;
  1575. svm->vmcb->save.idtr = nested_vmcb->save.idtr;
  1576. svm->vmcb->save.rflags = nested_vmcb->save.rflags;
  1577. svm_set_efer(&svm->vcpu, nested_vmcb->save.efer);
  1578. svm_set_cr0(&svm->vcpu, nested_vmcb->save.cr0);
  1579. svm_set_cr4(&svm->vcpu, nested_vmcb->save.cr4);
  1580. if (npt_enabled) {
  1581. svm->vmcb->save.cr3 = nested_vmcb->save.cr3;
  1582. svm->vcpu.arch.cr3 = nested_vmcb->save.cr3;
  1583. } else
  1584. kvm_set_cr3(&svm->vcpu, nested_vmcb->save.cr3);
  1585. /* Guest paging mode is active - reset mmu */
  1586. kvm_mmu_reset_context(&svm->vcpu);
  1587. svm->vmcb->save.cr2 = svm->vcpu.arch.cr2 = nested_vmcb->save.cr2;
  1588. kvm_register_write(&svm->vcpu, VCPU_REGS_RAX, nested_vmcb->save.rax);
  1589. kvm_register_write(&svm->vcpu, VCPU_REGS_RSP, nested_vmcb->save.rsp);
  1590. kvm_register_write(&svm->vcpu, VCPU_REGS_RIP, nested_vmcb->save.rip);
  1591. /* In case we don't even reach vcpu_run, the fields are not updated */
  1592. svm->vmcb->save.rax = nested_vmcb->save.rax;
  1593. svm->vmcb->save.rsp = nested_vmcb->save.rsp;
  1594. svm->vmcb->save.rip = nested_vmcb->save.rip;
  1595. svm->vmcb->save.dr7 = nested_vmcb->save.dr7;
  1596. svm->vmcb->save.dr6 = nested_vmcb->save.dr6;
  1597. svm->vmcb->save.cpl = nested_vmcb->save.cpl;
  1598. svm->nested.vmcb_msrpm = nested_vmcb->control.msrpm_base_pa;
  1599. /* cache intercepts */
  1600. svm->nested.intercept_cr_read = nested_vmcb->control.intercept_cr_read;
  1601. svm->nested.intercept_cr_write = nested_vmcb->control.intercept_cr_write;
  1602. svm->nested.intercept_dr_read = nested_vmcb->control.intercept_dr_read;
  1603. svm->nested.intercept_dr_write = nested_vmcb->control.intercept_dr_write;
  1604. svm->nested.intercept_exceptions = nested_vmcb->control.intercept_exceptions;
  1605. svm->nested.intercept = nested_vmcb->control.intercept;
  1606. force_new_asid(&svm->vcpu);
  1607. svm->vmcb->control.int_ctl = nested_vmcb->control.int_ctl | V_INTR_MASKING_MASK;
  1608. if (nested_vmcb->control.int_ctl & V_INTR_MASKING_MASK)
  1609. svm->vcpu.arch.hflags |= HF_VINTR_MASK;
  1610. else
  1611. svm->vcpu.arch.hflags &= ~HF_VINTR_MASK;
  1612. if (svm->vcpu.arch.hflags & HF_VINTR_MASK) {
  1613. /* We only want the cr8 intercept bits of the guest */
  1614. svm->vmcb->control.intercept_cr_read &= ~INTERCEPT_CR8_MASK;
  1615. svm->vmcb->control.intercept_cr_write &= ~INTERCEPT_CR8_MASK;
  1616. }
  1617. /*
1618. * We don't want the nested guest to be able to bypass intercepts set by
1619. * its hypervisor (our guest), so all intercepts are ORed
  1620. */
  1621. svm->vmcb->control.intercept_cr_read |=
  1622. nested_vmcb->control.intercept_cr_read;
  1623. svm->vmcb->control.intercept_cr_write |=
  1624. nested_vmcb->control.intercept_cr_write;
  1625. svm->vmcb->control.intercept_dr_read |=
  1626. nested_vmcb->control.intercept_dr_read;
  1627. svm->vmcb->control.intercept_dr_write |=
  1628. nested_vmcb->control.intercept_dr_write;
  1629. svm->vmcb->control.intercept_exceptions |=
  1630. nested_vmcb->control.intercept_exceptions;
  1631. svm->vmcb->control.intercept |= nested_vmcb->control.intercept;
  1632. svm->vmcb->control.lbr_ctl = nested_vmcb->control.lbr_ctl;
  1633. svm->vmcb->control.int_vector = nested_vmcb->control.int_vector;
  1634. svm->vmcb->control.int_state = nested_vmcb->control.int_state;
  1635. svm->vmcb->control.tsc_offset += nested_vmcb->control.tsc_offset;
  1636. svm->vmcb->control.event_inj = nested_vmcb->control.event_inj;
  1637. svm->vmcb->control.event_inj_err = nested_vmcb->control.event_inj_err;
  1638. nested_svm_unmap(page);
1639. /* nested_vmcb is our indicator whether nested SVM is activated */
  1640. svm->nested.vmcb = vmcb_gpa;
  1641. enable_gif(svm);
  1642. return true;
  1643. }
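/*
 * Copy the state touched by VMLOAD/VMSAVE (FS, GS, TR, LDTR and the
 * SYSCALL/SYSENTER MSR state) from one VMCB to another.
 */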
  1644. static void nested_svm_vmloadsave(struct vmcb *from_vmcb, struct vmcb *to_vmcb)
  1645. {
  1646. to_vmcb->save.fs = from_vmcb->save.fs;
  1647. to_vmcb->save.gs = from_vmcb->save.gs;
  1648. to_vmcb->save.tr = from_vmcb->save.tr;
  1649. to_vmcb->save.ldtr = from_vmcb->save.ldtr;
  1650. to_vmcb->save.kernel_gs_base = from_vmcb->save.kernel_gs_base;
  1651. to_vmcb->save.star = from_vmcb->save.star;
  1652. to_vmcb->save.lstar = from_vmcb->save.lstar;
  1653. to_vmcb->save.cstar = from_vmcb->save.cstar;
  1654. to_vmcb->save.sfmask = from_vmcb->save.sfmask;
  1655. to_vmcb->save.sysenter_cs = from_vmcb->save.sysenter_cs;
  1656. to_vmcb->save.sysenter_esp = from_vmcb->save.sysenter_esp;
  1657. to_vmcb->save.sysenter_eip = from_vmcb->save.sysenter_eip;
  1658. }
  1659. static int vmload_interception(struct vcpu_svm *svm)
  1660. {
  1661. struct vmcb *nested_vmcb;
  1662. struct page *page;
  1663. if (nested_svm_check_permissions(svm))
  1664. return 1;
  1665. svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
  1666. skip_emulated_instruction(&svm->vcpu);
  1667. nested_vmcb = nested_svm_map(svm, svm->vmcb->save.rax, &page);
  1668. if (!nested_vmcb)
  1669. return 1;
  1670. nested_svm_vmloadsave(nested_vmcb, svm->vmcb);
  1671. nested_svm_unmap(page);
  1672. return 1;
  1673. }
  1674. static int vmsave_interception(struct vcpu_svm *svm)
  1675. {
  1676. struct vmcb *nested_vmcb;
  1677. struct page *page;
  1678. if (nested_svm_check_permissions(svm))
  1679. return 1;
  1680. svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
  1681. skip_emulated_instruction(&svm->vcpu);
  1682. nested_vmcb = nested_svm_map(svm, svm->vmcb->save.rax, &page);
  1683. if (!nested_vmcb)
  1684. return 1;
  1685. nested_svm_vmloadsave(svm->vmcb, nested_vmcb);
  1686. nested_svm_unmap(page);
  1687. return 1;
  1688. }
  1689. static int vmrun_interception(struct vcpu_svm *svm)
  1690. {
  1691. if (nested_svm_check_permissions(svm))
  1692. return 1;
  1693. svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
  1694. skip_emulated_instruction(&svm->vcpu);
  1695. if (!nested_svm_vmrun(svm))
  1696. return 1;
  1697. if (!nested_svm_vmrun_msrpm(svm))
  1698. goto failed;
  1699. return 1;
  1700. failed:
  1701. svm->vmcb->control.exit_code = SVM_EXIT_ERR;
  1702. svm->vmcb->control.exit_code_hi = 0;
  1703. svm->vmcb->control.exit_info_1 = 0;
  1704. svm->vmcb->control.exit_info_2 = 0;
  1705. nested_svm_vmexit(svm);
  1706. return 1;
  1707. }
  1708. static int stgi_interception(struct vcpu_svm *svm)
  1709. {
  1710. if (nested_svm_check_permissions(svm))
  1711. return 1;
  1712. svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
  1713. skip_emulated_instruction(&svm->vcpu);
  1714. enable_gif(svm);
  1715. return 1;
  1716. }
  1717. static int clgi_interception(struct vcpu_svm *svm)
  1718. {
  1719. if (nested_svm_check_permissions(svm))
  1720. return 1;
  1721. svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
  1722. skip_emulated_instruction(&svm->vcpu);
  1723. disable_gif(svm);
  1724. /* After a CLGI no interrupts should come */
  1725. svm_clear_vintr(svm);
  1726. svm->vmcb->control.int_ctl &= ~V_IRQ_MASK;
  1727. return 1;
  1728. }
  1729. static int invlpga_interception(struct vcpu_svm *svm)
  1730. {
  1731. struct kvm_vcpu *vcpu = &svm->vcpu;
  1732. trace_kvm_invlpga(svm->vmcb->save.rip, vcpu->arch.regs[VCPU_REGS_RCX],
  1733. vcpu->arch.regs[VCPU_REGS_RAX]);
  1734. /* Let's treat INVLPGA the same as INVLPG (can be optimized!) */
  1735. kvm_mmu_invlpg(vcpu, vcpu->arch.regs[VCPU_REGS_RAX]);
  1736. svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
  1737. skip_emulated_instruction(&svm->vcpu);
  1738. return 1;
  1739. }
  1740. static int skinit_interception(struct vcpu_svm *svm)
  1741. {
  1742. trace_kvm_skinit(svm->vmcb->save.rip, svm->vcpu.arch.regs[VCPU_REGS_RAX]);
  1743. kvm_queue_exception(&svm->vcpu, UD_VECTOR);
  1744. return 1;
  1745. }
  1746. static int invalid_op_interception(struct vcpu_svm *svm)
  1747. {
  1748. kvm_queue_exception(&svm->vcpu, UD_VECTOR);
  1749. return 1;
  1750. }
  1751. static int task_switch_interception(struct vcpu_svm *svm)
  1752. {
  1753. u16 tss_selector;
  1754. int reason;
  1755. int int_type = svm->vmcb->control.exit_int_info &
  1756. SVM_EXITINTINFO_TYPE_MASK;
  1757. int int_vec = svm->vmcb->control.exit_int_info & SVM_EVTINJ_VEC_MASK;
  1758. uint32_t type =
  1759. svm->vmcb->control.exit_int_info & SVM_EXITINTINFO_TYPE_MASK;
  1760. uint32_t idt_v =
  1761. svm->vmcb->control.exit_int_info & SVM_EXITINTINFO_VALID;
  1762. tss_selector = (u16)svm->vmcb->control.exit_info_1;
  1763. if (svm->vmcb->control.exit_info_2 &
  1764. (1ULL << SVM_EXITINFOSHIFT_TS_REASON_IRET))
  1765. reason = TASK_SWITCH_IRET;
  1766. else if (svm->vmcb->control.exit_info_2 &
  1767. (1ULL << SVM_EXITINFOSHIFT_TS_REASON_JMP))
  1768. reason = TASK_SWITCH_JMP;
  1769. else if (idt_v)
  1770. reason = TASK_SWITCH_GATE;
  1771. else
  1772. reason = TASK_SWITCH_CALL;
  1773. if (reason == TASK_SWITCH_GATE) {
  1774. switch (type) {
  1775. case SVM_EXITINTINFO_TYPE_NMI:
  1776. svm->vcpu.arch.nmi_injected = false;
  1777. break;
  1778. case SVM_EXITINTINFO_TYPE_EXEPT:
  1779. kvm_clear_exception_queue(&svm->vcpu);
  1780. break;
  1781. case SVM_EXITINTINFO_TYPE_INTR:
  1782. kvm_clear_interrupt_queue(&svm->vcpu);
  1783. break;
  1784. default:
  1785. break;
  1786. }
  1787. }
  1788. if (reason != TASK_SWITCH_GATE ||
  1789. int_type == SVM_EXITINTINFO_TYPE_SOFT ||
  1790. (int_type == SVM_EXITINTINFO_TYPE_EXEPT &&
  1791. (int_vec == OF_VECTOR || int_vec == BP_VECTOR)))
  1792. skip_emulated_instruction(&svm->vcpu);
  1793. return kvm_task_switch(&svm->vcpu, tss_selector, reason);
  1794. }
  1795. static int cpuid_interception(struct vcpu_svm *svm)
  1796. {
  1797. svm->next_rip = kvm_rip_read(&svm->vcpu) + 2;
  1798. kvm_emulate_cpuid(&svm->vcpu);
  1799. return 1;
  1800. }
  1801. static int iret_interception(struct vcpu_svm *svm)
  1802. {
  1803. ++svm->vcpu.stat.nmi_window_exits;
  1804. svm->vmcb->control.intercept &= ~(1UL << INTERCEPT_IRET);
  1805. svm->vcpu.arch.hflags |= HF_IRET_MASK;
  1806. return 1;
  1807. }
  1808. static int invlpg_interception(struct vcpu_svm *svm)
  1809. {
  1810. if (emulate_instruction(&svm->vcpu, 0, 0, 0) != EMULATE_DONE)
  1811. pr_unimpl(&svm->vcpu, "%s: failed\n", __func__);
  1812. return 1;
  1813. }
  1814. static int emulate_on_interception(struct vcpu_svm *svm)
  1815. {
  1816. if (emulate_instruction(&svm->vcpu, 0, 0, 0) != EMULATE_DONE)
  1817. pr_unimpl(&svm->vcpu, "%s: failed\n", __func__);
  1818. return 1;
  1819. }
  1820. static int cr8_write_interception(struct vcpu_svm *svm)
  1821. {
  1822. struct kvm_run *kvm_run = svm->vcpu.run;
  1823. u8 cr8_prev = kvm_get_cr8(&svm->vcpu);
  1824. /* instruction emulation calls kvm_set_cr8() */
  1825. emulate_instruction(&svm->vcpu, 0, 0, 0);
  1826. if (irqchip_in_kernel(svm->vcpu.kvm)) {
  1827. svm->vmcb->control.intercept_cr_write &= ~INTERCEPT_CR8_MASK;
  1828. return 1;
  1829. }
  1830. if (cr8_prev <= kvm_get_cr8(&svm->vcpu))
  1831. return 1;
  1832. kvm_run->exit_reason = KVM_EXIT_SET_TPR;
  1833. return 0;
  1834. }
  1835. static int svm_get_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 *data)
  1836. {
  1837. struct vcpu_svm *svm = to_svm(vcpu);
  1838. switch (ecx) {
  1839. case MSR_IA32_TSC: {
  1840. u64 tsc_offset;
  1841. if (is_nested(svm))
  1842. tsc_offset = svm->nested.hsave->control.tsc_offset;
  1843. else
  1844. tsc_offset = svm->vmcb->control.tsc_offset;
  1845. *data = tsc_offset + native_read_tsc();
  1846. break;
  1847. }
  1848. case MSR_K6_STAR:
  1849. *data = svm->vmcb->save.star;
  1850. break;
  1851. #ifdef CONFIG_X86_64
  1852. case MSR_LSTAR:
  1853. *data = svm->vmcb->save.lstar;
  1854. break;
  1855. case MSR_CSTAR:
  1856. *data = svm->vmcb->save.cstar;
  1857. break;
  1858. case MSR_KERNEL_GS_BASE:
  1859. *data = svm->vmcb->save.kernel_gs_base;
  1860. break;
  1861. case MSR_SYSCALL_MASK:
  1862. *data = svm->vmcb->save.sfmask;
  1863. break;
  1864. #endif
  1865. case MSR_IA32_SYSENTER_CS:
  1866. *data = svm->vmcb->save.sysenter_cs;
  1867. break;
  1868. case MSR_IA32_SYSENTER_EIP:
  1869. *data = svm->sysenter_eip;
  1870. break;
  1871. case MSR_IA32_SYSENTER_ESP:
  1872. *data = svm->sysenter_esp;
  1873. break;
  1874. /*
  1875. * Nobody will change the following 5 values in the VMCB so we can
  1876. * safely return them on rdmsr. They will always be 0 until LBRV is
  1877. * implemented.
  1878. */
  1879. case MSR_IA32_DEBUGCTLMSR:
  1880. *data = svm->vmcb->save.dbgctl;
  1881. break;
  1882. case MSR_IA32_LASTBRANCHFROMIP:
  1883. *data = svm->vmcb->save.br_from;
  1884. break;
  1885. case MSR_IA32_LASTBRANCHTOIP:
  1886. *data = svm->vmcb->save.br_to;
  1887. break;
  1888. case MSR_IA32_LASTINTFROMIP:
  1889. *data = svm->vmcb->save.last_excp_from;
  1890. break;
  1891. case MSR_IA32_LASTINTTOIP:
  1892. *data = svm->vmcb->save.last_excp_to;
  1893. break;
  1894. case MSR_VM_HSAVE_PA:
  1895. *data = svm->nested.hsave_msr;
  1896. break;
  1897. case MSR_VM_CR:
  1898. *data = svm->nested.vm_cr_msr;
  1899. break;
  1900. case MSR_IA32_UCODE_REV:
  1901. *data = 0x01000065;
  1902. break;
  1903. default:
  1904. return kvm_get_msr_common(vcpu, ecx, data);
  1905. }
  1906. return 0;
  1907. }
  1908. static int rdmsr_interception(struct vcpu_svm *svm)
  1909. {
  1910. u32 ecx = svm->vcpu.arch.regs[VCPU_REGS_RCX];
  1911. u64 data;
  1912. if (svm_get_msr(&svm->vcpu, ecx, &data)) {
  1913. trace_kvm_msr_read_ex(ecx);
  1914. kvm_inject_gp(&svm->vcpu, 0);
  1915. } else {
  1916. trace_kvm_msr_read(ecx, data);
  1917. svm->vcpu.arch.regs[VCPU_REGS_RAX] = data & 0xffffffff;
  1918. svm->vcpu.arch.regs[VCPU_REGS_RDX] = data >> 32;
  1919. svm->next_rip = kvm_rip_read(&svm->vcpu) + 2;
  1920. skip_emulated_instruction(&svm->vcpu);
  1921. }
  1922. return 1;
  1923. }
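/*
 * Handle writes to MSR_VM_CR: only the architecturally valid bits may be
 * changed, the LOCK and DIS bits become sticky once SVM_DIS is set, and
 * disabling SVM while EFER.SVME is still set is rejected.
 */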
  1924. static int svm_set_vm_cr(struct kvm_vcpu *vcpu, u64 data)
  1925. {
  1926. struct vcpu_svm *svm = to_svm(vcpu);
  1927. int svm_dis, chg_mask;
  1928. if (data & ~SVM_VM_CR_VALID_MASK)
  1929. return 1;
  1930. chg_mask = SVM_VM_CR_VALID_MASK;
  1931. if (svm->nested.vm_cr_msr & SVM_VM_CR_SVM_DIS_MASK)
  1932. chg_mask &= ~(SVM_VM_CR_SVM_LOCK_MASK | SVM_VM_CR_SVM_DIS_MASK);
  1933. svm->nested.vm_cr_msr &= ~chg_mask;
  1934. svm->nested.vm_cr_msr |= (data & chg_mask);
  1935. svm_dis = svm->nested.vm_cr_msr & SVM_VM_CR_SVM_DIS_MASK;
  1936. /* check for svm_disable while efer.svme is set */
  1937. if (svm_dis && (vcpu->arch.efer & EFER_SVME))
  1938. return 1;
  1939. return 0;
  1940. }
  1941. static int svm_set_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 data)
  1942. {
  1943. struct vcpu_svm *svm = to_svm(vcpu);
  1944. switch (ecx) {
  1945. case MSR_IA32_TSC: {
  1946. u64 tsc_offset = data - native_read_tsc();
  1947. u64 g_tsc_offset = 0;
  1948. if (is_nested(svm)) {
  1949. g_tsc_offset = svm->vmcb->control.tsc_offset -
  1950. svm->nested.hsave->control.tsc_offset;
  1951. svm->nested.hsave->control.tsc_offset = tsc_offset;
  1952. }
  1953. svm->vmcb->control.tsc_offset = tsc_offset + g_tsc_offset;
  1954. break;
  1955. }
  1956. case MSR_K6_STAR:
  1957. svm->vmcb->save.star = data;
  1958. break;
  1959. #ifdef CONFIG_X86_64
  1960. case MSR_LSTAR:
  1961. svm->vmcb->save.lstar = data;
  1962. break;
  1963. case MSR_CSTAR:
  1964. svm->vmcb->save.cstar = data;
  1965. break;
  1966. case MSR_KERNEL_GS_BASE:
  1967. svm->vmcb->save.kernel_gs_base = data;
  1968. break;
  1969. case MSR_SYSCALL_MASK:
  1970. svm->vmcb->save.sfmask = data;
  1971. break;
  1972. #endif
  1973. case MSR_IA32_SYSENTER_CS:
  1974. svm->vmcb->save.sysenter_cs = data;
  1975. break;
  1976. case MSR_IA32_SYSENTER_EIP:
  1977. svm->sysenter_eip = data;
  1978. svm->vmcb->save.sysenter_eip = data;
  1979. break;
  1980. case MSR_IA32_SYSENTER_ESP:
  1981. svm->sysenter_esp = data;
  1982. svm->vmcb->save.sysenter_esp = data;
  1983. break;
  1984. case MSR_IA32_DEBUGCTLMSR:
  1985. if (!svm_has(SVM_FEATURE_LBRV)) {
  1986. pr_unimpl(vcpu, "%s: MSR_IA32_DEBUGCTL 0x%llx, nop\n",
  1987. __func__, data);
  1988. break;
  1989. }
  1990. if (data & DEBUGCTL_RESERVED_BITS)
  1991. return 1;
  1992. svm->vmcb->save.dbgctl = data;
  1993. if (data & (1ULL<<0))
  1994. svm_enable_lbrv(svm);
  1995. else
  1996. svm_disable_lbrv(svm);
  1997. break;
  1998. case MSR_VM_HSAVE_PA:
  1999. svm->nested.hsave_msr = data;
  2000. break;
  2001. case MSR_VM_CR:
  2002. return svm_set_vm_cr(vcpu, data);
  2003. case MSR_VM_IGNNE:
  2004. pr_unimpl(vcpu, "unimplemented wrmsr: 0x%x data 0x%llx\n", ecx, data);
  2005. break;
  2006. default:
  2007. return kvm_set_msr_common(vcpu, ecx, data);
  2008. }
  2009. return 0;
  2010. }
  2011. static int wrmsr_interception(struct vcpu_svm *svm)
  2012. {
  2013. u32 ecx = svm->vcpu.arch.regs[VCPU_REGS_RCX];
  2014. u64 data = (svm->vcpu.arch.regs[VCPU_REGS_RAX] & -1u)
  2015. | ((u64)(svm->vcpu.arch.regs[VCPU_REGS_RDX] & -1u) << 32);
  2016. svm->next_rip = kvm_rip_read(&svm->vcpu) + 2;
  2017. if (svm_set_msr(&svm->vcpu, ecx, data)) {
  2018. trace_kvm_msr_write_ex(ecx, data);
  2019. kvm_inject_gp(&svm->vcpu, 0);
  2020. } else {
  2021. trace_kvm_msr_write(ecx, data);
  2022. skip_emulated_instruction(&svm->vcpu);
  2023. }
  2024. return 1;
  2025. }
  2026. static int msr_interception(struct vcpu_svm *svm)
  2027. {
  2028. if (svm->vmcb->control.exit_info_1)
  2029. return wrmsr_interception(svm);
  2030. else
  2031. return rdmsr_interception(svm);
  2032. }
  2033. static int interrupt_window_interception(struct vcpu_svm *svm)
  2034. {
  2035. struct kvm_run *kvm_run = svm->vcpu.run;
  2036. svm_clear_vintr(svm);
  2037. svm->vmcb->control.int_ctl &= ~V_IRQ_MASK;
  2038. /*
2039. * If user space is waiting to inject interrupts, exit as soon as
  2040. * possible
  2041. */
  2042. if (!irqchip_in_kernel(svm->vcpu.kvm) &&
  2043. kvm_run->request_interrupt_window &&
  2044. !kvm_cpu_has_interrupt(&svm->vcpu)) {
  2045. ++svm->vcpu.stat.irq_window_exits;
  2046. kvm_run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN;
  2047. return 0;
  2048. }
  2049. return 1;
  2050. }
  2051. static int pause_interception(struct vcpu_svm *svm)
  2052. {
  2053. kvm_vcpu_on_spin(&(svm->vcpu));
  2054. return 1;
  2055. }
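/*
 * Dispatch table from SVM exit codes to their handlers. Exit codes without
 * an entry here are reported to user space as KVM_EXIT_UNKNOWN by
 * handle_exit().
 */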
  2056. static int (*svm_exit_handlers[])(struct vcpu_svm *svm) = {
  2057. [SVM_EXIT_READ_CR0] = emulate_on_interception,
  2058. [SVM_EXIT_READ_CR3] = emulate_on_interception,
  2059. [SVM_EXIT_READ_CR4] = emulate_on_interception,
  2060. [SVM_EXIT_READ_CR8] = emulate_on_interception,
  2061. [SVM_EXIT_CR0_SEL_WRITE] = emulate_on_interception,
  2062. [SVM_EXIT_WRITE_CR0] = emulate_on_interception,
  2063. [SVM_EXIT_WRITE_CR3] = emulate_on_interception,
  2064. [SVM_EXIT_WRITE_CR4] = emulate_on_interception,
  2065. [SVM_EXIT_WRITE_CR8] = cr8_write_interception,
  2066. [SVM_EXIT_READ_DR0] = emulate_on_interception,
  2067. [SVM_EXIT_READ_DR1] = emulate_on_interception,
  2068. [SVM_EXIT_READ_DR2] = emulate_on_interception,
  2069. [SVM_EXIT_READ_DR3] = emulate_on_interception,
  2070. [SVM_EXIT_READ_DR4] = emulate_on_interception,
  2071. [SVM_EXIT_READ_DR5] = emulate_on_interception,
  2072. [SVM_EXIT_READ_DR6] = emulate_on_interception,
  2073. [SVM_EXIT_READ_DR7] = emulate_on_interception,
  2074. [SVM_EXIT_WRITE_DR0] = emulate_on_interception,
  2075. [SVM_EXIT_WRITE_DR1] = emulate_on_interception,
  2076. [SVM_EXIT_WRITE_DR2] = emulate_on_interception,
  2077. [SVM_EXIT_WRITE_DR3] = emulate_on_interception,
  2078. [SVM_EXIT_WRITE_DR4] = emulate_on_interception,
  2079. [SVM_EXIT_WRITE_DR5] = emulate_on_interception,
  2080. [SVM_EXIT_WRITE_DR6] = emulate_on_interception,
  2081. [SVM_EXIT_WRITE_DR7] = emulate_on_interception,
  2082. [SVM_EXIT_EXCP_BASE + DB_VECTOR] = db_interception,
  2083. [SVM_EXIT_EXCP_BASE + BP_VECTOR] = bp_interception,
  2084. [SVM_EXIT_EXCP_BASE + UD_VECTOR] = ud_interception,
  2085. [SVM_EXIT_EXCP_BASE + PF_VECTOR] = pf_interception,
  2086. [SVM_EXIT_EXCP_BASE + NM_VECTOR] = nm_interception,
  2087. [SVM_EXIT_EXCP_BASE + MC_VECTOR] = mc_interception,
  2088. [SVM_EXIT_INTR] = intr_interception,
  2089. [SVM_EXIT_NMI] = nmi_interception,
  2090. [SVM_EXIT_SMI] = nop_on_interception,
  2091. [SVM_EXIT_INIT] = nop_on_interception,
  2092. [SVM_EXIT_VINTR] = interrupt_window_interception,
  2093. [SVM_EXIT_CPUID] = cpuid_interception,
  2094. [SVM_EXIT_IRET] = iret_interception,
  2095. [SVM_EXIT_INVD] = emulate_on_interception,
  2096. [SVM_EXIT_PAUSE] = pause_interception,
  2097. [SVM_EXIT_HLT] = halt_interception,
  2098. [SVM_EXIT_INVLPG] = invlpg_interception,
  2099. [SVM_EXIT_INVLPGA] = invlpga_interception,
  2100. [SVM_EXIT_IOIO] = io_interception,
  2101. [SVM_EXIT_MSR] = msr_interception,
  2102. [SVM_EXIT_TASK_SWITCH] = task_switch_interception,
  2103. [SVM_EXIT_SHUTDOWN] = shutdown_interception,
  2104. [SVM_EXIT_VMRUN] = vmrun_interception,
  2105. [SVM_EXIT_VMMCALL] = vmmcall_interception,
  2106. [SVM_EXIT_VMLOAD] = vmload_interception,
  2107. [SVM_EXIT_VMSAVE] = vmsave_interception,
  2108. [SVM_EXIT_STGI] = stgi_interception,
  2109. [SVM_EXIT_CLGI] = clgi_interception,
  2110. [SVM_EXIT_SKINIT] = skinit_interception,
  2111. [SVM_EXIT_WBINVD] = emulate_on_interception,
  2112. [SVM_EXIT_MONITOR] = invalid_op_interception,
  2113. [SVM_EXIT_MWAIT] = invalid_op_interception,
  2114. [SVM_EXIT_NPF] = pf_interception,
  2115. };
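/*
 * Top-level exit handler: finish any #VMEXIT emulation that was requested
 * in atomic context, give a nested guest the chance to handle the exit
 * itself, and only then dispatch to the svm_exit_handlers table above.
 */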
  2116. static int handle_exit(struct kvm_vcpu *vcpu)
  2117. {
  2118. struct vcpu_svm *svm = to_svm(vcpu);
  2119. struct kvm_run *kvm_run = vcpu->run;
  2120. u32 exit_code = svm->vmcb->control.exit_code;
  2121. trace_kvm_exit(exit_code, svm->vmcb->save.rip);
  2122. if (unlikely(svm->nested.exit_required)) {
  2123. nested_svm_vmexit(svm);
  2124. svm->nested.exit_required = false;
  2125. return 1;
  2126. }
  2127. if (is_nested(svm)) {
  2128. int vmexit;
  2129. trace_kvm_nested_vmexit(svm->vmcb->save.rip, exit_code,
  2130. svm->vmcb->control.exit_info_1,
  2131. svm->vmcb->control.exit_info_2,
  2132. svm->vmcb->control.exit_int_info,
  2133. svm->vmcb->control.exit_int_info_err);
  2134. vmexit = nested_svm_exit_special(svm);
  2135. if (vmexit == NESTED_EXIT_CONTINUE)
  2136. vmexit = nested_svm_exit_handled(svm);
  2137. if (vmexit == NESTED_EXIT_DONE)
  2138. return 1;
  2139. }
  2140. svm_complete_interrupts(svm);
  2141. if (!(svm->vmcb->control.intercept_cr_write & INTERCEPT_CR0_MASK))
  2142. vcpu->arch.cr0 = svm->vmcb->save.cr0;
  2143. if (npt_enabled)
  2144. vcpu->arch.cr3 = svm->vmcb->save.cr3;
  2145. if (svm->vmcb->control.exit_code == SVM_EXIT_ERR) {
  2146. kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY;
  2147. kvm_run->fail_entry.hardware_entry_failure_reason
  2148. = svm->vmcb->control.exit_code;
  2149. return 0;
  2150. }
  2151. if (is_external_interrupt(svm->vmcb->control.exit_int_info) &&
  2152. exit_code != SVM_EXIT_EXCP_BASE + PF_VECTOR &&
  2153. exit_code != SVM_EXIT_NPF && exit_code != SVM_EXIT_TASK_SWITCH)
2154. printk(KERN_ERR "%s: unexpected exit_int_info 0x%x "
  2155. "exit_code 0x%x\n",
  2156. __func__, svm->vmcb->control.exit_int_info,
  2157. exit_code);
  2158. if (exit_code >= ARRAY_SIZE(svm_exit_handlers)
  2159. || !svm_exit_handlers[exit_code]) {
  2160. kvm_run->exit_reason = KVM_EXIT_UNKNOWN;
  2161. kvm_run->hw.hardware_exit_reason = exit_code;
  2162. return 0;
  2163. }
  2164. return svm_exit_handlers[exit_code](svm);
  2165. }
  2166. static void reload_tss(struct kvm_vcpu *vcpu)
  2167. {
  2168. int cpu = raw_smp_processor_id();
  2169. struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
  2170. sd->tss_desc->type = 9; /* available 32/64-bit TSS */
  2171. load_TR_desc();
  2172. }
  2173. static void pre_svm_run(struct vcpu_svm *svm)
  2174. {
  2175. int cpu = raw_smp_processor_id();
  2176. struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
  2177. svm->vmcb->control.tlb_ctl = TLB_CONTROL_DO_NOTHING;
  2178. /* FIXME: handle wraparound of asid_generation */
  2179. if (svm->asid_generation != sd->asid_generation)
  2180. new_asid(svm, sd);
  2181. }
  2182. static void svm_inject_nmi(struct kvm_vcpu *vcpu)
  2183. {
  2184. struct vcpu_svm *svm = to_svm(vcpu);
  2185. svm->vmcb->control.event_inj = SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_NMI;
  2186. vcpu->arch.hflags |= HF_NMI_MASK;
  2187. svm->vmcb->control.intercept |= (1UL << INTERCEPT_IRET);
  2188. ++vcpu->stat.nmi_injections;
  2189. }
  2190. static inline void svm_inject_irq(struct vcpu_svm *svm, int irq)
  2191. {
  2192. struct vmcb_control_area *control;
  2193. trace_kvm_inj_virq(irq);
  2194. ++svm->vcpu.stat.irq_injections;
  2195. control = &svm->vmcb->control;
  2196. control->int_vector = irq;
  2197. control->int_ctl &= ~V_INTR_PRIO_MASK;
  2198. control->int_ctl |= V_IRQ_MASK |
  2199. ((/*control->int_vector >> 4*/ 0xf) << V_INTR_PRIO_SHIFT);
  2200. }
  2201. static void svm_set_irq(struct kvm_vcpu *vcpu)
  2202. {
  2203. struct vcpu_svm *svm = to_svm(vcpu);
  2204. BUG_ON(!(gif_set(svm)));
  2205. svm->vmcb->control.event_inj = vcpu->arch.interrupt.nr |
  2206. SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_INTR;
  2207. }
  2208. static void update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr)
  2209. {
  2210. struct vcpu_svm *svm = to_svm(vcpu);
  2211. if (is_nested(svm) && (vcpu->arch.hflags & HF_VINTR_MASK))
  2212. return;
  2213. if (irr == -1)
  2214. return;
  2215. if (tpr >= irr)
  2216. svm->vmcb->control.intercept_cr_write |= INTERCEPT_CR8_MASK;
  2217. }
  2218. static int svm_nmi_allowed(struct kvm_vcpu *vcpu)
  2219. {
  2220. struct vcpu_svm *svm = to_svm(vcpu);
  2221. struct vmcb *vmcb = svm->vmcb;
  2222. return !(vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK) &&
  2223. !(svm->vcpu.arch.hflags & HF_NMI_MASK);
  2224. }
  2225. static bool svm_get_nmi_mask(struct kvm_vcpu *vcpu)
  2226. {
  2227. struct vcpu_svm *svm = to_svm(vcpu);
  2228. return !!(svm->vcpu.arch.hflags & HF_NMI_MASK);
  2229. }
  2230. static void svm_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked)
  2231. {
  2232. struct vcpu_svm *svm = to_svm(vcpu);
  2233. if (masked) {
  2234. svm->vcpu.arch.hflags |= HF_NMI_MASK;
  2235. svm->vmcb->control.intercept |= (1UL << INTERCEPT_IRET);
  2236. } else {
  2237. svm->vcpu.arch.hflags &= ~HF_NMI_MASK;
  2238. svm->vmcb->control.intercept &= ~(1UL << INTERCEPT_IRET);
  2239. }
  2240. }
  2241. static int svm_interrupt_allowed(struct kvm_vcpu *vcpu)
  2242. {
  2243. struct vcpu_svm *svm = to_svm(vcpu);
  2244. struct vmcb *vmcb = svm->vmcb;
  2245. int ret;
  2246. if (!gif_set(svm) ||
  2247. (vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK))
  2248. return 0;
  2249. ret = !!(vmcb->save.rflags & X86_EFLAGS_IF);
  2250. if (is_nested(svm))
  2251. return ret && !(svm->vcpu.arch.hflags & HF_VINTR_MASK);
  2252. return ret;
  2253. }
  2254. static void enable_irq_window(struct kvm_vcpu *vcpu)
  2255. {
  2256. struct vcpu_svm *svm = to_svm(vcpu);
  2257. /*
  2258. * In case GIF=0 we can't rely on the CPU to tell us when GIF becomes
  2259. * 1, because that's a separate STGI/VMRUN intercept. The next time we
  2260. * get that intercept, this function will be called again though and
  2261. * we'll get the vintr intercept.
  2262. */
  2263. if (gif_set(svm) && nested_svm_intr(svm)) {
  2264. svm_set_vintr(svm);
  2265. svm_inject_irq(svm, 0x0);
  2266. }
  2267. }
  2268. static void enable_nmi_window(struct kvm_vcpu *vcpu)
  2269. {
  2270. struct vcpu_svm *svm = to_svm(vcpu);
  2271. if ((svm->vcpu.arch.hflags & (HF_NMI_MASK | HF_IRET_MASK))
  2272. == HF_NMI_MASK)
  2273. return; /* IRET will cause a vm exit */
  2274. /*
2275. * Something prevents NMI from being injected. Single step over the possible
  2276. * problem (IRET or exception injection or interrupt shadow)
  2277. */
  2278. if (gif_set(svm) && nested_svm_nmi(svm)) {
  2279. svm->nmi_singlestep = true;
  2280. svm->vmcb->save.rflags |= (X86_EFLAGS_TF | X86_EFLAGS_RF);
  2281. update_db_intercept(vcpu);
  2282. }
  2283. }
  2284. static int svm_set_tss_addr(struct kvm *kvm, unsigned int addr)
  2285. {
  2286. return 0;
  2287. }
  2288. static void svm_flush_tlb(struct kvm_vcpu *vcpu)
  2289. {
  2290. force_new_asid(vcpu);
  2291. }
  2292. static void svm_prepare_guest_switch(struct kvm_vcpu *vcpu)
  2293. {
  2294. }
  2295. static inline void sync_cr8_to_lapic(struct kvm_vcpu *vcpu)
  2296. {
  2297. struct vcpu_svm *svm = to_svm(vcpu);
  2298. if (is_nested(svm) && (vcpu->arch.hflags & HF_VINTR_MASK))
  2299. return;
  2300. if (!(svm->vmcb->control.intercept_cr_write & INTERCEPT_CR8_MASK)) {
  2301. int cr8 = svm->vmcb->control.int_ctl & V_TPR_MASK;
  2302. kvm_set_cr8(vcpu, cr8);
  2303. }
  2304. }
  2305. static inline void sync_lapic_to_cr8(struct kvm_vcpu *vcpu)
  2306. {
  2307. struct vcpu_svm *svm = to_svm(vcpu);
  2308. u64 cr8;
  2309. if (is_nested(svm) && (vcpu->arch.hflags & HF_VINTR_MASK))
  2310. return;
  2311. cr8 = kvm_get_cr8(vcpu);
  2312. svm->vmcb->control.int_ctl &= ~V_TPR_MASK;
  2313. svm->vmcb->control.int_ctl |= cr8 & V_TPR_MASK;
  2314. }
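/*
 * Re-queue whatever event was pending when the #VMEXIT occurred
 * (exit_int_info) so it gets re-injected on the next entry. Software
 * exceptions are not re-injected; the instruction is re-executed instead,
 * rewinding RIP if an INT3 had been emulated before.
 */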
  2315. static void svm_complete_interrupts(struct vcpu_svm *svm)
  2316. {
  2317. u8 vector;
  2318. int type;
  2319. u32 exitintinfo = svm->vmcb->control.exit_int_info;
  2320. unsigned int3_injected = svm->int3_injected;
  2321. svm->int3_injected = 0;
  2322. if (svm->vcpu.arch.hflags & HF_IRET_MASK)
  2323. svm->vcpu.arch.hflags &= ~(HF_NMI_MASK | HF_IRET_MASK);
  2324. svm->vcpu.arch.nmi_injected = false;
  2325. kvm_clear_exception_queue(&svm->vcpu);
  2326. kvm_clear_interrupt_queue(&svm->vcpu);
  2327. if (!(exitintinfo & SVM_EXITINTINFO_VALID))
  2328. return;
  2329. vector = exitintinfo & SVM_EXITINTINFO_VEC_MASK;
  2330. type = exitintinfo & SVM_EXITINTINFO_TYPE_MASK;
  2331. switch (type) {
  2332. case SVM_EXITINTINFO_TYPE_NMI:
  2333. svm->vcpu.arch.nmi_injected = true;
  2334. break;
  2335. case SVM_EXITINTINFO_TYPE_EXEPT:
  2336. if (is_nested(svm))
  2337. break;
  2338. /*
  2339. * In case of software exceptions, do not reinject the vector,
  2340. * but re-execute the instruction instead. Rewind RIP first
  2341. * if we emulated INT3 before.
  2342. */
  2343. if (kvm_exception_is_soft(vector)) {
  2344. if (vector == BP_VECTOR && int3_injected &&
  2345. kvm_is_linear_rip(&svm->vcpu, svm->int3_rip))
  2346. kvm_rip_write(&svm->vcpu,
  2347. kvm_rip_read(&svm->vcpu) -
  2348. int3_injected);
  2349. break;
  2350. }
  2351. if (exitintinfo & SVM_EXITINTINFO_VALID_ERR) {
  2352. u32 err = svm->vmcb->control.exit_int_info_err;
  2353. kvm_queue_exception_e(&svm->vcpu, vector, err);
  2354. } else
  2355. kvm_queue_exception(&svm->vcpu, vector);
  2356. break;
  2357. case SVM_EXITINTINFO_TYPE_INTR:
  2358. kvm_queue_interrupt(&svm->vcpu, vector, false);
  2359. break;
  2360. default:
  2361. break;
  2362. }
  2363. }
  2364. #ifdef CONFIG_X86_64
  2365. #define R "r"
  2366. #else
  2367. #define R "e"
  2368. #endif
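/*
 * The actual world switch: GIF is cleared with clgi() around the VMRUN
 * region, the guest GPRs are moved between vcpu->arch.regs and the CPU
 * registers by the inline assembly, and host segment/MSR state is saved
 * before and restored after VMLOAD/VMRUN/VMSAVE.
 */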
  2369. static void svm_vcpu_run(struct kvm_vcpu *vcpu)
  2370. {
  2371. struct vcpu_svm *svm = to_svm(vcpu);
  2372. u16 fs_selector;
  2373. u16 gs_selector;
  2374. u16 ldt_selector;
  2375. /*
  2376. * A vmexit emulation is required before the vcpu can be executed
  2377. * again.
  2378. */
  2379. if (unlikely(svm->nested.exit_required))
  2380. return;
  2381. svm->vmcb->save.rax = vcpu->arch.regs[VCPU_REGS_RAX];
  2382. svm->vmcb->save.rsp = vcpu->arch.regs[VCPU_REGS_RSP];
  2383. svm->vmcb->save.rip = vcpu->arch.regs[VCPU_REGS_RIP];
  2384. pre_svm_run(svm);
  2385. sync_lapic_to_cr8(vcpu);
  2386. save_host_msrs(vcpu);
  2387. fs_selector = kvm_read_fs();
  2388. gs_selector = kvm_read_gs();
  2389. ldt_selector = kvm_read_ldt();
  2390. svm->vmcb->save.cr2 = vcpu->arch.cr2;
  2391. /* required for live migration with NPT */
  2392. if (npt_enabled)
  2393. svm->vmcb->save.cr3 = vcpu->arch.cr3;
  2394. clgi();
  2395. local_irq_enable();
  2396. asm volatile (
  2397. "push %%"R"bp; \n\t"
  2398. "mov %c[rbx](%[svm]), %%"R"bx \n\t"
  2399. "mov %c[rcx](%[svm]), %%"R"cx \n\t"
  2400. "mov %c[rdx](%[svm]), %%"R"dx \n\t"
  2401. "mov %c[rsi](%[svm]), %%"R"si \n\t"
  2402. "mov %c[rdi](%[svm]), %%"R"di \n\t"
  2403. "mov %c[rbp](%[svm]), %%"R"bp \n\t"
  2404. #ifdef CONFIG_X86_64
  2405. "mov %c[r8](%[svm]), %%r8 \n\t"
  2406. "mov %c[r9](%[svm]), %%r9 \n\t"
  2407. "mov %c[r10](%[svm]), %%r10 \n\t"
  2408. "mov %c[r11](%[svm]), %%r11 \n\t"
  2409. "mov %c[r12](%[svm]), %%r12 \n\t"
  2410. "mov %c[r13](%[svm]), %%r13 \n\t"
  2411. "mov %c[r14](%[svm]), %%r14 \n\t"
  2412. "mov %c[r15](%[svm]), %%r15 \n\t"
  2413. #endif
  2414. /* Enter guest mode */
  2415. "push %%"R"ax \n\t"
  2416. "mov %c[vmcb](%[svm]), %%"R"ax \n\t"
  2417. __ex(SVM_VMLOAD) "\n\t"
  2418. __ex(SVM_VMRUN) "\n\t"
  2419. __ex(SVM_VMSAVE) "\n\t"
  2420. "pop %%"R"ax \n\t"
  2421. /* Save guest registers, load host registers */
  2422. "mov %%"R"bx, %c[rbx](%[svm]) \n\t"
  2423. "mov %%"R"cx, %c[rcx](%[svm]) \n\t"
  2424. "mov %%"R"dx, %c[rdx](%[svm]) \n\t"
  2425. "mov %%"R"si, %c[rsi](%[svm]) \n\t"
  2426. "mov %%"R"di, %c[rdi](%[svm]) \n\t"
  2427. "mov %%"R"bp, %c[rbp](%[svm]) \n\t"
  2428. #ifdef CONFIG_X86_64
  2429. "mov %%r8, %c[r8](%[svm]) \n\t"
  2430. "mov %%r9, %c[r9](%[svm]) \n\t"
  2431. "mov %%r10, %c[r10](%[svm]) \n\t"
  2432. "mov %%r11, %c[r11](%[svm]) \n\t"
  2433. "mov %%r12, %c[r12](%[svm]) \n\t"
  2434. "mov %%r13, %c[r13](%[svm]) \n\t"
  2435. "mov %%r14, %c[r14](%[svm]) \n\t"
  2436. "mov %%r15, %c[r15](%[svm]) \n\t"
  2437. #endif
  2438. "pop %%"R"bp"
  2439. :
  2440. : [svm]"a"(svm),
  2441. [vmcb]"i"(offsetof(struct vcpu_svm, vmcb_pa)),
  2442. [rbx]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RBX])),
  2443. [rcx]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RCX])),
  2444. [rdx]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RDX])),
  2445. [rsi]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RSI])),
  2446. [rdi]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RDI])),
  2447. [rbp]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RBP]))
  2448. #ifdef CONFIG_X86_64
  2449. , [r8]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R8])),
  2450. [r9]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R9])),
  2451. [r10]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R10])),
  2452. [r11]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R11])),
  2453. [r12]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R12])),
  2454. [r13]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R13])),
  2455. [r14]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R14])),
  2456. [r15]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R15]))
  2457. #endif
  2458. : "cc", "memory"
  2459. , R"bx", R"cx", R"dx", R"si", R"di"
  2460. #ifdef CONFIG_X86_64
  2461. , "r8", "r9", "r10", "r11" , "r12", "r13", "r14", "r15"
  2462. #endif
  2463. );
  2464. vcpu->arch.cr2 = svm->vmcb->save.cr2;
  2465. vcpu->arch.regs[VCPU_REGS_RAX] = svm->vmcb->save.rax;
  2466. vcpu->arch.regs[VCPU_REGS_RSP] = svm->vmcb->save.rsp;
  2467. vcpu->arch.regs[VCPU_REGS_RIP] = svm->vmcb->save.rip;
  2468. kvm_load_fs(fs_selector);
  2469. kvm_load_gs(gs_selector);
  2470. kvm_load_ldt(ldt_selector);
  2471. load_host_msrs(vcpu);
  2472. reload_tss(vcpu);
  2473. local_irq_disable();
  2474. stgi();
  2475. sync_cr8_to_lapic(vcpu);
  2476. svm->next_rip = 0;
  2477. if (npt_enabled) {
  2478. vcpu->arch.regs_avail &= ~(1 << VCPU_EXREG_PDPTR);
  2479. vcpu->arch.regs_dirty &= ~(1 << VCPU_EXREG_PDPTR);
  2480. }
  2481. }
  2482. #undef R
  2483. static void svm_set_cr3(struct kvm_vcpu *vcpu, unsigned long root)
  2484. {
  2485. struct vcpu_svm *svm = to_svm(vcpu);
  2486. if (npt_enabled) {
  2487. svm->vmcb->control.nested_cr3 = root;
  2488. force_new_asid(vcpu);
  2489. return;
  2490. }
  2491. svm->vmcb->save.cr3 = root;
  2492. force_new_asid(vcpu);
  2493. }
  2494. static int is_disabled(void)
  2495. {
  2496. u64 vm_cr;
  2497. rdmsrl(MSR_VM_CR, vm_cr);
  2498. if (vm_cr & (1 << SVM_VM_CR_SVM_DISABLE))
  2499. return 1;
  2500. return 0;
  2501. }
  2502. static void
  2503. svm_patch_hypercall(struct kvm_vcpu *vcpu, unsigned char *hypercall)
  2504. {
  2505. /*
  2506. * Patch in the VMMCALL instruction:
  2507. */
  2508. hypercall[0] = 0x0f;
  2509. hypercall[1] = 0x01;
  2510. hypercall[2] = 0xd9;
  2511. }
  2512. static void svm_check_processor_compat(void *rtn)
  2513. {
  2514. *(int *)rtn = 0;
  2515. }
  2516. static bool svm_cpu_has_accelerated_tpr(void)
  2517. {
  2518. return false;
  2519. }
  2520. static int get_npt_level(void)
  2521. {
  2522. #ifdef CONFIG_X86_64
  2523. return PT64_ROOT_LEVEL;
  2524. #else
  2525. return PT32E_ROOT_LEVEL;
  2526. #endif
  2527. }
  2528. static u64 svm_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio)
  2529. {
  2530. return 0;
  2531. }
  2532. static void svm_cpuid_update(struct kvm_vcpu *vcpu)
  2533. {
  2534. }
  2535. static const struct trace_print_flags svm_exit_reasons_str[] = {
  2536. { SVM_EXIT_READ_CR0, "read_cr0" },
  2537. { SVM_EXIT_READ_CR3, "read_cr3" },
  2538. { SVM_EXIT_READ_CR4, "read_cr4" },
  2539. { SVM_EXIT_READ_CR8, "read_cr8" },
  2540. { SVM_EXIT_WRITE_CR0, "write_cr0" },
  2541. { SVM_EXIT_WRITE_CR3, "write_cr3" },
  2542. { SVM_EXIT_WRITE_CR4, "write_cr4" },
  2543. { SVM_EXIT_WRITE_CR8, "write_cr8" },
  2544. { SVM_EXIT_READ_DR0, "read_dr0" },
  2545. { SVM_EXIT_READ_DR1, "read_dr1" },
  2546. { SVM_EXIT_READ_DR2, "read_dr2" },
  2547. { SVM_EXIT_READ_DR3, "read_dr3" },
  2548. { SVM_EXIT_WRITE_DR0, "write_dr0" },
  2549. { SVM_EXIT_WRITE_DR1, "write_dr1" },
  2550. { SVM_EXIT_WRITE_DR2, "write_dr2" },
  2551. { SVM_EXIT_WRITE_DR3, "write_dr3" },
  2552. { SVM_EXIT_WRITE_DR5, "write_dr5" },
  2553. { SVM_EXIT_WRITE_DR7, "write_dr7" },
  2554. { SVM_EXIT_EXCP_BASE + DB_VECTOR, "DB excp" },
  2555. { SVM_EXIT_EXCP_BASE + BP_VECTOR, "BP excp" },
  2556. { SVM_EXIT_EXCP_BASE + UD_VECTOR, "UD excp" },
  2557. { SVM_EXIT_EXCP_BASE + PF_VECTOR, "PF excp" },
  2558. { SVM_EXIT_EXCP_BASE + NM_VECTOR, "NM excp" },
  2559. { SVM_EXIT_EXCP_BASE + MC_VECTOR, "MC excp" },
  2560. { SVM_EXIT_INTR, "interrupt" },
  2561. { SVM_EXIT_NMI, "nmi" },
  2562. { SVM_EXIT_SMI, "smi" },
  2563. { SVM_EXIT_INIT, "init" },
  2564. { SVM_EXIT_VINTR, "vintr" },
  2565. { SVM_EXIT_CPUID, "cpuid" },
  2566. { SVM_EXIT_INVD, "invd" },
  2567. { SVM_EXIT_HLT, "hlt" },
  2568. { SVM_EXIT_INVLPG, "invlpg" },
  2569. { SVM_EXIT_INVLPGA, "invlpga" },
  2570. { SVM_EXIT_IOIO, "io" },
  2571. { SVM_EXIT_MSR, "msr" },
  2572. { SVM_EXIT_TASK_SWITCH, "task_switch" },
  2573. { SVM_EXIT_SHUTDOWN, "shutdown" },
  2574. { SVM_EXIT_VMRUN, "vmrun" },
  2575. { SVM_EXIT_VMMCALL, "hypercall" },
  2576. { SVM_EXIT_VMLOAD, "vmload" },
  2577. { SVM_EXIT_VMSAVE, "vmsave" },
  2578. { SVM_EXIT_STGI, "stgi" },
  2579. { SVM_EXIT_CLGI, "clgi" },
  2580. { SVM_EXIT_SKINIT, "skinit" },
  2581. { SVM_EXIT_WBINVD, "wbinvd" },
  2582. { SVM_EXIT_MONITOR, "monitor" },
  2583. { SVM_EXIT_MWAIT, "mwait" },
  2584. { SVM_EXIT_NPF, "npf" },
  2585. { -1, NULL }
  2586. };
  2587. static int svm_get_lpage_level(void)
  2588. {
  2589. return PT_PDPE_LEVEL;
  2590. }
  2591. static bool svm_rdtscp_supported(void)
  2592. {
  2593. return false;
  2594. }
  2595. static void svm_fpu_deactivate(struct kvm_vcpu *vcpu)
  2596. {
  2597. struct vcpu_svm *svm = to_svm(vcpu);
  2598. svm->vmcb->control.intercept_exceptions |= 1 << NM_VECTOR;
  2599. if (is_nested(svm))
  2600. svm->nested.hsave->control.intercept_exceptions |= 1 << NM_VECTOR;
  2601. update_cr0_intercept(svm);
  2602. }
  2603. static struct kvm_x86_ops svm_x86_ops = {
  2604. .cpu_has_kvm_support = has_svm,
  2605. .disabled_by_bios = is_disabled,
  2606. .hardware_setup = svm_hardware_setup,
  2607. .hardware_unsetup = svm_hardware_unsetup,
  2608. .check_processor_compatibility = svm_check_processor_compat,
  2609. .hardware_enable = svm_hardware_enable,
  2610. .hardware_disable = svm_hardware_disable,
  2611. .cpu_has_accelerated_tpr = svm_cpu_has_accelerated_tpr,
  2612. .vcpu_create = svm_create_vcpu,
  2613. .vcpu_free = svm_free_vcpu,
  2614. .vcpu_reset = svm_vcpu_reset,
  2615. .prepare_guest_switch = svm_prepare_guest_switch,
  2616. .vcpu_load = svm_vcpu_load,
  2617. .vcpu_put = svm_vcpu_put,
  2618. .set_guest_debug = svm_guest_debug,
  2619. .get_msr = svm_get_msr,
  2620. .set_msr = svm_set_msr,
  2621. .get_segment_base = svm_get_segment_base,
  2622. .get_segment = svm_get_segment,
  2623. .set_segment = svm_set_segment,
  2624. .get_cpl = svm_get_cpl,
  2625. .get_cs_db_l_bits = kvm_get_cs_db_l_bits,
  2626. .decache_cr0_guest_bits = svm_decache_cr0_guest_bits,
  2627. .decache_cr4_guest_bits = svm_decache_cr4_guest_bits,
  2628. .set_cr0 = svm_set_cr0,
  2629. .set_cr3 = svm_set_cr3,
  2630. .set_cr4 = svm_set_cr4,
  2631. .set_efer = svm_set_efer,
  2632. .get_idt = svm_get_idt,
  2633. .set_idt = svm_set_idt,
  2634. .get_gdt = svm_get_gdt,
  2635. .set_gdt = svm_set_gdt,
  2636. .get_dr = svm_get_dr,
  2637. .set_dr = svm_set_dr,
  2638. .cache_reg = svm_cache_reg,
  2639. .get_rflags = svm_get_rflags,
  2640. .set_rflags = svm_set_rflags,
  2641. .fpu_activate = svm_fpu_activate,
  2642. .fpu_deactivate = svm_fpu_deactivate,
  2643. .tlb_flush = svm_flush_tlb,
  2644. .run = svm_vcpu_run,
  2645. .handle_exit = handle_exit,
  2646. .skip_emulated_instruction = skip_emulated_instruction,
  2647. .set_interrupt_shadow = svm_set_interrupt_shadow,
  2648. .get_interrupt_shadow = svm_get_interrupt_shadow,
  2649. .patch_hypercall = svm_patch_hypercall,
  2650. .set_irq = svm_set_irq,
  2651. .set_nmi = svm_inject_nmi,
  2652. .queue_exception = svm_queue_exception,
  2653. .interrupt_allowed = svm_interrupt_allowed,
  2654. .nmi_allowed = svm_nmi_allowed,
  2655. .get_nmi_mask = svm_get_nmi_mask,
  2656. .set_nmi_mask = svm_set_nmi_mask,
  2657. .enable_nmi_window = enable_nmi_window,
  2658. .enable_irq_window = enable_irq_window,
  2659. .update_cr8_intercept = update_cr8_intercept,
  2660. .set_tss_addr = svm_set_tss_addr,
  2661. .get_tdp_level = get_npt_level,
  2662. .get_mt_mask = svm_get_mt_mask,
  2663. .exit_reasons_str = svm_exit_reasons_str,
  2664. .get_lpage_level = svm_get_lpage_level,
  2665. .cpuid_update = svm_cpuid_update,
  2666. .rdtscp_supported = svm_rdtscp_supported,
  2667. };
  2668. static int __init svm_init(void)
  2669. {
  2670. return kvm_init(&svm_x86_ops, sizeof(struct vcpu_svm),
  2671. THIS_MODULE);
  2672. }
  2673. static void __exit svm_exit(void)
  2674. {
  2675. kvm_exit();
  2676. }
  2677. module_init(svm_init)
  2678. module_exit(svm_exit)