svm.c

/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * AMD SVM support
 *
 * Copyright (C) 2006 Qumranet, Inc.
 *
 * Authors:
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Avi Kivity   <avi@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 *
 */
#include <linux/kvm_host.h>

#include "irq.h"
#include "mmu.h"
#include "kvm_cache_regs.h"
#include "x86.h"

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/vmalloc.h>
#include <linux/highmem.h>
#include <linux/sched.h>
#include <linux/ftrace_event.h>
#include <linux/slab.h>

#include <asm/desc.h>
#include <asm/virtext.h>

#include "trace.h"

#define __ex(x) __kvm_handle_fault_on_reboot(x)

MODULE_AUTHOR("Qumranet");
MODULE_LICENSE("GPL");

#define IOPM_ALLOC_ORDER 2
#define MSRPM_ALLOC_ORDER 1

#define SEG_TYPE_LDT 2
#define SEG_TYPE_BUSY_TSS16 3

#define SVM_FEATURE_NPT  (1 << 0)
#define SVM_FEATURE_LBRV (1 << 1)
#define SVM_FEATURE_SVML (1 << 2)
#define SVM_FEATURE_NRIP (1 << 3)
#define SVM_FEATURE_PAUSE_FILTER (1 << 10)

#define NESTED_EXIT_HOST	0	/* Exit handled on host level */
#define NESTED_EXIT_DONE	1	/* Exit caused nested vmexit  */
#define NESTED_EXIT_CONTINUE	2	/* Further checks needed      */

#define DEBUGCTL_RESERVED_BITS (~(0x3fULL))

static const u32 host_save_user_msrs[] = {
#ifdef CONFIG_X86_64
        MSR_STAR, MSR_LSTAR, MSR_CSTAR, MSR_SYSCALL_MASK, MSR_KERNEL_GS_BASE,
        MSR_FS_BASE,
#endif
        MSR_IA32_SYSENTER_CS, MSR_IA32_SYSENTER_ESP, MSR_IA32_SYSENTER_EIP,
};

#define NR_HOST_SAVE_USER_MSRS ARRAY_SIZE(host_save_user_msrs)

struct kvm_vcpu;

struct nested_state {
        struct vmcb *hsave;
        u64 hsave_msr;
        u64 vm_cr_msr;
        u64 vmcb;

        /* These are the merged vectors */
        u32 *msrpm;

        /* gpa pointers to the real vectors */
        u64 vmcb_msrpm;
        u64 vmcb_iopm;

        /* A VMEXIT is required but not yet emulated */
        bool exit_required;

        /* cache for intercepts of the guest */
        u16 intercept_cr_read;
        u16 intercept_cr_write;
        u16 intercept_dr_read;
        u16 intercept_dr_write;
        u32 intercept_exceptions;
        u64 intercept;
};

#define MSRPM_OFFSETS 16
static u32 msrpm_offsets[MSRPM_OFFSETS] __read_mostly;

struct vcpu_svm {
        struct kvm_vcpu vcpu;
        struct vmcb *vmcb;
        unsigned long vmcb_pa;
        struct svm_cpu_data *svm_data;
        uint64_t asid_generation;
        uint64_t sysenter_esp;
        uint64_t sysenter_eip;

        u64 next_rip;

        u64 host_user_msrs[NR_HOST_SAVE_USER_MSRS];
        u64 host_gs_base;

        u32 *msrpm;

        struct nested_state nested;

        bool nmi_singlestep;

        unsigned int3_injected;
        unsigned long int3_rip;
};

#define MSR_INVALID 0xffffffffU

static struct svm_direct_access_msrs {
        u32 index;   /* Index of the MSR */
        bool always; /* True if intercept is always on */
} direct_access_msrs[] = {
        { .index = MSR_K6_STAR,                 .always = true  },
        { .index = MSR_IA32_SYSENTER_CS,        .always = true  },
#ifdef CONFIG_X86_64
        { .index = MSR_GS_BASE,                 .always = true  },
        { .index = MSR_FS_BASE,                 .always = true  },
        { .index = MSR_KERNEL_GS_BASE,          .always = true  },
        { .index = MSR_LSTAR,                   .always = true  },
        { .index = MSR_CSTAR,                   .always = true  },
        { .index = MSR_SYSCALL_MASK,            .always = true  },
#endif
        { .index = MSR_IA32_LASTBRANCHFROMIP,   .always = false },
        { .index = MSR_IA32_LASTBRANCHTOIP,     .always = false },
        { .index = MSR_IA32_LASTINTFROMIP,      .always = false },
        { .index = MSR_IA32_LASTINTTOIP,        .always = false },
        { .index = MSR_INVALID,                 .always = false },
};

/* enable NPT for AMD64 and X86 with PAE */
#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
static bool npt_enabled = true;
#else
static bool npt_enabled;
#endif
static int npt = 1;
module_param(npt, int, S_IRUGO);

static int nested = 1;
module_param(nested, int, S_IRUGO);

static void svm_flush_tlb(struct kvm_vcpu *vcpu);
static void svm_complete_interrupts(struct vcpu_svm *svm);

static int nested_svm_exit_handled(struct vcpu_svm *svm);
static int nested_svm_intercept(struct vcpu_svm *svm);
static int nested_svm_vmexit(struct vcpu_svm *svm);
static int nested_svm_check_exception(struct vcpu_svm *svm, unsigned nr,
                                      bool has_error_code, u32 error_code);

static inline struct vcpu_svm *to_svm(struct kvm_vcpu *vcpu)
{
        return container_of(vcpu, struct vcpu_svm, vcpu);
}

static inline bool is_nested(struct vcpu_svm *svm)
{
        return svm->nested.vmcb;
}

static inline void enable_gif(struct vcpu_svm *svm)
{
        svm->vcpu.arch.hflags |= HF_GIF_MASK;
}

static inline void disable_gif(struct vcpu_svm *svm)
{
        svm->vcpu.arch.hflags &= ~HF_GIF_MASK;
}

static inline bool gif_set(struct vcpu_svm *svm)
{
        return !!(svm->vcpu.arch.hflags & HF_GIF_MASK);
}

static unsigned long iopm_base;

struct kvm_ldttss_desc {
        u16 limit0;
        u16 base0;
        unsigned base1:8, type:5, dpl:2, p:1;
        unsigned limit1:4, zero0:3, g:1, base2:8;
        u32 base3;
        u32 zero1;
} __attribute__((packed));

struct svm_cpu_data {
        int cpu;

        u64 asid_generation;
        u32 max_asid;
        u32 next_asid;
        struct kvm_ldttss_desc *tss_desc;

        struct page *save_area;
};

static DEFINE_PER_CPU(struct svm_cpu_data *, svm_data);
static uint32_t svm_features;

struct svm_init_data {
        int cpu;
        int r;
};

static u32 msrpm_ranges[] = {0, 0xc0000000, 0xc0010000};

#define NUM_MSR_MAPS ARRAY_SIZE(msrpm_ranges)
#define MSRS_RANGE_SIZE 2048
#define MSRS_IN_RANGE (MSRS_RANGE_SIZE * 8 / 2)
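
/*
 * The MSR permission map consists of one 2 KB block per MSR range in
 * msrpm_ranges; every MSR is represented by two consecutive bits, one for
 * reads and one for writes.  svm_msrpm_offset() returns the u32 offset of
 * the word holding those bits, or MSR_INVALID if the MSR is not covered.
 */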
static u32 svm_msrpm_offset(u32 msr)
{
        u32 offset;
        int i;

        for (i = 0; i < NUM_MSR_MAPS; i++) {
                if (msr < msrpm_ranges[i] ||
                    msr >= msrpm_ranges[i] + MSRS_IN_RANGE)
                        continue;

                offset  = (msr - msrpm_ranges[i]) / 4; /* 4 msrs per u8    */
                offset += (i * MSRS_RANGE_SIZE);       /* add range offset */

                /* Now we have the u8 offset - but need the u32 offset */
                return offset / 4;
        }

        /* MSR not in any range */
        return MSR_INVALID;
}

#define MAX_INST_SIZE 15

static inline u32 svm_has(u32 feat)
{
        return svm_features & feat;
}

static inline void clgi(void)
{
        asm volatile (__ex(SVM_CLGI));
}

static inline void stgi(void)
{
        asm volatile (__ex(SVM_STGI));
}

static inline void invlpga(unsigned long addr, u32 asid)
{
        asm volatile (__ex(SVM_INVLPGA) : : "a"(addr), "c"(asid));
}

static inline void force_new_asid(struct kvm_vcpu *vcpu)
{
        to_svm(vcpu)->asid_generation--;
}

static inline void flush_guest_tlb(struct kvm_vcpu *vcpu)
{
        force_new_asid(vcpu);
}

static void svm_set_efer(struct kvm_vcpu *vcpu, u64 efer)
{
        if (!npt_enabled && !(efer & EFER_LMA))
                efer &= ~EFER_LME;

        to_svm(vcpu)->vmcb->save.efer = efer | EFER_SVME;
        vcpu->arch.efer = efer;
}

static int is_external_interrupt(u32 info)
{
        info &= SVM_EVTINJ_TYPE_MASK | SVM_EVTINJ_VALID;
        return info == (SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_INTR);
}

static u32 svm_get_interrupt_shadow(struct kvm_vcpu *vcpu, int mask)
{
        struct vcpu_svm *svm = to_svm(vcpu);
        u32 ret = 0;

        if (svm->vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK)
                ret |= KVM_X86_SHADOW_INT_STI | KVM_X86_SHADOW_INT_MOV_SS;
        return ret & mask;
}

static void svm_set_interrupt_shadow(struct kvm_vcpu *vcpu, int mask)
{
        struct vcpu_svm *svm = to_svm(vcpu);

        if (mask == 0)
                svm->vmcb->control.int_state &= ~SVM_INTERRUPT_SHADOW_MASK;
        else
                svm->vmcb->control.int_state |= SVM_INTERRUPT_SHADOW_MASK;
}
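
/*
 * Advance the guest RIP past the instruction that caused the current exit.
 * If the CPU stored a valid next_rip in the VMCB (the NRIP save feature) it
 * is used directly; otherwise the instruction is decoded by the emulator
 * with EMULTYPE_SKIP as a fallback.
 */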
static void skip_emulated_instruction(struct kvm_vcpu *vcpu)
{
        struct vcpu_svm *svm = to_svm(vcpu);

        if (svm->vmcb->control.next_rip != 0)
                svm->next_rip = svm->vmcb->control.next_rip;

        if (!svm->next_rip) {
                if (emulate_instruction(vcpu, 0, 0, EMULTYPE_SKIP) !=
                                EMULATE_DONE)
                        printk(KERN_DEBUG "%s: NOP\n", __func__);
                return;
        }
        if (svm->next_rip - kvm_rip_read(vcpu) > MAX_INST_SIZE)
                printk(KERN_ERR "%s: ip 0x%lx next 0x%llx\n",
                       __func__, kvm_rip_read(vcpu), svm->next_rip);

        kvm_rip_write(vcpu, svm->next_rip);
        svm_set_interrupt_shadow(vcpu, 0);
}

static void svm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr,
                                bool has_error_code, u32 error_code)
{
        struct vcpu_svm *svm = to_svm(vcpu);

        /*
         * If we are within a nested VM we'd better #VMEXIT and let the guest
         * handle the exception
         */
        if (nested_svm_check_exception(svm, nr, has_error_code, error_code))
                return;

        if (nr == BP_VECTOR && !svm_has(SVM_FEATURE_NRIP)) {
                unsigned long rip, old_rip = kvm_rip_read(&svm->vcpu);

                /*
                 * For guest debugging where we have to reinject #BP if some
                 * INT3 is guest-owned:
                 * Emulate nRIP by moving RIP forward. Will fail if injection
                 * raises a fault that is not intercepted. Still better than
                 * failing in all cases.
                 */
                skip_emulated_instruction(&svm->vcpu);
                rip = kvm_rip_read(&svm->vcpu);
                svm->int3_rip = rip + svm->vmcb->save.cs.base;
                svm->int3_injected = rip - old_rip;
        }

        svm->vmcb->control.event_inj = nr
                | SVM_EVTINJ_VALID
                | (has_error_code ? SVM_EVTINJ_VALID_ERR : 0)
                | SVM_EVTINJ_TYPE_EXEPT;
        svm->vmcb->control.event_inj_err = error_code;
}

static int has_svm(void)
{
        const char *msg;

        if (!cpu_has_svm(&msg)) {
                printk(KERN_INFO "has_svm: %s\n", msg);
                return 0;
        }

        return 1;
}

static void svm_hardware_disable(void *garbage)
{
        cpu_svm_disable();
}
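
/*
 * Per-CPU enable path: verifies SVM support, sets EFER.SVME and points
 * MSR_VM_HSAVE_PA at this CPU's host save area.  Returns -EBUSY if
 * EFER.SVME is already set, and -EINVAL on missing support or missing
 * per-CPU data.
 */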
static int svm_hardware_enable(void *garbage)
{
        struct svm_cpu_data *sd;
        uint64_t efer;
        struct desc_ptr gdt_descr;
        struct desc_struct *gdt;
        int me = raw_smp_processor_id();

        rdmsrl(MSR_EFER, efer);
        if (efer & EFER_SVME)
                return -EBUSY;

        if (!has_svm()) {
                printk(KERN_ERR "svm_hardware_enable: err EOPNOTSUPP on %d\n",
                       me);
                return -EINVAL;
        }
        sd = per_cpu(svm_data, me);

        if (!sd) {
                printk(KERN_ERR "svm_hardware_enable: svm_data is NULL on %d\n",
                       me);
                return -EINVAL;
        }

        sd->asid_generation = 1;
        sd->max_asid = cpuid_ebx(SVM_CPUID_FUNC) - 1;
        sd->next_asid = sd->max_asid + 1;

        native_store_gdt(&gdt_descr);
        gdt = (struct desc_struct *)gdt_descr.address;
        sd->tss_desc = (struct kvm_ldttss_desc *)(gdt + GDT_ENTRY_TSS);

        wrmsrl(MSR_EFER, efer | EFER_SVME);

        wrmsrl(MSR_VM_HSAVE_PA, page_to_pfn(sd->save_area) << PAGE_SHIFT);

        return 0;
}

static void svm_cpu_uninit(int cpu)
{
        struct svm_cpu_data *sd = per_cpu(svm_data, raw_smp_processor_id());

        if (!sd)
                return;

        per_cpu(svm_data, raw_smp_processor_id()) = NULL;
        __free_page(sd->save_area);
        kfree(sd);
}

static int svm_cpu_init(int cpu)
{
        struct svm_cpu_data *sd;
        int r;

        sd = kzalloc(sizeof(struct svm_cpu_data), GFP_KERNEL);
        if (!sd)
                return -ENOMEM;
        sd->cpu = cpu;
        sd->save_area = alloc_page(GFP_KERNEL);
        r = -ENOMEM;
        if (!sd->save_area)
                goto err_1;

        per_cpu(svm_data, cpu) = sd;

        return 0;

err_1:
        kfree(sd);
        return r;
}

static bool valid_msr_intercept(u32 index)
{
        int i;

        for (i = 0; direct_access_msrs[i].index != MSR_INVALID; i++)
                if (direct_access_msrs[i].index == index)
                        return true;

        return false;
}
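
/*
 * Toggle the read/write intercept bits for one MSR in the given permission
 * map.  A set bit means "intercept"; direct access is granted by clearing
 * the bit, so read/write == 1 disables the corresponding intercept.
 */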
static void set_msr_interception(u32 *msrpm, unsigned msr,
                                 int read, int write)
{
        u8 bit_read, bit_write;
        unsigned long tmp;
        u32 offset;

        /*
         * If this warning triggers extend the direct_access_msrs list at the
         * beginning of the file
         */
        WARN_ON(!valid_msr_intercept(msr));

        offset    = svm_msrpm_offset(msr);
        bit_read  = 2 * (msr & 0x0f);
        bit_write = 2 * (msr & 0x0f) + 1;
        tmp       = msrpm[offset];

        BUG_ON(offset == MSR_INVALID);

        read  ? clear_bit(bit_read,  &tmp) : set_bit(bit_read,  &tmp);
        write ? clear_bit(bit_write, &tmp) : set_bit(bit_write, &tmp);

        msrpm[offset] = tmp;
}

static void svm_vcpu_init_msrpm(u32 *msrpm)
{
        int i;

        memset(msrpm, 0xff, PAGE_SIZE * (1 << MSRPM_ALLOC_ORDER));

        for (i = 0; direct_access_msrs[i].index != MSR_INVALID; i++) {
                if (!direct_access_msrs[i].always)
                        continue;

                set_msr_interception(msrpm, direct_access_msrs[i].index, 1, 1);
        }
}

static void add_msr_offset(u32 offset)
{
        int i;

        for (i = 0; i < MSRPM_OFFSETS; ++i) {

                /* Offset already in list? */
                if (msrpm_offsets[i] == offset)
                        return;

                /* Slot used by another offset? */
                if (msrpm_offsets[i] != MSR_INVALID)
                        continue;

                /* Add offset to list */
                msrpm_offsets[i] = offset;

                return;
        }

        /*
         * If this BUG triggers the msrpm_offsets table has an overflow. Just
         * increase MSRPM_OFFSETS in this case.
         */
        BUG();
}

static void init_msrpm_offsets(void)
{
        int i;

        memset(msrpm_offsets, 0xff, sizeof(msrpm_offsets));

        for (i = 0; direct_access_msrs[i].index != MSR_INVALID; i++) {
                u32 offset;

                offset = svm_msrpm_offset(direct_access_msrs[i].index);
                BUG_ON(offset == MSR_INVALID);

                add_msr_offset(offset);
        }
}

static void svm_enable_lbrv(struct vcpu_svm *svm)
{
        u32 *msrpm = svm->msrpm;

        svm->vmcb->control.lbr_ctl = 1;
        set_msr_interception(msrpm, MSR_IA32_LASTBRANCHFROMIP, 1, 1);
        set_msr_interception(msrpm, MSR_IA32_LASTBRANCHTOIP, 1, 1);
        set_msr_interception(msrpm, MSR_IA32_LASTINTFROMIP, 1, 1);
        set_msr_interception(msrpm, MSR_IA32_LASTINTTOIP, 1, 1);
}

static void svm_disable_lbrv(struct vcpu_svm *svm)
{
        u32 *msrpm = svm->msrpm;

        svm->vmcb->control.lbr_ctl = 0;
        set_msr_interception(msrpm, MSR_IA32_LASTBRANCHFROMIP, 0, 0);
        set_msr_interception(msrpm, MSR_IA32_LASTBRANCHTOIP, 0, 0);
        set_msr_interception(msrpm, MSR_IA32_LASTINTFROMIP, 0, 0);
        set_msr_interception(msrpm, MSR_IA32_LASTINTTOIP, 0, 0);
}
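
/*
 * Module-init time setup: allocates the I/O permission bitmap shared by all
 * guests, initializes the msrpm offset table, enables optional EFER bits,
 * sets up per-CPU data and decides whether nested paging (NPT) is used.
 */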
static __init int svm_hardware_setup(void)
{
        int cpu;
        struct page *iopm_pages;
        void *iopm_va;
        int r;

        iopm_pages = alloc_pages(GFP_KERNEL, IOPM_ALLOC_ORDER);

        if (!iopm_pages)
                return -ENOMEM;

        iopm_va = page_address(iopm_pages);
        memset(iopm_va, 0xff, PAGE_SIZE * (1 << IOPM_ALLOC_ORDER));
        iopm_base = page_to_pfn(iopm_pages) << PAGE_SHIFT;

        init_msrpm_offsets();

        if (boot_cpu_has(X86_FEATURE_NX))
                kvm_enable_efer_bits(EFER_NX);

        if (boot_cpu_has(X86_FEATURE_FXSR_OPT))
                kvm_enable_efer_bits(EFER_FFXSR);

        if (nested) {
                printk(KERN_INFO "kvm: Nested Virtualization enabled\n");
                kvm_enable_efer_bits(EFER_SVME);
        }

        for_each_possible_cpu(cpu) {
                r = svm_cpu_init(cpu);
                if (r)
                        goto err;
        }

        svm_features = cpuid_edx(SVM_CPUID_FUNC);

        if (!svm_has(SVM_FEATURE_NPT))
                npt_enabled = false;

        if (npt_enabled && !npt) {
                printk(KERN_INFO "kvm: Nested Paging disabled\n");
                npt_enabled = false;
        }

        if (npt_enabled) {
                printk(KERN_INFO "kvm: Nested Paging enabled\n");
                kvm_enable_tdp();
        } else
                kvm_disable_tdp();

        return 0;

err:
        __free_pages(iopm_pages, IOPM_ALLOC_ORDER);
        iopm_base = 0;
        return r;
}

static __exit void svm_hardware_unsetup(void)
{
        int cpu;

        for_each_possible_cpu(cpu)
                svm_cpu_uninit(cpu);

        __free_pages(pfn_to_page(iopm_base >> PAGE_SHIFT), IOPM_ALLOC_ORDER);
        iopm_base = 0;
}

static void init_seg(struct vmcb_seg *seg)
{
        seg->selector = 0;
        seg->attrib = SVM_SELECTOR_P_MASK | SVM_SELECTOR_S_MASK |
                      SVM_SELECTOR_WRITE_MASK; /* Read/Write Data Segment */
        seg->limit = 0xffff;
        seg->base = 0;
}

static void init_sys_seg(struct vmcb_seg *seg, uint32_t type)
{
        seg->selector = 0;
        seg->attrib = SVM_SELECTOR_P_MASK | type;
        seg->limit = 0xffff;
        seg->base = 0;
}
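
/*
 * Fill a fresh VMCB with the default intercepts and the architectural
 * real-mode reset state (CS selector 0xf000, RIP 0xfff0).  When nested
 * paging is available the CR3, #PF and INVLPG intercepts are dropped since
 * the hardware walks the guest page tables itself.
 */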
static void init_vmcb(struct vcpu_svm *svm)
{
        struct vmcb_control_area *control = &svm->vmcb->control;
        struct vmcb_save_area *save = &svm->vmcb->save;

        svm->vcpu.fpu_active = 1;

        control->intercept_cr_read = INTERCEPT_CR0_MASK |
                                     INTERCEPT_CR3_MASK |
                                     INTERCEPT_CR4_MASK;

        control->intercept_cr_write = INTERCEPT_CR0_MASK |
                                      INTERCEPT_CR3_MASK |
                                      INTERCEPT_CR4_MASK |
                                      INTERCEPT_CR8_MASK;

        control->intercept_dr_read = INTERCEPT_DR0_MASK |
                                     INTERCEPT_DR1_MASK |
                                     INTERCEPT_DR2_MASK |
                                     INTERCEPT_DR3_MASK |
                                     INTERCEPT_DR4_MASK |
                                     INTERCEPT_DR5_MASK |
                                     INTERCEPT_DR6_MASK |
                                     INTERCEPT_DR7_MASK;

        control->intercept_dr_write = INTERCEPT_DR0_MASK |
                                      INTERCEPT_DR1_MASK |
                                      INTERCEPT_DR2_MASK |
                                      INTERCEPT_DR3_MASK |
                                      INTERCEPT_DR4_MASK |
                                      INTERCEPT_DR5_MASK |
                                      INTERCEPT_DR6_MASK |
                                      INTERCEPT_DR7_MASK;

        control->intercept_exceptions = (1 << PF_VECTOR) |
                                        (1 << UD_VECTOR) |
                                        (1 << MC_VECTOR);

        control->intercept = (1ULL << INTERCEPT_INTR) |
                             (1ULL << INTERCEPT_NMI) |
                             (1ULL << INTERCEPT_SMI) |
                             (1ULL << INTERCEPT_SELECTIVE_CR0) |
                             (1ULL << INTERCEPT_CPUID) |
                             (1ULL << INTERCEPT_INVD) |
                             (1ULL << INTERCEPT_HLT) |
                             (1ULL << INTERCEPT_INVLPG) |
                             (1ULL << INTERCEPT_INVLPGA) |
                             (1ULL << INTERCEPT_IOIO_PROT) |
                             (1ULL << INTERCEPT_MSR_PROT) |
                             (1ULL << INTERCEPT_TASK_SWITCH) |
                             (1ULL << INTERCEPT_SHUTDOWN) |
                             (1ULL << INTERCEPT_VMRUN) |
                             (1ULL << INTERCEPT_VMMCALL) |
                             (1ULL << INTERCEPT_VMLOAD) |
                             (1ULL << INTERCEPT_VMSAVE) |
                             (1ULL << INTERCEPT_STGI) |
                             (1ULL << INTERCEPT_CLGI) |
                             (1ULL << INTERCEPT_SKINIT) |
                             (1ULL << INTERCEPT_WBINVD) |
                             (1ULL << INTERCEPT_MONITOR) |
                             (1ULL << INTERCEPT_MWAIT);

        control->iopm_base_pa = iopm_base;
        control->msrpm_base_pa = __pa(svm->msrpm);
        control->tsc_offset = 0;
        control->int_ctl = V_INTR_MASKING_MASK;

        init_seg(&save->es);
        init_seg(&save->ss);
        init_seg(&save->ds);
        init_seg(&save->fs);
        init_seg(&save->gs);

        save->cs.selector = 0xf000;
        /* Executable/Readable Code Segment */
        save->cs.attrib = SVM_SELECTOR_READ_MASK | SVM_SELECTOR_P_MASK |
                SVM_SELECTOR_S_MASK | SVM_SELECTOR_CODE_MASK;
        save->cs.limit = 0xffff;
        /*
         * cs.base should really be 0xffff0000, but vmx can't handle that, so
         * be consistent with it.
         *
         * Replace when we have real mode working for vmx.
         */
        save->cs.base = 0xf0000;

        save->gdtr.limit = 0xffff;
        save->idtr.limit = 0xffff;

        init_sys_seg(&save->ldtr, SEG_TYPE_LDT);
        init_sys_seg(&save->tr, SEG_TYPE_BUSY_TSS16);

        save->efer = EFER_SVME;
        save->dr6 = 0xffff0ff0;
        save->dr7 = 0x400;
        save->rflags = 2;
        save->rip = 0x0000fff0;
        svm->vcpu.arch.regs[VCPU_REGS_RIP] = save->rip;

        /*
         * This is the guest-visible cr0 value.
         * svm_set_cr0() sets PG and WP and clears NW and CD on save->cr0.
         */
        svm->vcpu.arch.cr0 = X86_CR0_NW | X86_CR0_CD | X86_CR0_ET;
        kvm_set_cr0(&svm->vcpu, svm->vcpu.arch.cr0);

        save->cr4 = X86_CR4_PAE;
        /* rdx = ?? */

        if (npt_enabled) {
                /* Setup VMCB for Nested Paging */
                control->nested_ctl = 1;
                control->intercept &= ~((1ULL << INTERCEPT_TASK_SWITCH) |
                                        (1ULL << INTERCEPT_INVLPG));
                control->intercept_exceptions &= ~(1 << PF_VECTOR);
                control->intercept_cr_read &= ~INTERCEPT_CR3_MASK;
                control->intercept_cr_write &= ~INTERCEPT_CR3_MASK;
                save->g_pat = 0x0007040600070406ULL;
                save->cr3 = 0;
                save->cr4 = 0;
        }
        force_new_asid(&svm->vcpu);

        svm->nested.vmcb = 0;
        svm->vcpu.arch.hflags = 0;

        if (svm_has(SVM_FEATURE_PAUSE_FILTER)) {
                control->pause_filter_count = 3000;
                control->intercept |= (1ULL << INTERCEPT_PAUSE);
        }

        enable_gif(svm);
}

static int svm_vcpu_reset(struct kvm_vcpu *vcpu)
{
        struct vcpu_svm *svm = to_svm(vcpu);

        init_vmcb(svm);

        if (!kvm_vcpu_is_bsp(vcpu)) {
                kvm_rip_write(vcpu, 0);
                svm->vmcb->save.cs.base = svm->vcpu.arch.sipi_vector << 12;
                svm->vmcb->save.cs.selector = svm->vcpu.arch.sipi_vector << 8;
        }
        vcpu->arch.regs_avail = ~0;
        vcpu->arch.regs_dirty = ~0;

        return 0;
}
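
/*
 * Allocate and initialize one vcpu: the VMCB page, the per-vcpu MSR
 * permission map, and the pages used by nested SVM (host save area and the
 * merged nested MSR permission map).
 */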
static struct kvm_vcpu *svm_create_vcpu(struct kvm *kvm, unsigned int id)
{
        struct vcpu_svm *svm;
        struct page *page;
        struct page *msrpm_pages;
        struct page *hsave_page;
        struct page *nested_msrpm_pages;
        int err;

        svm = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
        if (!svm) {
                err = -ENOMEM;
                goto out;
        }

        err = kvm_vcpu_init(&svm->vcpu, kvm, id);
        if (err)
                goto free_svm;

        err = -ENOMEM;
        page = alloc_page(GFP_KERNEL);
        if (!page)
                goto uninit;

        msrpm_pages = alloc_pages(GFP_KERNEL, MSRPM_ALLOC_ORDER);
        if (!msrpm_pages)
                goto free_page1;

        nested_msrpm_pages = alloc_pages(GFP_KERNEL, MSRPM_ALLOC_ORDER);
        if (!nested_msrpm_pages)
                goto free_page2;

        hsave_page = alloc_page(GFP_KERNEL);
        if (!hsave_page)
                goto free_page3;

        svm->nested.hsave = page_address(hsave_page);

        svm->msrpm = page_address(msrpm_pages);
        svm_vcpu_init_msrpm(svm->msrpm);

        svm->nested.msrpm = page_address(nested_msrpm_pages);
        svm_vcpu_init_msrpm(svm->nested.msrpm);

        svm->vmcb = page_address(page);
        clear_page(svm->vmcb);
        svm->vmcb_pa = page_to_pfn(page) << PAGE_SHIFT;
        svm->asid_generation = 0;
        init_vmcb(svm);

        fx_init(&svm->vcpu);
        svm->vcpu.arch.apic_base = 0xfee00000 | MSR_IA32_APICBASE_ENABLE;
        if (kvm_vcpu_is_bsp(&svm->vcpu))
                svm->vcpu.arch.apic_base |= MSR_IA32_APICBASE_BSP;

        return &svm->vcpu;

free_page3:
        __free_pages(nested_msrpm_pages, MSRPM_ALLOC_ORDER);
free_page2:
        __free_pages(msrpm_pages, MSRPM_ALLOC_ORDER);
free_page1:
        __free_page(page);
uninit:
        kvm_vcpu_uninit(&svm->vcpu);
free_svm:
        kmem_cache_free(kvm_vcpu_cache, svm);
out:
        return ERR_PTR(err);
}

static void svm_free_vcpu(struct kvm_vcpu *vcpu)
{
        struct vcpu_svm *svm = to_svm(vcpu);

        __free_page(pfn_to_page(svm->vmcb_pa >> PAGE_SHIFT));
        __free_pages(virt_to_page(svm->msrpm), MSRPM_ALLOC_ORDER);
        __free_page(virt_to_page(svm->nested.hsave));
        __free_pages(virt_to_page(svm->nested.msrpm), MSRPM_ALLOC_ORDER);
        kvm_vcpu_uninit(vcpu);
        kmem_cache_free(kvm_vcpu_cache, svm);
}
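
/*
 * Called when the vcpu is scheduled onto a (possibly different) physical
 * CPU.  On an unstable TSC the VMCB tsc_offset is adjusted so the guest
 * keeps seeing a monotonically increasing TSC; the host values of the
 * user-return MSRs are saved so svm_vcpu_put() can restore them.
 */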
static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
        struct vcpu_svm *svm = to_svm(vcpu);
        int i;

        if (unlikely(cpu != vcpu->cpu)) {
                u64 delta;

                if (check_tsc_unstable()) {
                        /*
                         * Make sure that the guest sees a monotonically
                         * increasing TSC.
                         */
                        delta = vcpu->arch.host_tsc - native_read_tsc();
                        svm->vmcb->control.tsc_offset += delta;
                        if (is_nested(svm))
                                svm->nested.hsave->control.tsc_offset += delta;
                }
                vcpu->cpu = cpu;
                kvm_migrate_timers(vcpu);
                svm->asid_generation = 0;
        }

        for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++)
                rdmsrl(host_save_user_msrs[i], svm->host_user_msrs[i]);
}

static void svm_vcpu_put(struct kvm_vcpu *vcpu)
{
        struct vcpu_svm *svm = to_svm(vcpu);
        int i;

        ++vcpu->stat.host_state_reload;
        for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++)
                wrmsrl(host_save_user_msrs[i], svm->host_user_msrs[i]);

        vcpu->arch.host_tsc = native_read_tsc();
}

static unsigned long svm_get_rflags(struct kvm_vcpu *vcpu)
{
        return to_svm(vcpu)->vmcb->save.rflags;
}

static void svm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
{
        to_svm(vcpu)->vmcb->save.rflags = rflags;
}

static void svm_cache_reg(struct kvm_vcpu *vcpu, enum kvm_reg reg)
{
        switch (reg) {
        case VCPU_EXREG_PDPTR:
                BUG_ON(!npt_enabled);
                load_pdptrs(vcpu, vcpu->arch.cr3);
                break;
        default:
                BUG();
        }
}

static void svm_set_vintr(struct vcpu_svm *svm)
{
        svm->vmcb->control.intercept |= 1ULL << INTERCEPT_VINTR;
}

static void svm_clear_vintr(struct vcpu_svm *svm)
{
        svm->vmcb->control.intercept &= ~(1ULL << INTERCEPT_VINTR);
}

static struct vmcb_seg *svm_seg(struct kvm_vcpu *vcpu, int seg)
{
        struct vmcb_save_area *save = &to_svm(vcpu)->vmcb->save;

        switch (seg) {
        case VCPU_SREG_CS: return &save->cs;
        case VCPU_SREG_DS: return &save->ds;
        case VCPU_SREG_ES: return &save->es;
        case VCPU_SREG_FS: return &save->fs;
        case VCPU_SREG_GS: return &save->gs;
        case VCPU_SREG_SS: return &save->ss;
        case VCPU_SREG_TR: return &save->tr;
        case VCPU_SREG_LDTR: return &save->ldtr;
        }
        BUG();
        return NULL;
}

static u64 svm_get_segment_base(struct kvm_vcpu *vcpu, int seg)
{
        struct vmcb_seg *s = svm_seg(vcpu, seg);

        return s->base;
}

static void svm_get_segment(struct kvm_vcpu *vcpu,
                            struct kvm_segment *var, int seg)
{
        struct vmcb_seg *s = svm_seg(vcpu, seg);

        var->base = s->base;
        var->limit = s->limit;
        var->selector = s->selector;
        var->type = s->attrib & SVM_SELECTOR_TYPE_MASK;
        var->s = (s->attrib >> SVM_SELECTOR_S_SHIFT) & 1;
        var->dpl = (s->attrib >> SVM_SELECTOR_DPL_SHIFT) & 3;
        var->present = (s->attrib >> SVM_SELECTOR_P_SHIFT) & 1;
        var->avl = (s->attrib >> SVM_SELECTOR_AVL_SHIFT) & 1;
        var->l = (s->attrib >> SVM_SELECTOR_L_SHIFT) & 1;
        var->db = (s->attrib >> SVM_SELECTOR_DB_SHIFT) & 1;
        var->g = (s->attrib >> SVM_SELECTOR_G_SHIFT) & 1;

        /*
         * AMD's VMCB does not have an explicit unusable field, so emulate it
         * for cross vendor migration purposes by "not present"
         */
        var->unusable = !var->present || (var->type == 0);

        switch (seg) {
        case VCPU_SREG_CS:
                /*
                 * SVM always stores 0 for the 'G' bit in the CS selector in
                 * the VMCB on a VMEXIT. This hurts cross-vendor migration:
                 * Intel's VMENTRY has a check on the 'G' bit.
                 */
                var->g = s->limit > 0xfffff;
                break;
        case VCPU_SREG_TR:
                /*
                 * Work around a bug where the busy flag in the tr selector
                 * isn't exposed
                 */
                var->type |= 0x2;
                break;
        case VCPU_SREG_DS:
        case VCPU_SREG_ES:
        case VCPU_SREG_FS:
        case VCPU_SREG_GS:
                /*
                 * The accessed bit must always be set in the segment
                 * descriptor cache, although it can be cleared in the
                 * descriptor, the cached bit always remains at 1. Since
                 * Intel has a check on this, set it here to support
                 * cross-vendor migration.
                 */
                if (!var->unusable)
                        var->type |= 0x1;
                break;
        case VCPU_SREG_SS:
                /*
                 * On AMD CPUs sometimes the DB bit in the segment
                 * descriptor is left as 1, although the whole segment has
                 * been made unusable. Clear it here to pass an Intel VMX
                 * entry check when cross vendor migrating.
                 */
                if (var->unusable)
                        var->db = 0;
                break;
        }
}

static int svm_get_cpl(struct kvm_vcpu *vcpu)
{
        struct vmcb_save_area *save = &to_svm(vcpu)->vmcb->save;

        return save->cpl;
}

static void svm_get_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
{
        struct vcpu_svm *svm = to_svm(vcpu);

        dt->size = svm->vmcb->save.idtr.limit;
        dt->address = svm->vmcb->save.idtr.base;
}

static void svm_set_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
{
        struct vcpu_svm *svm = to_svm(vcpu);

        svm->vmcb->save.idtr.limit = dt->size;
        svm->vmcb->save.idtr.base = dt->address;
}

static void svm_get_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
{
        struct vcpu_svm *svm = to_svm(vcpu);

        dt->size = svm->vmcb->save.gdtr.limit;
        dt->address = svm->vmcb->save.gdtr.base;
}

static void svm_set_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
{
        struct vcpu_svm *svm = to_svm(vcpu);

        svm->vmcb->save.gdtr.limit = dt->size;
        svm->vmcb->save.gdtr.base = dt->address;
}

static void svm_decache_cr0_guest_bits(struct kvm_vcpu *vcpu)
{
}

static void svm_decache_cr4_guest_bits(struct kvm_vcpu *vcpu)
{
}
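
/*
 * Decide whether CR0 accesses still need to be intercepted.  When the guest
 * view and the shadow CR0 agree (modulo the selective-intercept bits) and
 * the FPU is active, the CR0 read/write intercepts can be dropped; in
 * nested mode the L1 hypervisor's own CR0 intercepts are merged back in.
 */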
static void update_cr0_intercept(struct vcpu_svm *svm)
{
        struct vmcb *vmcb = svm->vmcb;
        ulong gcr0 = svm->vcpu.arch.cr0;
        u64 *hcr0 = &svm->vmcb->save.cr0;

        if (!svm->vcpu.fpu_active)
                *hcr0 |= SVM_CR0_SELECTIVE_MASK;
        else
                *hcr0 = (*hcr0 & ~SVM_CR0_SELECTIVE_MASK)
                        | (gcr0 & SVM_CR0_SELECTIVE_MASK);

        if (gcr0 == *hcr0 && svm->vcpu.fpu_active) {
                vmcb->control.intercept_cr_read &= ~INTERCEPT_CR0_MASK;
                vmcb->control.intercept_cr_write &= ~INTERCEPT_CR0_MASK;
                if (is_nested(svm)) {
                        struct vmcb *hsave = svm->nested.hsave;

                        hsave->control.intercept_cr_read  &= ~INTERCEPT_CR0_MASK;
                        hsave->control.intercept_cr_write &= ~INTERCEPT_CR0_MASK;
                        vmcb->control.intercept_cr_read |= svm->nested.intercept_cr_read;
                        vmcb->control.intercept_cr_write |= svm->nested.intercept_cr_write;
                }
        } else {
                svm->vmcb->control.intercept_cr_read |= INTERCEPT_CR0_MASK;
                svm->vmcb->control.intercept_cr_write |= INTERCEPT_CR0_MASK;
                if (is_nested(svm)) {
                        struct vmcb *hsave = svm->nested.hsave;

                        hsave->control.intercept_cr_read |= INTERCEPT_CR0_MASK;
                        hsave->control.intercept_cr_write |= INTERCEPT_CR0_MASK;
                }
        }
}

static void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
{
        struct vcpu_svm *svm = to_svm(vcpu);

        if (is_nested(svm)) {
                /*
                 * We are here because we run in nested mode, the host kvm
                 * intercepts cr0 writes but the l1 hypervisor does not.
                 * But the L1 hypervisor may intercept selective cr0 writes.
                 * This needs to be checked here.
                 */
                unsigned long old, new;

                /* Remove bits that would trigger a real cr0 write intercept */
                old = vcpu->arch.cr0 & SVM_CR0_SELECTIVE_MASK;
                new = cr0 & SVM_CR0_SELECTIVE_MASK;

                if (old == new) {
                        /* cr0 write with ts and mp unchanged */
                        svm->vmcb->control.exit_code = SVM_EXIT_CR0_SEL_WRITE;
                        if (nested_svm_exit_handled(svm) == NESTED_EXIT_DONE)
                                return;
                }
        }

#ifdef CONFIG_X86_64
        if (vcpu->arch.efer & EFER_LME) {
                if (!is_paging(vcpu) && (cr0 & X86_CR0_PG)) {
                        vcpu->arch.efer |= EFER_LMA;
                        svm->vmcb->save.efer |= EFER_LMA | EFER_LME;
                }

                if (is_paging(vcpu) && !(cr0 & X86_CR0_PG)) {
                        vcpu->arch.efer &= ~EFER_LMA;
                        svm->vmcb->save.efer &= ~(EFER_LMA | EFER_LME);
                }
        }
#endif
        vcpu->arch.cr0 = cr0;

        if (!npt_enabled)
                cr0 |= X86_CR0_PG | X86_CR0_WP;

        if (!vcpu->fpu_active)
                cr0 |= X86_CR0_TS;
        /*
         * re-enable caching here because the QEMU bios
         * does not do it - this results in some delay at
         * reboot
         */
        cr0 &= ~(X86_CR0_CD | X86_CR0_NW);
        svm->vmcb->save.cr0 = cr0;
        update_cr0_intercept(svm);
}

static void svm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
{
        unsigned long host_cr4_mce = read_cr4() & X86_CR4_MCE;
        unsigned long old_cr4 = to_svm(vcpu)->vmcb->save.cr4;

        if (npt_enabled && ((old_cr4 ^ cr4) & X86_CR4_PGE))
                force_new_asid(vcpu);

        vcpu->arch.cr4 = cr4;
        if (!npt_enabled)
                cr4 |= X86_CR4_PAE;
        cr4 |= host_cr4_mce;
        to_svm(vcpu)->vmcb->save.cr4 = cr4;
}

static void svm_set_segment(struct kvm_vcpu *vcpu,
                            struct kvm_segment *var, int seg)
{
        struct vcpu_svm *svm = to_svm(vcpu);
        struct vmcb_seg *s = svm_seg(vcpu, seg);

        s->base = var->base;
        s->limit = var->limit;
        s->selector = var->selector;
        if (var->unusable)
                s->attrib = 0;
        else {
                s->attrib = (var->type & SVM_SELECTOR_TYPE_MASK);
                s->attrib |= (var->s & 1) << SVM_SELECTOR_S_SHIFT;
                s->attrib |= (var->dpl & 3) << SVM_SELECTOR_DPL_SHIFT;
                s->attrib |= (var->present & 1) << SVM_SELECTOR_P_SHIFT;
                s->attrib |= (var->avl & 1) << SVM_SELECTOR_AVL_SHIFT;
                s->attrib |= (var->l & 1) << SVM_SELECTOR_L_SHIFT;
                s->attrib |= (var->db & 1) << SVM_SELECTOR_DB_SHIFT;
                s->attrib |= (var->g & 1) << SVM_SELECTOR_G_SHIFT;
        }
        if (seg == VCPU_SREG_CS)
                svm->vmcb->save.cpl
                        = (svm->vmcb->save.cs.attrib
                           >> SVM_SELECTOR_DPL_SHIFT) & 3;
}

static void update_db_intercept(struct kvm_vcpu *vcpu)
{
        struct vcpu_svm *svm = to_svm(vcpu);

        svm->vmcb->control.intercept_exceptions &=
                ~((1 << DB_VECTOR) | (1 << BP_VECTOR));

        if (svm->nmi_singlestep)
                svm->vmcb->control.intercept_exceptions |= (1 << DB_VECTOR);

        if (vcpu->guest_debug & KVM_GUESTDBG_ENABLE) {
                if (vcpu->guest_debug &
                    (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP))
                        svm->vmcb->control.intercept_exceptions |=
                                1 << DB_VECTOR;
                if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP)
                        svm->vmcb->control.intercept_exceptions |=
                                1 << BP_VECTOR;
        } else
                vcpu->guest_debug = 0;
}

static void svm_guest_debug(struct kvm_vcpu *vcpu, struct kvm_guest_debug *dbg)
{
        struct vcpu_svm *svm = to_svm(vcpu);

        if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)
                svm->vmcb->save.dr7 = dbg->arch.debugreg[7];
        else
                svm->vmcb->save.dr7 = vcpu->arch.dr7;

        update_db_intercept(vcpu);
}

static void load_host_msrs(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_X86_64
        wrmsrl(MSR_GS_BASE, to_svm(vcpu)->host_gs_base);
#endif
}

static void save_host_msrs(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_X86_64
        rdmsrl(MSR_GS_BASE, to_svm(vcpu)->host_gs_base);
#endif
}
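
/*
 * Hand out the next ASID for this vcpu.  When the per-CPU ASID space is
 * exhausted the generation counter is bumped and the TLB is flushed for all
 * ASIDs before reuse starts again from 1.
 */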
static void new_asid(struct vcpu_svm *svm, struct svm_cpu_data *sd)
{
        if (sd->next_asid > sd->max_asid) {
                ++sd->asid_generation;
                sd->next_asid = 1;
                svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ALL_ASID;
        }

        svm->asid_generation = sd->asid_generation;
        svm->vmcb->control.asid = sd->next_asid++;
}

static void svm_set_dr7(struct kvm_vcpu *vcpu, unsigned long value)
{
        struct vcpu_svm *svm = to_svm(vcpu);

        svm->vmcb->save.dr7 = value;
}
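
/*
 * #PF intercept handler.  exit_info_2 carries the faulting address (a
 * guest-physical address when nested paging is enabled) and exit_info_1 the
 * page-fault error code; both are forwarded to the MMU.
 */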
static int pf_interception(struct vcpu_svm *svm)
{
        u64 fault_address;
        u32 error_code;

        fault_address = svm->vmcb->control.exit_info_2;
        error_code = svm->vmcb->control.exit_info_1;

        trace_kvm_page_fault(fault_address, error_code);
        if (!npt_enabled && kvm_event_needs_reinjection(&svm->vcpu))
                kvm_mmu_unprotect_page_virt(&svm->vcpu, fault_address);
        return kvm_mmu_page_fault(&svm->vcpu, fault_address, error_code);
}

static int db_interception(struct vcpu_svm *svm)
{
        struct kvm_run *kvm_run = svm->vcpu.run;

        if (!(svm->vcpu.guest_debug &
              (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP)) &&
                !svm->nmi_singlestep) {
                kvm_queue_exception(&svm->vcpu, DB_VECTOR);
                return 1;
        }

        if (svm->nmi_singlestep) {
                svm->nmi_singlestep = false;
                if (!(svm->vcpu.guest_debug & KVM_GUESTDBG_SINGLESTEP))
                        svm->vmcb->save.rflags &=
                                ~(X86_EFLAGS_TF | X86_EFLAGS_RF);
                update_db_intercept(&svm->vcpu);
        }

        if (svm->vcpu.guest_debug &
            (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP)) {
                kvm_run->exit_reason = KVM_EXIT_DEBUG;
                kvm_run->debug.arch.pc =
                        svm->vmcb->save.cs.base + svm->vmcb->save.rip;
                kvm_run->debug.arch.exception = DB_VECTOR;
                return 0;
        }

        return 1;
}

static int bp_interception(struct vcpu_svm *svm)
{
        struct kvm_run *kvm_run = svm->vcpu.run;

        kvm_run->exit_reason = KVM_EXIT_DEBUG;
        kvm_run->debug.arch.pc = svm->vmcb->save.cs.base + svm->vmcb->save.rip;
        kvm_run->debug.arch.exception = BP_VECTOR;
        return 0;
}

static int ud_interception(struct vcpu_svm *svm)
{
        int er;

        er = emulate_instruction(&svm->vcpu, 0, 0, EMULTYPE_TRAP_UD);
        if (er != EMULATE_DONE)
                kvm_queue_exception(&svm->vcpu, UD_VECTOR);
        return 1;
}
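
/*
 * Give the guest direct access to the FPU again by dropping the #NM
 * intercept.  In nested mode the L1 hypervisor's own exception intercepts
 * are preserved; only the host-owned #NM intercept is cleared.
 */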
static void svm_fpu_activate(struct kvm_vcpu *vcpu)
{
        struct vcpu_svm *svm = to_svm(vcpu);
        u32 excp;

        if (is_nested(svm)) {
                u32 h_excp, n_excp;

                h_excp  = svm->nested.hsave->control.intercept_exceptions;
                n_excp  = svm->nested.intercept_exceptions;
                h_excp &= ~(1 << NM_VECTOR);
                excp    = h_excp | n_excp;
        } else {
                excp  = svm->vmcb->control.intercept_exceptions;
                excp &= ~(1 << NM_VECTOR);
        }

        svm->vmcb->control.intercept_exceptions = excp;

        svm->vcpu.fpu_active = 1;
        update_cr0_intercept(svm);
}

static int nm_interception(struct vcpu_svm *svm)
{
        svm_fpu_activate(&svm->vcpu);
        return 1;
}

static int mc_interception(struct vcpu_svm *svm)
{
        /*
         * On an #MC intercept the MCE handler is not called automatically in
         * the host. So do it by hand here.
         */
        asm volatile (
                "int $0x12\n");
        /* not sure if we ever come back to this point */

        return 1;
}

static int shutdown_interception(struct vcpu_svm *svm)
{
        struct kvm_run *kvm_run = svm->vcpu.run;

        /*
         * VMCB is undefined after a SHUTDOWN intercept
         * so reinitialize it.
         */
        clear_page(svm->vmcb);
        init_vmcb(svm);

        kvm_run->exit_reason = KVM_EXIT_SHUTDOWN;
        return 0;
}

static int io_interception(struct vcpu_svm *svm)
{
        struct kvm_vcpu *vcpu = &svm->vcpu;
        u32 io_info = svm->vmcb->control.exit_info_1; /* address size bug? */
        int size, in, string;
        unsigned port;

        ++svm->vcpu.stat.io_exits;
        string = (io_info & SVM_IOIO_STR_MASK) != 0;
        in = (io_info & SVM_IOIO_TYPE_MASK) != 0;
        if (string || in)
                return !(emulate_instruction(vcpu, 0, 0, 0) == EMULATE_DO_MMIO);

        port = io_info >> 16;
        size = (io_info & SVM_IOIO_SIZE_MASK) >> SVM_IOIO_SIZE_SHIFT;
        svm->next_rip = svm->vmcb->control.exit_info_2;
        skip_emulated_instruction(&svm->vcpu);

        return kvm_fast_pio_out(vcpu, size, port);
}

static int nmi_interception(struct vcpu_svm *svm)
{
        return 1;
}

static int intr_interception(struct vcpu_svm *svm)
{
        ++svm->vcpu.stat.irq_exits;
        return 1;
}

static int nop_on_interception(struct vcpu_svm *svm)
{
        return 1;
}

static int halt_interception(struct vcpu_svm *svm)
{
        svm->next_rip = kvm_rip_read(&svm->vcpu) + 1;
        skip_emulated_instruction(&svm->vcpu);
        return kvm_emulate_halt(&svm->vcpu);
}

static int vmmcall_interception(struct vcpu_svm *svm)
{
        svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
        skip_emulated_instruction(&svm->vcpu);
        kvm_emulate_hypercall(&svm->vcpu);
        return 1;
}
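
/*
 * Common permission check used before emulating the SVM instructions: the
 * guest must have EFER.SVME set, paging enabled and run at CPL 0, otherwise
 * #UD or #GP is injected.
 */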
static int nested_svm_check_permissions(struct vcpu_svm *svm)
{
        if (!(svm->vcpu.arch.efer & EFER_SVME)
            || !is_paging(&svm->vcpu)) {
                kvm_queue_exception(&svm->vcpu, UD_VECTOR);
                return 1;
        }

        if (svm->vmcb->save.cpl) {
                kvm_inject_gp(&svm->vcpu, 0);
                return 1;
        }

        return 0;
}

static int nested_svm_check_exception(struct vcpu_svm *svm, unsigned nr,
                                      bool has_error_code, u32 error_code)
{
        int vmexit;

        if (!is_nested(svm))
                return 0;

        svm->vmcb->control.exit_code = SVM_EXIT_EXCP_BASE + nr;
        svm->vmcb->control.exit_code_hi = 0;
        svm->vmcb->control.exit_info_1 = error_code;
        svm->vmcb->control.exit_info_2 = svm->vcpu.arch.cr2;

        vmexit = nested_svm_intercept(svm);
        if (vmexit == NESTED_EXIT_DONE)
                svm->nested.exit_required = true;

        return vmexit;
}

/* This function returns true if it is safe to enable the irq window */
static inline bool nested_svm_intr(struct vcpu_svm *svm)
{
        if (!is_nested(svm))
                return true;

        if (!(svm->vcpu.arch.hflags & HF_VINTR_MASK))
                return true;

        if (!(svm->vcpu.arch.hflags & HF_HIF_MASK))
                return false;

        svm->vmcb->control.exit_code = SVM_EXIT_INTR;
        svm->vmcb->control.exit_info_1 = 0;
        svm->vmcb->control.exit_info_2 = 0;

        if (svm->nested.intercept & 1ULL) {
                /*
                 * The #vmexit can't be emulated here directly because this
                 * code path runs with irqs and preemption disabled. A
                 * #vmexit emulation might sleep. Only signal request for
                 * the #vmexit here.
                 */
                svm->nested.exit_required = true;
                trace_kvm_nested_intr_vmexit(svm->vmcb->save.rip);
                return false;
        }

        return true;
}

/* This function returns true if it is safe to enable the nmi window */
static inline bool nested_svm_nmi(struct vcpu_svm *svm)
{
        if (!is_nested(svm))
                return true;

        if (!(svm->nested.intercept & (1ULL << INTERCEPT_NMI)))
                return true;

        svm->vmcb->control.exit_code = SVM_EXIT_NMI;
        svm->nested.exit_required = true;

        return false;
}
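
/*
 * Map a single guest page given by a guest-physical address.  The caller
 * must release the mapping with nested_svm_unmap(); on failure a #GP is
 * injected into the guest and NULL is returned.
 */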
  1266. static void *nested_svm_map(struct vcpu_svm *svm, u64 gpa, struct page **_page)
  1267. {
  1268. struct page *page;
  1269. might_sleep();
  1270. page = gfn_to_page(svm->vcpu.kvm, gpa >> PAGE_SHIFT);
  1271. if (is_error_page(page))
  1272. goto error;
  1273. *_page = page;
  1274. return kmap(page);
  1275. error:
  1276. kvm_release_page_clean(page);
  1277. kvm_inject_gp(&svm->vcpu, 0);
  1278. return NULL;
  1279. }
  1280. static void nested_svm_unmap(struct page *page)
  1281. {
  1282. kunmap(page);
  1283. kvm_release_page_dirty(page);
  1284. }
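/*
 * Descriptive note (added): check the nested guest's IO permission bitmap for
 * the port that caused this IOIO exit. The IOPM holds one bit per port:
 * byte index = port / 8, bit index = port % 8. Illustrative example:
 * port 0x3f8 maps to byte 127, bit 0.
 */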
  1285. static int nested_svm_intercept_ioio(struct vcpu_svm *svm)
  1286. {
  1287. unsigned port;
  1288. u8 val, bit;
  1289. u64 gpa;
  1290. if (!(svm->nested.intercept & (1ULL << INTERCEPT_IOIO_PROT)))
  1291. return NESTED_EXIT_HOST;
  1292. port = svm->vmcb->control.exit_info_1 >> 16;
  1293. gpa = svm->nested.vmcb_iopm + (port / 8);
  1294. bit = port % 8;
  1295. val = 0;
1296. if (kvm_read_guest(svm->vcpu.kvm, gpa, &val, 1))
1297. return NESTED_EXIT_DONE; /* can't read the iopm - assume the exit is intercepted */
1298. return (val & (1 << bit)) ? NESTED_EXIT_DONE : NESTED_EXIT_HOST;
  1299. }
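/*
 * Descriptive note (added): check the nested guest's MSR permission bitmap
 * for the MSR number in RCX. Each MSR uses two adjacent bits (read and write
 * intercept), so the bit to test is 2 * (msr & 0xf) + write within the 32-bit
 * word located at the offset returned by svm_msrpm_offset(). Illustrative
 * example: a write to an MSR with (msr & 0xf) == 1 tests bit 3 of that word.
 */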
  1300. static int nested_svm_exit_handled_msr(struct vcpu_svm *svm)
  1301. {
  1302. u32 offset, msr, value;
  1303. int write, mask;
  1304. if (!(svm->nested.intercept & (1ULL << INTERCEPT_MSR_PROT)))
  1305. return NESTED_EXIT_HOST;
  1306. msr = svm->vcpu.arch.regs[VCPU_REGS_RCX];
  1307. offset = svm_msrpm_offset(msr);
  1308. write = svm->vmcb->control.exit_info_1 & 1;
  1309. mask = 1 << ((2 * (msr & 0xf)) + write);
  1310. if (offset == MSR_INVALID)
  1311. return NESTED_EXIT_DONE;
1312. /* Offset is in 32 bit units but we need it in 8 bit units */
  1313. offset *= 4;
  1314. if (kvm_read_guest(svm->vcpu.kvm, svm->nested.vmcb_msrpm + offset, &value, 4))
  1315. return NESTED_EXIT_DONE;
  1316. return (value & mask) ? NESTED_EXIT_DONE : NESTED_EXIT_HOST;
  1317. }
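/*
 * Descriptive note (added): exits the host always handles itself, regardless
 * of the nested guest's intercept settings: INTR, NMI, nested page faults
 * when NPT is in use, and guest page faults while we shadow the nested page
 * tables. NESTED_EXIT_CONTINUE means the regular intercept check in
 * nested_svm_intercept() decides.
 */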
  1318. static int nested_svm_exit_special(struct vcpu_svm *svm)
  1319. {
  1320. u32 exit_code = svm->vmcb->control.exit_code;
  1321. switch (exit_code) {
  1322. case SVM_EXIT_INTR:
  1323. case SVM_EXIT_NMI:
  1324. return NESTED_EXIT_HOST;
  1325. case SVM_EXIT_NPF:
  1326. /* For now we are always handling NPFs when using them */
  1327. if (npt_enabled)
  1328. return NESTED_EXIT_HOST;
  1329. break;
  1330. case SVM_EXIT_EXCP_BASE + PF_VECTOR:
  1331. /* When we're shadowing, trap PFs */
  1332. if (!npt_enabled)
  1333. return NESTED_EXIT_HOST;
  1334. break;
  1335. case SVM_EXIT_EXCP_BASE + NM_VECTOR:
  1336. nm_interception(svm);
  1337. break;
  1338. default:
  1339. break;
  1340. }
  1341. return NESTED_EXIT_CONTINUE;
  1342. }
1343. /*
1344. * Returns NESTED_EXIT_DONE if this #vmexit is intercepted by the nested guest
1345. */
  1346. static int nested_svm_intercept(struct vcpu_svm *svm)
  1347. {
  1348. u32 exit_code = svm->vmcb->control.exit_code;
  1349. int vmexit = NESTED_EXIT_HOST;
  1350. switch (exit_code) {
  1351. case SVM_EXIT_MSR:
  1352. vmexit = nested_svm_exit_handled_msr(svm);
  1353. break;
  1354. case SVM_EXIT_IOIO:
  1355. vmexit = nested_svm_intercept_ioio(svm);
  1356. break;
  1357. case SVM_EXIT_READ_CR0 ... SVM_EXIT_READ_CR8: {
  1358. u32 cr_bits = 1 << (exit_code - SVM_EXIT_READ_CR0);
  1359. if (svm->nested.intercept_cr_read & cr_bits)
  1360. vmexit = NESTED_EXIT_DONE;
  1361. break;
  1362. }
  1363. case SVM_EXIT_WRITE_CR0 ... SVM_EXIT_WRITE_CR8: {
  1364. u32 cr_bits = 1 << (exit_code - SVM_EXIT_WRITE_CR0);
  1365. if (svm->nested.intercept_cr_write & cr_bits)
  1366. vmexit = NESTED_EXIT_DONE;
  1367. break;
  1368. }
  1369. case SVM_EXIT_READ_DR0 ... SVM_EXIT_READ_DR7: {
  1370. u32 dr_bits = 1 << (exit_code - SVM_EXIT_READ_DR0);
  1371. if (svm->nested.intercept_dr_read & dr_bits)
  1372. vmexit = NESTED_EXIT_DONE;
  1373. break;
  1374. }
  1375. case SVM_EXIT_WRITE_DR0 ... SVM_EXIT_WRITE_DR7: {
  1376. u32 dr_bits = 1 << (exit_code - SVM_EXIT_WRITE_DR0);
  1377. if (svm->nested.intercept_dr_write & dr_bits)
  1378. vmexit = NESTED_EXIT_DONE;
  1379. break;
  1380. }
  1381. case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 0x1f: {
  1382. u32 excp_bits = 1 << (exit_code - SVM_EXIT_EXCP_BASE);
  1383. if (svm->nested.intercept_exceptions & excp_bits)
  1384. vmexit = NESTED_EXIT_DONE;
  1385. break;
  1386. }
  1387. case SVM_EXIT_ERR: {
  1388. vmexit = NESTED_EXIT_DONE;
  1389. break;
  1390. }
  1391. default: {
  1392. u64 exit_bits = 1ULL << (exit_code - SVM_EXIT_INTR);
  1393. if (svm->nested.intercept & exit_bits)
  1394. vmexit = NESTED_EXIT_DONE;
  1395. }
  1396. }
  1397. return vmexit;
  1398. }
  1399. static int nested_svm_exit_handled(struct vcpu_svm *svm)
  1400. {
  1401. int vmexit;
  1402. vmexit = nested_svm_intercept(svm);
  1403. if (vmexit == NESTED_EXIT_DONE)
  1404. nested_svm_vmexit(svm);
  1405. return vmexit;
  1406. }
  1407. static inline void copy_vmcb_control_area(struct vmcb *dst_vmcb, struct vmcb *from_vmcb)
  1408. {
  1409. struct vmcb_control_area *dst = &dst_vmcb->control;
  1410. struct vmcb_control_area *from = &from_vmcb->control;
  1411. dst->intercept_cr_read = from->intercept_cr_read;
  1412. dst->intercept_cr_write = from->intercept_cr_write;
  1413. dst->intercept_dr_read = from->intercept_dr_read;
  1414. dst->intercept_dr_write = from->intercept_dr_write;
  1415. dst->intercept_exceptions = from->intercept_exceptions;
  1416. dst->intercept = from->intercept;
  1417. dst->iopm_base_pa = from->iopm_base_pa;
  1418. dst->msrpm_base_pa = from->msrpm_base_pa;
  1419. dst->tsc_offset = from->tsc_offset;
  1420. dst->asid = from->asid;
  1421. dst->tlb_ctl = from->tlb_ctl;
  1422. dst->int_ctl = from->int_ctl;
  1423. dst->int_vector = from->int_vector;
  1424. dst->int_state = from->int_state;
  1425. dst->exit_code = from->exit_code;
  1426. dst->exit_code_hi = from->exit_code_hi;
  1427. dst->exit_info_1 = from->exit_info_1;
  1428. dst->exit_info_2 = from->exit_info_2;
  1429. dst->exit_int_info = from->exit_int_info;
  1430. dst->exit_int_info_err = from->exit_int_info_err;
  1431. dst->nested_ctl = from->nested_ctl;
  1432. dst->event_inj = from->event_inj;
  1433. dst->event_inj_err = from->event_inj_err;
  1434. dst->nested_cr3 = from->nested_cr3;
  1435. dst->lbr_ctl = from->lbr_ctl;
  1436. }
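/*
 * Descriptive note (added): emulate a #VMEXIT to the L1 hypervisor: copy the
 * current guest state and exit information into the nested VMCB, restore the
 * host state stashed in the hsave area on VMRUN, and leave nested mode
 * (svm->nested.vmcb = 0, GIF cleared).
 */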
  1437. static int nested_svm_vmexit(struct vcpu_svm *svm)
  1438. {
  1439. struct vmcb *nested_vmcb;
  1440. struct vmcb *hsave = svm->nested.hsave;
  1441. struct vmcb *vmcb = svm->vmcb;
  1442. struct page *page;
  1443. trace_kvm_nested_vmexit_inject(vmcb->control.exit_code,
  1444. vmcb->control.exit_info_1,
  1445. vmcb->control.exit_info_2,
  1446. vmcb->control.exit_int_info,
  1447. vmcb->control.exit_int_info_err);
  1448. nested_vmcb = nested_svm_map(svm, svm->nested.vmcb, &page);
  1449. if (!nested_vmcb)
  1450. return 1;
  1451. /* Exit nested SVM mode */
  1452. svm->nested.vmcb = 0;
  1453. /* Give the current vmcb to the guest */
  1454. disable_gif(svm);
  1455. nested_vmcb->save.es = vmcb->save.es;
  1456. nested_vmcb->save.cs = vmcb->save.cs;
  1457. nested_vmcb->save.ss = vmcb->save.ss;
  1458. nested_vmcb->save.ds = vmcb->save.ds;
  1459. nested_vmcb->save.gdtr = vmcb->save.gdtr;
  1460. nested_vmcb->save.idtr = vmcb->save.idtr;
  1461. nested_vmcb->save.cr0 = kvm_read_cr0(&svm->vcpu);
  1462. nested_vmcb->save.cr3 = svm->vcpu.arch.cr3;
  1463. nested_vmcb->save.cr2 = vmcb->save.cr2;
  1464. nested_vmcb->save.cr4 = svm->vcpu.arch.cr4;
  1465. nested_vmcb->save.rflags = vmcb->save.rflags;
  1466. nested_vmcb->save.rip = vmcb->save.rip;
  1467. nested_vmcb->save.rsp = vmcb->save.rsp;
  1468. nested_vmcb->save.rax = vmcb->save.rax;
  1469. nested_vmcb->save.dr7 = vmcb->save.dr7;
  1470. nested_vmcb->save.dr6 = vmcb->save.dr6;
  1471. nested_vmcb->save.cpl = vmcb->save.cpl;
  1472. nested_vmcb->control.int_ctl = vmcb->control.int_ctl;
  1473. nested_vmcb->control.int_vector = vmcb->control.int_vector;
  1474. nested_vmcb->control.int_state = vmcb->control.int_state;
  1475. nested_vmcb->control.exit_code = vmcb->control.exit_code;
  1476. nested_vmcb->control.exit_code_hi = vmcb->control.exit_code_hi;
  1477. nested_vmcb->control.exit_info_1 = vmcb->control.exit_info_1;
  1478. nested_vmcb->control.exit_info_2 = vmcb->control.exit_info_2;
  1479. nested_vmcb->control.exit_int_info = vmcb->control.exit_int_info;
  1480. nested_vmcb->control.exit_int_info_err = vmcb->control.exit_int_info_err;
  1481. /*
  1482. * If we emulate a VMRUN/#VMEXIT in the same host #vmexit cycle we have
  1483. * to make sure that we do not lose injected events. So check event_inj
  1484. * here and copy it to exit_int_info if it is valid.
1485. * exit_int_info and event_inj can't both be valid because the case
  1486. * below only happens on a VMRUN instruction intercept which has
  1487. * no valid exit_int_info set.
  1488. */
  1489. if (vmcb->control.event_inj & SVM_EVTINJ_VALID) {
  1490. struct vmcb_control_area *nc = &nested_vmcb->control;
  1491. nc->exit_int_info = vmcb->control.event_inj;
  1492. nc->exit_int_info_err = vmcb->control.event_inj_err;
  1493. }
  1494. nested_vmcb->control.tlb_ctl = 0;
  1495. nested_vmcb->control.event_inj = 0;
  1496. nested_vmcb->control.event_inj_err = 0;
  1497. /* We always set V_INTR_MASKING and remember the old value in hflags */
  1498. if (!(svm->vcpu.arch.hflags & HF_VINTR_MASK))
  1499. nested_vmcb->control.int_ctl &= ~V_INTR_MASKING_MASK;
  1500. /* Restore the original control entries */
  1501. copy_vmcb_control_area(vmcb, hsave);
  1502. kvm_clear_exception_queue(&svm->vcpu);
  1503. kvm_clear_interrupt_queue(&svm->vcpu);
  1504. /* Restore selected save entries */
  1505. svm->vmcb->save.es = hsave->save.es;
  1506. svm->vmcb->save.cs = hsave->save.cs;
  1507. svm->vmcb->save.ss = hsave->save.ss;
  1508. svm->vmcb->save.ds = hsave->save.ds;
  1509. svm->vmcb->save.gdtr = hsave->save.gdtr;
  1510. svm->vmcb->save.idtr = hsave->save.idtr;
  1511. svm->vmcb->save.rflags = hsave->save.rflags;
  1512. svm_set_efer(&svm->vcpu, hsave->save.efer);
  1513. svm_set_cr0(&svm->vcpu, hsave->save.cr0 | X86_CR0_PE);
  1514. svm_set_cr4(&svm->vcpu, hsave->save.cr4);
  1515. if (npt_enabled) {
  1516. svm->vmcb->save.cr3 = hsave->save.cr3;
  1517. svm->vcpu.arch.cr3 = hsave->save.cr3;
  1518. } else {
  1519. kvm_set_cr3(&svm->vcpu, hsave->save.cr3);
  1520. }
  1521. kvm_register_write(&svm->vcpu, VCPU_REGS_RAX, hsave->save.rax);
  1522. kvm_register_write(&svm->vcpu, VCPU_REGS_RSP, hsave->save.rsp);
  1523. kvm_register_write(&svm->vcpu, VCPU_REGS_RIP, hsave->save.rip);
  1524. svm->vmcb->save.dr7 = 0;
  1525. svm->vmcb->save.cpl = 0;
  1526. svm->vmcb->control.exit_int_info = 0;
  1527. nested_svm_unmap(page);
  1528. kvm_mmu_reset_context(&svm->vcpu);
  1529. kvm_mmu_load(&svm->vcpu);
  1530. return 0;
  1531. }
  1532. static bool nested_svm_vmrun_msrpm(struct vcpu_svm *svm)
  1533. {
  1534. /*
  1535. * This function merges the msr permission bitmaps of kvm and the
1536. * nested vmcb. It is optimized in that it only merges the parts where
  1537. * the kvm msr permission bitmap may contain zero bits
  1538. */
  1539. int i;
  1540. if (!(svm->nested.intercept & (1ULL << INTERCEPT_MSR_PROT)))
  1541. return true;
  1542. for (i = 0; i < MSRPM_OFFSETS; i++) {
  1543. u32 value, p;
  1544. u64 offset;
  1545. if (msrpm_offsets[i] == 0xffffffff)
  1546. break;
  1547. p = msrpm_offsets[i];
  1548. offset = svm->nested.vmcb_msrpm + (p * 4);
  1549. if (kvm_read_guest(svm->vcpu.kvm, offset, &value, 4))
  1550. return false;
  1551. svm->nested.msrpm[p] = svm->msrpm[p] | value;
  1552. }
  1553. svm->vmcb->control.msrpm_base_pa = __pa(svm->nested.msrpm);
  1554. return true;
  1555. }
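/*
 * Descriptive note (added): emulate VMRUN: save the current (L1) state into
 * the hsave area, load the nested VMCB pointed to by RAX as the new guest
 * state, merge the L1 and L2 intercept bits and switch into nested mode.
 * Returns false if the nested VMCB could not be mapped.
 */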
  1556. static bool nested_svm_vmrun(struct vcpu_svm *svm)
  1557. {
  1558. struct vmcb *nested_vmcb;
  1559. struct vmcb *hsave = svm->nested.hsave;
  1560. struct vmcb *vmcb = svm->vmcb;
  1561. struct page *page;
  1562. u64 vmcb_gpa;
  1563. vmcb_gpa = svm->vmcb->save.rax;
  1564. nested_vmcb = nested_svm_map(svm, svm->vmcb->save.rax, &page);
  1565. if (!nested_vmcb)
  1566. return false;
  1567. trace_kvm_nested_vmrun(svm->vmcb->save.rip - 3, vmcb_gpa,
  1568. nested_vmcb->save.rip,
  1569. nested_vmcb->control.int_ctl,
  1570. nested_vmcb->control.event_inj,
  1571. nested_vmcb->control.nested_ctl);
  1572. trace_kvm_nested_intercepts(nested_vmcb->control.intercept_cr_read,
  1573. nested_vmcb->control.intercept_cr_write,
  1574. nested_vmcb->control.intercept_exceptions,
  1575. nested_vmcb->control.intercept);
  1576. /* Clear internal status */
  1577. kvm_clear_exception_queue(&svm->vcpu);
  1578. kvm_clear_interrupt_queue(&svm->vcpu);
  1579. /*
1580. * Save the old vmcb, so we don't need to pick which fields to save; we
1581. * can restore everything when a #VMEXIT occurs
  1582. */
  1583. hsave->save.es = vmcb->save.es;
  1584. hsave->save.cs = vmcb->save.cs;
  1585. hsave->save.ss = vmcb->save.ss;
  1586. hsave->save.ds = vmcb->save.ds;
  1587. hsave->save.gdtr = vmcb->save.gdtr;
  1588. hsave->save.idtr = vmcb->save.idtr;
  1589. hsave->save.efer = svm->vcpu.arch.efer;
  1590. hsave->save.cr0 = kvm_read_cr0(&svm->vcpu);
  1591. hsave->save.cr4 = svm->vcpu.arch.cr4;
  1592. hsave->save.rflags = vmcb->save.rflags;
  1593. hsave->save.rip = svm->next_rip;
  1594. hsave->save.rsp = vmcb->save.rsp;
  1595. hsave->save.rax = vmcb->save.rax;
  1596. if (npt_enabled)
  1597. hsave->save.cr3 = vmcb->save.cr3;
  1598. else
  1599. hsave->save.cr3 = svm->vcpu.arch.cr3;
  1600. copy_vmcb_control_area(hsave, vmcb);
  1601. if (svm->vmcb->save.rflags & X86_EFLAGS_IF)
  1602. svm->vcpu.arch.hflags |= HF_HIF_MASK;
  1603. else
  1604. svm->vcpu.arch.hflags &= ~HF_HIF_MASK;
  1605. /* Load the nested guest state */
  1606. svm->vmcb->save.es = nested_vmcb->save.es;
  1607. svm->vmcb->save.cs = nested_vmcb->save.cs;
  1608. svm->vmcb->save.ss = nested_vmcb->save.ss;
  1609. svm->vmcb->save.ds = nested_vmcb->save.ds;
  1610. svm->vmcb->save.gdtr = nested_vmcb->save.gdtr;
  1611. svm->vmcb->save.idtr = nested_vmcb->save.idtr;
  1612. svm->vmcb->save.rflags = nested_vmcb->save.rflags;
  1613. svm_set_efer(&svm->vcpu, nested_vmcb->save.efer);
  1614. svm_set_cr0(&svm->vcpu, nested_vmcb->save.cr0);
  1615. svm_set_cr4(&svm->vcpu, nested_vmcb->save.cr4);
  1616. if (npt_enabled) {
  1617. svm->vmcb->save.cr3 = nested_vmcb->save.cr3;
  1618. svm->vcpu.arch.cr3 = nested_vmcb->save.cr3;
  1619. } else
  1620. kvm_set_cr3(&svm->vcpu, nested_vmcb->save.cr3);
  1621. /* Guest paging mode is active - reset mmu */
  1622. kvm_mmu_reset_context(&svm->vcpu);
  1623. svm->vmcb->save.cr2 = svm->vcpu.arch.cr2 = nested_vmcb->save.cr2;
  1624. kvm_register_write(&svm->vcpu, VCPU_REGS_RAX, nested_vmcb->save.rax);
  1625. kvm_register_write(&svm->vcpu, VCPU_REGS_RSP, nested_vmcb->save.rsp);
  1626. kvm_register_write(&svm->vcpu, VCPU_REGS_RIP, nested_vmcb->save.rip);
  1627. /* In case we don't even reach vcpu_run, the fields are not updated */
  1628. svm->vmcb->save.rax = nested_vmcb->save.rax;
  1629. svm->vmcb->save.rsp = nested_vmcb->save.rsp;
  1630. svm->vmcb->save.rip = nested_vmcb->save.rip;
  1631. svm->vmcb->save.dr7 = nested_vmcb->save.dr7;
  1632. svm->vmcb->save.dr6 = nested_vmcb->save.dr6;
  1633. svm->vmcb->save.cpl = nested_vmcb->save.cpl;
  1634. svm->nested.vmcb_msrpm = nested_vmcb->control.msrpm_base_pa & ~0x0fffULL;
  1635. svm->nested.vmcb_iopm = nested_vmcb->control.iopm_base_pa & ~0x0fffULL;
  1636. /* cache intercepts */
  1637. svm->nested.intercept_cr_read = nested_vmcb->control.intercept_cr_read;
  1638. svm->nested.intercept_cr_write = nested_vmcb->control.intercept_cr_write;
  1639. svm->nested.intercept_dr_read = nested_vmcb->control.intercept_dr_read;
  1640. svm->nested.intercept_dr_write = nested_vmcb->control.intercept_dr_write;
  1641. svm->nested.intercept_exceptions = nested_vmcb->control.intercept_exceptions;
  1642. svm->nested.intercept = nested_vmcb->control.intercept;
  1643. force_new_asid(&svm->vcpu);
  1644. svm->vmcb->control.int_ctl = nested_vmcb->control.int_ctl | V_INTR_MASKING_MASK;
  1645. if (nested_vmcb->control.int_ctl & V_INTR_MASKING_MASK)
  1646. svm->vcpu.arch.hflags |= HF_VINTR_MASK;
  1647. else
  1648. svm->vcpu.arch.hflags &= ~HF_VINTR_MASK;
  1649. if (svm->vcpu.arch.hflags & HF_VINTR_MASK) {
  1650. /* We only want the cr8 intercept bits of the guest */
  1651. svm->vmcb->control.intercept_cr_read &= ~INTERCEPT_CR8_MASK;
  1652. svm->vmcb->control.intercept_cr_write &= ~INTERCEPT_CR8_MASK;
  1653. }
  1654. /*
  1655. * We don't want a nested guest to be more powerful than the guest, so
  1656. * all intercepts are ORed
  1657. */
  1658. svm->vmcb->control.intercept_cr_read |=
  1659. nested_vmcb->control.intercept_cr_read;
  1660. svm->vmcb->control.intercept_cr_write |=
  1661. nested_vmcb->control.intercept_cr_write;
  1662. svm->vmcb->control.intercept_dr_read |=
  1663. nested_vmcb->control.intercept_dr_read;
  1664. svm->vmcb->control.intercept_dr_write |=
  1665. nested_vmcb->control.intercept_dr_write;
  1666. svm->vmcb->control.intercept_exceptions |=
  1667. nested_vmcb->control.intercept_exceptions;
  1668. svm->vmcb->control.intercept |= nested_vmcb->control.intercept;
  1669. svm->vmcb->control.lbr_ctl = nested_vmcb->control.lbr_ctl;
  1670. svm->vmcb->control.int_vector = nested_vmcb->control.int_vector;
  1671. svm->vmcb->control.int_state = nested_vmcb->control.int_state;
  1672. svm->vmcb->control.tsc_offset += nested_vmcb->control.tsc_offset;
  1673. svm->vmcb->control.event_inj = nested_vmcb->control.event_inj;
  1674. svm->vmcb->control.event_inj_err = nested_vmcb->control.event_inj_err;
  1675. nested_svm_unmap(page);
1676. /* svm->nested.vmcb is our indicator whether nested SVM is active */
  1677. svm->nested.vmcb = vmcb_gpa;
  1678. enable_gif(svm);
  1679. return true;
  1680. }
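/*
 * Descriptive note (added): copy the register subset handled by VMLOAD and
 * VMSAVE (FS/GS/TR/LDTR, KERNEL_GS_BASE and the SYSCALL/SYSENTER MSR state)
 * between two VMCBs; the direction is chosen by vmload_interception() and
 * vmsave_interception() below.
 */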
  1681. static void nested_svm_vmloadsave(struct vmcb *from_vmcb, struct vmcb *to_vmcb)
  1682. {
  1683. to_vmcb->save.fs = from_vmcb->save.fs;
  1684. to_vmcb->save.gs = from_vmcb->save.gs;
  1685. to_vmcb->save.tr = from_vmcb->save.tr;
  1686. to_vmcb->save.ldtr = from_vmcb->save.ldtr;
  1687. to_vmcb->save.kernel_gs_base = from_vmcb->save.kernel_gs_base;
  1688. to_vmcb->save.star = from_vmcb->save.star;
  1689. to_vmcb->save.lstar = from_vmcb->save.lstar;
  1690. to_vmcb->save.cstar = from_vmcb->save.cstar;
  1691. to_vmcb->save.sfmask = from_vmcb->save.sfmask;
  1692. to_vmcb->save.sysenter_cs = from_vmcb->save.sysenter_cs;
  1693. to_vmcb->save.sysenter_esp = from_vmcb->save.sysenter_esp;
  1694. to_vmcb->save.sysenter_eip = from_vmcb->save.sysenter_eip;
  1695. }
  1696. static int vmload_interception(struct vcpu_svm *svm)
  1697. {
  1698. struct vmcb *nested_vmcb;
  1699. struct page *page;
  1700. if (nested_svm_check_permissions(svm))
  1701. return 1;
  1702. svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
  1703. skip_emulated_instruction(&svm->vcpu);
  1704. nested_vmcb = nested_svm_map(svm, svm->vmcb->save.rax, &page);
  1705. if (!nested_vmcb)
  1706. return 1;
  1707. nested_svm_vmloadsave(nested_vmcb, svm->vmcb);
  1708. nested_svm_unmap(page);
  1709. return 1;
  1710. }
  1711. static int vmsave_interception(struct vcpu_svm *svm)
  1712. {
  1713. struct vmcb *nested_vmcb;
  1714. struct page *page;
  1715. if (nested_svm_check_permissions(svm))
  1716. return 1;
  1717. svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
  1718. skip_emulated_instruction(&svm->vcpu);
  1719. nested_vmcb = nested_svm_map(svm, svm->vmcb->save.rax, &page);
  1720. if (!nested_vmcb)
  1721. return 1;
  1722. nested_svm_vmloadsave(svm->vmcb, nested_vmcb);
  1723. nested_svm_unmap(page);
  1724. return 1;
  1725. }
  1726. static int vmrun_interception(struct vcpu_svm *svm)
  1727. {
  1728. if (nested_svm_check_permissions(svm))
  1729. return 1;
  1730. svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
  1731. skip_emulated_instruction(&svm->vcpu);
  1732. if (!nested_svm_vmrun(svm))
  1733. return 1;
  1734. if (!nested_svm_vmrun_msrpm(svm))
  1735. goto failed;
  1736. return 1;
  1737. failed:
  1738. svm->vmcb->control.exit_code = SVM_EXIT_ERR;
  1739. svm->vmcb->control.exit_code_hi = 0;
  1740. svm->vmcb->control.exit_info_1 = 0;
  1741. svm->vmcb->control.exit_info_2 = 0;
  1742. nested_svm_vmexit(svm);
  1743. return 1;
  1744. }
  1745. static int stgi_interception(struct vcpu_svm *svm)
  1746. {
  1747. if (nested_svm_check_permissions(svm))
  1748. return 1;
  1749. svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
  1750. skip_emulated_instruction(&svm->vcpu);
  1751. enable_gif(svm);
  1752. return 1;
  1753. }
  1754. static int clgi_interception(struct vcpu_svm *svm)
  1755. {
  1756. if (nested_svm_check_permissions(svm))
  1757. return 1;
  1758. svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
  1759. skip_emulated_instruction(&svm->vcpu);
  1760. disable_gif(svm);
  1761. /* After a CLGI no interrupts should come */
  1762. svm_clear_vintr(svm);
  1763. svm->vmcb->control.int_ctl &= ~V_IRQ_MASK;
  1764. return 1;
  1765. }
  1766. static int invlpga_interception(struct vcpu_svm *svm)
  1767. {
  1768. struct kvm_vcpu *vcpu = &svm->vcpu;
  1769. trace_kvm_invlpga(svm->vmcb->save.rip, vcpu->arch.regs[VCPU_REGS_RCX],
  1770. vcpu->arch.regs[VCPU_REGS_RAX]);
  1771. /* Let's treat INVLPGA the same as INVLPG (can be optimized!) */
  1772. kvm_mmu_invlpg(vcpu, vcpu->arch.regs[VCPU_REGS_RAX]);
  1773. svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
  1774. skip_emulated_instruction(&svm->vcpu);
  1775. return 1;
  1776. }
  1777. static int skinit_interception(struct vcpu_svm *svm)
  1778. {
  1779. trace_kvm_skinit(svm->vmcb->save.rip, svm->vcpu.arch.regs[VCPU_REGS_RAX]);
  1780. kvm_queue_exception(&svm->vcpu, UD_VECTOR);
  1781. return 1;
  1782. }
  1783. static int invalid_op_interception(struct vcpu_svm *svm)
  1784. {
  1785. kvm_queue_exception(&svm->vcpu, UD_VECTOR);
  1786. return 1;
  1787. }
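/*
 * Descriptive note (added): decode a task-switch exit: the TSS selector comes
 * from exit_info_1, the switch reason from exit_info_2 and any pending event
 * from exit_int_info; the actual switch is then handed to the common
 * kvm_task_switch() emulation.
 */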
  1788. static int task_switch_interception(struct vcpu_svm *svm)
  1789. {
  1790. u16 tss_selector;
  1791. int reason;
  1792. int int_type = svm->vmcb->control.exit_int_info &
  1793. SVM_EXITINTINFO_TYPE_MASK;
  1794. int int_vec = svm->vmcb->control.exit_int_info & SVM_EVTINJ_VEC_MASK;
  1795. uint32_t type =
  1796. svm->vmcb->control.exit_int_info & SVM_EXITINTINFO_TYPE_MASK;
  1797. uint32_t idt_v =
  1798. svm->vmcb->control.exit_int_info & SVM_EXITINTINFO_VALID;
  1799. bool has_error_code = false;
  1800. u32 error_code = 0;
  1801. tss_selector = (u16)svm->vmcb->control.exit_info_1;
  1802. if (svm->vmcb->control.exit_info_2 &
  1803. (1ULL << SVM_EXITINFOSHIFT_TS_REASON_IRET))
  1804. reason = TASK_SWITCH_IRET;
  1805. else if (svm->vmcb->control.exit_info_2 &
  1806. (1ULL << SVM_EXITINFOSHIFT_TS_REASON_JMP))
  1807. reason = TASK_SWITCH_JMP;
  1808. else if (idt_v)
  1809. reason = TASK_SWITCH_GATE;
  1810. else
  1811. reason = TASK_SWITCH_CALL;
  1812. if (reason == TASK_SWITCH_GATE) {
  1813. switch (type) {
  1814. case SVM_EXITINTINFO_TYPE_NMI:
  1815. svm->vcpu.arch.nmi_injected = false;
  1816. break;
  1817. case SVM_EXITINTINFO_TYPE_EXEPT:
  1818. if (svm->vmcb->control.exit_info_2 &
  1819. (1ULL << SVM_EXITINFOSHIFT_TS_HAS_ERROR_CODE)) {
  1820. has_error_code = true;
  1821. error_code =
  1822. (u32)svm->vmcb->control.exit_info_2;
  1823. }
  1824. kvm_clear_exception_queue(&svm->vcpu);
  1825. break;
  1826. case SVM_EXITINTINFO_TYPE_INTR:
  1827. kvm_clear_interrupt_queue(&svm->vcpu);
  1828. break;
  1829. default:
  1830. break;
  1831. }
  1832. }
  1833. if (reason != TASK_SWITCH_GATE ||
  1834. int_type == SVM_EXITINTINFO_TYPE_SOFT ||
  1835. (int_type == SVM_EXITINTINFO_TYPE_EXEPT &&
  1836. (int_vec == OF_VECTOR || int_vec == BP_VECTOR)))
  1837. skip_emulated_instruction(&svm->vcpu);
  1838. if (kvm_task_switch(&svm->vcpu, tss_selector, reason,
  1839. has_error_code, error_code) == EMULATE_FAIL) {
  1840. svm->vcpu.run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
  1841. svm->vcpu.run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
  1842. svm->vcpu.run->internal.ndata = 0;
  1843. return 0;
  1844. }
  1845. return 1;
  1846. }
  1847. static int cpuid_interception(struct vcpu_svm *svm)
  1848. {
  1849. svm->next_rip = kvm_rip_read(&svm->vcpu) + 2;
  1850. kvm_emulate_cpuid(&svm->vcpu);
  1851. return 1;
  1852. }
  1853. static int iret_interception(struct vcpu_svm *svm)
  1854. {
  1855. ++svm->vcpu.stat.nmi_window_exits;
  1856. svm->vmcb->control.intercept &= ~(1UL << INTERCEPT_IRET);
  1857. svm->vcpu.arch.hflags |= HF_IRET_MASK;
  1858. return 1;
  1859. }
  1860. static int invlpg_interception(struct vcpu_svm *svm)
  1861. {
  1862. if (emulate_instruction(&svm->vcpu, 0, 0, 0) != EMULATE_DONE)
  1863. pr_unimpl(&svm->vcpu, "%s: failed\n", __func__);
  1864. return 1;
  1865. }
  1866. static int emulate_on_interception(struct vcpu_svm *svm)
  1867. {
  1868. if (emulate_instruction(&svm->vcpu, 0, 0, 0) != EMULATE_DONE)
  1869. pr_unimpl(&svm->vcpu, "%s: failed\n", __func__);
  1870. return 1;
  1871. }
  1872. static int cr8_write_interception(struct vcpu_svm *svm)
  1873. {
  1874. struct kvm_run *kvm_run = svm->vcpu.run;
  1875. u8 cr8_prev = kvm_get_cr8(&svm->vcpu);
  1876. /* instruction emulation calls kvm_set_cr8() */
  1877. emulate_instruction(&svm->vcpu, 0, 0, 0);
  1878. if (irqchip_in_kernel(svm->vcpu.kvm)) {
  1879. svm->vmcb->control.intercept_cr_write &= ~INTERCEPT_CR8_MASK;
  1880. return 1;
  1881. }
  1882. if (cr8_prev <= kvm_get_cr8(&svm->vcpu))
  1883. return 1;
  1884. kvm_run->exit_reason = KVM_EXIT_SET_TPR;
  1885. return 0;
  1886. }
  1887. static int svm_get_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 *data)
  1888. {
  1889. struct vcpu_svm *svm = to_svm(vcpu);
  1890. switch (ecx) {
  1891. case MSR_IA32_TSC: {
  1892. u64 tsc_offset;
  1893. if (is_nested(svm))
  1894. tsc_offset = svm->nested.hsave->control.tsc_offset;
  1895. else
  1896. tsc_offset = svm->vmcb->control.tsc_offset;
  1897. *data = tsc_offset + native_read_tsc();
  1898. break;
  1899. }
  1900. case MSR_K6_STAR:
  1901. *data = svm->vmcb->save.star;
  1902. break;
  1903. #ifdef CONFIG_X86_64
  1904. case MSR_LSTAR:
  1905. *data = svm->vmcb->save.lstar;
  1906. break;
  1907. case MSR_CSTAR:
  1908. *data = svm->vmcb->save.cstar;
  1909. break;
  1910. case MSR_KERNEL_GS_BASE:
  1911. *data = svm->vmcb->save.kernel_gs_base;
  1912. break;
  1913. case MSR_SYSCALL_MASK:
  1914. *data = svm->vmcb->save.sfmask;
  1915. break;
  1916. #endif
  1917. case MSR_IA32_SYSENTER_CS:
  1918. *data = svm->vmcb->save.sysenter_cs;
  1919. break;
  1920. case MSR_IA32_SYSENTER_EIP:
  1921. *data = svm->sysenter_eip;
  1922. break;
  1923. case MSR_IA32_SYSENTER_ESP:
  1924. *data = svm->sysenter_esp;
  1925. break;
  1926. /*
  1927. * Nobody will change the following 5 values in the VMCB so we can
  1928. * safely return them on rdmsr. They will always be 0 until LBRV is
  1929. * implemented.
  1930. */
  1931. case MSR_IA32_DEBUGCTLMSR:
  1932. *data = svm->vmcb->save.dbgctl;
  1933. break;
  1934. case MSR_IA32_LASTBRANCHFROMIP:
  1935. *data = svm->vmcb->save.br_from;
  1936. break;
  1937. case MSR_IA32_LASTBRANCHTOIP:
  1938. *data = svm->vmcb->save.br_to;
  1939. break;
  1940. case MSR_IA32_LASTINTFROMIP:
  1941. *data = svm->vmcb->save.last_excp_from;
  1942. break;
  1943. case MSR_IA32_LASTINTTOIP:
  1944. *data = svm->vmcb->save.last_excp_to;
  1945. break;
  1946. case MSR_VM_HSAVE_PA:
  1947. *data = svm->nested.hsave_msr;
  1948. break;
  1949. case MSR_VM_CR:
  1950. *data = svm->nested.vm_cr_msr;
  1951. break;
  1952. case MSR_IA32_UCODE_REV:
  1953. *data = 0x01000065;
  1954. break;
  1955. default:
  1956. return kvm_get_msr_common(vcpu, ecx, data);
  1957. }
  1958. return 0;
  1959. }
  1960. static int rdmsr_interception(struct vcpu_svm *svm)
  1961. {
  1962. u32 ecx = svm->vcpu.arch.regs[VCPU_REGS_RCX];
  1963. u64 data;
  1964. if (svm_get_msr(&svm->vcpu, ecx, &data)) {
  1965. trace_kvm_msr_read_ex(ecx);
  1966. kvm_inject_gp(&svm->vcpu, 0);
  1967. } else {
  1968. trace_kvm_msr_read(ecx, data);
  1969. svm->vcpu.arch.regs[VCPU_REGS_RAX] = data & 0xffffffff;
  1970. svm->vcpu.arch.regs[VCPU_REGS_RDX] = data >> 32;
  1971. svm->next_rip = kvm_rip_read(&svm->vcpu) + 2;
  1972. skip_emulated_instruction(&svm->vcpu);
  1973. }
  1974. return 1;
  1975. }
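/*
 * Descriptive note (added): emulated write to the VM_CR MSR. Writes with
 * reserved bits set fault; once SVM_DIS has been set, the LOCK and DIS bits
 * become read-only, and setting SVM_DIS while EFER.SVME is still enabled is
 * rejected.
 */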
  1976. static int svm_set_vm_cr(struct kvm_vcpu *vcpu, u64 data)
  1977. {
  1978. struct vcpu_svm *svm = to_svm(vcpu);
  1979. int svm_dis, chg_mask;
  1980. if (data & ~SVM_VM_CR_VALID_MASK)
  1981. return 1;
  1982. chg_mask = SVM_VM_CR_VALID_MASK;
  1983. if (svm->nested.vm_cr_msr & SVM_VM_CR_SVM_DIS_MASK)
  1984. chg_mask &= ~(SVM_VM_CR_SVM_LOCK_MASK | SVM_VM_CR_SVM_DIS_MASK);
  1985. svm->nested.vm_cr_msr &= ~chg_mask;
  1986. svm->nested.vm_cr_msr |= (data & chg_mask);
  1987. svm_dis = svm->nested.vm_cr_msr & SVM_VM_CR_SVM_DIS_MASK;
  1988. /* check for svm_disable while efer.svme is set */
  1989. if (svm_dis && (vcpu->arch.efer & EFER_SVME))
  1990. return 1;
  1991. return 0;
  1992. }
  1993. static int svm_set_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 data)
  1994. {
  1995. struct vcpu_svm *svm = to_svm(vcpu);
  1996. switch (ecx) {
  1997. case MSR_IA32_TSC: {
  1998. u64 tsc_offset = data - native_read_tsc();
  1999. u64 g_tsc_offset = 0;
  2000. if (is_nested(svm)) {
  2001. g_tsc_offset = svm->vmcb->control.tsc_offset -
  2002. svm->nested.hsave->control.tsc_offset;
  2003. svm->nested.hsave->control.tsc_offset = tsc_offset;
  2004. }
  2005. svm->vmcb->control.tsc_offset = tsc_offset + g_tsc_offset;
  2006. break;
  2007. }
  2008. case MSR_K6_STAR:
  2009. svm->vmcb->save.star = data;
  2010. break;
  2011. #ifdef CONFIG_X86_64
  2012. case MSR_LSTAR:
  2013. svm->vmcb->save.lstar = data;
  2014. break;
  2015. case MSR_CSTAR:
  2016. svm->vmcb->save.cstar = data;
  2017. break;
  2018. case MSR_KERNEL_GS_BASE:
  2019. svm->vmcb->save.kernel_gs_base = data;
  2020. break;
  2021. case MSR_SYSCALL_MASK:
  2022. svm->vmcb->save.sfmask = data;
  2023. break;
  2024. #endif
  2025. case MSR_IA32_SYSENTER_CS:
  2026. svm->vmcb->save.sysenter_cs = data;
  2027. break;
  2028. case MSR_IA32_SYSENTER_EIP:
  2029. svm->sysenter_eip = data;
  2030. svm->vmcb->save.sysenter_eip = data;
  2031. break;
  2032. case MSR_IA32_SYSENTER_ESP:
  2033. svm->sysenter_esp = data;
  2034. svm->vmcb->save.sysenter_esp = data;
  2035. break;
  2036. case MSR_IA32_DEBUGCTLMSR:
  2037. if (!svm_has(SVM_FEATURE_LBRV)) {
  2038. pr_unimpl(vcpu, "%s: MSR_IA32_DEBUGCTL 0x%llx, nop\n",
  2039. __func__, data);
  2040. break;
  2041. }
  2042. if (data & DEBUGCTL_RESERVED_BITS)
  2043. return 1;
  2044. svm->vmcb->save.dbgctl = data;
  2045. if (data & (1ULL<<0))
  2046. svm_enable_lbrv(svm);
  2047. else
  2048. svm_disable_lbrv(svm);
  2049. break;
  2050. case MSR_VM_HSAVE_PA:
  2051. svm->nested.hsave_msr = data;
  2052. break;
  2053. case MSR_VM_CR:
  2054. return svm_set_vm_cr(vcpu, data);
  2055. case MSR_VM_IGNNE:
  2056. pr_unimpl(vcpu, "unimplemented wrmsr: 0x%x data 0x%llx\n", ecx, data);
  2057. break;
  2058. default:
  2059. return kvm_set_msr_common(vcpu, ecx, data);
  2060. }
  2061. return 0;
  2062. }
  2063. static int wrmsr_interception(struct vcpu_svm *svm)
  2064. {
  2065. u32 ecx = svm->vcpu.arch.regs[VCPU_REGS_RCX];
  2066. u64 data = (svm->vcpu.arch.regs[VCPU_REGS_RAX] & -1u)
  2067. | ((u64)(svm->vcpu.arch.regs[VCPU_REGS_RDX] & -1u) << 32);
  2068. svm->next_rip = kvm_rip_read(&svm->vcpu) + 2;
  2069. if (svm_set_msr(&svm->vcpu, ecx, data)) {
  2070. trace_kvm_msr_write_ex(ecx, data);
  2071. kvm_inject_gp(&svm->vcpu, 0);
  2072. } else {
  2073. trace_kvm_msr_write(ecx, data);
  2074. skip_emulated_instruction(&svm->vcpu);
  2075. }
  2076. return 1;
  2077. }
  2078. static int msr_interception(struct vcpu_svm *svm)
  2079. {
  2080. if (svm->vmcb->control.exit_info_1)
  2081. return wrmsr_interception(svm);
  2082. else
  2083. return rdmsr_interception(svm);
  2084. }
  2085. static int interrupt_window_interception(struct vcpu_svm *svm)
  2086. {
  2087. struct kvm_run *kvm_run = svm->vcpu.run;
  2088. svm_clear_vintr(svm);
  2089. svm->vmcb->control.int_ctl &= ~V_IRQ_MASK;
  2090. /*
2091. * If user space is waiting to inject interrupts, exit as soon as
2092. * possible
  2093. */
  2094. if (!irqchip_in_kernel(svm->vcpu.kvm) &&
  2095. kvm_run->request_interrupt_window &&
  2096. !kvm_cpu_has_interrupt(&svm->vcpu)) {
  2097. ++svm->vcpu.stat.irq_window_exits;
  2098. kvm_run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN;
  2099. return 0;
  2100. }
  2101. return 1;
  2102. }
  2103. static int pause_interception(struct vcpu_svm *svm)
  2104. {
  2105. kvm_vcpu_on_spin(&(svm->vcpu));
  2106. return 1;
  2107. }
  2108. static int (*svm_exit_handlers[])(struct vcpu_svm *svm) = {
  2109. [SVM_EXIT_READ_CR0] = emulate_on_interception,
  2110. [SVM_EXIT_READ_CR3] = emulate_on_interception,
  2111. [SVM_EXIT_READ_CR4] = emulate_on_interception,
  2112. [SVM_EXIT_READ_CR8] = emulate_on_interception,
  2113. [SVM_EXIT_CR0_SEL_WRITE] = emulate_on_interception,
  2114. [SVM_EXIT_WRITE_CR0] = emulate_on_interception,
  2115. [SVM_EXIT_WRITE_CR3] = emulate_on_interception,
  2116. [SVM_EXIT_WRITE_CR4] = emulate_on_interception,
  2117. [SVM_EXIT_WRITE_CR8] = cr8_write_interception,
  2118. [SVM_EXIT_READ_DR0] = emulate_on_interception,
  2119. [SVM_EXIT_READ_DR1] = emulate_on_interception,
  2120. [SVM_EXIT_READ_DR2] = emulate_on_interception,
  2121. [SVM_EXIT_READ_DR3] = emulate_on_interception,
  2122. [SVM_EXIT_READ_DR4] = emulate_on_interception,
  2123. [SVM_EXIT_READ_DR5] = emulate_on_interception,
  2124. [SVM_EXIT_READ_DR6] = emulate_on_interception,
  2125. [SVM_EXIT_READ_DR7] = emulate_on_interception,
  2126. [SVM_EXIT_WRITE_DR0] = emulate_on_interception,
  2127. [SVM_EXIT_WRITE_DR1] = emulate_on_interception,
  2128. [SVM_EXIT_WRITE_DR2] = emulate_on_interception,
  2129. [SVM_EXIT_WRITE_DR3] = emulate_on_interception,
  2130. [SVM_EXIT_WRITE_DR4] = emulate_on_interception,
  2131. [SVM_EXIT_WRITE_DR5] = emulate_on_interception,
  2132. [SVM_EXIT_WRITE_DR6] = emulate_on_interception,
  2133. [SVM_EXIT_WRITE_DR7] = emulate_on_interception,
  2134. [SVM_EXIT_EXCP_BASE + DB_VECTOR] = db_interception,
  2135. [SVM_EXIT_EXCP_BASE + BP_VECTOR] = bp_interception,
  2136. [SVM_EXIT_EXCP_BASE + UD_VECTOR] = ud_interception,
  2137. [SVM_EXIT_EXCP_BASE + PF_VECTOR] = pf_interception,
  2138. [SVM_EXIT_EXCP_BASE + NM_VECTOR] = nm_interception,
  2139. [SVM_EXIT_EXCP_BASE + MC_VECTOR] = mc_interception,
  2140. [SVM_EXIT_INTR] = intr_interception,
  2141. [SVM_EXIT_NMI] = nmi_interception,
  2142. [SVM_EXIT_SMI] = nop_on_interception,
  2143. [SVM_EXIT_INIT] = nop_on_interception,
  2144. [SVM_EXIT_VINTR] = interrupt_window_interception,
  2145. [SVM_EXIT_CPUID] = cpuid_interception,
  2146. [SVM_EXIT_IRET] = iret_interception,
  2147. [SVM_EXIT_INVD] = emulate_on_interception,
  2148. [SVM_EXIT_PAUSE] = pause_interception,
  2149. [SVM_EXIT_HLT] = halt_interception,
  2150. [SVM_EXIT_INVLPG] = invlpg_interception,
  2151. [SVM_EXIT_INVLPGA] = invlpga_interception,
  2152. [SVM_EXIT_IOIO] = io_interception,
  2153. [SVM_EXIT_MSR] = msr_interception,
  2154. [SVM_EXIT_TASK_SWITCH] = task_switch_interception,
  2155. [SVM_EXIT_SHUTDOWN] = shutdown_interception,
  2156. [SVM_EXIT_VMRUN] = vmrun_interception,
  2157. [SVM_EXIT_VMMCALL] = vmmcall_interception,
  2158. [SVM_EXIT_VMLOAD] = vmload_interception,
  2159. [SVM_EXIT_VMSAVE] = vmsave_interception,
  2160. [SVM_EXIT_STGI] = stgi_interception,
  2161. [SVM_EXIT_CLGI] = clgi_interception,
  2162. [SVM_EXIT_SKINIT] = skinit_interception,
  2163. [SVM_EXIT_WBINVD] = emulate_on_interception,
  2164. [SVM_EXIT_MONITOR] = invalid_op_interception,
  2165. [SVM_EXIT_MWAIT] = invalid_op_interception,
  2166. [SVM_EXIT_NPF] = pf_interception,
  2167. };
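/*
 * Descriptive note (added): top-level exit handler: complete any pending
 * nested #VMEXIT first, give a nested guest the chance to claim the exit,
 * then dispatch the exit code through the svm_exit_handlers[] table above.
 */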
  2168. static int handle_exit(struct kvm_vcpu *vcpu)
  2169. {
  2170. struct vcpu_svm *svm = to_svm(vcpu);
  2171. struct kvm_run *kvm_run = vcpu->run;
  2172. u32 exit_code = svm->vmcb->control.exit_code;
  2173. trace_kvm_exit(exit_code, vcpu);
  2174. if (!(svm->vmcb->control.intercept_cr_write & INTERCEPT_CR0_MASK))
  2175. vcpu->arch.cr0 = svm->vmcb->save.cr0;
  2176. if (npt_enabled)
  2177. vcpu->arch.cr3 = svm->vmcb->save.cr3;
  2178. if (unlikely(svm->nested.exit_required)) {
  2179. nested_svm_vmexit(svm);
  2180. svm->nested.exit_required = false;
  2181. return 1;
  2182. }
  2183. if (is_nested(svm)) {
  2184. int vmexit;
  2185. trace_kvm_nested_vmexit(svm->vmcb->save.rip, exit_code,
  2186. svm->vmcb->control.exit_info_1,
  2187. svm->vmcb->control.exit_info_2,
  2188. svm->vmcb->control.exit_int_info,
  2189. svm->vmcb->control.exit_int_info_err);
  2190. vmexit = nested_svm_exit_special(svm);
  2191. if (vmexit == NESTED_EXIT_CONTINUE)
  2192. vmexit = nested_svm_exit_handled(svm);
  2193. if (vmexit == NESTED_EXIT_DONE)
  2194. return 1;
  2195. }
  2196. svm_complete_interrupts(svm);
  2197. if (svm->vmcb->control.exit_code == SVM_EXIT_ERR) {
  2198. kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY;
  2199. kvm_run->fail_entry.hardware_entry_failure_reason
  2200. = svm->vmcb->control.exit_code;
  2201. return 0;
  2202. }
  2203. if (is_external_interrupt(svm->vmcb->control.exit_int_info) &&
  2204. exit_code != SVM_EXIT_EXCP_BASE + PF_VECTOR &&
  2205. exit_code != SVM_EXIT_NPF && exit_code != SVM_EXIT_TASK_SWITCH)
2206. printk(KERN_ERR "%s: unexpected exit_int_info 0x%x "
  2207. "exit_code 0x%x\n",
  2208. __func__, svm->vmcb->control.exit_int_info,
  2209. exit_code);
  2210. if (exit_code >= ARRAY_SIZE(svm_exit_handlers)
  2211. || !svm_exit_handlers[exit_code]) {
  2212. kvm_run->exit_reason = KVM_EXIT_UNKNOWN;
  2213. kvm_run->hw.hardware_exit_reason = exit_code;
  2214. return 0;
  2215. }
  2216. return svm_exit_handlers[exit_code](svm);
  2217. }
  2218. static void reload_tss(struct kvm_vcpu *vcpu)
  2219. {
  2220. int cpu = raw_smp_processor_id();
  2221. struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
  2222. sd->tss_desc->type = 9; /* available 32/64-bit TSS */
  2223. load_TR_desc();
  2224. }
  2225. static void pre_svm_run(struct vcpu_svm *svm)
  2226. {
  2227. int cpu = raw_smp_processor_id();
  2228. struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
  2229. svm->vmcb->control.tlb_ctl = TLB_CONTROL_DO_NOTHING;
  2230. /* FIXME: handle wraparound of asid_generation */
  2231. if (svm->asid_generation != sd->asid_generation)
  2232. new_asid(svm, sd);
  2233. }
  2234. static void svm_inject_nmi(struct kvm_vcpu *vcpu)
  2235. {
  2236. struct vcpu_svm *svm = to_svm(vcpu);
  2237. svm->vmcb->control.event_inj = SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_NMI;
  2238. vcpu->arch.hflags |= HF_NMI_MASK;
  2239. svm->vmcb->control.intercept |= (1UL << INTERCEPT_IRET);
  2240. ++vcpu->stat.nmi_injections;
  2241. }
  2242. static inline void svm_inject_irq(struct vcpu_svm *svm, int irq)
  2243. {
  2244. struct vmcb_control_area *control;
  2245. trace_kvm_inj_virq(irq);
  2246. ++svm->vcpu.stat.irq_injections;
  2247. control = &svm->vmcb->control;
  2248. control->int_vector = irq;
  2249. control->int_ctl &= ~V_INTR_PRIO_MASK;
  2250. control->int_ctl |= V_IRQ_MASK |
  2251. ((/*control->int_vector >> 4*/ 0xf) << V_INTR_PRIO_SHIFT);
  2252. }
  2253. static void svm_set_irq(struct kvm_vcpu *vcpu)
  2254. {
  2255. struct vcpu_svm *svm = to_svm(vcpu);
  2256. BUG_ON(!(gif_set(svm)));
  2257. svm->vmcb->control.event_inj = vcpu->arch.interrupt.nr |
  2258. SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_INTR;
  2259. }
  2260. static void update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr)
  2261. {
  2262. struct vcpu_svm *svm = to_svm(vcpu);
  2263. if (is_nested(svm) && (vcpu->arch.hflags & HF_VINTR_MASK))
  2264. return;
  2265. if (irr == -1)
  2266. return;
  2267. if (tpr >= irr)
  2268. svm->vmcb->control.intercept_cr_write |= INTERCEPT_CR8_MASK;
  2269. }
  2270. static int svm_nmi_allowed(struct kvm_vcpu *vcpu)
  2271. {
  2272. struct vcpu_svm *svm = to_svm(vcpu);
  2273. struct vmcb *vmcb = svm->vmcb;
  2274. int ret;
  2275. ret = !(vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK) &&
  2276. !(svm->vcpu.arch.hflags & HF_NMI_MASK);
  2277. ret = ret && gif_set(svm) && nested_svm_nmi(svm);
  2278. return ret;
  2279. }
  2280. static bool svm_get_nmi_mask(struct kvm_vcpu *vcpu)
  2281. {
  2282. struct vcpu_svm *svm = to_svm(vcpu);
  2283. return !!(svm->vcpu.arch.hflags & HF_NMI_MASK);
  2284. }
  2285. static void svm_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked)
  2286. {
  2287. struct vcpu_svm *svm = to_svm(vcpu);
  2288. if (masked) {
  2289. svm->vcpu.arch.hflags |= HF_NMI_MASK;
  2290. svm->vmcb->control.intercept |= (1UL << INTERCEPT_IRET);
  2291. } else {
  2292. svm->vcpu.arch.hflags &= ~HF_NMI_MASK;
  2293. svm->vmcb->control.intercept &= ~(1UL << INTERCEPT_IRET);
  2294. }
  2295. }
  2296. static int svm_interrupt_allowed(struct kvm_vcpu *vcpu)
  2297. {
  2298. struct vcpu_svm *svm = to_svm(vcpu);
  2299. struct vmcb *vmcb = svm->vmcb;
  2300. int ret;
  2301. if (!gif_set(svm) ||
  2302. (vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK))
  2303. return 0;
  2304. ret = !!(vmcb->save.rflags & X86_EFLAGS_IF);
  2305. if (is_nested(svm))
  2306. return ret && !(svm->vcpu.arch.hflags & HF_VINTR_MASK);
  2307. return ret;
  2308. }
  2309. static void enable_irq_window(struct kvm_vcpu *vcpu)
  2310. {
  2311. struct vcpu_svm *svm = to_svm(vcpu);
  2312. /*
  2313. * In case GIF=0 we can't rely on the CPU to tell us when GIF becomes
  2314. * 1, because that's a separate STGI/VMRUN intercept. The next time we
  2315. * get that intercept, this function will be called again though and
  2316. * we'll get the vintr intercept.
  2317. */
  2318. if (gif_set(svm) && nested_svm_intr(svm)) {
  2319. svm_set_vintr(svm);
  2320. svm_inject_irq(svm, 0x0);
  2321. }
  2322. }
  2323. static void enable_nmi_window(struct kvm_vcpu *vcpu)
  2324. {
  2325. struct vcpu_svm *svm = to_svm(vcpu);
  2326. if ((svm->vcpu.arch.hflags & (HF_NMI_MASK | HF_IRET_MASK))
  2327. == HF_NMI_MASK)
  2328. return; /* IRET will cause a vm exit */
  2329. /*
2330. * Something prevents NMI from being injected. Single step over possible
  2331. * problem (IRET or exception injection or interrupt shadow)
  2332. */
  2333. svm->nmi_singlestep = true;
  2334. svm->vmcb->save.rflags |= (X86_EFLAGS_TF | X86_EFLAGS_RF);
  2335. update_db_intercept(vcpu);
  2336. }
  2337. static int svm_set_tss_addr(struct kvm *kvm, unsigned int addr)
  2338. {
  2339. return 0;
  2340. }
  2341. static void svm_flush_tlb(struct kvm_vcpu *vcpu)
  2342. {
  2343. force_new_asid(vcpu);
  2344. }
  2345. static void svm_prepare_guest_switch(struct kvm_vcpu *vcpu)
  2346. {
  2347. }
  2348. static inline void sync_cr8_to_lapic(struct kvm_vcpu *vcpu)
  2349. {
  2350. struct vcpu_svm *svm = to_svm(vcpu);
  2351. if (is_nested(svm) && (vcpu->arch.hflags & HF_VINTR_MASK))
  2352. return;
  2353. if (!(svm->vmcb->control.intercept_cr_write & INTERCEPT_CR8_MASK)) {
  2354. int cr8 = svm->vmcb->control.int_ctl & V_TPR_MASK;
  2355. kvm_set_cr8(vcpu, cr8);
  2356. }
  2357. }
  2358. static inline void sync_lapic_to_cr8(struct kvm_vcpu *vcpu)
  2359. {
  2360. struct vcpu_svm *svm = to_svm(vcpu);
  2361. u64 cr8;
  2362. if (is_nested(svm) && (vcpu->arch.hflags & HF_VINTR_MASK))
  2363. return;
  2364. cr8 = kvm_get_cr8(vcpu);
  2365. svm->vmcb->control.int_ctl &= ~V_TPR_MASK;
  2366. svm->vmcb->control.int_ctl |= cr8 & V_TPR_MASK;
  2367. }
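/*
 * Descriptive note (added): re-queue events that were in flight when the
 * #VMEXIT occurred (exit_int_info) so they get reinjected on the next entry.
 * Software exceptions such as an emulated INT3 are not reinjected; instead
 * RIP is rewound and the instruction is re-executed.
 */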
  2368. static void svm_complete_interrupts(struct vcpu_svm *svm)
  2369. {
  2370. u8 vector;
  2371. int type;
  2372. u32 exitintinfo = svm->vmcb->control.exit_int_info;
  2373. unsigned int3_injected = svm->int3_injected;
  2374. svm->int3_injected = 0;
  2375. if (svm->vcpu.arch.hflags & HF_IRET_MASK)
  2376. svm->vcpu.arch.hflags &= ~(HF_NMI_MASK | HF_IRET_MASK);
  2377. svm->vcpu.arch.nmi_injected = false;
  2378. kvm_clear_exception_queue(&svm->vcpu);
  2379. kvm_clear_interrupt_queue(&svm->vcpu);
  2380. if (!(exitintinfo & SVM_EXITINTINFO_VALID))
  2381. return;
  2382. vector = exitintinfo & SVM_EXITINTINFO_VEC_MASK;
  2383. type = exitintinfo & SVM_EXITINTINFO_TYPE_MASK;
  2384. switch (type) {
  2385. case SVM_EXITINTINFO_TYPE_NMI:
  2386. svm->vcpu.arch.nmi_injected = true;
  2387. break;
  2388. case SVM_EXITINTINFO_TYPE_EXEPT:
  2389. if (is_nested(svm))
  2390. break;
  2391. /*
  2392. * In case of software exceptions, do not reinject the vector,
  2393. * but re-execute the instruction instead. Rewind RIP first
  2394. * if we emulated INT3 before.
  2395. */
  2396. if (kvm_exception_is_soft(vector)) {
  2397. if (vector == BP_VECTOR && int3_injected &&
  2398. kvm_is_linear_rip(&svm->vcpu, svm->int3_rip))
  2399. kvm_rip_write(&svm->vcpu,
  2400. kvm_rip_read(&svm->vcpu) -
  2401. int3_injected);
  2402. break;
  2403. }
  2404. if (exitintinfo & SVM_EXITINTINFO_VALID_ERR) {
  2405. u32 err = svm->vmcb->control.exit_int_info_err;
  2406. kvm_queue_exception_e(&svm->vcpu, vector, err);
  2407. } else
  2408. kvm_queue_exception(&svm->vcpu, vector);
  2409. break;
  2410. case SVM_EXITINTINFO_TYPE_INTR:
  2411. kvm_queue_interrupt(&svm->vcpu, vector, false);
  2412. break;
  2413. default:
  2414. break;
  2415. }
  2416. }
  2417. #ifdef CONFIG_X86_64
  2418. #define R "r"
  2419. #else
  2420. #define R "e"
  2421. #endif
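/*
 * Descriptive note (added): the world switch itself: save host segment and
 * MSR state, run the guest via the VMLOAD/VMRUN/VMSAVE inline assembly below
 * (guest GPRs are shuttled through vcpu->arch.regs), then restore host state
 * and pick up the exit information from the VMCB.
 */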
  2422. static void svm_vcpu_run(struct kvm_vcpu *vcpu)
  2423. {
  2424. struct vcpu_svm *svm = to_svm(vcpu);
  2425. u16 fs_selector;
  2426. u16 gs_selector;
  2427. u16 ldt_selector;
  2428. svm->vmcb->save.rax = vcpu->arch.regs[VCPU_REGS_RAX];
  2429. svm->vmcb->save.rsp = vcpu->arch.regs[VCPU_REGS_RSP];
  2430. svm->vmcb->save.rip = vcpu->arch.regs[VCPU_REGS_RIP];
  2431. /*
  2432. * A vmexit emulation is required before the vcpu can be executed
  2433. * again.
  2434. */
  2435. if (unlikely(svm->nested.exit_required))
  2436. return;
  2437. pre_svm_run(svm);
  2438. sync_lapic_to_cr8(vcpu);
  2439. save_host_msrs(vcpu);
  2440. fs_selector = kvm_read_fs();
  2441. gs_selector = kvm_read_gs();
  2442. ldt_selector = kvm_read_ldt();
  2443. svm->vmcb->save.cr2 = vcpu->arch.cr2;
  2444. /* required for live migration with NPT */
  2445. if (npt_enabled)
  2446. svm->vmcb->save.cr3 = vcpu->arch.cr3;
  2447. clgi();
  2448. local_irq_enable();
  2449. asm volatile (
  2450. "push %%"R"bp; \n\t"
  2451. "mov %c[rbx](%[svm]), %%"R"bx \n\t"
  2452. "mov %c[rcx](%[svm]), %%"R"cx \n\t"
  2453. "mov %c[rdx](%[svm]), %%"R"dx \n\t"
  2454. "mov %c[rsi](%[svm]), %%"R"si \n\t"
  2455. "mov %c[rdi](%[svm]), %%"R"di \n\t"
  2456. "mov %c[rbp](%[svm]), %%"R"bp \n\t"
  2457. #ifdef CONFIG_X86_64
  2458. "mov %c[r8](%[svm]), %%r8 \n\t"
  2459. "mov %c[r9](%[svm]), %%r9 \n\t"
  2460. "mov %c[r10](%[svm]), %%r10 \n\t"
  2461. "mov %c[r11](%[svm]), %%r11 \n\t"
  2462. "mov %c[r12](%[svm]), %%r12 \n\t"
  2463. "mov %c[r13](%[svm]), %%r13 \n\t"
  2464. "mov %c[r14](%[svm]), %%r14 \n\t"
  2465. "mov %c[r15](%[svm]), %%r15 \n\t"
  2466. #endif
  2467. /* Enter guest mode */
  2468. "push %%"R"ax \n\t"
  2469. "mov %c[vmcb](%[svm]), %%"R"ax \n\t"
  2470. __ex(SVM_VMLOAD) "\n\t"
  2471. __ex(SVM_VMRUN) "\n\t"
  2472. __ex(SVM_VMSAVE) "\n\t"
  2473. "pop %%"R"ax \n\t"
  2474. /* Save guest registers, load host registers */
  2475. "mov %%"R"bx, %c[rbx](%[svm]) \n\t"
  2476. "mov %%"R"cx, %c[rcx](%[svm]) \n\t"
  2477. "mov %%"R"dx, %c[rdx](%[svm]) \n\t"
  2478. "mov %%"R"si, %c[rsi](%[svm]) \n\t"
  2479. "mov %%"R"di, %c[rdi](%[svm]) \n\t"
  2480. "mov %%"R"bp, %c[rbp](%[svm]) \n\t"
  2481. #ifdef CONFIG_X86_64
  2482. "mov %%r8, %c[r8](%[svm]) \n\t"
  2483. "mov %%r9, %c[r9](%[svm]) \n\t"
  2484. "mov %%r10, %c[r10](%[svm]) \n\t"
  2485. "mov %%r11, %c[r11](%[svm]) \n\t"
  2486. "mov %%r12, %c[r12](%[svm]) \n\t"
  2487. "mov %%r13, %c[r13](%[svm]) \n\t"
  2488. "mov %%r14, %c[r14](%[svm]) \n\t"
  2489. "mov %%r15, %c[r15](%[svm]) \n\t"
  2490. #endif
  2491. "pop %%"R"bp"
  2492. :
  2493. : [svm]"a"(svm),
  2494. [vmcb]"i"(offsetof(struct vcpu_svm, vmcb_pa)),
  2495. [rbx]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RBX])),
  2496. [rcx]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RCX])),
  2497. [rdx]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RDX])),
  2498. [rsi]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RSI])),
  2499. [rdi]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RDI])),
  2500. [rbp]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RBP]))
  2501. #ifdef CONFIG_X86_64
  2502. , [r8]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R8])),
  2503. [r9]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R9])),
  2504. [r10]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R10])),
  2505. [r11]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R11])),
  2506. [r12]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R12])),
  2507. [r13]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R13])),
  2508. [r14]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R14])),
  2509. [r15]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R15]))
  2510. #endif
  2511. : "cc", "memory"
  2512. , R"bx", R"cx", R"dx", R"si", R"di"
  2513. #ifdef CONFIG_X86_64
  2514. , "r8", "r9", "r10", "r11" , "r12", "r13", "r14", "r15"
  2515. #endif
  2516. );
  2517. vcpu->arch.cr2 = svm->vmcb->save.cr2;
  2518. vcpu->arch.regs[VCPU_REGS_RAX] = svm->vmcb->save.rax;
  2519. vcpu->arch.regs[VCPU_REGS_RSP] = svm->vmcb->save.rsp;
  2520. vcpu->arch.regs[VCPU_REGS_RIP] = svm->vmcb->save.rip;
  2521. kvm_load_fs(fs_selector);
  2522. kvm_load_gs(gs_selector);
  2523. kvm_load_ldt(ldt_selector);
  2524. load_host_msrs(vcpu);
  2525. reload_tss(vcpu);
  2526. local_irq_disable();
  2527. stgi();
  2528. sync_cr8_to_lapic(vcpu);
  2529. svm->next_rip = 0;
  2530. if (npt_enabled) {
  2531. vcpu->arch.regs_avail &= ~(1 << VCPU_EXREG_PDPTR);
  2532. vcpu->arch.regs_dirty &= ~(1 << VCPU_EXREG_PDPTR);
  2533. }
  2534. }
  2535. #undef R
  2536. static void svm_set_cr3(struct kvm_vcpu *vcpu, unsigned long root)
  2537. {
  2538. struct vcpu_svm *svm = to_svm(vcpu);
  2539. if (npt_enabled) {
  2540. svm->vmcb->control.nested_cr3 = root;
  2541. force_new_asid(vcpu);
  2542. return;
  2543. }
  2544. svm->vmcb->save.cr3 = root;
  2545. force_new_asid(vcpu);
  2546. }
  2547. static int is_disabled(void)
  2548. {
  2549. u64 vm_cr;
  2550. rdmsrl(MSR_VM_CR, vm_cr);
  2551. if (vm_cr & (1 << SVM_VM_CR_SVM_DISABLE))
  2552. return 1;
  2553. return 0;
  2554. }
  2555. static void
  2556. svm_patch_hypercall(struct kvm_vcpu *vcpu, unsigned char *hypercall)
  2557. {
  2558. /*
  2559. * Patch in the VMMCALL instruction:
  2560. */
  2561. hypercall[0] = 0x0f;
  2562. hypercall[1] = 0x01;
  2563. hypercall[2] = 0xd9;
  2564. }
  2565. static void svm_check_processor_compat(void *rtn)
  2566. {
  2567. *(int *)rtn = 0;
  2568. }
  2569. static bool svm_cpu_has_accelerated_tpr(void)
  2570. {
  2571. return false;
  2572. }
  2573. static int get_npt_level(void)
  2574. {
  2575. #ifdef CONFIG_X86_64
  2576. return PT64_ROOT_LEVEL;
  2577. #else
  2578. return PT32E_ROOT_LEVEL;
  2579. #endif
  2580. }
  2581. static u64 svm_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio)
  2582. {
  2583. return 0;
  2584. }
  2585. static void svm_cpuid_update(struct kvm_vcpu *vcpu)
  2586. {
  2587. }
  2588. static void svm_set_supported_cpuid(u32 func, struct kvm_cpuid_entry2 *entry)
  2589. {
  2590. switch (func) {
  2591. case 0x8000000A:
  2592. entry->eax = 1; /* SVM revision 1 */
2593. entry->ebx = 8; /* Let's support 8 ASIDs in case we add proper
  2594. ASID emulation to nested SVM */
  2595. entry->ecx = 0; /* Reserved */
  2596. entry->edx = 0; /* Do not support any additional features */
  2597. break;
  2598. }
  2599. }
  2600. static const struct trace_print_flags svm_exit_reasons_str[] = {
  2601. { SVM_EXIT_READ_CR0, "read_cr0" },
  2602. { SVM_EXIT_READ_CR3, "read_cr3" },
  2603. { SVM_EXIT_READ_CR4, "read_cr4" },
  2604. { SVM_EXIT_READ_CR8, "read_cr8" },
  2605. { SVM_EXIT_WRITE_CR0, "write_cr0" },
  2606. { SVM_EXIT_WRITE_CR3, "write_cr3" },
  2607. { SVM_EXIT_WRITE_CR4, "write_cr4" },
  2608. { SVM_EXIT_WRITE_CR8, "write_cr8" },
  2609. { SVM_EXIT_READ_DR0, "read_dr0" },
  2610. { SVM_EXIT_READ_DR1, "read_dr1" },
  2611. { SVM_EXIT_READ_DR2, "read_dr2" },
  2612. { SVM_EXIT_READ_DR3, "read_dr3" },
  2613. { SVM_EXIT_WRITE_DR0, "write_dr0" },
  2614. { SVM_EXIT_WRITE_DR1, "write_dr1" },
  2615. { SVM_EXIT_WRITE_DR2, "write_dr2" },
  2616. { SVM_EXIT_WRITE_DR3, "write_dr3" },
  2617. { SVM_EXIT_WRITE_DR5, "write_dr5" },
  2618. { SVM_EXIT_WRITE_DR7, "write_dr7" },
  2619. { SVM_EXIT_EXCP_BASE + DB_VECTOR, "DB excp" },
  2620. { SVM_EXIT_EXCP_BASE + BP_VECTOR, "BP excp" },
  2621. { SVM_EXIT_EXCP_BASE + UD_VECTOR, "UD excp" },
  2622. { SVM_EXIT_EXCP_BASE + PF_VECTOR, "PF excp" },
  2623. { SVM_EXIT_EXCP_BASE + NM_VECTOR, "NM excp" },
  2624. { SVM_EXIT_EXCP_BASE + MC_VECTOR, "MC excp" },
  2625. { SVM_EXIT_INTR, "interrupt" },
  2626. { SVM_EXIT_NMI, "nmi" },
  2627. { SVM_EXIT_SMI, "smi" },
  2628. { SVM_EXIT_INIT, "init" },
  2629. { SVM_EXIT_VINTR, "vintr" },
  2630. { SVM_EXIT_CPUID, "cpuid" },
  2631. { SVM_EXIT_INVD, "invd" },
  2632. { SVM_EXIT_HLT, "hlt" },
  2633. { SVM_EXIT_INVLPG, "invlpg" },
  2634. { SVM_EXIT_INVLPGA, "invlpga" },
  2635. { SVM_EXIT_IOIO, "io" },
  2636. { SVM_EXIT_MSR, "msr" },
  2637. { SVM_EXIT_TASK_SWITCH, "task_switch" },
  2638. { SVM_EXIT_SHUTDOWN, "shutdown" },
  2639. { SVM_EXIT_VMRUN, "vmrun" },
  2640. { SVM_EXIT_VMMCALL, "hypercall" },
  2641. { SVM_EXIT_VMLOAD, "vmload" },
  2642. { SVM_EXIT_VMSAVE, "vmsave" },
  2643. { SVM_EXIT_STGI, "stgi" },
  2644. { SVM_EXIT_CLGI, "clgi" },
  2645. { SVM_EXIT_SKINIT, "skinit" },
  2646. { SVM_EXIT_WBINVD, "wbinvd" },
  2647. { SVM_EXIT_MONITOR, "monitor" },
  2648. { SVM_EXIT_MWAIT, "mwait" },
  2649. { SVM_EXIT_NPF, "npf" },
  2650. { -1, NULL }
  2651. };
  2652. static int svm_get_lpage_level(void)
  2653. {
  2654. return PT_PDPE_LEVEL;
  2655. }
  2656. static bool svm_rdtscp_supported(void)
  2657. {
  2658. return false;
  2659. }
  2660. static void svm_fpu_deactivate(struct kvm_vcpu *vcpu)
  2661. {
  2662. struct vcpu_svm *svm = to_svm(vcpu);
  2663. svm->vmcb->control.intercept_exceptions |= 1 << NM_VECTOR;
  2664. if (is_nested(svm))
  2665. svm->nested.hsave->control.intercept_exceptions |= 1 << NM_VECTOR;
  2666. update_cr0_intercept(svm);
  2667. }
  2668. static struct kvm_x86_ops svm_x86_ops = {
  2669. .cpu_has_kvm_support = has_svm,
  2670. .disabled_by_bios = is_disabled,
  2671. .hardware_setup = svm_hardware_setup,
  2672. .hardware_unsetup = svm_hardware_unsetup,
  2673. .check_processor_compatibility = svm_check_processor_compat,
  2674. .hardware_enable = svm_hardware_enable,
  2675. .hardware_disable = svm_hardware_disable,
  2676. .cpu_has_accelerated_tpr = svm_cpu_has_accelerated_tpr,
  2677. .vcpu_create = svm_create_vcpu,
  2678. .vcpu_free = svm_free_vcpu,
  2679. .vcpu_reset = svm_vcpu_reset,
  2680. .prepare_guest_switch = svm_prepare_guest_switch,
  2681. .vcpu_load = svm_vcpu_load,
  2682. .vcpu_put = svm_vcpu_put,
  2683. .set_guest_debug = svm_guest_debug,
  2684. .get_msr = svm_get_msr,
  2685. .set_msr = svm_set_msr,
  2686. .get_segment_base = svm_get_segment_base,
  2687. .get_segment = svm_get_segment,
  2688. .set_segment = svm_set_segment,
  2689. .get_cpl = svm_get_cpl,
  2690. .get_cs_db_l_bits = kvm_get_cs_db_l_bits,
  2691. .decache_cr0_guest_bits = svm_decache_cr0_guest_bits,
  2692. .decache_cr4_guest_bits = svm_decache_cr4_guest_bits,
  2693. .set_cr0 = svm_set_cr0,
  2694. .set_cr3 = svm_set_cr3,
  2695. .set_cr4 = svm_set_cr4,
  2696. .set_efer = svm_set_efer,
  2697. .get_idt = svm_get_idt,
  2698. .set_idt = svm_set_idt,
  2699. .get_gdt = svm_get_gdt,
  2700. .set_gdt = svm_set_gdt,
  2701. .set_dr7 = svm_set_dr7,
  2702. .cache_reg = svm_cache_reg,
  2703. .get_rflags = svm_get_rflags,
  2704. .set_rflags = svm_set_rflags,
  2705. .fpu_activate = svm_fpu_activate,
  2706. .fpu_deactivate = svm_fpu_deactivate,
  2707. .tlb_flush = svm_flush_tlb,
  2708. .run = svm_vcpu_run,
  2709. .handle_exit = handle_exit,
  2710. .skip_emulated_instruction = skip_emulated_instruction,
  2711. .set_interrupt_shadow = svm_set_interrupt_shadow,
  2712. .get_interrupt_shadow = svm_get_interrupt_shadow,
  2713. .patch_hypercall = svm_patch_hypercall,
  2714. .set_irq = svm_set_irq,
  2715. .set_nmi = svm_inject_nmi,
  2716. .queue_exception = svm_queue_exception,
  2717. .interrupt_allowed = svm_interrupt_allowed,
  2718. .nmi_allowed = svm_nmi_allowed,
  2719. .get_nmi_mask = svm_get_nmi_mask,
  2720. .set_nmi_mask = svm_set_nmi_mask,
  2721. .enable_nmi_window = enable_nmi_window,
  2722. .enable_irq_window = enable_irq_window,
  2723. .update_cr8_intercept = update_cr8_intercept,
  2724. .set_tss_addr = svm_set_tss_addr,
  2725. .get_tdp_level = get_npt_level,
  2726. .get_mt_mask = svm_get_mt_mask,
  2727. .exit_reasons_str = svm_exit_reasons_str,
  2728. .get_lpage_level = svm_get_lpage_level,
  2729. .cpuid_update = svm_cpuid_update,
  2730. .rdtscp_supported = svm_rdtscp_supported,
  2731. .set_supported_cpuid = svm_set_supported_cpuid,
  2732. };
  2733. static int __init svm_init(void)
  2734. {
  2735. return kvm_init(&svm_x86_ops, sizeof(struct vcpu_svm),
  2736. THIS_MODULE);
  2737. }
  2738. static void __exit svm_exit(void)
  2739. {
  2740. kvm_exit();
  2741. }
  2742. module_init(svm_init)
  2743. module_exit(svm_exit)