/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * AMD SVM support
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Avi Kivity   <avi@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 *
 */
#include <linux/kvm_host.h>

#include "irq.h"
#include "mmu.h"
#include "kvm_cache_regs.h"
#include "x86.h"

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/vmalloc.h>
#include <linux/highmem.h>
#include <linux/sched.h>
#include <linux/ftrace_event.h>
#include <linux/slab.h>

#include <asm/tlbflush.h>
#include <asm/desc.h>
#include <asm/kvm_para.h>
#include <asm/virtext.h>
#include "trace.h"

#define __ex(x) __kvm_handle_fault_on_reboot(x)

MODULE_AUTHOR("Qumranet");
MODULE_LICENSE("GPL");

#define IOPM_ALLOC_ORDER 2
#define MSRPM_ALLOC_ORDER 1

#define SEG_TYPE_LDT 2
#define SEG_TYPE_BUSY_TSS16 3

#define SVM_FEATURE_NPT           (1 << 0)
#define SVM_FEATURE_LBRV          (1 << 1)
#define SVM_FEATURE_SVML          (1 << 2)
#define SVM_FEATURE_NRIP          (1 << 3)
#define SVM_FEATURE_TSC_RATE      (1 << 4)
#define SVM_FEATURE_VMCB_CLEAN    (1 << 5)
#define SVM_FEATURE_FLUSH_ASID    (1 << 6)
#define SVM_FEATURE_DECODE_ASSIST (1 << 7)
#define SVM_FEATURE_PAUSE_FILTER  (1 << 10)

#define NESTED_EXIT_HOST     0 /* Exit handled on host level */
#define NESTED_EXIT_DONE     1 /* Exit caused nested vmexit  */
#define NESTED_EXIT_CONTINUE 2 /* Further checks needed      */

#define DEBUGCTL_RESERVED_BITS (~(0x3fULL))

#define TSC_RATIO_RSVD 0xffffff0000000000ULL
#define TSC_RATIO_MIN  0x0000000000000001ULL
#define TSC_RATIO_MAX  0x000000ffffffffffULL

static bool erratum_383_found __read_mostly;

static const u32 host_save_user_msrs[] = {
#ifdef CONFIG_X86_64
	MSR_STAR, MSR_LSTAR, MSR_CSTAR, MSR_SYSCALL_MASK, MSR_KERNEL_GS_BASE,
	MSR_FS_BASE,
#endif
	MSR_IA32_SYSENTER_CS, MSR_IA32_SYSENTER_ESP, MSR_IA32_SYSENTER_EIP,
};

#define NR_HOST_SAVE_USER_MSRS ARRAY_SIZE(host_save_user_msrs)

struct kvm_vcpu;

struct nested_state {
	struct vmcb *hsave;
	u64 hsave_msr;
	u64 vm_cr_msr;
	u64 vmcb;

	/* These are the merged vectors */
	u32 *msrpm;

	/* gpa pointers to the real vectors */
	u64 vmcb_msrpm;
	u64 vmcb_iopm;

	/* A VMEXIT is required but not yet emulated */
	bool exit_required;

	/* cache for intercepts of the guest */
	u32 intercept_cr;
	u32 intercept_dr;
	u32 intercept_exceptions;
	u64 intercept;

	/* Nested Paging related state */
	u64 nested_cr3;
};

#define MSRPM_OFFSETS 16
static u32 msrpm_offsets[MSRPM_OFFSETS] __read_mostly;

/*
 * Set osvw_len to higher value when updated Revision Guides
 * are published and we know what the new status bits are
 */
static uint64_t osvw_len = 4, osvw_status;

struct vcpu_svm {
	struct kvm_vcpu vcpu;
	struct vmcb *vmcb;
	unsigned long vmcb_pa;
	struct svm_cpu_data *svm_data;
	uint64_t asid_generation;
	uint64_t sysenter_esp;
	uint64_t sysenter_eip;

	u64 next_rip;

	u64 host_user_msrs[NR_HOST_SAVE_USER_MSRS];
	struct {
		u16 fs;
		u16 gs;
		u16 ldt;
		u64 gs_base;
	} host;

	u32 *msrpm;

	ulong nmi_iret_rip;

	struct nested_state nested;

	bool nmi_singlestep;

	unsigned int3_injected;
	unsigned long int3_rip;
	u32 apf_reason;

	u64 tsc_ratio;
};

static DEFINE_PER_CPU(u64, current_tsc_ratio);
#define TSC_RATIO_DEFAULT 0x0100000000ULL

#define MSR_INVALID 0xffffffffU

static struct svm_direct_access_msrs {
	u32 index;   /* Index of the MSR */
	bool always; /* True if intercept is always on */
} direct_access_msrs[] = {
	{ .index = MSR_STAR,                  .always = true  },
	{ .index = MSR_IA32_SYSENTER_CS,      .always = true  },
#ifdef CONFIG_X86_64
	{ .index = MSR_GS_BASE,               .always = true  },
	{ .index = MSR_FS_BASE,               .always = true  },
	{ .index = MSR_KERNEL_GS_BASE,        .always = true  },
	{ .index = MSR_LSTAR,                 .always = true  },
	{ .index = MSR_CSTAR,                 .always = true  },
	{ .index = MSR_SYSCALL_MASK,          .always = true  },
#endif
	{ .index = MSR_IA32_LASTBRANCHFROMIP, .always = false },
	{ .index = MSR_IA32_LASTBRANCHTOIP,   .always = false },
	{ .index = MSR_IA32_LASTINTFROMIP,    .always = false },
	{ .index = MSR_IA32_LASTINTTOIP,      .always = false },
	{ .index = MSR_INVALID,               .always = false },
};

/* enable NPT for AMD64 and X86 with PAE */
#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
static bool npt_enabled = true;
#else
static bool npt_enabled;
#endif

/* allow nested paging (virtualized MMU) for all guests */
static int npt = true;
module_param(npt, int, S_IRUGO);

/* allow nested virtualization in KVM/SVM */
static int nested = true;
module_param(nested, int, S_IRUGO);

static void svm_flush_tlb(struct kvm_vcpu *vcpu);
static void svm_complete_interrupts(struct vcpu_svm *svm);

static int nested_svm_exit_handled(struct vcpu_svm *svm);
static int nested_svm_intercept(struct vcpu_svm *svm);
static int nested_svm_vmexit(struct vcpu_svm *svm);
static int nested_svm_check_exception(struct vcpu_svm *svm, unsigned nr,
				      bool has_error_code, u32 error_code);
static u64 __scale_tsc(u64 ratio, u64 tsc);

enum {
	VMCB_INTERCEPTS, /* Intercept vectors, TSC offset, pause filter count */
	VMCB_PERM_MAP,   /* IOPM Base and MSRPM Base */
	VMCB_ASID,       /* ASID */
	VMCB_INTR,       /* int_ctl, int_vector */
	VMCB_NPT,        /* npt_en, nCR3, gPAT */
	VMCB_CR,         /* CR0, CR3, CR4, EFER */
	VMCB_DR,         /* DR6, DR7 */
	VMCB_DT,         /* GDT, IDT */
	VMCB_SEG,        /* CS, DS, SS, ES, CPL */
	VMCB_CR2,        /* CR2 only */
	VMCB_LBR,        /* DBGCTL, BR_FROM, BR_TO, LAST_EX_FROM, LAST_EX_TO */
	VMCB_DIRTY_MAX,
};

/* TPR and CR2 are always written before VMRUN */
#define VMCB_ALWAYS_DIRTY_MASK ((1U << VMCB_INTR) | (1U << VMCB_CR2))
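
/*
 * The "clean" field in the VMCB control area tells the hardware which of the
 * VMCB state groups above are unchanged since the last VMRUN and may be used
 * from its internal cache; clearing a bit forces that state to be reloaded.
 */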
static inline void mark_all_dirty(struct vmcb *vmcb)
{
	vmcb->control.clean = 0;
}

static inline void mark_all_clean(struct vmcb *vmcb)
{
	vmcb->control.clean = ((1 << VMCB_DIRTY_MAX) - 1)
			       & ~VMCB_ALWAYS_DIRTY_MASK;
}

static inline void mark_dirty(struct vmcb *vmcb, int bit)
{
	vmcb->control.clean &= ~(1 << bit);
}

static inline struct vcpu_svm *to_svm(struct kvm_vcpu *vcpu)
{
	return container_of(vcpu, struct vcpu_svm, vcpu);
}
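
/*
 * While a nested guest is running, the active VMCB must intercept everything
 * that either the host (L1) VMCB or the nested guest asked for, so the
 * intercept masks are merged by OR-ing them together.
 */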
static void recalc_intercepts(struct vcpu_svm *svm)
{
	struct vmcb_control_area *c, *h;
	struct nested_state *g;

	mark_dirty(svm->vmcb, VMCB_INTERCEPTS);

	if (!is_guest_mode(&svm->vcpu))
		return;

	c = &svm->vmcb->control;
	h = &svm->nested.hsave->control;
	g = &svm->nested;

	c->intercept_cr = h->intercept_cr | g->intercept_cr;
	c->intercept_dr = h->intercept_dr | g->intercept_dr;
	c->intercept_exceptions = h->intercept_exceptions | g->intercept_exceptions;
	c->intercept = h->intercept | g->intercept;
}

static inline struct vmcb *get_host_vmcb(struct vcpu_svm *svm)
{
	if (is_guest_mode(&svm->vcpu))
		return svm->nested.hsave;
	else
		return svm->vmcb;
}

static inline void set_cr_intercept(struct vcpu_svm *svm, int bit)
{
	struct vmcb *vmcb = get_host_vmcb(svm);

	vmcb->control.intercept_cr |= (1U << bit);
	recalc_intercepts(svm);
}

static inline void clr_cr_intercept(struct vcpu_svm *svm, int bit)
{
	struct vmcb *vmcb = get_host_vmcb(svm);

	vmcb->control.intercept_cr &= ~(1U << bit);
	recalc_intercepts(svm);
}

static inline bool is_cr_intercept(struct vcpu_svm *svm, int bit)
{
	struct vmcb *vmcb = get_host_vmcb(svm);

	return vmcb->control.intercept_cr & (1U << bit);
}

static inline void set_dr_intercept(struct vcpu_svm *svm, int bit)
{
	struct vmcb *vmcb = get_host_vmcb(svm);

	vmcb->control.intercept_dr |= (1U << bit);
	recalc_intercepts(svm);
}

static inline void clr_dr_intercept(struct vcpu_svm *svm, int bit)
{
	struct vmcb *vmcb = get_host_vmcb(svm);

	vmcb->control.intercept_dr &= ~(1U << bit);
	recalc_intercepts(svm);
}

static inline void set_exception_intercept(struct vcpu_svm *svm, int bit)
{
	struct vmcb *vmcb = get_host_vmcb(svm);

	vmcb->control.intercept_exceptions |= (1U << bit);
	recalc_intercepts(svm);
}

static inline void clr_exception_intercept(struct vcpu_svm *svm, int bit)
{
	struct vmcb *vmcb = get_host_vmcb(svm);

	vmcb->control.intercept_exceptions &= ~(1U << bit);
	recalc_intercepts(svm);
}

static inline void set_intercept(struct vcpu_svm *svm, int bit)
{
	struct vmcb *vmcb = get_host_vmcb(svm);

	vmcb->control.intercept |= (1ULL << bit);
	recalc_intercepts(svm);
}

static inline void clr_intercept(struct vcpu_svm *svm, int bit)
{
	struct vmcb *vmcb = get_host_vmcb(svm);

	vmcb->control.intercept &= ~(1ULL << bit);
	recalc_intercepts(svm);
}

static inline void enable_gif(struct vcpu_svm *svm)
{
	svm->vcpu.arch.hflags |= HF_GIF_MASK;
}

static inline void disable_gif(struct vcpu_svm *svm)
{
	svm->vcpu.arch.hflags &= ~HF_GIF_MASK;
}

static inline bool gif_set(struct vcpu_svm *svm)
{
	return !!(svm->vcpu.arch.hflags & HF_GIF_MASK);
}

static unsigned long iopm_base;

struct kvm_ldttss_desc {
	u16 limit0;
	u16 base0;
	unsigned base1:8, type:5, dpl:2, p:1;
	unsigned limit1:4, zero0:3, g:1, base2:8;
	u32 base3;
	u32 zero1;
} __attribute__((packed));

struct svm_cpu_data {
	int cpu;

	u64 asid_generation;
	u32 max_asid;
	u32 next_asid;
	struct kvm_ldttss_desc *tss_desc;

	struct page *save_area;
};

static DEFINE_PER_CPU(struct svm_cpu_data *, svm_data);

struct svm_init_data {
	int cpu;
	int r;
};

static u32 msrpm_ranges[] = {0, 0xc0000000, 0xc0010000};

#define NUM_MSR_MAPS ARRAY_SIZE(msrpm_ranges)
#define MSRS_RANGE_SIZE 2048
#define MSRS_IN_RANGE (MSRS_RANGE_SIZE * 8 / 2)
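
/*
 * Translate an MSR number into a u32 offset within the MSR permission map.
 * Each of the three MSR ranges above covers 2 KB of the bitmap, with two
 * permission bits (read and write intercept) per MSR.
 */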
static u32 svm_msrpm_offset(u32 msr)
{
	u32 offset;
	int i;

	for (i = 0; i < NUM_MSR_MAPS; i++) {
		if (msr < msrpm_ranges[i] ||
		    msr >= msrpm_ranges[i] + MSRS_IN_RANGE)
			continue;

		offset  = (msr - msrpm_ranges[i]) / 4; /* 4 msrs per u8 */
		offset += (i * MSRS_RANGE_SIZE);       /* add range offset */

		/* Now we have the u8 offset - but need the u32 offset */
		return offset / 4;
	}

	/* MSR not in any range */
	return MSR_INVALID;
}

#define MAX_INST_SIZE 15

static inline void clgi(void)
{
	asm volatile (__ex(SVM_CLGI));
}

static inline void stgi(void)
{
	asm volatile (__ex(SVM_STGI));
}

static inline void invlpga(unsigned long addr, u32 asid)
{
	asm volatile (__ex(SVM_INVLPGA) : : "a"(addr), "c"(asid));
}

static int get_npt_level(void)
{
#ifdef CONFIG_X86_64
	return PT64_ROOT_LEVEL;
#else
	return PT32E_ROOT_LEVEL;
#endif
}

static void svm_set_efer(struct kvm_vcpu *vcpu, u64 efer)
{
	vcpu->arch.efer = efer;
	if (!npt_enabled && !(efer & EFER_LMA))
		efer &= ~EFER_LME;

	to_svm(vcpu)->vmcb->save.efer = efer | EFER_SVME;
	mark_dirty(to_svm(vcpu)->vmcb, VMCB_CR);
}

static int is_external_interrupt(u32 info)
{
	info &= SVM_EVTINJ_TYPE_MASK | SVM_EVTINJ_VALID;
	return info == (SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_INTR);
}
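
/*
 * SVM keeps a single interrupt-shadow flag and does not distinguish an STI
 * shadow from a MOV SS shadow, so report both when the flag is set.
 */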
static u32 svm_get_interrupt_shadow(struct kvm_vcpu *vcpu, int mask)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	u32 ret = 0;

	if (svm->vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK)
		ret |= KVM_X86_SHADOW_INT_STI | KVM_X86_SHADOW_INT_MOV_SS;
	return ret & mask;
}

static void svm_set_interrupt_shadow(struct kvm_vcpu *vcpu, int mask)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	if (mask == 0)
		svm->vmcb->control.int_state &= ~SVM_INTERRUPT_SHADOW_MASK;
	else
		svm->vmcb->control.int_state |= SVM_INTERRUPT_SHADOW_MASK;
}
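
/*
 * Advance RIP past the just-handled instruction, preferring the next_rip
 * value provided by hardware (NRIPS) and falling back to skipping via the
 * instruction emulator when it is not available.
 */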
static void skip_emulated_instruction(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	if (svm->vmcb->control.next_rip != 0)
		svm->next_rip = svm->vmcb->control.next_rip;

	if (!svm->next_rip) {
		if (emulate_instruction(vcpu, EMULTYPE_SKIP) !=
				EMULATE_DONE)
			printk(KERN_DEBUG "%s: NOP\n", __func__);
		return;
	}
	if (svm->next_rip - kvm_rip_read(vcpu) > MAX_INST_SIZE)
		printk(KERN_ERR "%s: ip 0x%lx next 0x%llx\n",
		       __func__, kvm_rip_read(vcpu), svm->next_rip);

	kvm_rip_write(vcpu, svm->next_rip);
	svm_set_interrupt_shadow(vcpu, 0);
}

static void svm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr,
				bool has_error_code, u32 error_code,
				bool reinject)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	/*
	 * If we are within a nested VM we'd better #VMEXIT and let the guest
	 * handle the exception
	 */
	if (!reinject &&
	    nested_svm_check_exception(svm, nr, has_error_code, error_code))
		return;

	if (nr == BP_VECTOR && !static_cpu_has(X86_FEATURE_NRIPS)) {
		unsigned long rip, old_rip = kvm_rip_read(&svm->vcpu);

		/*
		 * For guest debugging where we have to reinject #BP if some
		 * INT3 is guest-owned:
		 * Emulate nRIP by moving RIP forward. Will fail if injection
		 * raises a fault that is not intercepted. Still better than
		 * failing in all cases.
		 */
		skip_emulated_instruction(&svm->vcpu);
		rip = kvm_rip_read(&svm->vcpu);
		svm->int3_rip = rip + svm->vmcb->save.cs.base;
		svm->int3_injected = rip - old_rip;
	}

	svm->vmcb->control.event_inj = nr
		| SVM_EVTINJ_VALID
		| (has_error_code ? SVM_EVTINJ_VALID_ERR : 0)
		| SVM_EVTINJ_TYPE_EXEPT;
	svm->vmcb->control.event_inj_err = error_code;
}

static void svm_init_erratum_383(void)
{
	u32 low, high;
	int err;
	u64 val;

	if (!cpu_has_amd_erratum(amd_erratum_383))
		return;

	/* Use _safe variants to not break nested virtualization */
	val = native_read_msr_safe(MSR_AMD64_DC_CFG, &err);
	if (err)
		return;

	val |= (1ULL << 47);

	low  = lower_32_bits(val);
	high = upper_32_bits(val);

	native_write_msr_safe(MSR_AMD64_DC_CFG, low, high);

	erratum_383_found = true;
}

static void svm_init_osvw(struct kvm_vcpu *vcpu)
{
	/*
	 * Guests should see errata 400 and 415 as fixed (assuming that
	 * HLT and IO instructions are intercepted).
	 */
	vcpu->arch.osvw.length = (osvw_len >= 3) ? (osvw_len) : 3;
	vcpu->arch.osvw.status = osvw_status & ~(6ULL);

	/*
	 * By increasing VCPU's osvw.length to 3 we are telling the guest that
	 * all osvw.status bits inside that length, including bit 0 (which is
	 * reserved for erratum 298), are valid. However, if host processor's
	 * osvw_len is 0 then osvw_status[0] carries no information. We need to
	 * be conservative here and therefore we tell the guest that erratum 298
	 * is present (because we really don't know).
	 */
	if (osvw_len == 0 && boot_cpu_data.x86 == 0x10)
		vcpu->arch.osvw.status |= 1;
}

static int has_svm(void)
{
	const char *msg;

	if (!cpu_has_svm(&msg)) {
		printk(KERN_INFO "has_svm: %s\n", msg);
		return 0;
	}

	return 1;
}

static void svm_hardware_disable(void *garbage)
{
	/* Make sure we clean up behind us */
	if (static_cpu_has(X86_FEATURE_TSCRATEMSR))
		wrmsrl(MSR_AMD64_TSC_RATIO, TSC_RATIO_DEFAULT);

	cpu_svm_disable();
}

static int svm_hardware_enable(void *garbage)
{
	struct svm_cpu_data *sd;
	uint64_t efer;
	struct desc_ptr gdt_descr;
	struct desc_struct *gdt;
	int me = raw_smp_processor_id();

	rdmsrl(MSR_EFER, efer);
	if (efer & EFER_SVME)
		return -EBUSY;

	if (!has_svm()) {
		printk(KERN_ERR "svm_hardware_enable: err EOPNOTSUPP on %d\n",
		       me);
		return -EINVAL;
	}
	sd = per_cpu(svm_data, me);

	if (!sd) {
		printk(KERN_ERR "svm_hardware_enable: svm_data is NULL on %d\n",
		       me);
		return -EINVAL;
	}

	sd->asid_generation = 1;
	sd->max_asid = cpuid_ebx(SVM_CPUID_FUNC) - 1;
	sd->next_asid = sd->max_asid + 1;

	native_store_gdt(&gdt_descr);
	gdt = (struct desc_struct *)gdt_descr.address;
	sd->tss_desc = (struct kvm_ldttss_desc *)(gdt + GDT_ENTRY_TSS);

	wrmsrl(MSR_EFER, efer | EFER_SVME);

	wrmsrl(MSR_VM_HSAVE_PA, page_to_pfn(sd->save_area) << PAGE_SHIFT);

	if (static_cpu_has(X86_FEATURE_TSCRATEMSR)) {
		wrmsrl(MSR_AMD64_TSC_RATIO, TSC_RATIO_DEFAULT);
		__get_cpu_var(current_tsc_ratio) = TSC_RATIO_DEFAULT;
	}

	/*
	 * Get OSVW bits.
	 *
	 * Note that it is possible to have a system with mixed processor
	 * revisions and therefore different OSVW bits. If bits are not the same
	 * on different processors then choose the worst case (i.e. if erratum
	 * is present on one processor and not on another then assume that the
	 * erratum is present everywhere).
	 */
	if (cpu_has(&boot_cpu_data, X86_FEATURE_OSVW)) {
		uint64_t len, status = 0;
		int err;

		len = native_read_msr_safe(MSR_AMD64_OSVW_ID_LENGTH, &err);
		if (!err)
			status = native_read_msr_safe(MSR_AMD64_OSVW_STATUS,
						      &err);

		if (err)
			osvw_status = osvw_len = 0;
		else {
			if (len < osvw_len)
				osvw_len = len;
			osvw_status |= status;
			osvw_status &= (1ULL << osvw_len) - 1;
		}
	} else
		osvw_status = osvw_len = 0;

	svm_init_erratum_383();

	return 0;
}

static void svm_cpu_uninit(int cpu)
{
	struct svm_cpu_data *sd = per_cpu(svm_data, raw_smp_processor_id());

	if (!sd)
		return;

	per_cpu(svm_data, raw_smp_processor_id()) = NULL;
	__free_page(sd->save_area);
	kfree(sd);
}

static int svm_cpu_init(int cpu)
{
	struct svm_cpu_data *sd;
	int r;

	sd = kzalloc(sizeof(struct svm_cpu_data), GFP_KERNEL);
	if (!sd)
		return -ENOMEM;
	sd->cpu = cpu;
	sd->save_area = alloc_page(GFP_KERNEL);
	r = -ENOMEM;
	if (!sd->save_area)
		goto err_1;

	per_cpu(svm_data, cpu) = sd;

	return 0;

err_1:
	kfree(sd);
	return r;
}

static bool valid_msr_intercept(u32 index)
{
	int i;

	for (i = 0; direct_access_msrs[i].index != MSR_INVALID; i++)
		if (direct_access_msrs[i].index == index)
			return true;

	return false;
}
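
/*
 * Each u32 of the MSR permission map tracks 16 MSRs with two bits apiece:
 * bit 2*n intercepts reads of the n-th MSR in the group and bit 2*n+1
 * intercepts writes. Passing read/write = 1 clears the bit so the guest may
 * access the MSR directly.
 */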
static void set_msr_interception(u32 *msrpm, unsigned msr,
				 int read, int write)
{
	u8 bit_read, bit_write;
	unsigned long tmp;
	u32 offset;

	/*
	 * If this warning triggers extend the direct_access_msrs list at the
	 * beginning of the file
	 */
	WARN_ON(!valid_msr_intercept(msr));

	offset    = svm_msrpm_offset(msr);
	bit_read  = 2 * (msr & 0x0f);
	bit_write = 2 * (msr & 0x0f) + 1;
	tmp       = msrpm[offset];

	BUG_ON(offset == MSR_INVALID);

	read  ? clear_bit(bit_read,  &tmp) : set_bit(bit_read,  &tmp);
	write ? clear_bit(bit_write, &tmp) : set_bit(bit_write, &tmp);

	msrpm[offset] = tmp;
}

static void svm_vcpu_init_msrpm(u32 *msrpm)
{
	int i;

	memset(msrpm, 0xff, PAGE_SIZE * (1 << MSRPM_ALLOC_ORDER));

	for (i = 0; direct_access_msrs[i].index != MSR_INVALID; i++) {
		if (!direct_access_msrs[i].always)
			continue;

		set_msr_interception(msrpm, direct_access_msrs[i].index, 1, 1);
	}
}

static void add_msr_offset(u32 offset)
{
	int i;

	for (i = 0; i < MSRPM_OFFSETS; ++i) {

		/* Offset already in list? */
		if (msrpm_offsets[i] == offset)
			return;

		/* Slot used by another offset? */
		if (msrpm_offsets[i] != MSR_INVALID)
			continue;

		/* Add offset to list */
		msrpm_offsets[i] = offset;

		return;
	}

	/*
	 * If this BUG triggers the msrpm_offsets table has an overflow. Just
	 * increase MSRPM_OFFSETS in this case.
	 */
	BUG();
}

static void init_msrpm_offsets(void)
{
	int i;

	memset(msrpm_offsets, 0xff, sizeof(msrpm_offsets));

	for (i = 0; direct_access_msrs[i].index != MSR_INVALID; i++) {
		u32 offset;

		offset = svm_msrpm_offset(direct_access_msrs[i].index);
		BUG_ON(offset == MSR_INVALID);

		add_msr_offset(offset);
	}
}

static void svm_enable_lbrv(struct vcpu_svm *svm)
{
	u32 *msrpm = svm->msrpm;

	svm->vmcb->control.lbr_ctl = 1;
	set_msr_interception(msrpm, MSR_IA32_LASTBRANCHFROMIP, 1, 1);
	set_msr_interception(msrpm, MSR_IA32_LASTBRANCHTOIP, 1, 1);
	set_msr_interception(msrpm, MSR_IA32_LASTINTFROMIP, 1, 1);
	set_msr_interception(msrpm, MSR_IA32_LASTINTTOIP, 1, 1);
}

static void svm_disable_lbrv(struct vcpu_svm *svm)
{
	u32 *msrpm = svm->msrpm;

	svm->vmcb->control.lbr_ctl = 0;
	set_msr_interception(msrpm, MSR_IA32_LASTBRANCHFROMIP, 0, 0);
	set_msr_interception(msrpm, MSR_IA32_LASTBRANCHTOIP, 0, 0);
	set_msr_interception(msrpm, MSR_IA32_LASTINTFROMIP, 0, 0);
	set_msr_interception(msrpm, MSR_IA32_LASTINTTOIP, 0, 0);
}

static __init int svm_hardware_setup(void)
{
	int cpu;
	struct page *iopm_pages;
	void *iopm_va;
	int r;

	iopm_pages = alloc_pages(GFP_KERNEL, IOPM_ALLOC_ORDER);

	if (!iopm_pages)
		return -ENOMEM;

	iopm_va = page_address(iopm_pages);
	memset(iopm_va, 0xff, PAGE_SIZE * (1 << IOPM_ALLOC_ORDER));
	iopm_base = page_to_pfn(iopm_pages) << PAGE_SHIFT;

	init_msrpm_offsets();

	if (boot_cpu_has(X86_FEATURE_NX))
		kvm_enable_efer_bits(EFER_NX);

	if (boot_cpu_has(X86_FEATURE_FXSR_OPT))
		kvm_enable_efer_bits(EFER_FFXSR);

	if (boot_cpu_has(X86_FEATURE_TSCRATEMSR)) {
		u64 max;

		kvm_has_tsc_control = true;

		/*
		 * Make sure the user can only configure tsc_khz values that
		 * fit into a signed integer.
		 * A min value is not needed because it will always be 1 on
		 * all machines, and a value of 0 is used to disable
		 * tsc-scaling for the vcpu.
		 */
		max = min(0x7fffffffULL, __scale_tsc(tsc_khz, TSC_RATIO_MAX));

		kvm_max_guest_tsc_khz = max;
	}

	if (nested) {
		printk(KERN_INFO "kvm: Nested Virtualization enabled\n");
		kvm_enable_efer_bits(EFER_SVME | EFER_LMSLE);
	}

	for_each_possible_cpu(cpu) {
		r = svm_cpu_init(cpu);
		if (r)
			goto err;
	}

	if (!boot_cpu_has(X86_FEATURE_NPT))
		npt_enabled = false;

	if (npt_enabled && !npt) {
		printk(KERN_INFO "kvm: Nested Paging disabled\n");
		npt_enabled = false;
	}

	if (npt_enabled) {
		printk(KERN_INFO "kvm: Nested Paging enabled\n");
		kvm_enable_tdp();
	} else
		kvm_disable_tdp();

	return 0;

err:
	__free_pages(iopm_pages, IOPM_ALLOC_ORDER);
	iopm_base = 0;
	return r;
}

static __exit void svm_hardware_unsetup(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		svm_cpu_uninit(cpu);

	__free_pages(pfn_to_page(iopm_base >> PAGE_SHIFT), IOPM_ALLOC_ORDER);
	iopm_base = 0;
}

static void init_seg(struct vmcb_seg *seg)
{
	seg->selector = 0;
	seg->attrib = SVM_SELECTOR_P_MASK | SVM_SELECTOR_S_MASK |
		      SVM_SELECTOR_WRITE_MASK; /* Read/Write Data Segment */
	seg->limit = 0xffff;
	seg->base = 0;
}

static void init_sys_seg(struct vmcb_seg *seg, uint32_t type)
{
	seg->selector = 0;
	seg->attrib = SVM_SELECTOR_P_MASK | type;
	seg->limit = 0xffff;
	seg->base = 0;
}
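
/*
 * Multiply a TSC value by a fixed-point ratio (integer part in the upper
 * 32 bits, fraction in the lower 32 bits) while staying within 64-bit
 * arithmetic: the integer part is applied directly and the fractional part
 * is applied separately to the high and low halves of the TSC.
 */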
static u64 __scale_tsc(u64 ratio, u64 tsc)
{
	u64 mult, frac, _tsc;

	mult = ratio >> 32;
	frac = ratio & ((1ULL << 32) - 1);

	_tsc  = tsc;
	_tsc *= mult;
	_tsc += (tsc >> 32) * frac;
	_tsc += ((tsc & ((1ULL << 32) - 1)) * frac) >> 32;

	return _tsc;
}

static u64 svm_scale_tsc(struct kvm_vcpu *vcpu, u64 tsc)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	u64 _tsc = tsc;

	if (svm->tsc_ratio != TSC_RATIO_DEFAULT)
		_tsc = __scale_tsc(svm->tsc_ratio, tsc);

	return _tsc;
}

static void svm_set_tsc_khz(struct kvm_vcpu *vcpu, u32 user_tsc_khz, bool scale)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	u64 ratio;
	u64 khz;

	/* Guest TSC same frequency as host TSC? */
	if (!scale) {
		svm->tsc_ratio = TSC_RATIO_DEFAULT;
		return;
	}

	/* TSC scaling supported? */
	if (!boot_cpu_has(X86_FEATURE_TSCRATEMSR)) {
		if (user_tsc_khz > tsc_khz) {
			vcpu->arch.tsc_catchup = 1;
			vcpu->arch.tsc_always_catchup = 1;
		} else
			WARN(1, "user requested TSC rate below hardware speed\n");
		return;
	}

	khz = user_tsc_khz;

	/* TSC scaling required - calculate ratio */
	ratio = khz << 32;
	do_div(ratio, tsc_khz);

	if (ratio == 0 || ratio & TSC_RATIO_RSVD) {
		WARN_ONCE(1, "Invalid TSC ratio - virtual-tsc-khz=%u\n",
			  user_tsc_khz);
		return;
	}
	svm->tsc_ratio = ratio;
}
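
/*
 * When a nested guest is running, preserve the offset that L1 added on top of
 * its own TSC offset and re-apply that delta above the new host-requested
 * offset.
 */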
static void svm_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	u64 g_tsc_offset = 0;

	if (is_guest_mode(vcpu)) {
		g_tsc_offset = svm->vmcb->control.tsc_offset -
			       svm->nested.hsave->control.tsc_offset;
		svm->nested.hsave->control.tsc_offset = offset;
	}

	svm->vmcb->control.tsc_offset = offset + g_tsc_offset;

	mark_dirty(svm->vmcb, VMCB_INTERCEPTS);
}

static void svm_adjust_tsc_offset(struct kvm_vcpu *vcpu, s64 adjustment, bool host)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	WARN_ON(adjustment < 0);
	if (host)
		adjustment = svm_scale_tsc(vcpu, adjustment);

	svm->vmcb->control.tsc_offset += adjustment;
	if (is_guest_mode(vcpu))
		svm->nested.hsave->control.tsc_offset += adjustment;
	mark_dirty(svm->vmcb, VMCB_INTERCEPTS);
}

static u64 svm_compute_tsc_offset(struct kvm_vcpu *vcpu, u64 target_tsc)
{
	u64 tsc;

	tsc = svm_scale_tsc(vcpu, native_read_tsc());

	return target_tsc - tsc;
}
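
/*
 * Program the initial VMCB: the instruction, exception and register-access
 * intercepts KVM relies on, plus segment and control-register state that
 * matches the architectural reset state of an x86 CPU.
 */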
static void init_vmcb(struct vcpu_svm *svm)
{
	struct vmcb_control_area *control = &svm->vmcb->control;
	struct vmcb_save_area *save = &svm->vmcb->save;

	svm->vcpu.fpu_active = 1;
	svm->vcpu.arch.hflags = 0;

	set_cr_intercept(svm, INTERCEPT_CR0_READ);
	set_cr_intercept(svm, INTERCEPT_CR3_READ);
	set_cr_intercept(svm, INTERCEPT_CR4_READ);
	set_cr_intercept(svm, INTERCEPT_CR0_WRITE);
	set_cr_intercept(svm, INTERCEPT_CR3_WRITE);
	set_cr_intercept(svm, INTERCEPT_CR4_WRITE);
	set_cr_intercept(svm, INTERCEPT_CR8_WRITE);

	set_dr_intercept(svm, INTERCEPT_DR0_READ);
	set_dr_intercept(svm, INTERCEPT_DR1_READ);
	set_dr_intercept(svm, INTERCEPT_DR2_READ);
	set_dr_intercept(svm, INTERCEPT_DR3_READ);
	set_dr_intercept(svm, INTERCEPT_DR4_READ);
	set_dr_intercept(svm, INTERCEPT_DR5_READ);
	set_dr_intercept(svm, INTERCEPT_DR6_READ);
	set_dr_intercept(svm, INTERCEPT_DR7_READ);

	set_dr_intercept(svm, INTERCEPT_DR0_WRITE);
	set_dr_intercept(svm, INTERCEPT_DR1_WRITE);
	set_dr_intercept(svm, INTERCEPT_DR2_WRITE);
	set_dr_intercept(svm, INTERCEPT_DR3_WRITE);
	set_dr_intercept(svm, INTERCEPT_DR4_WRITE);
	set_dr_intercept(svm, INTERCEPT_DR5_WRITE);
	set_dr_intercept(svm, INTERCEPT_DR6_WRITE);
	set_dr_intercept(svm, INTERCEPT_DR7_WRITE);

	set_exception_intercept(svm, PF_VECTOR);
	set_exception_intercept(svm, UD_VECTOR);
	set_exception_intercept(svm, MC_VECTOR);

	set_intercept(svm, INTERCEPT_INTR);
	set_intercept(svm, INTERCEPT_NMI);
	set_intercept(svm, INTERCEPT_SMI);
	set_intercept(svm, INTERCEPT_SELECTIVE_CR0);
	set_intercept(svm, INTERCEPT_RDPMC);
	set_intercept(svm, INTERCEPT_CPUID);
	set_intercept(svm, INTERCEPT_INVD);
	set_intercept(svm, INTERCEPT_HLT);
	set_intercept(svm, INTERCEPT_INVLPG);
	set_intercept(svm, INTERCEPT_INVLPGA);
	set_intercept(svm, INTERCEPT_IOIO_PROT);
	set_intercept(svm, INTERCEPT_MSR_PROT);
	set_intercept(svm, INTERCEPT_TASK_SWITCH);
	set_intercept(svm, INTERCEPT_SHUTDOWN);
	set_intercept(svm, INTERCEPT_VMRUN);
	set_intercept(svm, INTERCEPT_VMMCALL);
	set_intercept(svm, INTERCEPT_VMLOAD);
	set_intercept(svm, INTERCEPT_VMSAVE);
	set_intercept(svm, INTERCEPT_STGI);
	set_intercept(svm, INTERCEPT_CLGI);
	set_intercept(svm, INTERCEPT_SKINIT);
	set_intercept(svm, INTERCEPT_WBINVD);
	set_intercept(svm, INTERCEPT_MONITOR);
	set_intercept(svm, INTERCEPT_MWAIT);
	set_intercept(svm, INTERCEPT_XSETBV);

	control->iopm_base_pa = iopm_base;
	control->msrpm_base_pa = __pa(svm->msrpm);
	control->int_ctl = V_INTR_MASKING_MASK;

	init_seg(&save->es);
	init_seg(&save->ss);
	init_seg(&save->ds);
	init_seg(&save->fs);
	init_seg(&save->gs);

	save->cs.selector = 0xf000;
	/* Executable/Readable Code Segment */
	save->cs.attrib = SVM_SELECTOR_READ_MASK | SVM_SELECTOR_P_MASK |
		SVM_SELECTOR_S_MASK | SVM_SELECTOR_CODE_MASK;
	save->cs.limit = 0xffff;
	/*
	 * cs.base should really be 0xffff0000, but vmx can't handle that, so
	 * be consistent with it.
	 *
	 * Replace when we have real mode working for vmx.
	 */
	save->cs.base = 0xf0000;

	save->gdtr.limit = 0xffff;
	save->idtr.limit = 0xffff;

	init_sys_seg(&save->ldtr, SEG_TYPE_LDT);
	init_sys_seg(&save->tr, SEG_TYPE_BUSY_TSS16);

	svm_set_efer(&svm->vcpu, 0);
	save->dr6 = 0xffff0ff0;
	save->dr7 = 0x400;
	kvm_set_rflags(&svm->vcpu, 2);
	save->rip = 0x0000fff0;
	svm->vcpu.arch.regs[VCPU_REGS_RIP] = save->rip;

	/*
	 * This is the guest-visible cr0 value.
	 * svm_set_cr0() sets PG and WP and clears NW and CD on save->cr0.
	 */
	svm->vcpu.arch.cr0 = 0;
	(void)kvm_set_cr0(&svm->vcpu, X86_CR0_NW | X86_CR0_CD | X86_CR0_ET);

	save->cr4 = X86_CR4_PAE;
	/* rdx = ?? */

	if (npt_enabled) {
		/* Setup VMCB for Nested Paging */
		control->nested_ctl = 1;
		clr_intercept(svm, INTERCEPT_INVLPG);
		clr_exception_intercept(svm, PF_VECTOR);
		clr_cr_intercept(svm, INTERCEPT_CR3_READ);
		clr_cr_intercept(svm, INTERCEPT_CR3_WRITE);
		save->g_pat = 0x0007040600070406ULL;
		save->cr3 = 0;
		save->cr4 = 0;
	}
	svm->asid_generation = 0;

	svm->nested.vmcb = 0;
	svm->vcpu.arch.hflags = 0;

	if (boot_cpu_has(X86_FEATURE_PAUSEFILTER)) {
		control->pause_filter_count = 3000;
		set_intercept(svm, INTERCEPT_PAUSE);
	}

	mark_all_dirty(svm->vmcb);

	enable_gif(svm);
}

static int svm_vcpu_reset(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	init_vmcb(svm);

	if (!kvm_vcpu_is_bsp(vcpu)) {
		kvm_rip_write(vcpu, 0);
		svm->vmcb->save.cs.base = svm->vcpu.arch.sipi_vector << 12;
		svm->vmcb->save.cs.selector = svm->vcpu.arch.sipi_vector << 8;
	}
	vcpu->arch.regs_avail = ~0;
	vcpu->arch.regs_dirty = ~0;

	return 0;
}

static struct kvm_vcpu *svm_create_vcpu(struct kvm *kvm, unsigned int id)
{
	struct vcpu_svm *svm;
	struct page *page;
	struct page *msrpm_pages;
	struct page *hsave_page;
	struct page *nested_msrpm_pages;
	int err;

	svm = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
	if (!svm) {
		err = -ENOMEM;
		goto out;
	}

	svm->tsc_ratio = TSC_RATIO_DEFAULT;

	err = kvm_vcpu_init(&svm->vcpu, kvm, id);
	if (err)
		goto free_svm;

	err = -ENOMEM;
	page = alloc_page(GFP_KERNEL);
	if (!page)
		goto uninit;

	msrpm_pages = alloc_pages(GFP_KERNEL, MSRPM_ALLOC_ORDER);
	if (!msrpm_pages)
		goto free_page1;

	nested_msrpm_pages = alloc_pages(GFP_KERNEL, MSRPM_ALLOC_ORDER);
	if (!nested_msrpm_pages)
		goto free_page2;

	hsave_page = alloc_page(GFP_KERNEL);
	if (!hsave_page)
		goto free_page3;

	svm->nested.hsave = page_address(hsave_page);

	svm->msrpm = page_address(msrpm_pages);
	svm_vcpu_init_msrpm(svm->msrpm);

	svm->nested.msrpm = page_address(nested_msrpm_pages);
	svm_vcpu_init_msrpm(svm->nested.msrpm);

	svm->vmcb = page_address(page);
	clear_page(svm->vmcb);
	svm->vmcb_pa = page_to_pfn(page) << PAGE_SHIFT;
	svm->asid_generation = 0;
	init_vmcb(svm);
	kvm_write_tsc(&svm->vcpu, 0);

	err = fx_init(&svm->vcpu);
	if (err)
		goto free_page4;

	svm->vcpu.arch.apic_base = 0xfee00000 | MSR_IA32_APICBASE_ENABLE;
	if (kvm_vcpu_is_bsp(&svm->vcpu))
		svm->vcpu.arch.apic_base |= MSR_IA32_APICBASE_BSP;

	svm_init_osvw(&svm->vcpu);

	return &svm->vcpu;

free_page4:
	__free_page(hsave_page);
free_page3:
	__free_pages(nested_msrpm_pages, MSRPM_ALLOC_ORDER);
free_page2:
	__free_pages(msrpm_pages, MSRPM_ALLOC_ORDER);
free_page1:
	__free_page(page);
uninit:
	kvm_vcpu_uninit(&svm->vcpu);
free_svm:
	kmem_cache_free(kvm_vcpu_cache, svm);
out:
	return ERR_PTR(err);
}

static void svm_free_vcpu(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	__free_page(pfn_to_page(svm->vmcb_pa >> PAGE_SHIFT));
	__free_pages(virt_to_page(svm->msrpm), MSRPM_ALLOC_ORDER);
	__free_page(virt_to_page(svm->nested.hsave));
	__free_pages(virt_to_page(svm->nested.msrpm), MSRPM_ALLOC_ORDER);
	kvm_vcpu_uninit(vcpu);
	kmem_cache_free(kvm_vcpu_cache, svm);
}

static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	int i;

	if (unlikely(cpu != vcpu->cpu)) {
		svm->asid_generation = 0;
		mark_all_dirty(svm->vmcb);
	}

#ifdef CONFIG_X86_64
	rdmsrl(MSR_GS_BASE, to_svm(vcpu)->host.gs_base);
#endif
	savesegment(fs, svm->host.fs);
	savesegment(gs, svm->host.gs);
	svm->host.ldt = kvm_read_ldt();

	for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++)
		rdmsrl(host_save_user_msrs[i], svm->host_user_msrs[i]);

	if (static_cpu_has(X86_FEATURE_TSCRATEMSR) &&
	    svm->tsc_ratio != __get_cpu_var(current_tsc_ratio)) {
		__get_cpu_var(current_tsc_ratio) = svm->tsc_ratio;
		wrmsrl(MSR_AMD64_TSC_RATIO, svm->tsc_ratio);
	}
}

static void svm_vcpu_put(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	int i;

	++vcpu->stat.host_state_reload;
	kvm_load_ldt(svm->host.ldt);
#ifdef CONFIG_X86_64
	loadsegment(fs, svm->host.fs);
	wrmsrl(MSR_KERNEL_GS_BASE, current->thread.gs);
	load_gs_index(svm->host.gs);
#else
#ifdef CONFIG_X86_32_LAZY_GS
	loadsegment(gs, svm->host.gs);
#endif
#endif
	for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++)
		wrmsrl(host_save_user_msrs[i], svm->host_user_msrs[i]);
}

static unsigned long svm_get_rflags(struct kvm_vcpu *vcpu)
{
	return to_svm(vcpu)->vmcb->save.rflags;
}

static void svm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
{
	to_svm(vcpu)->vmcb->save.rflags = rflags;
}

static void svm_cache_reg(struct kvm_vcpu *vcpu, enum kvm_reg reg)
{
	switch (reg) {
	case VCPU_EXREG_PDPTR:
		BUG_ON(!npt_enabled);
		load_pdptrs(vcpu, vcpu->arch.walk_mmu, kvm_read_cr3(vcpu));
		break;
	default:
		BUG();
	}
}

static void svm_set_vintr(struct vcpu_svm *svm)
{
	set_intercept(svm, INTERCEPT_VINTR);
}

static void svm_clear_vintr(struct vcpu_svm *svm)
{
	clr_intercept(svm, INTERCEPT_VINTR);
}

static struct vmcb_seg *svm_seg(struct kvm_vcpu *vcpu, int seg)
{
	struct vmcb_save_area *save = &to_svm(vcpu)->vmcb->save;

	switch (seg) {
	case VCPU_SREG_CS: return &save->cs;
	case VCPU_SREG_DS: return &save->ds;
	case VCPU_SREG_ES: return &save->es;
	case VCPU_SREG_FS: return &save->fs;
	case VCPU_SREG_GS: return &save->gs;
	case VCPU_SREG_SS: return &save->ss;
	case VCPU_SREG_TR: return &save->tr;
	case VCPU_SREG_LDTR: return &save->ldtr;
	}
	BUG();
	return NULL;
}

static u64 svm_get_segment_base(struct kvm_vcpu *vcpu, int seg)
{
	struct vmcb_seg *s = svm_seg(vcpu, seg);

	return s->base;
}

static void svm_get_segment(struct kvm_vcpu *vcpu,
			    struct kvm_segment *var, int seg)
{
	struct vmcb_seg *s = svm_seg(vcpu, seg);

	var->base = s->base;
	var->limit = s->limit;
	var->selector = s->selector;
	var->type = s->attrib & SVM_SELECTOR_TYPE_MASK;
	var->s = (s->attrib >> SVM_SELECTOR_S_SHIFT) & 1;
	var->dpl = (s->attrib >> SVM_SELECTOR_DPL_SHIFT) & 3;
	var->present = (s->attrib >> SVM_SELECTOR_P_SHIFT) & 1;
	var->avl = (s->attrib >> SVM_SELECTOR_AVL_SHIFT) & 1;
	var->l = (s->attrib >> SVM_SELECTOR_L_SHIFT) & 1;
	var->db = (s->attrib >> SVM_SELECTOR_DB_SHIFT) & 1;
	var->g = (s->attrib >> SVM_SELECTOR_G_SHIFT) & 1;

	/*
	 * AMD's VMCB does not have an explicit unusable field, so emulate it
	 * for cross-vendor migration purposes by treating "not present"
	 * segments as unusable.
	 */
	var->unusable = !var->present || (var->type == 0);

	switch (seg) {
	case VCPU_SREG_CS:
		/*
		 * SVM always stores 0 for the 'G' bit in the CS selector in
		 * the VMCB on a VMEXIT. This hurts cross-vendor migration:
		 * Intel's VMENTRY has a check on the 'G' bit.
		 */
		var->g = s->limit > 0xfffff;
		break;
	case VCPU_SREG_TR:
		/*
		 * Work around a bug where the busy flag in the tr selector
		 * isn't exposed
		 */
		var->type |= 0x2;
		break;
	case VCPU_SREG_DS:
	case VCPU_SREG_ES:
	case VCPU_SREG_FS:
	case VCPU_SREG_GS:
		/*
		 * The accessed bit must always be set in the segment
		 * descriptor cache: even if it is cleared in the descriptor
		 * itself, the cached copy stays at 1. Since Intel has a check
		 * on this, set it here to support cross-vendor migration.
		 */
		if (!var->unusable)
			var->type |= 0x1;
		break;
	case VCPU_SREG_SS:
		/*
		 * On AMD CPUs sometimes the DB bit in the segment
		 * descriptor is left as 1, although the whole segment has
		 * been made unusable. Clear it here to pass an Intel VMX
		 * entry check when cross vendor migrating.
		 */
		if (var->unusable)
			var->db = 0;
		break;
	}
}
  1162. static int svm_get_cpl(struct kvm_vcpu *vcpu)
  1163. {
  1164. struct vmcb_save_area *save = &to_svm(vcpu)->vmcb->save;
  1165. return save->cpl;
  1166. }
  1167. static void svm_get_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
  1168. {
  1169. struct vcpu_svm *svm = to_svm(vcpu);
  1170. dt->size = svm->vmcb->save.idtr.limit;
  1171. dt->address = svm->vmcb->save.idtr.base;
  1172. }
  1173. static void svm_set_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
  1174. {
  1175. struct vcpu_svm *svm = to_svm(vcpu);
  1176. svm->vmcb->save.idtr.limit = dt->size;
1177. svm->vmcb->save.idtr.base = dt->address;
  1178. mark_dirty(svm->vmcb, VMCB_DT);
  1179. }
  1180. static void svm_get_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
  1181. {
  1182. struct vcpu_svm *svm = to_svm(vcpu);
  1183. dt->size = svm->vmcb->save.gdtr.limit;
  1184. dt->address = svm->vmcb->save.gdtr.base;
  1185. }
  1186. static void svm_set_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
  1187. {
  1188. struct vcpu_svm *svm = to_svm(vcpu);
  1189. svm->vmcb->save.gdtr.limit = dt->size;
1190. svm->vmcb->save.gdtr.base = dt->address;
  1191. mark_dirty(svm->vmcb, VMCB_DT);
  1192. }
  1193. static void svm_decache_cr0_guest_bits(struct kvm_vcpu *vcpu)
  1194. {
  1195. }
  1196. static void svm_decache_cr3(struct kvm_vcpu *vcpu)
  1197. {
  1198. }
  1199. static void svm_decache_cr4_guest_bits(struct kvm_vcpu *vcpu)
  1200. {
  1201. }
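/*
 * Drop the CR0 read/write intercepts when the guest-visible CR0 matches the
 * shadowed CR0 in the selectively-intercepted bits and the FPU is active;
 * otherwise (re-)enable them.
 */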
  1202. static void update_cr0_intercept(struct vcpu_svm *svm)
  1203. {
  1204. ulong gcr0 = svm->vcpu.arch.cr0;
  1205. u64 *hcr0 = &svm->vmcb->save.cr0;
  1206. if (!svm->vcpu.fpu_active)
  1207. *hcr0 |= SVM_CR0_SELECTIVE_MASK;
  1208. else
  1209. *hcr0 = (*hcr0 & ~SVM_CR0_SELECTIVE_MASK)
  1210. | (gcr0 & SVM_CR0_SELECTIVE_MASK);
  1211. mark_dirty(svm->vmcb, VMCB_CR);
  1212. if (gcr0 == *hcr0 && svm->vcpu.fpu_active) {
  1213. clr_cr_intercept(svm, INTERCEPT_CR0_READ);
  1214. clr_cr_intercept(svm, INTERCEPT_CR0_WRITE);
  1215. } else {
  1216. set_cr_intercept(svm, INTERCEPT_CR0_READ);
  1217. set_cr_intercept(svm, INTERCEPT_CR0_WRITE);
  1218. }
  1219. }
  1220. static void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
  1221. {
  1222. struct vcpu_svm *svm = to_svm(vcpu);
  1223. #ifdef CONFIG_X86_64
  1224. if (vcpu->arch.efer & EFER_LME) {
  1225. if (!is_paging(vcpu) && (cr0 & X86_CR0_PG)) {
  1226. vcpu->arch.efer |= EFER_LMA;
  1227. svm->vmcb->save.efer |= EFER_LMA | EFER_LME;
  1228. }
  1229. if (is_paging(vcpu) && !(cr0 & X86_CR0_PG)) {
  1230. vcpu->arch.efer &= ~EFER_LMA;
  1231. svm->vmcb->save.efer &= ~(EFER_LMA | EFER_LME);
  1232. }
  1233. }
  1234. #endif
  1235. vcpu->arch.cr0 = cr0;
  1236. if (!npt_enabled)
  1237. cr0 |= X86_CR0_PG | X86_CR0_WP;
  1238. if (!vcpu->fpu_active)
  1239. cr0 |= X86_CR0_TS;
  1240. /*
  1241. * re-enable caching here because the QEMU bios
  1242. * does not do it - this results in some delay at
  1243. * reboot
  1244. */
  1245. cr0 &= ~(X86_CR0_CD | X86_CR0_NW);
  1246. svm->vmcb->save.cr0 = cr0;
  1247. mark_dirty(svm->vmcb, VMCB_CR);
  1248. update_cr0_intercept(svm);
  1249. }
  1250. static int svm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
  1251. {
  1252. unsigned long host_cr4_mce = read_cr4() & X86_CR4_MCE;
  1253. unsigned long old_cr4 = to_svm(vcpu)->vmcb->save.cr4;
  1254. if (cr4 & X86_CR4_VMXE)
  1255. return 1;
  1256. if (npt_enabled && ((old_cr4 ^ cr4) & X86_CR4_PGE))
  1257. svm_flush_tlb(vcpu);
  1258. vcpu->arch.cr4 = cr4;
  1259. if (!npt_enabled)
  1260. cr4 |= X86_CR4_PAE;
  1261. cr4 |= host_cr4_mce;
  1262. to_svm(vcpu)->vmcb->save.cr4 = cr4;
  1263. mark_dirty(to_svm(vcpu)->vmcb, VMCB_CR);
  1264. return 0;
  1265. }
  1266. static void svm_set_segment(struct kvm_vcpu *vcpu,
  1267. struct kvm_segment *var, int seg)
  1268. {
  1269. struct vcpu_svm *svm = to_svm(vcpu);
  1270. struct vmcb_seg *s = svm_seg(vcpu, seg);
  1271. s->base = var->base;
  1272. s->limit = var->limit;
  1273. s->selector = var->selector;
  1274. if (var->unusable)
  1275. s->attrib = 0;
  1276. else {
  1277. s->attrib = (var->type & SVM_SELECTOR_TYPE_MASK);
  1278. s->attrib |= (var->s & 1) << SVM_SELECTOR_S_SHIFT;
  1279. s->attrib |= (var->dpl & 3) << SVM_SELECTOR_DPL_SHIFT;
  1280. s->attrib |= (var->present & 1) << SVM_SELECTOR_P_SHIFT;
  1281. s->attrib |= (var->avl & 1) << SVM_SELECTOR_AVL_SHIFT;
  1282. s->attrib |= (var->l & 1) << SVM_SELECTOR_L_SHIFT;
  1283. s->attrib |= (var->db & 1) << SVM_SELECTOR_DB_SHIFT;
  1284. s->attrib |= (var->g & 1) << SVM_SELECTOR_G_SHIFT;
  1285. }
  1286. if (seg == VCPU_SREG_CS)
  1287. svm->vmcb->save.cpl
  1288. = (svm->vmcb->save.cs.attrib
  1289. >> SVM_SELECTOR_DPL_SHIFT) & 3;
  1290. mark_dirty(svm->vmcb, VMCB_SEG);
  1291. }
  1292. static void update_db_intercept(struct kvm_vcpu *vcpu)
  1293. {
  1294. struct vcpu_svm *svm = to_svm(vcpu);
  1295. clr_exception_intercept(svm, DB_VECTOR);
  1296. clr_exception_intercept(svm, BP_VECTOR);
  1297. if (svm->nmi_singlestep)
  1298. set_exception_intercept(svm, DB_VECTOR);
  1299. if (vcpu->guest_debug & KVM_GUESTDBG_ENABLE) {
  1300. if (vcpu->guest_debug &
  1301. (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP))
  1302. set_exception_intercept(svm, DB_VECTOR);
  1303. if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP)
  1304. set_exception_intercept(svm, BP_VECTOR);
  1305. } else
  1306. vcpu->guest_debug = 0;
  1307. }
  1308. static void svm_guest_debug(struct kvm_vcpu *vcpu, struct kvm_guest_debug *dbg)
  1309. {
  1310. struct vcpu_svm *svm = to_svm(vcpu);
  1311. if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)
  1312. svm->vmcb->save.dr7 = dbg->arch.debugreg[7];
  1313. else
  1314. svm->vmcb->save.dr7 = vcpu->arch.dr7;
  1315. mark_dirty(svm->vmcb, VMCB_DR);
  1316. update_db_intercept(vcpu);
  1317. }
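/*
 * Assign this vCPU a fresh ASID from the per-CPU pool; when the pool wraps,
 * bump the generation and request a full TLB flush on the next VMRUN.
 */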
  1318. static void new_asid(struct vcpu_svm *svm, struct svm_cpu_data *sd)
  1319. {
  1320. if (sd->next_asid > sd->max_asid) {
  1321. ++sd->asid_generation;
  1322. sd->next_asid = 1;
  1323. svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ALL_ASID;
  1324. }
  1325. svm->asid_generation = sd->asid_generation;
  1326. svm->vmcb->control.asid = sd->next_asid++;
  1327. mark_dirty(svm->vmcb, VMCB_ASID);
  1328. }
  1329. static void svm_set_dr7(struct kvm_vcpu *vcpu, unsigned long value)
  1330. {
  1331. struct vcpu_svm *svm = to_svm(vcpu);
  1332. svm->vmcb->save.dr7 = value;
  1333. mark_dirty(svm->vmcb, VMCB_DR);
  1334. }
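/*
 * #PF/#NPF intercept: ordinary faults are forwarded to the MMU, while the
 * two paravirtual async-PF reasons park or wake the affected task directly.
 */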
  1335. static int pf_interception(struct vcpu_svm *svm)
  1336. {
  1337. u64 fault_address = svm->vmcb->control.exit_info_2;
  1338. u32 error_code;
  1339. int r = 1;
  1340. switch (svm->apf_reason) {
  1341. default:
  1342. error_code = svm->vmcb->control.exit_info_1;
  1343. trace_kvm_page_fault(fault_address, error_code);
  1344. if (!npt_enabled && kvm_event_needs_reinjection(&svm->vcpu))
  1345. kvm_mmu_unprotect_page_virt(&svm->vcpu, fault_address);
  1346. r = kvm_mmu_page_fault(&svm->vcpu, fault_address, error_code,
  1347. svm->vmcb->control.insn_bytes,
  1348. svm->vmcb->control.insn_len);
  1349. break;
  1350. case KVM_PV_REASON_PAGE_NOT_PRESENT:
  1351. svm->apf_reason = 0;
  1352. local_irq_disable();
  1353. kvm_async_pf_task_wait(fault_address);
  1354. local_irq_enable();
  1355. break;
  1356. case KVM_PV_REASON_PAGE_READY:
  1357. svm->apf_reason = 0;
  1358. local_irq_disable();
  1359. kvm_async_pf_task_wake(fault_address);
  1360. local_irq_enable();
  1361. break;
  1362. }
  1363. return r;
  1364. }
  1365. static int db_interception(struct vcpu_svm *svm)
  1366. {
  1367. struct kvm_run *kvm_run = svm->vcpu.run;
  1368. if (!(svm->vcpu.guest_debug &
  1369. (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP)) &&
  1370. !svm->nmi_singlestep) {
  1371. kvm_queue_exception(&svm->vcpu, DB_VECTOR);
  1372. return 1;
  1373. }
  1374. if (svm->nmi_singlestep) {
  1375. svm->nmi_singlestep = false;
  1376. if (!(svm->vcpu.guest_debug & KVM_GUESTDBG_SINGLESTEP))
  1377. svm->vmcb->save.rflags &=
  1378. ~(X86_EFLAGS_TF | X86_EFLAGS_RF);
  1379. update_db_intercept(&svm->vcpu);
  1380. }
  1381. if (svm->vcpu.guest_debug &
  1382. (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP)) {
  1383. kvm_run->exit_reason = KVM_EXIT_DEBUG;
  1384. kvm_run->debug.arch.pc =
  1385. svm->vmcb->save.cs.base + svm->vmcb->save.rip;
  1386. kvm_run->debug.arch.exception = DB_VECTOR;
  1387. return 0;
  1388. }
  1389. return 1;
  1390. }
  1391. static int bp_interception(struct vcpu_svm *svm)
  1392. {
  1393. struct kvm_run *kvm_run = svm->vcpu.run;
  1394. kvm_run->exit_reason = KVM_EXIT_DEBUG;
  1395. kvm_run->debug.arch.pc = svm->vmcb->save.cs.base + svm->vmcb->save.rip;
  1396. kvm_run->debug.arch.exception = BP_VECTOR;
  1397. return 0;
  1398. }
  1399. static int ud_interception(struct vcpu_svm *svm)
  1400. {
  1401. int er;
  1402. er = emulate_instruction(&svm->vcpu, EMULTYPE_TRAP_UD);
  1403. if (er != EMULATE_DONE)
  1404. kvm_queue_exception(&svm->vcpu, UD_VECTOR);
  1405. return 1;
  1406. }
  1407. static void svm_fpu_activate(struct kvm_vcpu *vcpu)
  1408. {
  1409. struct vcpu_svm *svm = to_svm(vcpu);
  1410. clr_exception_intercept(svm, NM_VECTOR);
  1411. svm->vcpu.fpu_active = 1;
  1412. update_cr0_intercept(svm);
  1413. }
  1414. static int nm_interception(struct vcpu_svm *svm)
  1415. {
  1416. svm_fpu_activate(&svm->vcpu);
  1417. return 1;
  1418. }
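/*
 * Detect AMD erratum 383 by its MC0_STATUS signature; if it matches, clear
 * the machine-check status registers and flush the TLB so the host survives.
 */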
  1419. static bool is_erratum_383(void)
  1420. {
  1421. int err, i;
  1422. u64 value;
  1423. if (!erratum_383_found)
  1424. return false;
  1425. value = native_read_msr_safe(MSR_IA32_MC0_STATUS, &err);
  1426. if (err)
  1427. return false;
  1428. /* Bit 62 may or may not be set for this mce */
  1429. value &= ~(1ULL << 62);
  1430. if (value != 0xb600000000010015ULL)
  1431. return false;
  1432. /* Clear MCi_STATUS registers */
  1433. for (i = 0; i < 6; ++i)
  1434. native_write_msr_safe(MSR_IA32_MCx_STATUS(i), 0, 0);
  1435. value = native_read_msr_safe(MSR_IA32_MCG_STATUS, &err);
  1436. if (!err) {
  1437. u32 low, high;
  1438. value &= ~(1ULL << 2);
  1439. low = lower_32_bits(value);
  1440. high = upper_32_bits(value);
  1441. native_write_msr_safe(MSR_IA32_MCG_STATUS, low, high);
  1442. }
  1443. /* Flush tlb to evict multi-match entries */
  1444. __flush_tlb_all();
  1445. return true;
  1446. }
  1447. static void svm_handle_mce(struct vcpu_svm *svm)
  1448. {
  1449. if (is_erratum_383()) {
  1450. /*
  1451. * Erratum 383 triggered. Guest state is corrupt so kill the
  1452. * guest.
  1453. */
  1454. pr_err("KVM: Guest triggered AMD Erratum 383\n");
  1455. kvm_make_request(KVM_REQ_TRIPLE_FAULT, &svm->vcpu);
  1456. return;
  1457. }
  1458. /*
  1459. * On an #MC intercept the MCE handler is not called automatically in
  1460. * the host. So do it by hand here.
  1461. */
  1462. asm volatile (
  1463. "int $0x12\n");
  1464. /* not sure if we ever come back to this point */
  1465. return;
  1466. }
  1467. static int mc_interception(struct vcpu_svm *svm)
  1468. {
  1469. return 1;
  1470. }
  1471. static int shutdown_interception(struct vcpu_svm *svm)
  1472. {
  1473. struct kvm_run *kvm_run = svm->vcpu.run;
  1474. /*
  1475. * VMCB is undefined after a SHUTDOWN intercept
  1476. * so reinitialize it.
  1477. */
  1478. clear_page(svm->vmcb);
  1479. init_vmcb(svm);
  1480. kvm_run->exit_reason = KVM_EXIT_SHUTDOWN;
  1481. return 0;
  1482. }
  1483. static int io_interception(struct vcpu_svm *svm)
  1484. {
  1485. struct kvm_vcpu *vcpu = &svm->vcpu;
  1486. u32 io_info = svm->vmcb->control.exit_info_1; /* address size bug? */
  1487. int size, in, string;
  1488. unsigned port;
  1489. ++svm->vcpu.stat.io_exits;
  1490. string = (io_info & SVM_IOIO_STR_MASK) != 0;
  1491. in = (io_info & SVM_IOIO_TYPE_MASK) != 0;
  1492. if (string || in)
  1493. return emulate_instruction(vcpu, 0) == EMULATE_DONE;
  1494. port = io_info >> 16;
  1495. size = (io_info & SVM_IOIO_SIZE_MASK) >> SVM_IOIO_SIZE_SHIFT;
  1496. svm->next_rip = svm->vmcb->control.exit_info_2;
  1497. skip_emulated_instruction(&svm->vcpu);
  1498. return kvm_fast_pio_out(vcpu, size, port);
  1499. }
  1500. static int nmi_interception(struct vcpu_svm *svm)
  1501. {
  1502. return 1;
  1503. }
  1504. static int intr_interception(struct vcpu_svm *svm)
  1505. {
  1506. ++svm->vcpu.stat.irq_exits;
  1507. return 1;
  1508. }
  1509. static int nop_on_interception(struct vcpu_svm *svm)
  1510. {
  1511. return 1;
  1512. }
  1513. static int halt_interception(struct vcpu_svm *svm)
  1514. {
  1515. svm->next_rip = kvm_rip_read(&svm->vcpu) + 1;
  1516. skip_emulated_instruction(&svm->vcpu);
  1517. return kvm_emulate_halt(&svm->vcpu);
  1518. }
  1519. static int vmmcall_interception(struct vcpu_svm *svm)
  1520. {
  1521. svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
  1522. skip_emulated_instruction(&svm->vcpu);
  1523. kvm_emulate_hypercall(&svm->vcpu);
  1524. return 1;
  1525. }
  1526. static unsigned long nested_svm_get_tdp_cr3(struct kvm_vcpu *vcpu)
  1527. {
  1528. struct vcpu_svm *svm = to_svm(vcpu);
  1529. return svm->nested.nested_cr3;
  1530. }
  1531. static u64 nested_svm_get_tdp_pdptr(struct kvm_vcpu *vcpu, int index)
  1532. {
  1533. struct vcpu_svm *svm = to_svm(vcpu);
  1534. u64 cr3 = svm->nested.nested_cr3;
  1535. u64 pdpte;
  1536. int ret;
  1537. ret = kvm_read_guest_page(vcpu->kvm, gpa_to_gfn(cr3), &pdpte,
  1538. offset_in_page(cr3) + index * 8, 8);
  1539. if (ret)
  1540. return 0;
  1541. return pdpte;
  1542. }
  1543. static void nested_svm_set_tdp_cr3(struct kvm_vcpu *vcpu,
  1544. unsigned long root)
  1545. {
  1546. struct vcpu_svm *svm = to_svm(vcpu);
  1547. svm->vmcb->control.nested_cr3 = root;
  1548. mark_dirty(svm->vmcb, VMCB_NPT);
  1549. svm_flush_tlb(vcpu);
  1550. }
  1551. static void nested_svm_inject_npf_exit(struct kvm_vcpu *vcpu,
  1552. struct x86_exception *fault)
  1553. {
  1554. struct vcpu_svm *svm = to_svm(vcpu);
  1555. svm->vmcb->control.exit_code = SVM_EXIT_NPF;
  1556. svm->vmcb->control.exit_code_hi = 0;
  1557. svm->vmcb->control.exit_info_1 = fault->error_code;
  1558. svm->vmcb->control.exit_info_2 = fault->address;
  1559. nested_svm_vmexit(svm);
  1560. }
  1561. static int nested_svm_init_mmu_context(struct kvm_vcpu *vcpu)
  1562. {
  1563. int r;
  1564. r = kvm_init_shadow_mmu(vcpu, &vcpu->arch.mmu);
  1565. vcpu->arch.mmu.set_cr3 = nested_svm_set_tdp_cr3;
  1566. vcpu->arch.mmu.get_cr3 = nested_svm_get_tdp_cr3;
  1567. vcpu->arch.mmu.get_pdptr = nested_svm_get_tdp_pdptr;
  1568. vcpu->arch.mmu.inject_page_fault = nested_svm_inject_npf_exit;
  1569. vcpu->arch.mmu.shadow_root_level = get_npt_level();
  1570. vcpu->arch.walk_mmu = &vcpu->arch.nested_mmu;
  1571. return r;
  1572. }
  1573. static void nested_svm_uninit_mmu_context(struct kvm_vcpu *vcpu)
  1574. {
  1575. vcpu->arch.walk_mmu = &vcpu->arch.mmu;
  1576. }
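/*
 * SVM instructions are only legal with EFER.SVME set, paging enabled and
 * CPL 0; otherwise inject #UD or #GP into the guest.
 */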
  1577. static int nested_svm_check_permissions(struct vcpu_svm *svm)
  1578. {
  1579. if (!(svm->vcpu.arch.efer & EFER_SVME)
  1580. || !is_paging(&svm->vcpu)) {
  1581. kvm_queue_exception(&svm->vcpu, UD_VECTOR);
  1582. return 1;
  1583. }
  1584. if (svm->vmcb->save.cpl) {
  1585. kvm_inject_gp(&svm->vcpu, 0);
  1586. return 1;
  1587. }
  1588. return 0;
  1589. }
  1590. static int nested_svm_check_exception(struct vcpu_svm *svm, unsigned nr,
  1591. bool has_error_code, u32 error_code)
  1592. {
  1593. int vmexit;
  1594. if (!is_guest_mode(&svm->vcpu))
  1595. return 0;
  1596. svm->vmcb->control.exit_code = SVM_EXIT_EXCP_BASE + nr;
  1597. svm->vmcb->control.exit_code_hi = 0;
  1598. svm->vmcb->control.exit_info_1 = error_code;
  1599. svm->vmcb->control.exit_info_2 = svm->vcpu.arch.cr2;
  1600. vmexit = nested_svm_intercept(svm);
  1601. if (vmexit == NESTED_EXIT_DONE)
  1602. svm->nested.exit_required = true;
  1603. return vmexit;
  1604. }
1605. /* This function returns true if it is safe to enable the irq window */
  1606. static inline bool nested_svm_intr(struct vcpu_svm *svm)
  1607. {
  1608. if (!is_guest_mode(&svm->vcpu))
  1609. return true;
  1610. if (!(svm->vcpu.arch.hflags & HF_VINTR_MASK))
  1611. return true;
  1612. if (!(svm->vcpu.arch.hflags & HF_HIF_MASK))
  1613. return false;
  1614. /*
  1615. * if vmexit was already requested (by intercepted exception
  1616. * for instance) do not overwrite it with "external interrupt"
  1617. * vmexit.
  1618. */
  1619. if (svm->nested.exit_required)
  1620. return false;
  1621. svm->vmcb->control.exit_code = SVM_EXIT_INTR;
  1622. svm->vmcb->control.exit_info_1 = 0;
  1623. svm->vmcb->control.exit_info_2 = 0;
  1624. if (svm->nested.intercept & 1ULL) {
  1625. /*
  1626. * The #vmexit can't be emulated here directly because this
1627. * code path runs with irqs and preemption disabled. A
  1628. * #vmexit emulation might sleep. Only signal request for
  1629. * the #vmexit here.
  1630. */
  1631. svm->nested.exit_required = true;
  1632. trace_kvm_nested_intr_vmexit(svm->vmcb->save.rip);
  1633. return false;
  1634. }
  1635. return true;
  1636. }
1637. /* This function returns true if it is safe to enable the nmi window */
  1638. static inline bool nested_svm_nmi(struct vcpu_svm *svm)
  1639. {
  1640. if (!is_guest_mode(&svm->vcpu))
  1641. return true;
  1642. if (!(svm->nested.intercept & (1ULL << INTERCEPT_NMI)))
  1643. return true;
  1644. svm->vmcb->control.exit_code = SVM_EXIT_NMI;
  1645. svm->nested.exit_required = true;
  1646. return false;
  1647. }
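/*
 * Map a guest-physical page (e.g. the nested VMCB) into the host address
 * space; the caller releases it again with nested_svm_unmap().
 */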
  1648. static void *nested_svm_map(struct vcpu_svm *svm, u64 gpa, struct page **_page)
  1649. {
  1650. struct page *page;
  1651. might_sleep();
  1652. page = gfn_to_page(svm->vcpu.kvm, gpa >> PAGE_SHIFT);
  1653. if (is_error_page(page))
  1654. goto error;
  1655. *_page = page;
  1656. return kmap(page);
  1657. error:
  1658. kvm_release_page_clean(page);
  1659. kvm_inject_gp(&svm->vcpu, 0);
  1660. return NULL;
  1661. }
  1662. static void nested_svm_unmap(struct page *page)
  1663. {
  1664. kunmap(page);
  1665. kvm_release_page_dirty(page);
  1666. }
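/*
 * Consult the nested guest's IO permission bitmap to decide whether an IOIO
 * intercept must be reflected to the nested hypervisor.
 */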
  1667. static int nested_svm_intercept_ioio(struct vcpu_svm *svm)
  1668. {
  1669. unsigned port;
  1670. u8 val, bit;
  1671. u64 gpa;
  1672. if (!(svm->nested.intercept & (1ULL << INTERCEPT_IOIO_PROT)))
  1673. return NESTED_EXIT_HOST;
  1674. port = svm->vmcb->control.exit_info_1 >> 16;
  1675. gpa = svm->nested.vmcb_iopm + (port / 8);
  1676. bit = port % 8;
  1677. val = 0;
1678. if (!kvm_read_guest(svm->vcpu.kvm, gpa, &val, 1))
  1679. val &= (1 << bit);
  1680. return val ? NESTED_EXIT_DONE : NESTED_EXIT_HOST;
  1681. }
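/*
 * Consult the nested guest's MSR permission bitmap to decide whether an MSR
 * intercept must be reflected to the nested hypervisor.
 */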
  1682. static int nested_svm_exit_handled_msr(struct vcpu_svm *svm)
  1683. {
  1684. u32 offset, msr, value;
  1685. int write, mask;
  1686. if (!(svm->nested.intercept & (1ULL << INTERCEPT_MSR_PROT)))
  1687. return NESTED_EXIT_HOST;
  1688. msr = svm->vcpu.arch.regs[VCPU_REGS_RCX];
  1689. offset = svm_msrpm_offset(msr);
  1690. write = svm->vmcb->control.exit_info_1 & 1;
  1691. mask = 1 << ((2 * (msr & 0xf)) + write);
  1692. if (offset == MSR_INVALID)
  1693. return NESTED_EXIT_DONE;
1694. /* Offset is in 32 bit units but we need it in 8 bit units */
  1695. offset *= 4;
  1696. if (kvm_read_guest(svm->vcpu.kvm, svm->nested.vmcb_msrpm + offset, &value, 4))
  1697. return NESTED_EXIT_DONE;
  1698. return (value & mask) ? NESTED_EXIT_DONE : NESTED_EXIT_HOST;
  1699. }
  1700. static int nested_svm_exit_special(struct vcpu_svm *svm)
  1701. {
  1702. u32 exit_code = svm->vmcb->control.exit_code;
  1703. switch (exit_code) {
  1704. case SVM_EXIT_INTR:
  1705. case SVM_EXIT_NMI:
  1706. case SVM_EXIT_EXCP_BASE + MC_VECTOR:
  1707. return NESTED_EXIT_HOST;
  1708. case SVM_EXIT_NPF:
  1709. /* For now we are always handling NPFs when using them */
  1710. if (npt_enabled)
  1711. return NESTED_EXIT_HOST;
  1712. break;
  1713. case SVM_EXIT_EXCP_BASE + PF_VECTOR:
  1714. /* When we're shadowing, trap PFs, but not async PF */
  1715. if (!npt_enabled && svm->apf_reason == 0)
  1716. return NESTED_EXIT_HOST;
  1717. break;
  1718. case SVM_EXIT_EXCP_BASE + NM_VECTOR:
  1719. nm_interception(svm);
  1720. break;
  1721. default:
  1722. break;
  1723. }
  1724. return NESTED_EXIT_CONTINUE;
  1725. }
  1726. /*
1727. * Returns NESTED_EXIT_DONE if this #vmexit is intercepted by the nested
1727. * hypervisor and must be reflected to it
  1728. */
  1729. static int nested_svm_intercept(struct vcpu_svm *svm)
  1730. {
  1731. u32 exit_code = svm->vmcb->control.exit_code;
  1732. int vmexit = NESTED_EXIT_HOST;
  1733. switch (exit_code) {
  1734. case SVM_EXIT_MSR:
  1735. vmexit = nested_svm_exit_handled_msr(svm);
  1736. break;
  1737. case SVM_EXIT_IOIO:
  1738. vmexit = nested_svm_intercept_ioio(svm);
  1739. break;
  1740. case SVM_EXIT_READ_CR0 ... SVM_EXIT_WRITE_CR8: {
  1741. u32 bit = 1U << (exit_code - SVM_EXIT_READ_CR0);
  1742. if (svm->nested.intercept_cr & bit)
  1743. vmexit = NESTED_EXIT_DONE;
  1744. break;
  1745. }
  1746. case SVM_EXIT_READ_DR0 ... SVM_EXIT_WRITE_DR7: {
  1747. u32 bit = 1U << (exit_code - SVM_EXIT_READ_DR0);
  1748. if (svm->nested.intercept_dr & bit)
  1749. vmexit = NESTED_EXIT_DONE;
  1750. break;
  1751. }
  1752. case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 0x1f: {
  1753. u32 excp_bits = 1 << (exit_code - SVM_EXIT_EXCP_BASE);
  1754. if (svm->nested.intercept_exceptions & excp_bits)
  1755. vmexit = NESTED_EXIT_DONE;
1756. /* an async page fault always causes a vmexit */
  1757. else if ((exit_code == SVM_EXIT_EXCP_BASE + PF_VECTOR) &&
  1758. svm->apf_reason != 0)
  1759. vmexit = NESTED_EXIT_DONE;
  1760. break;
  1761. }
  1762. case SVM_EXIT_ERR: {
  1763. vmexit = NESTED_EXIT_DONE;
  1764. break;
  1765. }
  1766. default: {
  1767. u64 exit_bits = 1ULL << (exit_code - SVM_EXIT_INTR);
  1768. if (svm->nested.intercept & exit_bits)
  1769. vmexit = NESTED_EXIT_DONE;
  1770. }
  1771. }
  1772. return vmexit;
  1773. }
  1774. static int nested_svm_exit_handled(struct vcpu_svm *svm)
  1775. {
  1776. int vmexit;
  1777. vmexit = nested_svm_intercept(svm);
  1778. if (vmexit == NESTED_EXIT_DONE)
  1779. nested_svm_vmexit(svm);
  1780. return vmexit;
  1781. }
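/* Copy the complete VMCB control area from from_vmcb to dst_vmcb. */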
  1782. static inline void copy_vmcb_control_area(struct vmcb *dst_vmcb, struct vmcb *from_vmcb)
  1783. {
  1784. struct vmcb_control_area *dst = &dst_vmcb->control;
  1785. struct vmcb_control_area *from = &from_vmcb->control;
  1786. dst->intercept_cr = from->intercept_cr;
  1787. dst->intercept_dr = from->intercept_dr;
  1788. dst->intercept_exceptions = from->intercept_exceptions;
  1789. dst->intercept = from->intercept;
  1790. dst->iopm_base_pa = from->iopm_base_pa;
  1791. dst->msrpm_base_pa = from->msrpm_base_pa;
  1792. dst->tsc_offset = from->tsc_offset;
  1793. dst->asid = from->asid;
  1794. dst->tlb_ctl = from->tlb_ctl;
  1795. dst->int_ctl = from->int_ctl;
  1796. dst->int_vector = from->int_vector;
  1797. dst->int_state = from->int_state;
  1798. dst->exit_code = from->exit_code;
  1799. dst->exit_code_hi = from->exit_code_hi;
  1800. dst->exit_info_1 = from->exit_info_1;
  1801. dst->exit_info_2 = from->exit_info_2;
  1802. dst->exit_int_info = from->exit_int_info;
  1803. dst->exit_int_info_err = from->exit_int_info_err;
  1804. dst->nested_ctl = from->nested_ctl;
  1805. dst->event_inj = from->event_inj;
  1806. dst->event_inj_err = from->event_inj_err;
  1807. dst->nested_cr3 = from->nested_cr3;
  1808. dst->lbr_ctl = from->lbr_ctl;
  1809. }
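/*
 * Emulate #VMEXIT to the nested hypervisor: copy the current guest state and
 * exit information into the nested VMCB, restore the host state saved at
 * VMRUN time from hsave and leave guest mode.
 */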
  1810. static int nested_svm_vmexit(struct vcpu_svm *svm)
  1811. {
  1812. struct vmcb *nested_vmcb;
  1813. struct vmcb *hsave = svm->nested.hsave;
  1814. struct vmcb *vmcb = svm->vmcb;
  1815. struct page *page;
  1816. trace_kvm_nested_vmexit_inject(vmcb->control.exit_code,
  1817. vmcb->control.exit_info_1,
  1818. vmcb->control.exit_info_2,
  1819. vmcb->control.exit_int_info,
  1820. vmcb->control.exit_int_info_err,
  1821. KVM_ISA_SVM);
  1822. nested_vmcb = nested_svm_map(svm, svm->nested.vmcb, &page);
  1823. if (!nested_vmcb)
  1824. return 1;
  1825. /* Exit Guest-Mode */
  1826. leave_guest_mode(&svm->vcpu);
  1827. svm->nested.vmcb = 0;
  1828. /* Give the current vmcb to the guest */
  1829. disable_gif(svm);
  1830. nested_vmcb->save.es = vmcb->save.es;
  1831. nested_vmcb->save.cs = vmcb->save.cs;
  1832. nested_vmcb->save.ss = vmcb->save.ss;
  1833. nested_vmcb->save.ds = vmcb->save.ds;
  1834. nested_vmcb->save.gdtr = vmcb->save.gdtr;
  1835. nested_vmcb->save.idtr = vmcb->save.idtr;
  1836. nested_vmcb->save.efer = svm->vcpu.arch.efer;
  1837. nested_vmcb->save.cr0 = kvm_read_cr0(&svm->vcpu);
  1838. nested_vmcb->save.cr3 = kvm_read_cr3(&svm->vcpu);
  1839. nested_vmcb->save.cr2 = vmcb->save.cr2;
  1840. nested_vmcb->save.cr4 = svm->vcpu.arch.cr4;
  1841. nested_vmcb->save.rflags = kvm_get_rflags(&svm->vcpu);
  1842. nested_vmcb->save.rip = vmcb->save.rip;
  1843. nested_vmcb->save.rsp = vmcb->save.rsp;
  1844. nested_vmcb->save.rax = vmcb->save.rax;
  1845. nested_vmcb->save.dr7 = vmcb->save.dr7;
  1846. nested_vmcb->save.dr6 = vmcb->save.dr6;
  1847. nested_vmcb->save.cpl = vmcb->save.cpl;
  1848. nested_vmcb->control.int_ctl = vmcb->control.int_ctl;
  1849. nested_vmcb->control.int_vector = vmcb->control.int_vector;
  1850. nested_vmcb->control.int_state = vmcb->control.int_state;
  1851. nested_vmcb->control.exit_code = vmcb->control.exit_code;
  1852. nested_vmcb->control.exit_code_hi = vmcb->control.exit_code_hi;
  1853. nested_vmcb->control.exit_info_1 = vmcb->control.exit_info_1;
  1854. nested_vmcb->control.exit_info_2 = vmcb->control.exit_info_2;
  1855. nested_vmcb->control.exit_int_info = vmcb->control.exit_int_info;
  1856. nested_vmcb->control.exit_int_info_err = vmcb->control.exit_int_info_err;
  1857. nested_vmcb->control.next_rip = vmcb->control.next_rip;
  1858. /*
  1859. * If we emulate a VMRUN/#VMEXIT in the same host #vmexit cycle we have
  1860. * to make sure that we do not lose injected events. So check event_inj
  1861. * here and copy it to exit_int_info if it is valid.
1862. * Exit_int_info and event_inj can't both be valid because the case
  1863. * below only happens on a VMRUN instruction intercept which has
  1864. * no valid exit_int_info set.
  1865. */
  1866. if (vmcb->control.event_inj & SVM_EVTINJ_VALID) {
  1867. struct vmcb_control_area *nc = &nested_vmcb->control;
  1868. nc->exit_int_info = vmcb->control.event_inj;
  1869. nc->exit_int_info_err = vmcb->control.event_inj_err;
  1870. }
  1871. nested_vmcb->control.tlb_ctl = 0;
  1872. nested_vmcb->control.event_inj = 0;
  1873. nested_vmcb->control.event_inj_err = 0;
  1874. /* We always set V_INTR_MASKING and remember the old value in hflags */
  1875. if (!(svm->vcpu.arch.hflags & HF_VINTR_MASK))
  1876. nested_vmcb->control.int_ctl &= ~V_INTR_MASKING_MASK;
  1877. /* Restore the original control entries */
  1878. copy_vmcb_control_area(vmcb, hsave);
  1879. kvm_clear_exception_queue(&svm->vcpu);
  1880. kvm_clear_interrupt_queue(&svm->vcpu);
  1881. svm->nested.nested_cr3 = 0;
  1882. /* Restore selected save entries */
  1883. svm->vmcb->save.es = hsave->save.es;
  1884. svm->vmcb->save.cs = hsave->save.cs;
  1885. svm->vmcb->save.ss = hsave->save.ss;
  1886. svm->vmcb->save.ds = hsave->save.ds;
  1887. svm->vmcb->save.gdtr = hsave->save.gdtr;
  1888. svm->vmcb->save.idtr = hsave->save.idtr;
  1889. kvm_set_rflags(&svm->vcpu, hsave->save.rflags);
  1890. svm_set_efer(&svm->vcpu, hsave->save.efer);
  1891. svm_set_cr0(&svm->vcpu, hsave->save.cr0 | X86_CR0_PE);
  1892. svm_set_cr4(&svm->vcpu, hsave->save.cr4);
  1893. if (npt_enabled) {
  1894. svm->vmcb->save.cr3 = hsave->save.cr3;
  1895. svm->vcpu.arch.cr3 = hsave->save.cr3;
  1896. } else {
  1897. (void)kvm_set_cr3(&svm->vcpu, hsave->save.cr3);
  1898. }
  1899. kvm_register_write(&svm->vcpu, VCPU_REGS_RAX, hsave->save.rax);
  1900. kvm_register_write(&svm->vcpu, VCPU_REGS_RSP, hsave->save.rsp);
  1901. kvm_register_write(&svm->vcpu, VCPU_REGS_RIP, hsave->save.rip);
  1902. svm->vmcb->save.dr7 = 0;
  1903. svm->vmcb->save.cpl = 0;
  1904. svm->vmcb->control.exit_int_info = 0;
  1905. mark_all_dirty(svm->vmcb);
  1906. nested_svm_unmap(page);
  1907. nested_svm_uninit_mmu_context(&svm->vcpu);
  1908. kvm_mmu_reset_context(&svm->vcpu);
  1909. kvm_mmu_load(&svm->vcpu);
  1910. return 0;
  1911. }
  1912. static bool nested_svm_vmrun_msrpm(struct vcpu_svm *svm)
  1913. {
  1914. /*
  1915. * This function merges the msr permission bitmaps of kvm and the
1916. nested vmcb. It is optimized in that it only merges the parts where
  1917. * the kvm msr permission bitmap may contain zero bits
  1918. */
  1919. int i;
  1920. if (!(svm->nested.intercept & (1ULL << INTERCEPT_MSR_PROT)))
  1921. return true;
  1922. for (i = 0; i < MSRPM_OFFSETS; i++) {
  1923. u32 value, p;
  1924. u64 offset;
  1925. if (msrpm_offsets[i] == 0xffffffff)
  1926. break;
  1927. p = msrpm_offsets[i];
  1928. offset = svm->nested.vmcb_msrpm + (p * 4);
  1929. if (kvm_read_guest(svm->vcpu.kvm, offset, &value, 4))
  1930. return false;
  1931. svm->nested.msrpm[p] = svm->msrpm[p] | value;
  1932. }
  1933. svm->vmcb->control.msrpm_base_pa = __pa(svm->nested.msrpm);
  1934. return true;
  1935. }
  1936. static bool nested_vmcb_checks(struct vmcb *vmcb)
  1937. {
  1938. if ((vmcb->control.intercept & (1ULL << INTERCEPT_VMRUN)) == 0)
  1939. return false;
  1940. if (vmcb->control.asid == 0)
  1941. return false;
  1942. if (vmcb->control.nested_ctl && !npt_enabled)
  1943. return false;
  1944. return true;
  1945. }
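/*
 * Emulate VMRUN: map and sanity-check the nested VMCB, save the current state
 * into hsave, load the nested guest's save and control state, merge the
 * intercepts and enter guest mode.
 */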
  1946. static bool nested_svm_vmrun(struct vcpu_svm *svm)
  1947. {
  1948. struct vmcb *nested_vmcb;
  1949. struct vmcb *hsave = svm->nested.hsave;
  1950. struct vmcb *vmcb = svm->vmcb;
  1951. struct page *page;
  1952. u64 vmcb_gpa;
  1953. vmcb_gpa = svm->vmcb->save.rax;
  1954. nested_vmcb = nested_svm_map(svm, svm->vmcb->save.rax, &page);
  1955. if (!nested_vmcb)
  1956. return false;
  1957. if (!nested_vmcb_checks(nested_vmcb)) {
  1958. nested_vmcb->control.exit_code = SVM_EXIT_ERR;
  1959. nested_vmcb->control.exit_code_hi = 0;
  1960. nested_vmcb->control.exit_info_1 = 0;
  1961. nested_vmcb->control.exit_info_2 = 0;
  1962. nested_svm_unmap(page);
  1963. return false;
  1964. }
  1965. trace_kvm_nested_vmrun(svm->vmcb->save.rip, vmcb_gpa,
  1966. nested_vmcb->save.rip,
  1967. nested_vmcb->control.int_ctl,
  1968. nested_vmcb->control.event_inj,
  1969. nested_vmcb->control.nested_ctl);
  1970. trace_kvm_nested_intercepts(nested_vmcb->control.intercept_cr & 0xffff,
  1971. nested_vmcb->control.intercept_cr >> 16,
  1972. nested_vmcb->control.intercept_exceptions,
  1973. nested_vmcb->control.intercept);
  1974. /* Clear internal status */
  1975. kvm_clear_exception_queue(&svm->vcpu);
  1976. kvm_clear_interrupt_queue(&svm->vcpu);
  1977. /*
  1978. * Save the old vmcb, so we don't need to pick what we save, but can
  1979. * restore everything when a VMEXIT occurs
  1980. */
  1981. hsave->save.es = vmcb->save.es;
  1982. hsave->save.cs = vmcb->save.cs;
  1983. hsave->save.ss = vmcb->save.ss;
  1984. hsave->save.ds = vmcb->save.ds;
  1985. hsave->save.gdtr = vmcb->save.gdtr;
  1986. hsave->save.idtr = vmcb->save.idtr;
  1987. hsave->save.efer = svm->vcpu.arch.efer;
  1988. hsave->save.cr0 = kvm_read_cr0(&svm->vcpu);
  1989. hsave->save.cr4 = svm->vcpu.arch.cr4;
  1990. hsave->save.rflags = kvm_get_rflags(&svm->vcpu);
  1991. hsave->save.rip = kvm_rip_read(&svm->vcpu);
  1992. hsave->save.rsp = vmcb->save.rsp;
  1993. hsave->save.rax = vmcb->save.rax;
  1994. if (npt_enabled)
  1995. hsave->save.cr3 = vmcb->save.cr3;
  1996. else
  1997. hsave->save.cr3 = kvm_read_cr3(&svm->vcpu);
  1998. copy_vmcb_control_area(hsave, vmcb);
  1999. if (kvm_get_rflags(&svm->vcpu) & X86_EFLAGS_IF)
  2000. svm->vcpu.arch.hflags |= HF_HIF_MASK;
  2001. else
  2002. svm->vcpu.arch.hflags &= ~HF_HIF_MASK;
  2003. if (nested_vmcb->control.nested_ctl) {
  2004. kvm_mmu_unload(&svm->vcpu);
  2005. svm->nested.nested_cr3 = nested_vmcb->control.nested_cr3;
  2006. nested_svm_init_mmu_context(&svm->vcpu);
  2007. }
  2008. /* Load the nested guest state */
  2009. svm->vmcb->save.es = nested_vmcb->save.es;
  2010. svm->vmcb->save.cs = nested_vmcb->save.cs;
  2011. svm->vmcb->save.ss = nested_vmcb->save.ss;
  2012. svm->vmcb->save.ds = nested_vmcb->save.ds;
  2013. svm->vmcb->save.gdtr = nested_vmcb->save.gdtr;
  2014. svm->vmcb->save.idtr = nested_vmcb->save.idtr;
  2015. kvm_set_rflags(&svm->vcpu, nested_vmcb->save.rflags);
  2016. svm_set_efer(&svm->vcpu, nested_vmcb->save.efer);
  2017. svm_set_cr0(&svm->vcpu, nested_vmcb->save.cr0);
  2018. svm_set_cr4(&svm->vcpu, nested_vmcb->save.cr4);
  2019. if (npt_enabled) {
  2020. svm->vmcb->save.cr3 = nested_vmcb->save.cr3;
  2021. svm->vcpu.arch.cr3 = nested_vmcb->save.cr3;
  2022. } else
  2023. (void)kvm_set_cr3(&svm->vcpu, nested_vmcb->save.cr3);
  2024. /* Guest paging mode is active - reset mmu */
  2025. kvm_mmu_reset_context(&svm->vcpu);
  2026. svm->vmcb->save.cr2 = svm->vcpu.arch.cr2 = nested_vmcb->save.cr2;
  2027. kvm_register_write(&svm->vcpu, VCPU_REGS_RAX, nested_vmcb->save.rax);
  2028. kvm_register_write(&svm->vcpu, VCPU_REGS_RSP, nested_vmcb->save.rsp);
  2029. kvm_register_write(&svm->vcpu, VCPU_REGS_RIP, nested_vmcb->save.rip);
  2030. /* In case we don't even reach vcpu_run, the fields are not updated */
  2031. svm->vmcb->save.rax = nested_vmcb->save.rax;
  2032. svm->vmcb->save.rsp = nested_vmcb->save.rsp;
  2033. svm->vmcb->save.rip = nested_vmcb->save.rip;
  2034. svm->vmcb->save.dr7 = nested_vmcb->save.dr7;
  2035. svm->vmcb->save.dr6 = nested_vmcb->save.dr6;
  2036. svm->vmcb->save.cpl = nested_vmcb->save.cpl;
  2037. svm->nested.vmcb_msrpm = nested_vmcb->control.msrpm_base_pa & ~0x0fffULL;
  2038. svm->nested.vmcb_iopm = nested_vmcb->control.iopm_base_pa & ~0x0fffULL;
  2039. /* cache intercepts */
  2040. svm->nested.intercept_cr = nested_vmcb->control.intercept_cr;
  2041. svm->nested.intercept_dr = nested_vmcb->control.intercept_dr;
  2042. svm->nested.intercept_exceptions = nested_vmcb->control.intercept_exceptions;
  2043. svm->nested.intercept = nested_vmcb->control.intercept;
  2044. svm_flush_tlb(&svm->vcpu);
  2045. svm->vmcb->control.int_ctl = nested_vmcb->control.int_ctl | V_INTR_MASKING_MASK;
  2046. if (nested_vmcb->control.int_ctl & V_INTR_MASKING_MASK)
  2047. svm->vcpu.arch.hflags |= HF_VINTR_MASK;
  2048. else
  2049. svm->vcpu.arch.hflags &= ~HF_VINTR_MASK;
  2050. if (svm->vcpu.arch.hflags & HF_VINTR_MASK) {
  2051. /* We only want the cr8 intercept bits of the guest */
  2052. clr_cr_intercept(svm, INTERCEPT_CR8_READ);
  2053. clr_cr_intercept(svm, INTERCEPT_CR8_WRITE);
  2054. }
  2055. /* We don't want to see VMMCALLs from a nested guest */
  2056. clr_intercept(svm, INTERCEPT_VMMCALL);
  2057. svm->vmcb->control.lbr_ctl = nested_vmcb->control.lbr_ctl;
  2058. svm->vmcb->control.int_vector = nested_vmcb->control.int_vector;
  2059. svm->vmcb->control.int_state = nested_vmcb->control.int_state;
  2060. svm->vmcb->control.tsc_offset += nested_vmcb->control.tsc_offset;
  2061. svm->vmcb->control.event_inj = nested_vmcb->control.event_inj;
  2062. svm->vmcb->control.event_inj_err = nested_vmcb->control.event_inj_err;
  2063. nested_svm_unmap(page);
  2064. /* Enter Guest-Mode */
  2065. enter_guest_mode(&svm->vcpu);
  2066. /*
  2067. * Merge guest and host intercepts - must be called with vcpu in
2068. * guest-mode to take effect here
  2069. */
  2070. recalc_intercepts(svm);
  2071. svm->nested.vmcb = vmcb_gpa;
  2072. enable_gif(svm);
  2073. mark_all_dirty(svm->vmcb);
  2074. return true;
  2075. }
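/* Copy the segment and MSR state handled by VMLOAD/VMSAVE between two VMCBs. */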
  2076. static void nested_svm_vmloadsave(struct vmcb *from_vmcb, struct vmcb *to_vmcb)
  2077. {
  2078. to_vmcb->save.fs = from_vmcb->save.fs;
  2079. to_vmcb->save.gs = from_vmcb->save.gs;
  2080. to_vmcb->save.tr = from_vmcb->save.tr;
  2081. to_vmcb->save.ldtr = from_vmcb->save.ldtr;
  2082. to_vmcb->save.kernel_gs_base = from_vmcb->save.kernel_gs_base;
  2083. to_vmcb->save.star = from_vmcb->save.star;
  2084. to_vmcb->save.lstar = from_vmcb->save.lstar;
  2085. to_vmcb->save.cstar = from_vmcb->save.cstar;
  2086. to_vmcb->save.sfmask = from_vmcb->save.sfmask;
  2087. to_vmcb->save.sysenter_cs = from_vmcb->save.sysenter_cs;
  2088. to_vmcb->save.sysenter_esp = from_vmcb->save.sysenter_esp;
  2089. to_vmcb->save.sysenter_eip = from_vmcb->save.sysenter_eip;
  2090. }
  2091. static int vmload_interception(struct vcpu_svm *svm)
  2092. {
  2093. struct vmcb *nested_vmcb;
  2094. struct page *page;
  2095. if (nested_svm_check_permissions(svm))
  2096. return 1;
  2097. nested_vmcb = nested_svm_map(svm, svm->vmcb->save.rax, &page);
  2098. if (!nested_vmcb)
  2099. return 1;
  2100. svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
  2101. skip_emulated_instruction(&svm->vcpu);
  2102. nested_svm_vmloadsave(nested_vmcb, svm->vmcb);
  2103. nested_svm_unmap(page);
  2104. return 1;
  2105. }
  2106. static int vmsave_interception(struct vcpu_svm *svm)
  2107. {
  2108. struct vmcb *nested_vmcb;
  2109. struct page *page;
  2110. if (nested_svm_check_permissions(svm))
  2111. return 1;
  2112. nested_vmcb = nested_svm_map(svm, svm->vmcb->save.rax, &page);
  2113. if (!nested_vmcb)
  2114. return 1;
  2115. svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
  2116. skip_emulated_instruction(&svm->vcpu);
  2117. nested_svm_vmloadsave(svm->vmcb, nested_vmcb);
  2118. nested_svm_unmap(page);
  2119. return 1;
  2120. }
  2121. static int vmrun_interception(struct vcpu_svm *svm)
  2122. {
  2123. if (nested_svm_check_permissions(svm))
  2124. return 1;
  2125. /* Save rip after vmrun instruction */
  2126. kvm_rip_write(&svm->vcpu, kvm_rip_read(&svm->vcpu) + 3);
  2127. if (!nested_svm_vmrun(svm))
  2128. return 1;
  2129. if (!nested_svm_vmrun_msrpm(svm))
  2130. goto failed;
  2131. return 1;
  2132. failed:
  2133. svm->vmcb->control.exit_code = SVM_EXIT_ERR;
  2134. svm->vmcb->control.exit_code_hi = 0;
  2135. svm->vmcb->control.exit_info_1 = 0;
  2136. svm->vmcb->control.exit_info_2 = 0;
  2137. nested_svm_vmexit(svm);
  2138. return 1;
  2139. }
  2140. static int stgi_interception(struct vcpu_svm *svm)
  2141. {
  2142. if (nested_svm_check_permissions(svm))
  2143. return 1;
  2144. svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
  2145. skip_emulated_instruction(&svm->vcpu);
  2146. kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);
  2147. enable_gif(svm);
  2148. return 1;
  2149. }
  2150. static int clgi_interception(struct vcpu_svm *svm)
  2151. {
  2152. if (nested_svm_check_permissions(svm))
  2153. return 1;
  2154. svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
  2155. skip_emulated_instruction(&svm->vcpu);
  2156. disable_gif(svm);
  2157. /* After a CLGI no interrupts should come */
  2158. svm_clear_vintr(svm);
  2159. svm->vmcb->control.int_ctl &= ~V_IRQ_MASK;
  2160. mark_dirty(svm->vmcb, VMCB_INTR);
  2161. return 1;
  2162. }
  2163. static int invlpga_interception(struct vcpu_svm *svm)
  2164. {
  2165. struct kvm_vcpu *vcpu = &svm->vcpu;
  2166. trace_kvm_invlpga(svm->vmcb->save.rip, vcpu->arch.regs[VCPU_REGS_RCX],
  2167. vcpu->arch.regs[VCPU_REGS_RAX]);
  2168. /* Let's treat INVLPGA the same as INVLPG (can be optimized!) */
  2169. kvm_mmu_invlpg(vcpu, vcpu->arch.regs[VCPU_REGS_RAX]);
  2170. svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
  2171. skip_emulated_instruction(&svm->vcpu);
  2172. return 1;
  2173. }
  2174. static int skinit_interception(struct vcpu_svm *svm)
  2175. {
  2176. trace_kvm_skinit(svm->vmcb->save.rip, svm->vcpu.arch.regs[VCPU_REGS_RAX]);
  2177. kvm_queue_exception(&svm->vcpu, UD_VECTOR);
  2178. return 1;
  2179. }
  2180. static int xsetbv_interception(struct vcpu_svm *svm)
  2181. {
  2182. u64 new_bv = kvm_read_edx_eax(&svm->vcpu);
  2183. u32 index = kvm_register_read(&svm->vcpu, VCPU_REGS_RCX);
  2184. if (kvm_set_xcr(&svm->vcpu, index, new_bv) == 0) {
  2185. svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
  2186. skip_emulated_instruction(&svm->vcpu);
  2187. }
  2188. return 1;
  2189. }
  2190. static int invalid_op_interception(struct vcpu_svm *svm)
  2191. {
  2192. kvm_queue_exception(&svm->vcpu, UD_VECTOR);
  2193. return 1;
  2194. }
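/*
 * Decode an intercepted task switch (incoming TSS selector, switch reason and
 * any pending event) from the exit information and hand it to the generic
 * task-switch emulation.
 */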
  2195. static int task_switch_interception(struct vcpu_svm *svm)
  2196. {
  2197. u16 tss_selector;
  2198. int reason;
  2199. int int_type = svm->vmcb->control.exit_int_info &
  2200. SVM_EXITINTINFO_TYPE_MASK;
  2201. int int_vec = svm->vmcb->control.exit_int_info & SVM_EVTINJ_VEC_MASK;
  2202. uint32_t type =
  2203. svm->vmcb->control.exit_int_info & SVM_EXITINTINFO_TYPE_MASK;
  2204. uint32_t idt_v =
  2205. svm->vmcb->control.exit_int_info & SVM_EXITINTINFO_VALID;
  2206. bool has_error_code = false;
  2207. u32 error_code = 0;
  2208. tss_selector = (u16)svm->vmcb->control.exit_info_1;
  2209. if (svm->vmcb->control.exit_info_2 &
  2210. (1ULL << SVM_EXITINFOSHIFT_TS_REASON_IRET))
  2211. reason = TASK_SWITCH_IRET;
  2212. else if (svm->vmcb->control.exit_info_2 &
  2213. (1ULL << SVM_EXITINFOSHIFT_TS_REASON_JMP))
  2214. reason = TASK_SWITCH_JMP;
  2215. else if (idt_v)
  2216. reason = TASK_SWITCH_GATE;
  2217. else
  2218. reason = TASK_SWITCH_CALL;
  2219. if (reason == TASK_SWITCH_GATE) {
  2220. switch (type) {
  2221. case SVM_EXITINTINFO_TYPE_NMI:
  2222. svm->vcpu.arch.nmi_injected = false;
  2223. break;
  2224. case SVM_EXITINTINFO_TYPE_EXEPT:
  2225. if (svm->vmcb->control.exit_info_2 &
  2226. (1ULL << SVM_EXITINFOSHIFT_TS_HAS_ERROR_CODE)) {
  2227. has_error_code = true;
  2228. error_code =
  2229. (u32)svm->vmcb->control.exit_info_2;
  2230. }
  2231. kvm_clear_exception_queue(&svm->vcpu);
  2232. break;
  2233. case SVM_EXITINTINFO_TYPE_INTR:
  2234. kvm_clear_interrupt_queue(&svm->vcpu);
  2235. break;
  2236. default:
  2237. break;
  2238. }
  2239. }
  2240. if (reason != TASK_SWITCH_GATE ||
  2241. int_type == SVM_EXITINTINFO_TYPE_SOFT ||
  2242. (int_type == SVM_EXITINTINFO_TYPE_EXEPT &&
  2243. (int_vec == OF_VECTOR || int_vec == BP_VECTOR)))
  2244. skip_emulated_instruction(&svm->vcpu);
  2245. if (kvm_task_switch(&svm->vcpu, tss_selector, reason,
  2246. has_error_code, error_code) == EMULATE_FAIL) {
  2247. svm->vcpu.run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
  2248. svm->vcpu.run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
  2249. svm->vcpu.run->internal.ndata = 0;
  2250. return 0;
  2251. }
  2252. return 1;
  2253. }
  2254. static int cpuid_interception(struct vcpu_svm *svm)
  2255. {
  2256. svm->next_rip = kvm_rip_read(&svm->vcpu) + 2;
  2257. kvm_emulate_cpuid(&svm->vcpu);
  2258. return 1;
  2259. }
  2260. static int iret_interception(struct vcpu_svm *svm)
  2261. {
  2262. ++svm->vcpu.stat.nmi_window_exits;
  2263. clr_intercept(svm, INTERCEPT_IRET);
  2264. svm->vcpu.arch.hflags |= HF_IRET_MASK;
  2265. svm->nmi_iret_rip = kvm_rip_read(&svm->vcpu);
  2266. return 1;
  2267. }
  2268. static int invlpg_interception(struct vcpu_svm *svm)
  2269. {
  2270. if (!static_cpu_has(X86_FEATURE_DECODEASSISTS))
  2271. return emulate_instruction(&svm->vcpu, 0) == EMULATE_DONE;
  2272. kvm_mmu_invlpg(&svm->vcpu, svm->vmcb->control.exit_info_1);
  2273. skip_emulated_instruction(&svm->vcpu);
  2274. return 1;
  2275. }
  2276. static int emulate_on_interception(struct vcpu_svm *svm)
  2277. {
  2278. return emulate_instruction(&svm->vcpu, 0) == EMULATE_DONE;
  2279. }
  2280. static int rdpmc_interception(struct vcpu_svm *svm)
  2281. {
  2282. int err;
  2283. if (!static_cpu_has(X86_FEATURE_NRIPS))
  2284. return emulate_on_interception(svm);
  2285. err = kvm_rdpmc(&svm->vcpu);
  2286. kvm_complete_insn_gp(&svm->vcpu, err);
  2287. return 1;
  2288. }
  2289. bool check_selective_cr0_intercepted(struct vcpu_svm *svm, unsigned long val)
  2290. {
  2291. unsigned long cr0 = svm->vcpu.arch.cr0;
  2292. bool ret = false;
  2293. u64 intercept;
  2294. intercept = svm->nested.intercept;
  2295. if (!is_guest_mode(&svm->vcpu) ||
  2296. (!(intercept & (1ULL << INTERCEPT_SELECTIVE_CR0))))
  2297. return false;
  2298. cr0 &= ~SVM_CR0_SELECTIVE_MASK;
  2299. val &= ~SVM_CR0_SELECTIVE_MASK;
  2300. if (cr0 ^ val) {
  2301. svm->vmcb->control.exit_code = SVM_EXIT_CR0_SEL_WRITE;
  2302. ret = (nested_svm_exit_handled(svm) == NESTED_EXIT_DONE);
  2303. }
  2304. return ret;
  2305. }
  2306. #define CR_VALID (1ULL << 63)
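/*
 * Handle intercepted CR accesses via decode assists: the exit code selects
 * the CR and access direction, exit_info_1 the general-purpose register.
 * Falls back to full instruction emulation when decode assists are absent.
 */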
  2307. static int cr_interception(struct vcpu_svm *svm)
  2308. {
  2309. int reg, cr;
  2310. unsigned long val;
  2311. int err;
  2312. if (!static_cpu_has(X86_FEATURE_DECODEASSISTS))
  2313. return emulate_on_interception(svm);
  2314. if (unlikely((svm->vmcb->control.exit_info_1 & CR_VALID) == 0))
  2315. return emulate_on_interception(svm);
  2316. reg = svm->vmcb->control.exit_info_1 & SVM_EXITINFO_REG_MASK;
  2317. cr = svm->vmcb->control.exit_code - SVM_EXIT_READ_CR0;
  2318. err = 0;
  2319. if (cr >= 16) { /* mov to cr */
  2320. cr -= 16;
  2321. val = kvm_register_read(&svm->vcpu, reg);
  2322. switch (cr) {
  2323. case 0:
  2324. if (!check_selective_cr0_intercepted(svm, val))
  2325. err = kvm_set_cr0(&svm->vcpu, val);
  2326. else
  2327. return 1;
  2328. break;
  2329. case 3:
  2330. err = kvm_set_cr3(&svm->vcpu, val);
  2331. break;
  2332. case 4:
  2333. err = kvm_set_cr4(&svm->vcpu, val);
  2334. break;
  2335. case 8:
  2336. err = kvm_set_cr8(&svm->vcpu, val);
  2337. break;
  2338. default:
  2339. WARN(1, "unhandled write to CR%d", cr);
  2340. kvm_queue_exception(&svm->vcpu, UD_VECTOR);
  2341. return 1;
  2342. }
  2343. } else { /* mov from cr */
  2344. switch (cr) {
  2345. case 0:
  2346. val = kvm_read_cr0(&svm->vcpu);
  2347. break;
  2348. case 2:
  2349. val = svm->vcpu.arch.cr2;
  2350. break;
  2351. case 3:
  2352. val = kvm_read_cr3(&svm->vcpu);
  2353. break;
  2354. case 4:
  2355. val = kvm_read_cr4(&svm->vcpu);
  2356. break;
  2357. case 8:
  2358. val = kvm_get_cr8(&svm->vcpu);
  2359. break;
  2360. default:
  2361. WARN(1, "unhandled read from CR%d", cr);
  2362. kvm_queue_exception(&svm->vcpu, UD_VECTOR);
  2363. return 1;
  2364. }
  2365. kvm_register_write(&svm->vcpu, reg, val);
  2366. }
  2367. kvm_complete_insn_gp(&svm->vcpu, err);
  2368. return 1;
  2369. }
  2370. static int dr_interception(struct vcpu_svm *svm)
  2371. {
  2372. int reg, dr;
  2373. unsigned long val;
  2374. int err;
  2375. if (!boot_cpu_has(X86_FEATURE_DECODEASSISTS))
  2376. return emulate_on_interception(svm);
  2377. reg = svm->vmcb->control.exit_info_1 & SVM_EXITINFO_REG_MASK;
  2378. dr = svm->vmcb->control.exit_code - SVM_EXIT_READ_DR0;
  2379. if (dr >= 16) { /* mov to DRn */
  2380. val = kvm_register_read(&svm->vcpu, reg);
  2381. kvm_set_dr(&svm->vcpu, dr - 16, val);
  2382. } else {
  2383. err = kvm_get_dr(&svm->vcpu, dr, &val);
  2384. if (!err)
  2385. kvm_register_write(&svm->vcpu, reg, val);
  2386. }
  2387. skip_emulated_instruction(&svm->vcpu);
  2388. return 1;
  2389. }
  2390. static int cr8_write_interception(struct vcpu_svm *svm)
  2391. {
  2392. struct kvm_run *kvm_run = svm->vcpu.run;
  2393. int r;
  2394. u8 cr8_prev = kvm_get_cr8(&svm->vcpu);
  2395. /* instruction emulation calls kvm_set_cr8() */
  2396. r = cr_interception(svm);
  2397. if (irqchip_in_kernel(svm->vcpu.kvm)) {
  2398. clr_cr_intercept(svm, INTERCEPT_CR8_WRITE);
  2399. return r;
  2400. }
  2401. if (cr8_prev <= kvm_get_cr8(&svm->vcpu))
  2402. return r;
  2403. kvm_run->exit_reason = KVM_EXIT_SET_TPR;
  2404. return 0;
  2405. }
  2406. u64 svm_read_l1_tsc(struct kvm_vcpu *vcpu)
  2407. {
  2408. struct vmcb *vmcb = get_host_vmcb(to_svm(vcpu));
  2409. return vmcb->control.tsc_offset +
  2410. svm_scale_tsc(vcpu, native_read_tsc());
  2411. }
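/*
 * Read an MSR on behalf of the guest: return VMCB-backed values for the MSRs
 * SVM virtualizes and defer everything else to kvm_get_msr_common().
 */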
  2412. static int svm_get_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 *data)
  2413. {
  2414. struct vcpu_svm *svm = to_svm(vcpu);
  2415. switch (ecx) {
  2416. case MSR_IA32_TSC: {
  2417. *data = svm->vmcb->control.tsc_offset +
  2418. svm_scale_tsc(vcpu, native_read_tsc());
  2419. break;
  2420. }
  2421. case MSR_STAR:
  2422. *data = svm->vmcb->save.star;
  2423. break;
  2424. #ifdef CONFIG_X86_64
  2425. case MSR_LSTAR:
  2426. *data = svm->vmcb->save.lstar;
  2427. break;
  2428. case MSR_CSTAR:
  2429. *data = svm->vmcb->save.cstar;
  2430. break;
  2431. case MSR_KERNEL_GS_BASE:
  2432. *data = svm->vmcb->save.kernel_gs_base;
  2433. break;
  2434. case MSR_SYSCALL_MASK:
  2435. *data = svm->vmcb->save.sfmask;
  2436. break;
  2437. #endif
  2438. case MSR_IA32_SYSENTER_CS:
  2439. *data = svm->vmcb->save.sysenter_cs;
  2440. break;
  2441. case MSR_IA32_SYSENTER_EIP:
  2442. *data = svm->sysenter_eip;
  2443. break;
  2444. case MSR_IA32_SYSENTER_ESP:
  2445. *data = svm->sysenter_esp;
  2446. break;
  2447. /*
  2448. * Nobody will change the following 5 values in the VMCB so we can
  2449. * safely return them on rdmsr. They will always be 0 until LBRV is
  2450. * implemented.
  2451. */
  2452. case MSR_IA32_DEBUGCTLMSR:
  2453. *data = svm->vmcb->save.dbgctl;
  2454. break;
  2455. case MSR_IA32_LASTBRANCHFROMIP:
  2456. *data = svm->vmcb->save.br_from;
  2457. break;
  2458. case MSR_IA32_LASTBRANCHTOIP:
  2459. *data = svm->vmcb->save.br_to;
  2460. break;
  2461. case MSR_IA32_LASTINTFROMIP:
  2462. *data = svm->vmcb->save.last_excp_from;
  2463. break;
  2464. case MSR_IA32_LASTINTTOIP:
  2465. *data = svm->vmcb->save.last_excp_to;
  2466. break;
  2467. case MSR_VM_HSAVE_PA:
  2468. *data = svm->nested.hsave_msr;
  2469. break;
  2470. case MSR_VM_CR:
  2471. *data = svm->nested.vm_cr_msr;
  2472. break;
  2473. case MSR_IA32_UCODE_REV:
  2474. *data = 0x01000065;
  2475. break;
  2476. default:
  2477. return kvm_get_msr_common(vcpu, ecx, data);
  2478. }
  2479. return 0;
  2480. }
  2481. static int rdmsr_interception(struct vcpu_svm *svm)
  2482. {
  2483. u32 ecx = svm->vcpu.arch.regs[VCPU_REGS_RCX];
  2484. u64 data;
  2485. if (svm_get_msr(&svm->vcpu, ecx, &data)) {
  2486. trace_kvm_msr_read_ex(ecx);
  2487. kvm_inject_gp(&svm->vcpu, 0);
  2488. } else {
  2489. trace_kvm_msr_read(ecx, data);
  2490. svm->vcpu.arch.regs[VCPU_REGS_RAX] = data & 0xffffffff;
  2491. svm->vcpu.arch.regs[VCPU_REGS_RDX] = data >> 32;
  2492. svm->next_rip = kvm_rip_read(&svm->vcpu) + 2;
  2493. skip_emulated_instruction(&svm->vcpu);
  2494. }
  2495. return 1;
  2496. }
  2497. static int svm_set_vm_cr(struct kvm_vcpu *vcpu, u64 data)
  2498. {
  2499. struct vcpu_svm *svm = to_svm(vcpu);
  2500. int svm_dis, chg_mask;
  2501. if (data & ~SVM_VM_CR_VALID_MASK)
  2502. return 1;
  2503. chg_mask = SVM_VM_CR_VALID_MASK;
  2504. if (svm->nested.vm_cr_msr & SVM_VM_CR_SVM_DIS_MASK)
  2505. chg_mask &= ~(SVM_VM_CR_SVM_LOCK_MASK | SVM_VM_CR_SVM_DIS_MASK);
  2506. svm->nested.vm_cr_msr &= ~chg_mask;
  2507. svm->nested.vm_cr_msr |= (data & chg_mask);
  2508. svm_dis = svm->nested.vm_cr_msr & SVM_VM_CR_SVM_DIS_MASK;
  2509. /* check for svm_disable while efer.svme is set */
  2510. if (svm_dis && (vcpu->arch.efer & EFER_SVME))
  2511. return 1;
  2512. return 0;
  2513. }
  2514. static int svm_set_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 data)
  2515. {
  2516. struct vcpu_svm *svm = to_svm(vcpu);
  2517. switch (ecx) {
  2518. case MSR_IA32_TSC:
  2519. kvm_write_tsc(vcpu, data);
  2520. break;
  2521. case MSR_STAR:
  2522. svm->vmcb->save.star = data;
  2523. break;
  2524. #ifdef CONFIG_X86_64
  2525. case MSR_LSTAR:
  2526. svm->vmcb->save.lstar = data;
  2527. break;
  2528. case MSR_CSTAR:
  2529. svm->vmcb->save.cstar = data;
  2530. break;
  2531. case MSR_KERNEL_GS_BASE:
  2532. svm->vmcb->save.kernel_gs_base = data;
  2533. break;
  2534. case MSR_SYSCALL_MASK:
  2535. svm->vmcb->save.sfmask = data;
  2536. break;
  2537. #endif
  2538. case MSR_IA32_SYSENTER_CS:
  2539. svm->vmcb->save.sysenter_cs = data;
  2540. break;
  2541. case MSR_IA32_SYSENTER_EIP:
  2542. svm->sysenter_eip = data;
  2543. svm->vmcb->save.sysenter_eip = data;
  2544. break;
  2545. case MSR_IA32_SYSENTER_ESP:
  2546. svm->sysenter_esp = data;
  2547. svm->vmcb->save.sysenter_esp = data;
  2548. break;
  2549. case MSR_IA32_DEBUGCTLMSR:
  2550. if (!boot_cpu_has(X86_FEATURE_LBRV)) {
  2551. pr_unimpl(vcpu, "%s: MSR_IA32_DEBUGCTL 0x%llx, nop\n",
  2552. __func__, data);
  2553. break;
  2554. }
  2555. if (data & DEBUGCTL_RESERVED_BITS)
  2556. return 1;
  2557. svm->vmcb->save.dbgctl = data;
  2558. mark_dirty(svm->vmcb, VMCB_LBR);
  2559. if (data & (1ULL<<0))
  2560. svm_enable_lbrv(svm);
  2561. else
  2562. svm_disable_lbrv(svm);
  2563. break;
  2564. case MSR_VM_HSAVE_PA:
  2565. svm->nested.hsave_msr = data;
  2566. break;
  2567. case MSR_VM_CR:
  2568. return svm_set_vm_cr(vcpu, data);
  2569. case MSR_VM_IGNNE:
  2570. pr_unimpl(vcpu, "unimplemented wrmsr: 0x%x data 0x%llx\n", ecx, data);
  2571. break;
  2572. default:
  2573. return kvm_set_msr_common(vcpu, ecx, data);
  2574. }
  2575. return 0;
  2576. }
  2577. static int wrmsr_interception(struct vcpu_svm *svm)
  2578. {
  2579. u32 ecx = svm->vcpu.arch.regs[VCPU_REGS_RCX];
  2580. u64 data = (svm->vcpu.arch.regs[VCPU_REGS_RAX] & -1u)
  2581. | ((u64)(svm->vcpu.arch.regs[VCPU_REGS_RDX] & -1u) << 32);
  2582. svm->next_rip = kvm_rip_read(&svm->vcpu) + 2;
  2583. if (svm_set_msr(&svm->vcpu, ecx, data)) {
  2584. trace_kvm_msr_write_ex(ecx, data);
  2585. kvm_inject_gp(&svm->vcpu, 0);
  2586. } else {
  2587. trace_kvm_msr_write(ecx, data);
  2588. skip_emulated_instruction(&svm->vcpu);
  2589. }
  2590. return 1;
  2591. }
  2592. static int msr_interception(struct vcpu_svm *svm)
  2593. {
  2594. if (svm->vmcb->control.exit_info_1)
  2595. return wrmsr_interception(svm);
  2596. else
  2597. return rdmsr_interception(svm);
  2598. }
  2599. static int interrupt_window_interception(struct vcpu_svm *svm)
  2600. {
  2601. struct kvm_run *kvm_run = svm->vcpu.run;
  2602. kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);
  2603. svm_clear_vintr(svm);
  2604. svm->vmcb->control.int_ctl &= ~V_IRQ_MASK;
  2605. mark_dirty(svm->vmcb, VMCB_INTR);
  2606. /*
  2607. * If the user space waits to inject interrupts, exit as soon as
  2608. * possible
  2609. */
  2610. if (!irqchip_in_kernel(svm->vcpu.kvm) &&
  2611. kvm_run->request_interrupt_window &&
  2612. !kvm_cpu_has_interrupt(&svm->vcpu)) {
  2613. ++svm->vcpu.stat.irq_window_exits;
  2614. kvm_run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN;
  2615. return 0;
  2616. }
  2617. return 1;
  2618. }
  2619. static int pause_interception(struct vcpu_svm *svm)
  2620. {
  2621. kvm_vcpu_on_spin(&(svm->vcpu));
  2622. return 1;
  2623. }
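/* VMEXIT handler dispatch table, indexed by the VMCB exit code. */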
static int (*svm_exit_handlers[])(struct vcpu_svm *svm) = {
        [SVM_EXIT_READ_CR0] = cr_interception,
        [SVM_EXIT_READ_CR3] = cr_interception,
        [SVM_EXIT_READ_CR4] = cr_interception,
        [SVM_EXIT_READ_CR8] = cr_interception,
        [SVM_EXIT_CR0_SEL_WRITE] = emulate_on_interception,
        [SVM_EXIT_WRITE_CR0] = cr_interception,
        [SVM_EXIT_WRITE_CR3] = cr_interception,
        [SVM_EXIT_WRITE_CR4] = cr_interception,
        [SVM_EXIT_WRITE_CR8] = cr8_write_interception,
        [SVM_EXIT_READ_DR0] = dr_interception,
        [SVM_EXIT_READ_DR1] = dr_interception,
        [SVM_EXIT_READ_DR2] = dr_interception,
        [SVM_EXIT_READ_DR3] = dr_interception,
        [SVM_EXIT_READ_DR4] = dr_interception,
        [SVM_EXIT_READ_DR5] = dr_interception,
        [SVM_EXIT_READ_DR6] = dr_interception,
        [SVM_EXIT_READ_DR7] = dr_interception,
        [SVM_EXIT_WRITE_DR0] = dr_interception,
        [SVM_EXIT_WRITE_DR1] = dr_interception,
        [SVM_EXIT_WRITE_DR2] = dr_interception,
        [SVM_EXIT_WRITE_DR3] = dr_interception,
        [SVM_EXIT_WRITE_DR4] = dr_interception,
        [SVM_EXIT_WRITE_DR5] = dr_interception,
        [SVM_EXIT_WRITE_DR6] = dr_interception,
        [SVM_EXIT_WRITE_DR7] = dr_interception,
        [SVM_EXIT_EXCP_BASE + DB_VECTOR] = db_interception,
        [SVM_EXIT_EXCP_BASE + BP_VECTOR] = bp_interception,
        [SVM_EXIT_EXCP_BASE + UD_VECTOR] = ud_interception,
        [SVM_EXIT_EXCP_BASE + PF_VECTOR] = pf_interception,
        [SVM_EXIT_EXCP_BASE + NM_VECTOR] = nm_interception,
        [SVM_EXIT_EXCP_BASE + MC_VECTOR] = mc_interception,
        [SVM_EXIT_INTR] = intr_interception,
        [SVM_EXIT_NMI] = nmi_interception,
        [SVM_EXIT_SMI] = nop_on_interception,
        [SVM_EXIT_INIT] = nop_on_interception,
        [SVM_EXIT_VINTR] = interrupt_window_interception,
        [SVM_EXIT_RDPMC] = rdpmc_interception,
        [SVM_EXIT_CPUID] = cpuid_interception,
        [SVM_EXIT_IRET] = iret_interception,
        [SVM_EXIT_INVD] = emulate_on_interception,
        [SVM_EXIT_PAUSE] = pause_interception,
        [SVM_EXIT_HLT] = halt_interception,
        [SVM_EXIT_INVLPG] = invlpg_interception,
        [SVM_EXIT_INVLPGA] = invlpga_interception,
        [SVM_EXIT_IOIO] = io_interception,
        [SVM_EXIT_MSR] = msr_interception,
        [SVM_EXIT_TASK_SWITCH] = task_switch_interception,
        [SVM_EXIT_SHUTDOWN] = shutdown_interception,
        [SVM_EXIT_VMRUN] = vmrun_interception,
        [SVM_EXIT_VMMCALL] = vmmcall_interception,
        [SVM_EXIT_VMLOAD] = vmload_interception,
        [SVM_EXIT_VMSAVE] = vmsave_interception,
        [SVM_EXIT_STGI] = stgi_interception,
        [SVM_EXIT_CLGI] = clgi_interception,
        [SVM_EXIT_SKINIT] = skinit_interception,
        [SVM_EXIT_WBINVD] = emulate_on_interception,
        [SVM_EXIT_MONITOR] = invalid_op_interception,
        [SVM_EXIT_MWAIT] = invalid_op_interception,
        [SVM_EXIT_XSETBV] = xsetbv_interception,
        [SVM_EXIT_NPF] = pf_interception,
};

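/*
 * Dump the VMCB control and state-save areas; used when a VMRUN fails so the
 * failing guest state ends up in the kernel log.
 */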
static void dump_vmcb(struct kvm_vcpu *vcpu)
{
        struct vcpu_svm *svm = to_svm(vcpu);
        struct vmcb_control_area *control = &svm->vmcb->control;
        struct vmcb_save_area *save = &svm->vmcb->save;

        pr_err("VMCB Control Area:\n");
        pr_err("%-20s%04x\n", "cr_read:", control->intercept_cr & 0xffff);
        pr_err("%-20s%04x\n", "cr_write:", control->intercept_cr >> 16);
        pr_err("%-20s%04x\n", "dr_read:", control->intercept_dr & 0xffff);
        pr_err("%-20s%04x\n", "dr_write:", control->intercept_dr >> 16);
        pr_err("%-20s%08x\n", "exceptions:", control->intercept_exceptions);
        pr_err("%-20s%016llx\n", "intercepts:", control->intercept);
        pr_err("%-20s%d\n", "pause filter count:", control->pause_filter_count);
        pr_err("%-20s%016llx\n", "iopm_base_pa:", control->iopm_base_pa);
        pr_err("%-20s%016llx\n", "msrpm_base_pa:", control->msrpm_base_pa);
        pr_err("%-20s%016llx\n", "tsc_offset:", control->tsc_offset);
        pr_err("%-20s%d\n", "asid:", control->asid);
        pr_err("%-20s%d\n", "tlb_ctl:", control->tlb_ctl);
        pr_err("%-20s%08x\n", "int_ctl:", control->int_ctl);
        pr_err("%-20s%08x\n", "int_vector:", control->int_vector);
        pr_err("%-20s%08x\n", "int_state:", control->int_state);
        pr_err("%-20s%08x\n", "exit_code:", control->exit_code);
        pr_err("%-20s%016llx\n", "exit_info1:", control->exit_info_1);
        pr_err("%-20s%016llx\n", "exit_info2:", control->exit_info_2);
        pr_err("%-20s%08x\n", "exit_int_info:", control->exit_int_info);
        pr_err("%-20s%08x\n", "exit_int_info_err:", control->exit_int_info_err);
        pr_err("%-20s%lld\n", "nested_ctl:", control->nested_ctl);
        pr_err("%-20s%016llx\n", "nested_cr3:", control->nested_cr3);
        pr_err("%-20s%08x\n", "event_inj:", control->event_inj);
        pr_err("%-20s%08x\n", "event_inj_err:", control->event_inj_err);
        pr_err("%-20s%lld\n", "lbr_ctl:", control->lbr_ctl);
        pr_err("%-20s%016llx\n", "next_rip:", control->next_rip);
        pr_err("VMCB State Save Area:\n");
        pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
               "es:",
               save->es.selector, save->es.attrib,
               save->es.limit, save->es.base);
        pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
               "cs:",
               save->cs.selector, save->cs.attrib,
               save->cs.limit, save->cs.base);
        pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
               "ss:",
               save->ss.selector, save->ss.attrib,
               save->ss.limit, save->ss.base);
        pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
               "ds:",
               save->ds.selector, save->ds.attrib,
               save->ds.limit, save->ds.base);
        pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
               "fs:",
               save->fs.selector, save->fs.attrib,
               save->fs.limit, save->fs.base);
        pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
               "gs:",
               save->gs.selector, save->gs.attrib,
               save->gs.limit, save->gs.base);
        pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
               "gdtr:",
               save->gdtr.selector, save->gdtr.attrib,
               save->gdtr.limit, save->gdtr.base);
        pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
               "ldtr:",
               save->ldtr.selector, save->ldtr.attrib,
               save->ldtr.limit, save->ldtr.base);
        pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
               "idtr:",
               save->idtr.selector, save->idtr.attrib,
               save->idtr.limit, save->idtr.base);
        pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
               "tr:",
               save->tr.selector, save->tr.attrib,
               save->tr.limit, save->tr.base);
        pr_err("cpl: %d efer: %016llx\n",
               save->cpl, save->efer);
        pr_err("%-15s %016llx %-13s %016llx\n",
               "cr0:", save->cr0, "cr2:", save->cr2);
        pr_err("%-15s %016llx %-13s %016llx\n",
               "cr3:", save->cr3, "cr4:", save->cr4);
        pr_err("%-15s %016llx %-13s %016llx\n",
               "dr6:", save->dr6, "dr7:", save->dr7);
        pr_err("%-15s %016llx %-13s %016llx\n",
               "rip:", save->rip, "rflags:", save->rflags);
        pr_err("%-15s %016llx %-13s %016llx\n",
               "rsp:", save->rsp, "rax:", save->rax);
        pr_err("%-15s %016llx %-13s %016llx\n",
               "star:", save->star, "lstar:", save->lstar);
        pr_err("%-15s %016llx %-13s %016llx\n",
               "cstar:", save->cstar, "sfmask:", save->sfmask);
        pr_err("%-15s %016llx %-13s %016llx\n",
               "kernel_gs_base:", save->kernel_gs_base,
               "sysenter_cs:", save->sysenter_cs);
        pr_err("%-15s %016llx %-13s %016llx\n",
               "sysenter_esp:", save->sysenter_esp,
               "sysenter_eip:", save->sysenter_eip);
        pr_err("%-15s %016llx %-13s %016llx\n",
               "gpat:", save->g_pat, "dbgctl:", save->dbgctl);
        pr_err("%-15s %016llx %-13s %016llx\n",
               "br_from:", save->br_from, "br_to:", save->br_to);
        pr_err("%-15s %016llx %-13s %016llx\n",
               "excp_from:", save->last_excp_from,
               "excp_to:", save->last_excp_to);
}

static void svm_get_exit_info(struct kvm_vcpu *vcpu, u64 *info1, u64 *info2)
{
        struct vmcb_control_area *control = &to_svm(vcpu)->vmcb->control;

        *info1 = control->exit_info_1;
        *info2 = control->exit_info_2;
}

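/*
 * Top-level #VMEXIT dispatcher: sync CR0/CR3 back into the vcpu, let a nested
 * hypervisor claim the exit first when the vcpu is in guest mode, and then
 * hand the exit code to the matching entry in svm_exit_handlers[].
 */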
static int handle_exit(struct kvm_vcpu *vcpu)
{
        struct vcpu_svm *svm = to_svm(vcpu);
        struct kvm_run *kvm_run = vcpu->run;
        u32 exit_code = svm->vmcb->control.exit_code;

        if (!is_cr_intercept(svm, INTERCEPT_CR0_WRITE))
                vcpu->arch.cr0 = svm->vmcb->save.cr0;
        if (npt_enabled)
                vcpu->arch.cr3 = svm->vmcb->save.cr3;

        if (unlikely(svm->nested.exit_required)) {
                nested_svm_vmexit(svm);
                svm->nested.exit_required = false;
                return 1;
        }

        if (is_guest_mode(vcpu)) {
                int vmexit;

                trace_kvm_nested_vmexit(svm->vmcb->save.rip, exit_code,
                                        svm->vmcb->control.exit_info_1,
                                        svm->vmcb->control.exit_info_2,
                                        svm->vmcb->control.exit_int_info,
                                        svm->vmcb->control.exit_int_info_err,
                                        KVM_ISA_SVM);

                vmexit = nested_svm_exit_special(svm);

                if (vmexit == NESTED_EXIT_CONTINUE)
                        vmexit = nested_svm_exit_handled(svm);

                if (vmexit == NESTED_EXIT_DONE)
                        return 1;
        }

        svm_complete_interrupts(svm);

        if (svm->vmcb->control.exit_code == SVM_EXIT_ERR) {
                kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY;
                kvm_run->fail_entry.hardware_entry_failure_reason
                        = svm->vmcb->control.exit_code;
                pr_err("KVM: FAILED VMRUN WITH VMCB:\n");
                dump_vmcb(vcpu);
                return 0;
        }

        if (is_external_interrupt(svm->vmcb->control.exit_int_info) &&
            exit_code != SVM_EXIT_EXCP_BASE + PF_VECTOR &&
            exit_code != SVM_EXIT_NPF && exit_code != SVM_EXIT_TASK_SWITCH &&
            exit_code != SVM_EXIT_INTR && exit_code != SVM_EXIT_NMI)
                printk(KERN_ERR "%s: unexpected exit_int_info 0x%x "
                       "exit_code 0x%x\n",
                       __func__, svm->vmcb->control.exit_int_info,
                       exit_code);

        if (exit_code >= ARRAY_SIZE(svm_exit_handlers)
            || !svm_exit_handlers[exit_code]) {
                kvm_run->exit_reason = KVM_EXIT_UNKNOWN;
                kvm_run->hw.hardware_exit_reason = exit_code;
                return 0;
        }

        return svm_exit_handlers[exit_code](svm);
}

static void reload_tss(struct kvm_vcpu *vcpu)
{
        int cpu = raw_smp_processor_id();
        struct svm_cpu_data *sd = per_cpu(svm_data, cpu);

        sd->tss_desc->type = 9; /* available 32/64-bit TSS */
        load_TR_desc();
}

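/*
 * Make sure the vcpu runs with a valid ASID on this physical CPU: if the
 * per-CPU ASID generation has moved on since this VMCB last ran, allocate a
 * fresh ASID.
 */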
static void pre_svm_run(struct vcpu_svm *svm)
{
        int cpu = raw_smp_processor_id();
        struct svm_cpu_data *sd = per_cpu(svm_data, cpu);

        /* FIXME: handle wraparound of asid_generation */
        if (svm->asid_generation != sd->asid_generation)
                new_asid(svm, sd);
}

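/*
 * NMI injection goes through the VMCB event injection field; the IRET
 * intercept is armed so completion of the guest's NMI handler can be detected
 * and the NMI window reopened.
 */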
static void svm_inject_nmi(struct kvm_vcpu *vcpu)
{
        struct vcpu_svm *svm = to_svm(vcpu);

        svm->vmcb->control.event_inj = SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_NMI;
        vcpu->arch.hflags |= HF_NMI_MASK;
        set_intercept(svm, INTERCEPT_IRET);
        ++vcpu->stat.nmi_injections;
}

static inline void svm_inject_irq(struct vcpu_svm *svm, int irq)
{
        struct vmcb_control_area *control;

        control = &svm->vmcb->control;
        control->int_vector = irq;
        control->int_ctl &= ~V_INTR_PRIO_MASK;
        control->int_ctl |= V_IRQ_MASK |
                ((/*control->int_vector >> 4*/ 0xf) << V_INTR_PRIO_SHIFT);
        mark_dirty(svm->vmcb, VMCB_INTR);
}

static void svm_set_irq(struct kvm_vcpu *vcpu)
{
        struct vcpu_svm *svm = to_svm(vcpu);

        BUG_ON(!(gif_set(svm)));

        trace_kvm_inj_virq(vcpu->arch.interrupt.nr);
        ++vcpu->stat.irq_injections;

        svm->vmcb->control.event_inj = vcpu->arch.interrupt.nr |
                SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_INTR;
}

static void update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr)
{
        struct vcpu_svm *svm = to_svm(vcpu);

        if (is_guest_mode(vcpu) && (vcpu->arch.hflags & HF_VINTR_MASK))
                return;

        if (irr == -1)
                return;

        if (tpr >= irr)
                set_cr_intercept(svm, INTERCEPT_CR8_WRITE);
}

static int svm_nmi_allowed(struct kvm_vcpu *vcpu)
{
        struct vcpu_svm *svm = to_svm(vcpu);
        struct vmcb *vmcb = svm->vmcb;
        int ret;

        ret = !(vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK) &&
              !(svm->vcpu.arch.hflags & HF_NMI_MASK);
        ret = ret && gif_set(svm) && nested_svm_nmi(svm);

        return ret;
}

static bool svm_get_nmi_mask(struct kvm_vcpu *vcpu)
{
        struct vcpu_svm *svm = to_svm(vcpu);

        return !!(svm->vcpu.arch.hflags & HF_NMI_MASK);
}

static void svm_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked)
{
        struct vcpu_svm *svm = to_svm(vcpu);

        if (masked) {
                svm->vcpu.arch.hflags |= HF_NMI_MASK;
                set_intercept(svm, INTERCEPT_IRET);
        } else {
                svm->vcpu.arch.hflags &= ~HF_NMI_MASK;
                clr_intercept(svm, INTERCEPT_IRET);
        }
}

static int svm_interrupt_allowed(struct kvm_vcpu *vcpu)
{
        struct vcpu_svm *svm = to_svm(vcpu);
        struct vmcb *vmcb = svm->vmcb;
        int ret;

        if (!gif_set(svm) ||
            (vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK))
                return 0;

        ret = !!(kvm_get_rflags(vcpu) & X86_EFLAGS_IF);

        if (is_guest_mode(vcpu))
                return ret && !(svm->vcpu.arch.hflags & HF_VINTR_MASK);

        return ret;
}

static void enable_irq_window(struct kvm_vcpu *vcpu)
{
        struct vcpu_svm *svm = to_svm(vcpu);

        /*
         * In case GIF=0 we can't rely on the CPU to tell us when GIF becomes
         * 1, because that's a separate STGI/VMRUN intercept. The next time we
         * get that intercept, this function will be called again though and
         * we'll get the vintr intercept.
         */
        if (gif_set(svm) && nested_svm_intr(svm)) {
                svm_set_vintr(svm);
                svm_inject_irq(svm, 0x0);
        }
}

static void enable_nmi_window(struct kvm_vcpu *vcpu)
{
        struct vcpu_svm *svm = to_svm(vcpu);

        if ((svm->vcpu.arch.hflags & (HF_NMI_MASK | HF_IRET_MASK))
            == HF_NMI_MASK)
                return; /* IRET will cause a vm exit */

        /*
         * Something prevents the NMI from being injected. Single-step over
         * the possible problem (IRET, exception injection or interrupt
         * shadow).
         */
        svm->nmi_singlestep = true;
        svm->vmcb->save.rflags |= (X86_EFLAGS_TF | X86_EFLAGS_RF);
        update_db_intercept(vcpu);
}

static int svm_set_tss_addr(struct kvm *kvm, unsigned int addr)
{
        return 0;
}

static void svm_flush_tlb(struct kvm_vcpu *vcpu)
{
        struct vcpu_svm *svm = to_svm(vcpu);

        if (static_cpu_has(X86_FEATURE_FLUSHBYASID))
                svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ASID;
        else
                svm->asid_generation--;
}

static void svm_prepare_guest_switch(struct kvm_vcpu *vcpu)
{
}

static inline void sync_cr8_to_lapic(struct kvm_vcpu *vcpu)
{
        struct vcpu_svm *svm = to_svm(vcpu);

        if (is_guest_mode(vcpu) && (vcpu->arch.hflags & HF_VINTR_MASK))
                return;

        if (!is_cr_intercept(svm, INTERCEPT_CR8_WRITE)) {
                int cr8 = svm->vmcb->control.int_ctl & V_TPR_MASK;
                kvm_set_cr8(vcpu, cr8);
        }
}

static inline void sync_lapic_to_cr8(struct kvm_vcpu *vcpu)
{
        struct vcpu_svm *svm = to_svm(vcpu);
        u64 cr8;

        if (is_guest_mode(vcpu) && (vcpu->arch.hflags & HF_VINTR_MASK))
                return;

        cr8 = kvm_get_cr8(vcpu);
        svm->vmcb->control.int_ctl &= ~V_TPR_MASK;
        svm->vmcb->control.int_ctl |= cr8 & V_TPR_MASK;
}

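/*
 * After a #VMEXIT, recover any event that was being delivered when the exit
 * happened (exit_int_info) and requeue it so it is re-injected on the next
 * entry; NMIs, exceptions and external interrupts each need slightly
 * different handling.
 */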
static void svm_complete_interrupts(struct vcpu_svm *svm)
{
        u8 vector;
        int type;
        u32 exitintinfo = svm->vmcb->control.exit_int_info;
        unsigned int3_injected = svm->int3_injected;

        svm->int3_injected = 0;

        /*
         * If we've made progress since setting HF_IRET_MASK, we've
         * executed an IRET and can allow NMI injection.
         */
        if ((svm->vcpu.arch.hflags & HF_IRET_MASK)
            && kvm_rip_read(&svm->vcpu) != svm->nmi_iret_rip) {
                svm->vcpu.arch.hflags &= ~(HF_NMI_MASK | HF_IRET_MASK);
                kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);
        }

        svm->vcpu.arch.nmi_injected = false;
        kvm_clear_exception_queue(&svm->vcpu);
        kvm_clear_interrupt_queue(&svm->vcpu);

        if (!(exitintinfo & SVM_EXITINTINFO_VALID))
                return;

        kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);

        vector = exitintinfo & SVM_EXITINTINFO_VEC_MASK;
        type = exitintinfo & SVM_EXITINTINFO_TYPE_MASK;

        switch (type) {
        case SVM_EXITINTINFO_TYPE_NMI:
                svm->vcpu.arch.nmi_injected = true;
                break;
        case SVM_EXITINTINFO_TYPE_EXEPT:
                /*
                 * In case of software exceptions, do not reinject the vector,
                 * but re-execute the instruction instead. Rewind RIP first
                 * if we emulated INT3 before.
                 */
                if (kvm_exception_is_soft(vector)) {
                        if (vector == BP_VECTOR && int3_injected &&
                            kvm_is_linear_rip(&svm->vcpu, svm->int3_rip))
                                kvm_rip_write(&svm->vcpu,
                                              kvm_rip_read(&svm->vcpu) -
                                              int3_injected);
                        break;
                }
                if (exitintinfo & SVM_EXITINTINFO_VALID_ERR) {
                        u32 err = svm->vmcb->control.exit_int_info_err;
                        kvm_requeue_exception_e(&svm->vcpu, vector, err);
                } else
                        kvm_requeue_exception(&svm->vcpu, vector);
                break;
        case SVM_EXITINTINFO_TYPE_INTR:
                kvm_queue_interrupt(&svm->vcpu, vector, false);
                break;
        default:
                break;
        }
}

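/*
 * Called when an entry is aborted after injection was already set up: copy
 * the pending event_inj back into exit_int_info so the normal completion path
 * above can requeue it.
 */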
static void svm_cancel_injection(struct kvm_vcpu *vcpu)
{
        struct vcpu_svm *svm = to_svm(vcpu);
        struct vmcb_control_area *control = &svm->vmcb->control;

        control->exit_int_info = control->event_inj;
        control->exit_int_info_err = control->event_inj_err;
        control->event_inj = 0;
        svm_complete_interrupts(svm);
}

#ifdef CONFIG_X86_64
#define R "r"
#else
#define R "e"
#endif

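/*
 * The actual guest entry: general-purpose registers (except RAX, RSP and RIP,
 * which live in the VMCB save area) are loaded and saved by hand around the
 * VMLOAD/VMRUN/VMSAVE sequence. The R macro above selects the "r"/"e"
 * register prefix for 64-bit vs. 32-bit builds.
 */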
static void svm_vcpu_run(struct kvm_vcpu *vcpu)
{
        struct vcpu_svm *svm = to_svm(vcpu);

        svm->vmcb->save.rax = vcpu->arch.regs[VCPU_REGS_RAX];
        svm->vmcb->save.rsp = vcpu->arch.regs[VCPU_REGS_RSP];
        svm->vmcb->save.rip = vcpu->arch.regs[VCPU_REGS_RIP];

        /*
         * A vmexit emulation is required before the vcpu can be executed
         * again.
         */
        if (unlikely(svm->nested.exit_required))
                return;

        pre_svm_run(svm);

        sync_lapic_to_cr8(vcpu);

        svm->vmcb->save.cr2 = vcpu->arch.cr2;

        clgi();

        local_irq_enable();

        asm volatile (
                "push %%"R"bp; \n\t"
                "mov %c[rbx](%[svm]), %%"R"bx \n\t"
                "mov %c[rcx](%[svm]), %%"R"cx \n\t"
                "mov %c[rdx](%[svm]), %%"R"dx \n\t"
                "mov %c[rsi](%[svm]), %%"R"si \n\t"
                "mov %c[rdi](%[svm]), %%"R"di \n\t"
                "mov %c[rbp](%[svm]), %%"R"bp \n\t"
#ifdef CONFIG_X86_64
                "mov %c[r8](%[svm]), %%r8 \n\t"
                "mov %c[r9](%[svm]), %%r9 \n\t"
                "mov %c[r10](%[svm]), %%r10 \n\t"
                "mov %c[r11](%[svm]), %%r11 \n\t"
                "mov %c[r12](%[svm]), %%r12 \n\t"
                "mov %c[r13](%[svm]), %%r13 \n\t"
                "mov %c[r14](%[svm]), %%r14 \n\t"
                "mov %c[r15](%[svm]), %%r15 \n\t"
#endif

                /* Enter guest mode */
                "push %%"R"ax \n\t"
                "mov %c[vmcb](%[svm]), %%"R"ax \n\t"
                __ex(SVM_VMLOAD) "\n\t"
                __ex(SVM_VMRUN) "\n\t"
                __ex(SVM_VMSAVE) "\n\t"
                "pop %%"R"ax \n\t"

                /* Save guest registers, load host registers */
                "mov %%"R"bx, %c[rbx](%[svm]) \n\t"
                "mov %%"R"cx, %c[rcx](%[svm]) \n\t"
                "mov %%"R"dx, %c[rdx](%[svm]) \n\t"
                "mov %%"R"si, %c[rsi](%[svm]) \n\t"
                "mov %%"R"di, %c[rdi](%[svm]) \n\t"
                "mov %%"R"bp, %c[rbp](%[svm]) \n\t"
#ifdef CONFIG_X86_64
                "mov %%r8, %c[r8](%[svm]) \n\t"
                "mov %%r9, %c[r9](%[svm]) \n\t"
                "mov %%r10, %c[r10](%[svm]) \n\t"
                "mov %%r11, %c[r11](%[svm]) \n\t"
                "mov %%r12, %c[r12](%[svm]) \n\t"
                "mov %%r13, %c[r13](%[svm]) \n\t"
                "mov %%r14, %c[r14](%[svm]) \n\t"
                "mov %%r15, %c[r15](%[svm]) \n\t"
#endif
                "pop %%"R"bp"
                :
                : [svm]"a"(svm),
                  [vmcb]"i"(offsetof(struct vcpu_svm, vmcb_pa)),
                  [rbx]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RBX])),
                  [rcx]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RCX])),
                  [rdx]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RDX])),
                  [rsi]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RSI])),
                  [rdi]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RDI])),
                  [rbp]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RBP]))
#ifdef CONFIG_X86_64
                , [r8]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R8])),
                  [r9]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R9])),
                  [r10]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R10])),
                  [r11]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R11])),
                  [r12]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R12])),
                  [r13]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R13])),
                  [r14]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R14])),
                  [r15]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R15]))
#endif
                : "cc", "memory"
                , R"bx", R"cx", R"dx", R"si", R"di"
#ifdef CONFIG_X86_64
                , "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15"
#endif
                );

#ifdef CONFIG_X86_64
        wrmsrl(MSR_GS_BASE, svm->host.gs_base);
#else
        loadsegment(fs, svm->host.fs);
#ifndef CONFIG_X86_32_LAZY_GS
        loadsegment(gs, svm->host.gs);
#endif
#endif

        reload_tss(vcpu);

        local_irq_disable();

        vcpu->arch.cr2 = svm->vmcb->save.cr2;
        vcpu->arch.regs[VCPU_REGS_RAX] = svm->vmcb->save.rax;
        vcpu->arch.regs[VCPU_REGS_RSP] = svm->vmcb->save.rsp;
        vcpu->arch.regs[VCPU_REGS_RIP] = svm->vmcb->save.rip;

        trace_kvm_exit(svm->vmcb->control.exit_code, vcpu, KVM_ISA_SVM);

        if (unlikely(svm->vmcb->control.exit_code == SVM_EXIT_NMI))
                kvm_before_handle_nmi(&svm->vcpu);

        stgi();

        /* Any pending NMI will happen here */

        if (unlikely(svm->vmcb->control.exit_code == SVM_EXIT_NMI))
                kvm_after_handle_nmi(&svm->vcpu);

        sync_cr8_to_lapic(vcpu);

        svm->next_rip = 0;

        svm->vmcb->control.tlb_ctl = TLB_CONTROL_DO_NOTHING;

        /* if exit due to PF check for async PF */
        if (svm->vmcb->control.exit_code == SVM_EXIT_EXCP_BASE + PF_VECTOR)
                svm->apf_reason = kvm_read_and_reset_pf_reason();

        if (npt_enabled) {
                vcpu->arch.regs_avail &= ~(1 << VCPU_EXREG_PDPTR);
                vcpu->arch.regs_dirty &= ~(1 << VCPU_EXREG_PDPTR);
        }

        /*
         * We need to handle MC intercepts here before the vcpu has a chance
         * to change the physical cpu
         */
        if (unlikely(svm->vmcb->control.exit_code ==
                     SVM_EXIT_EXCP_BASE + MC_VECTOR))
                svm_handle_mce(svm);

        mark_all_clean(svm->vmcb);
}

#undef R

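/*
 * With nested paging disabled, the page table root handed in by the MMU goes
 * into save.cr3; with nested paging enabled, it goes into control.nested_cr3
 * while save.cr3 mirrors the guest's own CR3 (see set_tdp_cr3() below).
 */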
static void svm_set_cr3(struct kvm_vcpu *vcpu, unsigned long root)
{
        struct vcpu_svm *svm = to_svm(vcpu);

        svm->vmcb->save.cr3 = root;
        mark_dirty(svm->vmcb, VMCB_CR);
        svm_flush_tlb(vcpu);
}

static void set_tdp_cr3(struct kvm_vcpu *vcpu, unsigned long root)
{
        struct vcpu_svm *svm = to_svm(vcpu);

        svm->vmcb->control.nested_cr3 = root;
        mark_dirty(svm->vmcb, VMCB_NPT);

        /* Also sync guest cr3 here in case we live migrate */
        svm->vmcb->save.cr3 = kvm_read_cr3(vcpu);
        mark_dirty(svm->vmcb, VMCB_CR);

        svm_flush_tlb(vcpu);
}

static int is_disabled(void)
{
        u64 vm_cr;

        rdmsrl(MSR_VM_CR, vm_cr);
        if (vm_cr & (1 << SVM_VM_CR_SVM_DISABLE))
                return 1;

        return 0;
}

static void
svm_patch_hypercall(struct kvm_vcpu *vcpu, unsigned char *hypercall)
{
        /*
         * Patch in the VMMCALL instruction:
         */
        hypercall[0] = 0x0f;
        hypercall[1] = 0x01;
        hypercall[2] = 0xd9;
}

static void svm_check_processor_compat(void *rtn)
{
        *(int *)rtn = 0;
}

static bool svm_cpu_has_accelerated_tpr(void)
{
        return false;
}

static u64 svm_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio)
{
        return 0;
}

static void svm_cpuid_update(struct kvm_vcpu *vcpu)
{
}

static void svm_set_supported_cpuid(u32 func, struct kvm_cpuid_entry2 *entry)
{
        switch (func) {
        case 0x80000001:
                if (nested)
                        entry->ecx |= (1 << 2); /* Set SVM bit */
                break;
        case 0x8000000A:
                entry->eax = 1; /* SVM revision 1 */
                entry->ebx = 8; /* Support 8 ASIDs in case we add proper
                                   ASID emulation to nested SVM */
                entry->ecx = 0; /* Reserved */
                entry->edx = 0; /* By default do not support any
                                   additional features */

                /* Support next_rip if host supports it */
                if (boot_cpu_has(X86_FEATURE_NRIPS))
                        entry->edx |= SVM_FEATURE_NRIP;

                /* Support NPT for the guest if enabled */
                if (npt_enabled)
                        entry->edx |= SVM_FEATURE_NPT;

                break;
        }
}

static int svm_get_lpage_level(void)
{
        return PT_PDPE_LEVEL;
}

static bool svm_rdtscp_supported(void)
{
        return false;
}

static bool svm_has_wbinvd_exit(void)
{
        return true;
}

static void svm_fpu_deactivate(struct kvm_vcpu *vcpu)
{
        struct vcpu_svm *svm = to_svm(vcpu);

        set_exception_intercept(svm, NM_VECTOR);
        update_cr0_intercept(svm);
}

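/*
 * Map emulator intercept identifiers to SVM exit codes, together with the
 * emulation stage (pre-exception, post-exception or post-memory-access) at
 * which the intercept check must run.
 */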
#define PRE_EX(exit)  { .exit_code = (exit), \
                        .stage = X86_ICPT_PRE_EXCEPT, }
#define POST_EX(exit) { .exit_code = (exit), \
                        .stage = X86_ICPT_POST_EXCEPT, }
#define POST_MEM(exit) { .exit_code = (exit), \
                        .stage = X86_ICPT_POST_MEMACCESS, }

static struct __x86_intercept {
        u32 exit_code;
        enum x86_intercept_stage stage;
} x86_intercept_map[] = {
        [x86_intercept_cr_read] = POST_EX(SVM_EXIT_READ_CR0),
        [x86_intercept_cr_write] = POST_EX(SVM_EXIT_WRITE_CR0),
        [x86_intercept_clts] = POST_EX(SVM_EXIT_WRITE_CR0),
        [x86_intercept_lmsw] = POST_EX(SVM_EXIT_WRITE_CR0),
        [x86_intercept_smsw] = POST_EX(SVM_EXIT_READ_CR0),
        [x86_intercept_dr_read] = POST_EX(SVM_EXIT_READ_DR0),
        [x86_intercept_dr_write] = POST_EX(SVM_EXIT_WRITE_DR0),
        [x86_intercept_sldt] = POST_EX(SVM_EXIT_LDTR_READ),
        [x86_intercept_str] = POST_EX(SVM_EXIT_TR_READ),
        [x86_intercept_lldt] = POST_EX(SVM_EXIT_LDTR_WRITE),
        [x86_intercept_ltr] = POST_EX(SVM_EXIT_TR_WRITE),
        [x86_intercept_sgdt] = POST_EX(SVM_EXIT_GDTR_READ),
        [x86_intercept_sidt] = POST_EX(SVM_EXIT_IDTR_READ),
        [x86_intercept_lgdt] = POST_EX(SVM_EXIT_GDTR_WRITE),
        [x86_intercept_lidt] = POST_EX(SVM_EXIT_IDTR_WRITE),
        [x86_intercept_vmrun] = POST_EX(SVM_EXIT_VMRUN),
        [x86_intercept_vmmcall] = POST_EX(SVM_EXIT_VMMCALL),
        [x86_intercept_vmload] = POST_EX(SVM_EXIT_VMLOAD),
        [x86_intercept_vmsave] = POST_EX(SVM_EXIT_VMSAVE),
        [x86_intercept_stgi] = POST_EX(SVM_EXIT_STGI),
        [x86_intercept_clgi] = POST_EX(SVM_EXIT_CLGI),
        [x86_intercept_skinit] = POST_EX(SVM_EXIT_SKINIT),
        [x86_intercept_invlpga] = POST_EX(SVM_EXIT_INVLPGA),
        [x86_intercept_rdtscp] = POST_EX(SVM_EXIT_RDTSCP),
        [x86_intercept_monitor] = POST_MEM(SVM_EXIT_MONITOR),
        [x86_intercept_mwait] = POST_EX(SVM_EXIT_MWAIT),
        [x86_intercept_invlpg] = POST_EX(SVM_EXIT_INVLPG),
        [x86_intercept_invd] = POST_EX(SVM_EXIT_INVD),
        [x86_intercept_wbinvd] = POST_EX(SVM_EXIT_WBINVD),
        [x86_intercept_wrmsr] = POST_EX(SVM_EXIT_MSR),
        [x86_intercept_rdtsc] = POST_EX(SVM_EXIT_RDTSC),
        [x86_intercept_rdmsr] = POST_EX(SVM_EXIT_MSR),
        [x86_intercept_rdpmc] = POST_EX(SVM_EXIT_RDPMC),
        [x86_intercept_cpuid] = PRE_EX(SVM_EXIT_CPUID),
        [x86_intercept_rsm] = PRE_EX(SVM_EXIT_RSM),
        [x86_intercept_pause] = PRE_EX(SVM_EXIT_PAUSE),
        [x86_intercept_pushf] = PRE_EX(SVM_EXIT_PUSHF),
        [x86_intercept_popf] = PRE_EX(SVM_EXIT_POPF),
        [x86_intercept_intn] = PRE_EX(SVM_EXIT_SWINT),
        [x86_intercept_iret] = PRE_EX(SVM_EXIT_IRET),
        [x86_intercept_icebp] = PRE_EX(SVM_EXIT_ICEBP),
        [x86_intercept_hlt] = POST_EX(SVM_EXIT_HLT),
        [x86_intercept_in] = POST_EX(SVM_EXIT_IOIO),
        [x86_intercept_ins] = POST_EX(SVM_EXIT_IOIO),
        [x86_intercept_out] = POST_EX(SVM_EXIT_IOIO),
        [x86_intercept_outs] = POST_EX(SVM_EXIT_IOIO),
};

#undef PRE_EX
#undef POST_EX
#undef POST_MEM

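/*
 * Intercept check used by the instruction emulator: translate the emulator
 * intercept into the SVM exit code and exit_info layout a nested hypervisor
 * would see, then ask the nested exit logic whether it would take the exit.
 */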
static int svm_check_intercept(struct kvm_vcpu *vcpu,
                               struct x86_instruction_info *info,
                               enum x86_intercept_stage stage)
{
        struct vcpu_svm *svm = to_svm(vcpu);
        int vmexit, ret = X86EMUL_CONTINUE;
        struct __x86_intercept icpt_info;
        struct vmcb *vmcb = svm->vmcb;

        if (info->intercept >= ARRAY_SIZE(x86_intercept_map))
                goto out;

        icpt_info = x86_intercept_map[info->intercept];

        if (stage != icpt_info.stage)
                goto out;

        switch (icpt_info.exit_code) {
        case SVM_EXIT_READ_CR0:
                if (info->intercept == x86_intercept_cr_read)
                        icpt_info.exit_code += info->modrm_reg;
                break;
        case SVM_EXIT_WRITE_CR0: {
                unsigned long cr0, val;
                u64 intercept;

                if (info->intercept == x86_intercept_cr_write)
                        icpt_info.exit_code += info->modrm_reg;

                if (icpt_info.exit_code != SVM_EXIT_WRITE_CR0)
                        break;

                intercept = svm->nested.intercept;

                if (!(intercept & (1ULL << INTERCEPT_SELECTIVE_CR0)))
                        break;

                cr0 = vcpu->arch.cr0 & ~SVM_CR0_SELECTIVE_MASK;
                val = info->src_val & ~SVM_CR0_SELECTIVE_MASK;

                if (info->intercept == x86_intercept_lmsw) {
                        cr0 &= 0xfUL;
                        val &= 0xfUL;
                        /* lmsw can't clear PE - catch this here */
                        if (cr0 & X86_CR0_PE)
                                val |= X86_CR0_PE;
                }

                if (cr0 ^ val)
                        icpt_info.exit_code = SVM_EXIT_CR0_SEL_WRITE;

                break;
        }
        case SVM_EXIT_READ_DR0:
        case SVM_EXIT_WRITE_DR0:
                icpt_info.exit_code += info->modrm_reg;
                break;
        case SVM_EXIT_MSR:
                if (info->intercept == x86_intercept_wrmsr)
                        vmcb->control.exit_info_1 = 1;
                else
                        vmcb->control.exit_info_1 = 0;
                break;
        case SVM_EXIT_PAUSE:
                /*
                 * We only get this for a NOP, but PAUSE is NOP with a REP
                 * prefix, so check for the prefix here.
                 */
                if (info->rep_prefix != REPE_PREFIX)
                        goto out;
        case SVM_EXIT_IOIO: {
                u64 exit_info;
                u32 bytes;

                exit_info = (vcpu->arch.regs[VCPU_REGS_RDX] & 0xffff) << 16;

                if (info->intercept == x86_intercept_in ||
                    info->intercept == x86_intercept_ins) {
                        exit_info |= SVM_IOIO_TYPE_MASK;
                        bytes = info->src_bytes;
                } else {
                        bytes = info->dst_bytes;
                }

                if (info->intercept == x86_intercept_outs ||
                    info->intercept == x86_intercept_ins)
                        exit_info |= SVM_IOIO_STR_MASK;

                if (info->rep_prefix)
                        exit_info |= SVM_IOIO_REP_MASK;

                bytes = min(bytes, 4u);

                exit_info |= bytes << SVM_IOIO_SIZE_SHIFT;

                exit_info |= (u32)info->ad_bytes << (SVM_IOIO_ASIZE_SHIFT - 1);

                vmcb->control.exit_info_1 = exit_info;
                vmcb->control.exit_info_2 = info->next_rip;

                break;
        }
        default:
                break;
        }

        vmcb->control.next_rip = info->next_rip;
        vmcb->control.exit_code = icpt_info.exit_code;
        vmexit = nested_svm_exit_handled(svm);

        ret = (vmexit == NESTED_EXIT_DONE) ? X86EMUL_INTERCEPTED
                                           : X86EMUL_CONTINUE;

out:
        return ret;
}

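/*
 * Callback table wiring the SVM implementation into the generic KVM x86 code;
 * passed to kvm_init() below.
 */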
static struct kvm_x86_ops svm_x86_ops = {
        .cpu_has_kvm_support = has_svm,
        .disabled_by_bios = is_disabled,
        .hardware_setup = svm_hardware_setup,
        .hardware_unsetup = svm_hardware_unsetup,
        .check_processor_compatibility = svm_check_processor_compat,
        .hardware_enable = svm_hardware_enable,
        .hardware_disable = svm_hardware_disable,
        .cpu_has_accelerated_tpr = svm_cpu_has_accelerated_tpr,

        .vcpu_create = svm_create_vcpu,
        .vcpu_free = svm_free_vcpu,
        .vcpu_reset = svm_vcpu_reset,

        .prepare_guest_switch = svm_prepare_guest_switch,
        .vcpu_load = svm_vcpu_load,
        .vcpu_put = svm_vcpu_put,

        .set_guest_debug = svm_guest_debug,
        .get_msr = svm_get_msr,
        .set_msr = svm_set_msr,
        .get_segment_base = svm_get_segment_base,
        .get_segment = svm_get_segment,
        .set_segment = svm_set_segment,
        .get_cpl = svm_get_cpl,
        .get_cs_db_l_bits = kvm_get_cs_db_l_bits,
        .decache_cr0_guest_bits = svm_decache_cr0_guest_bits,
        .decache_cr3 = svm_decache_cr3,
        .decache_cr4_guest_bits = svm_decache_cr4_guest_bits,
        .set_cr0 = svm_set_cr0,
        .set_cr3 = svm_set_cr3,
        .set_cr4 = svm_set_cr4,
        .set_efer = svm_set_efer,
        .get_idt = svm_get_idt,
        .set_idt = svm_set_idt,
        .get_gdt = svm_get_gdt,
        .set_gdt = svm_set_gdt,
        .set_dr7 = svm_set_dr7,
        .cache_reg = svm_cache_reg,
        .get_rflags = svm_get_rflags,
        .set_rflags = svm_set_rflags,
        .fpu_activate = svm_fpu_activate,
        .fpu_deactivate = svm_fpu_deactivate,

        .tlb_flush = svm_flush_tlb,

        .run = svm_vcpu_run,
        .handle_exit = handle_exit,
        .skip_emulated_instruction = skip_emulated_instruction,
        .set_interrupt_shadow = svm_set_interrupt_shadow,
        .get_interrupt_shadow = svm_get_interrupt_shadow,
        .patch_hypercall = svm_patch_hypercall,
        .set_irq = svm_set_irq,
        .set_nmi = svm_inject_nmi,
        .queue_exception = svm_queue_exception,
        .cancel_injection = svm_cancel_injection,
        .interrupt_allowed = svm_interrupt_allowed,
        .nmi_allowed = svm_nmi_allowed,
        .get_nmi_mask = svm_get_nmi_mask,
        .set_nmi_mask = svm_set_nmi_mask,
        .enable_nmi_window = enable_nmi_window,
        .enable_irq_window = enable_irq_window,
        .update_cr8_intercept = update_cr8_intercept,

        .set_tss_addr = svm_set_tss_addr,
        .get_tdp_level = get_npt_level,
        .get_mt_mask = svm_get_mt_mask,

        .get_exit_info = svm_get_exit_info,

        .get_lpage_level = svm_get_lpage_level,

        .cpuid_update = svm_cpuid_update,

        .rdtscp_supported = svm_rdtscp_supported,

        .set_supported_cpuid = svm_set_supported_cpuid,

        .has_wbinvd_exit = svm_has_wbinvd_exit,

        .set_tsc_khz = svm_set_tsc_khz,
        .write_tsc_offset = svm_write_tsc_offset,
        .adjust_tsc_offset = svm_adjust_tsc_offset,
        .compute_tsc_offset = svm_compute_tsc_offset,
        .read_l1_tsc = svm_read_l1_tsc,

        .set_tdp_cr3 = set_tdp_cr3,

        .check_intercept = svm_check_intercept,
};

static int __init svm_init(void)
{
        return kvm_init(&svm_x86_ops, sizeof(struct vcpu_svm),
                        __alignof__(struct vcpu_svm), THIS_MODULE);
}

static void __exit svm_exit(void)
{
        kvm_exit();
}

module_init(svm_init)
module_exit(svm_exit)