vmx.c

  1. /*
  2. * Kernel-based Virtual Machine driver for Linux
  3. *
  4. * This module enables machines with Intel VT-x extensions to run virtual
  5. * machines without emulation or binary translation.
  6. *
  7. * Copyright (C) 2006 Qumranet, Inc.
  8. *
  9. * Authors:
  10. * Avi Kivity <avi@qumranet.com>
  11. * Yaniv Kamay <yaniv@qumranet.com>
  12. *
  13. * This work is licensed under the terms of the GNU GPL, version 2. See
  14. * the COPYING file in the top-level directory.
  15. *
  16. */
  17. #include "irq.h"
  18. #include "mmu.h"
  19. #include <linux/kvm_host.h>
  20. #include <linux/module.h>
  21. #include <linux/kernel.h>
  22. #include <linux/mm.h>
  23. #include <linux/highmem.h>
  24. #include <linux/sched.h>
  25. #include <linux/moduleparam.h>
  26. #include <linux/ftrace_event.h>
  27. #include <linux/slab.h>
  28. #include "kvm_cache_regs.h"
  29. #include "x86.h"
  30. #include <asm/io.h>
  31. #include <asm/desc.h>
  32. #include <asm/vmx.h>
  33. #include <asm/virtext.h>
  34. #include <asm/mce.h>
  35. #include "trace.h"
  36. #define __ex(x) __kvm_handle_fault_on_reboot(x)
  37. MODULE_AUTHOR("Qumranet");
  38. MODULE_LICENSE("GPL");
  39. static int __read_mostly bypass_guest_pf = 1;
  40. module_param(bypass_guest_pf, bool, S_IRUGO);
  41. static int __read_mostly enable_vpid = 1;
  42. module_param_named(vpid, enable_vpid, bool, 0444);
  43. static int __read_mostly flexpriority_enabled = 1;
  44. module_param_named(flexpriority, flexpriority_enabled, bool, S_IRUGO);
  45. static int __read_mostly enable_ept = 1;
  46. module_param_named(ept, enable_ept, bool, S_IRUGO);
  47. static int __read_mostly enable_unrestricted_guest = 1;
  48. module_param_named(unrestricted_guest,
  49. enable_unrestricted_guest, bool, S_IRUGO);
  50. static int __read_mostly emulate_invalid_guest_state = 0;
  51. module_param(emulate_invalid_guest_state, bool, S_IRUGO);
  52. #define KVM_GUEST_CR0_MASK_UNRESTRICTED_GUEST \
  53. (X86_CR0_WP | X86_CR0_NE | X86_CR0_NW | X86_CR0_CD)
  54. #define KVM_GUEST_CR0_MASK \
  55. (KVM_GUEST_CR0_MASK_UNRESTRICTED_GUEST | X86_CR0_PG | X86_CR0_PE)
  56. #define KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST \
  57. (X86_CR0_WP | X86_CR0_NE)
  58. #define KVM_VM_CR0_ALWAYS_ON \
  59. (KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST | X86_CR0_PG | X86_CR0_PE)
  60. #define KVM_CR4_GUEST_OWNED_BITS \
  61. (X86_CR4_PVI | X86_CR4_DE | X86_CR4_PCE | X86_CR4_OSFXSR \
  62. | X86_CR4_OSXMMEXCPT)
  63. #define KVM_PMODE_VM_CR4_ALWAYS_ON (X86_CR4_PAE | X86_CR4_VMXE)
  64. #define KVM_RMODE_VM_CR4_ALWAYS_ON (X86_CR4_VME | X86_CR4_PAE | X86_CR4_VMXE)
  65. #define RMODE_GUEST_OWNED_EFLAGS_BITS (~(X86_EFLAGS_IOPL | X86_EFLAGS_VM))
  66. /*
  67. * These two parameters are used to configure the controls for Pause-Loop Exiting:
  68. * ple_gap: upper bound on the amount of time between two successive
  69. * executions of PAUSE in a loop. Also indicates whether PLE is enabled.
  70. * According to testing, this time is usually smaller than 41 cycles.
  71. * ple_window: upper bound on the amount of time a guest is allowed to execute
  72. * in a PAUSE loop. Tests indicate that most spinlocks are held for
  73. * less than 2^12 cycles.
  74. * Time is measured on a counter that runs at the same rate as the TSC;
  75. * refer to SDM volume 3B sections 21.6.13 & 22.1.3.
  76. */
  77. #define KVM_VMX_DEFAULT_PLE_GAP 41
  78. #define KVM_VMX_DEFAULT_PLE_WINDOW 4096
  79. static int ple_gap = KVM_VMX_DEFAULT_PLE_GAP;
  80. module_param(ple_gap, int, S_IRUGO);
  81. static int ple_window = KVM_VMX_DEFAULT_PLE_WINDOW;
  82. module_param(ple_window, int, S_IRUGO);
  83. struct vmcs {
  84. u32 revision_id;
  85. u32 abort;
  86. char data[0];
  87. };
  88. struct shared_msr_entry {
  89. unsigned index;
  90. u64 data;
  91. u64 mask;
  92. };
  93. struct vcpu_vmx {
  94. struct kvm_vcpu vcpu;
  95. struct list_head local_vcpus_link;
  96. unsigned long host_rsp;
  97. int launched;
  98. u8 fail;
  99. u32 idt_vectoring_info;
  100. struct shared_msr_entry *guest_msrs;
  101. int nmsrs;
  102. int save_nmsrs;
  103. #ifdef CONFIG_X86_64
  104. u64 msr_host_kernel_gs_base;
  105. u64 msr_guest_kernel_gs_base;
  106. #endif
  107. struct vmcs *vmcs;
  108. struct {
  109. int loaded;
  110. u16 fs_sel, gs_sel, ldt_sel;
  111. int gs_ldt_reload_needed;
  112. int fs_reload_needed;
  113. } host_state;
  114. struct {
  115. int vm86_active;
  116. ulong save_rflags;
  117. struct kvm_save_segment {
  118. u16 selector;
  119. unsigned long base;
  120. u32 limit;
  121. u32 ar;
  122. } tr, es, ds, fs, gs;
  123. struct {
  124. bool pending;
  125. u8 vector;
  126. unsigned rip;
  127. } irq;
  128. } rmode;
  129. int vpid;
  130. bool emulation_required;
  131. /* Support for vnmi-less CPUs */
  132. int soft_vnmi_blocked;
  133. ktime_t entry_time;
  134. s64 vnmi_blocked_time;
  135. u32 exit_reason;
  136. bool rdtscp_enabled;
  137. };
  138. static inline struct vcpu_vmx *to_vmx(struct kvm_vcpu *vcpu)
  139. {
  140. return container_of(vcpu, struct vcpu_vmx, vcpu);
  141. }
  142. static int init_rmode(struct kvm *kvm);
  143. static u64 construct_eptp(unsigned long root_hpa);
  144. static DEFINE_PER_CPU(struct vmcs *, vmxarea);
  145. static DEFINE_PER_CPU(struct vmcs *, current_vmcs);
  146. static DEFINE_PER_CPU(struct list_head, vcpus_on_cpu);
  147. static unsigned long *vmx_io_bitmap_a;
  148. static unsigned long *vmx_io_bitmap_b;
  149. static unsigned long *vmx_msr_bitmap_legacy;
  150. static unsigned long *vmx_msr_bitmap_longmode;
  151. static DECLARE_BITMAP(vmx_vpid_bitmap, VMX_NR_VPIDS);
  152. static DEFINE_SPINLOCK(vmx_vpid_lock);
  153. static struct vmcs_config {
  154. int size;
  155. int order;
  156. u32 revision_id;
  157. u32 pin_based_exec_ctrl;
  158. u32 cpu_based_exec_ctrl;
  159. u32 cpu_based_2nd_exec_ctrl;
  160. u32 vmexit_ctrl;
  161. u32 vmentry_ctrl;
  162. } vmcs_config;
  163. static struct vmx_capability {
  164. u32 ept;
  165. u32 vpid;
  166. } vmx_capability;
  167. #define VMX_SEGMENT_FIELD(seg) \
  168. [VCPU_SREG_##seg] = { \
  169. .selector = GUEST_##seg##_SELECTOR, \
  170. .base = GUEST_##seg##_BASE, \
  171. .limit = GUEST_##seg##_LIMIT, \
  172. .ar_bytes = GUEST_##seg##_AR_BYTES, \
  173. }
  174. static struct kvm_vmx_segment_field {
  175. unsigned selector;
  176. unsigned base;
  177. unsigned limit;
  178. unsigned ar_bytes;
  179. } kvm_vmx_segment_fields[] = {
  180. VMX_SEGMENT_FIELD(CS),
  181. VMX_SEGMENT_FIELD(DS),
  182. VMX_SEGMENT_FIELD(ES),
  183. VMX_SEGMENT_FIELD(FS),
  184. VMX_SEGMENT_FIELD(GS),
  185. VMX_SEGMENT_FIELD(SS),
  186. VMX_SEGMENT_FIELD(TR),
  187. VMX_SEGMENT_FIELD(LDTR),
  188. };
  189. static u64 host_efer;
  190. static void ept_save_pdptrs(struct kvm_vcpu *vcpu);
  191. /*
  192. * Keep MSR_K6_STAR at the end, as setup_msrs() will try to optimize it
  193. * away by decrementing the array size.
  194. */
  195. static const u32 vmx_msr_index[] = {
  196. #ifdef CONFIG_X86_64
  197. MSR_SYSCALL_MASK, MSR_LSTAR, MSR_CSTAR,
  198. #endif
  199. MSR_EFER, MSR_TSC_AUX, MSR_K6_STAR,
  200. };
  201. #define NR_VMX_MSR ARRAY_SIZE(vmx_msr_index)
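/*
 * Note on the is_*() helpers below: the VM-exit interruption-information
 * field packs the exception/interrupt vector in bits 7:0, the event type
 * in bits 10:8 and a valid bit in bit 31 (see the SDM's VM-exit
 * information fields), so each predicate masks those fields and compares
 * against the expected type/vector/valid combination.
 */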
  202. static inline bool is_page_fault(u32 intr_info)
  203. {
  204. return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK |
  205. INTR_INFO_VALID_MASK)) ==
  206. (INTR_TYPE_HARD_EXCEPTION | PF_VECTOR | INTR_INFO_VALID_MASK);
  207. }
  208. static inline bool is_no_device(u32 intr_info)
  209. {
  210. return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK |
  211. INTR_INFO_VALID_MASK)) ==
  212. (INTR_TYPE_HARD_EXCEPTION | NM_VECTOR | INTR_INFO_VALID_MASK);
  213. }
  214. static inline bool is_invalid_opcode(u32 intr_info)
  215. {
  216. return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK |
  217. INTR_INFO_VALID_MASK)) ==
  218. (INTR_TYPE_HARD_EXCEPTION | UD_VECTOR | INTR_INFO_VALID_MASK);
  219. }
  220. static inline bool is_external_interrupt(u32 intr_info)
  221. {
  222. return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VALID_MASK))
  223. == (INTR_TYPE_EXT_INTR | INTR_INFO_VALID_MASK);
  224. }
  225. static inline bool is_machine_check(u32 intr_info)
  226. {
  227. return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK |
  228. INTR_INFO_VALID_MASK)) ==
  229. (INTR_TYPE_HARD_EXCEPTION | MC_VECTOR | INTR_INFO_VALID_MASK);
  230. }
  231. static inline bool cpu_has_vmx_msr_bitmap(void)
  232. {
  233. return vmcs_config.cpu_based_exec_ctrl & CPU_BASED_USE_MSR_BITMAPS;
  234. }
  235. static inline bool cpu_has_vmx_tpr_shadow(void)
  236. {
  237. return vmcs_config.cpu_based_exec_ctrl & CPU_BASED_TPR_SHADOW;
  238. }
  239. static inline bool vm_need_tpr_shadow(struct kvm *kvm)
  240. {
  241. return (cpu_has_vmx_tpr_shadow()) && (irqchip_in_kernel(kvm));
  242. }
  243. static inline bool cpu_has_secondary_exec_ctrls(void)
  244. {
  245. return vmcs_config.cpu_based_exec_ctrl &
  246. CPU_BASED_ACTIVATE_SECONDARY_CONTROLS;
  247. }
  248. static inline bool cpu_has_vmx_virtualize_apic_accesses(void)
  249. {
  250. return vmcs_config.cpu_based_2nd_exec_ctrl &
  251. SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
  252. }
  253. static inline bool cpu_has_vmx_flexpriority(void)
  254. {
  255. return cpu_has_vmx_tpr_shadow() &&
  256. cpu_has_vmx_virtualize_apic_accesses();
  257. }
  258. static inline bool cpu_has_vmx_ept_execute_only(void)
  259. {
  260. return vmx_capability.ept & VMX_EPT_EXECUTE_ONLY_BIT;
  261. }
  262. static inline bool cpu_has_vmx_eptp_uncacheable(void)
  263. {
  264. return vmx_capability.ept & VMX_EPTP_UC_BIT;
  265. }
  266. static inline bool cpu_has_vmx_eptp_writeback(void)
  267. {
  268. return vmx_capability.ept & VMX_EPTP_WB_BIT;
  269. }
  270. static inline bool cpu_has_vmx_ept_2m_page(void)
  271. {
  272. return vmx_capability.ept & VMX_EPT_2MB_PAGE_BIT;
  273. }
  274. static inline bool cpu_has_vmx_ept_1g_page(void)
  275. {
  276. return vmx_capability.ept & VMX_EPT_1GB_PAGE_BIT;
  277. }
  278. static inline bool cpu_has_vmx_invept_individual_addr(void)
  279. {
  280. return vmx_capability.ept & VMX_EPT_EXTENT_INDIVIDUAL_BIT;
  281. }
  282. static inline bool cpu_has_vmx_invept_context(void)
  283. {
  284. return vmx_capability.ept & VMX_EPT_EXTENT_CONTEXT_BIT;
  285. }
  286. static inline bool cpu_has_vmx_invept_global(void)
  287. {
  288. return vmx_capability.ept & VMX_EPT_EXTENT_GLOBAL_BIT;
  289. }
  290. static inline bool cpu_has_vmx_ept(void)
  291. {
  292. return vmcs_config.cpu_based_2nd_exec_ctrl &
  293. SECONDARY_EXEC_ENABLE_EPT;
  294. }
  295. static inline bool cpu_has_vmx_unrestricted_guest(void)
  296. {
  297. return vmcs_config.cpu_based_2nd_exec_ctrl &
  298. SECONDARY_EXEC_UNRESTRICTED_GUEST;
  299. }
  300. static inline bool cpu_has_vmx_ple(void)
  301. {
  302. return vmcs_config.cpu_based_2nd_exec_ctrl &
  303. SECONDARY_EXEC_PAUSE_LOOP_EXITING;
  304. }
  305. static inline bool vm_need_virtualize_apic_accesses(struct kvm *kvm)
  306. {
  307. return flexpriority_enabled && irqchip_in_kernel(kvm);
  308. }
  309. static inline bool cpu_has_vmx_vpid(void)
  310. {
  311. return vmcs_config.cpu_based_2nd_exec_ctrl &
  312. SECONDARY_EXEC_ENABLE_VPID;
  313. }
  314. static inline bool cpu_has_vmx_rdtscp(void)
  315. {
  316. return vmcs_config.cpu_based_2nd_exec_ctrl &
  317. SECONDARY_EXEC_RDTSCP;
  318. }
  319. static inline bool cpu_has_virtual_nmis(void)
  320. {
  321. return vmcs_config.pin_based_exec_ctrl & PIN_BASED_VIRTUAL_NMIS;
  322. }
  323. static inline bool report_flexpriority(void)
  324. {
  325. return flexpriority_enabled;
  326. }
  327. static int __find_msr_index(struct vcpu_vmx *vmx, u32 msr)
  328. {
  329. int i;
  330. for (i = 0; i < vmx->nmsrs; ++i)
  331. if (vmx_msr_index[vmx->guest_msrs[i].index] == msr)
  332. return i;
  333. return -1;
  334. }
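/*
 * INVVPID and INVEPT below take the invalidation type in a register and a
 * 128-bit descriptor in memory (built on the stack).  Failure is reported
 * through CF/ZF, so the "ja 1f ; ud2" sequence turns an unexpected
 * failure into a trap instead of silently continuing.
 */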
  335. static inline void __invvpid(int ext, u16 vpid, gva_t gva)
  336. {
  337. struct {
  338. u64 vpid : 16;
  339. u64 rsvd : 48;
  340. u64 gva;
  341. } operand = { vpid, 0, gva };
  342. asm volatile (__ex(ASM_VMX_INVVPID)
  343. /* CF==1 or ZF==1 --> rc = -1 */
  344. "; ja 1f ; ud2 ; 1:"
  345. : : "a"(&operand), "c"(ext) : "cc", "memory");
  346. }
  347. static inline void __invept(int ext, u64 eptp, gpa_t gpa)
  348. {
  349. struct {
  350. u64 eptp, gpa;
  351. } operand = {eptp, gpa};
  352. asm volatile (__ex(ASM_VMX_INVEPT)
  353. /* CF==1 or ZF==1 --> rc = -1 */
  354. "; ja 1f ; ud2 ; 1:\n"
  355. : : "a" (&operand), "c" (ext) : "cc", "memory");
  356. }
  357. static struct shared_msr_entry *find_msr_entry(struct vcpu_vmx *vmx, u32 msr)
  358. {
  359. int i;
  360. i = __find_msr_index(vmx, msr);
  361. if (i >= 0)
  362. return &vmx->guest_msrs[i];
  363. return NULL;
  364. }
  365. static void vmcs_clear(struct vmcs *vmcs)
  366. {
  367. u64 phys_addr = __pa(vmcs);
  368. u8 error;
  369. asm volatile (__ex(ASM_VMX_VMCLEAR_RAX) "; setna %0"
  370. : "=g"(error) : "a"(&phys_addr), "m"(phys_addr)
  371. : "cc", "memory");
  372. if (error)
  373. printk(KERN_ERR "kvm: vmclear fail: %p/%llx\n",
  374. vmcs, phys_addr);
  375. }
  376. static void __vcpu_clear(void *arg)
  377. {
  378. struct vcpu_vmx *vmx = arg;
  379. int cpu = raw_smp_processor_id();
  380. if (vmx->vcpu.cpu == cpu)
  381. vmcs_clear(vmx->vmcs);
  382. if (per_cpu(current_vmcs, cpu) == vmx->vmcs)
  383. per_cpu(current_vmcs, cpu) = NULL;
  384. rdtscll(vmx->vcpu.arch.host_tsc);
  385. list_del(&vmx->local_vcpus_link);
  386. vmx->vcpu.cpu = -1;
  387. vmx->launched = 0;
  388. }
  389. static void vcpu_clear(struct vcpu_vmx *vmx)
  390. {
  391. if (vmx->vcpu.cpu == -1)
  392. return;
  393. smp_call_function_single(vmx->vcpu.cpu, __vcpu_clear, vmx, 1);
  394. }
  395. static inline void vpid_sync_vcpu_all(struct vcpu_vmx *vmx)
  396. {
  397. if (vmx->vpid == 0)
  398. return;
  399. __invvpid(VMX_VPID_EXTENT_SINGLE_CONTEXT, vmx->vpid, 0);
  400. }
  401. static inline void ept_sync_global(void)
  402. {
  403. if (cpu_has_vmx_invept_global())
  404. __invept(VMX_EPT_EXTENT_GLOBAL, 0, 0);
  405. }
  406. static inline void ept_sync_context(u64 eptp)
  407. {
  408. if (enable_ept) {
  409. if (cpu_has_vmx_invept_context())
  410. __invept(VMX_EPT_EXTENT_CONTEXT, eptp, 0);
  411. else
  412. ept_sync_global();
  413. }
  414. }
  415. static inline void ept_sync_individual_addr(u64 eptp, gpa_t gpa)
  416. {
  417. if (enable_ept) {
  418. if (cpu_has_vmx_invept_individual_addr())
  419. __invept(VMX_EPT_EXTENT_INDIVIDUAL_ADDR,
  420. eptp, gpa);
  421. else
  422. ept_sync_context(eptp);
  423. }
  424. }
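/*
 * The ept_sync_*() helpers above fall back to a wider invalidation scope
 * when the CPU does not support the narrower one: individual address ->
 * single context -> global, so a required TLB flush is never silently
 * skipped.
 */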
  425. static unsigned long vmcs_readl(unsigned long field)
  426. {
  427. unsigned long value;
  428. asm volatile (__ex(ASM_VMX_VMREAD_RDX_RAX)
  429. : "=a"(value) : "d"(field) : "cc");
  430. return value;
  431. }
  432. static u16 vmcs_read16(unsigned long field)
  433. {
  434. return vmcs_readl(field);
  435. }
  436. static u32 vmcs_read32(unsigned long field)
  437. {
  438. return vmcs_readl(field);
  439. }
  440. static u64 vmcs_read64(unsigned long field)
  441. {
  442. #ifdef CONFIG_X86_64
  443. return vmcs_readl(field);
  444. #else
  445. return vmcs_readl(field) | ((u64)vmcs_readl(field+1) << 32);
  446. #endif
  447. }
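/*
 * 64-bit VMCS fields are encoded as a pair on 32-bit hosts: the low half
 * at 'field' and the high half at 'field + 1'.  That is why vmcs_read64()
 * above and vmcs_write64() below issue two VMREAD/VMWRITE operations when
 * CONFIG_X86_64 is not set.
 */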
  448. static noinline void vmwrite_error(unsigned long field, unsigned long value)
  449. {
  450. printk(KERN_ERR "vmwrite error: reg %lx value %lx (err %d)\n",
  451. field, value, vmcs_read32(VM_INSTRUCTION_ERROR));
  452. dump_stack();
  453. }
  454. static void vmcs_writel(unsigned long field, unsigned long value)
  455. {
  456. u8 error;
  457. asm volatile (__ex(ASM_VMX_VMWRITE_RAX_RDX) "; setna %0"
  458. : "=q"(error) : "a"(value), "d"(field) : "cc");
  459. if (unlikely(error))
  460. vmwrite_error(field, value);
  461. }
  462. static void vmcs_write16(unsigned long field, u16 value)
  463. {
  464. vmcs_writel(field, value);
  465. }
  466. static void vmcs_write32(unsigned long field, u32 value)
  467. {
  468. vmcs_writel(field, value);
  469. }
  470. static void vmcs_write64(unsigned long field, u64 value)
  471. {
  472. vmcs_writel(field, value);
  473. #ifndef CONFIG_X86_64
  474. asm volatile ("");
  475. vmcs_writel(field+1, value >> 32);
  476. #endif
  477. }
  478. static void vmcs_clear_bits(unsigned long field, u32 mask)
  479. {
  480. vmcs_writel(field, vmcs_readl(field) & ~mask);
  481. }
  482. static void vmcs_set_bits(unsigned long field, u32 mask)
  483. {
  484. vmcs_writel(field, vmcs_readl(field) | mask);
  485. }
  486. static void update_exception_bitmap(struct kvm_vcpu *vcpu)
  487. {
  488. u32 eb;
  489. eb = (1u << PF_VECTOR) | (1u << UD_VECTOR) | (1u << MC_VECTOR) |
  490. (1u << NM_VECTOR) | (1u << DB_VECTOR);
  491. if ((vcpu->guest_debug &
  492. (KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP)) ==
  493. (KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP))
  494. eb |= 1u << BP_VECTOR;
  495. if (to_vmx(vcpu)->rmode.vm86_active)
  496. eb = ~0;
  497. if (enable_ept)
  498. eb &= ~(1u << PF_VECTOR); /* bypass_guest_pf = 0 */
  499. if (vcpu->fpu_active)
  500. eb &= ~(1u << NM_VECTOR);
  501. vmcs_write32(EXCEPTION_BITMAP, eb);
  502. }
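/*
 * reload_tss(): LTR only accepts an available (type 9) TSS descriptor and
 * marks it busy, so the GDT entry is flipped back to "available" before
 * load_TR_desc() reloads TR with its full limit.
 */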
  503. static void reload_tss(void)
  504. {
  505. /*
  506. * VT restores TR but not its size. Useless.
  507. */
  508. struct desc_ptr gdt;
  509. struct desc_struct *descs;
  510. native_store_gdt(&gdt);
  511. descs = (void *)gdt.address;
  512. descs[GDT_ENTRY_TSS].type = 9; /* available TSS */
  513. load_TR_desc();
  514. }
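/*
 * update_transition_efer() builds the EFER value handed to the shared-MSR
 * machinery: bits that are emulated or handled by hardware anyway (NX,
 * LMA/LME, and SCE outside long mode) are copied from the host and masked
 * out, so only the bits that genuinely differ have to be switched on
 * guest entry and exit.
 */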
  515. static bool update_transition_efer(struct vcpu_vmx *vmx, int efer_offset)
  516. {
  517. u64 guest_efer;
  518. u64 ignore_bits;
  519. guest_efer = vmx->vcpu.arch.efer;
  520. /*
  521. * NX is emulated; LMA and LME handled by hardware; SCE is meaningless
  522. * outside long mode
  523. */
  524. ignore_bits = EFER_NX | EFER_SCE;
  525. #ifdef CONFIG_X86_64
  526. ignore_bits |= EFER_LMA | EFER_LME;
  527. /* SCE is meaningful only in long mode on Intel */
  528. if (guest_efer & EFER_LMA)
  529. ignore_bits &= ~(u64)EFER_SCE;
  530. #endif
  531. guest_efer &= ~ignore_bits;
  532. guest_efer |= host_efer & ignore_bits;
  533. vmx->guest_msrs[efer_offset].data = guest_efer;
  534. vmx->guest_msrs[efer_offset].mask = ~ignore_bits;
  535. return true;
  536. }
  537. static unsigned long segment_base(u16 selector)
  538. {
  539. struct desc_ptr gdt;
  540. struct desc_struct *d;
  541. unsigned long table_base;
  542. unsigned long v;
  543. if (!(selector & ~3))
  544. return 0;
  545. native_store_gdt(&gdt);
  546. table_base = gdt.address;
  547. if (selector & 4) { /* from ldt */
  548. u16 ldt_selector = kvm_read_ldt();
  549. if (!(ldt_selector & ~3))
  550. return 0;
  551. table_base = segment_base(ldt_selector);
  552. }
  553. d = (struct desc_struct *)(table_base + (selector & ~7));
  554. v = get_desc_base(d);
  555. #ifdef CONFIG_X86_64
  556. if (d->s == 0 && (d->type == 2 || d->type == 9 || d->type == 11))
  557. v |= ((unsigned long)((struct ldttss_desc64 *)d)->base3) << 32;
  558. #endif
  559. return v;
  560. }
  561. static inline unsigned long kvm_read_tr_base(void)
  562. {
  563. u16 tr;
  564. asm("str %0" : "=g"(tr));
  565. return segment_base(tr);
  566. }
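/*
 * vmx_save_host_state() below snapshots the host's FS/GS/LDT selectors
 * (and, on 64-bit, MSR_KERNEL_GS_BASE) once per vcpu_load;
 * __vmx_load_host_state() undoes it later, reloading only the state that
 * actually needs it.
 */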
  567. static void vmx_save_host_state(struct kvm_vcpu *vcpu)
  568. {
  569. struct vcpu_vmx *vmx = to_vmx(vcpu);
  570. int i;
  571. if (vmx->host_state.loaded)
  572. return;
  573. vmx->host_state.loaded = 1;
  574. /*
  575. * Set host fs and gs selectors. Unfortunately, 22.2.3 does not
  576. * allow segment selectors with cpl > 0 or ti == 1.
  577. */
  578. vmx->host_state.ldt_sel = kvm_read_ldt();
  579. vmx->host_state.gs_ldt_reload_needed = vmx->host_state.ldt_sel;
  580. vmx->host_state.fs_sel = kvm_read_fs();
  581. if (!(vmx->host_state.fs_sel & 7)) {
  582. vmcs_write16(HOST_FS_SELECTOR, vmx->host_state.fs_sel);
  583. vmx->host_state.fs_reload_needed = 0;
  584. } else {
  585. vmcs_write16(HOST_FS_SELECTOR, 0);
  586. vmx->host_state.fs_reload_needed = 1;
  587. }
  588. vmx->host_state.gs_sel = kvm_read_gs();
  589. if (!(vmx->host_state.gs_sel & 7))
  590. vmcs_write16(HOST_GS_SELECTOR, vmx->host_state.gs_sel);
  591. else {
  592. vmcs_write16(HOST_GS_SELECTOR, 0);
  593. vmx->host_state.gs_ldt_reload_needed = 1;
  594. }
  595. #ifdef CONFIG_X86_64
  596. vmcs_writel(HOST_FS_BASE, read_msr(MSR_FS_BASE));
  597. vmcs_writel(HOST_GS_BASE, read_msr(MSR_GS_BASE));
  598. #else
  599. vmcs_writel(HOST_FS_BASE, segment_base(vmx->host_state.fs_sel));
  600. vmcs_writel(HOST_GS_BASE, segment_base(vmx->host_state.gs_sel));
  601. #endif
  602. #ifdef CONFIG_X86_64
  603. if (is_long_mode(&vmx->vcpu)) {
  604. rdmsrl(MSR_KERNEL_GS_BASE, vmx->msr_host_kernel_gs_base);
  605. wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base);
  606. }
  607. #endif
  608. for (i = 0; i < vmx->save_nmsrs; ++i)
  609. kvm_set_shared_msr(vmx->guest_msrs[i].index,
  610. vmx->guest_msrs[i].data,
  611. vmx->guest_msrs[i].mask);
  612. }
  613. static void __vmx_load_host_state(struct vcpu_vmx *vmx)
  614. {
  615. unsigned long flags;
  616. if (!vmx->host_state.loaded)
  617. return;
  618. ++vmx->vcpu.stat.host_state_reload;
  619. vmx->host_state.loaded = 0;
  620. if (vmx->host_state.fs_reload_needed)
  621. kvm_load_fs(vmx->host_state.fs_sel);
  622. if (vmx->host_state.gs_ldt_reload_needed) {
  623. kvm_load_ldt(vmx->host_state.ldt_sel);
  624. /*
  625. * If we have to reload gs, we must take care to
  626. * preserve our gs base.
  627. */
  628. local_irq_save(flags);
  629. kvm_load_gs(vmx->host_state.gs_sel);
  630. #ifdef CONFIG_X86_64
  631. wrmsrl(MSR_GS_BASE, vmcs_readl(HOST_GS_BASE));
  632. #endif
  633. local_irq_restore(flags);
  634. }
  635. reload_tss();
  636. #ifdef CONFIG_X86_64
  637. if (is_long_mode(&vmx->vcpu)) {
  638. rdmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base);
  639. wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_host_kernel_gs_base);
  640. }
  641. #endif
  642. }
  643. static void vmx_load_host_state(struct vcpu_vmx *vmx)
  644. {
  645. preempt_disable();
  646. __vmx_load_host_state(vmx);
  647. preempt_enable();
  648. }
  649. /*
  650. * Switches to the specified vcpu, until a matching vcpu_put(), but assumes
  651. * the vcpu mutex is already taken.
  652. */
  653. static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
  654. {
  655. struct vcpu_vmx *vmx = to_vmx(vcpu);
  656. u64 phys_addr = __pa(vmx->vmcs);
  657. u64 tsc_this, delta, new_offset;
  658. if (vcpu->cpu != cpu) {
  659. vcpu_clear(vmx);
  660. kvm_migrate_timers(vcpu);
  661. set_bit(KVM_REQ_TLB_FLUSH, &vcpu->requests);
  662. local_irq_disable();
  663. list_add(&vmx->local_vcpus_link,
  664. &per_cpu(vcpus_on_cpu, cpu));
  665. local_irq_enable();
  666. }
  667. if (per_cpu(current_vmcs, cpu) != vmx->vmcs) {
  668. u8 error;
  669. per_cpu(current_vmcs, cpu) = vmx->vmcs;
  670. asm volatile (__ex(ASM_VMX_VMPTRLD_RAX) "; setna %0"
  671. : "=g"(error) : "a"(&phys_addr), "m"(phys_addr)
  672. : "cc");
  673. if (error)
  674. printk(KERN_ERR "kvm: vmptrld %p/%llx fail\n",
  675. vmx->vmcs, phys_addr);
  676. }
  677. if (vcpu->cpu != cpu) {
  678. struct desc_ptr dt;
  679. unsigned long sysenter_esp;
  680. vcpu->cpu = cpu;
  681. /*
  682. * Linux uses per-cpu TSS and GDT, so set these when switching
  683. * processors.
  684. */
  685. vmcs_writel(HOST_TR_BASE, kvm_read_tr_base()); /* 22.2.4 */
  686. native_store_gdt(&dt);
  687. vmcs_writel(HOST_GDTR_BASE, dt.address); /* 22.2.4 */
  688. rdmsrl(MSR_IA32_SYSENTER_ESP, sysenter_esp);
  689. vmcs_writel(HOST_IA32_SYSENTER_ESP, sysenter_esp); /* 22.2.3 */
  690. /*
  691. * Make sure the time stamp counter is monotonic.
  692. */
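/*
 * guest_tsc = host_tsc + TSC_OFFSET, so if the new CPU's TSC is behind
 * the previous one (e.g. old CPU read 1000, new CPU reads 900), the
 * difference is folded into TSC_OFFSET below and the guest never observes
 * its TSC going backwards.
 */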
  693. rdtscll(tsc_this);
  694. if (tsc_this < vcpu->arch.host_tsc) {
  695. delta = vcpu->arch.host_tsc - tsc_this;
  696. new_offset = vmcs_read64(TSC_OFFSET) + delta;
  697. vmcs_write64(TSC_OFFSET, new_offset);
  698. }
  699. }
  700. }
  701. static void vmx_vcpu_put(struct kvm_vcpu *vcpu)
  702. {
  703. __vmx_load_host_state(to_vmx(vcpu));
  704. }
  705. static void vmx_fpu_activate(struct kvm_vcpu *vcpu)
  706. {
  707. ulong cr0;
  708. if (vcpu->fpu_active)
  709. return;
  710. vcpu->fpu_active = 1;
  711. cr0 = vmcs_readl(GUEST_CR0);
  712. cr0 &= ~(X86_CR0_TS | X86_CR0_MP);
  713. cr0 |= kvm_read_cr0_bits(vcpu, X86_CR0_TS | X86_CR0_MP);
  714. vmcs_writel(GUEST_CR0, cr0);
  715. update_exception_bitmap(vcpu);
  716. vcpu->arch.cr0_guest_owned_bits = X86_CR0_TS;
  717. vmcs_writel(CR0_GUEST_HOST_MASK, ~vcpu->arch.cr0_guest_owned_bits);
  718. }
  719. static void vmx_decache_cr0_guest_bits(struct kvm_vcpu *vcpu);
  720. static void vmx_fpu_deactivate(struct kvm_vcpu *vcpu)
  721. {
  722. vmx_decache_cr0_guest_bits(vcpu);
  723. vmcs_set_bits(GUEST_CR0, X86_CR0_TS | X86_CR0_MP);
  724. update_exception_bitmap(vcpu);
  725. vcpu->arch.cr0_guest_owned_bits = 0;
  726. vmcs_writel(CR0_GUEST_HOST_MASK, ~vcpu->arch.cr0_guest_owned_bits);
  727. vmcs_writel(CR0_READ_SHADOW, vcpu->arch.cr0);
  728. }
  729. static unsigned long vmx_get_rflags(struct kvm_vcpu *vcpu)
  730. {
  731. unsigned long rflags, save_rflags;
  732. rflags = vmcs_readl(GUEST_RFLAGS);
  733. if (to_vmx(vcpu)->rmode.vm86_active) {
  734. rflags &= RMODE_GUEST_OWNED_EFLAGS_BITS;
  735. save_rflags = to_vmx(vcpu)->rmode.save_rflags;
  736. rflags |= save_rflags & ~RMODE_GUEST_OWNED_EFLAGS_BITS;
  737. }
  738. return rflags;
  739. }
  740. static void vmx_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
  741. {
  742. if (to_vmx(vcpu)->rmode.vm86_active) {
  743. to_vmx(vcpu)->rmode.save_rflags = rflags;
  744. rflags |= X86_EFLAGS_IOPL | X86_EFLAGS_VM;
  745. }
  746. vmcs_writel(GUEST_RFLAGS, rflags);
  747. }
  748. static u32 vmx_get_interrupt_shadow(struct kvm_vcpu *vcpu, int mask)
  749. {
  750. u32 interruptibility = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO);
  751. int ret = 0;
  752. if (interruptibility & GUEST_INTR_STATE_STI)
  753. ret |= KVM_X86_SHADOW_INT_STI;
  754. if (interruptibility & GUEST_INTR_STATE_MOV_SS)
  755. ret |= KVM_X86_SHADOW_INT_MOV_SS;
  756. return ret & mask;
  757. }
  758. static void vmx_set_interrupt_shadow(struct kvm_vcpu *vcpu, int mask)
  759. {
  760. u32 interruptibility_old = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO);
  761. u32 interruptibility = interruptibility_old;
  762. interruptibility &= ~(GUEST_INTR_STATE_STI | GUEST_INTR_STATE_MOV_SS);
  763. if (mask & KVM_X86_SHADOW_INT_MOV_SS)
  764. interruptibility |= GUEST_INTR_STATE_MOV_SS;
  765. else if (mask & KVM_X86_SHADOW_INT_STI)
  766. interruptibility |= GUEST_INTR_STATE_STI;
  767. if ((interruptibility != interruptibility_old))
  768. vmcs_write32(GUEST_INTERRUPTIBILITY_INFO, interruptibility);
  769. }
  770. static void skip_emulated_instruction(struct kvm_vcpu *vcpu)
  771. {
  772. unsigned long rip;
  773. rip = kvm_rip_read(vcpu);
  774. rip += vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
  775. kvm_rip_write(vcpu, rip);
  776. /* skipping an emulated instruction also counts */
  777. vmx_set_interrupt_shadow(vcpu, 0);
  778. }
  779. static void vmx_queue_exception(struct kvm_vcpu *vcpu, unsigned nr,
  780. bool has_error_code, u32 error_code)
  781. {
  782. struct vcpu_vmx *vmx = to_vmx(vcpu);
  783. u32 intr_info = nr | INTR_INFO_VALID_MASK;
  784. if (has_error_code) {
  785. vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE, error_code);
  786. intr_info |= INTR_INFO_DELIVER_CODE_MASK;
  787. }
  788. if (vmx->rmode.vm86_active) {
  789. vmx->rmode.irq.pending = true;
  790. vmx->rmode.irq.vector = nr;
  791. vmx->rmode.irq.rip = kvm_rip_read(vcpu);
  792. if (kvm_exception_is_soft(nr))
  793. vmx->rmode.irq.rip +=
  794. vmx->vcpu.arch.event_exit_inst_len;
  795. intr_info |= INTR_TYPE_SOFT_INTR;
  796. vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, intr_info);
  797. vmcs_write32(VM_ENTRY_INSTRUCTION_LEN, 1);
  798. kvm_rip_write(vcpu, vmx->rmode.irq.rip - 1);
  799. return;
  800. }
  801. if (kvm_exception_is_soft(nr)) {
  802. vmcs_write32(VM_ENTRY_INSTRUCTION_LEN,
  803. vmx->vcpu.arch.event_exit_inst_len);
  804. intr_info |= INTR_TYPE_SOFT_EXCEPTION;
  805. } else
  806. intr_info |= INTR_TYPE_HARD_EXCEPTION;
  807. vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, intr_info);
  808. }
  809. static bool vmx_rdtscp_supported(void)
  810. {
  811. return cpu_has_vmx_rdtscp();
  812. }
  813. /*
  814. * Swap MSR entry in host/guest MSR entry array.
  815. */
  816. static void move_msr_up(struct vcpu_vmx *vmx, int from, int to)
  817. {
  818. struct shared_msr_entry tmp;
  819. tmp = vmx->guest_msrs[to];
  820. vmx->guest_msrs[to] = vmx->guest_msrs[from];
  821. vmx->guest_msrs[from] = tmp;
  822. }
  823. /*
  824. * Set up the vmcs to automatically save and restore system
  825. * msrs. Don't touch the 64-bit msrs if the guest is in legacy
  826. * mode, as fiddling with msrs is very expensive.
  827. */
  828. static void setup_msrs(struct vcpu_vmx *vmx)
  829. {
  830. int save_nmsrs, index;
  831. unsigned long *msr_bitmap;
  832. vmx_load_host_state(vmx);
  833. save_nmsrs = 0;
  834. #ifdef CONFIG_X86_64
  835. if (is_long_mode(&vmx->vcpu)) {
  836. index = __find_msr_index(vmx, MSR_SYSCALL_MASK);
  837. if (index >= 0)
  838. move_msr_up(vmx, index, save_nmsrs++);
  839. index = __find_msr_index(vmx, MSR_LSTAR);
  840. if (index >= 0)
  841. move_msr_up(vmx, index, save_nmsrs++);
  842. index = __find_msr_index(vmx, MSR_CSTAR);
  843. if (index >= 0)
  844. move_msr_up(vmx, index, save_nmsrs++);
  845. index = __find_msr_index(vmx, MSR_TSC_AUX);
  846. if (index >= 0 && vmx->rdtscp_enabled)
  847. move_msr_up(vmx, index, save_nmsrs++);
  848. /*
  849. * MSR_K6_STAR is only needed on long mode guests, and only
  850. * if efer.sce is enabled.
  851. */
  852. index = __find_msr_index(vmx, MSR_K6_STAR);
  853. if ((index >= 0) && (vmx->vcpu.arch.efer & EFER_SCE))
  854. move_msr_up(vmx, index, save_nmsrs++);
  855. }
  856. #endif
  857. index = __find_msr_index(vmx, MSR_EFER);
  858. if (index >= 0 && update_transition_efer(vmx, index))
  859. move_msr_up(vmx, index, save_nmsrs++);
  860. vmx->save_nmsrs = save_nmsrs;
  861. if (cpu_has_vmx_msr_bitmap()) {
  862. if (is_long_mode(&vmx->vcpu))
  863. msr_bitmap = vmx_msr_bitmap_longmode;
  864. else
  865. msr_bitmap = vmx_msr_bitmap_legacy;
  866. vmcs_write64(MSR_BITMAP, __pa(msr_bitmap));
  867. }
  868. }
  869. /*
  870. * reads and returns guest's timestamp counter "register"
  871. * guest_tsc = host_tsc + tsc_offset -- 21.3
  872. */
  873. static u64 guest_read_tsc(void)
  874. {
  875. u64 host_tsc, tsc_offset;
  876. rdtscll(host_tsc);
  877. tsc_offset = vmcs_read64(TSC_OFFSET);
  878. return host_tsc + tsc_offset;
  879. }
  880. /*
  881. * writes 'guest_tsc' into guest's timestamp counter "register"
  882. * guest_tsc = host_tsc + tsc_offset ==> tsc_offset = guest_tsc - host_tsc
  883. */
  884. static void guest_write_tsc(u64 guest_tsc, u64 host_tsc)
  885. {
  886. vmcs_write64(TSC_OFFSET, guest_tsc - host_tsc);
  887. }
  888. /*
  889. * Reads an msr value (of 'msr_index') into 'pdata'.
  890. * Returns 0 on success, non-0 otherwise.
  891. * Assumes vcpu_load() was already called.
  892. */
  893. static int vmx_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
  894. {
  895. u64 data;
  896. struct shared_msr_entry *msr;
  897. if (!pdata) {
  898. printk(KERN_ERR "BUG: get_msr called with NULL pdata\n");
  899. return -EINVAL;
  900. }
  901. switch (msr_index) {
  902. #ifdef CONFIG_X86_64
  903. case MSR_FS_BASE:
  904. data = vmcs_readl(GUEST_FS_BASE);
  905. break;
  906. case MSR_GS_BASE:
  907. data = vmcs_readl(GUEST_GS_BASE);
  908. break;
  909. case MSR_KERNEL_GS_BASE:
  910. vmx_load_host_state(to_vmx(vcpu));
  911. data = to_vmx(vcpu)->msr_guest_kernel_gs_base;
  912. break;
  913. #endif
  914. case MSR_EFER:
  915. return kvm_get_msr_common(vcpu, msr_index, pdata);
  916. case MSR_IA32_TSC:
  917. data = guest_read_tsc();
  918. break;
  919. case MSR_IA32_SYSENTER_CS:
  920. data = vmcs_read32(GUEST_SYSENTER_CS);
  921. break;
  922. case MSR_IA32_SYSENTER_EIP:
  923. data = vmcs_readl(GUEST_SYSENTER_EIP);
  924. break;
  925. case MSR_IA32_SYSENTER_ESP:
  926. data = vmcs_readl(GUEST_SYSENTER_ESP);
  927. break;
  928. case MSR_TSC_AUX:
  929. if (!to_vmx(vcpu)->rdtscp_enabled)
  930. return 1;
  931. /* Otherwise falls through */
  932. default:
  933. vmx_load_host_state(to_vmx(vcpu));
  934. msr = find_msr_entry(to_vmx(vcpu), msr_index);
  935. if (msr) {
  936. vmx_load_host_state(to_vmx(vcpu));
  937. data = msr->data;
  938. break;
  939. }
  940. return kvm_get_msr_common(vcpu, msr_index, pdata);
  941. }
  942. *pdata = data;
  943. return 0;
  944. }
  945. /*
  946. * Writes msr value into the appropriate "register".
  947. * Returns 0 on success, non-0 otherwise.
  948. * Assumes vcpu_load() was already called.
  949. */
  950. static int vmx_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
  951. {
  952. struct vcpu_vmx *vmx = to_vmx(vcpu);
  953. struct shared_msr_entry *msr;
  954. u64 host_tsc;
  955. int ret = 0;
  956. switch (msr_index) {
  957. case MSR_EFER:
  958. vmx_load_host_state(vmx);
  959. ret = kvm_set_msr_common(vcpu, msr_index, data);
  960. break;
  961. #ifdef CONFIG_X86_64
  962. case MSR_FS_BASE:
  963. vmcs_writel(GUEST_FS_BASE, data);
  964. break;
  965. case MSR_GS_BASE:
  966. vmcs_writel(GUEST_GS_BASE, data);
  967. break;
  968. case MSR_KERNEL_GS_BASE:
  969. vmx_load_host_state(vmx);
  970. vmx->msr_guest_kernel_gs_base = data;
  971. break;
  972. #endif
  973. case MSR_IA32_SYSENTER_CS:
  974. vmcs_write32(GUEST_SYSENTER_CS, data);
  975. break;
  976. case MSR_IA32_SYSENTER_EIP:
  977. vmcs_writel(GUEST_SYSENTER_EIP, data);
  978. break;
  979. case MSR_IA32_SYSENTER_ESP:
  980. vmcs_writel(GUEST_SYSENTER_ESP, data);
  981. break;
  982. case MSR_IA32_TSC:
  983. rdtscll(host_tsc);
  984. guest_write_tsc(data, host_tsc);
  985. break;
  986. case MSR_IA32_CR_PAT:
  987. if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT) {
  988. vmcs_write64(GUEST_IA32_PAT, data);
  989. vcpu->arch.pat = data;
  990. break;
  991. }
  992. ret = kvm_set_msr_common(vcpu, msr_index, data);
  993. break;
  994. case MSR_TSC_AUX:
  995. if (!vmx->rdtscp_enabled)
  996. return 1;
  997. /* Check reserved bit, higher 32 bits should be zero */
  998. if ((data >> 32) != 0)
  999. return 1;
  1000. /* Otherwise falls through */
  1001. default:
  1002. msr = find_msr_entry(vmx, msr_index);
  1003. if (msr) {
  1004. vmx_load_host_state(vmx);
  1005. msr->data = data;
  1006. break;
  1007. }
  1008. ret = kvm_set_msr_common(vcpu, msr_index, data);
  1009. }
  1010. return ret;
  1011. }
  1012. static void vmx_cache_reg(struct kvm_vcpu *vcpu, enum kvm_reg reg)
  1013. {
  1014. __set_bit(reg, (unsigned long *)&vcpu->arch.regs_avail);
  1015. switch (reg) {
  1016. case VCPU_REGS_RSP:
  1017. vcpu->arch.regs[VCPU_REGS_RSP] = vmcs_readl(GUEST_RSP);
  1018. break;
  1019. case VCPU_REGS_RIP:
  1020. vcpu->arch.regs[VCPU_REGS_RIP] = vmcs_readl(GUEST_RIP);
  1021. break;
  1022. case VCPU_EXREG_PDPTR:
  1023. if (enable_ept)
  1024. ept_save_pdptrs(vcpu);
  1025. break;
  1026. default:
  1027. break;
  1028. }
  1029. }
  1030. static void set_guest_debug(struct kvm_vcpu *vcpu, struct kvm_guest_debug *dbg)
  1031. {
  1032. if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)
  1033. vmcs_writel(GUEST_DR7, dbg->arch.debugreg[7]);
  1034. else
  1035. vmcs_writel(GUEST_DR7, vcpu->arch.dr7);
  1036. update_exception_bitmap(vcpu);
  1037. }
  1038. static __init int cpu_has_kvm_support(void)
  1039. {
  1040. return cpu_has_vmx();
  1041. }
  1042. static __init int vmx_disabled_by_bios(void)
  1043. {
  1044. u64 msr;
  1045. rdmsrl(MSR_IA32_FEATURE_CONTROL, msr);
  1046. return (msr & (FEATURE_CONTROL_LOCKED |
  1047. FEATURE_CONTROL_VMXON_ENABLED))
  1048. == FEATURE_CONTROL_LOCKED;
  1049. /* locked but not enabled */
  1050. }
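/*
 * The BIOS programs FEATURE_CONTROL and may lock it.  If the MSR is
 * locked with the VMXON-enable bit clear, VMX cannot be turned on at all;
 * if it is still unlocked, hardware_enable() below sets both the enable
 * and lock bits itself before executing VMXON.
 */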
  1051. static int hardware_enable(void *garbage)
  1052. {
  1053. int cpu = raw_smp_processor_id();
  1054. u64 phys_addr = __pa(per_cpu(vmxarea, cpu));
  1055. u64 old;
  1056. if (read_cr4() & X86_CR4_VMXE)
  1057. return -EBUSY;
  1058. INIT_LIST_HEAD(&per_cpu(vcpus_on_cpu, cpu));
  1059. rdmsrl(MSR_IA32_FEATURE_CONTROL, old);
  1060. if ((old & (FEATURE_CONTROL_LOCKED |
  1061. FEATURE_CONTROL_VMXON_ENABLED))
  1062. != (FEATURE_CONTROL_LOCKED |
  1063. FEATURE_CONTROL_VMXON_ENABLED))
  1064. /* enable and lock */
  1065. wrmsrl(MSR_IA32_FEATURE_CONTROL, old |
  1066. FEATURE_CONTROL_LOCKED |
  1067. FEATURE_CONTROL_VMXON_ENABLED);
  1068. write_cr4(read_cr4() | X86_CR4_VMXE); /* FIXME: not cpu hotplug safe */
  1069. asm volatile (ASM_VMX_VMXON_RAX
  1070. : : "a"(&phys_addr), "m"(phys_addr)
  1071. : "memory", "cc");
  1072. ept_sync_global();
  1073. return 0;
  1074. }
  1075. static void vmclear_local_vcpus(void)
  1076. {
  1077. int cpu = raw_smp_processor_id();
  1078. struct vcpu_vmx *vmx, *n;
  1079. list_for_each_entry_safe(vmx, n, &per_cpu(vcpus_on_cpu, cpu),
  1080. local_vcpus_link)
  1081. __vcpu_clear(vmx);
  1082. }
  1083. /* Just like cpu_vmxoff(), but with the __kvm_handle_fault_on_reboot()
  1084. * tricks.
  1085. */
  1086. static void kvm_cpu_vmxoff(void)
  1087. {
  1088. asm volatile (__ex(ASM_VMX_VMXOFF) : : : "cc");
  1089. write_cr4(read_cr4() & ~X86_CR4_VMXE);
  1090. }
  1091. static void hardware_disable(void *garbage)
  1092. {
  1093. vmclear_local_vcpus();
  1094. kvm_cpu_vmxoff();
  1095. }
  1096. static __init int adjust_vmx_controls(u32 ctl_min, u32 ctl_opt,
  1097. u32 msr, u32 *result)
  1098. {
  1099. u32 vmx_msr_low, vmx_msr_high;
  1100. u32 ctl = ctl_min | ctl_opt;
  1101. rdmsr(msr, vmx_msr_low, vmx_msr_high);
  1102. ctl &= vmx_msr_high; /* bit == 0 in high word ==> must be zero */
  1103. ctl |= vmx_msr_low; /* bit == 1 in low word ==> must be one */
  1104. /* Ensure minimum (required) set of control bits are supported. */
  1105. if (ctl_min & ~ctl)
  1106. return -EIO;
  1107. *result = ctl;
  1108. return 0;
  1109. }
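/*
 * Worked example for adjust_vmx_controls(), with hypothetical MSR values:
 * if rdmsr() returned low = 0x16 and high = 0x7f, then ctl is first ORed
 * with 0x16 (controls that must be 1) and then ANDed with 0x7f (controls
 * that may be 1); any 'min' bit falling outside 0x7f makes the probe fail
 * with -EIO.
 */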
  1110. static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf)
  1111. {
  1112. u32 vmx_msr_low, vmx_msr_high;
  1113. u32 min, opt, min2, opt2;
  1114. u32 _pin_based_exec_control = 0;
  1115. u32 _cpu_based_exec_control = 0;
  1116. u32 _cpu_based_2nd_exec_control = 0;
  1117. u32 _vmexit_control = 0;
  1118. u32 _vmentry_control = 0;
  1119. min = PIN_BASED_EXT_INTR_MASK | PIN_BASED_NMI_EXITING;
  1120. opt = PIN_BASED_VIRTUAL_NMIS;
  1121. if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_PINBASED_CTLS,
  1122. &_pin_based_exec_control) < 0)
  1123. return -EIO;
  1124. min = CPU_BASED_HLT_EXITING |
  1125. #ifdef CONFIG_X86_64
  1126. CPU_BASED_CR8_LOAD_EXITING |
  1127. CPU_BASED_CR8_STORE_EXITING |
  1128. #endif
  1129. CPU_BASED_CR3_LOAD_EXITING |
  1130. CPU_BASED_CR3_STORE_EXITING |
  1131. CPU_BASED_USE_IO_BITMAPS |
  1132. CPU_BASED_MOV_DR_EXITING |
  1133. CPU_BASED_USE_TSC_OFFSETING |
  1134. CPU_BASED_MWAIT_EXITING |
  1135. CPU_BASED_MONITOR_EXITING |
  1136. CPU_BASED_INVLPG_EXITING;
  1137. opt = CPU_BASED_TPR_SHADOW |
  1138. CPU_BASED_USE_MSR_BITMAPS |
  1139. CPU_BASED_ACTIVATE_SECONDARY_CONTROLS;
  1140. if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_PROCBASED_CTLS,
  1141. &_cpu_based_exec_control) < 0)
  1142. return -EIO;
  1143. #ifdef CONFIG_X86_64
  1144. if ((_cpu_based_exec_control & CPU_BASED_TPR_SHADOW))
  1145. _cpu_based_exec_control &= ~CPU_BASED_CR8_LOAD_EXITING &
  1146. ~CPU_BASED_CR8_STORE_EXITING;
  1147. #endif
  1148. if (_cpu_based_exec_control & CPU_BASED_ACTIVATE_SECONDARY_CONTROLS) {
  1149. min2 = 0;
  1150. opt2 = SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
  1151. SECONDARY_EXEC_WBINVD_EXITING |
  1152. SECONDARY_EXEC_ENABLE_VPID |
  1153. SECONDARY_EXEC_ENABLE_EPT |
  1154. SECONDARY_EXEC_UNRESTRICTED_GUEST |
  1155. SECONDARY_EXEC_PAUSE_LOOP_EXITING |
  1156. SECONDARY_EXEC_RDTSCP;
  1157. if (adjust_vmx_controls(min2, opt2,
  1158. MSR_IA32_VMX_PROCBASED_CTLS2,
  1159. &_cpu_based_2nd_exec_control) < 0)
  1160. return -EIO;
  1161. }
  1162. #ifndef CONFIG_X86_64
  1163. if (!(_cpu_based_2nd_exec_control &
  1164. SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES))
  1165. _cpu_based_exec_control &= ~CPU_BASED_TPR_SHADOW;
  1166. #endif
  1167. if (_cpu_based_2nd_exec_control & SECONDARY_EXEC_ENABLE_EPT) {
  1168. /* CR3 accesses and invlpg don't need to cause VM Exits when EPT
  1169. is enabled */
  1170. _cpu_based_exec_control &= ~(CPU_BASED_CR3_LOAD_EXITING |
  1171. CPU_BASED_CR3_STORE_EXITING |
  1172. CPU_BASED_INVLPG_EXITING);
  1173. rdmsr(MSR_IA32_VMX_EPT_VPID_CAP,
  1174. vmx_capability.ept, vmx_capability.vpid);
  1175. }
  1176. min = 0;
  1177. #ifdef CONFIG_X86_64
  1178. min |= VM_EXIT_HOST_ADDR_SPACE_SIZE;
  1179. #endif
  1180. opt = VM_EXIT_SAVE_IA32_PAT | VM_EXIT_LOAD_IA32_PAT;
  1181. if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_EXIT_CTLS,
  1182. &_vmexit_control) < 0)
  1183. return -EIO;
  1184. min = 0;
  1185. opt = VM_ENTRY_LOAD_IA32_PAT;
  1186. if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_ENTRY_CTLS,
  1187. &_vmentry_control) < 0)
  1188. return -EIO;
  1189. rdmsr(MSR_IA32_VMX_BASIC, vmx_msr_low, vmx_msr_high);
  1190. /* IA-32 SDM Vol 3B: VMCS size is never greater than 4kB. */
  1191. if ((vmx_msr_high & 0x1fff) > PAGE_SIZE)
  1192. return -EIO;
  1193. #ifdef CONFIG_X86_64
  1194. /* IA-32 SDM Vol 3B: 64-bit CPUs always have VMX_BASIC_MSR[48]==0. */
  1195. if (vmx_msr_high & (1u<<16))
  1196. return -EIO;
  1197. #endif
  1198. /* Require Write-Back (WB) memory type for VMCS accesses. */
  1199. if (((vmx_msr_high >> 18) & 15) != 6)
  1200. return -EIO;
  1201. vmcs_conf->size = vmx_msr_high & 0x1fff;
  1202. vmcs_conf->order = get_order(vmcs_config.size);
  1203. vmcs_conf->revision_id = vmx_msr_low;
  1204. vmcs_conf->pin_based_exec_ctrl = _pin_based_exec_control;
  1205. vmcs_conf->cpu_based_exec_ctrl = _cpu_based_exec_control;
  1206. vmcs_conf->cpu_based_2nd_exec_ctrl = _cpu_based_2nd_exec_control;
  1207. vmcs_conf->vmexit_ctrl = _vmexit_control;
  1208. vmcs_conf->vmentry_ctrl = _vmentry_control;
  1209. return 0;
  1210. }
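/*
 * VMCS region allocation: setup_vmcs_config() above has already read the
 * region size, allocation order and revision identifier from
 * MSR_IA32_VMX_BASIC into vmcs_config.  Each region is allocated from the
 * node local to the target cpu, zeroed, and stamped with the revision id,
 * which the hardware expects at the very start of the region before the
 * region is ever VMPTRLDed.
 */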
  1211. static struct vmcs *alloc_vmcs_cpu(int cpu)
  1212. {
  1213. int node = cpu_to_node(cpu);
  1214. struct page *pages;
  1215. struct vmcs *vmcs;
  1216. pages = alloc_pages_exact_node(node, GFP_KERNEL, vmcs_config.order);
  1217. if (!pages)
  1218. return NULL;
  1219. vmcs = page_address(pages);
  1220. memset(vmcs, 0, vmcs_config.size);
  1221. vmcs->revision_id = vmcs_config.revision_id; /* vmcs revision id */
  1222. return vmcs;
  1223. }
  1224. static struct vmcs *alloc_vmcs(void)
  1225. {
  1226. return alloc_vmcs_cpu(raw_smp_processor_id());
  1227. }
  1228. static void free_vmcs(struct vmcs *vmcs)
  1229. {
  1230. free_pages((unsigned long)vmcs, vmcs_config.order);
  1231. }
  1232. static void free_kvm_area(void)
  1233. {
  1234. int cpu;
  1235. for_each_possible_cpu(cpu) {
  1236. free_vmcs(per_cpu(vmxarea, cpu));
  1237. per_cpu(vmxarea, cpu) = NULL;
  1238. }
  1239. }
  1240. static __init int alloc_kvm_area(void)
  1241. {
  1242. int cpu;
  1243. for_each_possible_cpu(cpu) {
  1244. struct vmcs *vmcs;
  1245. vmcs = alloc_vmcs_cpu(cpu);
  1246. if (!vmcs) {
  1247. free_kvm_area();
  1248. return -ENOMEM;
  1249. }
  1250. per_cpu(vmxarea, cpu) = vmcs;
  1251. }
  1252. return 0;
  1253. }
  1254. static __init int hardware_setup(void)
  1255. {
  1256. if (setup_vmcs_config(&vmcs_config) < 0)
  1257. return -EIO;
  1258. if (boot_cpu_has(X86_FEATURE_NX))
  1259. kvm_enable_efer_bits(EFER_NX);
  1260. if (!cpu_has_vmx_vpid())
  1261. enable_vpid = 0;
  1262. if (!cpu_has_vmx_ept()) {
  1263. enable_ept = 0;
  1264. enable_unrestricted_guest = 0;
  1265. }
  1266. if (!cpu_has_vmx_unrestricted_guest())
  1267. enable_unrestricted_guest = 0;
  1268. if (!cpu_has_vmx_flexpriority())
  1269. flexpriority_enabled = 0;
  1270. if (!cpu_has_vmx_tpr_shadow())
  1271. kvm_x86_ops->update_cr8_intercept = NULL;
  1272. if (enable_ept && !cpu_has_vmx_ept_2m_page())
  1273. kvm_disable_largepages();
  1274. if (!cpu_has_vmx_ple())
  1275. ple_gap = 0;
  1276. return alloc_kvm_area();
  1277. }
  1278. static __exit void hardware_unsetup(void)
  1279. {
  1280. free_kvm_area();
  1281. }
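/*
 * When the guest leaves vm86-based real mode, fix_pmode_dataseg() tries to
 * restore the protected-mode segment state that was stashed in vmx->rmode
 * on entry to real mode.  The saved state is only reused when the segment
 * base still matches; otherwise we fall back to a flat writable data
 * segment (AR bytes 0x93) whose DPL is copied from the current selector's
 * RPL.
 */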
  1282. static void fix_pmode_dataseg(int seg, struct kvm_save_segment *save)
  1283. {
  1284. struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];
  1285. if (vmcs_readl(sf->base) == save->base && (save->base & AR_S_MASK)) {
  1286. vmcs_write16(sf->selector, save->selector);
  1287. vmcs_writel(sf->base, save->base);
  1288. vmcs_write32(sf->limit, save->limit);
  1289. vmcs_write32(sf->ar_bytes, save->ar);
  1290. } else {
  1291. u32 dpl = (vmcs_read16(sf->selector) & SELECTOR_RPL_MASK)
  1292. << AR_DPL_SHIFT;
  1293. vmcs_write32(sf->ar_bytes, 0x93 | dpl);
  1294. }
  1295. }
  1296. static void enter_pmode(struct kvm_vcpu *vcpu)
  1297. {
  1298. unsigned long flags;
  1299. struct vcpu_vmx *vmx = to_vmx(vcpu);
  1300. vmx->emulation_required = 1;
  1301. vmx->rmode.vm86_active = 0;
  1302. vmcs_writel(GUEST_TR_BASE, vmx->rmode.tr.base);
  1303. vmcs_write32(GUEST_TR_LIMIT, vmx->rmode.tr.limit);
  1304. vmcs_write32(GUEST_TR_AR_BYTES, vmx->rmode.tr.ar);
  1305. flags = vmcs_readl(GUEST_RFLAGS);
  1306. flags &= RMODE_GUEST_OWNED_EFLAGS_BITS;
  1307. flags |= vmx->rmode.save_rflags & ~RMODE_GUEST_OWNED_EFLAGS_BITS;
  1308. vmcs_writel(GUEST_RFLAGS, flags);
  1309. vmcs_writel(GUEST_CR4, (vmcs_readl(GUEST_CR4) & ~X86_CR4_VME) |
  1310. (vmcs_readl(CR4_READ_SHADOW) & X86_CR4_VME));
  1311. update_exception_bitmap(vcpu);
  1312. if (emulate_invalid_guest_state)
  1313. return;
  1314. fix_pmode_dataseg(VCPU_SREG_ES, &vmx->rmode.es);
  1315. fix_pmode_dataseg(VCPU_SREG_DS, &vmx->rmode.ds);
  1316. fix_pmode_dataseg(VCPU_SREG_GS, &vmx->rmode.gs);
  1317. fix_pmode_dataseg(VCPU_SREG_FS, &vmx->rmode.fs);
  1318. vmcs_write16(GUEST_SS_SELECTOR, 0);
  1319. vmcs_write32(GUEST_SS_AR_BYTES, 0x93);
  1320. vmcs_write16(GUEST_CS_SELECTOR,
  1321. vmcs_read16(GUEST_CS_SELECTOR) & ~SELECTOR_RPL_MASK);
  1322. vmcs_write32(GUEST_CS_AR_BYTES, 0x9b);
  1323. }
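/*
 * Where the fake real-mode TSS lives: if userspace configured an address
 * (see vmx_set_tss_addr()) it is used as-is; otherwise we squat on the
 * last three pages of the first memory slot, which is also the amount of
 * space the TSS plus interrupt redirection map and I/O bitmap occupy, see
 * init_rmode_tss() below.
 */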
  1324. static gva_t rmode_tss_base(struct kvm *kvm)
  1325. {
  1326. if (!kvm->arch.tss_addr) {
  1327. struct kvm_memslots *slots;
  1328. gfn_t base_gfn;
  1329. slots = rcu_dereference(kvm->memslots);
  1330. base_gfn = kvm->memslots->memslots[0].base_gfn +
  1331. kvm->memslots->memslots[0].npages - 3;
  1332. return base_gfn << PAGE_SHIFT;
  1333. }
  1334. return kvm->arch.tss_addr;
  1335. }
  1336. static void fix_rmode_seg(int seg, struct kvm_save_segment *save)
  1337. {
  1338. struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];
  1339. save->selector = vmcs_read16(sf->selector);
  1340. save->base = vmcs_readl(sf->base);
  1341. save->limit = vmcs_read32(sf->limit);
  1342. save->ar = vmcs_read32(sf->ar_bytes);
  1343. vmcs_write16(sf->selector, save->base >> 4);
  1344. vmcs_write32(sf->base, save->base & 0xfffff);
  1345. vmcs_write32(sf->limit, 0xffff);
  1346. vmcs_write32(sf->ar_bytes, 0xf3);
  1347. }
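/*
 * Entering vm86-based real mode (rough outline): the current protected-mode
 * TR and flags are saved in vmx->rmode so enter_pmode() can restore them,
 * TR is pointed at the fake TSS from rmode_tss_base(), and IOPL/VM are set
 * in RFLAGS.  Data segments are then remapped to vm86 form: selector =
 * base >> 4, limit 0xffff, AR bytes 0xf3 (present, DPL 3, writable data);
 * e.g. a base of 0xf0000 becomes selector 0xf000.
 */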
  1348. static void enter_rmode(struct kvm_vcpu *vcpu)
  1349. {
  1350. unsigned long flags;
  1351. struct vcpu_vmx *vmx = to_vmx(vcpu);
  1352. if (enable_unrestricted_guest)
  1353. return;
  1354. vmx->emulation_required = 1;
  1355. vmx->rmode.vm86_active = 1;
  1356. vmx->rmode.tr.base = vmcs_readl(GUEST_TR_BASE);
  1357. vmcs_writel(GUEST_TR_BASE, rmode_tss_base(vcpu->kvm));
  1358. vmx->rmode.tr.limit = vmcs_read32(GUEST_TR_LIMIT);
  1359. vmcs_write32(GUEST_TR_LIMIT, RMODE_TSS_SIZE - 1);
  1360. vmx->rmode.tr.ar = vmcs_read32(GUEST_TR_AR_BYTES);
  1361. vmcs_write32(GUEST_TR_AR_BYTES, 0x008b);
  1362. flags = vmcs_readl(GUEST_RFLAGS);
  1363. vmx->rmode.save_rflags = flags;
  1364. flags |= X86_EFLAGS_IOPL | X86_EFLAGS_VM;
  1365. vmcs_writel(GUEST_RFLAGS, flags);
  1366. vmcs_writel(GUEST_CR4, vmcs_readl(GUEST_CR4) | X86_CR4_VME);
  1367. update_exception_bitmap(vcpu);
  1368. if (emulate_invalid_guest_state)
  1369. goto continue_rmode;
  1370. vmcs_write16(GUEST_SS_SELECTOR, vmcs_readl(GUEST_SS_BASE) >> 4);
  1371. vmcs_write32(GUEST_SS_LIMIT, 0xffff);
  1372. vmcs_write32(GUEST_SS_AR_BYTES, 0xf3);
  1373. vmcs_write32(GUEST_CS_AR_BYTES, 0xf3);
  1374. vmcs_write32(GUEST_CS_LIMIT, 0xffff);
  1375. if (vmcs_readl(GUEST_CS_BASE) == 0xffff0000)
  1376. vmcs_writel(GUEST_CS_BASE, 0xf0000);
  1377. vmcs_write16(GUEST_CS_SELECTOR, vmcs_readl(GUEST_CS_BASE) >> 4);
  1378. fix_rmode_seg(VCPU_SREG_ES, &vmx->rmode.es);
  1379. fix_rmode_seg(VCPU_SREG_DS, &vmx->rmode.ds);
  1380. fix_rmode_seg(VCPU_SREG_GS, &vmx->rmode.gs);
  1381. fix_rmode_seg(VCPU_SREG_FS, &vmx->rmode.fs);
  1382. continue_rmode:
  1383. kvm_mmu_reset_context(vcpu);
  1384. init_rmode(vcpu->kvm);
  1385. }
  1386. static void vmx_set_efer(struct kvm_vcpu *vcpu, u64 efer)
  1387. {
  1388. struct vcpu_vmx *vmx = to_vmx(vcpu);
  1389. struct shared_msr_entry *msr = find_msr_entry(vmx, MSR_EFER);
  1390. if (!msr)
  1391. return;
  1392. /*
  1393. * Force kernel_gs_base reloading before EFER changes, as control
  1394. * of this msr depends on is_long_mode().
  1395. */
  1396. vmx_load_host_state(to_vmx(vcpu));
  1397. vcpu->arch.efer = efer;
  1398. if (efer & EFER_LMA) {
  1399. vmcs_write32(VM_ENTRY_CONTROLS,
  1400. vmcs_read32(VM_ENTRY_CONTROLS) |
  1401. VM_ENTRY_IA32E_MODE);
  1402. msr->data = efer;
  1403. } else {
  1404. vmcs_write32(VM_ENTRY_CONTROLS,
  1405. vmcs_read32(VM_ENTRY_CONTROLS) &
  1406. ~VM_ENTRY_IA32E_MODE);
  1407. msr->data = efer & ~EFER_LME;
  1408. }
  1409. setup_msrs(vmx);
  1410. }
  1411. #ifdef CONFIG_X86_64
  1412. static void enter_lmode(struct kvm_vcpu *vcpu)
  1413. {
  1414. u32 guest_tr_ar;
  1415. guest_tr_ar = vmcs_read32(GUEST_TR_AR_BYTES);
  1416. if ((guest_tr_ar & AR_TYPE_MASK) != AR_TYPE_BUSY_64_TSS) {
  1417. printk(KERN_DEBUG "%s: tss fixup for long mode. \n",
  1418. __func__);
  1419. vmcs_write32(GUEST_TR_AR_BYTES,
  1420. (guest_tr_ar & ~AR_TYPE_MASK)
  1421. | AR_TYPE_BUSY_64_TSS);
  1422. }
  1423. vcpu->arch.efer |= EFER_LMA;
  1424. vmx_set_efer(vcpu, vcpu->arch.efer);
  1425. }
  1426. static void exit_lmode(struct kvm_vcpu *vcpu)
  1427. {
  1428. vcpu->arch.efer &= ~EFER_LMA;
  1429. vmcs_write32(VM_ENTRY_CONTROLS,
  1430. vmcs_read32(VM_ENTRY_CONTROLS)
  1431. & ~VM_ENTRY_IA32E_MODE);
  1432. }
  1433. #endif
  1434. static void vmx_flush_tlb(struct kvm_vcpu *vcpu)
  1435. {
  1436. vpid_sync_vcpu_all(to_vmx(vcpu));
  1437. if (enable_ept)
  1438. ept_sync_context(construct_eptp(vcpu->arch.mmu.root_hpa));
  1439. }
  1440. static void vmx_decache_cr0_guest_bits(struct kvm_vcpu *vcpu)
  1441. {
  1442. ulong cr0_guest_owned_bits = vcpu->arch.cr0_guest_owned_bits;
  1443. vcpu->arch.cr0 &= ~cr0_guest_owned_bits;
  1444. vcpu->arch.cr0 |= vmcs_readl(GUEST_CR0) & cr0_guest_owned_bits;
  1445. }
  1446. static void vmx_decache_cr4_guest_bits(struct kvm_vcpu *vcpu)
  1447. {
  1448. ulong cr4_guest_owned_bits = vcpu->arch.cr4_guest_owned_bits;
  1449. vcpu->arch.cr4 &= ~cr4_guest_owned_bits;
  1450. vcpu->arch.cr4 |= vmcs_readl(GUEST_CR4) & cr4_guest_owned_bits;
  1451. }
  1452. static void ept_load_pdptrs(struct kvm_vcpu *vcpu)
  1453. {
  1454. if (!test_bit(VCPU_EXREG_PDPTR,
  1455. (unsigned long *)&vcpu->arch.regs_dirty))
  1456. return;
  1457. if (is_paging(vcpu) && is_pae(vcpu) && !is_long_mode(vcpu)) {
  1458. vmcs_write64(GUEST_PDPTR0, vcpu->arch.pdptrs[0]);
  1459. vmcs_write64(GUEST_PDPTR1, vcpu->arch.pdptrs[1]);
  1460. vmcs_write64(GUEST_PDPTR2, vcpu->arch.pdptrs[2]);
  1461. vmcs_write64(GUEST_PDPTR3, vcpu->arch.pdptrs[3]);
  1462. }
  1463. }
  1464. static void ept_save_pdptrs(struct kvm_vcpu *vcpu)
  1465. {
  1466. if (is_paging(vcpu) && is_pae(vcpu) && !is_long_mode(vcpu)) {
  1467. vcpu->arch.pdptrs[0] = vmcs_read64(GUEST_PDPTR0);
  1468. vcpu->arch.pdptrs[1] = vmcs_read64(GUEST_PDPTR1);
  1469. vcpu->arch.pdptrs[2] = vmcs_read64(GUEST_PDPTR2);
  1470. vcpu->arch.pdptrs[3] = vmcs_read64(GUEST_PDPTR3);
  1471. }
  1472. __set_bit(VCPU_EXREG_PDPTR,
  1473. (unsigned long *)&vcpu->arch.regs_avail);
  1474. __set_bit(VCPU_EXREG_PDPTR,
  1475. (unsigned long *)&vcpu->arch.regs_dirty);
  1476. }
  1477. static void vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
  1478. static void ept_update_paging_mode_cr0(unsigned long *hw_cr0,
  1479. unsigned long cr0,
  1480. struct kvm_vcpu *vcpu)
  1481. {
  1482. if (!(cr0 & X86_CR0_PG)) {
  1483. /* From paging/starting to nonpaging */
  1484. vmcs_write32(CPU_BASED_VM_EXEC_CONTROL,
  1485. vmcs_read32(CPU_BASED_VM_EXEC_CONTROL) |
  1486. (CPU_BASED_CR3_LOAD_EXITING |
  1487. CPU_BASED_CR3_STORE_EXITING));
  1488. vcpu->arch.cr0 = cr0;
  1489. vmx_set_cr4(vcpu, kvm_read_cr4(vcpu));
  1490. } else if (!is_paging(vcpu)) {
  1491. /* From nonpaging to paging */
  1492. vmcs_write32(CPU_BASED_VM_EXEC_CONTROL,
  1493. vmcs_read32(CPU_BASED_VM_EXEC_CONTROL) &
  1494. ~(CPU_BASED_CR3_LOAD_EXITING |
  1495. CPU_BASED_CR3_STORE_EXITING));
  1496. vcpu->arch.cr0 = cr0;
  1497. vmx_set_cr4(vcpu, kvm_read_cr4(vcpu));
  1498. }
  1499. if (!(cr0 & X86_CR0_WP))
  1500. *hw_cr0 &= ~X86_CR0_WP;
  1501. }
  1502. static void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
  1503. {
  1504. struct vcpu_vmx *vmx = to_vmx(vcpu);
  1505. unsigned long hw_cr0;
  1506. if (enable_unrestricted_guest)
  1507. hw_cr0 = (cr0 & ~KVM_GUEST_CR0_MASK_UNRESTRICTED_GUEST)
  1508. | KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST;
  1509. else
  1510. hw_cr0 = (cr0 & ~KVM_GUEST_CR0_MASK) | KVM_VM_CR0_ALWAYS_ON;
  1511. if (vmx->rmode.vm86_active && (cr0 & X86_CR0_PE))
  1512. enter_pmode(vcpu);
  1513. if (!vmx->rmode.vm86_active && !(cr0 & X86_CR0_PE))
  1514. enter_rmode(vcpu);
  1515. #ifdef CONFIG_X86_64
  1516. if (vcpu->arch.efer & EFER_LME) {
  1517. if (!is_paging(vcpu) && (cr0 & X86_CR0_PG))
  1518. enter_lmode(vcpu);
  1519. if (is_paging(vcpu) && !(cr0 & X86_CR0_PG))
  1520. exit_lmode(vcpu);
  1521. }
  1522. #endif
  1523. if (enable_ept)
  1524. ept_update_paging_mode_cr0(&hw_cr0, cr0, vcpu);
  1525. if (!vcpu->fpu_active)
  1526. hw_cr0 |= X86_CR0_TS | X86_CR0_MP;
  1527. vmcs_writel(CR0_READ_SHADOW, cr0);
  1528. vmcs_writel(GUEST_CR0, hw_cr0);
  1529. vcpu->arch.cr0 = cr0;
  1530. }
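/*
 * EPT pointer layout as used below: bits 2:0 hold the memory type used
 * for accessing the EPT paging structures (write-back here), bits 5:3
 * hold the page-walk length field (VMX_EPT_DEFAULT_GAW, encoding a
 * 4-level walk), and the page-aligned upper bits hold the physical
 * address of the root table.  Assuming the usual WB (6) and GAW (3)
 * values, a root at 0x123000 would yield 0x123000 | (3 << 3) | 6 =
 * 0x12301e.
 */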
  1531. static u64 construct_eptp(unsigned long root_hpa)
  1532. {
  1533. u64 eptp;
1534. /* TODO: read this value from the MSR instead of hard-coding it */
  1535. eptp = VMX_EPT_DEFAULT_MT |
  1536. VMX_EPT_DEFAULT_GAW << VMX_EPT_GAW_EPTP_SHIFT;
  1537. eptp |= (root_hpa & PAGE_MASK);
  1538. return eptp;
  1539. }
  1540. static void vmx_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
  1541. {
  1542. unsigned long guest_cr3;
  1543. u64 eptp;
  1544. guest_cr3 = cr3;
  1545. if (enable_ept) {
  1546. eptp = construct_eptp(cr3);
  1547. vmcs_write64(EPT_POINTER, eptp);
  1548. guest_cr3 = is_paging(vcpu) ? vcpu->arch.cr3 :
  1549. vcpu->kvm->arch.ept_identity_map_addr;
  1550. ept_load_pdptrs(vcpu);
  1551. }
  1552. vmx_flush_tlb(vcpu);
  1553. vmcs_writel(GUEST_CR3, guest_cr3);
  1554. }
  1555. static void vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
  1556. {
  1557. unsigned long hw_cr4 = cr4 | (to_vmx(vcpu)->rmode.vm86_active ?
  1558. KVM_RMODE_VM_CR4_ALWAYS_ON : KVM_PMODE_VM_CR4_ALWAYS_ON);
  1559. vcpu->arch.cr4 = cr4;
  1560. if (enable_ept) {
  1561. if (!is_paging(vcpu)) {
  1562. hw_cr4 &= ~X86_CR4_PAE;
  1563. hw_cr4 |= X86_CR4_PSE;
  1564. } else if (!(cr4 & X86_CR4_PAE)) {
  1565. hw_cr4 &= ~X86_CR4_PAE;
  1566. }
  1567. }
  1568. vmcs_writel(CR4_READ_SHADOW, cr4);
  1569. vmcs_writel(GUEST_CR4, hw_cr4);
  1570. }
  1571. static u64 vmx_get_segment_base(struct kvm_vcpu *vcpu, int seg)
  1572. {
  1573. struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];
  1574. return vmcs_readl(sf->base);
  1575. }
  1576. static void vmx_get_segment(struct kvm_vcpu *vcpu,
  1577. struct kvm_segment *var, int seg)
  1578. {
  1579. struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];
  1580. u32 ar;
  1581. var->base = vmcs_readl(sf->base);
  1582. var->limit = vmcs_read32(sf->limit);
  1583. var->selector = vmcs_read16(sf->selector);
  1584. ar = vmcs_read32(sf->ar_bytes);
  1585. if ((ar & AR_UNUSABLE_MASK) && !emulate_invalid_guest_state)
  1586. ar = 0;
  1587. var->type = ar & 15;
  1588. var->s = (ar >> 4) & 1;
  1589. var->dpl = (ar >> 5) & 3;
  1590. var->present = (ar >> 7) & 1;
  1591. var->avl = (ar >> 12) & 1;
  1592. var->l = (ar >> 13) & 1;
  1593. var->db = (ar >> 14) & 1;
  1594. var->g = (ar >> 15) & 1;
  1595. var->unusable = (ar >> 16) & 1;
  1596. }
  1597. static int vmx_get_cpl(struct kvm_vcpu *vcpu)
  1598. {
  1599. if (!is_protmode(vcpu))
  1600. return 0;
  1601. if (vmx_get_rflags(vcpu) & X86_EFLAGS_VM) /* if virtual 8086 */
  1602. return 3;
  1603. return vmcs_read16(GUEST_CS_SELECTOR) & 3;
  1604. }
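/*
 * The VMCS access-rights (AR bytes) format packed and unpacked here
 * follows the guest segment AR layout: bits 3:0 type, bit 4 S, bits 6:5
 * DPL, bit 7 present, bit 12 AVL, bit 13 L, bit 14 D/B, bit 15 G and
 * bit 16 "segment unusable".  The constants used elsewhere in this file
 * decode accordingly, e.g. 0x9b = present, S=1, type 0xb (accessed
 * execute/read code) and 0xf3 = present, DPL 3, S=1, type 3 (accessed
 * read/write data).
 */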
  1605. static u32 vmx_segment_access_rights(struct kvm_segment *var)
  1606. {
  1607. u32 ar;
  1608. if (var->unusable)
  1609. ar = 1 << 16;
  1610. else {
  1611. ar = var->type & 15;
  1612. ar |= (var->s & 1) << 4;
  1613. ar |= (var->dpl & 3) << 5;
  1614. ar |= (var->present & 1) << 7;
  1615. ar |= (var->avl & 1) << 12;
  1616. ar |= (var->l & 1) << 13;
  1617. ar |= (var->db & 1) << 14;
  1618. ar |= (var->g & 1) << 15;
  1619. }
  1620. if (ar == 0) /* a 0 value means unusable */
  1621. ar = AR_UNUSABLE_MASK;
  1622. return ar;
  1623. }
  1624. static void vmx_set_segment(struct kvm_vcpu *vcpu,
  1625. struct kvm_segment *var, int seg)
  1626. {
  1627. struct vcpu_vmx *vmx = to_vmx(vcpu);
  1628. struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];
  1629. u32 ar;
  1630. if (vmx->rmode.vm86_active && seg == VCPU_SREG_TR) {
  1631. vmx->rmode.tr.selector = var->selector;
  1632. vmx->rmode.tr.base = var->base;
  1633. vmx->rmode.tr.limit = var->limit;
  1634. vmx->rmode.tr.ar = vmx_segment_access_rights(var);
  1635. return;
  1636. }
  1637. vmcs_writel(sf->base, var->base);
  1638. vmcs_write32(sf->limit, var->limit);
  1639. vmcs_write16(sf->selector, var->selector);
  1640. if (vmx->rmode.vm86_active && var->s) {
  1641. /*
  1642. * Hack real-mode segments into vm86 compatibility.
  1643. */
  1644. if (var->base == 0xffff0000 && var->selector == 0xf000)
  1645. vmcs_writel(sf->base, 0xf0000);
  1646. ar = 0xf3;
  1647. } else
  1648. ar = vmx_segment_access_rights(var);
  1649. /*
  1650. * Fix the "Accessed" bit in AR field of segment registers for older
  1651. * qemu binaries.
1652. * The IA32 architecture specifies that at processor reset the
1653. * "Accessed" bit in the AR field of segment registers is 1, but qemu
1654. * sets it to 0 in its userland cpu_reset code.  This causes an
1655. * invalid-guest-state vmexit when "unrestricted guest" mode is turned on.
1656. * A fix for this reset issue is being pushed into the qemu tree;
1657. * newer qemu binaries with that fix will not need this
1658. * kvm hack.
  1659. */
  1660. if (enable_unrestricted_guest && (seg != VCPU_SREG_LDTR))
  1661. ar |= 0x1; /* Accessed */
  1662. vmcs_write32(sf->ar_bytes, ar);
  1663. }
  1664. static void vmx_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l)
  1665. {
  1666. u32 ar = vmcs_read32(GUEST_CS_AR_BYTES);
  1667. *db = (ar >> 14) & 1;
  1668. *l = (ar >> 13) & 1;
  1669. }
  1670. static void vmx_get_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
  1671. {
  1672. dt->size = vmcs_read32(GUEST_IDTR_LIMIT);
  1673. dt->address = vmcs_readl(GUEST_IDTR_BASE);
  1674. }
  1675. static void vmx_set_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
  1676. {
  1677. vmcs_write32(GUEST_IDTR_LIMIT, dt->size);
  1678. vmcs_writel(GUEST_IDTR_BASE, dt->address);
  1679. }
  1680. static void vmx_get_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
  1681. {
  1682. dt->size = vmcs_read32(GUEST_GDTR_LIMIT);
  1683. dt->address = vmcs_readl(GUEST_GDTR_BASE);
  1684. }
  1685. static void vmx_set_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
  1686. {
  1687. vmcs_write32(GUEST_GDTR_LIMIT, dt->size);
  1688. vmcs_writel(GUEST_GDTR_BASE, dt->address);
  1689. }
  1690. static bool rmode_segment_valid(struct kvm_vcpu *vcpu, int seg)
  1691. {
  1692. struct kvm_segment var;
  1693. u32 ar;
  1694. vmx_get_segment(vcpu, &var, seg);
  1695. ar = vmx_segment_access_rights(&var);
  1696. if (var.base != (var.selector << 4))
  1697. return false;
  1698. if (var.limit != 0xffff)
  1699. return false;
  1700. if (ar != 0xf3)
  1701. return false;
  1702. return true;
  1703. }
  1704. static bool code_segment_valid(struct kvm_vcpu *vcpu)
  1705. {
  1706. struct kvm_segment cs;
  1707. unsigned int cs_rpl;
  1708. vmx_get_segment(vcpu, &cs, VCPU_SREG_CS);
  1709. cs_rpl = cs.selector & SELECTOR_RPL_MASK;
  1710. if (cs.unusable)
  1711. return false;
  1712. if (~cs.type & (AR_TYPE_CODE_MASK|AR_TYPE_ACCESSES_MASK))
  1713. return false;
  1714. if (!cs.s)
  1715. return false;
  1716. if (cs.type & AR_TYPE_WRITEABLE_MASK) {
  1717. if (cs.dpl > cs_rpl)
  1718. return false;
  1719. } else {
  1720. if (cs.dpl != cs_rpl)
  1721. return false;
  1722. }
  1723. if (!cs.present)
  1724. return false;
  1725. /* TODO: Add Reserved field check, this'll require a new member in the kvm_segment_field structure */
  1726. return true;
  1727. }
  1728. static bool stack_segment_valid(struct kvm_vcpu *vcpu)
  1729. {
  1730. struct kvm_segment ss;
  1731. unsigned int ss_rpl;
  1732. vmx_get_segment(vcpu, &ss, VCPU_SREG_SS);
  1733. ss_rpl = ss.selector & SELECTOR_RPL_MASK;
  1734. if (ss.unusable)
  1735. return true;
  1736. if (ss.type != 3 && ss.type != 7)
  1737. return false;
  1738. if (!ss.s)
  1739. return false;
  1740. if (ss.dpl != ss_rpl) /* DPL != RPL */
  1741. return false;
  1742. if (!ss.present)
  1743. return false;
  1744. return true;
  1745. }
  1746. static bool data_segment_valid(struct kvm_vcpu *vcpu, int seg)
  1747. {
  1748. struct kvm_segment var;
  1749. unsigned int rpl;
  1750. vmx_get_segment(vcpu, &var, seg);
  1751. rpl = var.selector & SELECTOR_RPL_MASK;
  1752. if (var.unusable)
  1753. return true;
  1754. if (!var.s)
  1755. return false;
  1756. if (!var.present)
  1757. return false;
  1758. if (~var.type & (AR_TYPE_CODE_MASK|AR_TYPE_WRITEABLE_MASK)) {
  1759. if (var.dpl < rpl) /* DPL < RPL */
  1760. return false;
  1761. }
  1762. /* TODO: Add other members to kvm_segment_field to allow checking for other access
  1763. * rights flags
  1764. */
  1765. return true;
  1766. }
  1767. static bool tr_valid(struct kvm_vcpu *vcpu)
  1768. {
  1769. struct kvm_segment tr;
  1770. vmx_get_segment(vcpu, &tr, VCPU_SREG_TR);
  1771. if (tr.unusable)
  1772. return false;
  1773. if (tr.selector & SELECTOR_TI_MASK) /* TI = 1 */
  1774. return false;
  1775. if (tr.type != 3 && tr.type != 11) /* TODO: Check if guest is in IA32e mode */
  1776. return false;
  1777. if (!tr.present)
  1778. return false;
  1779. return true;
  1780. }
  1781. static bool ldtr_valid(struct kvm_vcpu *vcpu)
  1782. {
  1783. struct kvm_segment ldtr;
  1784. vmx_get_segment(vcpu, &ldtr, VCPU_SREG_LDTR);
  1785. if (ldtr.unusable)
  1786. return true;
  1787. if (ldtr.selector & SELECTOR_TI_MASK) /* TI = 1 */
  1788. return false;
  1789. if (ldtr.type != 2)
  1790. return false;
  1791. if (!ldtr.present)
  1792. return false;
  1793. return true;
  1794. }
  1795. static bool cs_ss_rpl_check(struct kvm_vcpu *vcpu)
  1796. {
  1797. struct kvm_segment cs, ss;
  1798. vmx_get_segment(vcpu, &cs, VCPU_SREG_CS);
  1799. vmx_get_segment(vcpu, &ss, VCPU_SREG_SS);
  1800. return ((cs.selector & SELECTOR_RPL_MASK) ==
  1801. (ss.selector & SELECTOR_RPL_MASK));
  1802. }
  1803. /*
  1804. * Check if guest state is valid. Returns true if valid, false if
  1805. * not.
  1806. * We assume that registers are always usable
  1807. */
  1808. static bool guest_state_valid(struct kvm_vcpu *vcpu)
  1809. {
  1810. /* real mode guest state checks */
  1811. if (!is_protmode(vcpu)) {
  1812. if (!rmode_segment_valid(vcpu, VCPU_SREG_CS))
  1813. return false;
  1814. if (!rmode_segment_valid(vcpu, VCPU_SREG_SS))
  1815. return false;
  1816. if (!rmode_segment_valid(vcpu, VCPU_SREG_DS))
  1817. return false;
  1818. if (!rmode_segment_valid(vcpu, VCPU_SREG_ES))
  1819. return false;
  1820. if (!rmode_segment_valid(vcpu, VCPU_SREG_FS))
  1821. return false;
  1822. if (!rmode_segment_valid(vcpu, VCPU_SREG_GS))
  1823. return false;
  1824. } else {
  1825. /* protected mode guest state checks */
  1826. if (!cs_ss_rpl_check(vcpu))
  1827. return false;
  1828. if (!code_segment_valid(vcpu))
  1829. return false;
  1830. if (!stack_segment_valid(vcpu))
  1831. return false;
  1832. if (!data_segment_valid(vcpu, VCPU_SREG_DS))
  1833. return false;
  1834. if (!data_segment_valid(vcpu, VCPU_SREG_ES))
  1835. return false;
  1836. if (!data_segment_valid(vcpu, VCPU_SREG_FS))
  1837. return false;
  1838. if (!data_segment_valid(vcpu, VCPU_SREG_GS))
  1839. return false;
  1840. if (!tr_valid(vcpu))
  1841. return false;
  1842. if (!ldtr_valid(vcpu))
  1843. return false;
  1844. }
  1845. /* TODO:
  1846. * - Add checks on RIP
  1847. * - Add checks on RFLAGS
  1848. */
  1849. return true;
  1850. }
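/*
 * The fake real-mode TSS initialised below spans three guest pages (see
 * rmode_tss_base()): the pages are cleared, the I/O bitmap base offset in
 * the TSS is pointed past the TSS and interrupt redirection map
 * (TSS_BASE_SIZE + TSS_REDIRECTION_SIZE), and the final byte is set to
 * 0xff to terminate the I/O permission bitmap.
 */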
  1851. static int init_rmode_tss(struct kvm *kvm)
  1852. {
  1853. gfn_t fn = rmode_tss_base(kvm) >> PAGE_SHIFT;
  1854. u16 data = 0;
  1855. int ret = 0;
  1856. int r;
  1857. r = kvm_clear_guest_page(kvm, fn, 0, PAGE_SIZE);
  1858. if (r < 0)
  1859. goto out;
  1860. data = TSS_BASE_SIZE + TSS_REDIRECTION_SIZE;
  1861. r = kvm_write_guest_page(kvm, fn++, &data,
  1862. TSS_IOPB_BASE_OFFSET, sizeof(u16));
  1863. if (r < 0)
  1864. goto out;
  1865. r = kvm_clear_guest_page(kvm, fn++, 0, PAGE_SIZE);
  1866. if (r < 0)
  1867. goto out;
  1868. r = kvm_clear_guest_page(kvm, fn, 0, PAGE_SIZE);
  1869. if (r < 0)
  1870. goto out;
  1871. data = ~0;
  1872. r = kvm_write_guest_page(kvm, fn, &data,
  1873. RMODE_TSS_SIZE - 2 * PAGE_SIZE - 1,
  1874. sizeof(u8));
  1875. if (r < 0)
  1876. goto out;
  1877. ret = 1;
  1878. out:
  1879. return ret;
  1880. }
  1881. static int init_rmode_identity_map(struct kvm *kvm)
  1882. {
  1883. int i, r, ret;
  1884. pfn_t identity_map_pfn;
  1885. u32 tmp;
  1886. if (!enable_ept)
  1887. return 1;
  1888. if (unlikely(!kvm->arch.ept_identity_pagetable)) {
  1889. printk(KERN_ERR "EPT: identity-mapping pagetable "
1890. "hasn't been allocated!\n");
  1891. return 0;
  1892. }
  1893. if (likely(kvm->arch.ept_identity_pagetable_done))
  1894. return 1;
  1895. ret = 0;
  1896. identity_map_pfn = kvm->arch.ept_identity_map_addr >> PAGE_SHIFT;
  1897. r = kvm_clear_guest_page(kvm, identity_map_pfn, 0, PAGE_SIZE);
  1898. if (r < 0)
  1899. goto out;
  1900. /* Set up identity-mapping pagetable for EPT in real mode */
  1901. for (i = 0; i < PT32_ENT_PER_PAGE; i++) {
  1902. tmp = (i << 22) + (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER |
  1903. _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_PSE);
  1904. r = kvm_write_guest_page(kvm, identity_map_pfn,
  1905. &tmp, i * sizeof(tmp), sizeof(tmp));
  1906. if (r < 0)
  1907. goto out;
  1908. }
  1909. kvm->arch.ept_identity_pagetable_done = true;
  1910. ret = 1;
  1911. out:
  1912. return ret;
  1913. }
  1914. static void seg_setup(int seg)
  1915. {
  1916. struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];
  1917. unsigned int ar;
  1918. vmcs_write16(sf->selector, 0);
  1919. vmcs_writel(sf->base, 0);
  1920. vmcs_write32(sf->limit, 0xffff);
  1921. if (enable_unrestricted_guest) {
  1922. ar = 0x93;
  1923. if (seg == VCPU_SREG_CS)
  1924. ar |= 0x08; /* code segment */
  1925. } else
  1926. ar = 0xf3;
  1927. vmcs_write32(sf->ar_bytes, ar);
  1928. }
  1929. static int alloc_apic_access_page(struct kvm *kvm)
  1930. {
  1931. struct kvm_userspace_memory_region kvm_userspace_mem;
  1932. int r = 0;
  1933. mutex_lock(&kvm->slots_lock);
  1934. if (kvm->arch.apic_access_page)
  1935. goto out;
  1936. kvm_userspace_mem.slot = APIC_ACCESS_PAGE_PRIVATE_MEMSLOT;
  1937. kvm_userspace_mem.flags = 0;
  1938. kvm_userspace_mem.guest_phys_addr = 0xfee00000ULL;
  1939. kvm_userspace_mem.memory_size = PAGE_SIZE;
  1940. r = __kvm_set_memory_region(kvm, &kvm_userspace_mem, 0);
  1941. if (r)
  1942. goto out;
  1943. kvm->arch.apic_access_page = gfn_to_page(kvm, 0xfee00);
  1944. out:
  1945. mutex_unlock(&kvm->slots_lock);
  1946. return r;
  1947. }
  1948. static int alloc_identity_pagetable(struct kvm *kvm)
  1949. {
  1950. struct kvm_userspace_memory_region kvm_userspace_mem;
  1951. int r = 0;
  1952. mutex_lock(&kvm->slots_lock);
  1953. if (kvm->arch.ept_identity_pagetable)
  1954. goto out;
  1955. kvm_userspace_mem.slot = IDENTITY_PAGETABLE_PRIVATE_MEMSLOT;
  1956. kvm_userspace_mem.flags = 0;
  1957. kvm_userspace_mem.guest_phys_addr =
  1958. kvm->arch.ept_identity_map_addr;
  1959. kvm_userspace_mem.memory_size = PAGE_SIZE;
  1960. r = __kvm_set_memory_region(kvm, &kvm_userspace_mem, 0);
  1961. if (r)
  1962. goto out;
  1963. kvm->arch.ept_identity_pagetable = gfn_to_page(kvm,
  1964. kvm->arch.ept_identity_map_addr >> PAGE_SHIFT);
  1965. out:
  1966. mutex_unlock(&kvm->slots_lock);
  1967. return r;
  1968. }
  1969. static void allocate_vpid(struct vcpu_vmx *vmx)
  1970. {
  1971. int vpid;
  1972. vmx->vpid = 0;
  1973. if (!enable_vpid)
  1974. return;
  1975. spin_lock(&vmx_vpid_lock);
  1976. vpid = find_first_zero_bit(vmx_vpid_bitmap, VMX_NR_VPIDS);
  1977. if (vpid < VMX_NR_VPIDS) {
  1978. vmx->vpid = vpid;
  1979. __set_bit(vpid, vmx_vpid_bitmap);
  1980. }
  1981. spin_unlock(&vmx_vpid_lock);
  1982. }
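/*
 * MSR bitmap layout reminder for the helper below: the 4K bitmap page is
 * split into four 1K quarters - read-low (0x000), read-high (0x400),
 * write-low (0x800) and write-high (0xc00) - each holding one bit per MSR
 * for the ranges 0x00000000-0x00001fff and 0xc0000000-0xc0001fff.
 * Clearing a bit passes that access straight through to the guest, e.g.
 * clearing MSR_GS_BASE in the longmode bitmap lets the guest read and
 * write its GS base without an exit.
 */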
  1983. static void __vmx_disable_intercept_for_msr(unsigned long *msr_bitmap, u32 msr)
  1984. {
  1985. int f = sizeof(unsigned long);
  1986. if (!cpu_has_vmx_msr_bitmap())
  1987. return;
  1988. /*
  1989. * See Intel PRM Vol. 3, 20.6.9 (MSR-Bitmap Address). Early manuals
  1990. * have the write-low and read-high bitmap offsets the wrong way round.
  1991. * We can control MSRs 0x00000000-0x00001fff and 0xc0000000-0xc0001fff.
  1992. */
  1993. if (msr <= 0x1fff) {
  1994. __clear_bit(msr, msr_bitmap + 0x000 / f); /* read-low */
  1995. __clear_bit(msr, msr_bitmap + 0x800 / f); /* write-low */
  1996. } else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff)) {
  1997. msr &= 0x1fff;
  1998. __clear_bit(msr, msr_bitmap + 0x400 / f); /* read-high */
  1999. __clear_bit(msr, msr_bitmap + 0xc00 / f); /* write-high */
  2000. }
  2001. }
  2002. static void vmx_disable_intercept_for_msr(u32 msr, bool longmode_only)
  2003. {
  2004. if (!longmode_only)
  2005. __vmx_disable_intercept_for_msr(vmx_msr_bitmap_legacy, msr);
  2006. __vmx_disable_intercept_for_msr(vmx_msr_bitmap_longmode, msr);
  2007. }
  2008. /*
  2009. * Sets up the vmcs for emulated real mode.
  2010. */
  2011. static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
  2012. {
  2013. u32 host_sysenter_cs, msr_low, msr_high;
  2014. u32 junk;
  2015. u64 host_pat, tsc_this, tsc_base;
  2016. unsigned long a;
  2017. struct desc_ptr dt;
  2018. int i;
  2019. unsigned long kvm_vmx_return;
  2020. u32 exec_control;
  2021. /* I/O */
  2022. vmcs_write64(IO_BITMAP_A, __pa(vmx_io_bitmap_a));
  2023. vmcs_write64(IO_BITMAP_B, __pa(vmx_io_bitmap_b));
  2024. if (cpu_has_vmx_msr_bitmap())
  2025. vmcs_write64(MSR_BITMAP, __pa(vmx_msr_bitmap_legacy));
  2026. vmcs_write64(VMCS_LINK_POINTER, -1ull); /* 22.3.1.5 */
  2027. /* Control */
  2028. vmcs_write32(PIN_BASED_VM_EXEC_CONTROL,
  2029. vmcs_config.pin_based_exec_ctrl);
  2030. exec_control = vmcs_config.cpu_based_exec_ctrl;
  2031. if (!vm_need_tpr_shadow(vmx->vcpu.kvm)) {
  2032. exec_control &= ~CPU_BASED_TPR_SHADOW;
  2033. #ifdef CONFIG_X86_64
  2034. exec_control |= CPU_BASED_CR8_STORE_EXITING |
  2035. CPU_BASED_CR8_LOAD_EXITING;
  2036. #endif
  2037. }
  2038. if (!enable_ept)
  2039. exec_control |= CPU_BASED_CR3_STORE_EXITING |
  2040. CPU_BASED_CR3_LOAD_EXITING |
  2041. CPU_BASED_INVLPG_EXITING;
  2042. vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, exec_control);
  2043. if (cpu_has_secondary_exec_ctrls()) {
  2044. exec_control = vmcs_config.cpu_based_2nd_exec_ctrl;
  2045. if (!vm_need_virtualize_apic_accesses(vmx->vcpu.kvm))
  2046. exec_control &=
  2047. ~SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
  2048. if (vmx->vpid == 0)
  2049. exec_control &= ~SECONDARY_EXEC_ENABLE_VPID;
  2050. if (!enable_ept) {
  2051. exec_control &= ~SECONDARY_EXEC_ENABLE_EPT;
  2052. enable_unrestricted_guest = 0;
  2053. }
  2054. if (!enable_unrestricted_guest)
  2055. exec_control &= ~SECONDARY_EXEC_UNRESTRICTED_GUEST;
  2056. if (!ple_gap)
  2057. exec_control &= ~SECONDARY_EXEC_PAUSE_LOOP_EXITING;
  2058. vmcs_write32(SECONDARY_VM_EXEC_CONTROL, exec_control);
  2059. }
  2060. if (ple_gap) {
  2061. vmcs_write32(PLE_GAP, ple_gap);
  2062. vmcs_write32(PLE_WINDOW, ple_window);
  2063. }
  2064. vmcs_write32(PAGE_FAULT_ERROR_CODE_MASK, !!bypass_guest_pf);
  2065. vmcs_write32(PAGE_FAULT_ERROR_CODE_MATCH, !!bypass_guest_pf);
  2066. vmcs_write32(CR3_TARGET_COUNT, 0); /* 22.2.1 */
  2067. vmcs_writel(HOST_CR0, read_cr0()); /* 22.2.3 */
  2068. vmcs_writel(HOST_CR4, read_cr4()); /* 22.2.3, 22.2.5 */
  2069. vmcs_writel(HOST_CR3, read_cr3()); /* 22.2.3 FIXME: shadow tables */
  2070. vmcs_write16(HOST_CS_SELECTOR, __KERNEL_CS); /* 22.2.4 */
  2071. vmcs_write16(HOST_DS_SELECTOR, __KERNEL_DS); /* 22.2.4 */
  2072. vmcs_write16(HOST_ES_SELECTOR, __KERNEL_DS); /* 22.2.4 */
  2073. vmcs_write16(HOST_FS_SELECTOR, kvm_read_fs()); /* 22.2.4 */
  2074. vmcs_write16(HOST_GS_SELECTOR, kvm_read_gs()); /* 22.2.4 */
  2075. vmcs_write16(HOST_SS_SELECTOR, __KERNEL_DS); /* 22.2.4 */
  2076. #ifdef CONFIG_X86_64
  2077. rdmsrl(MSR_FS_BASE, a);
  2078. vmcs_writel(HOST_FS_BASE, a); /* 22.2.4 */
  2079. rdmsrl(MSR_GS_BASE, a);
  2080. vmcs_writel(HOST_GS_BASE, a); /* 22.2.4 */
  2081. #else
  2082. vmcs_writel(HOST_FS_BASE, 0); /* 22.2.4 */
  2083. vmcs_writel(HOST_GS_BASE, 0); /* 22.2.4 */
  2084. #endif
  2085. vmcs_write16(HOST_TR_SELECTOR, GDT_ENTRY_TSS*8); /* 22.2.4 */
  2086. native_store_idt(&dt);
  2087. vmcs_writel(HOST_IDTR_BASE, dt.address); /* 22.2.4 */
  2088. asm("mov $.Lkvm_vmx_return, %0" : "=r"(kvm_vmx_return));
  2089. vmcs_writel(HOST_RIP, kvm_vmx_return); /* 22.2.5 */
  2090. vmcs_write32(VM_EXIT_MSR_STORE_COUNT, 0);
  2091. vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, 0);
  2092. vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, 0);
  2093. rdmsr(MSR_IA32_SYSENTER_CS, host_sysenter_cs, junk);
  2094. vmcs_write32(HOST_IA32_SYSENTER_CS, host_sysenter_cs);
  2095. rdmsrl(MSR_IA32_SYSENTER_ESP, a);
  2096. vmcs_writel(HOST_IA32_SYSENTER_ESP, a); /* 22.2.3 */
  2097. rdmsrl(MSR_IA32_SYSENTER_EIP, a);
  2098. vmcs_writel(HOST_IA32_SYSENTER_EIP, a); /* 22.2.3 */
  2099. if (vmcs_config.vmexit_ctrl & VM_EXIT_LOAD_IA32_PAT) {
  2100. rdmsr(MSR_IA32_CR_PAT, msr_low, msr_high);
  2101. host_pat = msr_low | ((u64) msr_high << 32);
  2102. vmcs_write64(HOST_IA32_PAT, host_pat);
  2103. }
  2104. if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT) {
  2105. rdmsr(MSR_IA32_CR_PAT, msr_low, msr_high);
  2106. host_pat = msr_low | ((u64) msr_high << 32);
2107. /* Write the default value, following the host PAT */
  2108. vmcs_write64(GUEST_IA32_PAT, host_pat);
2109. /* Keep arch.pat in sync with GUEST_IA32_PAT */
  2110. vmx->vcpu.arch.pat = host_pat;
  2111. }
  2112. for (i = 0; i < NR_VMX_MSR; ++i) {
  2113. u32 index = vmx_msr_index[i];
  2114. u32 data_low, data_high;
  2115. int j = vmx->nmsrs;
  2116. if (rdmsr_safe(index, &data_low, &data_high) < 0)
  2117. continue;
  2118. if (wrmsr_safe(index, data_low, data_high) < 0)
  2119. continue;
  2120. vmx->guest_msrs[j].index = i;
  2121. vmx->guest_msrs[j].data = 0;
  2122. vmx->guest_msrs[j].mask = -1ull;
  2123. ++vmx->nmsrs;
  2124. }
  2125. vmcs_write32(VM_EXIT_CONTROLS, vmcs_config.vmexit_ctrl);
  2126. /* 22.2.1, 20.8.1 */
  2127. vmcs_write32(VM_ENTRY_CONTROLS, vmcs_config.vmentry_ctrl);
  2128. vmcs_writel(CR0_GUEST_HOST_MASK, ~0UL);
  2129. vmx->vcpu.arch.cr4_guest_owned_bits = KVM_CR4_GUEST_OWNED_BITS;
  2130. if (enable_ept)
  2131. vmx->vcpu.arch.cr4_guest_owned_bits |= X86_CR4_PGE;
  2132. vmcs_writel(CR4_GUEST_HOST_MASK, ~vmx->vcpu.arch.cr4_guest_owned_bits);
  2133. tsc_base = vmx->vcpu.kvm->arch.vm_init_tsc;
  2134. rdtscll(tsc_this);
  2135. if (tsc_this < vmx->vcpu.kvm->arch.vm_init_tsc)
  2136. tsc_base = tsc_this;
  2137. guest_write_tsc(0, tsc_base);
  2138. return 0;
  2139. }
  2140. static int init_rmode(struct kvm *kvm)
  2141. {
  2142. if (!init_rmode_tss(kvm))
  2143. return 0;
  2144. if (!init_rmode_identity_map(kvm))
  2145. return 0;
  2146. return 1;
  2147. }
  2148. static int vmx_vcpu_reset(struct kvm_vcpu *vcpu)
  2149. {
  2150. struct vcpu_vmx *vmx = to_vmx(vcpu);
  2151. u64 msr;
  2152. int ret, idx;
  2153. vcpu->arch.regs_avail = ~((1 << VCPU_REGS_RIP) | (1 << VCPU_REGS_RSP));
  2154. idx = srcu_read_lock(&vcpu->kvm->srcu);
  2155. if (!init_rmode(vmx->vcpu.kvm)) {
  2156. ret = -ENOMEM;
  2157. goto out;
  2158. }
  2159. vmx->rmode.vm86_active = 0;
  2160. vmx->soft_vnmi_blocked = 0;
  2161. vmx->vcpu.arch.regs[VCPU_REGS_RDX] = get_rdx_init_val();
  2162. kvm_set_cr8(&vmx->vcpu, 0);
  2163. msr = 0xfee00000 | MSR_IA32_APICBASE_ENABLE;
  2164. if (kvm_vcpu_is_bsp(&vmx->vcpu))
  2165. msr |= MSR_IA32_APICBASE_BSP;
  2166. kvm_set_apic_base(&vmx->vcpu, msr);
  2167. fx_init(&vmx->vcpu);
  2168. seg_setup(VCPU_SREG_CS);
  2169. /*
  2170. * GUEST_CS_BASE should really be 0xffff0000, but VT vm86 mode
  2171. * insists on having GUEST_CS_BASE == GUEST_CS_SELECTOR << 4. Sigh.
  2172. */
  2173. if (kvm_vcpu_is_bsp(&vmx->vcpu)) {
  2174. vmcs_write16(GUEST_CS_SELECTOR, 0xf000);
  2175. vmcs_writel(GUEST_CS_BASE, 0x000f0000);
  2176. } else {
  2177. vmcs_write16(GUEST_CS_SELECTOR, vmx->vcpu.arch.sipi_vector << 8);
  2178. vmcs_writel(GUEST_CS_BASE, vmx->vcpu.arch.sipi_vector << 12);
  2179. }
  2180. seg_setup(VCPU_SREG_DS);
  2181. seg_setup(VCPU_SREG_ES);
  2182. seg_setup(VCPU_SREG_FS);
  2183. seg_setup(VCPU_SREG_GS);
  2184. seg_setup(VCPU_SREG_SS);
  2185. vmcs_write16(GUEST_TR_SELECTOR, 0);
  2186. vmcs_writel(GUEST_TR_BASE, 0);
  2187. vmcs_write32(GUEST_TR_LIMIT, 0xffff);
  2188. vmcs_write32(GUEST_TR_AR_BYTES, 0x008b);
  2189. vmcs_write16(GUEST_LDTR_SELECTOR, 0);
  2190. vmcs_writel(GUEST_LDTR_BASE, 0);
  2191. vmcs_write32(GUEST_LDTR_LIMIT, 0xffff);
  2192. vmcs_write32(GUEST_LDTR_AR_BYTES, 0x00082);
  2193. vmcs_write32(GUEST_SYSENTER_CS, 0);
  2194. vmcs_writel(GUEST_SYSENTER_ESP, 0);
  2195. vmcs_writel(GUEST_SYSENTER_EIP, 0);
  2196. vmcs_writel(GUEST_RFLAGS, 0x02);
  2197. if (kvm_vcpu_is_bsp(&vmx->vcpu))
  2198. kvm_rip_write(vcpu, 0xfff0);
  2199. else
  2200. kvm_rip_write(vcpu, 0);
  2201. kvm_register_write(vcpu, VCPU_REGS_RSP, 0);
  2202. vmcs_writel(GUEST_DR7, 0x400);
  2203. vmcs_writel(GUEST_GDTR_BASE, 0);
  2204. vmcs_write32(GUEST_GDTR_LIMIT, 0xffff);
  2205. vmcs_writel(GUEST_IDTR_BASE, 0);
  2206. vmcs_write32(GUEST_IDTR_LIMIT, 0xffff);
  2207. vmcs_write32(GUEST_ACTIVITY_STATE, 0);
  2208. vmcs_write32(GUEST_INTERRUPTIBILITY_INFO, 0);
  2209. vmcs_write32(GUEST_PENDING_DBG_EXCEPTIONS, 0);
  2210. /* Special registers */
  2211. vmcs_write64(GUEST_IA32_DEBUGCTL, 0);
  2212. setup_msrs(vmx);
  2213. vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, 0); /* 22.2.1 */
  2214. if (cpu_has_vmx_tpr_shadow()) {
  2215. vmcs_write64(VIRTUAL_APIC_PAGE_ADDR, 0);
  2216. if (vm_need_tpr_shadow(vmx->vcpu.kvm))
  2217. vmcs_write64(VIRTUAL_APIC_PAGE_ADDR,
  2218. page_to_phys(vmx->vcpu.arch.apic->regs_page));
  2219. vmcs_write32(TPR_THRESHOLD, 0);
  2220. }
  2221. if (vm_need_virtualize_apic_accesses(vmx->vcpu.kvm))
  2222. vmcs_write64(APIC_ACCESS_ADDR,
  2223. page_to_phys(vmx->vcpu.kvm->arch.apic_access_page));
  2224. if (vmx->vpid != 0)
  2225. vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->vpid);
  2226. vmx->vcpu.arch.cr0 = X86_CR0_NW | X86_CR0_CD | X86_CR0_ET;
  2227. vmx_set_cr0(&vmx->vcpu, kvm_read_cr0(vcpu)); /* enter rmode */
  2228. vmx_set_cr4(&vmx->vcpu, 0);
  2229. vmx_set_efer(&vmx->vcpu, 0);
  2230. vmx_fpu_activate(&vmx->vcpu);
  2231. update_exception_bitmap(&vmx->vcpu);
  2232. vpid_sync_vcpu_all(vmx);
  2233. ret = 0;
  2234. /* HACK: Don't enable emulation on guest boot/reset */
  2235. vmx->emulation_required = 0;
  2236. out:
  2237. srcu_read_unlock(&vcpu->kvm->srcu, idx);
  2238. return ret;
  2239. }
  2240. static void enable_irq_window(struct kvm_vcpu *vcpu)
  2241. {
  2242. u32 cpu_based_vm_exec_control;
  2243. cpu_based_vm_exec_control = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL);
  2244. cpu_based_vm_exec_control |= CPU_BASED_VIRTUAL_INTR_PENDING;
  2245. vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control);
  2246. }
  2247. static void enable_nmi_window(struct kvm_vcpu *vcpu)
  2248. {
  2249. u32 cpu_based_vm_exec_control;
  2250. if (!cpu_has_virtual_nmis()) {
  2251. enable_irq_window(vcpu);
  2252. return;
  2253. }
  2254. cpu_based_vm_exec_control = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL);
  2255. cpu_based_vm_exec_control |= CPU_BASED_VIRTUAL_NMI_PENDING;
  2256. vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control);
  2257. }
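/*
 * Interrupt/NMI injection below has a vm86 special case: while the guest
 * runs in emulated real mode we cannot use the normal protected-mode event
 * injection, so the event is recorded in vmx->rmode.irq and injected as a
 * software interrupt with an instruction length of 1, with RIP rewound by
 * one byte.  To the guest, delivery then looks like a one-byte INT n
 * executed at the original RIP, i.e. it is vectored through the guest's
 * real-mode IVT.
 */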
  2258. static void vmx_inject_irq(struct kvm_vcpu *vcpu)
  2259. {
  2260. struct vcpu_vmx *vmx = to_vmx(vcpu);
  2261. uint32_t intr;
  2262. int irq = vcpu->arch.interrupt.nr;
  2263. trace_kvm_inj_virq(irq);
  2264. ++vcpu->stat.irq_injections;
  2265. if (vmx->rmode.vm86_active) {
  2266. vmx->rmode.irq.pending = true;
  2267. vmx->rmode.irq.vector = irq;
  2268. vmx->rmode.irq.rip = kvm_rip_read(vcpu);
  2269. if (vcpu->arch.interrupt.soft)
  2270. vmx->rmode.irq.rip +=
  2271. vmx->vcpu.arch.event_exit_inst_len;
  2272. vmcs_write32(VM_ENTRY_INTR_INFO_FIELD,
  2273. irq | INTR_TYPE_SOFT_INTR | INTR_INFO_VALID_MASK);
  2274. vmcs_write32(VM_ENTRY_INSTRUCTION_LEN, 1);
  2275. kvm_rip_write(vcpu, vmx->rmode.irq.rip - 1);
  2276. return;
  2277. }
  2278. intr = irq | INTR_INFO_VALID_MASK;
  2279. if (vcpu->arch.interrupt.soft) {
  2280. intr |= INTR_TYPE_SOFT_INTR;
  2281. vmcs_write32(VM_ENTRY_INSTRUCTION_LEN,
  2282. vmx->vcpu.arch.event_exit_inst_len);
  2283. } else
  2284. intr |= INTR_TYPE_EXT_INTR;
  2285. vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, intr);
  2286. }
  2287. static void vmx_inject_nmi(struct kvm_vcpu *vcpu)
  2288. {
  2289. struct vcpu_vmx *vmx = to_vmx(vcpu);
  2290. if (!cpu_has_virtual_nmis()) {
  2291. /*
  2292. * Tracking the NMI-blocked state in software is built upon
  2293. * finding the next open IRQ window. This, in turn, depends on
  2294. * well-behaving guests: They have to keep IRQs disabled at
  2295. * least as long as the NMI handler runs. Otherwise we may
  2296. * cause NMI nesting, maybe breaking the guest. But as this is
  2297. * highly unlikely, we can live with the residual risk.
  2298. */
  2299. vmx->soft_vnmi_blocked = 1;
  2300. vmx->vnmi_blocked_time = 0;
  2301. }
  2302. ++vcpu->stat.nmi_injections;
  2303. if (vmx->rmode.vm86_active) {
  2304. vmx->rmode.irq.pending = true;
  2305. vmx->rmode.irq.vector = NMI_VECTOR;
  2306. vmx->rmode.irq.rip = kvm_rip_read(vcpu);
  2307. vmcs_write32(VM_ENTRY_INTR_INFO_FIELD,
  2308. NMI_VECTOR | INTR_TYPE_SOFT_INTR |
  2309. INTR_INFO_VALID_MASK);
  2310. vmcs_write32(VM_ENTRY_INSTRUCTION_LEN, 1);
  2311. kvm_rip_write(vcpu, vmx->rmode.irq.rip - 1);
  2312. return;
  2313. }
  2314. vmcs_write32(VM_ENTRY_INTR_INFO_FIELD,
  2315. INTR_TYPE_NMI_INTR | INTR_INFO_VALID_MASK | NMI_VECTOR);
  2316. }
  2317. static int vmx_nmi_allowed(struct kvm_vcpu *vcpu)
  2318. {
  2319. if (!cpu_has_virtual_nmis() && to_vmx(vcpu)->soft_vnmi_blocked)
  2320. return 0;
  2321. return !(vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) &
  2322. (GUEST_INTR_STATE_STI | GUEST_INTR_STATE_MOV_SS |
  2323. GUEST_INTR_STATE_NMI));
  2324. }
  2325. static bool vmx_get_nmi_mask(struct kvm_vcpu *vcpu)
  2326. {
  2327. if (!cpu_has_virtual_nmis())
  2328. return to_vmx(vcpu)->soft_vnmi_blocked;
  2329. else
  2330. return !!(vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) &
  2331. GUEST_INTR_STATE_NMI);
  2332. }
  2333. static void vmx_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked)
  2334. {
  2335. struct vcpu_vmx *vmx = to_vmx(vcpu);
  2336. if (!cpu_has_virtual_nmis()) {
  2337. if (vmx->soft_vnmi_blocked != masked) {
  2338. vmx->soft_vnmi_blocked = masked;
  2339. vmx->vnmi_blocked_time = 0;
  2340. }
  2341. } else {
  2342. if (masked)
  2343. vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO,
  2344. GUEST_INTR_STATE_NMI);
  2345. else
  2346. vmcs_clear_bits(GUEST_INTERRUPTIBILITY_INFO,
  2347. GUEST_INTR_STATE_NMI);
  2348. }
  2349. }
  2350. static int vmx_interrupt_allowed(struct kvm_vcpu *vcpu)
  2351. {
  2352. return (vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_IF) &&
  2353. !(vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) &
  2354. (GUEST_INTR_STATE_STI | GUEST_INTR_STATE_MOV_SS));
  2355. }
  2356. static int vmx_set_tss_addr(struct kvm *kvm, unsigned int addr)
  2357. {
  2358. int ret;
  2359. struct kvm_userspace_memory_region tss_mem = {
  2360. .slot = TSS_PRIVATE_MEMSLOT,
  2361. .guest_phys_addr = addr,
  2362. .memory_size = PAGE_SIZE * 3,
  2363. .flags = 0,
  2364. };
  2365. ret = kvm_set_memory_region(kvm, &tss_mem, 0);
  2366. if (ret)
  2367. return ret;
  2368. kvm->arch.tss_addr = addr;
  2369. return 0;
  2370. }
  2371. static int handle_rmode_exception(struct kvm_vcpu *vcpu,
  2372. int vec, u32 err_code)
  2373. {
  2374. /*
2375. * Instructions with the address-size override prefix (opcode 0x67)
2376. * cause a #SS fault with error code 0 in VM86 mode.
  2377. */
  2378. if (((vec == GP_VECTOR) || (vec == SS_VECTOR)) && err_code == 0)
  2379. if (emulate_instruction(vcpu, 0, 0, 0) == EMULATE_DONE)
  2380. return 1;
  2381. /*
  2382. * Forward all other exceptions that are valid in real mode.
  2383. * FIXME: Breaks guest debugging in real mode, needs to be fixed with
  2384. * the required debugging infrastructure rework.
  2385. */
  2386. switch (vec) {
  2387. case DB_VECTOR:
  2388. if (vcpu->guest_debug &
  2389. (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP))
  2390. return 0;
  2391. kvm_queue_exception(vcpu, vec);
  2392. return 1;
  2393. case BP_VECTOR:
  2394. /*
  2395. * Update instruction length as we may reinject the exception
  2396. * from user space while in guest debugging mode.
  2397. */
  2398. to_vmx(vcpu)->vcpu.arch.event_exit_inst_len =
  2399. vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
  2400. if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP)
  2401. return 0;
  2402. /* fall through */
  2403. case DE_VECTOR:
  2404. case OF_VECTOR:
  2405. case BR_VECTOR:
  2406. case UD_VECTOR:
  2407. case DF_VECTOR:
  2408. case SS_VECTOR:
  2409. case GP_VECTOR:
  2410. case MF_VECTOR:
  2411. kvm_queue_exception(vcpu, vec);
  2412. return 1;
  2413. }
  2414. return 0;
  2415. }
  2416. /*
  2417. * Trigger machine check on the host. We assume all the MSRs are already set up
  2418. * by the CPU and that we still run on the same CPU as the MCE occurred on.
  2419. * We pass a fake environment to the machine check handler because we want
2420. * the guest to always be treated like user space, no matter what context
  2421. * it used internally.
  2422. */
  2423. static void kvm_machine_check(void)
  2424. {
  2425. #if defined(CONFIG_X86_MCE) && defined(CONFIG_X86_64)
  2426. struct pt_regs regs = {
  2427. .cs = 3, /* Fake ring 3 no matter what the guest ran on */
  2428. .flags = X86_EFLAGS_IF,
  2429. };
  2430. do_machine_check(&regs, 0);
  2431. #endif
  2432. }
  2433. static int handle_machine_check(struct kvm_vcpu *vcpu)
  2434. {
  2435. /* already handled by vcpu_run */
  2436. return 1;
  2437. }
  2438. static int handle_exception(struct kvm_vcpu *vcpu)
  2439. {
  2440. struct vcpu_vmx *vmx = to_vmx(vcpu);
  2441. struct kvm_run *kvm_run = vcpu->run;
  2442. u32 intr_info, ex_no, error_code;
  2443. unsigned long cr2, rip, dr6;
  2444. u32 vect_info;
  2445. enum emulation_result er;
  2446. vect_info = vmx->idt_vectoring_info;
  2447. intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
  2448. if (is_machine_check(intr_info))
  2449. return handle_machine_check(vcpu);
  2450. if ((vect_info & VECTORING_INFO_VALID_MASK) &&
  2451. !is_page_fault(intr_info)) {
  2452. vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
  2453. vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_SIMUL_EX;
  2454. vcpu->run->internal.ndata = 2;
  2455. vcpu->run->internal.data[0] = vect_info;
  2456. vcpu->run->internal.data[1] = intr_info;
  2457. return 0;
  2458. }
  2459. if ((intr_info & INTR_INFO_INTR_TYPE_MASK) == INTR_TYPE_NMI_INTR)
  2460. return 1; /* already handled by vmx_vcpu_run() */
  2461. if (is_no_device(intr_info)) {
  2462. vmx_fpu_activate(vcpu);
  2463. return 1;
  2464. }
  2465. if (is_invalid_opcode(intr_info)) {
  2466. er = emulate_instruction(vcpu, 0, 0, EMULTYPE_TRAP_UD);
  2467. if (er != EMULATE_DONE)
  2468. kvm_queue_exception(vcpu, UD_VECTOR);
  2469. return 1;
  2470. }
  2471. error_code = 0;
  2472. rip = kvm_rip_read(vcpu);
  2473. if (intr_info & INTR_INFO_DELIVER_CODE_MASK)
  2474. error_code = vmcs_read32(VM_EXIT_INTR_ERROR_CODE);
  2475. if (is_page_fault(intr_info)) {
  2476. /* EPT won't cause page fault directly */
  2477. if (enable_ept)
  2478. BUG();
  2479. cr2 = vmcs_readl(EXIT_QUALIFICATION);
  2480. trace_kvm_page_fault(cr2, error_code);
  2481. if (kvm_event_needs_reinjection(vcpu))
  2482. kvm_mmu_unprotect_page_virt(vcpu, cr2);
  2483. return kvm_mmu_page_fault(vcpu, cr2, error_code);
  2484. }
  2485. if (vmx->rmode.vm86_active &&
  2486. handle_rmode_exception(vcpu, intr_info & INTR_INFO_VECTOR_MASK,
  2487. error_code)) {
  2488. if (vcpu->arch.halt_request) {
  2489. vcpu->arch.halt_request = 0;
  2490. return kvm_emulate_halt(vcpu);
  2491. }
  2492. return 1;
  2493. }
  2494. ex_no = intr_info & INTR_INFO_VECTOR_MASK;
  2495. switch (ex_no) {
  2496. case DB_VECTOR:
  2497. dr6 = vmcs_readl(EXIT_QUALIFICATION);
  2498. if (!(vcpu->guest_debug &
  2499. (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP))) {
  2500. vcpu->arch.dr6 = dr6 | DR6_FIXED_1;
  2501. kvm_queue_exception(vcpu, DB_VECTOR);
  2502. return 1;
  2503. }
  2504. kvm_run->debug.arch.dr6 = dr6 | DR6_FIXED_1;
  2505. kvm_run->debug.arch.dr7 = vmcs_readl(GUEST_DR7);
  2506. /* fall through */
  2507. case BP_VECTOR:
  2508. /*
  2509. * Update instruction length as we may reinject #BP from
  2510. * user space while in guest debugging mode. Reading it for
  2511. * #DB as well causes no harm, it is not used in that case.
  2512. */
  2513. vmx->vcpu.arch.event_exit_inst_len =
  2514. vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
  2515. kvm_run->exit_reason = KVM_EXIT_DEBUG;
  2516. kvm_run->debug.arch.pc = vmcs_readl(GUEST_CS_BASE) + rip;
  2517. kvm_run->debug.arch.exception = ex_no;
  2518. break;
  2519. default:
  2520. kvm_run->exit_reason = KVM_EXIT_EXCEPTION;
  2521. kvm_run->ex.exception = ex_no;
  2522. kvm_run->ex.error_code = error_code;
  2523. break;
  2524. }
  2525. return 0;
  2526. }
  2527. static int handle_external_interrupt(struct kvm_vcpu *vcpu)
  2528. {
  2529. ++vcpu->stat.irq_exits;
  2530. return 1;
  2531. }
  2532. static int handle_triple_fault(struct kvm_vcpu *vcpu)
  2533. {
  2534. vcpu->run->exit_reason = KVM_EXIT_SHUTDOWN;
  2535. return 0;
  2536. }
  2537. static int handle_io(struct kvm_vcpu *vcpu)
  2538. {
  2539. unsigned long exit_qualification;
  2540. int size, in, string;
  2541. unsigned port;
  2542. exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
  2543. string = (exit_qualification & 16) != 0;
  2544. in = (exit_qualification & 8) != 0;
  2545. ++vcpu->stat.io_exits;
  2546. if (string || in)
  2547. return !(emulate_instruction(vcpu, 0, 0, 0) == EMULATE_DO_MMIO);
  2548. port = exit_qualification >> 16;
  2549. size = (exit_qualification & 7) + 1;
  2550. skip_emulated_instruction(vcpu);
  2551. return kvm_fast_pio_out(vcpu, size, port);
  2552. }
  2553. static void
  2554. vmx_patch_hypercall(struct kvm_vcpu *vcpu, unsigned char *hypercall)
  2555. {
  2556. /*
  2557. * Patch in the VMCALL instruction:
  2558. */
  2559. hypercall[0] = 0x0f;
  2560. hypercall[1] = 0x01;
  2561. hypercall[2] = 0xc1;
  2562. }
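/*
 * CR-access exit qualification decoding used below: bits 3:0 give the
 * control register number, bits 5:4 the access type (0 = MOV to CR,
 * 1 = MOV from CR, 2 = CLTS, 3 = LMSW), bits 11:8 the general-purpose
 * register involved in a MOV, and for LMSW the source operand is taken
 * from LMSW_SOURCE_DATA_SHIFT upwards.
 */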
  2563. static int handle_cr(struct kvm_vcpu *vcpu)
  2564. {
  2565. unsigned long exit_qualification, val;
  2566. int cr;
  2567. int reg;
  2568. exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
  2569. cr = exit_qualification & 15;
  2570. reg = (exit_qualification >> 8) & 15;
  2571. switch ((exit_qualification >> 4) & 3) {
  2572. case 0: /* mov to cr */
  2573. val = kvm_register_read(vcpu, reg);
  2574. trace_kvm_cr_write(cr, val);
  2575. switch (cr) {
  2576. case 0:
  2577. kvm_set_cr0(vcpu, val);
  2578. skip_emulated_instruction(vcpu);
  2579. return 1;
  2580. case 3:
  2581. kvm_set_cr3(vcpu, val);
  2582. skip_emulated_instruction(vcpu);
  2583. return 1;
  2584. case 4:
  2585. kvm_set_cr4(vcpu, val);
  2586. skip_emulated_instruction(vcpu);
  2587. return 1;
  2588. case 8: {
  2589. u8 cr8_prev = kvm_get_cr8(vcpu);
  2590. u8 cr8 = kvm_register_read(vcpu, reg);
  2591. kvm_set_cr8(vcpu, cr8);
  2592. skip_emulated_instruction(vcpu);
  2593. if (irqchip_in_kernel(vcpu->kvm))
  2594. return 1;
  2595. if (cr8_prev <= cr8)
  2596. return 1;
  2597. vcpu->run->exit_reason = KVM_EXIT_SET_TPR;
  2598. return 0;
  2599. }
  2600. };
  2601. break;
  2602. case 2: /* clts */
  2603. vmx_set_cr0(vcpu, kvm_read_cr0_bits(vcpu, ~X86_CR0_TS));
  2604. trace_kvm_cr_write(0, kvm_read_cr0(vcpu));
  2605. skip_emulated_instruction(vcpu);
  2606. vmx_fpu_activate(vcpu);
  2607. return 1;
  2608. case 1: /*mov from cr*/
  2609. switch (cr) {
  2610. case 3:
  2611. kvm_register_write(vcpu, reg, vcpu->arch.cr3);
  2612. trace_kvm_cr_read(cr, vcpu->arch.cr3);
  2613. skip_emulated_instruction(vcpu);
  2614. return 1;
  2615. case 8:
  2616. val = kvm_get_cr8(vcpu);
  2617. kvm_register_write(vcpu, reg, val);
  2618. trace_kvm_cr_read(cr, val);
  2619. skip_emulated_instruction(vcpu);
  2620. return 1;
  2621. }
  2622. break;
  2623. case 3: /* lmsw */
  2624. val = (exit_qualification >> LMSW_SOURCE_DATA_SHIFT) & 0x0f;
  2625. trace_kvm_cr_write(0, (kvm_read_cr0(vcpu) & ~0xful) | val);
  2626. kvm_lmsw(vcpu, val);
  2627. skip_emulated_instruction(vcpu);
  2628. return 1;
  2629. default:
  2630. break;
  2631. }
  2632. vcpu->run->exit_reason = 0;
  2633. pr_unimpl(vcpu, "unhandled control register: op %d cr %d\n",
  2634. (int)(exit_qualification >> 4) & 3, cr);
  2635. return 0;
  2636. }
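/*
 * Debug-register exits are decoded from the exit qualification below:
 * DEBUG_REG_ACCESS_NUM selects the debug register, TYPE_MOV_FROM_DR gives
 * the direction and DEBUG_REG_ACCESS_REG() the general-purpose register.
 * If DR7.GD was set, the access is instead reported as a debug event,
 * either to the host debugger (KVM_EXIT_DEBUG) or as a #DB injected into
 * the guest with DR6.BD set.
 */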
  2637. static int handle_dr(struct kvm_vcpu *vcpu)
  2638. {
  2639. unsigned long exit_qualification;
  2640. int dr, reg;
2641. /* Do not handle if CPL > 0; a #GP will be triggered on re-entry */
  2642. if (!kvm_require_cpl(vcpu, 0))
  2643. return 1;
  2644. dr = vmcs_readl(GUEST_DR7);
  2645. if (dr & DR7_GD) {
  2646. /*
  2647. * As the vm-exit takes precedence over the debug trap, we
  2648. * need to emulate the latter, either for the host or the
  2649. * guest debugging itself.
  2650. */
  2651. if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) {
  2652. vcpu->run->debug.arch.dr6 = vcpu->arch.dr6;
  2653. vcpu->run->debug.arch.dr7 = dr;
  2654. vcpu->run->debug.arch.pc =
  2655. vmcs_readl(GUEST_CS_BASE) +
  2656. vmcs_readl(GUEST_RIP);
  2657. vcpu->run->debug.arch.exception = DB_VECTOR;
  2658. vcpu->run->exit_reason = KVM_EXIT_DEBUG;
  2659. return 0;
  2660. } else {
  2661. vcpu->arch.dr7 &= ~DR7_GD;
  2662. vcpu->arch.dr6 |= DR6_BD;
  2663. vmcs_writel(GUEST_DR7, vcpu->arch.dr7);
  2664. kvm_queue_exception(vcpu, DB_VECTOR);
  2665. return 1;
  2666. }
  2667. }
  2668. exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
  2669. dr = exit_qualification & DEBUG_REG_ACCESS_NUM;
  2670. reg = DEBUG_REG_ACCESS_REG(exit_qualification);
  2671. if (exit_qualification & TYPE_MOV_FROM_DR) {
  2672. unsigned long val;
  2673. if (!kvm_get_dr(vcpu, dr, &val))
  2674. kvm_register_write(vcpu, reg, val);
  2675. } else
  2676. kvm_set_dr(vcpu, dr, vcpu->arch.regs[reg]);
  2677. skip_emulated_instruction(vcpu);
  2678. return 1;
  2679. }
  2680. static void vmx_set_dr7(struct kvm_vcpu *vcpu, unsigned long val)
  2681. {
  2682. vmcs_writel(GUEST_DR7, val);
  2683. }
  2684. static int handle_cpuid(struct kvm_vcpu *vcpu)
  2685. {
  2686. kvm_emulate_cpuid(vcpu);
  2687. return 1;
  2688. }
  2689. static int handle_rdmsr(struct kvm_vcpu *vcpu)
  2690. {
  2691. u32 ecx = vcpu->arch.regs[VCPU_REGS_RCX];
  2692. u64 data;
  2693. if (vmx_get_msr(vcpu, ecx, &data)) {
  2694. trace_kvm_msr_read_ex(ecx);
  2695. kvm_inject_gp(vcpu, 0);
  2696. return 1;
  2697. }
  2698. trace_kvm_msr_read(ecx, data);
  2699. /* FIXME: handling of bits 32:63 of rax, rdx */
  2700. vcpu->arch.regs[VCPU_REGS_RAX] = data & -1u;
  2701. vcpu->arch.regs[VCPU_REGS_RDX] = (data >> 32) & -1u;
  2702. skip_emulated_instruction(vcpu);
  2703. return 1;
  2704. }
  2705. static int handle_wrmsr(struct kvm_vcpu *vcpu)
  2706. {
  2707. u32 ecx = vcpu->arch.regs[VCPU_REGS_RCX];
  2708. u64 data = (vcpu->arch.regs[VCPU_REGS_RAX] & -1u)
  2709. | ((u64)(vcpu->arch.regs[VCPU_REGS_RDX] & -1u) << 32);
  2710. if (vmx_set_msr(vcpu, ecx, data) != 0) {
  2711. trace_kvm_msr_write_ex(ecx, data);
  2712. kvm_inject_gp(vcpu, 0);
  2713. return 1;
  2714. }
  2715. trace_kvm_msr_write(ecx, data);
  2716. skip_emulated_instruction(vcpu);
  2717. return 1;
  2718. }
static int handle_tpr_below_threshold(struct kvm_vcpu *vcpu)
{
        return 1;
}

static int handle_interrupt_window(struct kvm_vcpu *vcpu)
{
        u32 cpu_based_vm_exec_control;

        /* clear pending irq */
        cpu_based_vm_exec_control = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL);
        cpu_based_vm_exec_control &= ~CPU_BASED_VIRTUAL_INTR_PENDING;
        vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control);

        ++vcpu->stat.irq_window_exits;

        /*
         * If userspace is waiting to inject interrupts, exit as soon as
         * possible.
         */
        if (!irqchip_in_kernel(vcpu->kvm) &&
            vcpu->run->request_interrupt_window &&
            !kvm_cpu_has_interrupt(vcpu)) {
                vcpu->run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN;
                return 0;
        }
        return 1;
}
static int handle_halt(struct kvm_vcpu *vcpu)
{
        skip_emulated_instruction(vcpu);
        return kvm_emulate_halt(vcpu);
}

static int handle_vmcall(struct kvm_vcpu *vcpu)
{
        skip_emulated_instruction(vcpu);
        kvm_emulate_hypercall(vcpu);
        return 1;
}

static int handle_vmx_insn(struct kvm_vcpu *vcpu)
{
        kvm_queue_exception(vcpu, UD_VECTOR);
        return 1;
}

static int handle_invlpg(struct kvm_vcpu *vcpu)
{
        unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);

        kvm_mmu_invlpg(vcpu, exit_qualification);
        skip_emulated_instruction(vcpu);
        return 1;
}

static int handle_wbinvd(struct kvm_vcpu *vcpu)
{
        skip_emulated_instruction(vcpu);
        /* TODO: Add support for VT-d/pass-through device */
        return 1;
}
static int handle_apic_access(struct kvm_vcpu *vcpu)
{
        unsigned long exit_qualification;
        enum emulation_result er;
        unsigned long offset;

        exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
        offset = exit_qualification & 0xffful;

        er = emulate_instruction(vcpu, 0, 0, 0);
        if (er != EMULATE_DONE) {
                printk(KERN_ERR
                       "Failed to handle APIC access vmexit! Offset is 0x%lx\n",
                       offset);
                return -ENOEXEC;
        }
        return 1;
}
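/*
 * Task-switch exits: if the switch was triggered by an event that was being
 * delivered through a task gate, the partially delivered event is first
 * dropped from (or re-marked in) the corresponding queue so it is not
 * injected a second time after the emulated switch.  kvm_task_switch() then
 * performs the actual TSS work; on emulation failure we punt to userspace.
 */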
static int handle_task_switch(struct kvm_vcpu *vcpu)
{
        struct vcpu_vmx *vmx = to_vmx(vcpu);
        unsigned long exit_qualification;
        bool has_error_code = false;
        u32 error_code = 0;
        u16 tss_selector;
        int reason, type, idt_v;

        idt_v = (vmx->idt_vectoring_info & VECTORING_INFO_VALID_MASK);
        type = (vmx->idt_vectoring_info & VECTORING_INFO_TYPE_MASK);

        exit_qualification = vmcs_readl(EXIT_QUALIFICATION);

        reason = (u32)exit_qualification >> 30;
        if (reason == TASK_SWITCH_GATE && idt_v) {
                switch (type) {
                case INTR_TYPE_NMI_INTR:
                        vcpu->arch.nmi_injected = false;
                        if (cpu_has_virtual_nmis())
                                vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO,
                                              GUEST_INTR_STATE_NMI);
                        break;
                case INTR_TYPE_EXT_INTR:
                case INTR_TYPE_SOFT_INTR:
                        kvm_clear_interrupt_queue(vcpu);
                        break;
                case INTR_TYPE_HARD_EXCEPTION:
                        if (vmx->idt_vectoring_info &
                            VECTORING_INFO_DELIVER_CODE_MASK) {
                                has_error_code = true;
                                error_code =
                                        vmcs_read32(IDT_VECTORING_ERROR_CODE);
                        }
                        /* fall through */
                case INTR_TYPE_SOFT_EXCEPTION:
                        kvm_clear_exception_queue(vcpu);
                        break;
                default:
                        break;
                }
        }
        tss_selector = exit_qualification;

        if (!idt_v || (type != INTR_TYPE_HARD_EXCEPTION &&
                       type != INTR_TYPE_EXT_INTR &&
                       type != INTR_TYPE_NMI_INTR))
                skip_emulated_instruction(vcpu);

        if (kvm_task_switch(vcpu, tss_selector, reason,
                            has_error_code, error_code) == EMULATE_FAIL) {
                vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
                vcpu->run->internal.ndata = 0;
                return 0;
        }

        /*
         * Clear all local breakpoint enable flags (L0-L3, i.e. DR7 bits
         * 0, 2, 4 and 6).
         */
        vmcs_writel(GUEST_DR7, vmcs_readl(GUEST_DR7) & ~0x55);

        /*
         * TODO: What about debug traps on tss switch?
         *       Are we supposed to inject them and update dr6?
         */

        return 1;
}
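/*
 * EPT violation: the exit qualification describes the faulting access and,
 * in bits 7-8, whether the guest linear address field is valid (the
 * gla_validity check below).  The violation is then fed into the common MMU
 * code as a page fault on the guest-physical address.
 */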
static int handle_ept_violation(struct kvm_vcpu *vcpu)
{
        unsigned long exit_qualification;
        gpa_t gpa;
        int gla_validity;

        exit_qualification = vmcs_readl(EXIT_QUALIFICATION);

        if (exit_qualification & (1 << 6)) {
                printk(KERN_ERR "EPT: GPA exceeds GAW!\n");
                return -EINVAL;
        }

        gla_validity = (exit_qualification >> 7) & 0x3;
        if (gla_validity != 0x3 && gla_validity != 0x1 && gla_validity != 0) {
                printk(KERN_ERR "EPT: Handling EPT violation failed!\n");
                printk(KERN_ERR "EPT: GPA: 0x%lx, GVA: 0x%lx\n",
                       (long unsigned int)vmcs_read64(GUEST_PHYSICAL_ADDRESS),
                       vmcs_readl(GUEST_LINEAR_ADDRESS));
                printk(KERN_ERR "EPT: Exit qualification is 0x%lx\n",
                       (long unsigned int)exit_qualification);
                vcpu->run->exit_reason = KVM_EXIT_UNKNOWN;
                vcpu->run->hw.hardware_exit_reason = EXIT_REASON_EPT_VIOLATION;
                return 0;
        }

        gpa = vmcs_read64(GUEST_PHYSICAL_ADDRESS);
        trace_kvm_page_fault(gpa, exit_qualification);
        return kvm_mmu_page_fault(vcpu, gpa & PAGE_MASK, 0);
}
static u64 ept_rsvd_mask(u64 spte, int level)
{
        int i;
        u64 mask = 0;

        for (i = 51; i > boot_cpu_data.x86_phys_bits; i--)
                mask |= (1ULL << i);

        if (level > 2)
                /* bits 7:3 reserved */
                mask |= 0xf8;
        else if (level == 2) {
                if (spte & (1ULL << 7))
                        /* 2MB page, bits 20:12 reserved */
                        mask |= 0x1ff000;
                else
                        /* bits 6:3 reserved */
                        mask |= 0x78;
        }

        return mask;
}
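/*
 * ept_rsvd_mask() builds the reserved-bit mask for an EPT entry at the given
 * level; ept_misconfig_inspect_spte() below uses it to flag the combinations
 * the hardware treats as a misconfiguration: write-only and write/execute
 * permissions, execute-only where it is not supported, reserved bits set,
 * and bad memory types in leaf entries.
 */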
static void ept_misconfig_inspect_spte(struct kvm_vcpu *vcpu, u64 spte,
                                       int level)
{
        printk(KERN_ERR "%s: spte 0x%llx level %d\n", __func__, spte, level);

        /* 010b (write-only) */
        WARN_ON((spte & 0x7) == 0x2);

        /* 110b (write/execute) */
        WARN_ON((spte & 0x7) == 0x6);

        /* 100b (execute-only) and value not supported by logical processor */
        if (!cpu_has_vmx_ept_execute_only())
                WARN_ON((spte & 0x7) == 0x4);

        /* not 000b */
        if ((spte & 0x7)) {
                u64 rsvd_bits = spte & ept_rsvd_mask(spte, level);

                if (rsvd_bits != 0) {
                        printk(KERN_ERR "%s: rsvd_bits = 0x%llx\n",
                               __func__, rsvd_bits);
                        WARN_ON(1);
                }

                if (level == 1 || (level == 2 && (spte & (1ULL << 7)))) {
                        u64 ept_mem_type = (spte & 0x38) >> 3;

                        if (ept_mem_type == 2 || ept_mem_type == 3 ||
                            ept_mem_type == 7) {
                                printk(KERN_ERR "%s: ept_mem_type=0x%llx\n",
                                       __func__, ept_mem_type);
                                WARN_ON(1);
                        }
                }
        }
}
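/*
 * On an EPT misconfiguration exit we dump the SPTE hierarchy that maps the
 * guest-physical address and hand the exit to userspace as KVM_EXIT_UNKNOWN;
 * this points at a problem in the host's EPT tables rather than at anything
 * the guest did.
 */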
static int handle_ept_misconfig(struct kvm_vcpu *vcpu)
{
        u64 sptes[4];
        int nr_sptes, i;
        gpa_t gpa;

        gpa = vmcs_read64(GUEST_PHYSICAL_ADDRESS);

        printk(KERN_ERR "EPT: Misconfiguration.\n");
        printk(KERN_ERR "EPT: GPA: 0x%llx\n", gpa);

        nr_sptes = kvm_mmu_get_spte_hierarchy(vcpu, gpa, sptes);

        for (i = PT64_ROOT_LEVEL; i > PT64_ROOT_LEVEL - nr_sptes; --i)
                ept_misconfig_inspect_spte(vcpu, sptes[i-1], i);

        vcpu->run->exit_reason = KVM_EXIT_UNKNOWN;
        vcpu->run->hw.hardware_exit_reason = EXIT_REASON_EPT_MISCONFIG;

        return 0;
}

static int handle_nmi_window(struct kvm_vcpu *vcpu)
{
        u32 cpu_based_vm_exec_control;

        /* clear pending NMI */
        cpu_based_vm_exec_control = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL);
        cpu_based_vm_exec_control &= ~CPU_BASED_VIRTUAL_NMI_PENDING;
        vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control);
        ++vcpu->stat.nmi_window_exits;

        return 1;
}
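/*
 * When the guest is in a state VMX cannot execute directly (tracked via
 * vmx->emulation_required), instructions are emulated one at a time until
 * guest_state_valid() is satisfied again, yielding to signals and the
 * scheduler along the way.
 */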
static int handle_invalid_guest_state(struct kvm_vcpu *vcpu)
{
        struct vcpu_vmx *vmx = to_vmx(vcpu);
        enum emulation_result err = EMULATE_DONE;
        int ret = 1;

        while (!guest_state_valid(vcpu)) {
                err = emulate_instruction(vcpu, 0, 0, 0);

                if (err == EMULATE_DO_MMIO) {
                        ret = 0;
                        goto out;
                }

                if (err != EMULATE_DONE) {
                        vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                        vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
                        vcpu->run->internal.ndata = 0;
                        ret = 0;
                        goto out;
                }

                if (signal_pending(current))
                        goto out;
                if (need_resched())
                        schedule();
        }

        vmx->emulation_required = 0;
out:
        return ret;
}
/*
 * Indicate a busy-waiting vcpu in spinlock. We do not enable PAUSE exiting,
 * so we only get here on CPUs with PAUSE-loop exiting.
 */
static int handle_pause(struct kvm_vcpu *vcpu)
{
        skip_emulated_instruction(vcpu);
        kvm_vcpu_on_spin(vcpu);

        return 1;
}

static int handle_invalid_op(struct kvm_vcpu *vcpu)
{
        kvm_queue_exception(vcpu, UD_VECTOR);

        return 1;
}
/*
 * The exit handlers return 1 if the exit was handled fully and guest execution
 * may resume. Otherwise they set the kvm_run parameter to indicate what needs
 * to be done to userspace and return 0.
 */
static int (*kvm_vmx_exit_handlers[])(struct kvm_vcpu *vcpu) = {
        [EXIT_REASON_EXCEPTION_NMI] = handle_exception,
        [EXIT_REASON_EXTERNAL_INTERRUPT] = handle_external_interrupt,
        [EXIT_REASON_TRIPLE_FAULT] = handle_triple_fault,
        [EXIT_REASON_NMI_WINDOW] = handle_nmi_window,
        [EXIT_REASON_IO_INSTRUCTION] = handle_io,
        [EXIT_REASON_CR_ACCESS] = handle_cr,
        [EXIT_REASON_DR_ACCESS] = handle_dr,
        [EXIT_REASON_CPUID] = handle_cpuid,
        [EXIT_REASON_MSR_READ] = handle_rdmsr,
        [EXIT_REASON_MSR_WRITE] = handle_wrmsr,
        [EXIT_REASON_PENDING_INTERRUPT] = handle_interrupt_window,
        [EXIT_REASON_HLT] = handle_halt,
        [EXIT_REASON_INVLPG] = handle_invlpg,
        [EXIT_REASON_VMCALL] = handle_vmcall,
        [EXIT_REASON_VMCLEAR] = handle_vmx_insn,
        [EXIT_REASON_VMLAUNCH] = handle_vmx_insn,
        [EXIT_REASON_VMPTRLD] = handle_vmx_insn,
        [EXIT_REASON_VMPTRST] = handle_vmx_insn,
        [EXIT_REASON_VMREAD] = handle_vmx_insn,
        [EXIT_REASON_VMRESUME] = handle_vmx_insn,
        [EXIT_REASON_VMWRITE] = handle_vmx_insn,
        [EXIT_REASON_VMOFF] = handle_vmx_insn,
        [EXIT_REASON_VMON] = handle_vmx_insn,
        [EXIT_REASON_TPR_BELOW_THRESHOLD] = handle_tpr_below_threshold,
        [EXIT_REASON_APIC_ACCESS] = handle_apic_access,
        [EXIT_REASON_WBINVD] = handle_wbinvd,
        [EXIT_REASON_TASK_SWITCH] = handle_task_switch,
        [EXIT_REASON_MCE_DURING_VMENTRY] = handle_machine_check,
        [EXIT_REASON_EPT_VIOLATION] = handle_ept_violation,
        [EXIT_REASON_EPT_MISCONFIG] = handle_ept_misconfig,
        [EXIT_REASON_PAUSE_INSTRUCTION] = handle_pause,
        [EXIT_REASON_MWAIT_INSTRUCTION] = handle_invalid_op,
        [EXIT_REASON_MONITOR_INSTRUCTION] = handle_invalid_op,
};

static const int kvm_vmx_max_exit_handlers =
        ARRAY_SIZE(kvm_vmx_exit_handlers);
/*
 * The guest has exited. See if we can fix it or if we need userspace
 * assistance.
 */
static int vmx_handle_exit(struct kvm_vcpu *vcpu)
{
        struct vcpu_vmx *vmx = to_vmx(vcpu);
        u32 exit_reason = vmx->exit_reason;
        u32 vectoring_info = vmx->idt_vectoring_info;

        trace_kvm_exit(exit_reason, vcpu);

        /* If guest state is invalid, start emulating */
        if (vmx->emulation_required && emulate_invalid_guest_state)
                return handle_invalid_guest_state(vcpu);

        /* Accesses to CR3 don't cause a VM exit in paging mode, so we need
         * to sync with the guest's real CR3. */
        if (enable_ept && is_paging(vcpu))
                vcpu->arch.cr3 = vmcs_readl(GUEST_CR3);

        if (unlikely(vmx->fail)) {
                vcpu->run->exit_reason = KVM_EXIT_FAIL_ENTRY;
                vcpu->run->fail_entry.hardware_entry_failure_reason
                        = vmcs_read32(VM_INSTRUCTION_ERROR);
                return 0;
        }

        if ((vectoring_info & VECTORING_INFO_VALID_MASK) &&
            (exit_reason != EXIT_REASON_EXCEPTION_NMI &&
             exit_reason != EXIT_REASON_EPT_VIOLATION &&
             exit_reason != EXIT_REASON_TASK_SWITCH))
                printk(KERN_WARNING "%s: unexpected, valid vectoring info "
                       "(0x%x) and exit reason is 0x%x\n",
                       __func__, vectoring_info, exit_reason);

        if (unlikely(!cpu_has_virtual_nmis() && vmx->soft_vnmi_blocked)) {
                if (vmx_interrupt_allowed(vcpu)) {
                        vmx->soft_vnmi_blocked = 0;
                } else if (vmx->vnmi_blocked_time > 1000000000LL &&
                           vcpu->arch.nmi_pending) {
                        /*
                         * This CPU doesn't support us in finding the end of an
                         * NMI-blocked window if the guest runs with IRQs
                         * disabled. So we pull the trigger after 1 s of
                         * futile waiting, but inform the user about this.
                         */
                        printk(KERN_WARNING "%s: Breaking out of NMI-blocked "
                               "state on VCPU %d after 1 s timeout\n",
                               __func__, vcpu->vcpu_id);
                        vmx->soft_vnmi_blocked = 0;
                }
        }

        if (exit_reason < kvm_vmx_max_exit_handlers
            && kvm_vmx_exit_handlers[exit_reason])
                return kvm_vmx_exit_handlers[exit_reason](vcpu);
        else {
                vcpu->run->exit_reason = KVM_EXIT_UNKNOWN;
                vcpu->run->hw.hardware_exit_reason = exit_reason;
        }
        return 0;
}
static void update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr)
{
        if (irr == -1 || tpr < irr) {
                vmcs_write32(TPR_THRESHOLD, 0);
                return;
        }

        vmcs_write32(TPR_THRESHOLD, irr);
}
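/*
 * vmx_complete_interrupts() runs right after VM exit, with interrupts still
 * disabled: it forwards machine checks and NMIs to the host handlers, fixes
 * up NMI blocking per the SDM rules cited below, and re-queues any event
 * whose delivery was interrupted by the exit (from the IDT-vectoring info)
 * so that it can be injected again on the next entry.
 */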
static void vmx_complete_interrupts(struct vcpu_vmx *vmx)
{
        u32 exit_intr_info;
        u32 idt_vectoring_info = vmx->idt_vectoring_info;
        bool unblock_nmi;
        u8 vector;
        int type;
        bool idtv_info_valid;

        exit_intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
        vmx->exit_reason = vmcs_read32(VM_EXIT_REASON);

        /* Handle machine checks before interrupts are enabled */
        if ((vmx->exit_reason == EXIT_REASON_MCE_DURING_VMENTRY)
            || (vmx->exit_reason == EXIT_REASON_EXCEPTION_NMI
                && is_machine_check(exit_intr_info)))
                kvm_machine_check();

        /* We need to handle NMIs before interrupts are enabled */
        if ((exit_intr_info & INTR_INFO_INTR_TYPE_MASK) == INTR_TYPE_NMI_INTR &&
            (exit_intr_info & INTR_INFO_VALID_MASK)) {
                kvm_before_handle_nmi(&vmx->vcpu);
                asm("int $2");
                kvm_after_handle_nmi(&vmx->vcpu);
        }

        idtv_info_valid = idt_vectoring_info & VECTORING_INFO_VALID_MASK;

        if (cpu_has_virtual_nmis()) {
                unblock_nmi = (exit_intr_info & INTR_INFO_UNBLOCK_NMI) != 0;
                vector = exit_intr_info & INTR_INFO_VECTOR_MASK;
                /*
                 * SDM 3: 27.7.1.2 (September 2008)
                 * Re-set bit "block by NMI" before VM entry if vmexit caused by
                 * a guest IRET fault.
                 * SDM 3: 23.2.2 (September 2008)
                 * Bit 12 is undefined in any of the following cases:
                 *  If the VM exit sets the valid bit in the IDT-vectoring
                 *   information field.
                 *  If the VM exit is due to a double fault.
                 */
                if ((exit_intr_info & INTR_INFO_VALID_MASK) && unblock_nmi &&
                    vector != DF_VECTOR && !idtv_info_valid)
                        vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO,
                                      GUEST_INTR_STATE_NMI);
        } else if (unlikely(vmx->soft_vnmi_blocked))
                vmx->vnmi_blocked_time +=
                        ktime_to_ns(ktime_sub(ktime_get(), vmx->entry_time));

        vmx->vcpu.arch.nmi_injected = false;
        kvm_clear_exception_queue(&vmx->vcpu);
        kvm_clear_interrupt_queue(&vmx->vcpu);

        if (!idtv_info_valid)
                return;

        vector = idt_vectoring_info & VECTORING_INFO_VECTOR_MASK;
        type = idt_vectoring_info & VECTORING_INFO_TYPE_MASK;

        switch (type) {
        case INTR_TYPE_NMI_INTR:
                vmx->vcpu.arch.nmi_injected = true;
                /*
                 * SDM 3: 27.7.1.2 (September 2008)
                 * Clear bit "block by NMI" before VM entry if a NMI
                 * delivery faulted.
                 */
                vmcs_clear_bits(GUEST_INTERRUPTIBILITY_INFO,
                                GUEST_INTR_STATE_NMI);
                break;
        case INTR_TYPE_SOFT_EXCEPTION:
                vmx->vcpu.arch.event_exit_inst_len =
                        vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
                /* fall through */
        case INTR_TYPE_HARD_EXCEPTION:
                if (idt_vectoring_info & VECTORING_INFO_DELIVER_CODE_MASK) {
                        u32 err = vmcs_read32(IDT_VECTORING_ERROR_CODE);
                        kvm_queue_exception_e(&vmx->vcpu, vector, err);
                } else
                        kvm_queue_exception(&vmx->vcpu, vector);
                break;
        case INTR_TYPE_SOFT_INTR:
                vmx->vcpu.arch.event_exit_inst_len =
                        vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
                /* fall through */
        case INTR_TYPE_EXT_INTR:
                kvm_queue_interrupt(&vmx->vcpu, vector,
                                    type == INTR_TYPE_SOFT_INTR);
                break;
        default:
                break;
        }
}
/*
 * Failure to inject an interrupt should give us the information
 * in IDT_VECTORING_INFO_FIELD. However, if the failure occurs
 * when fetching the interrupt redirection bitmap in the real-mode
 * tss, this doesn't happen. So we do it ourselves.
 */
static void fixup_rmode_irq(struct vcpu_vmx *vmx)
{
        vmx->rmode.irq.pending = 0;
        if (kvm_rip_read(&vmx->vcpu) + 1 != vmx->rmode.irq.rip)
                return;
        kvm_rip_write(&vmx->vcpu, vmx->rmode.irq.rip);
        if (vmx->idt_vectoring_info & VECTORING_INFO_VALID_MASK) {
                vmx->idt_vectoring_info &= ~VECTORING_INFO_TYPE_MASK;
                vmx->idt_vectoring_info |= INTR_TYPE_EXT_INTR;
                return;
        }
        vmx->idt_vectoring_info =
                VECTORING_INFO_VALID_MASK
                | INTR_TYPE_EXT_INTR
                | vmx->rmode.irq.vector;
}
#ifdef CONFIG_X86_64
#define R "r"
#define Q "q"
#else
#define R "e"
#define Q "l"
#endif
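/*
 * The R/Q macros above pick the "r"/"q" (64-bit) or "e"/"l" (32-bit)
 * register prefix and push/pop suffix so the same asm body works for both
 * builds.  The asm block in vmx_vcpu_run() stores the host stack pointer in
 * the VMCS, loads the guest's general-purpose registers and CR2, issues
 * VMLAUNCH or VMRESUME, and on return saves the guest registers back into
 * vcpu->arch.regs before the C code reads the exit information.
 */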
static void vmx_vcpu_run(struct kvm_vcpu *vcpu)
{
        struct vcpu_vmx *vmx = to_vmx(vcpu);

        /* Record the guest's net vcpu time for enforced NMI injections. */
        if (unlikely(!cpu_has_virtual_nmis() && vmx->soft_vnmi_blocked))
                vmx->entry_time = ktime_get();

        /* Don't enter VMX if guest state is invalid; let the exit handler
         * start emulation until we arrive back at a valid state. */
        if (vmx->emulation_required && emulate_invalid_guest_state)
                return;

        if (test_bit(VCPU_REGS_RSP, (unsigned long *)&vcpu->arch.regs_dirty))
                vmcs_writel(GUEST_RSP, vcpu->arch.regs[VCPU_REGS_RSP]);
        if (test_bit(VCPU_REGS_RIP, (unsigned long *)&vcpu->arch.regs_dirty))
                vmcs_writel(GUEST_RIP, vcpu->arch.regs[VCPU_REGS_RIP]);

        /* When single-stepping over STI and MOV SS, we must clear the
         * corresponding interruptibility bits in the guest state. Otherwise
         * vmentry fails as it then expects bit 14 (BS) in pending debug
         * exceptions being set, but that's not correct for the guest debugging
         * case. */
        if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
                vmx_set_interrupt_shadow(vcpu, 0);

        /*
         * Loading guest fpu may have cleared host cr0.ts
         */
        vmcs_writel(HOST_CR0, read_cr0());

        asm(
                /* Store host registers */
                "push %%"R"dx; push %%"R"bp;"
                "push %%"R"cx \n\t"
                "cmp %%"R"sp, %c[host_rsp](%0) \n\t"
                "je 1f \n\t"
                "mov %%"R"sp, %c[host_rsp](%0) \n\t"
                __ex(ASM_VMX_VMWRITE_RSP_RDX) "\n\t"
                "1: \n\t"
                /* Reload cr2 if changed */
                "mov %c[cr2](%0), %%"R"ax \n\t"
                "mov %%cr2, %%"R"dx \n\t"
                "cmp %%"R"ax, %%"R"dx \n\t"
                "je 2f \n\t"
                "mov %%"R"ax, %%cr2 \n\t"
                "2: \n\t"
                /* Check if vmlaunch or vmresume is needed */
                "cmpl $0, %c[launched](%0) \n\t"
                /* Load guest registers. Don't clobber flags. */
                "mov %c[rax](%0), %%"R"ax \n\t"
                "mov %c[rbx](%0), %%"R"bx \n\t"
                "mov %c[rdx](%0), %%"R"dx \n\t"
                "mov %c[rsi](%0), %%"R"si \n\t"
                "mov %c[rdi](%0), %%"R"di \n\t"
                "mov %c[rbp](%0), %%"R"bp \n\t"
#ifdef CONFIG_X86_64
                "mov %c[r8](%0), %%r8 \n\t"
                "mov %c[r9](%0), %%r9 \n\t"
                "mov %c[r10](%0), %%r10 \n\t"
                "mov %c[r11](%0), %%r11 \n\t"
                "mov %c[r12](%0), %%r12 \n\t"
                "mov %c[r13](%0), %%r13 \n\t"
                "mov %c[r14](%0), %%r14 \n\t"
                "mov %c[r15](%0), %%r15 \n\t"
#endif
                "mov %c[rcx](%0), %%"R"cx \n\t" /* kills %0 (ecx) */

                /* Enter guest mode */
                "jne .Llaunched \n\t"
                __ex(ASM_VMX_VMLAUNCH) "\n\t"
                "jmp .Lkvm_vmx_return \n\t"
                ".Llaunched: " __ex(ASM_VMX_VMRESUME) "\n\t"
                ".Lkvm_vmx_return: "
                /* Save guest registers, load host registers, keep flags */
                "xchg %0, (%%"R"sp) \n\t"
                "mov %%"R"ax, %c[rax](%0) \n\t"
                "mov %%"R"bx, %c[rbx](%0) \n\t"
                "push"Q" (%%"R"sp); pop"Q" %c[rcx](%0) \n\t"
                "mov %%"R"dx, %c[rdx](%0) \n\t"
                "mov %%"R"si, %c[rsi](%0) \n\t"
                "mov %%"R"di, %c[rdi](%0) \n\t"
                "mov %%"R"bp, %c[rbp](%0) \n\t"
#ifdef CONFIG_X86_64
                "mov %%r8, %c[r8](%0) \n\t"
                "mov %%r9, %c[r9](%0) \n\t"
                "mov %%r10, %c[r10](%0) \n\t"
                "mov %%r11, %c[r11](%0) \n\t"
                "mov %%r12, %c[r12](%0) \n\t"
                "mov %%r13, %c[r13](%0) \n\t"
                "mov %%r14, %c[r14](%0) \n\t"
                "mov %%r15, %c[r15](%0) \n\t"
#endif
                "mov %%cr2, %%"R"ax \n\t"
                "mov %%"R"ax, %c[cr2](%0) \n\t"

                "pop %%"R"bp; pop %%"R"bp; pop %%"R"dx \n\t"
                "setbe %c[fail](%0) \n\t"
              : : "c"(vmx), "d"((unsigned long)HOST_RSP),
                [launched]"i"(offsetof(struct vcpu_vmx, launched)),
                [fail]"i"(offsetof(struct vcpu_vmx, fail)),
                [host_rsp]"i"(offsetof(struct vcpu_vmx, host_rsp)),
                [rax]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RAX])),
                [rbx]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RBX])),
                [rcx]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RCX])),
                [rdx]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RDX])),
                [rsi]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RSI])),
                [rdi]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RDI])),
                [rbp]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RBP])),
#ifdef CONFIG_X86_64
                [r8]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R8])),
                [r9]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R9])),
                [r10]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R10])),
                [r11]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R11])),
                [r12]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R12])),
                [r13]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R13])),
                [r14]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R14])),
                [r15]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R15])),
#endif
                [cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2))
              : "cc", "memory"
                , R"bx", R"di", R"si"
#ifdef CONFIG_X86_64
                , "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15"
#endif
              );

        vcpu->arch.regs_avail = ~((1 << VCPU_REGS_RIP) | (1 << VCPU_REGS_RSP)
                                  | (1 << VCPU_EXREG_PDPTR));
        vcpu->arch.regs_dirty = 0;

        vmx->idt_vectoring_info = vmcs_read32(IDT_VECTORING_INFO_FIELD);
        if (vmx->rmode.irq.pending)
                fixup_rmode_irq(vmx);

        asm("mov %0, %%ds; mov %0, %%es" : : "r"(__USER_DS));
        vmx->launched = 1;

        vmx_complete_interrupts(vmx);
}

#undef R
#undef Q
static void vmx_free_vmcs(struct kvm_vcpu *vcpu)
{
        struct vcpu_vmx *vmx = to_vmx(vcpu);

        if (vmx->vmcs) {
                vcpu_clear(vmx);
                free_vmcs(vmx->vmcs);
                vmx->vmcs = NULL;
        }
}

static void vmx_free_vcpu(struct kvm_vcpu *vcpu)
{
        struct vcpu_vmx *vmx = to_vmx(vcpu);

        spin_lock(&vmx_vpid_lock);
        if (vmx->vpid != 0)
                __clear_bit(vmx->vpid, vmx_vpid_bitmap);
        spin_unlock(&vmx_vpid_lock);
        vmx_free_vmcs(vcpu);
        kfree(vmx->guest_msrs);
        kvm_vcpu_uninit(vcpu);
        kmem_cache_free(kvm_vcpu_cache, vmx);
}
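/*
 * vcpu creation allocates the per-vcpu structure, a VPID, the guest MSR save
 * area and a VMCS, then programs the VMCS via vmx_vcpu_setup() with the vcpu
 * temporarily loaded on this CPU.  The error paths unwind in reverse
 * allocation order through the labels at the end.
 */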
static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id)
{
        int err;
        struct vcpu_vmx *vmx = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
        int cpu;

        if (!vmx)
                return ERR_PTR(-ENOMEM);

        allocate_vpid(vmx);

        err = kvm_vcpu_init(&vmx->vcpu, kvm, id);
        if (err)
                goto free_vcpu;

        vmx->guest_msrs = kmalloc(PAGE_SIZE, GFP_KERNEL);
        if (!vmx->guest_msrs) {
                err = -ENOMEM;
                goto uninit_vcpu;
        }

        vmx->vmcs = alloc_vmcs();
        if (!vmx->vmcs)
                goto free_msrs;

        vmcs_clear(vmx->vmcs);

        cpu = get_cpu();
        vmx_vcpu_load(&vmx->vcpu, cpu);
        err = vmx_vcpu_setup(vmx);
        vmx_vcpu_put(&vmx->vcpu);
        put_cpu();
        if (err)
                goto free_vmcs;
        if (vm_need_virtualize_apic_accesses(kvm))
                if (alloc_apic_access_page(kvm) != 0)
                        goto free_vmcs;

        if (enable_ept) {
                if (!kvm->arch.ept_identity_map_addr)
                        kvm->arch.ept_identity_map_addr =
                                VMX_EPT_IDENTITY_PAGETABLE_ADDR;
                if (alloc_identity_pagetable(kvm) != 0)
                        goto free_vmcs;
        }

        return &vmx->vcpu;

free_vmcs:
        free_vmcs(vmx->vmcs);
free_msrs:
        kfree(vmx->guest_msrs);
uninit_vcpu:
        kvm_vcpu_uninit(&vmx->vcpu);
free_vcpu:
        kmem_cache_free(kvm_vcpu_cache, vmx);
        return ERR_PTR(err);
}
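/*
 * Compatibility check, intended to be run on each CPU: every one must
 * produce a vmcs_config identical to the one computed at setup time,
 * otherwise the result is -EIO and KVM refuses to run with inconsistent
 * VMX features across CPUs.
 */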
static void __init vmx_check_processor_compat(void *rtn)
{
        struct vmcs_config vmcs_conf;

        *(int *)rtn = 0;
        if (setup_vmcs_config(&vmcs_conf) < 0)
                *(int *)rtn = -EIO;
        if (memcmp(&vmcs_config, &vmcs_conf, sizeof(struct vmcs_config)) != 0) {
                printk(KERN_ERR "kvm: CPU %d feature inconsistency!\n",
                       smp_processor_id());
                *(int *)rtn = -EIO;
        }
}

static int get_ept_level(void)
{
        return VMX_EPT_DEFAULT_GAW + 1;
}
static u64 vmx_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio)
{
        u64 ret;

        /* For VT-d and EPT combination
         * 1. MMIO: always map as UC
         * 2. EPT with VT-d:
         *   a. VT-d without snooping control feature: can't guarantee the
         *      result, try to trust guest.
         *   b. VT-d with snooping control feature: snooping control feature of
         *      VT-d engine can guarantee the cache correctness. Just set it
         *      to WB to keep consistent with host. So the same as item 3.
         * 3. EPT without VT-d: always map as WB and set IPAT=1 to keep
         *    consistent with host MTRR
         */
        if (is_mmio)
                ret = MTRR_TYPE_UNCACHABLE << VMX_EPT_MT_EPTE_SHIFT;
        else if (vcpu->kvm->arch.iommu_domain &&
                 !(vcpu->kvm->arch.iommu_flags & KVM_IOMMU_CACHE_COHERENCY))
                ret = kvm_get_guest_memory_type(vcpu, gfn) <<
                      VMX_EPT_MT_EPTE_SHIFT;
        else
                ret = (MTRR_TYPE_WRBACK << VMX_EPT_MT_EPTE_SHIFT)
                        | VMX_EPT_IPAT_BIT;

        return ret;
}
#define _ER(x) { EXIT_REASON_##x, #x }

static const struct trace_print_flags vmx_exit_reasons_str[] = {
        _ER(EXCEPTION_NMI),
        _ER(EXTERNAL_INTERRUPT),
        _ER(TRIPLE_FAULT),
        _ER(PENDING_INTERRUPT),
        _ER(NMI_WINDOW),
        _ER(TASK_SWITCH),
        _ER(CPUID),
        _ER(HLT),
        _ER(INVLPG),
        _ER(RDPMC),
        _ER(RDTSC),
        _ER(VMCALL),
        _ER(VMCLEAR),
        _ER(VMLAUNCH),
        _ER(VMPTRLD),
        _ER(VMPTRST),
        _ER(VMREAD),
        _ER(VMRESUME),
        _ER(VMWRITE),
        _ER(VMOFF),
        _ER(VMON),
        _ER(CR_ACCESS),
        _ER(DR_ACCESS),
        _ER(IO_INSTRUCTION),
        _ER(MSR_READ),
        _ER(MSR_WRITE),
        _ER(MWAIT_INSTRUCTION),
        _ER(MONITOR_INSTRUCTION),
        _ER(PAUSE_INSTRUCTION),
        _ER(MCE_DURING_VMENTRY),
        _ER(TPR_BELOW_THRESHOLD),
        _ER(APIC_ACCESS),
        _ER(EPT_VIOLATION),
        _ER(EPT_MISCONFIG),
        _ER(WBINVD),
        { -1, NULL }
};

#undef _ER
static int vmx_get_lpage_level(void)
{
        if (enable_ept && !cpu_has_vmx_ept_1g_page())
                return PT_DIRECTORY_LEVEL;
        else
                /* For shadow paging, and for EPT when 1GB pages are supported */
                return PT_PDPE_LEVEL;
}

static inline u32 bit(int bitno)
{
        return 1 << (bitno & 31);
}
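/*
 * Called when the guest's CPUID tables change.  RDTSCP interception is the
 * only thing recomputed here: the SECONDARY_EXEC_RDTSCP control is kept only
 * if the guest CPUID (leaf 0x80000001, EDX) actually advertises RDTSCP;
 * otherwise the control is cleared so the instruction raises #UD in the
 * guest.
 */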
static void vmx_cpuid_update(struct kvm_vcpu *vcpu)
{
        struct kvm_cpuid_entry2 *best;
        struct vcpu_vmx *vmx = to_vmx(vcpu);
        u32 exec_control;

        vmx->rdtscp_enabled = false;
        if (vmx_rdtscp_supported()) {
                exec_control = vmcs_read32(SECONDARY_VM_EXEC_CONTROL);
                if (exec_control & SECONDARY_EXEC_RDTSCP) {
                        best = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
                        if (best && (best->edx & bit(X86_FEATURE_RDTSCP)))
                                vmx->rdtscp_enabled = true;
                        else {
                                exec_control &= ~SECONDARY_EXEC_RDTSCP;
                                vmcs_write32(SECONDARY_VM_EXEC_CONTROL,
                                             exec_control);
                        }
                }
        }
}
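/*
 * vmx_x86_ops is the table of callbacks the generic x86 KVM code uses to
 * drive this VMX backend; it is handed to kvm_init() in vmx_init() below.
 */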
static struct kvm_x86_ops vmx_x86_ops = {
        .cpu_has_kvm_support = cpu_has_kvm_support,
        .disabled_by_bios = vmx_disabled_by_bios,
        .hardware_setup = hardware_setup,
        .hardware_unsetup = hardware_unsetup,
        .check_processor_compatibility = vmx_check_processor_compat,
        .hardware_enable = hardware_enable,
        .hardware_disable = hardware_disable,
        .cpu_has_accelerated_tpr = report_flexpriority,

        .vcpu_create = vmx_create_vcpu,
        .vcpu_free = vmx_free_vcpu,
        .vcpu_reset = vmx_vcpu_reset,

        .prepare_guest_switch = vmx_save_host_state,
        .vcpu_load = vmx_vcpu_load,
        .vcpu_put = vmx_vcpu_put,

        .set_guest_debug = set_guest_debug,
        .get_msr = vmx_get_msr,
        .set_msr = vmx_set_msr,
        .get_segment_base = vmx_get_segment_base,
        .get_segment = vmx_get_segment,
        .set_segment = vmx_set_segment,
        .get_cpl = vmx_get_cpl,
        .get_cs_db_l_bits = vmx_get_cs_db_l_bits,
        .decache_cr0_guest_bits = vmx_decache_cr0_guest_bits,
        .decache_cr4_guest_bits = vmx_decache_cr4_guest_bits,
        .set_cr0 = vmx_set_cr0,
        .set_cr3 = vmx_set_cr3,
        .set_cr4 = vmx_set_cr4,
        .set_efer = vmx_set_efer,
        .get_idt = vmx_get_idt,
        .set_idt = vmx_set_idt,
        .get_gdt = vmx_get_gdt,
        .set_gdt = vmx_set_gdt,
        .set_dr7 = vmx_set_dr7,
        .cache_reg = vmx_cache_reg,
        .get_rflags = vmx_get_rflags,
        .set_rflags = vmx_set_rflags,
        .fpu_activate = vmx_fpu_activate,
        .fpu_deactivate = vmx_fpu_deactivate,

        .tlb_flush = vmx_flush_tlb,

        .run = vmx_vcpu_run,
        .handle_exit = vmx_handle_exit,
        .skip_emulated_instruction = skip_emulated_instruction,
        .set_interrupt_shadow = vmx_set_interrupt_shadow,
        .get_interrupt_shadow = vmx_get_interrupt_shadow,
        .patch_hypercall = vmx_patch_hypercall,
        .set_irq = vmx_inject_irq,
        .set_nmi = vmx_inject_nmi,
        .queue_exception = vmx_queue_exception,
        .interrupt_allowed = vmx_interrupt_allowed,
        .nmi_allowed = vmx_nmi_allowed,
        .get_nmi_mask = vmx_get_nmi_mask,
        .set_nmi_mask = vmx_set_nmi_mask,
        .enable_nmi_window = enable_nmi_window,
        .enable_irq_window = enable_irq_window,
        .update_cr8_intercept = update_cr8_intercept,

        .set_tss_addr = vmx_set_tss_addr,
        .get_tdp_level = get_ept_level,
        .get_mt_mask = vmx_get_mt_mask,

        .exit_reasons_str = vmx_exit_reasons_str,
        .get_lpage_level = vmx_get_lpage_level,

        .cpuid_update = vmx_cpuid_update,

        .rdtscp_supported = vmx_rdtscp_supported,
};
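/*
 * Module init: allocate the I/O and MSR bitmaps and fill them with 0xff
 * (intercept everything), leave the PC debug port (0x80) unintercepted,
 * register with kvm_init(), then open up a handful of frequently accessed
 * MSRs and configure the MMU for EPT/TDP.
 */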
static int __init vmx_init(void)
{
        int r, i;

        rdmsrl_safe(MSR_EFER, &host_efer);

        for (i = 0; i < NR_VMX_MSR; ++i)
                kvm_define_shared_msr(i, vmx_msr_index[i]);

        vmx_io_bitmap_a = (unsigned long *)__get_free_page(GFP_KERNEL);
        if (!vmx_io_bitmap_a)
                return -ENOMEM;

        vmx_io_bitmap_b = (unsigned long *)__get_free_page(GFP_KERNEL);
        if (!vmx_io_bitmap_b) {
                r = -ENOMEM;
                goto out;
        }

        vmx_msr_bitmap_legacy = (unsigned long *)__get_free_page(GFP_KERNEL);
        if (!vmx_msr_bitmap_legacy) {
                r = -ENOMEM;
                goto out1;
        }

        vmx_msr_bitmap_longmode = (unsigned long *)__get_free_page(GFP_KERNEL);
        if (!vmx_msr_bitmap_longmode) {
                r = -ENOMEM;
                goto out2;
        }

        /*
         * Allow direct access to the PC debug port (it is often used for I/O
         * delays, but the vmexits simply slow things down).
         */
        memset(vmx_io_bitmap_a, 0xff, PAGE_SIZE);
        clear_bit(0x80, vmx_io_bitmap_a);

        memset(vmx_io_bitmap_b, 0xff, PAGE_SIZE);

        memset(vmx_msr_bitmap_legacy, 0xff, PAGE_SIZE);
        memset(vmx_msr_bitmap_longmode, 0xff, PAGE_SIZE);

        set_bit(0, vmx_vpid_bitmap); /* 0 is reserved for host */

        r = kvm_init(&vmx_x86_ops, sizeof(struct vcpu_vmx), THIS_MODULE);
        if (r)
                goto out3;

        vmx_disable_intercept_for_msr(MSR_FS_BASE, false);
        vmx_disable_intercept_for_msr(MSR_GS_BASE, false);
        vmx_disable_intercept_for_msr(MSR_KERNEL_GS_BASE, true);
        vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_CS, false);
        vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_ESP, false);
        vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_EIP, false);

        if (enable_ept) {
                bypass_guest_pf = 0;
                kvm_mmu_set_base_ptes(VMX_EPT_READABLE_MASK |
                                      VMX_EPT_WRITABLE_MASK);
                kvm_mmu_set_mask_ptes(0ull, 0ull, 0ull, 0ull,
                                      VMX_EPT_EXECUTABLE_MASK);
                kvm_enable_tdp();
        } else
                kvm_disable_tdp();

        if (bypass_guest_pf)
                kvm_mmu_set_nonpresent_ptes(~0xffeull, 0ull);

        return 0;

out3:
        free_page((unsigned long)vmx_msr_bitmap_longmode);
out2:
        free_page((unsigned long)vmx_msr_bitmap_legacy);
out1:
        free_page((unsigned long)vmx_io_bitmap_b);
out:
        free_page((unsigned long)vmx_io_bitmap_a);
        return r;
}
static void __exit vmx_exit(void)
{
        free_page((unsigned long)vmx_msr_bitmap_legacy);
        free_page((unsigned long)vmx_msr_bitmap_longmode);
        free_page((unsigned long)vmx_io_bitmap_b);
        free_page((unsigned long)vmx_io_bitmap_a);

        kvm_exit();
}

module_init(vmx_init)
module_exit(vmx_exit)