x86.c

/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * derived from drivers/kvm/kvm_main.c
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright (C) 2008 Qumranet, Inc.
 * Copyright IBM Corporation, 2008
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Avi Kivity   <avi@qumranet.com>
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Amit Shah    <amit.shah@qumranet.com>
 *   Ben-Ami Yassour <benami@il.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#include <linux/kvm_host.h>
#include "irq.h"
#include "mmu.h"
#include "i8254.h"
#include "tss.h"
#include "kvm_cache_regs.h"
#include "x86.h"

#include <linux/clocksource.h>
#include <linux/interrupt.h>
#include <linux/kvm.h>
#include <linux/fs.h>
#include <linux/vmalloc.h>
#include <linux/module.h>
#include <linux/mman.h>
#include <linux/highmem.h>
#include <linux/iommu.h>
#include <linux/intel-iommu.h>
#include <linux/cpufreq.h>
#include <linux/user-return-notifier.h>
#include <linux/srcu.h>
#include <linux/slab.h>
#include <linux/perf_event.h>
#include <linux/uaccess.h>
#include <linux/hash.h>
#include <trace/events/kvm.h>

#define CREATE_TRACE_POINTS
#include "trace.h"

#include <asm/debugreg.h>
#include <asm/msr.h>
#include <asm/desc.h>
#include <asm/mtrr.h>
#include <asm/mce.h>
#include <asm/i387.h>
#include <asm/xcr.h>
#include <asm/pvclock.h>
#include <asm/div64.h>

#define MAX_IO_MSRS 256
#define KVM_MAX_MCE_BANKS 32
#define KVM_MCE_CAP_SUPPORTED (MCG_CTL_P | MCG_SER_P)

#define emul_to_vcpu(ctxt) \
    container_of(ctxt, struct kvm_vcpu, arch.emulate_ctxt)

/* EFER defaults:
 * - enable syscall by default because it's emulated by KVM
 * - enable LME and LMA by default on 64-bit KVM
 */
#ifdef CONFIG_X86_64
static
u64 __read_mostly efer_reserved_bits = ~((u64)(EFER_SCE | EFER_LME | EFER_LMA));
#else
static u64 __read_mostly efer_reserved_bits = ~((u64)EFER_SCE);
#endif

#define VM_STAT(x) offsetof(struct kvm, stat.x), KVM_STAT_VM
#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU
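
/*
 * Note: VM_STAT()/VCPU_STAT() expand to an (offset, kind) pair, so each
 * entry in the debugfs_entries[] table below carries the byte offset of its
 * counter within struct kvm or struct kvm_vcpu plus a flag saying which of
 * the two it belongs to; the generic KVM debugfs code uses that pair to
 * locate the value.
 */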

static void update_cr8_intercept(struct kvm_vcpu *vcpu);
static int kvm_dev_ioctl_get_supported_cpuid(struct kvm_cpuid2 *cpuid,
        struct kvm_cpuid_entry2 __user *entries);

struct kvm_x86_ops *kvm_x86_ops;
EXPORT_SYMBOL_GPL(kvm_x86_ops);

int ignore_msrs = 0;
module_param_named(ignore_msrs, ignore_msrs, bool, S_IRUGO | S_IWUSR);

bool kvm_has_tsc_control;
EXPORT_SYMBOL_GPL(kvm_has_tsc_control);
u32 kvm_max_guest_tsc_khz;
EXPORT_SYMBOL_GPL(kvm_max_guest_tsc_khz);

#define KVM_NR_SHARED_MSRS 16

struct kvm_shared_msrs_global {
    int nr;
    u32 msrs[KVM_NR_SHARED_MSRS];
};

struct kvm_shared_msrs {
    struct user_return_notifier urn;
    bool registered;
    struct kvm_shared_msr_values {
        u64 host;
        u64 curr;
    } values[KVM_NR_SHARED_MSRS];
};

static struct kvm_shared_msrs_global __read_mostly shared_msrs_global;
static DEFINE_PER_CPU(struct kvm_shared_msrs, shared_msrs);

struct kvm_stats_debugfs_item debugfs_entries[] = {
    { "pf_fixed", VCPU_STAT(pf_fixed) },
    { "pf_guest", VCPU_STAT(pf_guest) },
    { "tlb_flush", VCPU_STAT(tlb_flush) },
    { "invlpg", VCPU_STAT(invlpg) },
    { "exits", VCPU_STAT(exits) },
    { "io_exits", VCPU_STAT(io_exits) },
    { "mmio_exits", VCPU_STAT(mmio_exits) },
    { "signal_exits", VCPU_STAT(signal_exits) },
    { "irq_window", VCPU_STAT(irq_window_exits) },
    { "nmi_window", VCPU_STAT(nmi_window_exits) },
    { "halt_exits", VCPU_STAT(halt_exits) },
    { "halt_wakeup", VCPU_STAT(halt_wakeup) },
    { "hypercalls", VCPU_STAT(hypercalls) },
    { "request_irq", VCPU_STAT(request_irq_exits) },
    { "irq_exits", VCPU_STAT(irq_exits) },
    { "host_state_reload", VCPU_STAT(host_state_reload) },
    { "efer_reload", VCPU_STAT(efer_reload) },
    { "fpu_reload", VCPU_STAT(fpu_reload) },
    { "insn_emulation", VCPU_STAT(insn_emulation) },
    { "insn_emulation_fail", VCPU_STAT(insn_emulation_fail) },
    { "irq_injections", VCPU_STAT(irq_injections) },
    { "nmi_injections", VCPU_STAT(nmi_injections) },
    { "mmu_shadow_zapped", VM_STAT(mmu_shadow_zapped) },
    { "mmu_pte_write", VM_STAT(mmu_pte_write) },
    { "mmu_pte_updated", VM_STAT(mmu_pte_updated) },
    { "mmu_pde_zapped", VM_STAT(mmu_pde_zapped) },
    { "mmu_flooded", VM_STAT(mmu_flooded) },
    { "mmu_recycled", VM_STAT(mmu_recycled) },
    { "mmu_cache_miss", VM_STAT(mmu_cache_miss) },
    { "mmu_unsync", VM_STAT(mmu_unsync) },
    { "remote_tlb_flush", VM_STAT(remote_tlb_flush) },
    { "largepages", VM_STAT(lpages) },
    { NULL }
};
u64 __read_mostly host_xcr0;

int emulator_fix_hypercall(struct x86_emulate_ctxt *ctxt);

static inline void kvm_async_pf_hash_reset(struct kvm_vcpu *vcpu)
{
    int i;
    for (i = 0; i < roundup_pow_of_two(ASYNC_PF_PER_VCPU); i++)
        vcpu->arch.apf.gfns[i] = ~0;
}

static void kvm_on_user_return(struct user_return_notifier *urn)
{
    unsigned slot;
    struct kvm_shared_msrs *locals
        = container_of(urn, struct kvm_shared_msrs, urn);
    struct kvm_shared_msr_values *values;

    for (slot = 0; slot < shared_msrs_global.nr; ++slot) {
        values = &locals->values[slot];
        if (values->host != values->curr) {
            wrmsrl(shared_msrs_global.msrs[slot], values->host);
            values->curr = values->host;
        }
    }
    locals->registered = false;
    user_return_notifier_unregister(urn);
}

static void shared_msr_update(unsigned slot, u32 msr)
{
    struct kvm_shared_msrs *smsr;
    u64 value;

    smsr = &__get_cpu_var(shared_msrs);
    /* shared_msrs_global is only read here, and nobody should be
     * modifying it at this time, so no lock is needed */
    if (slot >= shared_msrs_global.nr) {
        printk(KERN_ERR "kvm: invalid MSR slot!");
        return;
    }
    rdmsrl_safe(msr, &value);
    smsr->values[slot].host = value;
    smsr->values[slot].curr = value;
}

void kvm_define_shared_msr(unsigned slot, u32 msr)
{
    if (slot >= shared_msrs_global.nr)
        shared_msrs_global.nr = slot + 1;
    shared_msrs_global.msrs[slot] = msr;
    /* make sure the update to shared_msrs_global is visible before use */
    smp_wmb();
}
EXPORT_SYMBOL_GPL(kvm_define_shared_msr);

static void kvm_shared_msr_cpu_online(void)
{
    unsigned i;

    for (i = 0; i < shared_msrs_global.nr; ++i)
        shared_msr_update(i, shared_msrs_global.msrs[i]);
}

void kvm_set_shared_msr(unsigned slot, u64 value, u64 mask)
{
    struct kvm_shared_msrs *smsr = &__get_cpu_var(shared_msrs);

    if (((value ^ smsr->values[slot].curr) & mask) == 0)
        return;
    smsr->values[slot].curr = value;
    wrmsrl(shared_msrs_global.msrs[slot], value);
    if (!smsr->registered) {
        smsr->urn.on_user_return = kvm_on_user_return;
        user_return_notifier_register(&smsr->urn);
        smsr->registered = true;
    }
}
EXPORT_SYMBOL_GPL(kvm_set_shared_msr);
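
/*
 * Usage sketch (for orientation; the callers live in the vendor modules,
 * not in this file): vmx/svm are expected to call kvm_define_shared_msr()
 * once per guest/host-switched MSR at module init, kvm_shared_msr_cpu_online()
 * when a CPU comes online to snapshot the host values, and
 * kvm_set_shared_msr() before entering the guest.  The user-return notifier
 * registered above then restores the host values lazily, on the next return
 * to userspace rather than on every vmexit.
 */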

static void drop_user_return_notifiers(void *ignore)
{
    struct kvm_shared_msrs *smsr = &__get_cpu_var(shared_msrs);

    if (smsr->registered)
        kvm_on_user_return(&smsr->urn);
}

u64 kvm_get_apic_base(struct kvm_vcpu *vcpu)
{
    if (irqchip_in_kernel(vcpu->kvm))
        return vcpu->arch.apic_base;
    else
        return vcpu->arch.apic_base;
}
EXPORT_SYMBOL_GPL(kvm_get_apic_base);

void kvm_set_apic_base(struct kvm_vcpu *vcpu, u64 data)
{
    /* TODO: reserve bits check */
    if (irqchip_in_kernel(vcpu->kvm))
        kvm_lapic_set_base(vcpu, data);
    else
        vcpu->arch.apic_base = data;
}
EXPORT_SYMBOL_GPL(kvm_set_apic_base);

#define EXCPT_BENIGN        0
#define EXCPT_CONTRIBUTORY  1
#define EXCPT_PF            2

static int exception_class(int vector)
{
    switch (vector) {
    case PF_VECTOR:
        return EXCPT_PF;
    case DE_VECTOR:
    case TS_VECTOR:
    case NP_VECTOR:
    case SS_VECTOR:
    case GP_VECTOR:
        return EXCPT_CONTRIBUTORY;
    default:
        break;
    }
    return EXCPT_BENIGN;
}
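
/*
 * How the classes above combine (cf. SDM Table 5-5): a contributory
 * exception raised while another contributory exception is already pending,
 * or any non-benign exception raised while a page fault is pending, is
 * promoted to #DF; a pending #DF followed by anything escalates to a triple
 * fault.  All other combinations are delivered serially, which is why
 * kvm_multiple_exception() below simply replaces the pending exception in
 * those cases.
 */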

static void kvm_multiple_exception(struct kvm_vcpu *vcpu,
        unsigned nr, bool has_error, u32 error_code,
        bool reinject)
{
    u32 prev_nr;
    int class1, class2;

    kvm_make_request(KVM_REQ_EVENT, vcpu);

    if (!vcpu->arch.exception.pending) {
    queue:
        vcpu->arch.exception.pending = true;
        vcpu->arch.exception.has_error_code = has_error;
        vcpu->arch.exception.nr = nr;
        vcpu->arch.exception.error_code = error_code;
        vcpu->arch.exception.reinject = reinject;
        return;
    }

    /* an exception is already pending; decide how the new one combines */
    prev_nr = vcpu->arch.exception.nr;
    if (prev_nr == DF_VECTOR) {
        /* triple fault -> shutdown */
        kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
        return;
    }
    class1 = exception_class(prev_nr);
    class2 = exception_class(nr);
    if ((class1 == EXCPT_CONTRIBUTORY && class2 == EXCPT_CONTRIBUTORY)
        || (class1 == EXCPT_PF && class2 != EXCPT_BENIGN)) {
        /* generate double fault per SDM Table 5-5 */
        vcpu->arch.exception.pending = true;
        vcpu->arch.exception.has_error_code = true;
        vcpu->arch.exception.nr = DF_VECTOR;
        vcpu->arch.exception.error_code = 0;
    } else
        /* replace the previous exception with the new one, in the hope
           that instruction re-execution will regenerate the lost
           exception */
        goto queue;
}

void kvm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr)
{
    kvm_multiple_exception(vcpu, nr, false, 0, false);
}
EXPORT_SYMBOL_GPL(kvm_queue_exception);

void kvm_requeue_exception(struct kvm_vcpu *vcpu, unsigned nr)
{
    kvm_multiple_exception(vcpu, nr, false, 0, true);
}
EXPORT_SYMBOL_GPL(kvm_requeue_exception);

void kvm_complete_insn_gp(struct kvm_vcpu *vcpu, int err)
{
    if (err)
        kvm_inject_gp(vcpu, 0);
    else
        kvm_x86_ops->skip_emulated_instruction(vcpu);
}
EXPORT_SYMBOL_GPL(kvm_complete_insn_gp);

void kvm_inject_page_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault)
{
    ++vcpu->stat.pf_guest;
    vcpu->arch.cr2 = fault->address;
    kvm_queue_exception_e(vcpu, PF_VECTOR, fault->error_code);
}

void kvm_propagate_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault)
{
    if (mmu_is_nested(vcpu) && !fault->nested_page_fault)
        vcpu->arch.nested_mmu.inject_page_fault(vcpu, fault);
    else
        vcpu->arch.mmu.inject_page_fault(vcpu, fault);
}

void kvm_inject_nmi(struct kvm_vcpu *vcpu)
{
    kvm_make_request(KVM_REQ_EVENT, vcpu);
    vcpu->arch.nmi_pending = 1;
}
EXPORT_SYMBOL_GPL(kvm_inject_nmi);

void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code)
{
    kvm_multiple_exception(vcpu, nr, true, error_code, false);
}
EXPORT_SYMBOL_GPL(kvm_queue_exception_e);

void kvm_requeue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code)
{
    kvm_multiple_exception(vcpu, nr, true, error_code, true);
}
EXPORT_SYMBOL_GPL(kvm_requeue_exception_e);

/*
 * Checks if cpl <= required_cpl; if true, return true.  Otherwise queue
 * a #GP and return false.
 */
bool kvm_require_cpl(struct kvm_vcpu *vcpu, int required_cpl)
{
    if (kvm_x86_ops->get_cpl(vcpu) <= required_cpl)
        return true;
    kvm_queue_exception_e(vcpu, GP_VECTOR, 0);
    return false;
}
EXPORT_SYMBOL_GPL(kvm_require_cpl);

/*
 * This function is used to read from the physical memory of the currently
 * running guest.  The difference from kvm_read_guest_page is that this
 * function can read from guest physical or from the guest's guest
 * physical memory.
 */
int kvm_read_guest_page_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
        gfn_t ngfn, void *data, int offset, int len,
        u32 access)
{
    gfn_t real_gfn;
    gpa_t ngpa;

    ngpa = gfn_to_gpa(ngfn);
    real_gfn = mmu->translate_gpa(vcpu, ngpa, access);
    if (real_gfn == UNMAPPED_GVA)
        return -EFAULT;

    real_gfn = gpa_to_gfn(real_gfn);

    return kvm_read_guest_page(vcpu->kvm, real_gfn, data, offset, len);
}
EXPORT_SYMBOL_GPL(kvm_read_guest_page_mmu);

int kvm_read_nested_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn,
        void *data, int offset, int len, u32 access)
{
    return kvm_read_guest_page_mmu(vcpu, vcpu->arch.walk_mmu, gfn,
            data, offset, len, access);
}

/*
 * Load the PAE PDPTRs.  Return 1 if they are all valid, 0 otherwise.
 */
int load_pdptrs(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, unsigned long cr3)
{
    gfn_t pdpt_gfn = cr3 >> PAGE_SHIFT;
    unsigned offset = ((cr3 & (PAGE_SIZE-1)) >> 5) << 2;
    int i;
    int ret;
    u64 pdpte[ARRAY_SIZE(mmu->pdptrs)];

    ret = kvm_read_guest_page_mmu(vcpu, mmu, pdpt_gfn, pdpte,
            offset * sizeof(u64), sizeof(pdpte),
            PFERR_USER_MASK | PFERR_WRITE_MASK);
    if (ret < 0) {
        ret = 0;
        goto out;
    }
    for (i = 0; i < ARRAY_SIZE(pdpte); ++i) {
        if (is_present_gpte(pdpte[i]) &&
            (pdpte[i] & vcpu->arch.mmu.rsvd_bits_mask[0][2])) {
            ret = 0;
            goto out;
        }
    }
    ret = 1;

    memcpy(mmu->pdptrs, pdpte, sizeof(mmu->pdptrs));
    __set_bit(VCPU_EXREG_PDPTR,
            (unsigned long *)&vcpu->arch.regs_avail);
    __set_bit(VCPU_EXREG_PDPTR,
            (unsigned long *)&vcpu->arch.regs_dirty);
out:

    return ret;
}
EXPORT_SYMBOL_GPL(load_pdptrs);
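
/*
 * A note on the address arithmetic above and in pdptrs_changed() below:
 * in PAE mode the PDPT is a 32-byte-aligned table of four 8-byte entries,
 * located at CR3 with the low 5 bits masked off.  Hence
 * ((cr3 & (PAGE_SIZE-1)) >> 5) picks the 32-byte slot inside the page and
 * the "<< 2" turns that into an index in units of u64 entries, which is
 * then scaled by sizeof(u64) when passed as a byte offset to the guest
 * page reader.
 */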

static bool pdptrs_changed(struct kvm_vcpu *vcpu)
{
    u64 pdpte[ARRAY_SIZE(vcpu->arch.walk_mmu->pdptrs)];
    bool changed = true;
    int offset;
    gfn_t gfn;
    int r;

    if (is_long_mode(vcpu) || !is_pae(vcpu))
        return false;

    if (!test_bit(VCPU_EXREG_PDPTR,
            (unsigned long *)&vcpu->arch.regs_avail))
        return true;

    gfn = (kvm_read_cr3(vcpu) & ~31u) >> PAGE_SHIFT;
    offset = (kvm_read_cr3(vcpu) & ~31u) & (PAGE_SIZE - 1);
    r = kvm_read_nested_guest_page(vcpu, gfn, pdpte, offset, sizeof(pdpte),
            PFERR_USER_MASK | PFERR_WRITE_MASK);
    if (r < 0)
        goto out;
    changed = memcmp(pdpte, vcpu->arch.walk_mmu->pdptrs, sizeof(pdpte)) != 0;
out:

    return changed;
}

int kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
{
    unsigned long old_cr0 = kvm_read_cr0(vcpu);
    unsigned long update_bits = X86_CR0_PG | X86_CR0_WP |
                    X86_CR0_CD | X86_CR0_NW;

    cr0 |= X86_CR0_ET;

#ifdef CONFIG_X86_64
    if (cr0 & 0xffffffff00000000UL)
        return 1;
#endif

    cr0 &= ~CR0_RESERVED_BITS;

    if ((cr0 & X86_CR0_NW) && !(cr0 & X86_CR0_CD))
        return 1;

    if ((cr0 & X86_CR0_PG) && !(cr0 & X86_CR0_PE))
        return 1;

    if (!is_paging(vcpu) && (cr0 & X86_CR0_PG)) {
#ifdef CONFIG_X86_64
        if ((vcpu->arch.efer & EFER_LME)) {
            int cs_db, cs_l;

            if (!is_pae(vcpu))
                return 1;
            kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
            if (cs_l)
                return 1;
        } else
#endif
        if (is_pae(vcpu) && !load_pdptrs(vcpu, vcpu->arch.walk_mmu,
                kvm_read_cr3(vcpu)))
            return 1;
    }

    kvm_x86_ops->set_cr0(vcpu, cr0);

    if ((cr0 ^ old_cr0) & X86_CR0_PG) {
        kvm_clear_async_pf_completion_queue(vcpu);
        kvm_async_pf_hash_reset(vcpu);
    }

    if ((cr0 ^ old_cr0) & update_bits)
        kvm_mmu_reset_context(vcpu);
    return 0;
}
EXPORT_SYMBOL_GPL(kvm_set_cr0);

void kvm_lmsw(struct kvm_vcpu *vcpu, unsigned long msw)
{
    (void)kvm_set_cr0(vcpu, kvm_read_cr0_bits(vcpu, ~0x0eul) | (msw & 0x0f));
}
EXPORT_SYMBOL_GPL(kvm_lmsw);

int __kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
{
    u64 xcr0;

    /* Only support XCR_XFEATURE_ENABLED_MASK(xcr0) now */
    if (index != XCR_XFEATURE_ENABLED_MASK)
        return 1;
    xcr0 = xcr;
    if (kvm_x86_ops->get_cpl(vcpu) != 0)
        return 1;
    if (!(xcr0 & XSTATE_FP))
        return 1;
    if ((xcr0 & XSTATE_YMM) && !(xcr0 & XSTATE_SSE))
        return 1;
    if (xcr0 & ~host_xcr0)
        return 1;
    vcpu->arch.xcr0 = xcr0;
    vcpu->guest_xcr0_loaded = 0;
    return 0;
}

int kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
{
    if (__kvm_set_xcr(vcpu, index, xcr)) {
        kvm_inject_gp(vcpu, 0);
        return 1;
    }
    return 0;
}
EXPORT_SYMBOL_GPL(kvm_set_xcr);

static bool guest_cpuid_has_xsave(struct kvm_vcpu *vcpu)
{
    struct kvm_cpuid_entry2 *best;

    best = kvm_find_cpuid_entry(vcpu, 1, 0);
    return best && (best->ecx & bit(X86_FEATURE_XSAVE));
}

static void update_cpuid(struct kvm_vcpu *vcpu)
{
    struct kvm_cpuid_entry2 *best;

    best = kvm_find_cpuid_entry(vcpu, 1, 0);
    if (!best)
        return;

    /* Update OSXSAVE bit */
    if (cpu_has_xsave && best->function == 0x1) {
        best->ecx &= ~(bit(X86_FEATURE_OSXSAVE));
        if (kvm_read_cr4_bits(vcpu, X86_CR4_OSXSAVE))
            best->ecx |= bit(X86_FEATURE_OSXSAVE);
    }
}

int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
{
    unsigned long old_cr4 = kvm_read_cr4(vcpu);
    unsigned long pdptr_bits = X86_CR4_PGE | X86_CR4_PSE | X86_CR4_PAE;

    if (cr4 & CR4_RESERVED_BITS)
        return 1;

    if (!guest_cpuid_has_xsave(vcpu) && (cr4 & X86_CR4_OSXSAVE))
        return 1;

    if (is_long_mode(vcpu)) {
        if (!(cr4 & X86_CR4_PAE))
            return 1;
    } else if (is_paging(vcpu) && (cr4 & X86_CR4_PAE)
           && ((cr4 ^ old_cr4) & pdptr_bits)
           && !load_pdptrs(vcpu, vcpu->arch.walk_mmu,
                   kvm_read_cr3(vcpu)))
        return 1;

    if (cr4 & X86_CR4_VMXE)
        return 1;

    kvm_x86_ops->set_cr4(vcpu, cr4);

    if ((cr4 ^ old_cr4) & pdptr_bits)
        kvm_mmu_reset_context(vcpu);

    if ((cr4 ^ old_cr4) & X86_CR4_OSXSAVE)
        update_cpuid(vcpu);

    return 0;
}
EXPORT_SYMBOL_GPL(kvm_set_cr4);

int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
{
    if (cr3 == kvm_read_cr3(vcpu) && !pdptrs_changed(vcpu)) {
        kvm_mmu_sync_roots(vcpu);
        kvm_mmu_flush_tlb(vcpu);
        return 0;
    }

    if (is_long_mode(vcpu)) {
        if (cr3 & CR3_L_MODE_RESERVED_BITS)
            return 1;
    } else {
        if (is_pae(vcpu)) {
            if (cr3 & CR3_PAE_RESERVED_BITS)
                return 1;
            if (is_paging(vcpu) &&
                !load_pdptrs(vcpu, vcpu->arch.walk_mmu, cr3))
                return 1;
        }
        /*
         * We don't check reserved bits in nonpae mode, because
         * this isn't enforced, and VMware depends on this.
         */
    }

    /*
     * Does the new cr3 value map to physical memory? (Note, we
     * catch an invalid cr3 even in real-mode, because it would
     * cause trouble later on when we turn on paging anyway.)
     *
     * A real CPU would silently accept an invalid cr3 and would
     * attempt to use it - with largely undefined (and often hard
     * to debug) behavior on the guest side.
     */
    if (unlikely(!gfn_to_memslot(vcpu->kvm, cr3 >> PAGE_SHIFT)))
        return 1;
    vcpu->arch.cr3 = cr3;
    __set_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail);
    vcpu->arch.mmu.new_cr3(vcpu);
    return 0;
}
EXPORT_SYMBOL_GPL(kvm_set_cr3);

int kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8)
{
    if (cr8 & CR8_RESERVED_BITS)
        return 1;
    if (irqchip_in_kernel(vcpu->kvm))
        kvm_lapic_set_tpr(vcpu, cr8);
    else
        vcpu->arch.cr8 = cr8;
    return 0;
}
EXPORT_SYMBOL_GPL(kvm_set_cr8);

unsigned long kvm_get_cr8(struct kvm_vcpu *vcpu)
{
    if (irqchip_in_kernel(vcpu->kvm))
        return kvm_lapic_get_cr8(vcpu);
    else
        return vcpu->arch.cr8;
}
EXPORT_SYMBOL_GPL(kvm_get_cr8);
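
/*
 * The DR4 and DR5 cases below follow the architectural aliasing rules: when
 * CR4.DE is set, any access to them raises #UD (return 1); when CR4.DE is
 * clear, they simply alias DR6 and DR7, which is why those cases fall
 * through.  A return value of -1 means the caller should inject #GP instead.
 */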

static int __kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val)
{
    switch (dr) {
    case 0 ... 3:
        vcpu->arch.db[dr] = val;
        if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP))
            vcpu->arch.eff_db[dr] = val;
        break;
    case 4:
        if (kvm_read_cr4_bits(vcpu, X86_CR4_DE))
            return 1; /* #UD */
        /* fall through */
    case 6:
        if (val & 0xffffffff00000000ULL)
            return -1; /* #GP */
        vcpu->arch.dr6 = (val & DR6_VOLATILE) | DR6_FIXED_1;
        break;
    case 5:
        if (kvm_read_cr4_bits(vcpu, X86_CR4_DE))
            return 1; /* #UD */
        /* fall through */
    default: /* 7 */
        if (val & 0xffffffff00000000ULL)
            return -1; /* #GP */
        vcpu->arch.dr7 = (val & DR7_VOLATILE) | DR7_FIXED_1;
        if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)) {
            kvm_x86_ops->set_dr7(vcpu, vcpu->arch.dr7);
            vcpu->arch.switch_db_regs = (val & DR7_BP_EN_MASK);
        }
        break;
    }

    return 0;
}

int kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val)
{
    int res;

    res = __kvm_set_dr(vcpu, dr, val);
    if (res > 0)
        kvm_queue_exception(vcpu, UD_VECTOR);
    else if (res < 0)
        kvm_inject_gp(vcpu, 0);

    return res;
}
EXPORT_SYMBOL_GPL(kvm_set_dr);

static int _kvm_get_dr(struct kvm_vcpu *vcpu, int dr, unsigned long *val)
{
    switch (dr) {
    case 0 ... 3:
        *val = vcpu->arch.db[dr];
        break;
    case 4:
        if (kvm_read_cr4_bits(vcpu, X86_CR4_DE))
            return 1;
        /* fall through */
    case 6:
        *val = vcpu->arch.dr6;
        break;
    case 5:
        if (kvm_read_cr4_bits(vcpu, X86_CR4_DE))
            return 1;
        /* fall through */
    default: /* 7 */
        *val = vcpu->arch.dr7;
        break;
    }

    return 0;
}

int kvm_get_dr(struct kvm_vcpu *vcpu, int dr, unsigned long *val)
{
    if (_kvm_get_dr(vcpu, dr, val)) {
        kvm_queue_exception(vcpu, UD_VECTOR);
        return 1;
    }
    return 0;
}
EXPORT_SYMBOL_GPL(kvm_get_dr);

/*
 * List of msr numbers which we expose to userspace through KVM_GET_MSRS
 * and KVM_SET_MSRS, and KVM_GET_MSR_INDEX_LIST.
 *
 * This list is modified at module load time to reflect the
 * capabilities of the host cpu.  This capabilities test skips MSRs that are
 * kvm-specific.  Those are put in the beginning of the list.
 */

#define KVM_SAVE_MSRS_BEGIN 8
static u32 msrs_to_save[] = {
    MSR_KVM_SYSTEM_TIME, MSR_KVM_WALL_CLOCK,
    MSR_KVM_SYSTEM_TIME_NEW, MSR_KVM_WALL_CLOCK_NEW,
    HV_X64_MSR_GUEST_OS_ID, HV_X64_MSR_HYPERCALL,
    HV_X64_MSR_APIC_ASSIST_PAGE, MSR_KVM_ASYNC_PF_EN,
    MSR_IA32_SYSENTER_CS, MSR_IA32_SYSENTER_ESP, MSR_IA32_SYSENTER_EIP,
    MSR_STAR,
#ifdef CONFIG_X86_64
    MSR_CSTAR, MSR_KERNEL_GS_BASE, MSR_SYSCALL_MASK, MSR_LSTAR,
#endif
    MSR_IA32_TSC, MSR_IA32_CR_PAT, MSR_VM_HSAVE_PA
};

static unsigned num_msrs_to_save;

static u32 emulated_msrs[] = {
    MSR_IA32_MISC_ENABLE,
    MSR_IA32_MCG_STATUS,
    MSR_IA32_MCG_CTL,
};

static int set_efer(struct kvm_vcpu *vcpu, u64 efer)
{
    u64 old_efer = vcpu->arch.efer;

    if (efer & efer_reserved_bits)
        return 1;

    if (is_paging(vcpu)
        && (vcpu->arch.efer & EFER_LME) != (efer & EFER_LME))
        return 1;

    if (efer & EFER_FFXSR) {
        struct kvm_cpuid_entry2 *feat;

        feat = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
        if (!feat || !(feat->edx & bit(X86_FEATURE_FXSR_OPT)))
            return 1;
    }

    if (efer & EFER_SVME) {
        struct kvm_cpuid_entry2 *feat;

        feat = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
        if (!feat || !(feat->ecx & bit(X86_FEATURE_SVM)))
            return 1;
    }

    efer &= ~EFER_LMA;
    efer |= vcpu->arch.efer & EFER_LMA;

    kvm_x86_ops->set_efer(vcpu, efer);

    vcpu->arch.mmu.base_role.nxe = (efer & EFER_NX) && !tdp_enabled;

    /* Update reserved bits */
    if ((efer ^ old_efer) & EFER_NX)
        kvm_mmu_reset_context(vcpu);

    return 0;
}

void kvm_enable_efer_bits(u64 mask)
{
    efer_reserved_bits &= ~mask;
}
EXPORT_SYMBOL_GPL(kvm_enable_efer_bits);

/*
 * Writes the msr value into the appropriate "register".
 * Returns 0 on success, non-0 otherwise.
 * Assumes vcpu_load() was already called.
 */
int kvm_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
{
    return kvm_x86_ops->set_msr(vcpu, msr_index, data);
}

/*
 * Adapt set_msr() to msr_io()'s calling convention
 */
static int do_set_msr(struct kvm_vcpu *vcpu, unsigned index, u64 *data)
{
    return kvm_set_msr(vcpu, index, *data);
}

static void kvm_write_wall_clock(struct kvm *kvm, gpa_t wall_clock)
{
    int version;
    int r;
    struct pvclock_wall_clock wc;
    struct timespec boot;

    if (!wall_clock)
        return;

    r = kvm_read_guest(kvm, wall_clock, &version, sizeof(version));
    if (r)
        return;

    if (version & 1)
        ++version;  /* first time write, random junk */

    ++version;

    kvm_write_guest(kvm, wall_clock, &version, sizeof(version));

    /*
     * The guest calculates current wall clock time by adding
     * system time (updated by kvm_guest_time_update below) to the
     * wall clock specified here.  Guest system time equals host
     * system time for us, thus we must fill in host boot time here.
     */
    getboottime(&boot);

    wc.sec = boot.tv_sec;
    wc.nsec = boot.tv_nsec;
    wc.version = version;

    kvm_write_guest(kvm, wall_clock, &wc, sizeof(wc));

    version++;
    kvm_write_guest(kvm, wall_clock, &version, sizeof(version));
}
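
/*
 * The version field above implements the usual pvclock seqcount protocol:
 * it is bumped to an odd value before the payload is written and to an even
 * value afterwards.  A guest reader is expected to do roughly the following
 * (illustrative sketch, not code from this file):
 *
 *    do {
 *        v = wall_clock->version;
 *        rmb();
 *        sec  = wall_clock->sec;
 *        nsec = wall_clock->nsec;
 *        rmb();
 *    } while ((v & 1) || v != wall_clock->version);
 *
 * i.e. retry while an update is in progress or the version changed mid-read.
 */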

static uint32_t div_frac(uint32_t dividend, uint32_t divisor)
{
    uint32_t quotient, remainder;

    /* Don't try to replace with do_div(), this one calculates
     * "(dividend << 32) / divisor" */
    __asm__ ( "divl %4"
          : "=a" (quotient), "=d" (remainder)
          : "0" (0), "1" (dividend), "r" (divisor) );
    return quotient;
}

static void kvm_get_time_scale(uint32_t scaled_khz, uint32_t base_khz,
               s8 *pshift, u32 *pmultiplier)
{
    uint64_t scaled64;
    int32_t  shift = 0;
    uint64_t tps64;
    uint32_t tps32;

    tps64 = base_khz * 1000LL;
    scaled64 = scaled_khz * 1000LL;
    while (tps64 > scaled64*2 || tps64 & 0xffffffff00000000ULL) {
        tps64 >>= 1;
        shift--;
    }

    tps32 = (uint32_t)tps64;
    while (tps32 <= scaled64 || scaled64 & 0xffffffff00000000ULL) {
        if (scaled64 & 0xffffffff00000000ULL || tps32 & 0x80000000)
            scaled64 >>= 1;
        else
            tps32 <<= 1;
        shift++;
    }

    *pshift = shift;
    *pmultiplier = div_frac(scaled64, tps32);

    pr_debug("%s: base_khz %u => %u, shift %d, mul %u\n",
         __func__, base_khz, scaled_khz, shift, *pmultiplier);
}
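
/*
 * The (shift, multiplier) pair produced above is meant to be applied as in
 * pvclock_scale_delta(): shift the delta left by "shift" (right if shift is
 * negative), then take the high 32 bits of the product with "multiplier",
 * i.e. roughly
 *
 *    scaled = ((delta << shift) * multiplier) >> 32;
 *
 * It is used in both directions below: guest-TSC-to-nanoseconds for the
 * kvmclock page, and nanoseconds-to-guest-TSC for TSC catchup.
 */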

static inline u64 get_kernel_ns(void)
{
    struct timespec ts;

    WARN_ON(preemptible());
    ktime_get_ts(&ts);
    monotonic_to_bootbased(&ts);
    return timespec_to_ns(&ts);
}

static DEFINE_PER_CPU(unsigned long, cpu_tsc_khz);
unsigned long max_tsc_khz;

static inline int kvm_tsc_changes_freq(void)
{
    int cpu = get_cpu();
    int ret = !boot_cpu_has(X86_FEATURE_CONSTANT_TSC) &&
          cpufreq_quick_get(cpu) != 0;
    put_cpu();
    return ret;
}

static u64 vcpu_tsc_khz(struct kvm_vcpu *vcpu)
{
    if (vcpu->arch.virtual_tsc_khz)
        return vcpu->arch.virtual_tsc_khz;
    else
        return __this_cpu_read(cpu_tsc_khz);
}

static inline u64 nsec_to_cycles(struct kvm_vcpu *vcpu, u64 nsec)
{
    u64 ret;

    WARN_ON(preemptible());
    if (kvm_tsc_changes_freq())
        printk_once(KERN_WARNING
             "kvm: unreliable cycle conversion on adjustable rate TSC\n");
    ret = nsec * vcpu_tsc_khz(vcpu);
    do_div(ret, USEC_PER_SEC);
    return ret;
}
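
/*
 * nsec_to_cycles() computes nsec * tsc_khz / USEC_PER_SEC, i.e. nanoseconds
 * times cycles-per-nanosecond.  For example (illustrative numbers only):
 * with a 2 GHz TSC (tsc_khz = 2,000,000), the 5-second window used by
 * kvm_write_tsc() below corresponds to 5e9 ns * 2e6 / 1e6 = 1e10 cycles.
 */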

static void kvm_init_tsc_catchup(struct kvm_vcpu *vcpu, u32 this_tsc_khz)
{
    /* Compute a scale to convert nanoseconds in TSC cycles */
    kvm_get_time_scale(this_tsc_khz, NSEC_PER_SEC / 1000,
               &vcpu->arch.tsc_catchup_shift,
               &vcpu->arch.tsc_catchup_mult);
}

static u64 compute_guest_tsc(struct kvm_vcpu *vcpu, s64 kernel_ns)
{
    u64 tsc = pvclock_scale_delta(kernel_ns-vcpu->arch.last_tsc_nsec,
                      vcpu->arch.tsc_catchup_mult,
                      vcpu->arch.tsc_catchup_shift);
    tsc += vcpu->arch.last_tsc_write;
    return tsc;
}

void kvm_write_tsc(struct kvm_vcpu *vcpu, u64 data)
{
    struct kvm *kvm = vcpu->kvm;
    u64 offset, ns, elapsed;
    unsigned long flags;
    s64 sdiff;

    raw_spin_lock_irqsave(&kvm->arch.tsc_write_lock, flags);
    offset = kvm_x86_ops->compute_tsc_offset(vcpu, data);
    ns = get_kernel_ns();
    elapsed = ns - kvm->arch.last_tsc_nsec;
    sdiff = data - kvm->arch.last_tsc_write;
    if (sdiff < 0)
        sdiff = -sdiff;

    /*
     * Special case: a close write to the TSC within 5 seconds of
     * another CPU is interpreted as an attempt to synchronize them.
     * The 5 seconds is to accommodate host load / swapping as
     * well as any reset of TSC during the boot process.
     *
     * In that case, for a reliable TSC, we can match TSC offsets,
     * or make a best guess using the elapsed value.
     */
    if (sdiff < nsec_to_cycles(vcpu, 5ULL * NSEC_PER_SEC) &&
        elapsed < 5ULL * NSEC_PER_SEC) {
        if (!check_tsc_unstable()) {
            offset = kvm->arch.last_tsc_offset;
            pr_debug("kvm: matched tsc offset for %llu\n", data);
        } else {
            u64 delta = nsec_to_cycles(vcpu, elapsed);
            offset += delta;
            pr_debug("kvm: adjusted tsc offset by %llu\n", delta);
        }
        ns = kvm->arch.last_tsc_nsec;
    }
    kvm->arch.last_tsc_nsec = ns;
    kvm->arch.last_tsc_write = data;
    kvm->arch.last_tsc_offset = offset;
    kvm_x86_ops->write_tsc_offset(vcpu, offset);
    raw_spin_unlock_irqrestore(&kvm->arch.tsc_write_lock, flags);

    /* Reset of TSC must disable overshoot protection below */
    vcpu->arch.hv_clock.tsc_timestamp = 0;
    vcpu->arch.last_tsc_write = data;
    vcpu->arch.last_tsc_nsec = ns;
}
EXPORT_SYMBOL_GPL(kvm_write_tsc);
  914. static int kvm_guest_time_update(struct kvm_vcpu *v)
  915. {
  916. unsigned long flags;
  917. struct kvm_vcpu_arch *vcpu = &v->arch;
  918. void *shared_kaddr;
  919. unsigned long this_tsc_khz;
  920. s64 kernel_ns, max_kernel_ns;
  921. u64 tsc_timestamp;
  922. /* Keep irq disabled to prevent changes to the clock */
  923. local_irq_save(flags);
  924. kvm_get_msr(v, MSR_IA32_TSC, &tsc_timestamp);
  925. kernel_ns = get_kernel_ns();
  926. this_tsc_khz = vcpu_tsc_khz(v);
  927. if (unlikely(this_tsc_khz == 0)) {
  928. local_irq_restore(flags);
  929. kvm_make_request(KVM_REQ_CLOCK_UPDATE, v);
  930. return 1;
  931. }
  932. /*
  933. * We may have to catch up the TSC to match elapsed wall clock
  934. * time for two reasons, even if kvmclock is used.
  935. * 1) CPU could have been running below the maximum TSC rate
  936. * 2) Broken TSC compensation resets the base at each VCPU
  937. * entry to avoid unknown leaps of TSC even when running
  938. * again on the same CPU. This may cause apparent elapsed
  939. * time to disappear, and the guest to stand still or run
  940. * very slowly.
  941. */
  942. if (vcpu->tsc_catchup) {
  943. u64 tsc = compute_guest_tsc(v, kernel_ns);
  944. if (tsc > tsc_timestamp) {
  945. kvm_x86_ops->adjust_tsc_offset(v, tsc - tsc_timestamp);
  946. tsc_timestamp = tsc;
  947. }
  948. }
  949. local_irq_restore(flags);
  950. if (!vcpu->time_page)
  951. return 0;
  952. /*
  953. * Time as measured by the TSC may go backwards when resetting the base
  954. * tsc_timestamp. The reason for this is that the TSC resolution is
  955. * higher than the resolution of the other clock scales. Thus, many
956. * possible measurements of the TSC correspond to one measurement of any
  957. * other clock, and so a spread of values is possible. This is not a
  958. * problem for the computation of the nanosecond clock; with TSC rates
959. * around 1GHz, there can only be a few cycles which correspond to one
  960. * nanosecond value, and any path through this code will inevitably
  961. * take longer than that. However, with the kernel_ns value itself,
  962. * the precision may be much lower, down to HZ granularity. If the
  963. * first sampling of TSC against kernel_ns ends in the low part of the
  964. * range, and the second in the high end of the range, we can get:
  965. *
  966. * (TSC - offset_low) * S + kns_old > (TSC - offset_high) * S + kns_new
  967. *
  968. * As the sampling errors potentially range in the thousands of cycles,
  969. * it is possible such a time value has already been observed by the
  970. * guest. To protect against this, we must compute the system time as
  971. * observed by the guest and ensure the new system time is greater.
  972. */
  973. max_kernel_ns = 0;
  974. if (vcpu->hv_clock.tsc_timestamp && vcpu->last_guest_tsc) {
  975. max_kernel_ns = vcpu->last_guest_tsc -
  976. vcpu->hv_clock.tsc_timestamp;
  977. max_kernel_ns = pvclock_scale_delta(max_kernel_ns,
  978. vcpu->hv_clock.tsc_to_system_mul,
  979. vcpu->hv_clock.tsc_shift);
  980. max_kernel_ns += vcpu->last_kernel_ns;
  981. }
  982. if (unlikely(vcpu->hw_tsc_khz != this_tsc_khz)) {
  983. kvm_get_time_scale(NSEC_PER_SEC / 1000, this_tsc_khz,
  984. &vcpu->hv_clock.tsc_shift,
  985. &vcpu->hv_clock.tsc_to_system_mul);
  986. vcpu->hw_tsc_khz = this_tsc_khz;
  987. }
  988. if (max_kernel_ns > kernel_ns)
  989. kernel_ns = max_kernel_ns;
  990. /* With all the info we got, fill in the values */
  991. vcpu->hv_clock.tsc_timestamp = tsc_timestamp;
  992. vcpu->hv_clock.system_time = kernel_ns + v->kvm->arch.kvmclock_offset;
  993. vcpu->last_kernel_ns = kernel_ns;
  994. vcpu->last_guest_tsc = tsc_timestamp;
  995. vcpu->hv_clock.flags = 0;
  996. /*
  997. * The interface expects us to write an even number signaling that the
  998. * update is finished. Since the guest won't see the intermediate
  999. * state, we just increase by 2 at the end.
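* A guest following the pvclock protocol samples the version before and
* after reading the fields and retries while it is odd or has changed.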
  1000. */
  1001. vcpu->hv_clock.version += 2;
  1002. shared_kaddr = kmap_atomic(vcpu->time_page, KM_USER0);
  1003. memcpy(shared_kaddr + vcpu->time_offset, &vcpu->hv_clock,
  1004. sizeof(vcpu->hv_clock));
  1005. kunmap_atomic(shared_kaddr, KM_USER0);
  1006. mark_page_dirty(v->kvm, vcpu->time >> PAGE_SHIFT);
  1007. return 0;
  1008. }
  1009. static bool msr_mtrr_valid(unsigned msr)
  1010. {
  1011. switch (msr) {
  1012. case 0x200 ... 0x200 + 2 * KVM_NR_VAR_MTRR - 1:
  1013. case MSR_MTRRfix64K_00000:
  1014. case MSR_MTRRfix16K_80000:
  1015. case MSR_MTRRfix16K_A0000:
  1016. case MSR_MTRRfix4K_C0000:
  1017. case MSR_MTRRfix4K_C8000:
  1018. case MSR_MTRRfix4K_D0000:
  1019. case MSR_MTRRfix4K_D8000:
  1020. case MSR_MTRRfix4K_E0000:
  1021. case MSR_MTRRfix4K_E8000:
  1022. case MSR_MTRRfix4K_F0000:
  1023. case MSR_MTRRfix4K_F8000:
  1024. case MSR_MTRRdefType:
  1025. case MSR_IA32_CR_PAT:
  1026. return true;
  1027. case 0x2f8:
  1028. return true;
  1029. }
  1030. return false;
  1031. }
  1032. static bool valid_pat_type(unsigned t)
  1033. {
  1034. return t < 8 && (1 << t) & 0xf3; /* 0, 1, 4, 5, 6, 7 */
  1035. }
  1036. static bool valid_mtrr_type(unsigned t)
  1037. {
  1038. return t < 8 && (1 << t) & 0x73; /* 0, 1, 4, 5, 6 */
  1039. }
  1040. static bool mtrr_valid(struct kvm_vcpu *vcpu, u32 msr, u64 data)
  1041. {
  1042. int i;
  1043. if (!msr_mtrr_valid(msr))
  1044. return false;
  1045. if (msr == MSR_IA32_CR_PAT) {
  1046. for (i = 0; i < 8; i++)
  1047. if (!valid_pat_type((data >> (i * 8)) & 0xff))
  1048. return false;
  1049. return true;
  1050. } else if (msr == MSR_MTRRdefType) {
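/* MTRRdefType: type in bits 7:0, FE in bit 10, E in bit 11; other bits reserved */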
  1051. if (data & ~0xcff)
  1052. return false;
  1053. return valid_mtrr_type(data & 0xff);
  1054. } else if (msr >= MSR_MTRRfix64K_00000 && msr <= MSR_MTRRfix4K_F8000) {
  1055. for (i = 0; i < 8 ; i++)
  1056. if (!valid_mtrr_type((data >> (i * 8)) & 0xff))
  1057. return false;
  1058. return true;
  1059. }
  1060. /* variable MTRRs */
  1061. return valid_mtrr_type(data & 0xff);
  1062. }
  1063. static int set_msr_mtrr(struct kvm_vcpu *vcpu, u32 msr, u64 data)
  1064. {
  1065. u64 *p = (u64 *)&vcpu->arch.mtrr_state.fixed_ranges;
  1066. if (!mtrr_valid(vcpu, msr, data))
  1067. return 1;
  1068. if (msr == MSR_MTRRdefType) {
  1069. vcpu->arch.mtrr_state.def_type = data;
  1070. vcpu->arch.mtrr_state.enabled = (data & 0xc00) >> 10;
  1071. } else if (msr == MSR_MTRRfix64K_00000)
  1072. p[0] = data;
  1073. else if (msr == MSR_MTRRfix16K_80000 || msr == MSR_MTRRfix16K_A0000)
  1074. p[1 + msr - MSR_MTRRfix16K_80000] = data;
  1075. else if (msr >= MSR_MTRRfix4K_C0000 && msr <= MSR_MTRRfix4K_F8000)
  1076. p[3 + msr - MSR_MTRRfix4K_C0000] = data;
  1077. else if (msr == MSR_IA32_CR_PAT)
  1078. vcpu->arch.pat = data;
  1079. else { /* Variable MTRRs */
  1080. int idx, is_mtrr_mask;
  1081. u64 *pt;
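/* MTRRphysBaseN is MSR 0x200 + 2N, MTRRphysMaskN is MSR 0x200 + 2N + 1 */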
  1082. idx = (msr - 0x200) / 2;
  1083. is_mtrr_mask = msr - 0x200 - 2 * idx;
  1084. if (!is_mtrr_mask)
  1085. pt =
  1086. (u64 *)&vcpu->arch.mtrr_state.var_ranges[idx].base_lo;
  1087. else
  1088. pt =
  1089. (u64 *)&vcpu->arch.mtrr_state.var_ranges[idx].mask_lo;
  1090. *pt = data;
  1091. }
  1092. kvm_mmu_reset_context(vcpu);
  1093. return 0;
  1094. }
  1095. static int set_msr_mce(struct kvm_vcpu *vcpu, u32 msr, u64 data)
  1096. {
  1097. u64 mcg_cap = vcpu->arch.mcg_cap;
  1098. unsigned bank_num = mcg_cap & 0xff;
  1099. switch (msr) {
  1100. case MSR_IA32_MCG_STATUS:
  1101. vcpu->arch.mcg_status = data;
  1102. break;
  1103. case MSR_IA32_MCG_CTL:
  1104. if (!(mcg_cap & MCG_CTL_P))
  1105. return 1;
  1106. if (data != 0 && data != ~(u64)0)
  1107. return -1;
  1108. vcpu->arch.mcg_ctl = data;
  1109. break;
  1110. default:
  1111. if (msr >= MSR_IA32_MC0_CTL &&
  1112. msr < MSR_IA32_MC0_CTL + 4 * bank_num) {
  1113. u32 offset = msr - MSR_IA32_MC0_CTL;
  1114. /* only 0 or all 1s can be written to IA32_MCi_CTL
  1115. * some Linux kernels though clear bit 10 in bank 4 to
1116. * work around a BIOS/GART TBL issue on AMD K8s; ignore
1117. * this to avoid an uncaught #GP in the guest
  1118. */
  1119. if ((offset & 0x3) == 0 &&
  1120. data != 0 && (data | (1 << 10)) != ~(u64)0)
  1121. return -1;
  1122. vcpu->arch.mce_banks[offset] = data;
  1123. break;
  1124. }
  1125. return 1;
  1126. }
  1127. return 0;
  1128. }
  1129. static int xen_hvm_config(struct kvm_vcpu *vcpu, u64 data)
  1130. {
  1131. struct kvm *kvm = vcpu->kvm;
  1132. int lm = is_long_mode(vcpu);
  1133. u8 *blob_addr = lm ? (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_64
  1134. : (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
  1135. u8 blob_size = lm ? kvm->arch.xen_hvm_config.blob_size_64
  1136. : kvm->arch.xen_hvm_config.blob_size_32;
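/* The written value packs the blob page index into the low (offset) bits
 * and the destination guest physical page into the page-aligned bits. */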
  1137. u32 page_num = data & ~PAGE_MASK;
  1138. u64 page_addr = data & PAGE_MASK;
  1139. u8 *page;
  1140. int r;
  1141. r = -E2BIG;
  1142. if (page_num >= blob_size)
  1143. goto out;
  1144. r = -ENOMEM;
  1145. page = kzalloc(PAGE_SIZE, GFP_KERNEL);
  1146. if (!page)
  1147. goto out;
  1148. r = -EFAULT;
  1149. if (copy_from_user(page, blob_addr + (page_num * PAGE_SIZE), PAGE_SIZE))
  1150. goto out_free;
  1151. if (kvm_write_guest(kvm, page_addr, page, PAGE_SIZE))
  1152. goto out_free;
  1153. r = 0;
  1154. out_free:
  1155. kfree(page);
  1156. out:
  1157. return r;
  1158. }
  1159. static bool kvm_hv_hypercall_enabled(struct kvm *kvm)
  1160. {
  1161. return kvm->arch.hv_hypercall & HV_X64_MSR_HYPERCALL_ENABLE;
  1162. }
  1163. static bool kvm_hv_msr_partition_wide(u32 msr)
  1164. {
  1165. bool r = false;
  1166. switch (msr) {
  1167. case HV_X64_MSR_GUEST_OS_ID:
  1168. case HV_X64_MSR_HYPERCALL:
  1169. r = true;
  1170. break;
  1171. }
  1172. return r;
  1173. }
  1174. static int set_msr_hyperv_pw(struct kvm_vcpu *vcpu, u32 msr, u64 data)
  1175. {
  1176. struct kvm *kvm = vcpu->kvm;
  1177. switch (msr) {
  1178. case HV_X64_MSR_GUEST_OS_ID:
  1179. kvm->arch.hv_guest_os_id = data;
  1180. /* setting guest os id to zero disables hypercall page */
  1181. if (!kvm->arch.hv_guest_os_id)
  1182. kvm->arch.hv_hypercall &= ~HV_X64_MSR_HYPERCALL_ENABLE;
  1183. break;
  1184. case HV_X64_MSR_HYPERCALL: {
  1185. u64 gfn;
  1186. unsigned long addr;
  1187. u8 instructions[4];
  1188. /* if guest os id is not set hypercall should remain disabled */
  1189. if (!kvm->arch.hv_guest_os_id)
  1190. break;
  1191. if (!(data & HV_X64_MSR_HYPERCALL_ENABLE)) {
  1192. kvm->arch.hv_hypercall = data;
  1193. break;
  1194. }
  1195. gfn = data >> HV_X64_MSR_HYPERCALL_PAGE_ADDRESS_SHIFT;
  1196. addr = gfn_to_hva(kvm, gfn);
  1197. if (kvm_is_error_hva(addr))
  1198. return 1;
  1199. kvm_x86_ops->patch_hypercall(vcpu, instructions);
  1200. ((unsigned char *)instructions)[3] = 0xc3; /* ret */
  1201. if (copy_to_user((void __user *)addr, instructions, 4))
  1202. return 1;
  1203. kvm->arch.hv_hypercall = data;
  1204. break;
  1205. }
  1206. default:
  1207. pr_unimpl(vcpu, "HYPER-V unimplemented wrmsr: 0x%x "
  1208. "data 0x%llx\n", msr, data);
  1209. return 1;
  1210. }
  1211. return 0;
  1212. }
  1213. static int set_msr_hyperv(struct kvm_vcpu *vcpu, u32 msr, u64 data)
  1214. {
  1215. switch (msr) {
  1216. case HV_X64_MSR_APIC_ASSIST_PAGE: {
  1217. unsigned long addr;
  1218. if (!(data & HV_X64_MSR_APIC_ASSIST_PAGE_ENABLE)) {
  1219. vcpu->arch.hv_vapic = data;
  1220. break;
  1221. }
  1222. addr = gfn_to_hva(vcpu->kvm, data >>
  1223. HV_X64_MSR_APIC_ASSIST_PAGE_ADDRESS_SHIFT);
  1224. if (kvm_is_error_hva(addr))
  1225. return 1;
  1226. if (clear_user((void __user *)addr, PAGE_SIZE))
  1227. return 1;
  1228. vcpu->arch.hv_vapic = data;
  1229. break;
  1230. }
  1231. case HV_X64_MSR_EOI:
  1232. return kvm_hv_vapic_msr_write(vcpu, APIC_EOI, data);
  1233. case HV_X64_MSR_ICR:
  1234. return kvm_hv_vapic_msr_write(vcpu, APIC_ICR, data);
  1235. case HV_X64_MSR_TPR:
  1236. return kvm_hv_vapic_msr_write(vcpu, APIC_TASKPRI, data);
  1237. default:
  1238. pr_unimpl(vcpu, "HYPER-V unimplemented wrmsr: 0x%x "
  1239. "data 0x%llx\n", msr, data);
  1240. return 1;
  1241. }
  1242. return 0;
  1243. }
  1244. static int kvm_pv_enable_async_pf(struct kvm_vcpu *vcpu, u64 data)
  1245. {
  1246. gpa_t gpa = data & ~0x3f;
1247. /* Bits 2:5 are reserved, should be zero */
  1248. if (data & 0x3c)
  1249. return 1;
  1250. vcpu->arch.apf.msr_val = data;
  1251. if (!(data & KVM_ASYNC_PF_ENABLED)) {
  1252. kvm_clear_async_pf_completion_queue(vcpu);
  1253. kvm_async_pf_hash_reset(vcpu);
  1254. return 0;
  1255. }
  1256. if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.apf.data, gpa))
  1257. return 1;
  1258. vcpu->arch.apf.send_user_only = !(data & KVM_ASYNC_PF_SEND_ALWAYS);
  1259. kvm_async_pf_wakeup_all(vcpu);
  1260. return 0;
  1261. }
  1262. static void kvmclock_reset(struct kvm_vcpu *vcpu)
  1263. {
  1264. if (vcpu->arch.time_page) {
  1265. kvm_release_page_dirty(vcpu->arch.time_page);
  1266. vcpu->arch.time_page = NULL;
  1267. }
  1268. }
  1269. int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)
  1270. {
  1271. switch (msr) {
  1272. case MSR_EFER:
  1273. return set_efer(vcpu, data);
  1274. case MSR_K7_HWCR:
  1275. data &= ~(u64)0x40; /* ignore flush filter disable */
  1276. data &= ~(u64)0x100; /* ignore ignne emulation enable */
  1277. if (data != 0) {
  1278. pr_unimpl(vcpu, "unimplemented HWCR wrmsr: 0x%llx\n",
  1279. data);
  1280. return 1;
  1281. }
  1282. break;
  1283. case MSR_FAM10H_MMIO_CONF_BASE:
  1284. if (data != 0) {
  1285. pr_unimpl(vcpu, "unimplemented MMIO_CONF_BASE wrmsr: "
  1286. "0x%llx\n", data);
  1287. return 1;
  1288. }
  1289. break;
  1290. case MSR_AMD64_NB_CFG:
  1291. break;
  1292. case MSR_IA32_DEBUGCTLMSR:
  1293. if (!data) {
  1294. /* We support the non-activated case already */
  1295. break;
  1296. } else if (data & ~(DEBUGCTLMSR_LBR | DEBUGCTLMSR_BTF)) {
  1297. /* Values other than LBR and BTF are vendor-specific,
  1298. thus reserved and should throw a #GP */
  1299. return 1;
  1300. }
  1301. pr_unimpl(vcpu, "%s: MSR_IA32_DEBUGCTLMSR 0x%llx, nop\n",
  1302. __func__, data);
  1303. break;
  1304. case MSR_IA32_UCODE_REV:
  1305. case MSR_IA32_UCODE_WRITE:
  1306. case MSR_VM_HSAVE_PA:
  1307. case MSR_AMD64_PATCH_LOADER:
  1308. break;
  1309. case 0x200 ... 0x2ff:
  1310. return set_msr_mtrr(vcpu, msr, data);
  1311. case MSR_IA32_APICBASE:
  1312. kvm_set_apic_base(vcpu, data);
  1313. break;
  1314. case APIC_BASE_MSR ... APIC_BASE_MSR + 0x3ff:
  1315. return kvm_x2apic_msr_write(vcpu, msr, data);
  1316. case MSR_IA32_MISC_ENABLE:
  1317. vcpu->arch.ia32_misc_enable_msr = data;
  1318. break;
  1319. case MSR_KVM_WALL_CLOCK_NEW:
  1320. case MSR_KVM_WALL_CLOCK:
  1321. vcpu->kvm->arch.wall_clock = data;
  1322. kvm_write_wall_clock(vcpu->kvm, data);
  1323. break;
  1324. case MSR_KVM_SYSTEM_TIME_NEW:
  1325. case MSR_KVM_SYSTEM_TIME: {
  1326. kvmclock_reset(vcpu);
  1327. vcpu->arch.time = data;
  1328. kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
  1329. /* we verify if the enable bit is set... */
  1330. if (!(data & 1))
  1331. break;
  1332. /* ...but clean it before doing the actual write */
  1333. vcpu->arch.time_offset = data & ~(PAGE_MASK | 1);
  1334. vcpu->arch.time_page =
  1335. gfn_to_page(vcpu->kvm, data >> PAGE_SHIFT);
  1336. if (is_error_page(vcpu->arch.time_page)) {
  1337. kvm_release_page_clean(vcpu->arch.time_page);
  1338. vcpu->arch.time_page = NULL;
  1339. }
  1340. break;
  1341. }
  1342. case MSR_KVM_ASYNC_PF_EN:
  1343. if (kvm_pv_enable_async_pf(vcpu, data))
  1344. return 1;
  1345. break;
  1346. case MSR_IA32_MCG_CTL:
  1347. case MSR_IA32_MCG_STATUS:
  1348. case MSR_IA32_MC0_CTL ... MSR_IA32_MC0_CTL + 4 * KVM_MAX_MCE_BANKS - 1:
  1349. return set_msr_mce(vcpu, msr, data);
  1350. /* Performance counters are not protected by a CPUID bit,
  1351. * so we should check all of them in the generic path for the sake of
  1352. * cross vendor migration.
  1353. * Writing a zero into the event select MSRs disables them,
  1354. * which we perfectly emulate ;-). Any other value should be at least
  1355. * reported, some guests depend on them.
  1356. */
  1357. case MSR_P6_EVNTSEL0:
  1358. case MSR_P6_EVNTSEL1:
  1359. case MSR_K7_EVNTSEL0:
  1360. case MSR_K7_EVNTSEL1:
  1361. case MSR_K7_EVNTSEL2:
  1362. case MSR_K7_EVNTSEL3:
  1363. if (data != 0)
  1364. pr_unimpl(vcpu, "unimplemented perfctr wrmsr: "
  1365. "0x%x data 0x%llx\n", msr, data);
  1366. break;
  1367. /* at least RHEL 4 unconditionally writes to the perfctr registers,
  1368. * so we ignore writes to make it happy.
  1369. */
  1370. case MSR_P6_PERFCTR0:
  1371. case MSR_P6_PERFCTR1:
  1372. case MSR_K7_PERFCTR0:
  1373. case MSR_K7_PERFCTR1:
  1374. case MSR_K7_PERFCTR2:
  1375. case MSR_K7_PERFCTR3:
  1376. pr_unimpl(vcpu, "unimplemented perfctr wrmsr: "
  1377. "0x%x data 0x%llx\n", msr, data);
  1378. break;
  1379. case MSR_K7_CLK_CTL:
  1380. /*
  1381. * Ignore all writes to this no longer documented MSR.
  1382. * Writes are only relevant for old K7 processors,
  1383. * all pre-dating SVM, but a recommended workaround from
1384. * AMD for these chips. It is possible to specify the
  1385. * affected processor models on the command line, hence
  1386. * the need to ignore the workaround.
  1387. */
  1388. break;
  1389. case HV_X64_MSR_GUEST_OS_ID ... HV_X64_MSR_SINT15:
  1390. if (kvm_hv_msr_partition_wide(msr)) {
  1391. int r;
  1392. mutex_lock(&vcpu->kvm->lock);
  1393. r = set_msr_hyperv_pw(vcpu, msr, data);
  1394. mutex_unlock(&vcpu->kvm->lock);
  1395. return r;
  1396. } else
  1397. return set_msr_hyperv(vcpu, msr, data);
  1398. break;
  1399. case MSR_IA32_BBL_CR_CTL3:
  1400. /* Drop writes to this legacy MSR -- see rdmsr
  1401. * counterpart for further detail.
  1402. */
  1403. pr_unimpl(vcpu, "ignored wrmsr: 0x%x data %llx\n", msr, data);
  1404. break;
  1405. default:
  1406. if (msr && (msr == vcpu->kvm->arch.xen_hvm_config.msr))
  1407. return xen_hvm_config(vcpu, data);
  1408. if (!ignore_msrs) {
  1409. pr_unimpl(vcpu, "unhandled wrmsr: 0x%x data %llx\n",
  1410. msr, data);
  1411. return 1;
  1412. } else {
  1413. pr_unimpl(vcpu, "ignored wrmsr: 0x%x data %llx\n",
  1414. msr, data);
  1415. break;
  1416. }
  1417. }
  1418. return 0;
  1419. }
  1420. EXPORT_SYMBOL_GPL(kvm_set_msr_common);
  1421. /*
  1422. * Reads an msr value (of 'msr_index') into 'pdata'.
  1423. * Returns 0 on success, non-0 otherwise.
  1424. * Assumes vcpu_load() was already called.
  1425. */
  1426. int kvm_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
  1427. {
  1428. return kvm_x86_ops->get_msr(vcpu, msr_index, pdata);
  1429. }
  1430. static int get_msr_mtrr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
  1431. {
  1432. u64 *p = (u64 *)&vcpu->arch.mtrr_state.fixed_ranges;
  1433. if (!msr_mtrr_valid(msr))
  1434. return 1;
  1435. if (msr == MSR_MTRRdefType)
  1436. *pdata = vcpu->arch.mtrr_state.def_type +
  1437. (vcpu->arch.mtrr_state.enabled << 10);
  1438. else if (msr == MSR_MTRRfix64K_00000)
  1439. *pdata = p[0];
  1440. else if (msr == MSR_MTRRfix16K_80000 || msr == MSR_MTRRfix16K_A0000)
  1441. *pdata = p[1 + msr - MSR_MTRRfix16K_80000];
  1442. else if (msr >= MSR_MTRRfix4K_C0000 && msr <= MSR_MTRRfix4K_F8000)
  1443. *pdata = p[3 + msr - MSR_MTRRfix4K_C0000];
  1444. else if (msr == MSR_IA32_CR_PAT)
  1445. *pdata = vcpu->arch.pat;
  1446. else { /* Variable MTRRs */
  1447. int idx, is_mtrr_mask;
  1448. u64 *pt;
  1449. idx = (msr - 0x200) / 2;
  1450. is_mtrr_mask = msr - 0x200 - 2 * idx;
  1451. if (!is_mtrr_mask)
  1452. pt =
  1453. (u64 *)&vcpu->arch.mtrr_state.var_ranges[idx].base_lo;
  1454. else
  1455. pt =
  1456. (u64 *)&vcpu->arch.mtrr_state.var_ranges[idx].mask_lo;
  1457. *pdata = *pt;
  1458. }
  1459. return 0;
  1460. }
  1461. static int get_msr_mce(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
  1462. {
  1463. u64 data;
  1464. u64 mcg_cap = vcpu->arch.mcg_cap;
  1465. unsigned bank_num = mcg_cap & 0xff;
  1466. switch (msr) {
  1467. case MSR_IA32_P5_MC_ADDR:
  1468. case MSR_IA32_P5_MC_TYPE:
  1469. data = 0;
  1470. break;
  1471. case MSR_IA32_MCG_CAP:
  1472. data = vcpu->arch.mcg_cap;
  1473. break;
  1474. case MSR_IA32_MCG_CTL:
  1475. if (!(mcg_cap & MCG_CTL_P))
  1476. return 1;
  1477. data = vcpu->arch.mcg_ctl;
  1478. break;
  1479. case MSR_IA32_MCG_STATUS:
  1480. data = vcpu->arch.mcg_status;
  1481. break;
  1482. default:
  1483. if (msr >= MSR_IA32_MC0_CTL &&
  1484. msr < MSR_IA32_MC0_CTL + 4 * bank_num) {
  1485. u32 offset = msr - MSR_IA32_MC0_CTL;
  1486. data = vcpu->arch.mce_banks[offset];
  1487. break;
  1488. }
  1489. return 1;
  1490. }
  1491. *pdata = data;
  1492. return 0;
  1493. }
  1494. static int get_msr_hyperv_pw(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
  1495. {
  1496. u64 data = 0;
  1497. struct kvm *kvm = vcpu->kvm;
  1498. switch (msr) {
  1499. case HV_X64_MSR_GUEST_OS_ID:
  1500. data = kvm->arch.hv_guest_os_id;
  1501. break;
  1502. case HV_X64_MSR_HYPERCALL:
  1503. data = kvm->arch.hv_hypercall;
  1504. break;
  1505. default:
  1506. pr_unimpl(vcpu, "Hyper-V unhandled rdmsr: 0x%x\n", msr);
  1507. return 1;
  1508. }
  1509. *pdata = data;
  1510. return 0;
  1511. }
  1512. static int get_msr_hyperv(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
  1513. {
  1514. u64 data = 0;
  1515. switch (msr) {
  1516. case HV_X64_MSR_VP_INDEX: {
  1517. int r;
  1518. struct kvm_vcpu *v;
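/* Report this vcpu's position in the VM's vcpu array as its VP index */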
  1519. kvm_for_each_vcpu(r, v, vcpu->kvm)
  1520. if (v == vcpu)
  1521. data = r;
  1522. break;
  1523. }
  1524. case HV_X64_MSR_EOI:
  1525. return kvm_hv_vapic_msr_read(vcpu, APIC_EOI, pdata);
  1526. case HV_X64_MSR_ICR:
  1527. return kvm_hv_vapic_msr_read(vcpu, APIC_ICR, pdata);
  1528. case HV_X64_MSR_TPR:
  1529. return kvm_hv_vapic_msr_read(vcpu, APIC_TASKPRI, pdata);
  1530. default:
  1531. pr_unimpl(vcpu, "Hyper-V unhandled rdmsr: 0x%x\n", msr);
  1532. return 1;
  1533. }
  1534. *pdata = data;
  1535. return 0;
  1536. }
  1537. int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
  1538. {
  1539. u64 data;
  1540. switch (msr) {
  1541. case MSR_IA32_PLATFORM_ID:
  1542. case MSR_IA32_UCODE_REV:
  1543. case MSR_IA32_EBL_CR_POWERON:
  1544. case MSR_IA32_DEBUGCTLMSR:
  1545. case MSR_IA32_LASTBRANCHFROMIP:
  1546. case MSR_IA32_LASTBRANCHTOIP:
  1547. case MSR_IA32_LASTINTFROMIP:
  1548. case MSR_IA32_LASTINTTOIP:
  1549. case MSR_K8_SYSCFG:
  1550. case MSR_K7_HWCR:
  1551. case MSR_VM_HSAVE_PA:
  1552. case MSR_P6_PERFCTR0:
  1553. case MSR_P6_PERFCTR1:
  1554. case MSR_P6_EVNTSEL0:
  1555. case MSR_P6_EVNTSEL1:
  1556. case MSR_K7_EVNTSEL0:
  1557. case MSR_K7_PERFCTR0:
  1558. case MSR_K8_INT_PENDING_MSG:
  1559. case MSR_AMD64_NB_CFG:
  1560. case MSR_FAM10H_MMIO_CONF_BASE:
  1561. data = 0;
  1562. break;
  1563. case MSR_MTRRcap:
  1564. data = 0x500 | KVM_NR_VAR_MTRR;
  1565. break;
  1566. case 0x200 ... 0x2ff:
  1567. return get_msr_mtrr(vcpu, msr, pdata);
  1568. case 0xcd: /* fsb frequency */
  1569. data = 3;
  1570. break;
  1571. /*
  1572. * MSR_EBC_FREQUENCY_ID
  1573. * Conservative value valid for even the basic CPU models.
  1574. * Models 0,1: 000 in bits 23:21 indicating a bus speed of
1575. * 100MHz; model 2: 000 in bits 18:16 indicating 100MHz,
1576. * and 266MHz for models 3 and 4. Set Core Clock
  1577. * Frequency to System Bus Frequency Ratio to 1 (bits
  1578. * 31:24) even though these are only valid for CPU
  1579. * models > 2, however guests may end up dividing or
  1580. * multiplying by zero otherwise.
  1581. */
  1582. case MSR_EBC_FREQUENCY_ID:
  1583. data = 1 << 24;
  1584. break;
  1585. case MSR_IA32_APICBASE:
  1586. data = kvm_get_apic_base(vcpu);
  1587. break;
  1588. case APIC_BASE_MSR ... APIC_BASE_MSR + 0x3ff:
  1589. return kvm_x2apic_msr_read(vcpu, msr, pdata);
  1590. break;
  1591. case MSR_IA32_MISC_ENABLE:
  1592. data = vcpu->arch.ia32_misc_enable_msr;
  1593. break;
  1594. case MSR_IA32_PERF_STATUS:
  1595. /* TSC increment by tick */
  1596. data = 1000ULL;
  1597. /* CPU multiplier */
  1598. data |= (((uint64_t)4ULL) << 40);
  1599. break;
  1600. case MSR_EFER:
  1601. data = vcpu->arch.efer;
  1602. break;
  1603. case MSR_KVM_WALL_CLOCK:
  1604. case MSR_KVM_WALL_CLOCK_NEW:
  1605. data = vcpu->kvm->arch.wall_clock;
  1606. break;
  1607. case MSR_KVM_SYSTEM_TIME:
  1608. case MSR_KVM_SYSTEM_TIME_NEW:
  1609. data = vcpu->arch.time;
  1610. break;
  1611. case MSR_KVM_ASYNC_PF_EN:
  1612. data = vcpu->arch.apf.msr_val;
  1613. break;
  1614. case MSR_IA32_P5_MC_ADDR:
  1615. case MSR_IA32_P5_MC_TYPE:
  1616. case MSR_IA32_MCG_CAP:
  1617. case MSR_IA32_MCG_CTL:
  1618. case MSR_IA32_MCG_STATUS:
  1619. case MSR_IA32_MC0_CTL ... MSR_IA32_MC0_CTL + 4 * KVM_MAX_MCE_BANKS - 1:
  1620. return get_msr_mce(vcpu, msr, pdata);
  1621. case MSR_K7_CLK_CTL:
  1622. /*
  1623. * Provide expected ramp-up count for K7. All other
1624. * bits are set to zero, indicating minimum divisors for
  1625. * every field.
  1626. *
  1627. * This prevents guest kernels on AMD host with CPU
  1628. * type 6, model 8 and higher from exploding due to
  1629. * the rdmsr failing.
  1630. */
  1631. data = 0x20000000;
  1632. break;
  1633. case HV_X64_MSR_GUEST_OS_ID ... HV_X64_MSR_SINT15:
  1634. if (kvm_hv_msr_partition_wide(msr)) {
  1635. int r;
  1636. mutex_lock(&vcpu->kvm->lock);
  1637. r = get_msr_hyperv_pw(vcpu, msr, pdata);
  1638. mutex_unlock(&vcpu->kvm->lock);
  1639. return r;
  1640. } else
  1641. return get_msr_hyperv(vcpu, msr, pdata);
  1642. break;
  1643. case MSR_IA32_BBL_CR_CTL3:
  1644. /* This legacy MSR exists but isn't fully documented in current
  1645. * silicon. It is however accessed by winxp in very narrow
  1646. * scenarios where it sets bit #19, itself documented as
  1647. * a "reserved" bit. Best effort attempt to source coherent
  1648. * read data here should the balance of the register be
  1649. * interpreted by the guest:
  1650. *
  1651. * L2 cache control register 3: 64GB range, 256KB size,
  1652. * enabled, latency 0x1, configured
  1653. */
  1654. data = 0xbe702111;
  1655. break;
  1656. default:
  1657. if (!ignore_msrs) {
  1658. pr_unimpl(vcpu, "unhandled rdmsr: 0x%x\n", msr);
  1659. return 1;
  1660. } else {
  1661. pr_unimpl(vcpu, "ignored rdmsr: 0x%x\n", msr);
  1662. data = 0;
  1663. }
  1664. break;
  1665. }
  1666. *pdata = data;
  1667. return 0;
  1668. }
  1669. EXPORT_SYMBOL_GPL(kvm_get_msr_common);
  1670. /*
  1671. * Read or write a bunch of msrs. All parameters are kernel addresses.
  1672. *
  1673. * @return number of msrs set successfully.
  1674. */
  1675. static int __msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs *msrs,
  1676. struct kvm_msr_entry *entries,
  1677. int (*do_msr)(struct kvm_vcpu *vcpu,
  1678. unsigned index, u64 *data))
  1679. {
  1680. int i, idx;
  1681. idx = srcu_read_lock(&vcpu->kvm->srcu);
  1682. for (i = 0; i < msrs->nmsrs; ++i)
  1683. if (do_msr(vcpu, entries[i].index, &entries[i].data))
  1684. break;
  1685. srcu_read_unlock(&vcpu->kvm->srcu, idx);
  1686. return i;
  1687. }
  1688. /*
  1689. * Read or write a bunch of msrs. Parameters are user addresses.
  1690. *
  1691. * @return number of msrs set successfully.
  1692. */
  1693. static int msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs __user *user_msrs,
  1694. int (*do_msr)(struct kvm_vcpu *vcpu,
  1695. unsigned index, u64 *data),
  1696. int writeback)
  1697. {
  1698. struct kvm_msrs msrs;
  1699. struct kvm_msr_entry *entries;
  1700. int r, n;
  1701. unsigned size;
  1702. r = -EFAULT;
  1703. if (copy_from_user(&msrs, user_msrs, sizeof msrs))
  1704. goto out;
  1705. r = -E2BIG;
  1706. if (msrs.nmsrs >= MAX_IO_MSRS)
  1707. goto out;
  1708. r = -ENOMEM;
  1709. size = sizeof(struct kvm_msr_entry) * msrs.nmsrs;
  1710. entries = kmalloc(size, GFP_KERNEL);
  1711. if (!entries)
  1712. goto out;
  1713. r = -EFAULT;
  1714. if (copy_from_user(entries, user_msrs->entries, size))
  1715. goto out_free;
  1716. r = n = __msr_io(vcpu, &msrs, entries, do_msr);
  1717. if (r < 0)
  1718. goto out_free;
  1719. r = -EFAULT;
  1720. if (writeback && copy_to_user(user_msrs->entries, entries, size))
  1721. goto out_free;
  1722. r = n;
  1723. out_free:
  1724. kfree(entries);
  1725. out:
  1726. return r;
  1727. }
  1728. int kvm_dev_ioctl_check_extension(long ext)
  1729. {
  1730. int r;
  1731. switch (ext) {
  1732. case KVM_CAP_IRQCHIP:
  1733. case KVM_CAP_HLT:
  1734. case KVM_CAP_MMU_SHADOW_CACHE_CONTROL:
  1735. case KVM_CAP_SET_TSS_ADDR:
  1736. case KVM_CAP_EXT_CPUID:
  1737. case KVM_CAP_CLOCKSOURCE:
  1738. case KVM_CAP_PIT:
  1739. case KVM_CAP_NOP_IO_DELAY:
  1740. case KVM_CAP_MP_STATE:
  1741. case KVM_CAP_SYNC_MMU:
  1742. case KVM_CAP_USER_NMI:
  1743. case KVM_CAP_REINJECT_CONTROL:
  1744. case KVM_CAP_IRQ_INJECT_STATUS:
  1745. case KVM_CAP_ASSIGN_DEV_IRQ:
  1746. case KVM_CAP_IRQFD:
  1747. case KVM_CAP_IOEVENTFD:
  1748. case KVM_CAP_PIT2:
  1749. case KVM_CAP_PIT_STATE2:
  1750. case KVM_CAP_SET_IDENTITY_MAP_ADDR:
  1751. case KVM_CAP_XEN_HVM:
  1752. case KVM_CAP_ADJUST_CLOCK:
  1753. case KVM_CAP_VCPU_EVENTS:
  1754. case KVM_CAP_HYPERV:
  1755. case KVM_CAP_HYPERV_VAPIC:
  1756. case KVM_CAP_HYPERV_SPIN:
  1757. case KVM_CAP_PCI_SEGMENT:
  1758. case KVM_CAP_DEBUGREGS:
  1759. case KVM_CAP_X86_ROBUST_SINGLESTEP:
  1760. case KVM_CAP_XSAVE:
  1761. case KVM_CAP_ASYNC_PF:
  1762. case KVM_CAP_GET_TSC_KHZ:
  1763. r = 1;
  1764. break;
  1765. case KVM_CAP_COALESCED_MMIO:
  1766. r = KVM_COALESCED_MMIO_PAGE_OFFSET;
  1767. break;
  1768. case KVM_CAP_VAPIC:
  1769. r = !kvm_x86_ops->cpu_has_accelerated_tpr();
  1770. break;
  1771. case KVM_CAP_NR_VCPUS:
  1772. r = KVM_MAX_VCPUS;
  1773. break;
  1774. case KVM_CAP_NR_MEMSLOTS:
  1775. r = KVM_MEMORY_SLOTS;
  1776. break;
  1777. case KVM_CAP_PV_MMU: /* obsolete */
  1778. r = 0;
  1779. break;
  1780. case KVM_CAP_IOMMU:
  1781. r = iommu_found();
  1782. break;
  1783. case KVM_CAP_MCE:
  1784. r = KVM_MAX_MCE_BANKS;
  1785. break;
  1786. case KVM_CAP_XCRS:
  1787. r = cpu_has_xsave;
  1788. break;
  1789. case KVM_CAP_TSC_CONTROL:
  1790. r = kvm_has_tsc_control;
  1791. break;
  1792. default:
  1793. r = 0;
  1794. break;
  1795. }
  1796. return r;
  1797. }
  1798. long kvm_arch_dev_ioctl(struct file *filp,
  1799. unsigned int ioctl, unsigned long arg)
  1800. {
  1801. void __user *argp = (void __user *)arg;
  1802. long r;
  1803. switch (ioctl) {
  1804. case KVM_GET_MSR_INDEX_LIST: {
  1805. struct kvm_msr_list __user *user_msr_list = argp;
  1806. struct kvm_msr_list msr_list;
  1807. unsigned n;
  1808. r = -EFAULT;
  1809. if (copy_from_user(&msr_list, user_msr_list, sizeof msr_list))
  1810. goto out;
  1811. n = msr_list.nmsrs;
  1812. msr_list.nmsrs = num_msrs_to_save + ARRAY_SIZE(emulated_msrs);
  1813. if (copy_to_user(user_msr_list, &msr_list, sizeof msr_list))
  1814. goto out;
  1815. r = -E2BIG;
  1816. if (n < msr_list.nmsrs)
  1817. goto out;
  1818. r = -EFAULT;
  1819. if (copy_to_user(user_msr_list->indices, &msrs_to_save,
  1820. num_msrs_to_save * sizeof(u32)))
  1821. goto out;
  1822. if (copy_to_user(user_msr_list->indices + num_msrs_to_save,
  1823. &emulated_msrs,
  1824. ARRAY_SIZE(emulated_msrs) * sizeof(u32)))
  1825. goto out;
  1826. r = 0;
  1827. break;
  1828. }
  1829. case KVM_GET_SUPPORTED_CPUID: {
  1830. struct kvm_cpuid2 __user *cpuid_arg = argp;
  1831. struct kvm_cpuid2 cpuid;
  1832. r = -EFAULT;
  1833. if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
  1834. goto out;
  1835. r = kvm_dev_ioctl_get_supported_cpuid(&cpuid,
  1836. cpuid_arg->entries);
  1837. if (r)
  1838. goto out;
  1839. r = -EFAULT;
  1840. if (copy_to_user(cpuid_arg, &cpuid, sizeof cpuid))
  1841. goto out;
  1842. r = 0;
  1843. break;
  1844. }
  1845. case KVM_X86_GET_MCE_CAP_SUPPORTED: {
  1846. u64 mce_cap;
  1847. mce_cap = KVM_MCE_CAP_SUPPORTED;
  1848. r = -EFAULT;
  1849. if (copy_to_user(argp, &mce_cap, sizeof mce_cap))
  1850. goto out;
  1851. r = 0;
  1852. break;
  1853. }
  1854. default:
  1855. r = -EINVAL;
  1856. }
  1857. out:
  1858. return r;
  1859. }
  1860. static void wbinvd_ipi(void *garbage)
  1861. {
  1862. wbinvd();
  1863. }
  1864. static bool need_emulate_wbinvd(struct kvm_vcpu *vcpu)
  1865. {
  1866. return vcpu->kvm->arch.iommu_domain &&
  1867. !(vcpu->kvm->arch.iommu_flags & KVM_IOMMU_CACHE_COHERENCY);
  1868. }
  1869. void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
  1870. {
  1871. /* Address WBINVD may be executed by guest */
  1872. if (need_emulate_wbinvd(vcpu)) {
  1873. if (kvm_x86_ops->has_wbinvd_exit())
  1874. cpumask_set_cpu(cpu, vcpu->arch.wbinvd_dirty_mask);
  1875. else if (vcpu->cpu != -1 && vcpu->cpu != cpu)
  1876. smp_call_function_single(vcpu->cpu,
  1877. wbinvd_ipi, NULL, 1);
  1878. }
  1879. kvm_x86_ops->vcpu_load(vcpu, cpu);
  1880. if (unlikely(vcpu->cpu != cpu) || check_tsc_unstable()) {
  1881. /* Make sure TSC doesn't go backwards */
  1882. s64 tsc_delta;
  1883. u64 tsc;
  1884. kvm_get_msr(vcpu, MSR_IA32_TSC, &tsc);
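/* A negative delta means the guest's TSC would appear to go backwards
 * on this CPU, so the host TSCs are not synchronized across CPUs. */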
  1885. tsc_delta = !vcpu->arch.last_guest_tsc ? 0 :
  1886. tsc - vcpu->arch.last_guest_tsc;
  1887. if (tsc_delta < 0)
  1888. mark_tsc_unstable("KVM discovered backwards TSC");
  1889. if (check_tsc_unstable()) {
  1890. kvm_x86_ops->adjust_tsc_offset(vcpu, -tsc_delta);
  1891. vcpu->arch.tsc_catchup = 1;
  1892. }
  1893. kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
  1894. if (vcpu->cpu != cpu)
  1895. kvm_migrate_timers(vcpu);
  1896. vcpu->cpu = cpu;
  1897. }
  1898. }
  1899. void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
  1900. {
  1901. kvm_x86_ops->vcpu_put(vcpu);
  1902. kvm_put_guest_fpu(vcpu);
  1903. kvm_get_msr(vcpu, MSR_IA32_TSC, &vcpu->arch.last_guest_tsc);
  1904. }
  1905. static int is_efer_nx(void)
  1906. {
  1907. unsigned long long efer = 0;
  1908. rdmsrl_safe(MSR_EFER, &efer);
  1909. return efer & EFER_NX;
  1910. }
  1911. static void cpuid_fix_nx_cap(struct kvm_vcpu *vcpu)
  1912. {
  1913. int i;
  1914. struct kvm_cpuid_entry2 *e, *entry;
  1915. entry = NULL;
  1916. for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
  1917. e = &vcpu->arch.cpuid_entries[i];
  1918. if (e->function == 0x80000001) {
  1919. entry = e;
  1920. break;
  1921. }
  1922. }
  1923. if (entry && (entry->edx & (1 << 20)) && !is_efer_nx()) {
  1924. entry->edx &= ~(1 << 20);
  1925. printk(KERN_INFO "kvm: guest NX capability removed\n");
  1926. }
  1927. }
  1928. /* when an old userspace process fills a new kernel module */
  1929. static int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu,
  1930. struct kvm_cpuid *cpuid,
  1931. struct kvm_cpuid_entry __user *entries)
  1932. {
  1933. int r, i;
  1934. struct kvm_cpuid_entry *cpuid_entries;
  1935. r = -E2BIG;
  1936. if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
  1937. goto out;
  1938. r = -ENOMEM;
  1939. cpuid_entries = vmalloc(sizeof(struct kvm_cpuid_entry) * cpuid->nent);
  1940. if (!cpuid_entries)
  1941. goto out;
  1942. r = -EFAULT;
  1943. if (copy_from_user(cpuid_entries, entries,
  1944. cpuid->nent * sizeof(struct kvm_cpuid_entry)))
  1945. goto out_free;
  1946. for (i = 0; i < cpuid->nent; i++) {
  1947. vcpu->arch.cpuid_entries[i].function = cpuid_entries[i].function;
  1948. vcpu->arch.cpuid_entries[i].eax = cpuid_entries[i].eax;
  1949. vcpu->arch.cpuid_entries[i].ebx = cpuid_entries[i].ebx;
  1950. vcpu->arch.cpuid_entries[i].ecx = cpuid_entries[i].ecx;
  1951. vcpu->arch.cpuid_entries[i].edx = cpuid_entries[i].edx;
  1952. vcpu->arch.cpuid_entries[i].index = 0;
  1953. vcpu->arch.cpuid_entries[i].flags = 0;
  1954. vcpu->arch.cpuid_entries[i].padding[0] = 0;
  1955. vcpu->arch.cpuid_entries[i].padding[1] = 0;
  1956. vcpu->arch.cpuid_entries[i].padding[2] = 0;
  1957. }
  1958. vcpu->arch.cpuid_nent = cpuid->nent;
  1959. cpuid_fix_nx_cap(vcpu);
  1960. r = 0;
  1961. kvm_apic_set_version(vcpu);
  1962. kvm_x86_ops->cpuid_update(vcpu);
  1963. update_cpuid(vcpu);
  1964. out_free:
  1965. vfree(cpuid_entries);
  1966. out:
  1967. return r;
  1968. }
  1969. static int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
  1970. struct kvm_cpuid2 *cpuid,
  1971. struct kvm_cpuid_entry2 __user *entries)
  1972. {
  1973. int r;
  1974. r = -E2BIG;
  1975. if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
  1976. goto out;
  1977. r = -EFAULT;
  1978. if (copy_from_user(&vcpu->arch.cpuid_entries, entries,
  1979. cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
  1980. goto out;
  1981. vcpu->arch.cpuid_nent = cpuid->nent;
  1982. kvm_apic_set_version(vcpu);
  1983. kvm_x86_ops->cpuid_update(vcpu);
  1984. update_cpuid(vcpu);
  1985. return 0;
  1986. out:
  1987. return r;
  1988. }
  1989. static int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
  1990. struct kvm_cpuid2 *cpuid,
  1991. struct kvm_cpuid_entry2 __user *entries)
  1992. {
  1993. int r;
  1994. r = -E2BIG;
  1995. if (cpuid->nent < vcpu->arch.cpuid_nent)
  1996. goto out;
  1997. r = -EFAULT;
  1998. if (copy_to_user(entries, &vcpu->arch.cpuid_entries,
  1999. vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
  2000. goto out;
  2001. return 0;
  2002. out:
  2003. cpuid->nent = vcpu->arch.cpuid_nent;
  2004. return r;
  2005. }
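/* Clear feature bits that the host CPU itself does not advertise */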
  2006. static void cpuid_mask(u32 *word, int wordnum)
  2007. {
  2008. *word &= boot_cpu_data.x86_capability[wordnum];
  2009. }
  2010. static void do_cpuid_1_ent(struct kvm_cpuid_entry2 *entry, u32 function,
  2011. u32 index)
  2012. {
  2013. entry->function = function;
  2014. entry->index = index;
  2015. cpuid_count(entry->function, entry->index,
  2016. &entry->eax, &entry->ebx, &entry->ecx, &entry->edx);
  2017. entry->flags = 0;
  2018. }
  2019. #define F(x) bit(X86_FEATURE_##x)
  2020. static void do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
  2021. u32 index, int *nent, int maxnent)
  2022. {
  2023. unsigned f_nx = is_efer_nx() ? F(NX) : 0;
  2024. #ifdef CONFIG_X86_64
  2025. unsigned f_gbpages = (kvm_x86_ops->get_lpage_level() == PT_PDPE_LEVEL)
  2026. ? F(GBPAGES) : 0;
  2027. unsigned f_lm = F(LM);
  2028. #else
  2029. unsigned f_gbpages = 0;
  2030. unsigned f_lm = 0;
  2031. #endif
  2032. unsigned f_rdtscp = kvm_x86_ops->rdtscp_supported() ? F(RDTSCP) : 0;
  2033. /* cpuid 1.edx */
  2034. const u32 kvm_supported_word0_x86_features =
  2035. F(FPU) | F(VME) | F(DE) | F(PSE) |
  2036. F(TSC) | F(MSR) | F(PAE) | F(MCE) |
  2037. F(CX8) | F(APIC) | 0 /* Reserved */ | F(SEP) |
  2038. F(MTRR) | F(PGE) | F(MCA) | F(CMOV) |
  2039. F(PAT) | F(PSE36) | 0 /* PSN */ | F(CLFLSH) |
  2040. 0 /* Reserved, DS, ACPI */ | F(MMX) |
  2041. F(FXSR) | F(XMM) | F(XMM2) | F(SELFSNOOP) |
  2042. 0 /* HTT, TM, Reserved, PBE */;
  2043. /* cpuid 0x80000001.edx */
  2044. const u32 kvm_supported_word1_x86_features =
  2045. F(FPU) | F(VME) | F(DE) | F(PSE) |
  2046. F(TSC) | F(MSR) | F(PAE) | F(MCE) |
  2047. F(CX8) | F(APIC) | 0 /* Reserved */ | F(SYSCALL) |
  2048. F(MTRR) | F(PGE) | F(MCA) | F(CMOV) |
  2049. F(PAT) | F(PSE36) | 0 /* Reserved */ |
  2050. f_nx | 0 /* Reserved */ | F(MMXEXT) | F(MMX) |
  2051. F(FXSR) | F(FXSR_OPT) | f_gbpages | f_rdtscp |
  2052. 0 /* Reserved */ | f_lm | F(3DNOWEXT) | F(3DNOW);
  2053. /* cpuid 1.ecx */
  2054. const u32 kvm_supported_word4_x86_features =
  2055. F(XMM3) | F(PCLMULQDQ) | 0 /* DTES64, MONITOR */ |
  2056. 0 /* DS-CPL, VMX, SMX, EST */ |
  2057. 0 /* TM2 */ | F(SSSE3) | 0 /* CNXT-ID */ | 0 /* Reserved */ |
  2058. 0 /* Reserved */ | F(CX16) | 0 /* xTPR Update, PDCM */ |
  2059. 0 /* Reserved, DCA */ | F(XMM4_1) |
  2060. F(XMM4_2) | F(X2APIC) | F(MOVBE) | F(POPCNT) |
  2061. 0 /* Reserved*/ | F(AES) | F(XSAVE) | 0 /* OSXSAVE */ | F(AVX) |
  2062. F(F16C);
  2063. /* cpuid 0x80000001.ecx */
  2064. const u32 kvm_supported_word6_x86_features =
  2065. F(LAHF_LM) | F(CMP_LEGACY) | 0 /*SVM*/ | 0 /* ExtApicSpace */ |
  2066. F(CR8_LEGACY) | F(ABM) | F(SSE4A) | F(MISALIGNSSE) |
  2067. F(3DNOWPREFETCH) | 0 /* OSVW */ | 0 /* IBS */ | F(XOP) |
  2068. 0 /* SKINIT, WDT, LWP */ | F(FMA4) | F(TBM);
  2069. /* all calls to cpuid_count() should be made on the same cpu */
  2070. get_cpu();
  2071. do_cpuid_1_ent(entry, function, index);
  2072. ++*nent;
  2073. switch (function) {
  2074. case 0:
  2075. entry->eax = min(entry->eax, (u32)0xd);
  2076. break;
  2077. case 1:
  2078. entry->edx &= kvm_supported_word0_x86_features;
  2079. cpuid_mask(&entry->edx, 0);
  2080. entry->ecx &= kvm_supported_word4_x86_features;
  2081. cpuid_mask(&entry->ecx, 4);
  2082. /* we support x2apic emulation even if host does not support
  2083. * it since we emulate x2apic in software */
  2084. entry->ecx |= F(X2APIC);
  2085. break;
  2086. /* function 2 entries are STATEFUL. That is, repeated cpuid commands
  2087. * may return different values. This forces us to get_cpu() before
  2088. * issuing the first command, and also to emulate this annoying behavior
  2089. * in kvm_emulate_cpuid() using KVM_CPUID_FLAG_STATE_READ_NEXT */
  2090. case 2: {
  2091. int t, times = entry->eax & 0xff;
  2092. entry->flags |= KVM_CPUID_FLAG_STATEFUL_FUNC;
  2093. entry->flags |= KVM_CPUID_FLAG_STATE_READ_NEXT;
  2094. for (t = 1; t < times && *nent < maxnent; ++t) {
  2095. do_cpuid_1_ent(&entry[t], function, 0);
  2096. entry[t].flags |= KVM_CPUID_FLAG_STATEFUL_FUNC;
  2097. ++*nent;
  2098. }
  2099. break;
  2100. }
2101. /* functions 4 and 0xb have an additional index. */
  2102. case 4: {
  2103. int i, cache_type;
  2104. entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
  2105. /* read more entries until cache_type is zero */
  2106. for (i = 1; *nent < maxnent; ++i) {
  2107. cache_type = entry[i - 1].eax & 0x1f;
  2108. if (!cache_type)
  2109. break;
  2110. do_cpuid_1_ent(&entry[i], function, i);
  2111. entry[i].flags |=
  2112. KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
  2113. ++*nent;
  2114. }
  2115. break;
  2116. }
  2117. case 0xb: {
  2118. int i, level_type;
  2119. entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
  2120. /* read more entries until level_type is zero */
  2121. for (i = 1; *nent < maxnent; ++i) {
  2122. level_type = entry[i - 1].ecx & 0xff00;
  2123. if (!level_type)
  2124. break;
  2125. do_cpuid_1_ent(&entry[i], function, i);
  2126. entry[i].flags |=
  2127. KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
  2128. ++*nent;
  2129. }
  2130. break;
  2131. }
  2132. case 0xd: {
  2133. int i;
  2134. entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
  2135. for (i = 1; *nent < maxnent && i < 64; ++i) {
  2136. if (entry[i].eax == 0)
  2137. continue;
  2138. do_cpuid_1_ent(&entry[i], function, i);
  2139. entry[i].flags |=
  2140. KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
  2141. ++*nent;
  2142. }
  2143. break;
  2144. }
  2145. case KVM_CPUID_SIGNATURE: {
  2146. char signature[12] = "KVMKVMKVM\0\0";
  2147. u32 *sigptr = (u32 *)signature;
  2148. entry->eax = 0;
  2149. entry->ebx = sigptr[0];
  2150. entry->ecx = sigptr[1];
  2151. entry->edx = sigptr[2];
  2152. break;
  2153. }
  2154. case KVM_CPUID_FEATURES:
  2155. entry->eax = (1 << KVM_FEATURE_CLOCKSOURCE) |
  2156. (1 << KVM_FEATURE_NOP_IO_DELAY) |
  2157. (1 << KVM_FEATURE_CLOCKSOURCE2) |
  2158. (1 << KVM_FEATURE_ASYNC_PF) |
  2159. (1 << KVM_FEATURE_CLOCKSOURCE_STABLE_BIT);
  2160. entry->ebx = 0;
  2161. entry->ecx = 0;
  2162. entry->edx = 0;
  2163. break;
  2164. case 0x80000000:
  2165. entry->eax = min(entry->eax, 0x8000001a);
  2166. break;
  2167. case 0x80000001:
  2168. entry->edx &= kvm_supported_word1_x86_features;
  2169. cpuid_mask(&entry->edx, 1);
  2170. entry->ecx &= kvm_supported_word6_x86_features;
  2171. cpuid_mask(&entry->ecx, 6);
  2172. break;
  2173. }
  2174. kvm_x86_ops->set_supported_cpuid(function, entry);
  2175. put_cpu();
  2176. }
  2177. #undef F
  2178. static int kvm_dev_ioctl_get_supported_cpuid(struct kvm_cpuid2 *cpuid,
  2179. struct kvm_cpuid_entry2 __user *entries)
  2180. {
  2181. struct kvm_cpuid_entry2 *cpuid_entries;
  2182. int limit, nent = 0, r = -E2BIG;
  2183. u32 func;
  2184. if (cpuid->nent < 1)
  2185. goto out;
  2186. if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
  2187. cpuid->nent = KVM_MAX_CPUID_ENTRIES;
  2188. r = -ENOMEM;
  2189. cpuid_entries = vmalloc(sizeof(struct kvm_cpuid_entry2) * cpuid->nent);
  2190. if (!cpuid_entries)
  2191. goto out;
  2192. do_cpuid_ent(&cpuid_entries[0], 0, 0, &nent, cpuid->nent);
  2193. limit = cpuid_entries[0].eax;
  2194. for (func = 1; func <= limit && nent < cpuid->nent; ++func)
  2195. do_cpuid_ent(&cpuid_entries[nent], func, 0,
  2196. &nent, cpuid->nent);
  2197. r = -E2BIG;
  2198. if (nent >= cpuid->nent)
  2199. goto out_free;
  2200. do_cpuid_ent(&cpuid_entries[nent], 0x80000000, 0, &nent, cpuid->nent);
  2201. limit = cpuid_entries[nent - 1].eax;
  2202. for (func = 0x80000001; func <= limit && nent < cpuid->nent; ++func)
  2203. do_cpuid_ent(&cpuid_entries[nent], func, 0,
  2204. &nent, cpuid->nent);
  2205. r = -E2BIG;
  2206. if (nent >= cpuid->nent)
  2207. goto out_free;
  2208. do_cpuid_ent(&cpuid_entries[nent], KVM_CPUID_SIGNATURE, 0, &nent,
  2209. cpuid->nent);
  2210. r = -E2BIG;
  2211. if (nent >= cpuid->nent)
  2212. goto out_free;
  2213. do_cpuid_ent(&cpuid_entries[nent], KVM_CPUID_FEATURES, 0, &nent,
  2214. cpuid->nent);
  2215. r = -E2BIG;
  2216. if (nent >= cpuid->nent)
  2217. goto out_free;
  2218. r = -EFAULT;
  2219. if (copy_to_user(entries, cpuid_entries,
  2220. nent * sizeof(struct kvm_cpuid_entry2)))
  2221. goto out_free;
  2222. cpuid->nent = nent;
  2223. r = 0;
  2224. out_free:
  2225. vfree(cpuid_entries);
  2226. out:
  2227. return r;
  2228. }
  2229. static int kvm_vcpu_ioctl_get_lapic(struct kvm_vcpu *vcpu,
  2230. struct kvm_lapic_state *s)
  2231. {
  2232. memcpy(s->regs, vcpu->arch.apic->regs, sizeof *s);
  2233. return 0;
  2234. }
  2235. static int kvm_vcpu_ioctl_set_lapic(struct kvm_vcpu *vcpu,
  2236. struct kvm_lapic_state *s)
  2237. {
  2238. memcpy(vcpu->arch.apic->regs, s->regs, sizeof *s);
  2239. kvm_apic_post_state_restore(vcpu);
  2240. update_cr8_intercept(vcpu);
  2241. return 0;
  2242. }
  2243. static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
  2244. struct kvm_interrupt *irq)
  2245. {
  2246. if (irq->irq < 0 || irq->irq >= 256)
  2247. return -EINVAL;
  2248. if (irqchip_in_kernel(vcpu->kvm))
  2249. return -ENXIO;
  2250. kvm_queue_interrupt(vcpu, irq->irq, false);
  2251. kvm_make_request(KVM_REQ_EVENT, vcpu);
  2252. return 0;
  2253. }
  2254. static int kvm_vcpu_ioctl_nmi(struct kvm_vcpu *vcpu)
  2255. {
  2256. kvm_inject_nmi(vcpu);
  2257. return 0;
  2258. }
  2259. static int vcpu_ioctl_tpr_access_reporting(struct kvm_vcpu *vcpu,
  2260. struct kvm_tpr_access_ctl *tac)
  2261. {
  2262. if (tac->flags)
  2263. return -EINVAL;
  2264. vcpu->arch.tpr_access_reporting = !!tac->enabled;
  2265. return 0;
  2266. }
  2267. static int kvm_vcpu_ioctl_x86_setup_mce(struct kvm_vcpu *vcpu,
  2268. u64 mcg_cap)
  2269. {
  2270. int r;
  2271. unsigned bank_num = mcg_cap & 0xff, bank;
  2272. r = -EINVAL;
  2273. if (!bank_num || bank_num >= KVM_MAX_MCE_BANKS)
  2274. goto out;
  2275. if (mcg_cap & ~(KVM_MCE_CAP_SUPPORTED | 0xff | 0xff0000))
  2276. goto out;
  2277. r = 0;
  2278. vcpu->arch.mcg_cap = mcg_cap;
  2279. /* Init IA32_MCG_CTL to all 1s */
  2280. if (mcg_cap & MCG_CTL_P)
  2281. vcpu->arch.mcg_ctl = ~(u64)0;
  2282. /* Init IA32_MCi_CTL to all 1s */
  2283. for (bank = 0; bank < bank_num; bank++)
  2284. vcpu->arch.mce_banks[bank*4] = ~(u64)0;
  2285. out:
  2286. return r;
  2287. }
  2288. static int kvm_vcpu_ioctl_x86_set_mce(struct kvm_vcpu *vcpu,
  2289. struct kvm_x86_mce *mce)
  2290. {
  2291. u64 mcg_cap = vcpu->arch.mcg_cap;
  2292. unsigned bank_num = mcg_cap & 0xff;
  2293. u64 *banks = vcpu->arch.mce_banks;
  2294. if (mce->bank >= bank_num || !(mce->status & MCI_STATUS_VAL))
  2295. return -EINVAL;
  2296. /*
  2297. * if IA32_MCG_CTL is not all 1s, the uncorrected error
  2298. * reporting is disabled
  2299. */
  2300. if ((mce->status & MCI_STATUS_UC) && (mcg_cap & MCG_CTL_P) &&
  2301. vcpu->arch.mcg_ctl != ~(u64)0)
  2302. return 0;
  2303. banks += 4 * mce->bank;
  2304. /*
  2305. * if IA32_MCi_CTL is not all 1s, the uncorrected error
  2306. * reporting is disabled for the bank
  2307. */
  2308. if ((mce->status & MCI_STATUS_UC) && banks[0] != ~(u64)0)
  2309. return 0;
  2310. if (mce->status & MCI_STATUS_UC) {
  2311. if ((vcpu->arch.mcg_status & MCG_STATUS_MCIP) ||
  2312. !kvm_read_cr4_bits(vcpu, X86_CR4_MCE)) {
  2313. kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
  2314. return 0;
  2315. }
  2316. if (banks[1] & MCI_STATUS_VAL)
  2317. mce->status |= MCI_STATUS_OVER;
  2318. banks[2] = mce->addr;
  2319. banks[3] = mce->misc;
  2320. vcpu->arch.mcg_status = mce->mcg_status;
  2321. banks[1] = mce->status;
  2322. kvm_queue_exception(vcpu, MC_VECTOR);
  2323. } else if (!(banks[1] & MCI_STATUS_VAL)
  2324. || !(banks[1] & MCI_STATUS_UC)) {
  2325. if (banks[1] & MCI_STATUS_VAL)
  2326. mce->status |= MCI_STATUS_OVER;
  2327. banks[2] = mce->addr;
  2328. banks[3] = mce->misc;
  2329. banks[1] = mce->status;
  2330. } else
  2331. banks[1] |= MCI_STATUS_OVER;
  2332. return 0;
  2333. }
  2334. static void kvm_vcpu_ioctl_x86_get_vcpu_events(struct kvm_vcpu *vcpu,
  2335. struct kvm_vcpu_events *events)
  2336. {
  2337. events->exception.injected =
  2338. vcpu->arch.exception.pending &&
  2339. !kvm_exception_is_soft(vcpu->arch.exception.nr);
  2340. events->exception.nr = vcpu->arch.exception.nr;
  2341. events->exception.has_error_code = vcpu->arch.exception.has_error_code;
  2342. events->exception.pad = 0;
  2343. events->exception.error_code = vcpu->arch.exception.error_code;
  2344. events->interrupt.injected =
  2345. vcpu->arch.interrupt.pending && !vcpu->arch.interrupt.soft;
  2346. events->interrupt.nr = vcpu->arch.interrupt.nr;
  2347. events->interrupt.soft = 0;
  2348. events->interrupt.shadow =
  2349. kvm_x86_ops->get_interrupt_shadow(vcpu,
  2350. KVM_X86_SHADOW_INT_MOV_SS | KVM_X86_SHADOW_INT_STI);
  2351. events->nmi.injected = vcpu->arch.nmi_injected;
  2352. events->nmi.pending = vcpu->arch.nmi_pending;
  2353. events->nmi.masked = kvm_x86_ops->get_nmi_mask(vcpu);
  2354. events->nmi.pad = 0;
  2355. events->sipi_vector = vcpu->arch.sipi_vector;
  2356. events->flags = (KVM_VCPUEVENT_VALID_NMI_PENDING
  2357. | KVM_VCPUEVENT_VALID_SIPI_VECTOR
  2358. | KVM_VCPUEVENT_VALID_SHADOW);
  2359. memset(&events->reserved, 0, sizeof(events->reserved));
  2360. }
  2361. static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu,
  2362. struct kvm_vcpu_events *events)
  2363. {
  2364. if (events->flags & ~(KVM_VCPUEVENT_VALID_NMI_PENDING
  2365. | KVM_VCPUEVENT_VALID_SIPI_VECTOR
  2366. | KVM_VCPUEVENT_VALID_SHADOW))
  2367. return -EINVAL;
  2368. vcpu->arch.exception.pending = events->exception.injected;
  2369. vcpu->arch.exception.nr = events->exception.nr;
  2370. vcpu->arch.exception.has_error_code = events->exception.has_error_code;
  2371. vcpu->arch.exception.error_code = events->exception.error_code;
  2372. vcpu->arch.interrupt.pending = events->interrupt.injected;
  2373. vcpu->arch.interrupt.nr = events->interrupt.nr;
  2374. vcpu->arch.interrupt.soft = events->interrupt.soft;
  2375. if (events->flags & KVM_VCPUEVENT_VALID_SHADOW)
  2376. kvm_x86_ops->set_interrupt_shadow(vcpu,
  2377. events->interrupt.shadow);
  2378. vcpu->arch.nmi_injected = events->nmi.injected;
  2379. if (events->flags & KVM_VCPUEVENT_VALID_NMI_PENDING)
  2380. vcpu->arch.nmi_pending = events->nmi.pending;
  2381. kvm_x86_ops->set_nmi_mask(vcpu, events->nmi.masked);
  2382. if (events->flags & KVM_VCPUEVENT_VALID_SIPI_VECTOR)
  2383. vcpu->arch.sipi_vector = events->sipi_vector;
  2384. kvm_make_request(KVM_REQ_EVENT, vcpu);
  2385. return 0;
  2386. }
  2387. static void kvm_vcpu_ioctl_x86_get_debugregs(struct kvm_vcpu *vcpu,
  2388. struct kvm_debugregs *dbgregs)
  2389. {
  2390. memcpy(dbgregs->db, vcpu->arch.db, sizeof(vcpu->arch.db));
  2391. dbgregs->dr6 = vcpu->arch.dr6;
  2392. dbgregs->dr7 = vcpu->arch.dr7;
  2393. dbgregs->flags = 0;
  2394. memset(&dbgregs->reserved, 0, sizeof(dbgregs->reserved));
  2395. }
  2396. static int kvm_vcpu_ioctl_x86_set_debugregs(struct kvm_vcpu *vcpu,
  2397. struct kvm_debugregs *dbgregs)
  2398. {
  2399. if (dbgregs->flags)
  2400. return -EINVAL;
  2401. memcpy(vcpu->arch.db, dbgregs->db, sizeof(vcpu->arch.db));
  2402. vcpu->arch.dr6 = dbgregs->dr6;
  2403. vcpu->arch.dr7 = dbgregs->dr7;
  2404. return 0;
  2405. }
  2406. static void kvm_vcpu_ioctl_x86_get_xsave(struct kvm_vcpu *vcpu,
  2407. struct kvm_xsave *guest_xsave)
  2408. {
  2409. if (cpu_has_xsave)
  2410. memcpy(guest_xsave->region,
  2411. &vcpu->arch.guest_fpu.state->xsave,
  2412. xstate_size);
  2413. else {
  2414. memcpy(guest_xsave->region,
  2415. &vcpu->arch.guest_fpu.state->fxsave,
  2416. sizeof(struct i387_fxsave_struct));
  2417. *(u64 *)&guest_xsave->region[XSAVE_HDR_OFFSET / sizeof(u32)] =
  2418. XSTATE_FPSSE;
  2419. }
  2420. }
  2421. static int kvm_vcpu_ioctl_x86_set_xsave(struct kvm_vcpu *vcpu,
  2422. struct kvm_xsave *guest_xsave)
  2423. {
  2424. u64 xstate_bv =
  2425. *(u64 *)&guest_xsave->region[XSAVE_HDR_OFFSET / sizeof(u32)];
  2426. if (cpu_has_xsave)
  2427. memcpy(&vcpu->arch.guest_fpu.state->xsave,
  2428. guest_xsave->region, xstate_size);
  2429. else {
  2430. if (xstate_bv & ~XSTATE_FPSSE)
  2431. return -EINVAL;
  2432. memcpy(&vcpu->arch.guest_fpu.state->fxsave,
  2433. guest_xsave->region, sizeof(struct i387_fxsave_struct));
  2434. }
  2435. return 0;
  2436. }
  2437. static void kvm_vcpu_ioctl_x86_get_xcrs(struct kvm_vcpu *vcpu,
  2438. struct kvm_xcrs *guest_xcrs)
  2439. {
  2440. if (!cpu_has_xsave) {
  2441. guest_xcrs->nr_xcrs = 0;
  2442. return;
  2443. }
  2444. guest_xcrs->nr_xcrs = 1;
  2445. guest_xcrs->flags = 0;
  2446. guest_xcrs->xcrs[0].xcr = XCR_XFEATURE_ENABLED_MASK;
  2447. guest_xcrs->xcrs[0].value = vcpu->arch.xcr0;
  2448. }
  2449. static int kvm_vcpu_ioctl_x86_set_xcrs(struct kvm_vcpu *vcpu,
  2450. struct kvm_xcrs *guest_xcrs)
  2451. {
  2452. int i, r = 0;
  2453. if (!cpu_has_xsave)
  2454. return -EINVAL;
  2455. if (guest_xcrs->nr_xcrs > KVM_MAX_XCRS || guest_xcrs->flags)
  2456. return -EINVAL;
  2457. for (i = 0; i < guest_xcrs->nr_xcrs; i++)
  2458. /* Only support XCR0 currently */
  2459. if (guest_xcrs->xcrs[0].xcr == XCR_XFEATURE_ENABLED_MASK) {
  2460. r = __kvm_set_xcr(vcpu, XCR_XFEATURE_ENABLED_MASK,
  2461. guest_xcrs->xcrs[0].value);
  2462. break;
  2463. }
  2464. if (r)
  2465. r = -EINVAL;
  2466. return r;
  2467. }
  2468. long kvm_arch_vcpu_ioctl(struct file *filp,
  2469. unsigned int ioctl, unsigned long arg)
  2470. {
  2471. struct kvm_vcpu *vcpu = filp->private_data;
  2472. void __user *argp = (void __user *)arg;
  2473. int r;
  2474. union {
  2475. struct kvm_lapic_state *lapic;
  2476. struct kvm_xsave *xsave;
  2477. struct kvm_xcrs *xcrs;
  2478. void *buffer;
  2479. } u;
  2480. u.buffer = NULL;
  2481. switch (ioctl) {
  2482. case KVM_GET_LAPIC: {
  2483. r = -EINVAL;
  2484. if (!vcpu->arch.apic)
  2485. goto out;
  2486. u.lapic = kzalloc(sizeof(struct kvm_lapic_state), GFP_KERNEL);
  2487. r = -ENOMEM;
  2488. if (!u.lapic)
  2489. goto out;
  2490. r = kvm_vcpu_ioctl_get_lapic(vcpu, u.lapic);
  2491. if (r)
  2492. goto out;
  2493. r = -EFAULT;
  2494. if (copy_to_user(argp, u.lapic, sizeof(struct kvm_lapic_state)))
  2495. goto out;
  2496. r = 0;
  2497. break;
  2498. }
  2499. case KVM_SET_LAPIC: {
  2500. r = -EINVAL;
  2501. if (!vcpu->arch.apic)
  2502. goto out;
  2503. u.lapic = kmalloc(sizeof(struct kvm_lapic_state), GFP_KERNEL);
  2504. r = -ENOMEM;
  2505. if (!u.lapic)
  2506. goto out;
  2507. r = -EFAULT;
  2508. if (copy_from_user(u.lapic, argp, sizeof(struct kvm_lapic_state)))
  2509. goto out;
  2510. r = kvm_vcpu_ioctl_set_lapic(vcpu, u.lapic);
  2511. if (r)
  2512. goto out;
  2513. r = 0;
  2514. break;
  2515. }
  2516. case KVM_INTERRUPT: {
  2517. struct kvm_interrupt irq;
  2518. r = -EFAULT;
  2519. if (copy_from_user(&irq, argp, sizeof irq))
  2520. goto out;
  2521. r = kvm_vcpu_ioctl_interrupt(vcpu, &irq);
  2522. if (r)
  2523. goto out;
  2524. r = 0;
  2525. break;
  2526. }
  2527. case KVM_NMI: {
  2528. r = kvm_vcpu_ioctl_nmi(vcpu);
  2529. if (r)
  2530. goto out;
  2531. r = 0;
  2532. break;
  2533. }
  2534. case KVM_SET_CPUID: {
  2535. struct kvm_cpuid __user *cpuid_arg = argp;
  2536. struct kvm_cpuid cpuid;
  2537. r = -EFAULT;
  2538. if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
  2539. goto out;
  2540. r = kvm_vcpu_ioctl_set_cpuid(vcpu, &cpuid, cpuid_arg->entries);
  2541. if (r)
  2542. goto out;
  2543. break;
  2544. }
  2545. case KVM_SET_CPUID2: {
  2546. struct kvm_cpuid2 __user *cpuid_arg = argp;
  2547. struct kvm_cpuid2 cpuid;
  2548. r = -EFAULT;
  2549. if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
  2550. goto out;
  2551. r = kvm_vcpu_ioctl_set_cpuid2(vcpu, &cpuid,
  2552. cpuid_arg->entries);
  2553. if (r)
  2554. goto out;
  2555. break;
  2556. }
  2557. case KVM_GET_CPUID2: {
  2558. struct kvm_cpuid2 __user *cpuid_arg = argp;
  2559. struct kvm_cpuid2 cpuid;
  2560. r = -EFAULT;
  2561. if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
  2562. goto out;
  2563. r = kvm_vcpu_ioctl_get_cpuid2(vcpu, &cpuid,
  2564. cpuid_arg->entries);
  2565. if (r)
  2566. goto out;
  2567. r = -EFAULT;
  2568. if (copy_to_user(cpuid_arg, &cpuid, sizeof cpuid))
  2569. goto out;
  2570. r = 0;
  2571. break;
  2572. }
  2573. case KVM_GET_MSRS:
  2574. r = msr_io(vcpu, argp, kvm_get_msr, 1);
  2575. break;
  2576. case KVM_SET_MSRS:
  2577. r = msr_io(vcpu, argp, do_set_msr, 0);
  2578. break;
  2579. case KVM_TPR_ACCESS_REPORTING: {
  2580. struct kvm_tpr_access_ctl tac;
  2581. r = -EFAULT;
  2582. if (copy_from_user(&tac, argp, sizeof tac))
  2583. goto out;
  2584. r = vcpu_ioctl_tpr_access_reporting(vcpu, &tac);
  2585. if (r)
  2586. goto out;
  2587. r = -EFAULT;
  2588. if (copy_to_user(argp, &tac, sizeof tac))
  2589. goto out;
  2590. r = 0;
  2591. break;
2592. }
  2593. case KVM_SET_VAPIC_ADDR: {
  2594. struct kvm_vapic_addr va;
  2595. r = -EINVAL;
  2596. if (!irqchip_in_kernel(vcpu->kvm))
  2597. goto out;
  2598. r = -EFAULT;
  2599. if (copy_from_user(&va, argp, sizeof va))
  2600. goto out;
  2601. r = 0;
  2602. kvm_lapic_set_vapic_addr(vcpu, va.vapic_addr);
  2603. break;
  2604. }
  2605. case KVM_X86_SETUP_MCE: {
  2606. u64 mcg_cap;
  2607. r = -EFAULT;
  2608. if (copy_from_user(&mcg_cap, argp, sizeof mcg_cap))
  2609. goto out;
  2610. r = kvm_vcpu_ioctl_x86_setup_mce(vcpu, mcg_cap);
  2611. break;
  2612. }
  2613. case KVM_X86_SET_MCE: {
  2614. struct kvm_x86_mce mce;
  2615. r = -EFAULT;
  2616. if (copy_from_user(&mce, argp, sizeof mce))
  2617. goto out;
  2618. r = kvm_vcpu_ioctl_x86_set_mce(vcpu, &mce);
  2619. break;
  2620. }
  2621. case KVM_GET_VCPU_EVENTS: {
  2622. struct kvm_vcpu_events events;
  2623. kvm_vcpu_ioctl_x86_get_vcpu_events(vcpu, &events);
  2624. r = -EFAULT;
  2625. if (copy_to_user(argp, &events, sizeof(struct kvm_vcpu_events)))
  2626. break;
  2627. r = 0;
  2628. break;
  2629. }
  2630. case KVM_SET_VCPU_EVENTS: {
  2631. struct kvm_vcpu_events events;
  2632. r = -EFAULT;
  2633. if (copy_from_user(&events, argp, sizeof(struct kvm_vcpu_events)))
  2634. break;
  2635. r = kvm_vcpu_ioctl_x86_set_vcpu_events(vcpu, &events);
  2636. break;
  2637. }
  2638. case KVM_GET_DEBUGREGS: {
  2639. struct kvm_debugregs dbgregs;
  2640. kvm_vcpu_ioctl_x86_get_debugregs(vcpu, &dbgregs);
  2641. r = -EFAULT;
  2642. if (copy_to_user(argp, &dbgregs,
  2643. sizeof(struct kvm_debugregs)))
  2644. break;
  2645. r = 0;
  2646. break;
  2647. }
  2648. case KVM_SET_DEBUGREGS: {
  2649. struct kvm_debugregs dbgregs;
  2650. r = -EFAULT;
  2651. if (copy_from_user(&dbgregs, argp,
  2652. sizeof(struct kvm_debugregs)))
  2653. break;
  2654. r = kvm_vcpu_ioctl_x86_set_debugregs(vcpu, &dbgregs);
  2655. break;
  2656. }
  2657. case KVM_GET_XSAVE: {
  2658. u.xsave = kzalloc(sizeof(struct kvm_xsave), GFP_KERNEL);
  2659. r = -ENOMEM;
  2660. if (!u.xsave)
  2661. break;
  2662. kvm_vcpu_ioctl_x86_get_xsave(vcpu, u.xsave);
  2663. r = -EFAULT;
  2664. if (copy_to_user(argp, u.xsave, sizeof(struct kvm_xsave)))
  2665. break;
  2666. r = 0;
  2667. break;
  2668. }
  2669. case KVM_SET_XSAVE: {
  2670. u.xsave = kzalloc(sizeof(struct kvm_xsave), GFP_KERNEL);
  2671. r = -ENOMEM;
  2672. if (!u.xsave)
  2673. break;
  2674. r = -EFAULT;
  2675. if (copy_from_user(u.xsave, argp, sizeof(struct kvm_xsave)))
  2676. break;
  2677. r = kvm_vcpu_ioctl_x86_set_xsave(vcpu, u.xsave);
  2678. break;
  2679. }
  2680. case KVM_GET_XCRS: {
  2681. u.xcrs = kzalloc(sizeof(struct kvm_xcrs), GFP_KERNEL);
  2682. r = -ENOMEM;
  2683. if (!u.xcrs)
  2684. break;
  2685. kvm_vcpu_ioctl_x86_get_xcrs(vcpu, u.xcrs);
  2686. r = -EFAULT;
  2687. if (copy_to_user(argp, u.xcrs,
  2688. sizeof(struct kvm_xcrs)))
  2689. break;
  2690. r = 0;
  2691. break;
  2692. }
  2693. case KVM_SET_XCRS: {
  2694. u.xcrs = kzalloc(sizeof(struct kvm_xcrs), GFP_KERNEL);
  2695. r = -ENOMEM;
  2696. if (!u.xcrs)
  2697. break;
  2698. r = -EFAULT;
  2699. if (copy_from_user(u.xcrs, argp,
  2700. sizeof(struct kvm_xcrs)))
  2701. break;
  2702. r = kvm_vcpu_ioctl_x86_set_xcrs(vcpu, u.xcrs);
  2703. break;
  2704. }
  2705. case KVM_SET_TSC_KHZ: {
  2706. u32 user_tsc_khz;
  2707. r = -EINVAL;
  2708. if (!kvm_has_tsc_control)
  2709. break;
  2710. user_tsc_khz = (u32)arg;
  2711. if (user_tsc_khz >= kvm_max_guest_tsc_khz)
  2712. goto out;
  2713. kvm_x86_ops->set_tsc_khz(vcpu, user_tsc_khz);
  2714. r = 0;
  2715. goto out;
  2716. }
  2717. case KVM_GET_TSC_KHZ: {
  2718. r = -EIO;
  2719. if (check_tsc_unstable())
  2720. goto out;
  2721. r = vcpu_tsc_khz(vcpu);
  2722. goto out;
  2723. }
  2724. default:
  2725. r = -EINVAL;
  2726. }
  2727. out:
  2728. kfree(u.buffer);
  2729. return r;
  2730. }
  2731. static int kvm_vm_ioctl_set_tss_addr(struct kvm *kvm, unsigned long addr)
  2732. {
  2733. int ret;
  2734. if (addr > (unsigned int)(-3 * PAGE_SIZE))
  2735. return -1;
  2736. ret = kvm_x86_ops->set_tss_addr(kvm, addr);
  2737. return ret;
  2738. }
  2739. static int kvm_vm_ioctl_set_identity_map_addr(struct kvm *kvm,
  2740. u64 ident_addr)
  2741. {
  2742. kvm->arch.ept_identity_map_addr = ident_addr;
  2743. return 0;
  2744. }
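/*
 * Resize the shadow MMU page pool; takes slots_lock before mmu_lock, the
 * same lock ordering used by the dirty-log path below.
 */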
  2745. static int kvm_vm_ioctl_set_nr_mmu_pages(struct kvm *kvm,
  2746. u32 kvm_nr_mmu_pages)
  2747. {
  2748. if (kvm_nr_mmu_pages < KVM_MIN_ALLOC_MMU_PAGES)
  2749. return -EINVAL;
  2750. mutex_lock(&kvm->slots_lock);
  2751. spin_lock(&kvm->mmu_lock);
  2752. kvm_mmu_change_mmu_pages(kvm, kvm_nr_mmu_pages);
  2753. kvm->arch.n_requested_mmu_pages = kvm_nr_mmu_pages;
  2754. spin_unlock(&kvm->mmu_lock);
  2755. mutex_unlock(&kvm->slots_lock);
  2756. return 0;
  2757. }
  2758. static int kvm_vm_ioctl_get_nr_mmu_pages(struct kvm *kvm)
  2759. {
  2760. return kvm->arch.n_max_mmu_pages;
  2761. }
  2762. static int kvm_vm_ioctl_get_irqchip(struct kvm *kvm, struct kvm_irqchip *chip)
  2763. {
  2764. int r;
  2765. r = 0;
  2766. switch (chip->chip_id) {
  2767. case KVM_IRQCHIP_PIC_MASTER:
  2768. memcpy(&chip->chip.pic,
  2769. &pic_irqchip(kvm)->pics[0],
  2770. sizeof(struct kvm_pic_state));
  2771. break;
  2772. case KVM_IRQCHIP_PIC_SLAVE:
  2773. memcpy(&chip->chip.pic,
  2774. &pic_irqchip(kvm)->pics[1],
  2775. sizeof(struct kvm_pic_state));
  2776. break;
  2777. case KVM_IRQCHIP_IOAPIC:
  2778. r = kvm_get_ioapic(kvm, &chip->chip.ioapic);
  2779. break;
  2780. default:
  2781. r = -EINVAL;
  2782. break;
  2783. }
  2784. return r;
  2785. }
  2786. static int kvm_vm_ioctl_set_irqchip(struct kvm *kvm, struct kvm_irqchip *chip)
  2787. {
  2788. int r;
  2789. r = 0;
  2790. switch (chip->chip_id) {
  2791. case KVM_IRQCHIP_PIC_MASTER:
  2792. spin_lock(&pic_irqchip(kvm)->lock);
  2793. memcpy(&pic_irqchip(kvm)->pics[0],
  2794. &chip->chip.pic,
  2795. sizeof(struct kvm_pic_state));
  2796. spin_unlock(&pic_irqchip(kvm)->lock);
  2797. break;
  2798. case KVM_IRQCHIP_PIC_SLAVE:
  2799. spin_lock(&pic_irqchip(kvm)->lock);
  2800. memcpy(&pic_irqchip(kvm)->pics[1],
  2801. &chip->chip.pic,
  2802. sizeof(struct kvm_pic_state));
  2803. spin_unlock(&pic_irqchip(kvm)->lock);
  2804. break;
  2805. case KVM_IRQCHIP_IOAPIC:
  2806. r = kvm_set_ioapic(kvm, &chip->chip.ioapic);
  2807. break;
  2808. default:
  2809. r = -EINVAL;
  2810. break;
  2811. }
  2812. kvm_pic_update_irq(pic_irqchip(kvm));
  2813. return r;
  2814. }
  2815. static int kvm_vm_ioctl_get_pit(struct kvm *kvm, struct kvm_pit_state *ps)
  2816. {
  2817. int r = 0;
  2818. mutex_lock(&kvm->arch.vpit->pit_state.lock);
  2819. memcpy(ps, &kvm->arch.vpit->pit_state, sizeof(struct kvm_pit_state));
  2820. mutex_unlock(&kvm->arch.vpit->pit_state.lock);
  2821. return r;
  2822. }
  2823. static int kvm_vm_ioctl_set_pit(struct kvm *kvm, struct kvm_pit_state *ps)
  2824. {
  2825. int r = 0;
  2826. mutex_lock(&kvm->arch.vpit->pit_state.lock);
  2827. memcpy(&kvm->arch.vpit->pit_state, ps, sizeof(struct kvm_pit_state));
  2828. kvm_pit_load_count(kvm, 0, ps->channels[0].count, 0);
  2829. mutex_unlock(&kvm->arch.vpit->pit_state.lock);
  2830. return r;
  2831. }
  2832. static int kvm_vm_ioctl_get_pit2(struct kvm *kvm, struct kvm_pit_state2 *ps)
  2833. {
  2834. int r = 0;
  2835. mutex_lock(&kvm->arch.vpit->pit_state.lock);
  2836. memcpy(ps->channels, &kvm->arch.vpit->pit_state.channels,
  2837. sizeof(ps->channels));
  2838. ps->flags = kvm->arch.vpit->pit_state.flags;
  2839. mutex_unlock(&kvm->arch.vpit->pit_state.lock);
  2840. memset(&ps->reserved, 0, sizeof(ps->reserved));
  2841. return r;
  2842. }
  2843. static int kvm_vm_ioctl_set_pit2(struct kvm *kvm, struct kvm_pit_state2 *ps)
  2844. {
  2845. int r = 0, start = 0;
  2846. u32 prev_legacy, cur_legacy;
  2847. mutex_lock(&kvm->arch.vpit->pit_state.lock);
  2848. prev_legacy = kvm->arch.vpit->pit_state.flags & KVM_PIT_FLAGS_HPET_LEGACY;
  2849. cur_legacy = ps->flags & KVM_PIT_FLAGS_HPET_LEGACY;
  2850. if (!prev_legacy && cur_legacy)
  2851. start = 1;
  2852. memcpy(&kvm->arch.vpit->pit_state.channels, &ps->channels,
  2853. sizeof(kvm->arch.vpit->pit_state.channels));
  2854. kvm->arch.vpit->pit_state.flags = ps->flags;
  2855. kvm_pit_load_count(kvm, 0, kvm->arch.vpit->pit_state.channels[0].count, start);
  2856. mutex_unlock(&kvm->arch.vpit->pit_state.lock);
  2857. return r;
  2858. }
  2859. static int kvm_vm_ioctl_reinject(struct kvm *kvm,
  2860. struct kvm_reinject_control *control)
  2861. {
  2862. if (!kvm->arch.vpit)
  2863. return -ENXIO;
  2864. mutex_lock(&kvm->arch.vpit->pit_state.lock);
  2865. kvm->arch.vpit->pit_state.pit_timer.reinject = control->pit_reinject;
  2866. mutex_unlock(&kvm->arch.vpit->pit_state.lock);
  2867. return 0;
  2868. }
  2869. /*
  2870. * Get (and clear) the dirty memory log for a memory slot.
  2871. */
  2872. int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
  2873. struct kvm_dirty_log *log)
  2874. {
  2875. int r, i;
  2876. struct kvm_memory_slot *memslot;
  2877. unsigned long n;
  2878. unsigned long is_dirty = 0;
  2879. mutex_lock(&kvm->slots_lock);
  2880. r = -EINVAL;
  2881. if (log->slot >= KVM_MEMORY_SLOTS)
  2882. goto out;
  2883. memslot = &kvm->memslots->memslots[log->slot];
  2884. r = -ENOENT;
  2885. if (!memslot->dirty_bitmap)
  2886. goto out;
  2887. n = kvm_dirty_bitmap_bytes(memslot);
  2888. for (i = 0; !is_dirty && i < n/sizeof(long); i++)
  2889. is_dirty = memslot->dirty_bitmap[i];
  2890. /* If nothing is dirty, don't bother messing with page tables. */
  2891. if (is_dirty) {
  2892. struct kvm_memslots *slots, *old_slots;
  2893. unsigned long *dirty_bitmap;
  2894. dirty_bitmap = memslot->dirty_bitmap_head;
  2895. if (memslot->dirty_bitmap == dirty_bitmap)
  2896. dirty_bitmap += n / sizeof(long);
  2897. memset(dirty_bitmap, 0, n);
  2898. r = -ENOMEM;
  2899. slots = kzalloc(sizeof(struct kvm_memslots), GFP_KERNEL);
  2900. if (!slots)
  2901. goto out;
  2902. memcpy(slots, kvm->memslots, sizeof(struct kvm_memslots));
  2903. slots->memslots[log->slot].dirty_bitmap = dirty_bitmap;
  2904. slots->generation++;
  2905. old_slots = kvm->memslots;
  2906. rcu_assign_pointer(kvm->memslots, slots);
  2907. synchronize_srcu_expedited(&kvm->srcu);
  2908. dirty_bitmap = old_slots->memslots[log->slot].dirty_bitmap;
  2909. kfree(old_slots);
  2910. spin_lock(&kvm->mmu_lock);
  2911. kvm_mmu_slot_remove_write_access(kvm, log->slot);
  2912. spin_unlock(&kvm->mmu_lock);
  2913. r = -EFAULT;
  2914. if (copy_to_user(log->dirty_bitmap, dirty_bitmap, n))
  2915. goto out;
  2916. } else {
  2917. r = -EFAULT;
  2918. if (clear_user(log->dirty_bitmap, n))
  2919. goto out;
  2920. }
  2921. r = 0;
  2922. out:
  2923. mutex_unlock(&kvm->slots_lock);
  2924. return r;
  2925. }
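/*
 * Dispatcher for the VM-wide ioctls (irqchip, PIT, clock, ...).  Unknown
 * ioctls fall through with r == -ENOTTY so generic code can handle them.
 */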
  2926. long kvm_arch_vm_ioctl(struct file *filp,
  2927. unsigned int ioctl, unsigned long arg)
  2928. {
  2929. struct kvm *kvm = filp->private_data;
  2930. void __user *argp = (void __user *)arg;
  2931. int r = -ENOTTY;
  2932. /*
  2933. * This union makes it completely explicit to gcc-3.x
2934. * that these variables' stack usage should be
  2935. * combined, not added together.
  2936. */
  2937. union {
  2938. struct kvm_pit_state ps;
  2939. struct kvm_pit_state2 ps2;
  2940. struct kvm_pit_config pit_config;
  2941. } u;
  2942. switch (ioctl) {
  2943. case KVM_SET_TSS_ADDR:
  2944. r = kvm_vm_ioctl_set_tss_addr(kvm, arg);
  2945. if (r < 0)
  2946. goto out;
  2947. break;
  2948. case KVM_SET_IDENTITY_MAP_ADDR: {
  2949. u64 ident_addr;
  2950. r = -EFAULT;
  2951. if (copy_from_user(&ident_addr, argp, sizeof ident_addr))
  2952. goto out;
  2953. r = kvm_vm_ioctl_set_identity_map_addr(kvm, ident_addr);
  2954. if (r < 0)
  2955. goto out;
  2956. break;
  2957. }
  2958. case KVM_SET_NR_MMU_PAGES:
  2959. r = kvm_vm_ioctl_set_nr_mmu_pages(kvm, arg);
  2960. if (r)
  2961. goto out;
  2962. break;
  2963. case KVM_GET_NR_MMU_PAGES:
  2964. r = kvm_vm_ioctl_get_nr_mmu_pages(kvm);
  2965. break;
  2966. case KVM_CREATE_IRQCHIP: {
  2967. struct kvm_pic *vpic;
  2968. mutex_lock(&kvm->lock);
  2969. r = -EEXIST;
  2970. if (kvm->arch.vpic)
  2971. goto create_irqchip_unlock;
  2972. r = -ENOMEM;
  2973. vpic = kvm_create_pic(kvm);
  2974. if (vpic) {
  2975. r = kvm_ioapic_init(kvm);
  2976. if (r) {
  2977. mutex_lock(&kvm->slots_lock);
  2978. kvm_io_bus_unregister_dev(kvm, KVM_PIO_BUS,
  2979. &vpic->dev);
  2980. mutex_unlock(&kvm->slots_lock);
  2981. kfree(vpic);
  2982. goto create_irqchip_unlock;
  2983. }
  2984. } else
  2985. goto create_irqchip_unlock;
  2986. smp_wmb();
  2987. kvm->arch.vpic = vpic;
  2988. smp_wmb();
  2989. r = kvm_setup_default_irq_routing(kvm);
  2990. if (r) {
  2991. mutex_lock(&kvm->slots_lock);
  2992. mutex_lock(&kvm->irq_lock);
  2993. kvm_ioapic_destroy(kvm);
  2994. kvm_destroy_pic(kvm);
  2995. mutex_unlock(&kvm->irq_lock);
  2996. mutex_unlock(&kvm->slots_lock);
  2997. }
  2998. create_irqchip_unlock:
  2999. mutex_unlock(&kvm->lock);
  3000. break;
  3001. }
  3002. case KVM_CREATE_PIT:
  3003. u.pit_config.flags = KVM_PIT_SPEAKER_DUMMY;
  3004. goto create_pit;
  3005. case KVM_CREATE_PIT2:
  3006. r = -EFAULT;
  3007. if (copy_from_user(&u.pit_config, argp,
  3008. sizeof(struct kvm_pit_config)))
  3009. goto out;
  3010. create_pit:
  3011. mutex_lock(&kvm->slots_lock);
  3012. r = -EEXIST;
  3013. if (kvm->arch.vpit)
  3014. goto create_pit_unlock;
  3015. r = -ENOMEM;
  3016. kvm->arch.vpit = kvm_create_pit(kvm, u.pit_config.flags);
  3017. if (kvm->arch.vpit)
  3018. r = 0;
  3019. create_pit_unlock:
  3020. mutex_unlock(&kvm->slots_lock);
  3021. break;
  3022. case KVM_IRQ_LINE_STATUS:
  3023. case KVM_IRQ_LINE: {
  3024. struct kvm_irq_level irq_event;
  3025. r = -EFAULT;
  3026. if (copy_from_user(&irq_event, argp, sizeof irq_event))
  3027. goto out;
  3028. r = -ENXIO;
  3029. if (irqchip_in_kernel(kvm)) {
  3030. __s32 status;
  3031. status = kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID,
  3032. irq_event.irq, irq_event.level);
  3033. if (ioctl == KVM_IRQ_LINE_STATUS) {
  3034. r = -EFAULT;
  3035. irq_event.status = status;
  3036. if (copy_to_user(argp, &irq_event,
  3037. sizeof irq_event))
  3038. goto out;
  3039. }
  3040. r = 0;
  3041. }
  3042. break;
  3043. }
  3044. case KVM_GET_IRQCHIP: {
  3045. /* 0: PIC master, 1: PIC slave, 2: IOAPIC */
  3046. struct kvm_irqchip *chip = kmalloc(sizeof(*chip), GFP_KERNEL);
  3047. r = -ENOMEM;
  3048. if (!chip)
  3049. goto out;
  3050. r = -EFAULT;
  3051. if (copy_from_user(chip, argp, sizeof *chip))
  3052. goto get_irqchip_out;
  3053. r = -ENXIO;
  3054. if (!irqchip_in_kernel(kvm))
  3055. goto get_irqchip_out;
  3056. r = kvm_vm_ioctl_get_irqchip(kvm, chip);
  3057. if (r)
  3058. goto get_irqchip_out;
  3059. r = -EFAULT;
  3060. if (copy_to_user(argp, chip, sizeof *chip))
  3061. goto get_irqchip_out;
  3062. r = 0;
  3063. get_irqchip_out:
  3064. kfree(chip);
  3065. if (r)
  3066. goto out;
  3067. break;
  3068. }
  3069. case KVM_SET_IRQCHIP: {
  3070. /* 0: PIC master, 1: PIC slave, 2: IOAPIC */
  3071. struct kvm_irqchip *chip = kmalloc(sizeof(*chip), GFP_KERNEL);
  3072. r = -ENOMEM;
  3073. if (!chip)
  3074. goto out;
  3075. r = -EFAULT;
  3076. if (copy_from_user(chip, argp, sizeof *chip))
  3077. goto set_irqchip_out;
  3078. r = -ENXIO;
  3079. if (!irqchip_in_kernel(kvm))
  3080. goto set_irqchip_out;
  3081. r = kvm_vm_ioctl_set_irqchip(kvm, chip);
  3082. if (r)
  3083. goto set_irqchip_out;
  3084. r = 0;
  3085. set_irqchip_out:
  3086. kfree(chip);
  3087. if (r)
  3088. goto out;
  3089. break;
  3090. }
  3091. case KVM_GET_PIT: {
  3092. r = -EFAULT;
  3093. if (copy_from_user(&u.ps, argp, sizeof(struct kvm_pit_state)))
  3094. goto out;
  3095. r = -ENXIO;
  3096. if (!kvm->arch.vpit)
  3097. goto out;
  3098. r = kvm_vm_ioctl_get_pit(kvm, &u.ps);
  3099. if (r)
  3100. goto out;
  3101. r = -EFAULT;
  3102. if (copy_to_user(argp, &u.ps, sizeof(struct kvm_pit_state)))
  3103. goto out;
  3104. r = 0;
  3105. break;
  3106. }
  3107. case KVM_SET_PIT: {
  3108. r = -EFAULT;
  3109. if (copy_from_user(&u.ps, argp, sizeof u.ps))
  3110. goto out;
  3111. r = -ENXIO;
  3112. if (!kvm->arch.vpit)
  3113. goto out;
  3114. r = kvm_vm_ioctl_set_pit(kvm, &u.ps);
  3115. if (r)
  3116. goto out;
  3117. r = 0;
  3118. break;
  3119. }
  3120. case KVM_GET_PIT2: {
  3121. r = -ENXIO;
  3122. if (!kvm->arch.vpit)
  3123. goto out;
  3124. r = kvm_vm_ioctl_get_pit2(kvm, &u.ps2);
  3125. if (r)
  3126. goto out;
  3127. r = -EFAULT;
  3128. if (copy_to_user(argp, &u.ps2, sizeof(u.ps2)))
  3129. goto out;
  3130. r = 0;
  3131. break;
  3132. }
  3133. case KVM_SET_PIT2: {
  3134. r = -EFAULT;
  3135. if (copy_from_user(&u.ps2, argp, sizeof(u.ps2)))
  3136. goto out;
  3137. r = -ENXIO;
  3138. if (!kvm->arch.vpit)
  3139. goto out;
  3140. r = kvm_vm_ioctl_set_pit2(kvm, &u.ps2);
  3141. if (r)
  3142. goto out;
  3143. r = 0;
  3144. break;
  3145. }
  3146. case KVM_REINJECT_CONTROL: {
  3147. struct kvm_reinject_control control;
  3148. r = -EFAULT;
  3149. if (copy_from_user(&control, argp, sizeof(control)))
  3150. goto out;
  3151. r = kvm_vm_ioctl_reinject(kvm, &control);
  3152. if (r)
  3153. goto out;
  3154. r = 0;
  3155. break;
  3156. }
  3157. case KVM_XEN_HVM_CONFIG: {
  3158. r = -EFAULT;
  3159. if (copy_from_user(&kvm->arch.xen_hvm_config, argp,
  3160. sizeof(struct kvm_xen_hvm_config)))
  3161. goto out;
  3162. r = -EINVAL;
  3163. if (kvm->arch.xen_hvm_config.flags)
  3164. goto out;
  3165. r = 0;
  3166. break;
  3167. }
  3168. case KVM_SET_CLOCK: {
  3169. struct kvm_clock_data user_ns;
  3170. u64 now_ns;
  3171. s64 delta;
  3172. r = -EFAULT;
  3173. if (copy_from_user(&user_ns, argp, sizeof(user_ns)))
  3174. goto out;
  3175. r = -EINVAL;
  3176. if (user_ns.flags)
  3177. goto out;
  3178. r = 0;
  3179. local_irq_disable();
  3180. now_ns = get_kernel_ns();
  3181. delta = user_ns.clock - now_ns;
  3182. local_irq_enable();
  3183. kvm->arch.kvmclock_offset = delta;
  3184. break;
  3185. }
  3186. case KVM_GET_CLOCK: {
  3187. struct kvm_clock_data user_ns;
  3188. u64 now_ns;
  3189. local_irq_disable();
  3190. now_ns = get_kernel_ns();
  3191. user_ns.clock = kvm->arch.kvmclock_offset + now_ns;
  3192. local_irq_enable();
  3193. user_ns.flags = 0;
  3194. memset(&user_ns.pad, 0, sizeof(user_ns.pad));
  3195. r = -EFAULT;
  3196. if (copy_to_user(argp, &user_ns, sizeof(user_ns)))
  3197. goto out;
  3198. r = 0;
  3199. break;
  3200. }
  3201. default:
  3202. ;
  3203. }
  3204. out:
  3205. return r;
  3206. }
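/*
 * Probe each MSR in msrs_to_save[] with rdmsr_safe() and compact the list
 * so that only MSRs actually present on this host remain; the leading
 * KVM-defined MSRs are kept unconditionally.
 */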
  3207. static void kvm_init_msr_list(void)
  3208. {
  3209. u32 dummy[2];
  3210. unsigned i, j;
  3211. /* skip the first msrs in the list. KVM-specific */
  3212. for (i = j = KVM_SAVE_MSRS_BEGIN; i < ARRAY_SIZE(msrs_to_save); i++) {
  3213. if (rdmsr_safe(msrs_to_save[i], &dummy[0], &dummy[1]) < 0)
  3214. continue;
  3215. if (j < i)
  3216. msrs_to_save[j] = msrs_to_save[i];
  3217. j++;
  3218. }
  3219. num_msrs_to_save = j;
  3220. }
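/*
 * Write/read MMIO in chunks of at most 8 bytes, trying the in-kernel
 * local APIC first and then the MMIO bus.  The return value is the
 * number of bytes handled in the kernel; the rest must go to userspace.
 */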
  3221. static int vcpu_mmio_write(struct kvm_vcpu *vcpu, gpa_t addr, int len,
  3222. const void *v)
  3223. {
  3224. int handled = 0;
  3225. int n;
  3226. do {
  3227. n = min(len, 8);
  3228. if (!(vcpu->arch.apic &&
  3229. !kvm_iodevice_write(&vcpu->arch.apic->dev, addr, n, v))
  3230. && kvm_io_bus_write(vcpu->kvm, KVM_MMIO_BUS, addr, n, v))
  3231. break;
  3232. handled += n;
  3233. addr += n;
  3234. len -= n;
  3235. v += n;
  3236. } while (len);
  3237. return handled;
  3238. }
  3239. static int vcpu_mmio_read(struct kvm_vcpu *vcpu, gpa_t addr, int len, void *v)
  3240. {
  3241. int handled = 0;
  3242. int n;
  3243. do {
  3244. n = min(len, 8);
  3245. if (!(vcpu->arch.apic &&
  3246. !kvm_iodevice_read(&vcpu->arch.apic->dev, addr, n, v))
  3247. && kvm_io_bus_read(vcpu->kvm, KVM_MMIO_BUS, addr, n, v))
  3248. break;
  3249. trace_kvm_mmio(KVM_TRACE_MMIO_READ, n, addr, *(u64 *)v);
  3250. handled += n;
  3251. addr += n;
  3252. len -= n;
  3253. v += n;
  3254. } while (len);
  3255. return handled;
  3256. }
  3257. static void kvm_set_segment(struct kvm_vcpu *vcpu,
  3258. struct kvm_segment *var, int seg)
  3259. {
  3260. kvm_x86_ops->set_segment(vcpu, var, seg);
  3261. }
  3262. void kvm_get_segment(struct kvm_vcpu *vcpu,
  3263. struct kvm_segment *var, int seg)
  3264. {
  3265. kvm_x86_ops->get_segment(vcpu, var, seg);
  3266. }
  3267. static gpa_t translate_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access)
  3268. {
  3269. return gpa;
  3270. }
  3271. static gpa_t translate_nested_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access)
  3272. {
  3273. gpa_t t_gpa;
  3274. struct x86_exception exception;
  3275. BUG_ON(!mmu_is_nested(vcpu));
  3276. /* NPT walks are always user-walks */
  3277. access |= PFERR_USER_MASK;
  3278. t_gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, gpa, access, &exception);
  3279. return t_gpa;
  3280. }
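/*
 * gva->gpa translation helpers for the emulator.  The access mask is
 * derived from the current CPL plus the kind of access (read, fetch or
 * write) so that permission faults are reported correctly.
 */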
  3281. gpa_t kvm_mmu_gva_to_gpa_read(struct kvm_vcpu *vcpu, gva_t gva,
  3282. struct x86_exception *exception)
  3283. {
  3284. u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
  3285. return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception);
  3286. }
  3287. gpa_t kvm_mmu_gva_to_gpa_fetch(struct kvm_vcpu *vcpu, gva_t gva,
  3288. struct x86_exception *exception)
  3289. {
  3290. u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
  3291. access |= PFERR_FETCH_MASK;
  3292. return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception);
  3293. }
  3294. gpa_t kvm_mmu_gva_to_gpa_write(struct kvm_vcpu *vcpu, gva_t gva,
  3295. struct x86_exception *exception)
  3296. {
  3297. u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
  3298. access |= PFERR_WRITE_MASK;
  3299. return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception);
  3300. }
3301. /* Used to access any guest's mapped memory without checking CPL. */
  3302. gpa_t kvm_mmu_gva_to_gpa_system(struct kvm_vcpu *vcpu, gva_t gva,
  3303. struct x86_exception *exception)
  3304. {
  3305. return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, 0, exception);
  3306. }
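/*
 * Copy guest-virtual memory, splitting the range at page boundaries and
 * translating each page separately.  An unmapped gva propagates a fault
 * to the caller; a failed read is reported as X86EMUL_IO_NEEDED.
 */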
  3307. static int kvm_read_guest_virt_helper(gva_t addr, void *val, unsigned int bytes,
  3308. struct kvm_vcpu *vcpu, u32 access,
  3309. struct x86_exception *exception)
  3310. {
  3311. void *data = val;
  3312. int r = X86EMUL_CONTINUE;
  3313. while (bytes) {
  3314. gpa_t gpa = vcpu->arch.walk_mmu->gva_to_gpa(vcpu, addr, access,
  3315. exception);
  3316. unsigned offset = addr & (PAGE_SIZE-1);
  3317. unsigned toread = min(bytes, (unsigned)PAGE_SIZE - offset);
  3318. int ret;
  3319. if (gpa == UNMAPPED_GVA)
  3320. return X86EMUL_PROPAGATE_FAULT;
  3321. ret = kvm_read_guest(vcpu->kvm, gpa, data, toread);
  3322. if (ret < 0) {
  3323. r = X86EMUL_IO_NEEDED;
  3324. goto out;
  3325. }
  3326. bytes -= toread;
  3327. data += toread;
  3328. addr += toread;
  3329. }
  3330. out:
  3331. return r;
  3332. }
  3333. /* used for instruction fetching */
  3334. static int kvm_fetch_guest_virt(struct x86_emulate_ctxt *ctxt,
  3335. gva_t addr, void *val, unsigned int bytes,
  3336. struct x86_exception *exception)
  3337. {
  3338. struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
  3339. u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
  3340. return kvm_read_guest_virt_helper(addr, val, bytes, vcpu,
  3341. access | PFERR_FETCH_MASK,
  3342. exception);
  3343. }
  3344. static int kvm_read_guest_virt(struct x86_emulate_ctxt *ctxt,
  3345. gva_t addr, void *val, unsigned int bytes,
  3346. struct x86_exception *exception)
  3347. {
  3348. struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
  3349. u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
  3350. return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, access,
  3351. exception);
  3352. }
  3353. static int kvm_read_guest_virt_system(struct x86_emulate_ctxt *ctxt,
  3354. gva_t addr, void *val, unsigned int bytes,
  3355. struct x86_exception *exception)
  3356. {
  3357. struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
  3358. return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, 0, exception);
  3359. }
  3360. static int kvm_write_guest_virt_system(struct x86_emulate_ctxt *ctxt,
  3361. gva_t addr, void *val,
  3362. unsigned int bytes,
  3363. struct x86_exception *exception)
  3364. {
  3365. struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
  3366. void *data = val;
  3367. int r = X86EMUL_CONTINUE;
  3368. while (bytes) {
  3369. gpa_t gpa = vcpu->arch.walk_mmu->gva_to_gpa(vcpu, addr,
  3370. PFERR_WRITE_MASK,
  3371. exception);
  3372. unsigned offset = addr & (PAGE_SIZE-1);
  3373. unsigned towrite = min(bytes, (unsigned)PAGE_SIZE - offset);
  3374. int ret;
  3375. if (gpa == UNMAPPED_GVA)
  3376. return X86EMUL_PROPAGATE_FAULT;
  3377. ret = kvm_write_guest(vcpu->kvm, gpa, data, towrite);
  3378. if (ret < 0) {
  3379. r = X86EMUL_IO_NEEDED;
  3380. goto out;
  3381. }
  3382. bytes -= towrite;
  3383. data += towrite;
  3384. addr += towrite;
  3385. }
  3386. out:
  3387. return r;
  3388. }
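/*
 * Emulator read callback: complete a pending MMIO read if one has
 * finished, otherwise try ordinary guest memory, and finally fall back
 * to an MMIO exit to userspace for anything the kernel cannot handle.
 */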
  3389. static int emulator_read_emulated(struct x86_emulate_ctxt *ctxt,
  3390. unsigned long addr,
  3391. void *val,
  3392. unsigned int bytes,
  3393. struct x86_exception *exception)
  3394. {
  3395. struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
  3396. gpa_t gpa;
  3397. int handled;
  3398. if (vcpu->mmio_read_completed) {
  3399. memcpy(val, vcpu->mmio_data, bytes);
  3400. trace_kvm_mmio(KVM_TRACE_MMIO_READ, bytes,
  3401. vcpu->mmio_phys_addr, *(u64 *)val);
  3402. vcpu->mmio_read_completed = 0;
  3403. return X86EMUL_CONTINUE;
  3404. }
  3405. gpa = kvm_mmu_gva_to_gpa_read(vcpu, addr, exception);
  3406. if (gpa == UNMAPPED_GVA)
  3407. return X86EMUL_PROPAGATE_FAULT;
  3408. /* For APIC access vmexit */
  3409. if ((gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE)
  3410. goto mmio;
  3411. if (kvm_read_guest_virt(ctxt, addr, val, bytes, exception)
  3412. == X86EMUL_CONTINUE)
  3413. return X86EMUL_CONTINUE;
  3414. mmio:
  3415. /*
  3416. * Is this MMIO handled locally?
  3417. */
  3418. handled = vcpu_mmio_read(vcpu, gpa, bytes, val);
  3419. if (handled == bytes)
  3420. return X86EMUL_CONTINUE;
  3421. gpa += handled;
  3422. bytes -= handled;
  3423. val += handled;
  3424. trace_kvm_mmio(KVM_TRACE_MMIO_READ_UNSATISFIED, bytes, gpa, 0);
  3425. vcpu->mmio_needed = 1;
  3426. vcpu->run->exit_reason = KVM_EXIT_MMIO;
  3427. vcpu->run->mmio.phys_addr = vcpu->mmio_phys_addr = gpa;
  3428. vcpu->mmio_size = bytes;
  3429. vcpu->run->mmio.len = min(vcpu->mmio_size, 8);
  3430. vcpu->run->mmio.is_write = vcpu->mmio_is_write = 0;
  3431. vcpu->mmio_index = 0;
  3432. return X86EMUL_IO_NEEDED;
  3433. }
  3434. int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
  3435. const void *val, int bytes)
  3436. {
  3437. int ret;
  3438. ret = kvm_write_guest(vcpu->kvm, gpa, val, bytes);
  3439. if (ret < 0)
  3440. return 0;
  3441. kvm_mmu_pte_write(vcpu, gpa, val, bytes, 1);
  3442. return 1;
  3443. }
  3444. static int emulator_write_emulated_onepage(unsigned long addr,
  3445. const void *val,
  3446. unsigned int bytes,
  3447. struct x86_exception *exception,
  3448. struct kvm_vcpu *vcpu)
  3449. {
  3450. gpa_t gpa;
  3451. int handled;
  3452. gpa = kvm_mmu_gva_to_gpa_write(vcpu, addr, exception);
  3453. if (gpa == UNMAPPED_GVA)
  3454. return X86EMUL_PROPAGATE_FAULT;
  3455. /* For APIC access vmexit */
  3456. if ((gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE)
  3457. goto mmio;
  3458. if (emulator_write_phys(vcpu, gpa, val, bytes))
  3459. return X86EMUL_CONTINUE;
  3460. mmio:
  3461. trace_kvm_mmio(KVM_TRACE_MMIO_WRITE, bytes, gpa, *(u64 *)val);
  3462. /*
  3463. * Is this MMIO handled locally?
  3464. */
  3465. handled = vcpu_mmio_write(vcpu, gpa, bytes, val);
  3466. if (handled == bytes)
  3467. return X86EMUL_CONTINUE;
  3468. gpa += handled;
  3469. bytes -= handled;
  3470. val += handled;
  3471. vcpu->mmio_needed = 1;
  3472. memcpy(vcpu->mmio_data, val, bytes);
  3473. vcpu->run->exit_reason = KVM_EXIT_MMIO;
  3474. vcpu->run->mmio.phys_addr = vcpu->mmio_phys_addr = gpa;
  3475. vcpu->mmio_size = bytes;
  3476. vcpu->run->mmio.len = min(vcpu->mmio_size, 8);
  3477. vcpu->run->mmio.is_write = vcpu->mmio_is_write = 1;
  3478. memcpy(vcpu->run->mmio.data, vcpu->mmio_data, 8);
  3479. vcpu->mmio_index = 0;
  3480. return X86EMUL_CONTINUE;
  3481. }
  3482. int emulator_write_emulated(struct x86_emulate_ctxt *ctxt,
  3483. unsigned long addr,
  3484. const void *val,
  3485. unsigned int bytes,
  3486. struct x86_exception *exception)
  3487. {
  3488. struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
  3489. /* Crossing a page boundary? */
  3490. if (((addr + bytes - 1) ^ addr) & PAGE_MASK) {
  3491. int rc, now;
  3492. now = -addr & ~PAGE_MASK;
  3493. rc = emulator_write_emulated_onepage(addr, val, now, exception,
  3494. vcpu);
  3495. if (rc != X86EMUL_CONTINUE)
  3496. return rc;
  3497. addr += now;
  3498. val += now;
  3499. bytes -= now;
  3500. }
  3501. return emulator_write_emulated_onepage(addr, val, bytes, exception,
  3502. vcpu);
  3503. }
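/*
 * Helpers for emulating cmpxchg against guest memory.  The operation is
 * done atomically on a kernel mapping of the guest page when possible;
 * otherwise it degrades to a plain emulated write (see emul_write below).
 */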
  3504. #define CMPXCHG_TYPE(t, ptr, old, new) \
  3505. (cmpxchg((t *)(ptr), *(t *)(old), *(t *)(new)) == *(t *)(old))
  3506. #ifdef CONFIG_X86_64
  3507. # define CMPXCHG64(ptr, old, new) CMPXCHG_TYPE(u64, ptr, old, new)
  3508. #else
  3509. # define CMPXCHG64(ptr, old, new) \
  3510. (cmpxchg64((u64 *)(ptr), *(u64 *)(old), *(u64 *)(new)) == *(u64 *)(old))
  3511. #endif
  3512. static int emulator_cmpxchg_emulated(struct x86_emulate_ctxt *ctxt,
  3513. unsigned long addr,
  3514. const void *old,
  3515. const void *new,
  3516. unsigned int bytes,
  3517. struct x86_exception *exception)
  3518. {
  3519. struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
  3520. gpa_t gpa;
  3521. struct page *page;
  3522. char *kaddr;
  3523. bool exchanged;
3524. /* a guest cmpxchg8b has to be emulated atomically */
  3525. if (bytes > 8 || (bytes & (bytes - 1)))
  3526. goto emul_write;
  3527. gpa = kvm_mmu_gva_to_gpa_write(vcpu, addr, NULL);
  3528. if (gpa == UNMAPPED_GVA ||
  3529. (gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE)
  3530. goto emul_write;
  3531. if (((gpa + bytes - 1) & PAGE_MASK) != (gpa & PAGE_MASK))
  3532. goto emul_write;
  3533. page = gfn_to_page(vcpu->kvm, gpa >> PAGE_SHIFT);
  3534. if (is_error_page(page)) {
  3535. kvm_release_page_clean(page);
  3536. goto emul_write;
  3537. }
  3538. kaddr = kmap_atomic(page, KM_USER0);
  3539. kaddr += offset_in_page(gpa);
  3540. switch (bytes) {
  3541. case 1:
  3542. exchanged = CMPXCHG_TYPE(u8, kaddr, old, new);
  3543. break;
  3544. case 2:
  3545. exchanged = CMPXCHG_TYPE(u16, kaddr, old, new);
  3546. break;
  3547. case 4:
  3548. exchanged = CMPXCHG_TYPE(u32, kaddr, old, new);
  3549. break;
  3550. case 8:
  3551. exchanged = CMPXCHG64(kaddr, old, new);
  3552. break;
  3553. default:
  3554. BUG();
  3555. }
  3556. kunmap_atomic(kaddr, KM_USER0);
  3557. kvm_release_page_dirty(page);
  3558. if (!exchanged)
  3559. return X86EMUL_CMPXCHG_FAILED;
  3560. kvm_mmu_pte_write(vcpu, gpa, new, bytes, 1);
  3561. return X86EMUL_CONTINUE;
  3562. emul_write:
  3563. printk_once(KERN_WARNING "kvm: emulating exchange as write\n");
  3564. return emulator_write_emulated(ctxt, addr, new, bytes, exception);
  3565. }
  3566. static int kernel_pio(struct kvm_vcpu *vcpu, void *pd)
  3567. {
3568. /* TODO: String I/O for in-kernel devices */
  3569. int r;
  3570. if (vcpu->arch.pio.in)
  3571. r = kvm_io_bus_read(vcpu->kvm, KVM_PIO_BUS, vcpu->arch.pio.port,
  3572. vcpu->arch.pio.size, pd);
  3573. else
  3574. r = kvm_io_bus_write(vcpu->kvm, KVM_PIO_BUS,
  3575. vcpu->arch.pio.port, vcpu->arch.pio.size,
  3576. pd);
  3577. return r;
  3578. }
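/*
 * Port I/O emulation.  If the in-kernel device models handle the access,
 * data is copied directly; otherwise the run structure is set up for a
 * KVM_EXIT_IO and the "in" side is completed on the next call through
 * the data_avail path.
 */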
  3579. static int emulator_pio_in_emulated(struct x86_emulate_ctxt *ctxt,
  3580. int size, unsigned short port, void *val,
  3581. unsigned int count)
  3582. {
  3583. struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
  3584. if (vcpu->arch.pio.count)
  3585. goto data_avail;
  3586. trace_kvm_pio(0, port, size, count);
  3587. vcpu->arch.pio.port = port;
  3588. vcpu->arch.pio.in = 1;
  3589. vcpu->arch.pio.count = count;
  3590. vcpu->arch.pio.size = size;
  3591. if (!kernel_pio(vcpu, vcpu->arch.pio_data)) {
  3592. data_avail:
  3593. memcpy(val, vcpu->arch.pio_data, size * count);
  3594. vcpu->arch.pio.count = 0;
  3595. return 1;
  3596. }
  3597. vcpu->run->exit_reason = KVM_EXIT_IO;
  3598. vcpu->run->io.direction = KVM_EXIT_IO_IN;
  3599. vcpu->run->io.size = size;
  3600. vcpu->run->io.data_offset = KVM_PIO_PAGE_OFFSET * PAGE_SIZE;
  3601. vcpu->run->io.count = count;
  3602. vcpu->run->io.port = port;
  3603. return 0;
  3604. }
  3605. static int emulator_pio_out_emulated(struct x86_emulate_ctxt *ctxt,
  3606. int size, unsigned short port,
  3607. const void *val, unsigned int count)
  3608. {
  3609. struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
  3610. trace_kvm_pio(1, port, size, count);
  3611. vcpu->arch.pio.port = port;
  3612. vcpu->arch.pio.in = 0;
  3613. vcpu->arch.pio.count = count;
  3614. vcpu->arch.pio.size = size;
  3615. memcpy(vcpu->arch.pio_data, val, size * count);
  3616. if (!kernel_pio(vcpu, vcpu->arch.pio_data)) {
  3617. vcpu->arch.pio.count = 0;
  3618. return 1;
  3619. }
  3620. vcpu->run->exit_reason = KVM_EXIT_IO;
  3621. vcpu->run->io.direction = KVM_EXIT_IO_OUT;
  3622. vcpu->run->io.size = size;
  3623. vcpu->run->io.data_offset = KVM_PIO_PAGE_OFFSET * PAGE_SIZE;
  3624. vcpu->run->io.count = count;
  3625. vcpu->run->io.port = port;
  3626. return 0;
  3627. }
  3628. static unsigned long get_segment_base(struct kvm_vcpu *vcpu, int seg)
  3629. {
  3630. return kvm_x86_ops->get_segment_base(vcpu, seg);
  3631. }
  3632. static void emulator_invlpg(struct x86_emulate_ctxt *ctxt, ulong address)
  3633. {
  3634. kvm_mmu_invlpg(emul_to_vcpu(ctxt), address);
  3635. }
  3636. int kvm_emulate_wbinvd(struct kvm_vcpu *vcpu)
  3637. {
  3638. if (!need_emulate_wbinvd(vcpu))
  3639. return X86EMUL_CONTINUE;
  3640. if (kvm_x86_ops->has_wbinvd_exit()) {
  3641. int cpu = get_cpu();
  3642. cpumask_set_cpu(cpu, vcpu->arch.wbinvd_dirty_mask);
  3643. smp_call_function_many(vcpu->arch.wbinvd_dirty_mask,
  3644. wbinvd_ipi, NULL, 1);
  3645. put_cpu();
  3646. cpumask_clear(vcpu->arch.wbinvd_dirty_mask);
  3647. } else
  3648. wbinvd();
  3649. return X86EMUL_CONTINUE;
  3650. }
  3651. EXPORT_SYMBOL_GPL(kvm_emulate_wbinvd);
  3652. static void emulator_wbinvd(struct x86_emulate_ctxt *ctxt)
  3653. {
  3654. kvm_emulate_wbinvd(emul_to_vcpu(ctxt));
  3655. }
  3656. int emulator_get_dr(struct x86_emulate_ctxt *ctxt, int dr, unsigned long *dest)
  3657. {
  3658. return _kvm_get_dr(emul_to_vcpu(ctxt), dr, dest);
  3659. }
  3660. int emulator_set_dr(struct x86_emulate_ctxt *ctxt, int dr, unsigned long value)
  3661. {
  3662. return __kvm_set_dr(emul_to_vcpu(ctxt), dr, value);
  3663. }
  3664. static u64 mk_cr_64(u64 curr_cr, u32 new_val)
  3665. {
  3666. return (curr_cr & ~((1ULL << 32) - 1)) | new_val;
  3667. }
  3668. static unsigned long emulator_get_cr(struct x86_emulate_ctxt *ctxt, int cr)
  3669. {
  3670. struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
  3671. unsigned long value;
  3672. switch (cr) {
  3673. case 0:
  3674. value = kvm_read_cr0(vcpu);
  3675. break;
  3676. case 2:
  3677. value = vcpu->arch.cr2;
  3678. break;
  3679. case 3:
  3680. value = kvm_read_cr3(vcpu);
  3681. break;
  3682. case 4:
  3683. value = kvm_read_cr4(vcpu);
  3684. break;
  3685. case 8:
  3686. value = kvm_get_cr8(vcpu);
  3687. break;
  3688. default:
  3689. vcpu_printf(vcpu, "%s: unexpected cr %u\n", __func__, cr);
  3690. return 0;
  3691. }
  3692. return value;
  3693. }
  3694. static int emulator_set_cr(struct x86_emulate_ctxt *ctxt, int cr, ulong val)
  3695. {
  3696. struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
  3697. int res = 0;
  3698. switch (cr) {
  3699. case 0:
  3700. res = kvm_set_cr0(vcpu, mk_cr_64(kvm_read_cr0(vcpu), val));
  3701. break;
  3702. case 2:
  3703. vcpu->arch.cr2 = val;
  3704. break;
  3705. case 3:
  3706. res = kvm_set_cr3(vcpu, val);
  3707. break;
  3708. case 4:
  3709. res = kvm_set_cr4(vcpu, mk_cr_64(kvm_read_cr4(vcpu), val));
  3710. break;
  3711. case 8:
  3712. res = kvm_set_cr8(vcpu, val);
  3713. break;
  3714. default:
  3715. vcpu_printf(vcpu, "%s: unexpected cr %u\n", __func__, cr);
  3716. res = -1;
  3717. }
  3718. return res;
  3719. }
  3720. static int emulator_get_cpl(struct x86_emulate_ctxt *ctxt)
  3721. {
  3722. return kvm_x86_ops->get_cpl(emul_to_vcpu(ctxt));
  3723. }
  3724. static void emulator_get_gdt(struct x86_emulate_ctxt *ctxt, struct desc_ptr *dt)
  3725. {
  3726. kvm_x86_ops->get_gdt(emul_to_vcpu(ctxt), dt);
  3727. }
  3728. static void emulator_get_idt(struct x86_emulate_ctxt *ctxt, struct desc_ptr *dt)
  3729. {
  3730. kvm_x86_ops->get_idt(emul_to_vcpu(ctxt), dt);
  3731. }
  3732. static void emulator_set_gdt(struct x86_emulate_ctxt *ctxt, struct desc_ptr *dt)
  3733. {
  3734. kvm_x86_ops->set_gdt(emul_to_vcpu(ctxt), dt);
  3735. }
  3736. static void emulator_set_idt(struct x86_emulate_ctxt *ctxt, struct desc_ptr *dt)
  3737. {
  3738. kvm_x86_ops->set_idt(emul_to_vcpu(ctxt), dt);
  3739. }
  3740. static unsigned long emulator_get_cached_segment_base(
  3741. struct x86_emulate_ctxt *ctxt, int seg)
  3742. {
  3743. return get_segment_base(emul_to_vcpu(ctxt), seg);
  3744. }
  3745. static bool emulator_get_cached_descriptor(struct x86_emulate_ctxt *ctxt,
  3746. struct desc_struct *desc, u32 *base3,
  3747. int seg)
  3748. {
  3749. struct kvm_segment var;
  3750. kvm_get_segment(emul_to_vcpu(ctxt), &var, seg);
  3751. if (var.unusable)
  3752. return false;
  3753. if (var.g)
  3754. var.limit >>= 12;
  3755. set_desc_limit(desc, var.limit);
  3756. set_desc_base(desc, (unsigned long)var.base);
  3757. #ifdef CONFIG_X86_64
  3758. if (base3)
  3759. *base3 = var.base >> 32;
  3760. #endif
  3761. desc->type = var.type;
  3762. desc->s = var.s;
  3763. desc->dpl = var.dpl;
  3764. desc->p = var.present;
  3765. desc->avl = var.avl;
  3766. desc->l = var.l;
  3767. desc->d = var.db;
  3768. desc->g = var.g;
  3769. return true;
  3770. }
  3771. static void emulator_set_cached_descriptor(struct x86_emulate_ctxt *ctxt,
  3772. struct desc_struct *desc, u32 base3,
  3773. int seg)
  3774. {
  3775. struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
  3776. struct kvm_segment var;
  3777. /* needed to preserve selector */
  3778. kvm_get_segment(vcpu, &var, seg);
  3779. var.base = get_desc_base(desc);
  3780. #ifdef CONFIG_X86_64
  3781. var.base |= ((u64)base3) << 32;
  3782. #endif
  3783. var.limit = get_desc_limit(desc);
  3784. if (desc->g)
  3785. var.limit = (var.limit << 12) | 0xfff;
  3786. var.type = desc->type;
  3788. var.dpl = desc->dpl;
  3789. var.db = desc->d;
  3790. var.s = desc->s;
  3791. var.l = desc->l;
  3792. var.g = desc->g;
  3793. var.avl = desc->avl;
  3794. var.present = desc->p;
  3795. var.unusable = !var.present;
  3796. var.padding = 0;
  3797. kvm_set_segment(vcpu, &var, seg);
  3798. return;
  3799. }
  3800. static u16 emulator_get_segment_selector(struct x86_emulate_ctxt *ctxt, int seg)
  3801. {
  3802. struct kvm_segment kvm_seg;
  3803. kvm_get_segment(emul_to_vcpu(ctxt), &kvm_seg, seg);
  3804. return kvm_seg.selector;
  3805. }
  3806. static void emulator_set_segment_selector(struct x86_emulate_ctxt *ctxt,
  3807. u16 sel, int seg)
  3808. {
  3809. struct kvm_segment kvm_seg;
  3810. kvm_get_segment(emul_to_vcpu(ctxt), &kvm_seg, seg);
  3811. kvm_seg.selector = sel;
  3812. kvm_set_segment(emul_to_vcpu(ctxt), &kvm_seg, seg);
  3813. }
  3814. static int emulator_get_msr(struct x86_emulate_ctxt *ctxt,
  3815. u32 msr_index, u64 *pdata)
  3816. {
  3817. return kvm_get_msr(emul_to_vcpu(ctxt), msr_index, pdata);
  3818. }
  3819. static int emulator_set_msr(struct x86_emulate_ctxt *ctxt,
  3820. u32 msr_index, u64 data)
  3821. {
  3822. return kvm_set_msr(emul_to_vcpu(ctxt), msr_index, data);
  3823. }
  3824. static void emulator_halt(struct x86_emulate_ctxt *ctxt)
  3825. {
  3826. emul_to_vcpu(ctxt)->arch.halt_request = 1;
  3827. }
  3828. static void emulator_get_fpu(struct x86_emulate_ctxt *ctxt)
  3829. {
  3830. preempt_disable();
  3831. kvm_load_guest_fpu(ctxt->vcpu);
  3832. /*
  3833. * CR0.TS may reference the host fpu state, not the guest fpu state,
  3834. * so it may be clear at this point.
  3835. */
  3836. clts();
  3837. }
  3838. static void emulator_put_fpu(struct x86_emulate_ctxt *ctxt)
  3839. {
  3840. preempt_enable();
  3841. }
  3842. static int emulator_intercept(struct x86_emulate_ctxt *ctxt,
  3843. struct x86_instruction_info *info,
  3844. enum x86_intercept_stage stage)
  3845. {
  3846. return kvm_x86_ops->check_intercept(emul_to_vcpu(ctxt), info, stage);
  3847. }
  3848. static struct x86_emulate_ops emulate_ops = {
  3849. .read_std = kvm_read_guest_virt_system,
  3850. .write_std = kvm_write_guest_virt_system,
  3851. .fetch = kvm_fetch_guest_virt,
  3852. .read_emulated = emulator_read_emulated,
  3853. .write_emulated = emulator_write_emulated,
  3854. .cmpxchg_emulated = emulator_cmpxchg_emulated,
  3855. .invlpg = emulator_invlpg,
  3856. .pio_in_emulated = emulator_pio_in_emulated,
  3857. .pio_out_emulated = emulator_pio_out_emulated,
  3858. .get_cached_descriptor = emulator_get_cached_descriptor,
  3859. .set_cached_descriptor = emulator_set_cached_descriptor,
  3860. .get_segment_selector = emulator_get_segment_selector,
  3861. .set_segment_selector = emulator_set_segment_selector,
  3862. .get_cached_segment_base = emulator_get_cached_segment_base,
  3863. .get_gdt = emulator_get_gdt,
  3864. .get_idt = emulator_get_idt,
  3865. .set_gdt = emulator_set_gdt,
  3866. .set_idt = emulator_set_idt,
  3867. .get_cr = emulator_get_cr,
  3868. .set_cr = emulator_set_cr,
  3869. .cpl = emulator_get_cpl,
  3870. .get_dr = emulator_get_dr,
  3871. .set_dr = emulator_set_dr,
  3872. .set_msr = emulator_set_msr,
  3873. .get_msr = emulator_get_msr,
  3874. .halt = emulator_halt,
  3875. .wbinvd = emulator_wbinvd,
  3876. .fix_hypercall = emulator_fix_hypercall,
  3877. .get_fpu = emulator_get_fpu,
  3878. .put_fpu = emulator_put_fpu,
  3879. .intercept = emulator_intercept,
  3880. };
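/*
 * Read RAX, RSP and RIP so that they are brought into the register
 * cache, and mark all registers dirty so anything the emulator changes
 * in vcpu->arch.regs[] is written back to the guest.
 */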
  3881. static void cache_all_regs(struct kvm_vcpu *vcpu)
  3882. {
  3883. kvm_register_read(vcpu, VCPU_REGS_RAX);
  3884. kvm_register_read(vcpu, VCPU_REGS_RSP);
  3885. kvm_register_read(vcpu, VCPU_REGS_RIP);
  3886. vcpu->arch.regs_dirty = ~0;
  3887. }
  3888. static void toggle_interruptibility(struct kvm_vcpu *vcpu, u32 mask)
  3889. {
  3890. u32 int_shadow = kvm_x86_ops->get_interrupt_shadow(vcpu, mask);
  3891. /*
3892. * An sti; sti; sequence only disables interrupts for the first
3893. * instruction. So, if the last instruction, be it emulated or
3894. * not, left the system with the INT_STI flag enabled, it
3895. * means that the last instruction is an sti. We should not
3896. * leave the flag on in this case. The same goes for mov ss.
  3897. */
  3898. if (!(int_shadow & mask))
  3899. kvm_x86_ops->set_interrupt_shadow(vcpu, mask);
  3900. }
  3901. static void inject_emulated_exception(struct kvm_vcpu *vcpu)
  3902. {
  3903. struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt;
  3904. if (ctxt->exception.vector == PF_VECTOR)
  3905. kvm_propagate_fault(vcpu, &ctxt->exception);
  3906. else if (ctxt->exception.error_code_valid)
  3907. kvm_queue_exception_e(vcpu, ctxt->exception.vector,
  3908. ctxt->exception.error_code);
  3909. else
  3910. kvm_queue_exception(vcpu, ctxt->exception.vector);
  3911. }
  3912. static void init_emulate_ctxt(struct kvm_vcpu *vcpu)
  3913. {
  3914. struct decode_cache *c = &vcpu->arch.emulate_ctxt.decode;
  3915. int cs_db, cs_l;
  3916. cache_all_regs(vcpu);
  3917. kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
  3918. vcpu->arch.emulate_ctxt.vcpu = vcpu;
  3919. vcpu->arch.emulate_ctxt.eflags = kvm_get_rflags(vcpu);
  3920. vcpu->arch.emulate_ctxt.eip = kvm_rip_read(vcpu);
  3921. vcpu->arch.emulate_ctxt.mode =
  3922. (!is_protmode(vcpu)) ? X86EMUL_MODE_REAL :
  3923. (vcpu->arch.emulate_ctxt.eflags & X86_EFLAGS_VM)
  3924. ? X86EMUL_MODE_VM86 : cs_l
  3925. ? X86EMUL_MODE_PROT64 : cs_db
  3926. ? X86EMUL_MODE_PROT32 : X86EMUL_MODE_PROT16;
  3927. vcpu->arch.emulate_ctxt.guest_mode = is_guest_mode(vcpu);
  3928. memset(c, 0, sizeof(struct decode_cache));
  3929. memcpy(c->regs, vcpu->arch.regs, sizeof c->regs);
  3930. vcpu->arch.emulate_regs_need_sync_from_vcpu = false;
  3931. }
  3932. int kvm_inject_realmode_interrupt(struct kvm_vcpu *vcpu, int irq, int inc_eip)
  3933. {
  3934. struct decode_cache *c = &vcpu->arch.emulate_ctxt.decode;
  3935. int ret;
  3936. init_emulate_ctxt(vcpu);
  3937. vcpu->arch.emulate_ctxt.decode.op_bytes = 2;
  3938. vcpu->arch.emulate_ctxt.decode.ad_bytes = 2;
  3939. vcpu->arch.emulate_ctxt.decode.eip = vcpu->arch.emulate_ctxt.eip +
  3940. inc_eip;
  3941. ret = emulate_int_real(&vcpu->arch.emulate_ctxt, &emulate_ops, irq);
  3942. if (ret != X86EMUL_CONTINUE)
  3943. return EMULATE_FAIL;
  3944. vcpu->arch.emulate_ctxt.eip = c->eip;
  3945. memcpy(vcpu->arch.regs, c->regs, sizeof c->regs);
  3946. kvm_rip_write(vcpu, vcpu->arch.emulate_ctxt.eip);
  3947. kvm_set_rflags(vcpu, vcpu->arch.emulate_ctxt.eflags);
  3948. if (irq == NMI_VECTOR)
  3949. vcpu->arch.nmi_pending = false;
  3950. else
  3951. vcpu->arch.interrupt.pending = false;
  3952. return EMULATE_DONE;
  3953. }
  3954. EXPORT_SYMBOL_GPL(kvm_inject_realmode_interrupt);
  3955. static int handle_emulation_failure(struct kvm_vcpu *vcpu)
  3956. {
  3957. int r = EMULATE_DONE;
  3958. ++vcpu->stat.insn_emulation_fail;
  3959. trace_kvm_emulate_insn_failed(vcpu);
  3960. if (!is_guest_mode(vcpu)) {
  3961. vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
  3962. vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
  3963. vcpu->run->internal.ndata = 0;
  3964. r = EMULATE_FAIL;
  3965. }
  3966. kvm_queue_exception(vcpu, UD_VECTOR);
  3967. return r;
  3968. }
  3969. static bool reexecute_instruction(struct kvm_vcpu *vcpu, gva_t gva)
  3970. {
  3971. gpa_t gpa;
  3972. if (tdp_enabled)
  3973. return false;
  3974. /*
3975. * If emulation was due to access to a shadowed page table
3976. * and it failed, try to unshadow the page and re-enter the
3977. * guest to let the CPU execute the instruction.
  3978. */
  3979. if (kvm_mmu_unprotect_page_virt(vcpu, gva))
  3980. return true;
  3981. gpa = kvm_mmu_gva_to_gpa_system(vcpu, gva, NULL);
  3982. if (gpa == UNMAPPED_GVA)
  3983. return true; /* let cpu generate fault */
  3984. if (!kvm_is_error_hva(gfn_to_hva(vcpu->kvm, gpa >> PAGE_SHIFT)))
  3985. return true;
  3986. return false;
  3987. }
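/*
 * Main entry point for instruction emulation: optionally decode the
 * instruction, run the emulator, and translate its result into
 * EMULATE_DONE / EMULATE_DO_MMIO / EMULATE_FAIL, deferring register
 * writeback while a PIO or MMIO transaction is still in flight.
 */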
  3988. int x86_emulate_instruction(struct kvm_vcpu *vcpu,
  3989. unsigned long cr2,
  3990. int emulation_type,
  3991. void *insn,
  3992. int insn_len)
  3993. {
  3994. int r;
  3995. struct decode_cache *c = &vcpu->arch.emulate_ctxt.decode;
  3996. bool writeback = true;
  3997. kvm_clear_exception_queue(vcpu);
  3998. vcpu->arch.mmio_fault_cr2 = cr2;
  3999. /*
  4000. * TODO: fix emulate.c to use guest_read/write_register
4001. * instead of direct ->regs accesses; it can save a hundred cycles
4002. * on Intel for instructions that don't read/change RSP, for
4003. * example.
  4004. */
  4005. cache_all_regs(vcpu);
  4006. if (!(emulation_type & EMULTYPE_NO_DECODE)) {
  4007. init_emulate_ctxt(vcpu);
  4008. vcpu->arch.emulate_ctxt.interruptibility = 0;
  4009. vcpu->arch.emulate_ctxt.have_exception = false;
  4010. vcpu->arch.emulate_ctxt.perm_ok = false;
  4011. vcpu->arch.emulate_ctxt.only_vendor_specific_insn
  4012. = emulation_type & EMULTYPE_TRAP_UD;
  4013. r = x86_decode_insn(&vcpu->arch.emulate_ctxt, insn, insn_len);
  4014. trace_kvm_emulate_insn_start(vcpu);
  4015. ++vcpu->stat.insn_emulation;
  4016. if (r) {
  4017. if (emulation_type & EMULTYPE_TRAP_UD)
  4018. return EMULATE_FAIL;
  4019. if (reexecute_instruction(vcpu, cr2))
  4020. return EMULATE_DONE;
  4021. if (emulation_type & EMULTYPE_SKIP)
  4022. return EMULATE_FAIL;
  4023. return handle_emulation_failure(vcpu);
  4024. }
  4025. }
  4026. if (emulation_type & EMULTYPE_SKIP) {
  4027. kvm_rip_write(vcpu, vcpu->arch.emulate_ctxt.decode.eip);
  4028. return EMULATE_DONE;
  4029. }
4030. /* This is needed for the vmware backdoor interface to work since it
4031. changes register values during the IO operation */
  4032. if (vcpu->arch.emulate_regs_need_sync_from_vcpu) {
  4033. vcpu->arch.emulate_regs_need_sync_from_vcpu = false;
  4034. memcpy(c->regs, vcpu->arch.regs, sizeof c->regs);
  4035. }
  4036. restart:
  4037. r = x86_emulate_insn(&vcpu->arch.emulate_ctxt);
  4038. if (r == EMULATION_INTERCEPTED)
  4039. return EMULATE_DONE;
  4040. if (r == EMULATION_FAILED) {
  4041. if (reexecute_instruction(vcpu, cr2))
  4042. return EMULATE_DONE;
  4043. return handle_emulation_failure(vcpu);
  4044. }
  4045. if (vcpu->arch.emulate_ctxt.have_exception) {
  4046. inject_emulated_exception(vcpu);
  4047. r = EMULATE_DONE;
  4048. } else if (vcpu->arch.pio.count) {
  4049. if (!vcpu->arch.pio.in)
  4050. vcpu->arch.pio.count = 0;
  4051. else
  4052. writeback = false;
  4053. r = EMULATE_DO_MMIO;
  4054. } else if (vcpu->mmio_needed) {
  4055. if (!vcpu->mmio_is_write)
  4056. writeback = false;
  4057. r = EMULATE_DO_MMIO;
  4058. } else if (r == EMULATION_RESTART)
  4059. goto restart;
  4060. else
  4061. r = EMULATE_DONE;
  4062. if (writeback) {
  4063. toggle_interruptibility(vcpu,
  4064. vcpu->arch.emulate_ctxt.interruptibility);
  4065. kvm_set_rflags(vcpu, vcpu->arch.emulate_ctxt.eflags);
  4066. kvm_make_request(KVM_REQ_EVENT, vcpu);
  4067. memcpy(vcpu->arch.regs, c->regs, sizeof c->regs);
  4068. vcpu->arch.emulate_regs_need_sync_to_vcpu = false;
  4069. kvm_rip_write(vcpu, vcpu->arch.emulate_ctxt.eip);
  4070. } else
  4071. vcpu->arch.emulate_regs_need_sync_to_vcpu = true;
  4072. return r;
  4073. }
  4074. EXPORT_SYMBOL_GPL(x86_emulate_instruction);
  4075. int kvm_fast_pio_out(struct kvm_vcpu *vcpu, int size, unsigned short port)
  4076. {
  4077. unsigned long val = kvm_register_read(vcpu, VCPU_REGS_RAX);
  4078. int ret = emulator_pio_out_emulated(&vcpu->arch.emulate_ctxt,
  4079. size, port, &val, 1);
  4080. /* do not return to emulator after return from userspace */
  4081. vcpu->arch.pio.count = 0;
  4082. return ret;
  4083. }
  4084. EXPORT_SYMBOL_GPL(kvm_fast_pio_out);
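/*
 * Keep the per-cpu cpu_tsc_khz value in sync with cpufreq and hotplug
 * events.  Updates are always performed on the target CPU via IPIs, as
 * explained in the comment in kvmclock_cpufreq_notifier() below.
 */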
  4085. static void tsc_bad(void *info)
  4086. {
  4087. __this_cpu_write(cpu_tsc_khz, 0);
  4088. }
  4089. static void tsc_khz_changed(void *data)
  4090. {
  4091. struct cpufreq_freqs *freq = data;
  4092. unsigned long khz = 0;
  4093. if (data)
  4094. khz = freq->new;
  4095. else if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
  4096. khz = cpufreq_quick_get(raw_smp_processor_id());
  4097. if (!khz)
  4098. khz = tsc_khz;
  4099. __this_cpu_write(cpu_tsc_khz, khz);
  4100. }
  4101. static int kvmclock_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
  4102. void *data)
  4103. {
  4104. struct cpufreq_freqs *freq = data;
  4105. struct kvm *kvm;
  4106. struct kvm_vcpu *vcpu;
  4107. int i, send_ipi = 0;
  4108. /*
  4109. * We allow guests to temporarily run on slowing clocks,
  4110. * provided we notify them after, or to run on accelerating
  4111. * clocks, provided we notify them before. Thus time never
  4112. * goes backwards.
  4113. *
  4114. * However, we have a problem. We can't atomically update
  4115. * the frequency of a given CPU from this function; it is
  4116. * merely a notifier, which can be called from any CPU.
  4117. * Changing the TSC frequency at arbitrary points in time
  4118. * requires a recomputation of local variables related to
  4119. * the TSC for each VCPU. We must flag these local variables
  4120. * to be updated and be sure the update takes place with the
  4121. * new frequency before any guests proceed.
  4122. *
  4123. * Unfortunately, the combination of hotplug CPU and frequency
  4124. * change creates an intractable locking scenario; the order
  4125. * of when these callouts happen is undefined with respect to
  4126. * CPU hotplug, and they can race with each other. As such,
  4127. * merely setting per_cpu(cpu_tsc_khz) = X during a hotadd is
  4128. * undefined; you can actually have a CPU frequency change take
  4129. * place in between the computation of X and the setting of the
  4130. * variable. To protect against this problem, all updates of
  4131. * the per_cpu tsc_khz variable are done in an interrupt
  4132. * protected IPI, and all callers wishing to update the value
  4133. * must wait for a synchronous IPI to complete (which is trivial
  4134. * if the caller is on the CPU already). This establishes the
  4135. * necessary total order on variable updates.
  4136. *
  4137. * Note that because a guest time update may take place
  4138. * anytime after the setting of the VCPU's request bit, the
  4139. * correct TSC value must be set before the request. However,
  4140. * to ensure the update actually makes it to any guest which
  4141. * starts running in hardware virtualization between the set
  4142. * and the acquisition of the spinlock, we must also ping the
  4143. * CPU after setting the request bit.
  4144. *
  4145. */
  4146. if (val == CPUFREQ_PRECHANGE && freq->old > freq->new)
  4147. return 0;
  4148. if (val == CPUFREQ_POSTCHANGE && freq->old < freq->new)
  4149. return 0;
  4150. smp_call_function_single(freq->cpu, tsc_khz_changed, freq, 1);
  4151. raw_spin_lock(&kvm_lock);
  4152. list_for_each_entry(kvm, &vm_list, vm_list) {
  4153. kvm_for_each_vcpu(i, vcpu, kvm) {
  4154. if (vcpu->cpu != freq->cpu)
  4155. continue;
  4156. kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
  4157. if (vcpu->cpu != smp_processor_id())
  4158. send_ipi = 1;
  4159. }
  4160. }
  4161. raw_spin_unlock(&kvm_lock);
  4162. if (freq->old < freq->new && send_ipi) {
  4163. /*
4164. * We upscale the frequency. Must make sure the guest
4165. * doesn't see old kvmclock values while running with
4166. * the new frequency, otherwise we risk the guest seeing
  4167. * time go backwards.
  4168. *
  4169. * In case we update the frequency for another cpu
  4170. * (which might be in guest context) send an interrupt
  4171. * to kick the cpu out of guest context. Next time
  4172. * guest context is entered kvmclock will be updated,
  4173. * so the guest will not see stale values.
  4174. */
  4175. smp_call_function_single(freq->cpu, tsc_khz_changed, freq, 1);
  4176. }
  4177. return 0;
  4178. }
  4179. static struct notifier_block kvmclock_cpufreq_notifier_block = {
  4180. .notifier_call = kvmclock_cpufreq_notifier
  4181. };
  4182. static int kvmclock_cpu_notifier(struct notifier_block *nfb,
  4183. unsigned long action, void *hcpu)
  4184. {
  4185. unsigned int cpu = (unsigned long)hcpu;
  4186. switch (action) {
  4187. case CPU_ONLINE:
  4188. case CPU_DOWN_FAILED:
  4189. smp_call_function_single(cpu, tsc_khz_changed, NULL, 1);
  4190. break;
  4191. case CPU_DOWN_PREPARE:
  4192. smp_call_function_single(cpu, tsc_bad, NULL, 1);
  4193. break;
  4194. }
  4195. return NOTIFY_OK;
  4196. }
  4197. static struct notifier_block kvmclock_cpu_notifier_block = {
  4198. .notifier_call = kvmclock_cpu_notifier,
  4199. .priority = -INT_MAX
  4200. };
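/*
 * Register the CPU hotplug notifier and, when the TSC frequency is not
 * constant, the cpufreq transition notifier; then seed the per-cpu
 * tsc_khz value on every online CPU via tsc_khz_changed().
 */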
  4201. static void kvm_timer_init(void)
  4202. {
  4203. int cpu;
  4204. max_tsc_khz = tsc_khz;
  4205. register_hotcpu_notifier(&kvmclock_cpu_notifier_block);
  4206. if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) {
  4207. #ifdef CONFIG_CPU_FREQ
  4208. struct cpufreq_policy policy;
  4209. memset(&policy, 0, sizeof(policy));
  4210. cpu = get_cpu();
  4211. cpufreq_get_policy(&policy, cpu);
  4212. if (policy.cpuinfo.max_freq)
  4213. max_tsc_khz = policy.cpuinfo.max_freq;
  4214. put_cpu();
  4215. #endif
  4216. cpufreq_register_notifier(&kvmclock_cpufreq_notifier_block,
  4217. CPUFREQ_TRANSITION_NOTIFIER);
  4218. }
  4219. pr_debug("kvm: max_tsc_khz = %ld\n", max_tsc_khz);
  4220. for_each_online_cpu(cpu)
  4221. smp_call_function_single(cpu, tsc_khz_changed, NULL, 1);
  4222. }
  4223. static DEFINE_PER_CPU(struct kvm_vcpu *, current_vcpu);
  4224. static int kvm_is_in_guest(void)
  4225. {
  4226. return percpu_read(current_vcpu) != NULL;
  4227. }
  4228. static int kvm_is_user_mode(void)
  4229. {
  4230. int user_mode = 3;
  4231. if (percpu_read(current_vcpu))
  4232. user_mode = kvm_x86_ops->get_cpl(percpu_read(current_vcpu));
  4233. return user_mode != 0;
  4234. }
  4235. static unsigned long kvm_get_guest_ip(void)
  4236. {
  4237. unsigned long ip = 0;
  4238. if (percpu_read(current_vcpu))
  4239. ip = kvm_rip_read(percpu_read(current_vcpu));
  4240. return ip;
  4241. }
  4242. static struct perf_guest_info_callbacks kvm_guest_cbs = {
  4243. .is_in_guest = kvm_is_in_guest,
  4244. .is_user_mode = kvm_is_user_mode,
  4245. .get_guest_ip = kvm_get_guest_ip,
  4246. };
  4247. void kvm_before_handle_nmi(struct kvm_vcpu *vcpu)
  4248. {
  4249. percpu_write(current_vcpu, vcpu);
  4250. }
  4251. EXPORT_SYMBOL_GPL(kvm_before_handle_nmi);
  4252. void kvm_after_handle_nmi(struct kvm_vcpu *vcpu)
  4253. {
  4254. percpu_write(current_vcpu, NULL);
  4255. }
  4256. EXPORT_SYMBOL_GPL(kvm_after_handle_nmi);
  4257. int kvm_arch_init(void *opaque)
  4258. {
  4259. int r;
  4260. struct kvm_x86_ops *ops = (struct kvm_x86_ops *)opaque;
  4261. if (kvm_x86_ops) {
  4262. printk(KERN_ERR "kvm: already loaded the other module\n");
  4263. r = -EEXIST;
  4264. goto out;
  4265. }
  4266. if (!ops->cpu_has_kvm_support()) {
  4267. printk(KERN_ERR "kvm: no hardware support\n");
  4268. r = -EOPNOTSUPP;
  4269. goto out;
  4270. }
  4271. if (ops->disabled_by_bios()) {
  4272. printk(KERN_ERR "kvm: disabled by bios\n");
  4273. r = -EOPNOTSUPP;
  4274. goto out;
  4275. }
  4276. r = kvm_mmu_module_init();
  4277. if (r)
  4278. goto out;
  4279. kvm_init_msr_list();
  4280. kvm_x86_ops = ops;
  4281. kvm_mmu_set_nonpresent_ptes(0ull, 0ull);
  4282. kvm_mmu_set_mask_ptes(PT_USER_MASK, PT_ACCESSED_MASK,
  4283. PT_DIRTY_MASK, PT64_NX_MASK, 0);
  4284. kvm_timer_init();
  4285. perf_register_guest_info_callbacks(&kvm_guest_cbs);
  4286. if (cpu_has_xsave)
  4287. host_xcr0 = xgetbv(XCR_XFEATURE_ENABLED_MASK);
  4288. return 0;
  4289. out:
  4290. return r;
  4291. }
  4292. void kvm_arch_exit(void)
  4293. {
  4294. perf_unregister_guest_info_callbacks(&kvm_guest_cbs);
  4295. if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
  4296. cpufreq_unregister_notifier(&kvmclock_cpufreq_notifier_block,
  4297. CPUFREQ_TRANSITION_NOTIFIER);
  4298. unregister_hotcpu_notifier(&kvmclock_cpu_notifier_block);
  4299. kvm_x86_ops = NULL;
  4300. kvm_mmu_module_exit();
  4301. }
  4302. int kvm_emulate_halt(struct kvm_vcpu *vcpu)
  4303. {
  4304. ++vcpu->stat.halt_exits;
  4305. if (irqchip_in_kernel(vcpu->kvm)) {
  4306. vcpu->arch.mp_state = KVM_MP_STATE_HALTED;
  4307. return 1;
  4308. } else {
  4309. vcpu->run->exit_reason = KVM_EXIT_HLT;
  4310. return 0;
  4311. }
  4312. }
  4313. EXPORT_SYMBOL_GPL(kvm_emulate_halt);
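/*
 * Outside long mode the hypercall ABI passes a 64-bit guest physical
 * address in two 32-bit registers; combine the low half (a0) and the
 * high half (a1) here.
 */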
  4314. static inline gpa_t hc_gpa(struct kvm_vcpu *vcpu, unsigned long a0,
  4315. unsigned long a1)
  4316. {
  4317. if (is_long_mode(vcpu))
  4318. return a0;
  4319. else
  4320. return a0 | ((gpa_t)a1 << 32);
  4321. }
  4322. int kvm_hv_hypercall(struct kvm_vcpu *vcpu)
  4323. {
  4324. u64 param, ingpa, outgpa, ret;
  4325. uint16_t code, rep_idx, rep_cnt, res = HV_STATUS_SUCCESS, rep_done = 0;
  4326. bool fast, longmode;
  4327. int cs_db, cs_l;
/*
 * Per the Hyper-V spec, a hypercall issued from non-zero CPL or from
 * real mode generates #UD.
 */
  4332. if (kvm_x86_ops->get_cpl(vcpu) != 0 || !is_protmode(vcpu)) {
  4333. kvm_queue_exception(vcpu, UD_VECTOR);
  4334. return 0;
  4335. }
  4336. kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
  4337. longmode = is_long_mode(vcpu) && cs_l == 1;
  4338. if (!longmode) {
  4339. param = ((u64)kvm_register_read(vcpu, VCPU_REGS_RDX) << 32) |
  4340. (kvm_register_read(vcpu, VCPU_REGS_RAX) & 0xffffffff);
  4341. ingpa = ((u64)kvm_register_read(vcpu, VCPU_REGS_RBX) << 32) |
  4342. (kvm_register_read(vcpu, VCPU_REGS_RCX) & 0xffffffff);
  4343. outgpa = ((u64)kvm_register_read(vcpu, VCPU_REGS_RDI) << 32) |
  4344. (kvm_register_read(vcpu, VCPU_REGS_RSI) & 0xffffffff);
  4345. }
  4346. #ifdef CONFIG_X86_64
  4347. else {
  4348. param = kvm_register_read(vcpu, VCPU_REGS_RCX);
  4349. ingpa = kvm_register_read(vcpu, VCPU_REGS_RDX);
  4350. outgpa = kvm_register_read(vcpu, VCPU_REGS_R8);
  4351. }
  4352. #endif
  4353. code = param & 0xffff;
  4354. fast = (param >> 16) & 0x1;
  4355. rep_cnt = (param >> 32) & 0xfff;
  4356. rep_idx = (param >> 48) & 0xfff;
  4357. trace_kvm_hv_hypercall(code, fast, rep_cnt, rep_idx, ingpa, outgpa);
  4358. switch (code) {
  4359. case HV_X64_HV_NOTIFY_LONG_SPIN_WAIT:
  4360. kvm_vcpu_on_spin(vcpu);
  4361. break;
  4362. default:
  4363. res = HV_STATUS_INVALID_HYPERCALL_CODE;
  4364. break;
  4365. }
  4366. ret = res | (((u64)rep_done & 0xfff) << 32);
  4367. if (longmode) {
  4368. kvm_register_write(vcpu, VCPU_REGS_RAX, ret);
  4369. } else {
  4370. kvm_register_write(vcpu, VCPU_REGS_RDX, ret >> 32);
  4371. kvm_register_write(vcpu, VCPU_REGS_RAX, ret & 0xffffffff);
  4372. }
  4373. return 1;
  4374. }
  4375. int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
  4376. {
  4377. unsigned long nr, a0, a1, a2, a3, ret;
  4378. int r = 1;
  4379. if (kvm_hv_hypercall_enabled(vcpu->kvm))
  4380. return kvm_hv_hypercall(vcpu);
  4381. nr = kvm_register_read(vcpu, VCPU_REGS_RAX);
  4382. a0 = kvm_register_read(vcpu, VCPU_REGS_RBX);
  4383. a1 = kvm_register_read(vcpu, VCPU_REGS_RCX);
  4384. a2 = kvm_register_read(vcpu, VCPU_REGS_RDX);
  4385. a3 = kvm_register_read(vcpu, VCPU_REGS_RSI);
  4386. trace_kvm_hypercall(nr, a0, a1, a2, a3);
  4387. if (!is_long_mode(vcpu)) {
  4388. nr &= 0xFFFFFFFF;
  4389. a0 &= 0xFFFFFFFF;
  4390. a1 &= 0xFFFFFFFF;
  4391. a2 &= 0xFFFFFFFF;
  4392. a3 &= 0xFFFFFFFF;
  4393. }
  4394. if (kvm_x86_ops->get_cpl(vcpu) != 0) {
  4395. ret = -KVM_EPERM;
  4396. goto out;
  4397. }
  4398. switch (nr) {
  4399. case KVM_HC_VAPIC_POLL_IRQ:
  4400. ret = 0;
  4401. break;
  4402. case KVM_HC_MMU_OP:
  4403. r = kvm_pv_mmu_op(vcpu, a0, hc_gpa(vcpu, a1, a2), &ret);
  4404. break;
  4405. default:
  4406. ret = -KVM_ENOSYS;
  4407. break;
  4408. }
  4409. out:
  4410. kvm_register_write(vcpu, VCPU_REGS_RAX, ret);
  4411. ++vcpu->stat.hypercalls;
  4412. return r;
  4413. }
  4414. EXPORT_SYMBOL_GPL(kvm_emulate_hypercall);
  4415. int emulator_fix_hypercall(struct x86_emulate_ctxt *ctxt)
  4416. {
  4417. struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
  4418. char instruction[3];
  4419. unsigned long rip = kvm_rip_read(vcpu);
/*
 * Blow out the MMU so that no other VCPU keeps an active mapping; this
 * ensures the patched hypercall instruction appears atomically across
 * all VCPUs.
 */
  4425. kvm_mmu_zap_all(vcpu->kvm);
  4426. kvm_x86_ops->patch_hypercall(vcpu, instruction);
  4427. return emulator_write_emulated(&vcpu->arch.emulate_ctxt,
  4428. rip, instruction, 3, NULL);
  4429. }
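/*
 * Stateful CPUID functions return a different entry on each read.
 * Entries sharing a function number form a ring; clear READ_NEXT on the
 * current entry and set it on the next entry of the same function.
 */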
  4430. static int move_to_next_stateful_cpuid_entry(struct kvm_vcpu *vcpu, int i)
  4431. {
  4432. struct kvm_cpuid_entry2 *e = &vcpu->arch.cpuid_entries[i];
  4433. int j, nent = vcpu->arch.cpuid_nent;
  4434. e->flags &= ~KVM_CPUID_FLAG_STATE_READ_NEXT;
  4435. /* when no next entry is found, the current entry[i] is reselected */
  4436. for (j = i + 1; ; j = (j + 1) % nent) {
  4437. struct kvm_cpuid_entry2 *ej = &vcpu->arch.cpuid_entries[j];
  4438. if (ej->function == e->function) {
  4439. ej->flags |= KVM_CPUID_FLAG_STATE_READ_NEXT;
  4440. return j;
  4441. }
  4442. }
  4443. return 0; /* silence gcc, even though control never reaches here */
  4444. }
/*
 * Find an entry with matching function, matching index (if needed), and
 * that should be read next (if it is stateful).
 */
  4447. static int is_matching_cpuid_entry(struct kvm_cpuid_entry2 *e,
  4448. u32 function, u32 index)
  4449. {
  4450. if (e->function != function)
  4451. return 0;
  4452. if ((e->flags & KVM_CPUID_FLAG_SIGNIFCANT_INDEX) && e->index != index)
  4453. return 0;
  4454. if ((e->flags & KVM_CPUID_FLAG_STATEFUL_FUNC) &&
  4455. !(e->flags & KVM_CPUID_FLAG_STATE_READ_NEXT))
  4456. return 0;
  4457. return 1;
  4458. }
  4459. struct kvm_cpuid_entry2 *kvm_find_cpuid_entry(struct kvm_vcpu *vcpu,
  4460. u32 function, u32 index)
  4461. {
  4462. int i;
  4463. struct kvm_cpuid_entry2 *best = NULL;
  4464. for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
  4465. struct kvm_cpuid_entry2 *e;
  4466. e = &vcpu->arch.cpuid_entries[i];
  4467. if (is_matching_cpuid_entry(e, function, index)) {
  4468. if (e->flags & KVM_CPUID_FLAG_STATEFUL_FUNC)
  4469. move_to_next_stateful_cpuid_entry(vcpu, i);
  4470. best = e;
  4471. break;
  4472. }
  4473. }
  4474. return best;
  4475. }
  4476. EXPORT_SYMBOL_GPL(kvm_find_cpuid_entry);
  4477. int cpuid_maxphyaddr(struct kvm_vcpu *vcpu)
  4478. {
  4479. struct kvm_cpuid_entry2 *best;
  4480. best = kvm_find_cpuid_entry(vcpu, 0x80000000, 0);
  4481. if (!best || best->eax < 0x80000008)
  4482. goto not_found;
  4483. best = kvm_find_cpuid_entry(vcpu, 0x80000008, 0);
  4484. if (best)
  4485. return best->eax & 0xff;
  4486. not_found:
  4487. return 36;
  4488. }
/*
 * If no match is found, check whether we exceed the vCPU's limit
 * and return the content of the highest valid _standard_ leaf instead.
 * This is to satisfy the CPUID specification.
 */
  4494. static struct kvm_cpuid_entry2* check_cpuid_limit(struct kvm_vcpu *vcpu,
  4495. u32 function, u32 index)
  4496. {
  4497. struct kvm_cpuid_entry2 *maxlevel;
  4498. maxlevel = kvm_find_cpuid_entry(vcpu, function & 0x80000000, 0);
  4499. if (!maxlevel || maxlevel->eax >= function)
  4500. return NULL;
  4501. if (function & 0x80000000) {
  4502. maxlevel = kvm_find_cpuid_entry(vcpu, 0, 0);
  4503. if (!maxlevel)
  4504. return NULL;
  4505. }
  4506. return kvm_find_cpuid_entry(vcpu, maxlevel->eax, index);
  4507. }
  4508. void kvm_emulate_cpuid(struct kvm_vcpu *vcpu)
  4509. {
  4510. u32 function, index;
  4511. struct kvm_cpuid_entry2 *best;
  4512. function = kvm_register_read(vcpu, VCPU_REGS_RAX);
  4513. index = kvm_register_read(vcpu, VCPU_REGS_RCX);
  4514. kvm_register_write(vcpu, VCPU_REGS_RAX, 0);
  4515. kvm_register_write(vcpu, VCPU_REGS_RBX, 0);
  4516. kvm_register_write(vcpu, VCPU_REGS_RCX, 0);
  4517. kvm_register_write(vcpu, VCPU_REGS_RDX, 0);
  4518. best = kvm_find_cpuid_entry(vcpu, function, index);
  4519. if (!best)
  4520. best = check_cpuid_limit(vcpu, function, index);
  4521. if (best) {
  4522. kvm_register_write(vcpu, VCPU_REGS_RAX, best->eax);
  4523. kvm_register_write(vcpu, VCPU_REGS_RBX, best->ebx);
  4524. kvm_register_write(vcpu, VCPU_REGS_RCX, best->ecx);
  4525. kvm_register_write(vcpu, VCPU_REGS_RDX, best->edx);
  4526. }
  4527. kvm_x86_ops->skip_emulated_instruction(vcpu);
  4528. trace_kvm_cpuid(function,
  4529. kvm_register_read(vcpu, VCPU_REGS_RAX),
  4530. kvm_register_read(vcpu, VCPU_REGS_RBX),
  4531. kvm_register_read(vcpu, VCPU_REGS_RCX),
  4532. kvm_register_read(vcpu, VCPU_REGS_RDX));
  4533. }
  4534. EXPORT_SYMBOL_GPL(kvm_emulate_cpuid);
  4535. /*
  4536. * Check if userspace requested an interrupt window, and that the
  4537. * interrupt window is open.
  4538. *
  4539. * No need to exit to userspace if we already have an interrupt queued.
  4540. */
  4541. static int dm_request_for_irq_injection(struct kvm_vcpu *vcpu)
  4542. {
  4543. return (!irqchip_in_kernel(vcpu->kvm) && !kvm_cpu_has_interrupt(vcpu) &&
  4544. vcpu->run->request_interrupt_window &&
  4545. kvm_arch_interrupt_allowed(vcpu));
  4546. }
  4547. static void post_kvm_run_save(struct kvm_vcpu *vcpu)
  4548. {
  4549. struct kvm_run *kvm_run = vcpu->run;
  4550. kvm_run->if_flag = (kvm_get_rflags(vcpu) & X86_EFLAGS_IF) != 0;
  4551. kvm_run->cr8 = kvm_get_cr8(vcpu);
  4552. kvm_run->apic_base = kvm_get_apic_base(vcpu);
  4553. if (irqchip_in_kernel(vcpu->kvm))
  4554. kvm_run->ready_for_interrupt_injection = 1;
  4555. else
  4556. kvm_run->ready_for_interrupt_injection =
  4557. kvm_arch_interrupt_allowed(vcpu) &&
  4558. !kvm_cpu_has_interrupt(vcpu) &&
  4559. !kvm_event_needs_reinjection(vcpu);
  4560. }
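/*
 * Pin the guest page backing the virtual-APIC area while the vcpu runs;
 * vapic_exit() releases it and marks it dirty.
 */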
  4561. static void vapic_enter(struct kvm_vcpu *vcpu)
  4562. {
  4563. struct kvm_lapic *apic = vcpu->arch.apic;
  4564. struct page *page;
  4565. if (!apic || !apic->vapic_addr)
  4566. return;
  4567. page = gfn_to_page(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT);
  4568. vcpu->arch.apic->vapic_page = page;
  4569. }
  4570. static void vapic_exit(struct kvm_vcpu *vcpu)
  4571. {
  4572. struct kvm_lapic *apic = vcpu->arch.apic;
  4573. int idx;
  4574. if (!apic || !apic->vapic_addr)
  4575. return;
  4576. idx = srcu_read_lock(&vcpu->kvm->srcu);
  4577. kvm_release_page_dirty(apic->vapic_page);
  4578. mark_page_dirty(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT);
  4579. srcu_read_unlock(&vcpu->kvm->srcu, idx);
  4580. }
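/*
 * Pass the current TPR and, when no vAPIC page is in use, the priority
 * of the highest pending interrupt to the vendor-specific CR8/TPR
 * threshold intercept update.
 */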
  4581. static void update_cr8_intercept(struct kvm_vcpu *vcpu)
  4582. {
  4583. int max_irr, tpr;
  4584. if (!kvm_x86_ops->update_cr8_intercept)
  4585. return;
  4586. if (!vcpu->arch.apic)
  4587. return;
  4588. if (!vcpu->arch.apic->vapic_addr)
  4589. max_irr = kvm_lapic_find_highest_irr(vcpu);
  4590. else
  4591. max_irr = -1;
  4592. if (max_irr != -1)
  4593. max_irr >>= 4;
  4594. tpr = kvm_lapic_get_cr8(vcpu);
  4595. kvm_x86_ops->update_cr8_intercept(vcpu, tpr, max_irr);
  4596. }
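/*
 * Re-inject any event left over from the previous exit (exception
 * first, then NMI, then IRQ) before considering a newly pending NMI or
 * external interrupt.
 */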
  4597. static void inject_pending_event(struct kvm_vcpu *vcpu)
  4598. {
  4599. /* try to reinject previous events if any */
  4600. if (vcpu->arch.exception.pending) {
  4601. trace_kvm_inj_exception(vcpu->arch.exception.nr,
  4602. vcpu->arch.exception.has_error_code,
  4603. vcpu->arch.exception.error_code);
  4604. kvm_x86_ops->queue_exception(vcpu, vcpu->arch.exception.nr,
  4605. vcpu->arch.exception.has_error_code,
  4606. vcpu->arch.exception.error_code,
  4607. vcpu->arch.exception.reinject);
  4608. return;
  4609. }
  4610. if (vcpu->arch.nmi_injected) {
  4611. kvm_x86_ops->set_nmi(vcpu);
  4612. return;
  4613. }
  4614. if (vcpu->arch.interrupt.pending) {
  4615. kvm_x86_ops->set_irq(vcpu);
  4616. return;
  4617. }
  4618. /* try to inject new event if pending */
  4619. if (vcpu->arch.nmi_pending) {
  4620. if (kvm_x86_ops->nmi_allowed(vcpu)) {
  4621. vcpu->arch.nmi_pending = false;
  4622. vcpu->arch.nmi_injected = true;
  4623. kvm_x86_ops->set_nmi(vcpu);
  4624. }
  4625. } else if (kvm_cpu_has_interrupt(vcpu)) {
  4626. if (kvm_x86_ops->interrupt_allowed(vcpu)) {
  4627. kvm_queue_interrupt(vcpu, kvm_cpu_get_interrupt(vcpu),
  4628. false);
  4629. kvm_x86_ops->set_irq(vcpu);
  4630. }
  4631. }
  4632. }
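/* Switch XCR0 between host and guest values around guest entry/exit. */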
  4633. static void kvm_load_guest_xcr0(struct kvm_vcpu *vcpu)
  4634. {
  4635. if (kvm_read_cr4_bits(vcpu, X86_CR4_OSXSAVE) &&
  4636. !vcpu->guest_xcr0_loaded) {
  4637. /* kvm_set_xcr() also depends on this */
  4638. xsetbv(XCR_XFEATURE_ENABLED_MASK, vcpu->arch.xcr0);
  4639. vcpu->guest_xcr0_loaded = 1;
  4640. }
  4641. }
  4642. static void kvm_put_guest_xcr0(struct kvm_vcpu *vcpu)
  4643. {
  4644. if (vcpu->guest_xcr0_loaded) {
  4645. if (vcpu->arch.xcr0 != host_xcr0)
  4646. xsetbv(XCR_XFEATURE_ENABLED_MASK, host_xcr0);
  4647. vcpu->guest_xcr0_loaded = 0;
  4648. }
  4649. }
  4650. static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
  4651. {
  4652. int r;
  4653. bool nmi_pending;
  4654. bool req_int_win = !irqchip_in_kernel(vcpu->kvm) &&
  4655. vcpu->run->request_interrupt_window;
  4656. if (vcpu->requests) {
  4657. if (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu))
  4658. kvm_mmu_unload(vcpu);
  4659. if (kvm_check_request(KVM_REQ_MIGRATE_TIMER, vcpu))
  4660. __kvm_migrate_timers(vcpu);
  4661. if (kvm_check_request(KVM_REQ_CLOCK_UPDATE, vcpu)) {
  4662. r = kvm_guest_time_update(vcpu);
  4663. if (unlikely(r))
  4664. goto out;
  4665. }
  4666. if (kvm_check_request(KVM_REQ_MMU_SYNC, vcpu))
  4667. kvm_mmu_sync_roots(vcpu);
  4668. if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu))
  4669. kvm_x86_ops->tlb_flush(vcpu);
  4670. if (kvm_check_request(KVM_REQ_REPORT_TPR_ACCESS, vcpu)) {
  4671. vcpu->run->exit_reason = KVM_EXIT_TPR_ACCESS;
  4672. r = 0;
  4673. goto out;
  4674. }
  4675. if (kvm_check_request(KVM_REQ_TRIPLE_FAULT, vcpu)) {
  4676. vcpu->run->exit_reason = KVM_EXIT_SHUTDOWN;
  4677. r = 0;
  4678. goto out;
  4679. }
  4680. if (kvm_check_request(KVM_REQ_DEACTIVATE_FPU, vcpu)) {
  4681. vcpu->fpu_active = 0;
  4682. kvm_x86_ops->fpu_deactivate(vcpu);
  4683. }
  4684. if (kvm_check_request(KVM_REQ_APF_HALT, vcpu)) {
  4685. /* Page is swapped out. Do synthetic halt */
  4686. vcpu->arch.apf.halted = true;
  4687. r = 1;
  4688. goto out;
  4689. }
  4690. }
  4691. r = kvm_mmu_reload(vcpu);
  4692. if (unlikely(r))
  4693. goto out;
  4694. /*
  4695. * An NMI can be injected between local nmi_pending read and
  4696. * vcpu->arch.nmi_pending read inside inject_pending_event().
  4697. * But in that case, KVM_REQ_EVENT will be set, which makes
  4698. * the race described above benign.
  4699. */
  4700. nmi_pending = ACCESS_ONCE(vcpu->arch.nmi_pending);
  4701. if (kvm_check_request(KVM_REQ_EVENT, vcpu) || req_int_win) {
  4702. inject_pending_event(vcpu);
  4703. /* enable NMI/IRQ window open exits if needed */
  4704. if (nmi_pending)
  4705. kvm_x86_ops->enable_nmi_window(vcpu);
  4706. else if (kvm_cpu_has_interrupt(vcpu) || req_int_win)
  4707. kvm_x86_ops->enable_irq_window(vcpu);
  4708. if (kvm_lapic_enabled(vcpu)) {
  4709. update_cr8_intercept(vcpu);
  4710. kvm_lapic_sync_to_vapic(vcpu);
  4711. }
  4712. }
  4713. preempt_disable();
  4714. kvm_x86_ops->prepare_guest_switch(vcpu);
  4715. if (vcpu->fpu_active)
  4716. kvm_load_guest_fpu(vcpu);
  4717. kvm_load_guest_xcr0(vcpu);
  4718. vcpu->mode = IN_GUEST_MODE;
/*
 * ->mode must be set before checking ->requests; see the comment in
 * make_all_cpus_request().
 */
  4722. smp_mb();
  4723. local_irq_disable();
  4724. if (vcpu->mode == EXITING_GUEST_MODE || vcpu->requests
  4725. || need_resched() || signal_pending(current)) {
  4726. vcpu->mode = OUTSIDE_GUEST_MODE;
  4727. smp_wmb();
  4728. local_irq_enable();
  4729. preempt_enable();
  4730. kvm_x86_ops->cancel_injection(vcpu);
  4731. r = 1;
  4732. goto out;
  4733. }
  4734. srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
  4735. kvm_guest_enter();
  4736. if (unlikely(vcpu->arch.switch_db_regs)) {
  4737. set_debugreg(0, 7);
  4738. set_debugreg(vcpu->arch.eff_db[0], 0);
  4739. set_debugreg(vcpu->arch.eff_db[1], 1);
  4740. set_debugreg(vcpu->arch.eff_db[2], 2);
  4741. set_debugreg(vcpu->arch.eff_db[3], 3);
  4742. }
  4743. trace_kvm_entry(vcpu->vcpu_id);
  4744. kvm_x86_ops->run(vcpu);
  4745. /*
  4746. * If the guest has used debug registers, at least dr7
  4747. * will be disabled while returning to the host.
  4748. * If we don't have active breakpoints in the host, we don't
  4749. * care about the messed up debug address registers. But if
  4750. * we have some of them active, restore the old state.
  4751. */
  4752. if (hw_breakpoint_active())
  4753. hw_breakpoint_restore();
  4754. kvm_get_msr(vcpu, MSR_IA32_TSC, &vcpu->arch.last_guest_tsc);
  4755. vcpu->mode = OUTSIDE_GUEST_MODE;
  4756. smp_wmb();
  4757. local_irq_enable();
  4758. ++vcpu->stat.exits;
  4759. /*
  4760. * We must have an instruction between local_irq_enable() and
  4761. * kvm_guest_exit(), so the timer interrupt isn't delayed by
  4762. * the interrupt shadow. The stat.exits increment will do nicely.
  4763. * But we need to prevent reordering, hence this barrier():
  4764. */
  4765. barrier();
  4766. kvm_guest_exit();
  4767. preempt_enable();
  4768. vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
  4769. /*
  4770. * Profile KVM exit RIPs:
  4771. */
  4772. if (unlikely(prof_on == KVM_PROFILING)) {
  4773. unsigned long rip = kvm_rip_read(vcpu);
  4774. profile_hit(KVM_PROFILING, (void *)rip);
  4775. }
  4776. kvm_lapic_sync_from_vapic(vcpu);
  4777. r = kvm_x86_ops->handle_exit(vcpu);
  4778. out:
  4779. return r;
  4780. }
  4781. static int __vcpu_run(struct kvm_vcpu *vcpu)
  4782. {
  4783. int r;
  4784. struct kvm *kvm = vcpu->kvm;
  4785. if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_SIPI_RECEIVED)) {
  4786. pr_debug("vcpu %d received sipi with vector # %x\n",
  4787. vcpu->vcpu_id, vcpu->arch.sipi_vector);
  4788. kvm_lapic_reset(vcpu);
  4789. r = kvm_arch_vcpu_reset(vcpu);
  4790. if (r)
  4791. return r;
  4792. vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
  4793. }
  4794. vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
  4795. vapic_enter(vcpu);
  4796. r = 1;
  4797. while (r > 0) {
  4798. if (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE &&
  4799. !vcpu->arch.apf.halted)
  4800. r = vcpu_enter_guest(vcpu);
  4801. else {
  4802. srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
  4803. kvm_vcpu_block(vcpu);
  4804. vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
  4805. if (kvm_check_request(KVM_REQ_UNHALT, vcpu))
  4806. {
  4807. switch(vcpu->arch.mp_state) {
  4808. case KVM_MP_STATE_HALTED:
  4809. vcpu->arch.mp_state =
  4810. KVM_MP_STATE_RUNNABLE;
  4811. case KVM_MP_STATE_RUNNABLE:
  4812. vcpu->arch.apf.halted = false;
  4813. break;
  4814. case KVM_MP_STATE_SIPI_RECEIVED:
  4815. default:
  4816. r = -EINTR;
  4817. break;
  4818. }
  4819. }
  4820. }
  4821. if (r <= 0)
  4822. break;
  4823. clear_bit(KVM_REQ_PENDING_TIMER, &vcpu->requests);
  4824. if (kvm_cpu_has_pending_timer(vcpu))
  4825. kvm_inject_pending_timer_irqs(vcpu);
  4826. if (dm_request_for_irq_injection(vcpu)) {
  4827. r = -EINTR;
  4828. vcpu->run->exit_reason = KVM_EXIT_INTR;
  4829. ++vcpu->stat.request_irq_exits;
  4830. }
  4831. kvm_check_async_pf_completion(vcpu);
  4832. if (signal_pending(current)) {
  4833. r = -EINTR;
  4834. vcpu->run->exit_reason = KVM_EXIT_INTR;
  4835. ++vcpu->stat.signal_exits;
  4836. }
  4837. if (need_resched()) {
  4838. srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
  4839. kvm_resched(vcpu);
  4840. vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
  4841. }
  4842. }
  4843. srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
  4844. vapic_exit(vcpu);
  4845. return r;
  4846. }
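/*
 * Finish an in-flight MMIO or PIO transaction before resuming the
 * guest: data is shuttled between the kvm_run area and the internal
 * buffer in 8-byte chunks, and the emulator is re-entered once the
 * transaction is complete.
 */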
  4847. static int complete_mmio(struct kvm_vcpu *vcpu)
  4848. {
  4849. struct kvm_run *run = vcpu->run;
  4850. int r;
  4851. if (!(vcpu->arch.pio.count || vcpu->mmio_needed))
  4852. return 1;
  4853. if (vcpu->mmio_needed) {
  4854. vcpu->mmio_needed = 0;
  4855. if (!vcpu->mmio_is_write)
  4856. memcpy(vcpu->mmio_data, run->mmio.data, 8);
  4857. vcpu->mmio_index += 8;
  4858. if (vcpu->mmio_index < vcpu->mmio_size) {
  4859. run->exit_reason = KVM_EXIT_MMIO;
  4860. run->mmio.phys_addr = vcpu->mmio_phys_addr + vcpu->mmio_index;
  4861. memcpy(run->mmio.data, vcpu->mmio_data + vcpu->mmio_index, 8);
  4862. run->mmio.len = min(vcpu->mmio_size - vcpu->mmio_index, 8);
  4863. run->mmio.is_write = vcpu->mmio_is_write;
  4864. vcpu->mmio_needed = 1;
  4865. return 0;
  4866. }
  4867. if (vcpu->mmio_is_write)
  4868. return 1;
  4869. vcpu->mmio_read_completed = 1;
  4870. }
  4871. vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
  4872. r = emulate_instruction(vcpu, EMULTYPE_NO_DECODE);
  4873. srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
  4874. if (r != EMULATE_DONE)
  4875. return 0;
  4876. return 1;
  4877. }
  4878. int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
  4879. {
  4880. int r;
  4881. sigset_t sigsaved;
  4882. if (!tsk_used_math(current) && init_fpu(current))
  4883. return -ENOMEM;
  4884. if (vcpu->sigset_active)
  4885. sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
  4886. if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_UNINITIALIZED)) {
  4887. kvm_vcpu_block(vcpu);
  4888. clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
  4889. r = -EAGAIN;
  4890. goto out;
  4891. }
  4892. /* re-sync apic's tpr */
  4893. if (!irqchip_in_kernel(vcpu->kvm)) {
  4894. if (kvm_set_cr8(vcpu, kvm_run->cr8) != 0) {
  4895. r = -EINVAL;
  4896. goto out;
  4897. }
  4898. }
  4899. r = complete_mmio(vcpu);
  4900. if (r <= 0)
  4901. goto out;
  4902. if (kvm_run->exit_reason == KVM_EXIT_HYPERCALL)
  4903. kvm_register_write(vcpu, VCPU_REGS_RAX,
  4904. kvm_run->hypercall.ret);
  4905. r = __vcpu_run(vcpu);
  4906. out:
  4907. post_kvm_run_save(vcpu);
  4908. if (vcpu->sigset_active)
  4909. sigprocmask(SIG_SETMASK, &sigsaved, NULL);
  4910. return r;
  4911. }
  4912. int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
  4913. {
  4914. if (vcpu->arch.emulate_regs_need_sync_to_vcpu) {
/*
 * We get here if userspace calls get_regs() in the middle of
 * instruction emulation. Register state needs to be copied back from
 * the emulation context to the vcpu. Userspace shouldn't usually do
 * that, but some badly designed PV devices (the vmware backdoor
 * interface) need this to work.
 */
  4922. struct decode_cache *c = &vcpu->arch.emulate_ctxt.decode;
  4923. memcpy(vcpu->arch.regs, c->regs, sizeof c->regs);
  4924. vcpu->arch.emulate_regs_need_sync_to_vcpu = false;
  4925. }
  4926. regs->rax = kvm_register_read(vcpu, VCPU_REGS_RAX);
  4927. regs->rbx = kvm_register_read(vcpu, VCPU_REGS_RBX);
  4928. regs->rcx = kvm_register_read(vcpu, VCPU_REGS_RCX);
  4929. regs->rdx = kvm_register_read(vcpu, VCPU_REGS_RDX);
  4930. regs->rsi = kvm_register_read(vcpu, VCPU_REGS_RSI);
  4931. regs->rdi = kvm_register_read(vcpu, VCPU_REGS_RDI);
  4932. regs->rsp = kvm_register_read(vcpu, VCPU_REGS_RSP);
  4933. regs->rbp = kvm_register_read(vcpu, VCPU_REGS_RBP);
  4934. #ifdef CONFIG_X86_64
  4935. regs->r8 = kvm_register_read(vcpu, VCPU_REGS_R8);
  4936. regs->r9 = kvm_register_read(vcpu, VCPU_REGS_R9);
  4937. regs->r10 = kvm_register_read(vcpu, VCPU_REGS_R10);
  4938. regs->r11 = kvm_register_read(vcpu, VCPU_REGS_R11);
  4939. regs->r12 = kvm_register_read(vcpu, VCPU_REGS_R12);
  4940. regs->r13 = kvm_register_read(vcpu, VCPU_REGS_R13);
  4941. regs->r14 = kvm_register_read(vcpu, VCPU_REGS_R14);
  4942. regs->r15 = kvm_register_read(vcpu, VCPU_REGS_R15);
  4943. #endif
  4944. regs->rip = kvm_rip_read(vcpu);
  4945. regs->rflags = kvm_get_rflags(vcpu);
  4946. return 0;
  4947. }
  4948. int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
  4949. {
  4950. vcpu->arch.emulate_regs_need_sync_from_vcpu = true;
  4951. vcpu->arch.emulate_regs_need_sync_to_vcpu = false;
  4952. kvm_register_write(vcpu, VCPU_REGS_RAX, regs->rax);
  4953. kvm_register_write(vcpu, VCPU_REGS_RBX, regs->rbx);
  4954. kvm_register_write(vcpu, VCPU_REGS_RCX, regs->rcx);
  4955. kvm_register_write(vcpu, VCPU_REGS_RDX, regs->rdx);
  4956. kvm_register_write(vcpu, VCPU_REGS_RSI, regs->rsi);
  4957. kvm_register_write(vcpu, VCPU_REGS_RDI, regs->rdi);
  4958. kvm_register_write(vcpu, VCPU_REGS_RSP, regs->rsp);
  4959. kvm_register_write(vcpu, VCPU_REGS_RBP, regs->rbp);
  4960. #ifdef CONFIG_X86_64
  4961. kvm_register_write(vcpu, VCPU_REGS_R8, regs->r8);
  4962. kvm_register_write(vcpu, VCPU_REGS_R9, regs->r9);
  4963. kvm_register_write(vcpu, VCPU_REGS_R10, regs->r10);
  4964. kvm_register_write(vcpu, VCPU_REGS_R11, regs->r11);
  4965. kvm_register_write(vcpu, VCPU_REGS_R12, regs->r12);
  4966. kvm_register_write(vcpu, VCPU_REGS_R13, regs->r13);
  4967. kvm_register_write(vcpu, VCPU_REGS_R14, regs->r14);
  4968. kvm_register_write(vcpu, VCPU_REGS_R15, regs->r15);
  4969. #endif
  4970. kvm_rip_write(vcpu, regs->rip);
  4971. kvm_set_rflags(vcpu, regs->rflags);
  4972. vcpu->arch.exception.pending = false;
  4973. kvm_make_request(KVM_REQ_EVENT, vcpu);
  4974. return 0;
  4975. }
  4976. void kvm_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l)
  4977. {
  4978. struct kvm_segment cs;
  4979. kvm_get_segment(vcpu, &cs, VCPU_SREG_CS);
  4980. *db = cs.db;
  4981. *l = cs.l;
  4982. }
  4983. EXPORT_SYMBOL_GPL(kvm_get_cs_db_l_bits);
  4984. int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
  4985. struct kvm_sregs *sregs)
  4986. {
  4987. struct desc_ptr dt;
  4988. kvm_get_segment(vcpu, &sregs->cs, VCPU_SREG_CS);
  4989. kvm_get_segment(vcpu, &sregs->ds, VCPU_SREG_DS);
  4990. kvm_get_segment(vcpu, &sregs->es, VCPU_SREG_ES);
  4991. kvm_get_segment(vcpu, &sregs->fs, VCPU_SREG_FS);
  4992. kvm_get_segment(vcpu, &sregs->gs, VCPU_SREG_GS);
  4993. kvm_get_segment(vcpu, &sregs->ss, VCPU_SREG_SS);
  4994. kvm_get_segment(vcpu, &sregs->tr, VCPU_SREG_TR);
  4995. kvm_get_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR);
  4996. kvm_x86_ops->get_idt(vcpu, &dt);
  4997. sregs->idt.limit = dt.size;
  4998. sregs->idt.base = dt.address;
  4999. kvm_x86_ops->get_gdt(vcpu, &dt);
  5000. sregs->gdt.limit = dt.size;
  5001. sregs->gdt.base = dt.address;
  5002. sregs->cr0 = kvm_read_cr0(vcpu);
  5003. sregs->cr2 = vcpu->arch.cr2;
  5004. sregs->cr3 = kvm_read_cr3(vcpu);
  5005. sregs->cr4 = kvm_read_cr4(vcpu);
  5006. sregs->cr8 = kvm_get_cr8(vcpu);
  5007. sregs->efer = vcpu->arch.efer;
  5008. sregs->apic_base = kvm_get_apic_base(vcpu);
  5009. memset(sregs->interrupt_bitmap, 0, sizeof sregs->interrupt_bitmap);
  5010. if (vcpu->arch.interrupt.pending && !vcpu->arch.interrupt.soft)
  5011. set_bit(vcpu->arch.interrupt.nr,
  5012. (unsigned long *)sregs->interrupt_bitmap);
  5013. return 0;
  5014. }
  5015. int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
  5016. struct kvm_mp_state *mp_state)
  5017. {
  5018. mp_state->mp_state = vcpu->arch.mp_state;
  5019. return 0;
  5020. }
  5021. int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
  5022. struct kvm_mp_state *mp_state)
  5023. {
  5024. vcpu->arch.mp_state = mp_state->mp_state;
  5025. kvm_make_request(KVM_REQ_EVENT, vcpu);
  5026. return 0;
  5027. }
  5028. int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason,
  5029. bool has_error_code, u32 error_code)
  5030. {
  5031. struct decode_cache *c = &vcpu->arch.emulate_ctxt.decode;
  5032. int ret;
  5033. init_emulate_ctxt(vcpu);
  5034. ret = emulator_task_switch(&vcpu->arch.emulate_ctxt,
  5035. tss_selector, reason, has_error_code,
  5036. error_code);
  5037. if (ret)
  5038. return EMULATE_FAIL;
  5039. memcpy(vcpu->arch.regs, c->regs, sizeof c->regs);
  5040. kvm_rip_write(vcpu, vcpu->arch.emulate_ctxt.eip);
  5041. kvm_set_rflags(vcpu, vcpu->arch.emulate_ctxt.eflags);
  5042. kvm_make_request(KVM_REQ_EVENT, vcpu);
  5043. return EMULATE_DONE;
  5044. }
  5045. EXPORT_SYMBOL_GPL(kvm_task_switch);
  5046. int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
  5047. struct kvm_sregs *sregs)
  5048. {
  5049. int mmu_reset_needed = 0;
  5050. int pending_vec, max_bits, idx;
  5051. struct desc_ptr dt;
  5052. dt.size = sregs->idt.limit;
  5053. dt.address = sregs->idt.base;
  5054. kvm_x86_ops->set_idt(vcpu, &dt);
  5055. dt.size = sregs->gdt.limit;
  5056. dt.address = sregs->gdt.base;
  5057. kvm_x86_ops->set_gdt(vcpu, &dt);
  5058. vcpu->arch.cr2 = sregs->cr2;
  5059. mmu_reset_needed |= kvm_read_cr3(vcpu) != sregs->cr3;
  5060. vcpu->arch.cr3 = sregs->cr3;
  5061. __set_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail);
  5062. kvm_set_cr8(vcpu, sregs->cr8);
  5063. mmu_reset_needed |= vcpu->arch.efer != sregs->efer;
  5064. kvm_x86_ops->set_efer(vcpu, sregs->efer);
  5065. kvm_set_apic_base(vcpu, sregs->apic_base);
  5066. mmu_reset_needed |= kvm_read_cr0(vcpu) != sregs->cr0;
  5067. kvm_x86_ops->set_cr0(vcpu, sregs->cr0);
  5068. vcpu->arch.cr0 = sregs->cr0;
  5069. mmu_reset_needed |= kvm_read_cr4(vcpu) != sregs->cr4;
  5070. kvm_x86_ops->set_cr4(vcpu, sregs->cr4);
  5071. if (sregs->cr4 & X86_CR4_OSXSAVE)
  5072. update_cpuid(vcpu);
  5073. idx = srcu_read_lock(&vcpu->kvm->srcu);
  5074. if (!is_long_mode(vcpu) && is_pae(vcpu)) {
  5075. load_pdptrs(vcpu, vcpu->arch.walk_mmu, kvm_read_cr3(vcpu));
  5076. mmu_reset_needed = 1;
  5077. }
  5078. srcu_read_unlock(&vcpu->kvm->srcu, idx);
  5079. if (mmu_reset_needed)
  5080. kvm_mmu_reset_context(vcpu);
  5081. max_bits = (sizeof sregs->interrupt_bitmap) << 3;
  5082. pending_vec = find_first_bit(
  5083. (const unsigned long *)sregs->interrupt_bitmap, max_bits);
  5084. if (pending_vec < max_bits) {
  5085. kvm_queue_interrupt(vcpu, pending_vec, false);
  5086. pr_debug("Set back pending irq %d\n", pending_vec);
  5087. }
  5088. kvm_set_segment(vcpu, &sregs->cs, VCPU_SREG_CS);
  5089. kvm_set_segment(vcpu, &sregs->ds, VCPU_SREG_DS);
  5090. kvm_set_segment(vcpu, &sregs->es, VCPU_SREG_ES);
  5091. kvm_set_segment(vcpu, &sregs->fs, VCPU_SREG_FS);
  5092. kvm_set_segment(vcpu, &sregs->gs, VCPU_SREG_GS);
  5093. kvm_set_segment(vcpu, &sregs->ss, VCPU_SREG_SS);
  5094. kvm_set_segment(vcpu, &sregs->tr, VCPU_SREG_TR);
  5095. kvm_set_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR);
  5096. update_cr8_intercept(vcpu);
  5097. /* Older userspace won't unhalt the vcpu on reset. */
  5098. if (kvm_vcpu_is_bsp(vcpu) && kvm_rip_read(vcpu) == 0xfff0 &&
  5099. sregs->cs.selector == 0xf000 && sregs->cs.base == 0xffff0000 &&
  5100. !is_protmode(vcpu))
  5101. vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
  5102. kvm_make_request(KVM_REQ_EVENT, vcpu);
  5103. return 0;
  5104. }
  5105. int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
  5106. struct kvm_guest_debug *dbg)
  5107. {
  5108. unsigned long rflags;
  5109. int i, r;
  5110. if (dbg->control & (KVM_GUESTDBG_INJECT_DB | KVM_GUESTDBG_INJECT_BP)) {
  5111. r = -EBUSY;
  5112. if (vcpu->arch.exception.pending)
  5113. goto out;
  5114. if (dbg->control & KVM_GUESTDBG_INJECT_DB)
  5115. kvm_queue_exception(vcpu, DB_VECTOR);
  5116. else
  5117. kvm_queue_exception(vcpu, BP_VECTOR);
  5118. }
  5119. /*
  5120. * Read rflags as long as potentially injected trace flags are still
  5121. * filtered out.
  5122. */
  5123. rflags = kvm_get_rflags(vcpu);
  5124. vcpu->guest_debug = dbg->control;
  5125. if (!(vcpu->guest_debug & KVM_GUESTDBG_ENABLE))
  5126. vcpu->guest_debug = 0;
  5127. if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) {
  5128. for (i = 0; i < KVM_NR_DB_REGS; ++i)
  5129. vcpu->arch.eff_db[i] = dbg->arch.debugreg[i];
  5130. vcpu->arch.switch_db_regs =
  5131. (dbg->arch.debugreg[7] & DR7_BP_EN_MASK);
  5132. } else {
  5133. for (i = 0; i < KVM_NR_DB_REGS; i++)
  5134. vcpu->arch.eff_db[i] = vcpu->arch.db[i];
  5135. vcpu->arch.switch_db_regs = (vcpu->arch.dr7 & DR7_BP_EN_MASK);
  5136. }
  5137. if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
  5138. vcpu->arch.singlestep_rip = kvm_rip_read(vcpu) +
  5139. get_segment_base(vcpu, VCPU_SREG_CS);
  5140. /*
  5141. * Trigger an rflags update that will inject or remove the trace
  5142. * flags.
  5143. */
  5144. kvm_set_rflags(vcpu, rflags);
  5145. kvm_x86_ops->set_guest_debug(vcpu, dbg);
  5146. r = 0;
  5147. out:
  5148. return r;
  5149. }
  5150. /*
  5151. * Translate a guest virtual address to a guest physical address.
  5152. */
  5153. int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
  5154. struct kvm_translation *tr)
  5155. {
  5156. unsigned long vaddr = tr->linear_address;
  5157. gpa_t gpa;
  5158. int idx;
  5159. idx = srcu_read_lock(&vcpu->kvm->srcu);
  5160. gpa = kvm_mmu_gva_to_gpa_system(vcpu, vaddr, NULL);
  5161. srcu_read_unlock(&vcpu->kvm->srcu, idx);
  5162. tr->physical_address = gpa;
  5163. tr->valid = gpa != UNMAPPED_GVA;
  5164. tr->writeable = 1;
  5165. tr->usermode = 0;
  5166. return 0;
  5167. }
  5168. int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
  5169. {
  5170. struct i387_fxsave_struct *fxsave =
  5171. &vcpu->arch.guest_fpu.state->fxsave;
  5172. memcpy(fpu->fpr, fxsave->st_space, 128);
  5173. fpu->fcw = fxsave->cwd;
  5174. fpu->fsw = fxsave->swd;
  5175. fpu->ftwx = fxsave->twd;
  5176. fpu->last_opcode = fxsave->fop;
  5177. fpu->last_ip = fxsave->rip;
  5178. fpu->last_dp = fxsave->rdp;
  5179. memcpy(fpu->xmm, fxsave->xmm_space, sizeof fxsave->xmm_space);
  5180. return 0;
  5181. }
  5182. int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
  5183. {
  5184. struct i387_fxsave_struct *fxsave =
  5185. &vcpu->arch.guest_fpu.state->fxsave;
  5186. memcpy(fxsave->st_space, fpu->fpr, 128);
  5187. fxsave->cwd = fpu->fcw;
  5188. fxsave->swd = fpu->fsw;
  5189. fxsave->twd = fpu->ftwx;
  5190. fxsave->fop = fpu->last_opcode;
  5191. fxsave->rip = fpu->last_ip;
  5192. fxsave->rdp = fpu->last_dp;
  5193. memcpy(fxsave->xmm_space, fpu->xmm, sizeof fxsave->xmm_space);
  5194. return 0;
  5195. }
  5196. int fx_init(struct kvm_vcpu *vcpu)
  5197. {
  5198. int err;
  5199. err = fpu_alloc(&vcpu->arch.guest_fpu);
  5200. if (err)
  5201. return err;
  5202. fpu_finit(&vcpu->arch.guest_fpu);
  5203. /*
  5204. * Ensure guest xcr0 is valid for loading
  5205. */
  5206. vcpu->arch.xcr0 = XSTATE_FP;
  5207. vcpu->arch.cr0 |= X86_CR0_ET;
  5208. return 0;
  5209. }
  5210. EXPORT_SYMBOL_GPL(fx_init);
  5211. static void fx_free(struct kvm_vcpu *vcpu)
  5212. {
  5213. fpu_free(&vcpu->arch.guest_fpu);
  5214. }
  5215. void kvm_load_guest_fpu(struct kvm_vcpu *vcpu)
  5216. {
  5217. if (vcpu->guest_fpu_loaded)
  5218. return;
/*
 * Restore all possible guest state and assume the host may use all
 * available bits. The guest xcr0 will be loaded later.
 */
  5224. kvm_put_guest_xcr0(vcpu);
  5225. vcpu->guest_fpu_loaded = 1;
  5226. unlazy_fpu(current);
  5227. fpu_restore_checking(&vcpu->arch.guest_fpu);
  5228. trace_kvm_fpu(1);
  5229. }
  5230. void kvm_put_guest_fpu(struct kvm_vcpu *vcpu)
  5231. {
  5232. kvm_put_guest_xcr0(vcpu);
  5233. if (!vcpu->guest_fpu_loaded)
  5234. return;
  5235. vcpu->guest_fpu_loaded = 0;
  5236. fpu_save_init(&vcpu->arch.guest_fpu);
  5237. ++vcpu->stat.fpu_reload;
  5238. kvm_make_request(KVM_REQ_DEACTIVATE_FPU, vcpu);
  5239. trace_kvm_fpu(0);
  5240. }
  5241. void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
  5242. {
  5243. kvmclock_reset(vcpu);
  5244. free_cpumask_var(vcpu->arch.wbinvd_dirty_mask);
  5245. fx_free(vcpu);
  5246. kvm_x86_ops->vcpu_free(vcpu);
  5247. }
  5248. struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
  5249. unsigned int id)
  5250. {
  5251. if (check_tsc_unstable() && atomic_read(&kvm->online_vcpus) != 0)
  5252. printk_once(KERN_WARNING
  5253. "kvm: SMP vm created on host with unstable TSC; "
  5254. "guest TSC will not be reliable\n");
  5255. return kvm_x86_ops->vcpu_create(kvm, id);
  5256. }
  5257. int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
  5258. {
  5259. int r;
  5260. vcpu->arch.mtrr_state.have_fixed = 1;
  5261. vcpu_load(vcpu);
  5262. r = kvm_arch_vcpu_reset(vcpu);
  5263. if (r == 0)
  5264. r = kvm_mmu_setup(vcpu);
  5265. vcpu_put(vcpu);
  5266. if (r < 0)
  5267. goto free_vcpu;
  5268. return 0;
  5269. free_vcpu:
  5270. kvm_x86_ops->vcpu_free(vcpu);
  5271. return r;
  5272. }
  5273. void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
  5274. {
  5275. vcpu->arch.apf.msr_val = 0;
  5276. vcpu_load(vcpu);
  5277. kvm_mmu_unload(vcpu);
  5278. vcpu_put(vcpu);
  5279. fx_free(vcpu);
  5280. kvm_x86_ops->vcpu_free(vcpu);
  5281. }
  5282. int kvm_arch_vcpu_reset(struct kvm_vcpu *vcpu)
  5283. {
  5284. vcpu->arch.nmi_pending = false;
  5285. vcpu->arch.nmi_injected = false;
  5286. vcpu->arch.switch_db_regs = 0;
  5287. memset(vcpu->arch.db, 0, sizeof(vcpu->arch.db));
  5288. vcpu->arch.dr6 = DR6_FIXED_1;
  5289. vcpu->arch.dr7 = DR7_FIXED_1;
  5290. kvm_make_request(KVM_REQ_EVENT, vcpu);
  5291. vcpu->arch.apf.msr_val = 0;
  5292. kvmclock_reset(vcpu);
  5293. kvm_clear_async_pf_completion_queue(vcpu);
  5294. kvm_async_pf_hash_reset(vcpu);
  5295. vcpu->arch.apf.halted = false;
  5296. return kvm_x86_ops->vcpu_reset(vcpu);
  5297. }
  5298. int kvm_arch_hardware_enable(void *garbage)
  5299. {
  5300. struct kvm *kvm;
  5301. struct kvm_vcpu *vcpu;
  5302. int i;
  5303. kvm_shared_msr_cpu_online();
  5304. list_for_each_entry(kvm, &vm_list, vm_list)
  5305. kvm_for_each_vcpu(i, vcpu, kvm)
  5306. if (vcpu->cpu == smp_processor_id())
  5307. kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
  5308. return kvm_x86_ops->hardware_enable(garbage);
  5309. }
  5310. void kvm_arch_hardware_disable(void *garbage)
  5311. {
  5312. kvm_x86_ops->hardware_disable(garbage);
  5313. drop_user_return_notifiers(garbage);
  5314. }
  5315. int kvm_arch_hardware_setup(void)
  5316. {
  5317. return kvm_x86_ops->hardware_setup();
  5318. }
  5319. void kvm_arch_hardware_unsetup(void)
  5320. {
  5321. kvm_x86_ops->hardware_unsetup();
  5322. }
  5323. void kvm_arch_check_processor_compat(void *rtn)
  5324. {
  5325. kvm_x86_ops->check_processor_compatibility(rtn);
  5326. }
  5327. int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
  5328. {
  5329. struct page *page;
  5330. struct kvm *kvm;
  5331. int r;
  5332. BUG_ON(vcpu->kvm == NULL);
  5333. kvm = vcpu->kvm;
  5334. vcpu->arch.emulate_ctxt.ops = &emulate_ops;
  5335. vcpu->arch.walk_mmu = &vcpu->arch.mmu;
  5336. vcpu->arch.mmu.root_hpa = INVALID_PAGE;
  5337. vcpu->arch.mmu.translate_gpa = translate_gpa;
  5338. vcpu->arch.nested_mmu.translate_gpa = translate_nested_gpa;
  5339. if (!irqchip_in_kernel(kvm) || kvm_vcpu_is_bsp(vcpu))
  5340. vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
  5341. else
  5342. vcpu->arch.mp_state = KVM_MP_STATE_UNINITIALIZED;
  5343. page = alloc_page(GFP_KERNEL | __GFP_ZERO);
  5344. if (!page) {
  5345. r = -ENOMEM;
  5346. goto fail;
  5347. }
  5348. vcpu->arch.pio_data = page_address(page);
  5349. kvm_init_tsc_catchup(vcpu, max_tsc_khz);
  5350. r = kvm_mmu_create(vcpu);
  5351. if (r < 0)
  5352. goto fail_free_pio_data;
  5353. if (irqchip_in_kernel(kvm)) {
  5354. r = kvm_create_lapic(vcpu);
  5355. if (r < 0)
  5356. goto fail_mmu_destroy;
  5357. }
  5358. vcpu->arch.mce_banks = kzalloc(KVM_MAX_MCE_BANKS * sizeof(u64) * 4,
  5359. GFP_KERNEL);
  5360. if (!vcpu->arch.mce_banks) {
  5361. r = -ENOMEM;
  5362. goto fail_free_lapic;
  5363. }
  5364. vcpu->arch.mcg_cap = KVM_MAX_MCE_BANKS;
  5365. if (!zalloc_cpumask_var(&vcpu->arch.wbinvd_dirty_mask, GFP_KERNEL))
  5366. goto fail_free_mce_banks;
  5367. kvm_async_pf_hash_reset(vcpu);
  5368. return 0;
  5369. fail_free_mce_banks:
  5370. kfree(vcpu->arch.mce_banks);
  5371. fail_free_lapic:
  5372. kvm_free_lapic(vcpu);
  5373. fail_mmu_destroy:
  5374. kvm_mmu_destroy(vcpu);
  5375. fail_free_pio_data:
  5376. free_page((unsigned long)vcpu->arch.pio_data);
  5377. fail:
  5378. return r;
  5379. }
  5380. void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
  5381. {
  5382. int idx;
  5383. kfree(vcpu->arch.mce_banks);
  5384. kvm_free_lapic(vcpu);
  5385. idx = srcu_read_lock(&vcpu->kvm->srcu);
  5386. kvm_mmu_destroy(vcpu);
  5387. srcu_read_unlock(&vcpu->kvm->srcu, idx);
  5388. free_page((unsigned long)vcpu->arch.pio_data);
  5389. }
  5390. int kvm_arch_init_vm(struct kvm *kvm)
  5391. {
  5392. INIT_LIST_HEAD(&kvm->arch.active_mmu_pages);
  5393. INIT_LIST_HEAD(&kvm->arch.assigned_dev_head);
  5394. /* Reserve bit 0 of irq_sources_bitmap for userspace irq source */
  5395. set_bit(KVM_USERSPACE_IRQ_SOURCE_ID, &kvm->arch.irq_sources_bitmap);
  5396. raw_spin_lock_init(&kvm->arch.tsc_write_lock);
  5397. return 0;
  5398. }
  5399. static void kvm_unload_vcpu_mmu(struct kvm_vcpu *vcpu)
  5400. {
  5401. vcpu_load(vcpu);
  5402. kvm_mmu_unload(vcpu);
  5403. vcpu_put(vcpu);
  5404. }
  5405. static void kvm_free_vcpus(struct kvm *kvm)
  5406. {
  5407. unsigned int i;
  5408. struct kvm_vcpu *vcpu;
  5409. /*
  5410. * Unpin any mmu pages first.
  5411. */
  5412. kvm_for_each_vcpu(i, vcpu, kvm) {
  5413. kvm_clear_async_pf_completion_queue(vcpu);
  5414. kvm_unload_vcpu_mmu(vcpu);
  5415. }
  5416. kvm_for_each_vcpu(i, vcpu, kvm)
  5417. kvm_arch_vcpu_free(vcpu);
  5418. mutex_lock(&kvm->lock);
  5419. for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
  5420. kvm->vcpus[i] = NULL;
  5421. atomic_set(&kvm->online_vcpus, 0);
  5422. mutex_unlock(&kvm->lock);
  5423. }
  5424. void kvm_arch_sync_events(struct kvm *kvm)
  5425. {
  5426. kvm_free_all_assigned_devices(kvm);
  5427. kvm_free_pit(kvm);
  5428. }
  5429. void kvm_arch_destroy_vm(struct kvm *kvm)
  5430. {
  5431. kvm_iommu_unmap_guest(kvm);
  5432. kfree(kvm->arch.vpic);
  5433. kfree(kvm->arch.vioapic);
  5434. kvm_free_vcpus(kvm);
  5435. if (kvm->arch.apic_access_page)
  5436. put_page(kvm->arch.apic_access_page);
  5437. if (kvm->arch.ept_identity_pagetable)
  5438. put_page(kvm->arch.ept_identity_pagetable);
  5439. }
  5440. int kvm_arch_prepare_memory_region(struct kvm *kvm,
  5441. struct kvm_memory_slot *memslot,
  5442. struct kvm_memory_slot old,
  5443. struct kvm_userspace_memory_region *mem,
  5444. int user_alloc)
  5445. {
  5446. int npages = memslot->npages;
  5447. int map_flags = MAP_PRIVATE | MAP_ANONYMOUS;
  5448. /* Prevent internal slot pages from being moved by fork()/COW. */
  5449. if (memslot->id >= KVM_MEMORY_SLOTS)
  5450. map_flags = MAP_SHARED | MAP_ANONYMOUS;
/*
 * To keep backward compatibility with older userspace, x86 needs to
 * handle the !user_alloc case.
 */
  5454. if (!user_alloc) {
  5455. if (npages && !old.rmap) {
  5456. unsigned long userspace_addr;
  5457. down_write(&current->mm->mmap_sem);
  5458. userspace_addr = do_mmap(NULL, 0,
  5459. npages * PAGE_SIZE,
  5460. PROT_READ | PROT_WRITE,
  5461. map_flags,
  5462. 0);
  5463. up_write(&current->mm->mmap_sem);
  5464. if (IS_ERR((void *)userspace_addr))
  5465. return PTR_ERR((void *)userspace_addr);
  5466. memslot->userspace_addr = userspace_addr;
  5467. }
  5468. }
  5469. return 0;
  5470. }
  5471. void kvm_arch_commit_memory_region(struct kvm *kvm,
  5472. struct kvm_userspace_memory_region *mem,
  5473. struct kvm_memory_slot old,
  5474. int user_alloc)
  5475. {
  5476. int nr_mmu_pages = 0, npages = mem->memory_size >> PAGE_SHIFT;
  5477. if (!user_alloc && !old.user_alloc && old.rmap && !npages) {
  5478. int ret;
  5479. down_write(&current->mm->mmap_sem);
  5480. ret = do_munmap(current->mm, old.userspace_addr,
  5481. old.npages * PAGE_SIZE);
  5482. up_write(&current->mm->mmap_sem);
  5483. if (ret < 0)
  5484. printk(KERN_WARNING
  5485. "kvm_vm_ioctl_set_memory_region: "
  5486. "failed to munmap memory\n");
  5487. }
  5488. if (!kvm->arch.n_requested_mmu_pages)
  5489. nr_mmu_pages = kvm_mmu_calculate_mmu_pages(kvm);
  5490. spin_lock(&kvm->mmu_lock);
  5491. if (nr_mmu_pages)
  5492. kvm_mmu_change_mmu_pages(kvm, nr_mmu_pages);
  5493. kvm_mmu_slot_remove_write_access(kvm, mem->slot);
  5494. spin_unlock(&kvm->mmu_lock);
  5495. }
  5496. void kvm_arch_flush_shadow(struct kvm *kvm)
  5497. {
  5498. kvm_mmu_zap_all(kvm);
  5499. kvm_reload_remote_mmus(kvm);
  5500. }
  5501. int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
  5502. {
  5503. return (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE &&
  5504. !vcpu->arch.apf.halted)
  5505. || !list_empty_careful(&vcpu->async_pf.done)
  5506. || vcpu->arch.mp_state == KVM_MP_STATE_SIPI_RECEIVED
  5507. || vcpu->arch.nmi_pending ||
  5508. (kvm_arch_interrupt_allowed(vcpu) &&
  5509. kvm_cpu_has_interrupt(vcpu));
  5510. }
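/*
 * Wake the vcpu if it is blocked in kvm_vcpu_block(); if it is running
 * in guest mode on another CPU, send a reschedule IPI to force an exit.
 */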
  5511. void kvm_vcpu_kick(struct kvm_vcpu *vcpu)
  5512. {
  5513. int me;
  5514. int cpu = vcpu->cpu;
  5515. if (waitqueue_active(&vcpu->wq)) {
  5516. wake_up_interruptible(&vcpu->wq);
  5517. ++vcpu->stat.halt_wakeup;
  5518. }
  5519. me = get_cpu();
  5520. if (cpu != me && (unsigned)cpu < nr_cpu_ids && cpu_online(cpu))
  5521. if (kvm_vcpu_exiting_guest_mode(vcpu) == IN_GUEST_MODE)
  5522. smp_send_reschedule(cpu);
  5523. put_cpu();
  5524. }
  5525. int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu)
  5526. {
  5527. return kvm_x86_ops->interrupt_allowed(vcpu);
  5528. }
  5529. bool kvm_is_linear_rip(struct kvm_vcpu *vcpu, unsigned long linear_rip)
  5530. {
  5531. unsigned long current_rip = kvm_rip_read(vcpu) +
  5532. get_segment_base(vcpu, VCPU_SREG_CS);
  5533. return current_rip == linear_rip;
  5534. }
  5535. EXPORT_SYMBOL_GPL(kvm_is_linear_rip);
  5536. unsigned long kvm_get_rflags(struct kvm_vcpu *vcpu)
  5537. {
  5538. unsigned long rflags;
  5539. rflags = kvm_x86_ops->get_rflags(vcpu);
  5540. if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
  5541. rflags &= ~X86_EFLAGS_TF;
  5542. return rflags;
  5543. }
  5544. EXPORT_SYMBOL_GPL(kvm_get_rflags);
  5545. void kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
  5546. {
  5547. if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP &&
  5548. kvm_is_linear_rip(vcpu, vcpu->arch.singlestep_rip))
  5549. rflags |= X86_EFLAGS_TF;
  5550. kvm_x86_ops->set_rflags(vcpu, rflags);
  5551. kvm_make_request(KVM_REQ_EVENT, vcpu);
  5552. }
  5553. EXPORT_SYMBOL_GPL(kvm_set_rflags);
  5554. void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu, struct kvm_async_pf *work)
  5555. {
  5556. int r;
  5557. if ((vcpu->arch.mmu.direct_map != work->arch.direct_map) ||
  5558. is_error_page(work->page))
  5559. return;
  5560. r = kvm_mmu_reload(vcpu);
  5561. if (unlikely(r))
  5562. return;
  5563. if (!vcpu->arch.mmu.direct_map &&
  5564. work->arch.cr3 != vcpu->arch.mmu.get_cr3(vcpu))
  5565. return;
  5566. vcpu->arch.mmu.page_fault(vcpu, work->gva, 0, true);
  5567. }
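/*
 * vcpu->arch.apf.gfns[] is a small open-addressed hash table (linear
 * probing, ~0 marks an empty slot) tracking gfns with outstanding
 * async page faults.
 */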
  5568. static inline u32 kvm_async_pf_hash_fn(gfn_t gfn)
  5569. {
  5570. return hash_32(gfn & 0xffffffff, order_base_2(ASYNC_PF_PER_VCPU));
  5571. }
  5572. static inline u32 kvm_async_pf_next_probe(u32 key)
  5573. {
  5574. return (key + 1) & (roundup_pow_of_two(ASYNC_PF_PER_VCPU) - 1);
  5575. }
  5576. static void kvm_add_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
  5577. {
  5578. u32 key = kvm_async_pf_hash_fn(gfn);
  5579. while (vcpu->arch.apf.gfns[key] != ~0)
  5580. key = kvm_async_pf_next_probe(key);
  5581. vcpu->arch.apf.gfns[key] = gfn;
  5582. }
  5583. static u32 kvm_async_pf_gfn_slot(struct kvm_vcpu *vcpu, gfn_t gfn)
  5584. {
  5585. int i;
  5586. u32 key = kvm_async_pf_hash_fn(gfn);
  5587. for (i = 0; i < roundup_pow_of_two(ASYNC_PF_PER_VCPU) &&
  5588. (vcpu->arch.apf.gfns[key] != gfn &&
  5589. vcpu->arch.apf.gfns[key] != ~0); i++)
  5590. key = kvm_async_pf_next_probe(key);
  5591. return key;
  5592. }
  5593. bool kvm_find_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
  5594. {
  5595. return vcpu->arch.apf.gfns[kvm_async_pf_gfn_slot(vcpu, gfn)] == gfn;
  5596. }
  5597. static void kvm_del_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
  5598. {
  5599. u32 i, j, k;
  5600. i = j = kvm_async_pf_gfn_slot(vcpu, gfn);
  5601. while (true) {
  5602. vcpu->arch.apf.gfns[i] = ~0;
  5603. do {
  5604. j = kvm_async_pf_next_probe(j);
  5605. if (vcpu->arch.apf.gfns[j] == ~0)
  5606. return;
  5607. k = kvm_async_pf_hash_fn(vcpu->arch.apf.gfns[j]);
/*
 * k lies cyclically in ]i,j]
 * |    i.k.j |
 * |....j i.k.| or  |.k..j i...|
 */
  5613. } while ((i <= j) ? (i < k && k <= j) : (i < k || k <= j));
  5614. vcpu->arch.apf.gfns[i] = vcpu->arch.apf.gfns[j];
  5615. i = j;
  5616. }
  5617. }
  5618. static int apf_put_user(struct kvm_vcpu *vcpu, u32 val)
  5619. {
  5620. return kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.apf.data, &val,
  5621. sizeof(val));
  5622. }
  5623. void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
  5624. struct kvm_async_pf *work)
  5625. {
  5626. struct x86_exception fault;
  5627. trace_kvm_async_pf_not_present(work->arch.token, work->gva);
  5628. kvm_add_async_pf_gfn(vcpu, work->arch.gfn);
  5629. if (!(vcpu->arch.apf.msr_val & KVM_ASYNC_PF_ENABLED) ||
  5630. (vcpu->arch.apf.send_user_only &&
  5631. kvm_x86_ops->get_cpl(vcpu) == 0))
  5632. kvm_make_request(KVM_REQ_APF_HALT, vcpu);
  5633. else if (!apf_put_user(vcpu, KVM_PV_REASON_PAGE_NOT_PRESENT)) {
  5634. fault.vector = PF_VECTOR;
  5635. fault.error_code_valid = true;
  5636. fault.error_code = 0;
  5637. fault.nested_page_fault = false;
  5638. fault.address = work->arch.token;
  5639. kvm_inject_page_fault(vcpu, &fault);
  5640. }
  5641. }
  5642. void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
  5643. struct kvm_async_pf *work)
  5644. {
  5645. struct x86_exception fault;
  5646. trace_kvm_async_pf_ready(work->arch.token, work->gva);
  5647. if (is_error_page(work->page))
  5648. work->arch.token = ~0; /* broadcast wakeup */
  5649. else
  5650. kvm_del_async_pf_gfn(vcpu, work->arch.gfn);
  5651. if ((vcpu->arch.apf.msr_val & KVM_ASYNC_PF_ENABLED) &&
  5652. !apf_put_user(vcpu, KVM_PV_REASON_PAGE_READY)) {
  5653. fault.vector = PF_VECTOR;
  5654. fault.error_code_valid = true;
  5655. fault.error_code = 0;
  5656. fault.nested_page_fault = false;
  5657. fault.address = work->arch.token;
  5658. kvm_inject_page_fault(vcpu, &fault);
  5659. }
  5660. vcpu->arch.apf.halted = false;
  5661. }
  5662. bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu)
  5663. {
  5664. if (!(vcpu->arch.apf.msr_val & KVM_ASYNC_PF_ENABLED))
  5665. return true;
  5666. else
  5667. return !kvm_event_needs_reinjection(vcpu) &&
  5668. kvm_x86_ops->interrupt_allowed(vcpu);
  5669. }
  5670. EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_exit);
  5671. EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_inj_virq);
  5672. EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_page_fault);
  5673. EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_msr);
  5674. EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_cr);
  5675. EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_vmrun);
  5676. EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_vmexit);
  5677. EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_vmexit_inject);
  5678. EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_intr_vmexit);
  5679. EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_invlpga);
  5680. EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_skinit);
  5681. EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_intercepts);