x86.c

/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * derived from drivers/kvm/kvm_main.c
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright (C) 2008 Qumranet, Inc.
 * Copyright IBM Corporation, 2008
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Avi Kivity   <avi@qumranet.com>
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Amit Shah    <amit.shah@qumranet.com>
 *   Ben-Ami Yassour <benami@il.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#include <linux/kvm_host.h>
#include "irq.h"
#include "mmu.h"
#include "i8254.h"
#include "tss.h"
#include "kvm_cache_regs.h"
#include "x86.h"
#include "cpuid.h"

#include <linux/clocksource.h>
#include <linux/interrupt.h>
#include <linux/kvm.h>
#include <linux/fs.h>
#include <linux/vmalloc.h>
#include <linux/module.h>
#include <linux/mman.h>
#include <linux/highmem.h>
#include <linux/iommu.h>
#include <linux/intel-iommu.h>
#include <linux/cpufreq.h>
#include <linux/user-return-notifier.h>
#include <linux/srcu.h>
#include <linux/slab.h>
#include <linux/perf_event.h>
#include <linux/uaccess.h>
#include <linux/hash.h>
#include <linux/pci.h>
#include <trace/events/kvm.h>

#define CREATE_TRACE_POINTS
#include "trace.h"

#include <asm/debugreg.h>
#include <asm/msr.h>
#include <asm/desc.h>
#include <asm/mtrr.h>
#include <asm/mce.h>
#include <asm/i387.h>
#include <asm/fpu-internal.h> /* Ugh! */
#include <asm/xcr.h>
#include <asm/pvclock.h>
#include <asm/div64.h>

#define MAX_IO_MSRS 256
#define KVM_MAX_MCE_BANKS 32
#define KVM_MCE_CAP_SUPPORTED (MCG_CTL_P | MCG_SER_P)

#define emul_to_vcpu(ctxt) \
	container_of(ctxt, struct kvm_vcpu, arch.emulate_ctxt)

/* EFER defaults:
 * - enable syscall by default because it's emulated by KVM
 * - enable LME and LMA by default on 64-bit KVM
 */
#ifdef CONFIG_X86_64
static
u64 __read_mostly efer_reserved_bits = ~((u64)(EFER_SCE | EFER_LME | EFER_LMA));
#else
static u64 __read_mostly efer_reserved_bits = ~((u64)EFER_SCE);
#endif

#define VM_STAT(x) offsetof(struct kvm, stat.x), KVM_STAT_VM
#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU

static void update_cr8_intercept(struct kvm_vcpu *vcpu);
static void process_nmi(struct kvm_vcpu *vcpu);

struct kvm_x86_ops *kvm_x86_ops;
EXPORT_SYMBOL_GPL(kvm_x86_ops);

static bool ignore_msrs = 0;
module_param(ignore_msrs, bool, S_IRUGO | S_IWUSR);

bool kvm_has_tsc_control;
EXPORT_SYMBOL_GPL(kvm_has_tsc_control);
u32 kvm_max_guest_tsc_khz;
EXPORT_SYMBOL_GPL(kvm_max_guest_tsc_khz);

/* tsc tolerance in parts per million - default to 1/2 of the NTP threshold */
static u32 tsc_tolerance_ppm = 250;
module_param(tsc_tolerance_ppm, uint, S_IRUGO | S_IWUSR);

#define KVM_NR_SHARED_MSRS 16

struct kvm_shared_msrs_global {
	int nr;
	u32 msrs[KVM_NR_SHARED_MSRS];
};

struct kvm_shared_msrs {
	struct user_return_notifier urn;
	bool registered;
	struct kvm_shared_msr_values {
		u64 host;
		u64 curr;
	} values[KVM_NR_SHARED_MSRS];
};

static struct kvm_shared_msrs_global __read_mostly shared_msrs_global;
static DEFINE_PER_CPU(struct kvm_shared_msrs, shared_msrs);

struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "pf_fixed", VCPU_STAT(pf_fixed) },
	{ "pf_guest", VCPU_STAT(pf_guest) },
	{ "tlb_flush", VCPU_STAT(tlb_flush) },
	{ "invlpg", VCPU_STAT(invlpg) },
	{ "exits", VCPU_STAT(exits) },
	{ "io_exits", VCPU_STAT(io_exits) },
	{ "mmio_exits", VCPU_STAT(mmio_exits) },
	{ "signal_exits", VCPU_STAT(signal_exits) },
	{ "irq_window", VCPU_STAT(irq_window_exits) },
	{ "nmi_window", VCPU_STAT(nmi_window_exits) },
	{ "halt_exits", VCPU_STAT(halt_exits) },
	{ "halt_wakeup", VCPU_STAT(halt_wakeup) },
	{ "hypercalls", VCPU_STAT(hypercalls) },
	{ "request_irq", VCPU_STAT(request_irq_exits) },
	{ "irq_exits", VCPU_STAT(irq_exits) },
	{ "host_state_reload", VCPU_STAT(host_state_reload) },
	{ "efer_reload", VCPU_STAT(efer_reload) },
	{ "fpu_reload", VCPU_STAT(fpu_reload) },
	{ "insn_emulation", VCPU_STAT(insn_emulation) },
	{ "insn_emulation_fail", VCPU_STAT(insn_emulation_fail) },
	{ "irq_injections", VCPU_STAT(irq_injections) },
	{ "nmi_injections", VCPU_STAT(nmi_injections) },
	{ "mmu_shadow_zapped", VM_STAT(mmu_shadow_zapped) },
	{ "mmu_pte_write", VM_STAT(mmu_pte_write) },
	{ "mmu_pte_updated", VM_STAT(mmu_pte_updated) },
	{ "mmu_pde_zapped", VM_STAT(mmu_pde_zapped) },
	{ "mmu_flooded", VM_STAT(mmu_flooded) },
	{ "mmu_recycled", VM_STAT(mmu_recycled) },
	{ "mmu_cache_miss", VM_STAT(mmu_cache_miss) },
	{ "mmu_unsync", VM_STAT(mmu_unsync) },
	{ "remote_tlb_flush", VM_STAT(remote_tlb_flush) },
	{ "largepages", VM_STAT(lpages) },
	{ NULL }
};
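
/*
 * Illustrative note: each entry above is exposed as a file under the kvm
 * debugfs directory (typically /sys/kernel/debug/kvm/), e.g. "exits" or
 * "irq_injections"; the exact mount point depends on the host configuration.
 */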

u64 __read_mostly host_xcr0;

int emulator_fix_hypercall(struct x86_emulate_ctxt *ctxt);

static inline void kvm_async_pf_hash_reset(struct kvm_vcpu *vcpu)
{
	int i;
	for (i = 0; i < roundup_pow_of_two(ASYNC_PF_PER_VCPU); i++)
		vcpu->arch.apf.gfns[i] = ~0;
}

static void kvm_on_user_return(struct user_return_notifier *urn)
{
	unsigned slot;
	struct kvm_shared_msrs *locals
		= container_of(urn, struct kvm_shared_msrs, urn);
	struct kvm_shared_msr_values *values;

	for (slot = 0; slot < shared_msrs_global.nr; ++slot) {
		values = &locals->values[slot];
		if (values->host != values->curr) {
			wrmsrl(shared_msrs_global.msrs[slot], values->host);
			values->curr = values->host;
		}
	}
	locals->registered = false;
	user_return_notifier_unregister(urn);
}

static void shared_msr_update(unsigned slot, u32 msr)
{
	struct kvm_shared_msrs *smsr;
	u64 value;

	smsr = &__get_cpu_var(shared_msrs);
	/*
	 * Only reads here, and nobody should be modifying it at this
	 * point, so no lock is needed.
	 */
	if (slot >= shared_msrs_global.nr) {
		printk(KERN_ERR "kvm: invalid MSR slot!");
		return;
	}
	rdmsrl_safe(msr, &value);
	smsr->values[slot].host = value;
	smsr->values[slot].curr = value;
}

void kvm_define_shared_msr(unsigned slot, u32 msr)
{
	if (slot >= shared_msrs_global.nr)
		shared_msrs_global.nr = slot + 1;
	shared_msrs_global.msrs[slot] = msr;
	/* make sure the update to shared_msrs_global is visible before use */
	smp_wmb();
}
EXPORT_SYMBOL_GPL(kvm_define_shared_msr);

static void kvm_shared_msr_cpu_online(void)
{
	unsigned i;

	for (i = 0; i < shared_msrs_global.nr; ++i)
		shared_msr_update(i, shared_msrs_global.msrs[i]);
}

void kvm_set_shared_msr(unsigned slot, u64 value, u64 mask)
{
	struct kvm_shared_msrs *smsr = &__get_cpu_var(shared_msrs);

	if (((value ^ smsr->values[slot].curr) & mask) == 0)
		return;
	smsr->values[slot].curr = value;
	wrmsrl(shared_msrs_global.msrs[slot], value);
	if (!smsr->registered) {
		smsr->urn.on_user_return = kvm_on_user_return;
		user_return_notifier_register(&smsr->urn);
		smsr->registered = true;
	}
}
EXPORT_SYMBOL_GPL(kvm_set_shared_msr);

static void drop_user_return_notifiers(void *ignore)
{
	struct kvm_shared_msrs *smsr = &__get_cpu_var(shared_msrs);

	if (smsr->registered)
		kvm_on_user_return(&smsr->urn);
}
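
/*
 * Illustrative sketch of the shared-MSR machinery above (not a new API):
 * vendor code typically calls kvm_define_shared_msr(slot, msr) once at
 * init to register an MSR in shared_msrs_global, and then
 * kvm_set_shared_msr(slot, guest_value, mask) before entering the guest.
 * The per-cpu user-return notifier registered there fires when the host
 * thread returns to userspace, and kvm_on_user_return() restores each MSR
 * whose current value still differs from the saved host value.
 */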

u64 kvm_get_apic_base(struct kvm_vcpu *vcpu)
{
	if (irqchip_in_kernel(vcpu->kvm))
		return vcpu->arch.apic_base;
	else
		return vcpu->arch.apic_base;
}
EXPORT_SYMBOL_GPL(kvm_get_apic_base);

void kvm_set_apic_base(struct kvm_vcpu *vcpu, u64 data)
{
	/* TODO: reserve bits check */
	if (irqchip_in_kernel(vcpu->kvm))
		kvm_lapic_set_base(vcpu, data);
	else
		vcpu->arch.apic_base = data;
}
EXPORT_SYMBOL_GPL(kvm_set_apic_base);

#define EXCPT_BENIGN		0
#define EXCPT_CONTRIBUTORY	1
#define EXCPT_PF		2

static int exception_class(int vector)
{
	switch (vector) {
	case PF_VECTOR:
		return EXCPT_PF;
	case DE_VECTOR:
	case TS_VECTOR:
	case NP_VECTOR:
	case SS_VECTOR:
	case GP_VECTOR:
		return EXCPT_CONTRIBUTORY;
	default:
		break;
	}
	return EXCPT_BENIGN;
}

static void kvm_multiple_exception(struct kvm_vcpu *vcpu,
		unsigned nr, bool has_error, u32 error_code,
		bool reinject)
{
	u32 prev_nr;
	int class1, class2;

	kvm_make_request(KVM_REQ_EVENT, vcpu);

	if (!vcpu->arch.exception.pending) {
	queue:
		vcpu->arch.exception.pending = true;
		vcpu->arch.exception.has_error_code = has_error;
		vcpu->arch.exception.nr = nr;
		vcpu->arch.exception.error_code = error_code;
		vcpu->arch.exception.reinject = reinject;
		return;
	}

	/* a second exception is pending; decide how it combines with the first */
	prev_nr = vcpu->arch.exception.nr;
	if (prev_nr == DF_VECTOR) {
		/* triple fault -> shutdown */
		kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
		return;
	}
	class1 = exception_class(prev_nr);
	class2 = exception_class(nr);
	if ((class1 == EXCPT_CONTRIBUTORY && class2 == EXCPT_CONTRIBUTORY)
		|| (class1 == EXCPT_PF && class2 != EXCPT_BENIGN)) {
		/* generate double fault per SDM Table 5-5 */
		vcpu->arch.exception.pending = true;
		vcpu->arch.exception.has_error_code = true;
		vcpu->arch.exception.nr = DF_VECTOR;
		vcpu->arch.exception.error_code = 0;
	} else
		/* replace previous exception with a new one in the hope
		   that instruction re-execution will regenerate the lost
		   exception */
		goto queue;
}
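
/*
 * Worked example of the merging rules above (per SDM Table 5-5): a pending
 * #GP (contributory) followed by a #NP (contributory) is promoted to #DF
 * with error code 0; a pending #PF followed by any non-benign exception is
 * likewise promoted to #DF; a pending #DB (benign) followed by a #GP simply
 * queues the #GP instead and relies on re-execution to regenerate the #DB.
 */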

void kvm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr)
{
	kvm_multiple_exception(vcpu, nr, false, 0, false);
}
EXPORT_SYMBOL_GPL(kvm_queue_exception);

void kvm_requeue_exception(struct kvm_vcpu *vcpu, unsigned nr)
{
	kvm_multiple_exception(vcpu, nr, false, 0, true);
}
EXPORT_SYMBOL_GPL(kvm_requeue_exception);

void kvm_complete_insn_gp(struct kvm_vcpu *vcpu, int err)
{
	if (err)
		kvm_inject_gp(vcpu, 0);
	else
		kvm_x86_ops->skip_emulated_instruction(vcpu);
}
EXPORT_SYMBOL_GPL(kvm_complete_insn_gp);

void kvm_inject_page_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault)
{
	++vcpu->stat.pf_guest;
	vcpu->arch.cr2 = fault->address;
	kvm_queue_exception_e(vcpu, PF_VECTOR, fault->error_code);
}
EXPORT_SYMBOL_GPL(kvm_inject_page_fault);

void kvm_propagate_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault)
{
	if (mmu_is_nested(vcpu) && !fault->nested_page_fault)
		vcpu->arch.nested_mmu.inject_page_fault(vcpu, fault);
	else
		vcpu->arch.mmu.inject_page_fault(vcpu, fault);
}

void kvm_inject_nmi(struct kvm_vcpu *vcpu)
{
	atomic_inc(&vcpu->arch.nmi_queued);
	kvm_make_request(KVM_REQ_NMI, vcpu);
}
EXPORT_SYMBOL_GPL(kvm_inject_nmi);

void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code)
{
	kvm_multiple_exception(vcpu, nr, true, error_code, false);
}
EXPORT_SYMBOL_GPL(kvm_queue_exception_e);

void kvm_requeue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code)
{
	kvm_multiple_exception(vcpu, nr, true, error_code, true);
}
EXPORT_SYMBOL_GPL(kvm_requeue_exception_e);

/*
 * Check whether cpl <= required_cpl; if so, return true. Otherwise queue
 * a #GP and return false.
 */
bool kvm_require_cpl(struct kvm_vcpu *vcpu, int required_cpl)
{
	if (kvm_x86_ops->get_cpl(vcpu) <= required_cpl)
		return true;
	kvm_queue_exception_e(vcpu, GP_VECTOR, 0);
	return false;
}
EXPORT_SYMBOL_GPL(kvm_require_cpl);
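
/*
 * Illustrative usage sketch (hypothetical caller, not part of this file):
 * an instruction emulated only for CPL 0 would be guarded with
 *
 *	if (!kvm_require_cpl(vcpu, 0))
 *		return 1;
 *
 * Since the #GP has already been queued for the guest, the caller only has
 * to stop emulating.
 */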

/*
 * This function will be used to read from the physical memory of the
 * currently running guest. The difference from kvm_read_guest_page is that
 * this function can read either plain guest physical memory or the guest's
 * guest physical memory (i.e. through one more level of translation).
 */
int kvm_read_guest_page_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
			    gfn_t ngfn, void *data, int offset, int len,
			    u32 access)
{
	gfn_t real_gfn;
	gpa_t ngpa;

	ngpa = gfn_to_gpa(ngfn);
	real_gfn = mmu->translate_gpa(vcpu, ngpa, access);
	if (real_gfn == UNMAPPED_GVA)
		return -EFAULT;

	real_gfn = gpa_to_gfn(real_gfn);

	return kvm_read_guest_page(vcpu->kvm, real_gfn, data, offset, len);
}
EXPORT_SYMBOL_GPL(kvm_read_guest_page_mmu);

int kvm_read_nested_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn,
			       void *data, int offset, int len, u32 access)
{
	return kvm_read_guest_page_mmu(vcpu, vcpu->arch.walk_mmu, gfn,
				       data, offset, len, access);
}

/*
 * Load the pae pdptrs.  Return true if they are all valid.
 */
int load_pdptrs(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, unsigned long cr3)
{
	gfn_t pdpt_gfn = cr3 >> PAGE_SHIFT;
	unsigned offset = ((cr3 & (PAGE_SIZE-1)) >> 5) << 2;
	int i;
	int ret;
	u64 pdpte[ARRAY_SIZE(mmu->pdptrs)];

	ret = kvm_read_guest_page_mmu(vcpu, mmu, pdpt_gfn, pdpte,
				      offset * sizeof(u64), sizeof(pdpte),
				      PFERR_USER_MASK|PFERR_WRITE_MASK);
	if (ret < 0) {
		ret = 0;
		goto out;
	}
	for (i = 0; i < ARRAY_SIZE(pdpte); ++i) {
		if (is_present_gpte(pdpte[i]) &&
		    (pdpte[i] & vcpu->arch.mmu.rsvd_bits_mask[0][2])) {
			ret = 0;
			goto out;
		}
	}
	ret = 1;

	memcpy(mmu->pdptrs, pdpte, sizeof(mmu->pdptrs));
	__set_bit(VCPU_EXREG_PDPTR,
		  (unsigned long *)&vcpu->arch.regs_avail);
	__set_bit(VCPU_EXREG_PDPTR,
		  (unsigned long *)&vcpu->arch.regs_dirty);
out:
	return ret;
}
EXPORT_SYMBOL_GPL(load_pdptrs);
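
/*
 * Worked example for the offset arithmetic above: with cr3 = 0x12345020 the
 * PDPT lives in the page at gfn 0x12345, and
 * ((cr3 & (PAGE_SIZE-1)) >> 5) << 2 = (0x020 >> 5) << 2 = 4, so the read
 * starts at byte offset 4 * sizeof(u64) = 32 within that page, i.e. at the
 * 32-byte-aligned location selected by cr3 bits 11:5, and covers the four
 * 8-byte PDPTEs.
 */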

static bool pdptrs_changed(struct kvm_vcpu *vcpu)
{
	u64 pdpte[ARRAY_SIZE(vcpu->arch.walk_mmu->pdptrs)];
	bool changed = true;
	int offset;
	gfn_t gfn;
	int r;

	if (is_long_mode(vcpu) || !is_pae(vcpu))
		return false;

	if (!test_bit(VCPU_EXREG_PDPTR,
		      (unsigned long *)&vcpu->arch.regs_avail))
		return true;

	gfn = (kvm_read_cr3(vcpu) & ~31u) >> PAGE_SHIFT;
	offset = (kvm_read_cr3(vcpu) & ~31u) & (PAGE_SIZE - 1);
	r = kvm_read_nested_guest_page(vcpu, gfn, pdpte, offset, sizeof(pdpte),
				       PFERR_USER_MASK | PFERR_WRITE_MASK);
	if (r < 0)
		goto out;
	changed = memcmp(pdpte, vcpu->arch.walk_mmu->pdptrs, sizeof(pdpte)) != 0;
out:
	return changed;
}

int kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
{
	unsigned long old_cr0 = kvm_read_cr0(vcpu);
	unsigned long update_bits = X86_CR0_PG | X86_CR0_WP |
				    X86_CR0_CD | X86_CR0_NW;

	cr0 |= X86_CR0_ET;

#ifdef CONFIG_X86_64
	if (cr0 & 0xffffffff00000000UL)
		return 1;
#endif

	cr0 &= ~CR0_RESERVED_BITS;

	if ((cr0 & X86_CR0_NW) && !(cr0 & X86_CR0_CD))
		return 1;

	if ((cr0 & X86_CR0_PG) && !(cr0 & X86_CR0_PE))
		return 1;

	if (!is_paging(vcpu) && (cr0 & X86_CR0_PG)) {
#ifdef CONFIG_X86_64
		if ((vcpu->arch.efer & EFER_LME)) {
			int cs_db, cs_l;

			if (!is_pae(vcpu))
				return 1;
			kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
			if (cs_l)
				return 1;
		} else
#endif
		if (is_pae(vcpu) && !load_pdptrs(vcpu, vcpu->arch.walk_mmu,
						 kvm_read_cr3(vcpu)))
			return 1;
	}

	if (!(cr0 & X86_CR0_PG) && kvm_read_cr4_bits(vcpu, X86_CR4_PCIDE))
		return 1;

	kvm_x86_ops->set_cr0(vcpu, cr0);

	if ((cr0 ^ old_cr0) & X86_CR0_PG) {
		kvm_clear_async_pf_completion_queue(vcpu);
		kvm_async_pf_hash_reset(vcpu);
	}

	if ((cr0 ^ old_cr0) & update_bits)
		kvm_mmu_reset_context(vcpu);
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_set_cr0);

void kvm_lmsw(struct kvm_vcpu *vcpu, unsigned long msw)
{
	(void)kvm_set_cr0(vcpu, kvm_read_cr0_bits(vcpu, ~0x0eul) | (msw & 0x0f));
}
EXPORT_SYMBOL_GPL(kvm_lmsw);

int __kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
{
	u64 xcr0;

	/* Only support XCR_XFEATURE_ENABLED_MASK(xcr0) now */
	if (index != XCR_XFEATURE_ENABLED_MASK)
		return 1;
	xcr0 = xcr;
	if (kvm_x86_ops->get_cpl(vcpu) != 0)
		return 1;
	if (!(xcr0 & XSTATE_FP))
		return 1;
	if ((xcr0 & XSTATE_YMM) && !(xcr0 & XSTATE_SSE))
		return 1;
	if (xcr0 & ~host_xcr0)
		return 1;
	vcpu->arch.xcr0 = xcr0;
	vcpu->guest_xcr0_loaded = 0;
	return 0;
}

int kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
{
	if (__kvm_set_xcr(vcpu, index, xcr)) {
		kvm_inject_gp(vcpu, 0);
		return 1;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_set_xcr);
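
/*
 * Worked example for the XCR0 checks above: a guest XSETBV of xcr0 = 0x7
 * (x87 | SSE | YMM) is accepted as long as the host's XCR0 also has those
 * bits, while xcr0 = 0x5 (YMM without SSE) or xcr0 = 0x6 (missing the
 * mandatory x87 bit) is refused and kvm_set_xcr() injects a #GP.
 */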

int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
{
	unsigned long old_cr4 = kvm_read_cr4(vcpu);
	unsigned long pdptr_bits = X86_CR4_PGE | X86_CR4_PSE |
				   X86_CR4_PAE | X86_CR4_SMEP;

	if (cr4 & CR4_RESERVED_BITS)
		return 1;

	if (!guest_cpuid_has_xsave(vcpu) && (cr4 & X86_CR4_OSXSAVE))
		return 1;

	if (!guest_cpuid_has_smep(vcpu) && (cr4 & X86_CR4_SMEP))
		return 1;

	if (!guest_cpuid_has_fsgsbase(vcpu) && (cr4 & X86_CR4_RDWRGSFS))
		return 1;

	if (is_long_mode(vcpu)) {
		if (!(cr4 & X86_CR4_PAE))
			return 1;
	} else if (is_paging(vcpu) && (cr4 & X86_CR4_PAE)
		   && ((cr4 ^ old_cr4) & pdptr_bits)
		   && !load_pdptrs(vcpu, vcpu->arch.walk_mmu,
				   kvm_read_cr3(vcpu)))
		return 1;

	if ((cr4 & X86_CR4_PCIDE) && !(old_cr4 & X86_CR4_PCIDE)) {
		if (!guest_cpuid_has_pcid(vcpu))
			return 1;

		/* PCID cannot be enabled when cr3[11:0] != 0 or EFER.LMA = 0 */
		if ((kvm_read_cr3(vcpu) & X86_CR3_PCID_MASK) || !is_long_mode(vcpu))
			return 1;
	}

	if (kvm_x86_ops->set_cr4(vcpu, cr4))
		return 1;

	if (((cr4 ^ old_cr4) & pdptr_bits) ||
	    (!(cr4 & X86_CR4_PCIDE) && (old_cr4 & X86_CR4_PCIDE)))
		kvm_mmu_reset_context(vcpu);

	if ((cr4 ^ old_cr4) & X86_CR4_OSXSAVE)
		kvm_update_cpuid(vcpu);

	return 0;
}
EXPORT_SYMBOL_GPL(kvm_set_cr4);

int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
{
	if (cr3 == kvm_read_cr3(vcpu) && !pdptrs_changed(vcpu)) {
		kvm_mmu_sync_roots(vcpu);
		kvm_mmu_flush_tlb(vcpu);
		return 0;
	}

	if (is_long_mode(vcpu)) {
		if (kvm_read_cr4(vcpu) & X86_CR4_PCIDE) {
			if (cr3 & CR3_PCID_ENABLED_RESERVED_BITS)
				return 1;
		} else
			if (cr3 & CR3_L_MODE_RESERVED_BITS)
				return 1;
	} else {
		if (is_pae(vcpu)) {
			if (cr3 & CR3_PAE_RESERVED_BITS)
				return 1;
			if (is_paging(vcpu) &&
			    !load_pdptrs(vcpu, vcpu->arch.walk_mmu, cr3))
				return 1;
		}
		/*
		 * We don't check reserved bits in nonpae mode, because
		 * this isn't enforced, and VMware depends on this.
		 */
	}

	/*
	 * Does the new cr3 value map to physical memory? (Note, we
	 * catch an invalid cr3 even in real-mode, because it would
	 * cause trouble later on when we turn on paging anyway.)
	 *
	 * A real CPU would silently accept an invalid cr3 and would
	 * attempt to use it - with largely undefined (and often hard
	 * to debug) behavior on the guest side.
	 */
	if (unlikely(!gfn_to_memslot(vcpu->kvm, cr3 >> PAGE_SHIFT)))
		return 1;
	vcpu->arch.cr3 = cr3;
	__set_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail);
	vcpu->arch.mmu.new_cr3(vcpu);
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_set_cr3);

int kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8)
{
	if (cr8 & CR8_RESERVED_BITS)
		return 1;
	if (irqchip_in_kernel(vcpu->kvm))
		kvm_lapic_set_tpr(vcpu, cr8);
	else
		vcpu->arch.cr8 = cr8;
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_set_cr8);

unsigned long kvm_get_cr8(struct kvm_vcpu *vcpu)
{
	if (irqchip_in_kernel(vcpu->kvm))
		return kvm_lapic_get_cr8(vcpu);
	else
		return vcpu->arch.cr8;
}
EXPORT_SYMBOL_GPL(kvm_get_cr8);

static int __kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val)
{
	switch (dr) {
	case 0 ... 3:
		vcpu->arch.db[dr] = val;
		if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP))
			vcpu->arch.eff_db[dr] = val;
		break;
	case 4:
		if (kvm_read_cr4_bits(vcpu, X86_CR4_DE))
			return 1; /* #UD */
		/* fall through */
	case 6:
		if (val & 0xffffffff00000000ULL)
			return -1; /* #GP */
		vcpu->arch.dr6 = (val & DR6_VOLATILE) | DR6_FIXED_1;
		break;
	case 5:
		if (kvm_read_cr4_bits(vcpu, X86_CR4_DE))
			return 1; /* #UD */
		/* fall through */
	default: /* 7 */
		if (val & 0xffffffff00000000ULL)
			return -1; /* #GP */
		vcpu->arch.dr7 = (val & DR7_VOLATILE) | DR7_FIXED_1;
		if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)) {
			kvm_x86_ops->set_dr7(vcpu, vcpu->arch.dr7);
			vcpu->arch.switch_db_regs = (val & DR7_BP_EN_MASK);
		}
		break;
	}

	return 0;
}
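
/*
 * Return-value convention for __kvm_set_dr(): 0 means success, a positive
 * value means the access must raise #UD (DR4/DR5 with CR4.DE set), and a
 * negative value means #GP (e.g. setting the upper 32 bits of DR6/DR7).
 * kvm_set_dr() below translates these into injected exceptions;
 * _kvm_get_dr() uses only the 0/#UD part of the convention.
 */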

int kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val)
{
	int res;

	res = __kvm_set_dr(vcpu, dr, val);
	if (res > 0)
		kvm_queue_exception(vcpu, UD_VECTOR);
	else if (res < 0)
		kvm_inject_gp(vcpu, 0);

	return res;
}
EXPORT_SYMBOL_GPL(kvm_set_dr);

static int _kvm_get_dr(struct kvm_vcpu *vcpu, int dr, unsigned long *val)
{
	switch (dr) {
	case 0 ... 3:
		*val = vcpu->arch.db[dr];
		break;
	case 4:
		if (kvm_read_cr4_bits(vcpu, X86_CR4_DE))
			return 1;
		/* fall through */
	case 6:
		*val = vcpu->arch.dr6;
		break;
	case 5:
		if (kvm_read_cr4_bits(vcpu, X86_CR4_DE))
			return 1;
		/* fall through */
	default: /* 7 */
		*val = vcpu->arch.dr7;
		break;
	}

	return 0;
}

int kvm_get_dr(struct kvm_vcpu *vcpu, int dr, unsigned long *val)
{
	if (_kvm_get_dr(vcpu, dr, val)) {
		kvm_queue_exception(vcpu, UD_VECTOR);
		return 1;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_get_dr);

bool kvm_rdpmc(struct kvm_vcpu *vcpu)
{
	u32 ecx = kvm_register_read(vcpu, VCPU_REGS_RCX);
	u64 data;
	int err;

	err = kvm_pmu_read_pmc(vcpu, ecx, &data);
	if (err)
		return err;
	kvm_register_write(vcpu, VCPU_REGS_RAX, (u32)data);
	kvm_register_write(vcpu, VCPU_REGS_RDX, data >> 32);
	return err;
}
EXPORT_SYMBOL_GPL(kvm_rdpmc);
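
/*
 * Worked example for the register split above: RDPMC returns the selected
 * counter in EDX:EAX, so a 64-bit counter value of 0x0000001234abcdef ends
 * up as RAX = 0x34abcdef and RDX = 0x00000012.
 */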

/*
 * List of msr numbers which we expose to userspace through KVM_GET_MSRS
 * and KVM_SET_MSRS, and KVM_GET_MSR_INDEX_LIST.
 *
 * This list is modified at module load time to reflect the
 * capabilities of the host cpu. This capabilities test skips MSRs that are
 * kvm-specific. Those are put at the beginning of the list.
 */

#define KVM_SAVE_MSRS_BEGIN	10
static u32 msrs_to_save[] = {
	MSR_KVM_SYSTEM_TIME, MSR_KVM_WALL_CLOCK,
	MSR_KVM_SYSTEM_TIME_NEW, MSR_KVM_WALL_CLOCK_NEW,
	HV_X64_MSR_GUEST_OS_ID, HV_X64_MSR_HYPERCALL,
	HV_X64_MSR_APIC_ASSIST_PAGE, MSR_KVM_ASYNC_PF_EN, MSR_KVM_STEAL_TIME,
	MSR_KVM_PV_EOI_EN,
	MSR_IA32_SYSENTER_CS, MSR_IA32_SYSENTER_ESP, MSR_IA32_SYSENTER_EIP,
	MSR_STAR,
#ifdef CONFIG_X86_64
	MSR_CSTAR, MSR_KERNEL_GS_BASE, MSR_SYSCALL_MASK, MSR_LSTAR,
#endif
	MSR_IA32_TSC, MSR_IA32_CR_PAT, MSR_VM_HSAVE_PA
};

static unsigned num_msrs_to_save;

static u32 emulated_msrs[] = {
	MSR_IA32_TSCDEADLINE,
	MSR_IA32_MISC_ENABLE,
	MSR_IA32_MCG_STATUS,
	MSR_IA32_MCG_CTL,
};

static int set_efer(struct kvm_vcpu *vcpu, u64 efer)
{
	u64 old_efer = vcpu->arch.efer;

	if (efer & efer_reserved_bits)
		return 1;

	if (is_paging(vcpu)
	    && (vcpu->arch.efer & EFER_LME) != (efer & EFER_LME))
		return 1;

	if (efer & EFER_FFXSR) {
		struct kvm_cpuid_entry2 *feat;

		feat = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
		if (!feat || !(feat->edx & bit(X86_FEATURE_FXSR_OPT)))
			return 1;
	}

	if (efer & EFER_SVME) {
		struct kvm_cpuid_entry2 *feat;

		feat = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
		if (!feat || !(feat->ecx & bit(X86_FEATURE_SVM)))
			return 1;
	}

	efer &= ~EFER_LMA;
	efer |= vcpu->arch.efer & EFER_LMA;

	kvm_x86_ops->set_efer(vcpu, efer);

	vcpu->arch.mmu.base_role.nxe = (efer & EFER_NX) && !tdp_enabled;

	/* Update reserved bits */
	if ((efer ^ old_efer) & EFER_NX)
		kvm_mmu_reset_context(vcpu);

	return 0;
}

void kvm_enable_efer_bits(u64 mask)
{
	efer_reserved_bits &= ~mask;
}
EXPORT_SYMBOL_GPL(kvm_enable_efer_bits);

/*
 * Writes msr value into the appropriate "register".
 * Returns 0 on success, non-0 otherwise.
 * Assumes vcpu_load() was already called.
 */
int kvm_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
{
	return kvm_x86_ops->set_msr(vcpu, msr_index, data);
}

/*
 * Adapt set_msr() to msr_io()'s calling convention
 */
static int do_set_msr(struct kvm_vcpu *vcpu, unsigned index, u64 *data)
{
	return kvm_set_msr(vcpu, index, *data);
}

static void kvm_write_wall_clock(struct kvm *kvm, gpa_t wall_clock)
{
	int version;
	int r;
	struct pvclock_wall_clock wc;
	struct timespec boot;

	if (!wall_clock)
		return;

	r = kvm_read_guest(kvm, wall_clock, &version, sizeof(version));
	if (r)
		return;

	if (version & 1)
		++version;  /* first time write, random junk */

	++version;

	kvm_write_guest(kvm, wall_clock, &version, sizeof(version));

	/*
	 * The guest calculates current wall clock time by adding
	 * system time (updated by kvm_guest_time_update below) to the
	 * wall clock specified here.  guest system time equals host
	 * system time for us, thus we must fill in host boot time here.
	 */
	getboottime(&boot);

	if (kvm->arch.kvmclock_offset) {
		struct timespec ts = ns_to_timespec(kvm->arch.kvmclock_offset);
		boot = timespec_sub(boot, ts);
	}
	wc.sec = boot.tv_sec;
	wc.nsec = boot.tv_nsec;
	wc.version = version;

	kvm_write_guest(kvm, wall_clock, &wc, sizeof(wc));

	version++;
	kvm_write_guest(kvm, wall_clock, &version, sizeof(version));
}
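
/*
 * Note on the version dance above: the version field works like a seqcount.
 * It is bumped to an odd value before the structure is rewritten and to an
 * even value afterwards, so a guest that reads an odd version, or sees two
 * different versions around its copy of the data, knows the snapshot is torn
 * and must retry.
 */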

static uint32_t div_frac(uint32_t dividend, uint32_t divisor)
{
	uint32_t quotient, remainder;

	/* Don't try to replace with do_div(), this one calculates
	 * "(dividend << 32) / divisor" */
	__asm__ ( "divl %4"
		  : "=a" (quotient), "=d" (remainder)
		  : "0" (0), "1" (dividend), "r" (divisor) );

	return quotient;
}
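
/*
 * Worked example: div_frac() returns the 0.32 fixed-point ratio
 * dividend / divisor, e.g. div_frac(1000000, 3000000) is roughly
 * 0x55555555, i.e. one third scaled by 2^32.
 */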

static void kvm_get_time_scale(uint32_t scaled_khz, uint32_t base_khz,
			       s8 *pshift, u32 *pmultiplier)
{
	uint64_t scaled64;
	int32_t shift = 0;
	uint64_t tps64;
	uint32_t tps32;

	tps64 = base_khz * 1000LL;
	scaled64 = scaled_khz * 1000LL;
	while (tps64 > scaled64*2 || tps64 & 0xffffffff00000000ULL) {
		tps64 >>= 1;
		shift--;
	}

	tps32 = (uint32_t)tps64;
	while (tps32 <= scaled64 || scaled64 & 0xffffffff00000000ULL) {
		if (scaled64 & 0xffffffff00000000ULL || tps32 & 0x80000000)
			scaled64 >>= 1;
		else
			tps32 <<= 1;
		shift++;
	}

	*pshift = shift;
	*pmultiplier = div_frac(scaled64, tps32);

	pr_debug("%s: base_khz %u => %u, shift %d, mul %u\n",
		 __func__, base_khz, scaled_khz, shift, *pmultiplier);
}
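
/*
 * Worked example (values traced by hand, so treat them as illustrative):
 * converting a nanosecond base (base_khz = 1,000,000) to a 2 GHz guest TSC
 * (scaled_khz = 2,000,000) yields shift = 2 and multiplier = 0x80000000,
 * so pvclock_scale_delta(ns, mult, shift) computes (ns << 2) / 2 = 2 * ns
 * cycles, as expected for a 2-cycles-per-nanosecond clock.
 */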

static inline u64 get_kernel_ns(void)
{
	struct timespec ts;

	WARN_ON(preemptible());
	ktime_get_ts(&ts);
	monotonic_to_bootbased(&ts);
	return timespec_to_ns(&ts);
}

static DEFINE_PER_CPU(unsigned long, cpu_tsc_khz);
unsigned long max_tsc_khz;

static inline u64 nsec_to_cycles(struct kvm_vcpu *vcpu, u64 nsec)
{
	return pvclock_scale_delta(nsec, vcpu->arch.virtual_tsc_mult,
				   vcpu->arch.virtual_tsc_shift);
}

static u32 adjust_tsc_khz(u32 khz, s32 ppm)
{
	u64 v = (u64)khz * (1000000 + ppm);
	do_div(v, 1000000);
	return v;
}
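
/*
 * Worked example: adjust_tsc_khz(2600000, 250) = 2600650 kHz and
 * adjust_tsc_khz(2600000, -250) = 2599350 kHz, i.e. the +/-250 ppm
 * tolerance band around a 2.6 GHz host TSC used by kvm_set_tsc_khz() below.
 */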

static void kvm_set_tsc_khz(struct kvm_vcpu *vcpu, u32 this_tsc_khz)
{
	u32 thresh_lo, thresh_hi;
	int use_scaling = 0;

	/* Compute a scale to convert nanoseconds into TSC cycles */
	kvm_get_time_scale(this_tsc_khz, NSEC_PER_SEC / 1000,
			   &vcpu->arch.virtual_tsc_shift,
			   &vcpu->arch.virtual_tsc_mult);
	vcpu->arch.virtual_tsc_khz = this_tsc_khz;

	/*
	 * Compute the variation in TSC rate which is acceptable
	 * within the range of tolerance and decide if the
	 * rate being applied is within those bounds of the hardware
	 * rate.  If so, no scaling or compensation need be done.
	 */
	thresh_lo = adjust_tsc_khz(tsc_khz, -tsc_tolerance_ppm);
	thresh_hi = adjust_tsc_khz(tsc_khz, tsc_tolerance_ppm);
	if (this_tsc_khz < thresh_lo || this_tsc_khz > thresh_hi) {
		pr_debug("kvm: requested TSC rate %u falls outside tolerance [%u,%u]\n", this_tsc_khz, thresh_lo, thresh_hi);
		use_scaling = 1;
	}
	kvm_x86_ops->set_tsc_khz(vcpu, this_tsc_khz, use_scaling);
}
  886. static u64 compute_guest_tsc(struct kvm_vcpu *vcpu, s64 kernel_ns)
  887. {
  888. u64 tsc = pvclock_scale_delta(kernel_ns-vcpu->arch.this_tsc_nsec,
  889. vcpu->arch.virtual_tsc_mult,
  890. vcpu->arch.virtual_tsc_shift);
  891. tsc += vcpu->arch.this_tsc_write;
  892. return tsc;
  893. }
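/*
 * In other words the virtual TSC is reconstructed, in pseudo-code, as
 *
 *	guest_tsc = this_tsc_write + cycles(kernel_ns - this_tsc_nsec)
 *
 * where cycles() is the ns-to-cycles scaling set up by kvm_set_tsc_khz(),
 * so a vcpu in catchup mode advances at virtual_tsc_khz regardless of the
 * host TSC rate.
 */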
  894. void kvm_write_tsc(struct kvm_vcpu *vcpu, u64 data)
  895. {
  896. struct kvm *kvm = vcpu->kvm;
  897. u64 offset, ns, elapsed;
  898. unsigned long flags;
  899. s64 usdiff;
  900. raw_spin_lock_irqsave(&kvm->arch.tsc_write_lock, flags);
  901. offset = kvm_x86_ops->compute_tsc_offset(vcpu, data);
  902. ns = get_kernel_ns();
  903. elapsed = ns - kvm->arch.last_tsc_nsec;
  904. /* n.b - signed multiplication and division required */
  905. usdiff = data - kvm->arch.last_tsc_write;
  906. #ifdef CONFIG_X86_64
  907. usdiff = (usdiff * 1000) / vcpu->arch.virtual_tsc_khz;
  908. #else
  909. /* do_div() only does unsigned */
  910. asm("idivl %2; xor %%edx, %%edx"
  911. : "=A"(usdiff)
  912. : "A"(usdiff * 1000), "rm"(vcpu->arch.virtual_tsc_khz));
  913. #endif
  914. do_div(elapsed, 1000);
  915. usdiff -= elapsed;
  916. if (usdiff < 0)
  917. usdiff = -usdiff;
  918. /*
  919. * Special case: TSC write with a small delta (1 second) of virtual
  920. * cycle time against real time is interpreted as an attempt to
  921. * synchronize the CPU.
  922. *
  923. * For a reliable TSC, we can match TSC offsets, and for an unstable
  924. * TSC, we add elapsed time in this computation. We could let the
  925. * compensation code attempt to catch up if we fall behind, but
  926. * it's better to try to match offsets from the beginning.
  927. */
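/*
 * Concretely, usdiff as computed above is |guest cycle delta expressed in
 * microseconds - elapsed host microseconds|, so the check below treats a
 * write landing within one second of where the previous write would have
 * advanced to as a synchronization attempt rather than a deliberate jump.
 */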
  928. if (usdiff < USEC_PER_SEC &&
  929. vcpu->arch.virtual_tsc_khz == kvm->arch.last_tsc_khz) {
  930. if (!check_tsc_unstable()) {
  931. offset = kvm->arch.cur_tsc_offset;
  932. pr_debug("kvm: matched tsc offset for %llu\n", data);
  933. } else {
  934. u64 delta = nsec_to_cycles(vcpu, elapsed);
  935. data += delta;
  936. offset = kvm_x86_ops->compute_tsc_offset(vcpu, data);
  937. pr_debug("kvm: adjusted tsc offset by %llu\n", delta);
  938. }
  939. } else {
  940. /*
  941. * We split periods of matched TSC writes into generations.
  942. * For each generation, we track the original measured
  943. * nanosecond time, offset, and write, so if TSCs are in
  944. * sync, we can match exact offset, and if not, we can match
945. * exact software computation in compute_guest_tsc()
  946. *
  947. * These values are tracked in kvm->arch.cur_xxx variables.
  948. */
  949. kvm->arch.cur_tsc_generation++;
  950. kvm->arch.cur_tsc_nsec = ns;
  951. kvm->arch.cur_tsc_write = data;
  952. kvm->arch.cur_tsc_offset = offset;
  953. pr_debug("kvm: new tsc generation %u, clock %llu\n",
  954. kvm->arch.cur_tsc_generation, data);
  955. }
  956. /*
957. * We also track the most recent recorded KHZ, write and time to
  958. * allow the matching interval to be extended at each write.
  959. */
  960. kvm->arch.last_tsc_nsec = ns;
  961. kvm->arch.last_tsc_write = data;
  962. kvm->arch.last_tsc_khz = vcpu->arch.virtual_tsc_khz;
  963. /* Reset of TSC must disable overshoot protection below */
  964. vcpu->arch.hv_clock.tsc_timestamp = 0;
  965. vcpu->arch.last_guest_tsc = data;
  966. /* Keep track of which generation this VCPU has synchronized to */
  967. vcpu->arch.this_tsc_generation = kvm->arch.cur_tsc_generation;
  968. vcpu->arch.this_tsc_nsec = kvm->arch.cur_tsc_nsec;
  969. vcpu->arch.this_tsc_write = kvm->arch.cur_tsc_write;
  970. kvm_x86_ops->write_tsc_offset(vcpu, offset);
  971. raw_spin_unlock_irqrestore(&kvm->arch.tsc_write_lock, flags);
  972. }
  973. EXPORT_SYMBOL_GPL(kvm_write_tsc);
  974. static int kvm_guest_time_update(struct kvm_vcpu *v)
  975. {
  976. unsigned long flags;
  977. struct kvm_vcpu_arch *vcpu = &v->arch;
  978. void *shared_kaddr;
  979. unsigned long this_tsc_khz;
  980. s64 kernel_ns, max_kernel_ns;
  981. u64 tsc_timestamp;
  982. /* Keep irq disabled to prevent changes to the clock */
  983. local_irq_save(flags);
  984. tsc_timestamp = kvm_x86_ops->read_l1_tsc(v);
  985. kernel_ns = get_kernel_ns();
  986. this_tsc_khz = __get_cpu_var(cpu_tsc_khz);
  987. if (unlikely(this_tsc_khz == 0)) {
  988. local_irq_restore(flags);
  989. kvm_make_request(KVM_REQ_CLOCK_UPDATE, v);
  990. return 1;
  991. }
  992. /*
  993. * We may have to catch up the TSC to match elapsed wall clock
  994. * time for two reasons, even if kvmclock is used.
  995. * 1) CPU could have been running below the maximum TSC rate
  996. * 2) Broken TSC compensation resets the base at each VCPU
  997. * entry to avoid unknown leaps of TSC even when running
  998. * again on the same CPU. This may cause apparent elapsed
  999. * time to disappear, and the guest to stand still or run
  1000. * very slowly.
  1001. */
  1002. if (vcpu->tsc_catchup) {
  1003. u64 tsc = compute_guest_tsc(v, kernel_ns);
  1004. if (tsc > tsc_timestamp) {
  1005. adjust_tsc_offset_guest(v, tsc - tsc_timestamp);
  1006. tsc_timestamp = tsc;
  1007. }
  1008. }
  1009. local_irq_restore(flags);
  1010. if (!vcpu->time_page)
  1011. return 0;
  1012. /*
  1013. * Time as measured by the TSC may go backwards when resetting the base
  1014. * tsc_timestamp. The reason for this is that the TSC resolution is
  1015. * higher than the resolution of the other clock scales. Thus, many
1016. * possible measurements of the TSC correspond to one measurement of any
  1017. * other clock, and so a spread of values is possible. This is not a
  1018. * problem for the computation of the nanosecond clock; with TSC rates
1019. * around 1GHz, there can only be a few cycles which correspond to one
  1020. * nanosecond value, and any path through this code will inevitably
  1021. * take longer than that. However, with the kernel_ns value itself,
  1022. * the precision may be much lower, down to HZ granularity. If the
  1023. * first sampling of TSC against kernel_ns ends in the low part of the
  1024. * range, and the second in the high end of the range, we can get:
  1025. *
  1026. * (TSC - offset_low) * S + kns_old > (TSC - offset_high) * S + kns_new
  1027. *
  1028. * As the sampling errors potentially range in the thousands of cycles,
  1029. * it is possible such a time value has already been observed by the
  1030. * guest. To protect against this, we must compute the system time as
  1031. * observed by the guest and ensure the new system time is greater.
  1032. */
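/*
 * Concretely, the code below recomputes the largest kvmclock value the
 * guest could already have observed,
 *
 *	max_kernel_ns = last_kernel_ns + scale(last_guest_tsc - tsc_timestamp)
 *
 * and clamps the new kernel_ns up to it, so the reported system time never
 * appears to step backwards across an update.
 */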
  1033. max_kernel_ns = 0;
  1034. if (vcpu->hv_clock.tsc_timestamp) {
  1035. max_kernel_ns = vcpu->last_guest_tsc -
  1036. vcpu->hv_clock.tsc_timestamp;
  1037. max_kernel_ns = pvclock_scale_delta(max_kernel_ns,
  1038. vcpu->hv_clock.tsc_to_system_mul,
  1039. vcpu->hv_clock.tsc_shift);
  1040. max_kernel_ns += vcpu->last_kernel_ns;
  1041. }
  1042. if (unlikely(vcpu->hw_tsc_khz != this_tsc_khz)) {
  1043. kvm_get_time_scale(NSEC_PER_SEC / 1000, this_tsc_khz,
  1044. &vcpu->hv_clock.tsc_shift,
  1045. &vcpu->hv_clock.tsc_to_system_mul);
  1046. vcpu->hw_tsc_khz = this_tsc_khz;
  1047. }
  1048. if (max_kernel_ns > kernel_ns)
  1049. kernel_ns = max_kernel_ns;
  1050. /* With all the info we got, fill in the values */
  1051. vcpu->hv_clock.tsc_timestamp = tsc_timestamp;
  1052. vcpu->hv_clock.system_time = kernel_ns + v->kvm->arch.kvmclock_offset;
  1053. vcpu->last_kernel_ns = kernel_ns;
  1054. vcpu->last_guest_tsc = tsc_timestamp;
  1055. vcpu->hv_clock.flags = 0;
  1056. /*
  1057. * The interface expects us to write an even number signaling that the
  1058. * update is finished. Since the guest won't see the intermediate
  1059. * state, we just increase by 2 at the end.
  1060. */
  1061. vcpu->hv_clock.version += 2;
  1062. shared_kaddr = kmap_atomic(vcpu->time_page);
  1063. memcpy(shared_kaddr + vcpu->time_offset, &vcpu->hv_clock,
  1064. sizeof(vcpu->hv_clock));
  1065. kunmap_atomic(shared_kaddr);
  1066. mark_page_dirty(v->kvm, vcpu->time >> PAGE_SHIFT);
  1067. return 0;
  1068. }
  1069. static bool msr_mtrr_valid(unsigned msr)
  1070. {
  1071. switch (msr) {
  1072. case 0x200 ... 0x200 + 2 * KVM_NR_VAR_MTRR - 1:
  1073. case MSR_MTRRfix64K_00000:
  1074. case MSR_MTRRfix16K_80000:
  1075. case MSR_MTRRfix16K_A0000:
  1076. case MSR_MTRRfix4K_C0000:
  1077. case MSR_MTRRfix4K_C8000:
  1078. case MSR_MTRRfix4K_D0000:
  1079. case MSR_MTRRfix4K_D8000:
  1080. case MSR_MTRRfix4K_E0000:
  1081. case MSR_MTRRfix4K_E8000:
  1082. case MSR_MTRRfix4K_F0000:
  1083. case MSR_MTRRfix4K_F8000:
  1084. case MSR_MTRRdefType:
  1085. case MSR_IA32_CR_PAT:
  1086. return true;
  1087. case 0x2f8:
  1088. return true;
  1089. }
  1090. return false;
  1091. }
  1092. static bool valid_pat_type(unsigned t)
  1093. {
  1094. return t < 8 && (1 << t) & 0xf3; /* 0, 1, 4, 5, 6, 7 */
  1095. }
  1096. static bool valid_mtrr_type(unsigned t)
  1097. {
  1098. return t < 8 && (1 << t) & 0x73; /* 0, 1, 4, 5, 6 */
  1099. }
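/*
 * Both helpers use the same bitmask trick: bit t of the constant is set
 * iff memory type t is allowed, e.g. 0x73 == 01110011b selects types
 * {0, 1, 4, 5, 6} for MTRRs and 0xf3 additionally permits type 7 for PAT.
 */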
  1100. static bool mtrr_valid(struct kvm_vcpu *vcpu, u32 msr, u64 data)
  1101. {
  1102. int i;
  1103. if (!msr_mtrr_valid(msr))
  1104. return false;
  1105. if (msr == MSR_IA32_CR_PAT) {
  1106. for (i = 0; i < 8; i++)
  1107. if (!valid_pat_type((data >> (i * 8)) & 0xff))
  1108. return false;
  1109. return true;
  1110. } else if (msr == MSR_MTRRdefType) {
  1111. if (data & ~0xcff)
  1112. return false;
  1113. return valid_mtrr_type(data & 0xff);
  1114. } else if (msr >= MSR_MTRRfix64K_00000 && msr <= MSR_MTRRfix4K_F8000) {
  1115. for (i = 0; i < 8 ; i++)
  1116. if (!valid_mtrr_type((data >> (i * 8)) & 0xff))
  1117. return false;
  1118. return true;
  1119. }
  1120. /* variable MTRRs */
  1121. return valid_mtrr_type(data & 0xff);
  1122. }
  1123. static int set_msr_mtrr(struct kvm_vcpu *vcpu, u32 msr, u64 data)
  1124. {
  1125. u64 *p = (u64 *)&vcpu->arch.mtrr_state.fixed_ranges;
  1126. if (!mtrr_valid(vcpu, msr, data))
  1127. return 1;
  1128. if (msr == MSR_MTRRdefType) {
  1129. vcpu->arch.mtrr_state.def_type = data;
  1130. vcpu->arch.mtrr_state.enabled = (data & 0xc00) >> 10;
  1131. } else if (msr == MSR_MTRRfix64K_00000)
  1132. p[0] = data;
  1133. else if (msr == MSR_MTRRfix16K_80000 || msr == MSR_MTRRfix16K_A0000)
  1134. p[1 + msr - MSR_MTRRfix16K_80000] = data;
  1135. else if (msr >= MSR_MTRRfix4K_C0000 && msr <= MSR_MTRRfix4K_F8000)
  1136. p[3 + msr - MSR_MTRRfix4K_C0000] = data;
  1137. else if (msr == MSR_IA32_CR_PAT)
  1138. vcpu->arch.pat = data;
  1139. else { /* Variable MTRRs */
  1140. int idx, is_mtrr_mask;
  1141. u64 *pt;
  1142. idx = (msr - 0x200) / 2;
  1143. is_mtrr_mask = msr - 0x200 - 2 * idx;
  1144. if (!is_mtrr_mask)
  1145. pt =
  1146. (u64 *)&vcpu->arch.mtrr_state.var_ranges[idx].base_lo;
  1147. else
  1148. pt =
  1149. (u64 *)&vcpu->arch.mtrr_state.var_ranges[idx].mask_lo;
  1150. *pt = data;
  1151. }
  1152. kvm_mmu_reset_context(vcpu);
  1153. return 0;
  1154. }
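/*
 * Variable-range MTRRs come as base/mask pairs starting at MSR 0x200
 * (IA32_MTRR_PHYSBASE0) and 0x201 (IA32_MTRR_PHYSMASK0); for example
 * msr == 0x203 gives idx == 1 and is_mtrr_mask == 1, i.e. the mask
 * register of variable range 1.
 */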
  1155. static int set_msr_mce(struct kvm_vcpu *vcpu, u32 msr, u64 data)
  1156. {
  1157. u64 mcg_cap = vcpu->arch.mcg_cap;
  1158. unsigned bank_num = mcg_cap & 0xff;
  1159. switch (msr) {
  1160. case MSR_IA32_MCG_STATUS:
  1161. vcpu->arch.mcg_status = data;
  1162. break;
  1163. case MSR_IA32_MCG_CTL:
  1164. if (!(mcg_cap & MCG_CTL_P))
  1165. return 1;
  1166. if (data != 0 && data != ~(u64)0)
  1167. return -1;
  1168. vcpu->arch.mcg_ctl = data;
  1169. break;
  1170. default:
  1171. if (msr >= MSR_IA32_MC0_CTL &&
  1172. msr < MSR_IA32_MC0_CTL + 4 * bank_num) {
  1173. u32 offset = msr - MSR_IA32_MC0_CTL;
  1174. /* only 0 or all 1s can be written to IA32_MCi_CTL
  1175. * some Linux kernels though clear bit 10 in bank 4 to
1176. * work around a BIOS/GART TBL issue on AMD K8s; ignore
1177. * this to avoid an uncaught #GP in the guest
  1178. */
  1179. if ((offset & 0x3) == 0 &&
  1180. data != 0 && (data | (1 << 10)) != ~(u64)0)
  1181. return -1;
  1182. vcpu->arch.mce_banks[offset] = data;
  1183. break;
  1184. }
  1185. return 1;
  1186. }
  1187. return 0;
  1188. }
  1189. static int xen_hvm_config(struct kvm_vcpu *vcpu, u64 data)
  1190. {
  1191. struct kvm *kvm = vcpu->kvm;
  1192. int lm = is_long_mode(vcpu);
  1193. u8 *blob_addr = lm ? (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_64
  1194. : (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
  1195. u8 blob_size = lm ? kvm->arch.xen_hvm_config.blob_size_64
  1196. : kvm->arch.xen_hvm_config.blob_size_32;
  1197. u32 page_num = data & ~PAGE_MASK;
  1198. u64 page_addr = data & PAGE_MASK;
  1199. u8 *page;
  1200. int r;
  1201. r = -E2BIG;
  1202. if (page_num >= blob_size)
  1203. goto out;
  1204. r = -ENOMEM;
  1205. page = memdup_user(blob_addr + (page_num * PAGE_SIZE), PAGE_SIZE);
  1206. if (IS_ERR(page)) {
  1207. r = PTR_ERR(page);
  1208. goto out;
  1209. }
  1210. if (kvm_write_guest(kvm, page_addr, page, PAGE_SIZE))
  1211. goto out_free;
  1212. r = 0;
  1213. out_free:
  1214. kfree(page);
  1215. out:
  1216. return r;
  1217. }
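/*
 * The guest encodes both pieces of the request in a single MSR write: the
 * page-aligned bits of 'data' give the destination guest physical address,
 * while the low bits select which page of the host-supplied hypercall blob
 * to copy there.
 */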
  1218. static bool kvm_hv_hypercall_enabled(struct kvm *kvm)
  1219. {
  1220. return kvm->arch.hv_hypercall & HV_X64_MSR_HYPERCALL_ENABLE;
  1221. }
  1222. static bool kvm_hv_msr_partition_wide(u32 msr)
  1223. {
  1224. bool r = false;
  1225. switch (msr) {
  1226. case HV_X64_MSR_GUEST_OS_ID:
  1227. case HV_X64_MSR_HYPERCALL:
  1228. r = true;
  1229. break;
  1230. }
  1231. return r;
  1232. }
  1233. static int set_msr_hyperv_pw(struct kvm_vcpu *vcpu, u32 msr, u64 data)
  1234. {
  1235. struct kvm *kvm = vcpu->kvm;
  1236. switch (msr) {
  1237. case HV_X64_MSR_GUEST_OS_ID:
  1238. kvm->arch.hv_guest_os_id = data;
  1239. /* setting guest os id to zero disables hypercall page */
  1240. if (!kvm->arch.hv_guest_os_id)
  1241. kvm->arch.hv_hypercall &= ~HV_X64_MSR_HYPERCALL_ENABLE;
  1242. break;
  1243. case HV_X64_MSR_HYPERCALL: {
  1244. u64 gfn;
  1245. unsigned long addr;
  1246. u8 instructions[4];
  1247. /* if guest os id is not set hypercall should remain disabled */
  1248. if (!kvm->arch.hv_guest_os_id)
  1249. break;
  1250. if (!(data & HV_X64_MSR_HYPERCALL_ENABLE)) {
  1251. kvm->arch.hv_hypercall = data;
  1252. break;
  1253. }
  1254. gfn = data >> HV_X64_MSR_HYPERCALL_PAGE_ADDRESS_SHIFT;
  1255. addr = gfn_to_hva(kvm, gfn);
  1256. if (kvm_is_error_hva(addr))
  1257. return 1;
  1258. kvm_x86_ops->patch_hypercall(vcpu, instructions);
  1259. ((unsigned char *)instructions)[3] = 0xc3; /* ret */
  1260. if (__copy_to_user((void __user *)addr, instructions, 4))
  1261. return 1;
  1262. kvm->arch.hv_hypercall = data;
  1263. break;
  1264. }
  1265. default:
  1266. vcpu_unimpl(vcpu, "HYPER-V unimplemented wrmsr: 0x%x "
  1267. "data 0x%llx\n", msr, data);
  1268. return 1;
  1269. }
  1270. return 0;
  1271. }
  1272. static int set_msr_hyperv(struct kvm_vcpu *vcpu, u32 msr, u64 data)
  1273. {
  1274. switch (msr) {
  1275. case HV_X64_MSR_APIC_ASSIST_PAGE: {
  1276. unsigned long addr;
  1277. if (!(data & HV_X64_MSR_APIC_ASSIST_PAGE_ENABLE)) {
  1278. vcpu->arch.hv_vapic = data;
  1279. break;
  1280. }
  1281. addr = gfn_to_hva(vcpu->kvm, data >>
  1282. HV_X64_MSR_APIC_ASSIST_PAGE_ADDRESS_SHIFT);
  1283. if (kvm_is_error_hva(addr))
  1284. return 1;
  1285. if (__clear_user((void __user *)addr, PAGE_SIZE))
  1286. return 1;
  1287. vcpu->arch.hv_vapic = data;
  1288. break;
  1289. }
  1290. case HV_X64_MSR_EOI:
  1291. return kvm_hv_vapic_msr_write(vcpu, APIC_EOI, data);
  1292. case HV_X64_MSR_ICR:
  1293. return kvm_hv_vapic_msr_write(vcpu, APIC_ICR, data);
  1294. case HV_X64_MSR_TPR:
  1295. return kvm_hv_vapic_msr_write(vcpu, APIC_TASKPRI, data);
  1296. default:
  1297. vcpu_unimpl(vcpu, "HYPER-V unimplemented wrmsr: 0x%x "
  1298. "data 0x%llx\n", msr, data);
  1299. return 1;
  1300. }
  1301. return 0;
  1302. }
  1303. static int kvm_pv_enable_async_pf(struct kvm_vcpu *vcpu, u64 data)
  1304. {
  1305. gpa_t gpa = data & ~0x3f;
1306. /* Bits 2:5 are reserved, should be zero */
  1307. if (data & 0x3c)
  1308. return 1;
  1309. vcpu->arch.apf.msr_val = data;
  1310. if (!(data & KVM_ASYNC_PF_ENABLED)) {
  1311. kvm_clear_async_pf_completion_queue(vcpu);
  1312. kvm_async_pf_hash_reset(vcpu);
  1313. return 0;
  1314. }
  1315. if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.apf.data, gpa))
  1316. return 1;
  1317. vcpu->arch.apf.send_user_only = !(data & KVM_ASYNC_PF_SEND_ALWAYS);
  1318. kvm_async_pf_wakeup_all(vcpu);
  1319. return 0;
  1320. }
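/*
 * Layout of the MSR_KVM_ASYNC_PF_EN value handled above: bit 0 enables
 * async page faults, bit 1 (KVM_ASYNC_PF_SEND_ALWAYS) requests delivery
 * even while the guest runs in kernel mode, bits 2-5 must be zero, and
 * bits 6-63 hold the 64-byte aligned guest physical address of the shared
 * area.
 */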
  1321. static void kvmclock_reset(struct kvm_vcpu *vcpu)
  1322. {
  1323. if (vcpu->arch.time_page) {
  1324. kvm_release_page_dirty(vcpu->arch.time_page);
  1325. vcpu->arch.time_page = NULL;
  1326. }
  1327. }
  1328. static void accumulate_steal_time(struct kvm_vcpu *vcpu)
  1329. {
  1330. u64 delta;
  1331. if (!(vcpu->arch.st.msr_val & KVM_MSR_ENABLED))
  1332. return;
  1333. delta = current->sched_info.run_delay - vcpu->arch.st.last_steal;
  1334. vcpu->arch.st.last_steal = current->sched_info.run_delay;
  1335. vcpu->arch.st.accum_steal = delta;
  1336. }
  1337. static void record_steal_time(struct kvm_vcpu *vcpu)
  1338. {
  1339. if (!(vcpu->arch.st.msr_val & KVM_MSR_ENABLED))
  1340. return;
  1341. if (unlikely(kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.st.stime,
  1342. &vcpu->arch.st.steal, sizeof(struct kvm_steal_time))))
  1343. return;
  1344. vcpu->arch.st.steal.steal += vcpu->arch.st.accum_steal;
  1345. vcpu->arch.st.steal.version += 2;
  1346. vcpu->arch.st.accum_steal = 0;
  1347. kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.st.stime,
  1348. &vcpu->arch.st.steal, sizeof(struct kvm_steal_time));
  1349. }
  1350. int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)
  1351. {
  1352. bool pr = false;
  1353. switch (msr) {
  1354. case MSR_EFER:
  1355. return set_efer(vcpu, data);
  1356. case MSR_K7_HWCR:
  1357. data &= ~(u64)0x40; /* ignore flush filter disable */
  1358. data &= ~(u64)0x100; /* ignore ignne emulation enable */
  1359. data &= ~(u64)0x8; /* ignore TLB cache disable */
  1360. if (data != 0) {
  1361. vcpu_unimpl(vcpu, "unimplemented HWCR wrmsr: 0x%llx\n",
  1362. data);
  1363. return 1;
  1364. }
  1365. break;
  1366. case MSR_FAM10H_MMIO_CONF_BASE:
  1367. if (data != 0) {
  1368. vcpu_unimpl(vcpu, "unimplemented MMIO_CONF_BASE wrmsr: "
  1369. "0x%llx\n", data);
  1370. return 1;
  1371. }
  1372. break;
  1373. case MSR_AMD64_NB_CFG:
  1374. break;
  1375. case MSR_IA32_DEBUGCTLMSR:
  1376. if (!data) {
  1377. /* We support the non-activated case already */
  1378. break;
  1379. } else if (data & ~(DEBUGCTLMSR_LBR | DEBUGCTLMSR_BTF)) {
  1380. /* Values other than LBR and BTF are vendor-specific,
  1381. thus reserved and should throw a #GP */
  1382. return 1;
  1383. }
  1384. vcpu_unimpl(vcpu, "%s: MSR_IA32_DEBUGCTLMSR 0x%llx, nop\n",
  1385. __func__, data);
  1386. break;
  1387. case MSR_IA32_UCODE_REV:
  1388. case MSR_IA32_UCODE_WRITE:
  1389. case MSR_VM_HSAVE_PA:
  1390. case MSR_AMD64_PATCH_LOADER:
  1391. break;
  1392. case 0x200 ... 0x2ff:
  1393. return set_msr_mtrr(vcpu, msr, data);
  1394. case MSR_IA32_APICBASE:
  1395. kvm_set_apic_base(vcpu, data);
  1396. break;
  1397. case APIC_BASE_MSR ... APIC_BASE_MSR + 0x3ff:
  1398. return kvm_x2apic_msr_write(vcpu, msr, data);
  1399. case MSR_IA32_TSCDEADLINE:
  1400. kvm_set_lapic_tscdeadline_msr(vcpu, data);
  1401. break;
  1402. case MSR_IA32_MISC_ENABLE:
  1403. vcpu->arch.ia32_misc_enable_msr = data;
  1404. break;
  1405. case MSR_KVM_WALL_CLOCK_NEW:
  1406. case MSR_KVM_WALL_CLOCK:
  1407. vcpu->kvm->arch.wall_clock = data;
  1408. kvm_write_wall_clock(vcpu->kvm, data);
  1409. break;
  1410. case MSR_KVM_SYSTEM_TIME_NEW:
  1411. case MSR_KVM_SYSTEM_TIME: {
  1412. kvmclock_reset(vcpu);
  1413. vcpu->arch.time = data;
  1414. kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
  1415. /* we verify if the enable bit is set... */
  1416. if (!(data & 1))
  1417. break;
  1418. /* ...but clean it before doing the actual write */
  1419. vcpu->arch.time_offset = data & ~(PAGE_MASK | 1);
  1420. vcpu->arch.time_page =
  1421. gfn_to_page(vcpu->kvm, data >> PAGE_SHIFT);
  1422. if (is_error_page(vcpu->arch.time_page)) {
  1423. kvm_release_page_clean(vcpu->arch.time_page);
  1424. vcpu->arch.time_page = NULL;
  1425. }
  1426. break;
  1427. }
  1428. case MSR_KVM_ASYNC_PF_EN:
  1429. if (kvm_pv_enable_async_pf(vcpu, data))
  1430. return 1;
  1431. break;
  1432. case MSR_KVM_STEAL_TIME:
  1433. if (unlikely(!sched_info_on()))
  1434. return 1;
  1435. if (data & KVM_STEAL_RESERVED_MASK)
  1436. return 1;
  1437. if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.st.stime,
  1438. data & KVM_STEAL_VALID_BITS))
  1439. return 1;
  1440. vcpu->arch.st.msr_val = data;
  1441. if (!(data & KVM_MSR_ENABLED))
  1442. break;
  1443. vcpu->arch.st.last_steal = current->sched_info.run_delay;
  1444. preempt_disable();
  1445. accumulate_steal_time(vcpu);
  1446. preempt_enable();
  1447. kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu);
  1448. break;
  1449. case MSR_KVM_PV_EOI_EN:
  1450. if (kvm_lapic_enable_pv_eoi(vcpu, data))
  1451. return 1;
  1452. break;
  1453. case MSR_IA32_MCG_CTL:
  1454. case MSR_IA32_MCG_STATUS:
  1455. case MSR_IA32_MC0_CTL ... MSR_IA32_MC0_CTL + 4 * KVM_MAX_MCE_BANKS - 1:
  1456. return set_msr_mce(vcpu, msr, data);
  1457. /* Performance counters are not protected by a CPUID bit,
  1458. * so we should check all of them in the generic path for the sake of
  1459. * cross vendor migration.
  1460. * Writing a zero into the event select MSRs disables them,
  1461. * which we perfectly emulate ;-). Any other value should be at least
  1462. * reported, some guests depend on them.
  1463. */
  1464. case MSR_K7_EVNTSEL0:
  1465. case MSR_K7_EVNTSEL1:
  1466. case MSR_K7_EVNTSEL2:
  1467. case MSR_K7_EVNTSEL3:
  1468. if (data != 0)
  1469. vcpu_unimpl(vcpu, "unimplemented perfctr wrmsr: "
  1470. "0x%x data 0x%llx\n", msr, data);
  1471. break;
  1472. /* at least RHEL 4 unconditionally writes to the perfctr registers,
  1473. * so we ignore writes to make it happy.
  1474. */
  1475. case MSR_K7_PERFCTR0:
  1476. case MSR_K7_PERFCTR1:
  1477. case MSR_K7_PERFCTR2:
  1478. case MSR_K7_PERFCTR3:
  1479. vcpu_unimpl(vcpu, "unimplemented perfctr wrmsr: "
  1480. "0x%x data 0x%llx\n", msr, data);
  1481. break;
  1482. case MSR_P6_PERFCTR0:
  1483. case MSR_P6_PERFCTR1:
  1484. pr = true;
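/* fall through: PERFCTR and EVNTSEL writes share the PMU handling below */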
  1485. case MSR_P6_EVNTSEL0:
  1486. case MSR_P6_EVNTSEL1:
  1487. if (kvm_pmu_msr(vcpu, msr))
  1488. return kvm_pmu_set_msr(vcpu, msr, data);
  1489. if (pr || data != 0)
  1490. vcpu_unimpl(vcpu, "disabled perfctr wrmsr: "
  1491. "0x%x data 0x%llx\n", msr, data);
  1492. break;
  1493. case MSR_K7_CLK_CTL:
  1494. /*
  1495. * Ignore all writes to this no longer documented MSR.
  1496. * Writes are only relevant for old K7 processors,
  1497. * all pre-dating SVM, but a recommended workaround from
1498. * AMD for these chips. It is possible to specify the
  1499. * affected processor models on the command line, hence
  1500. * the need to ignore the workaround.
  1501. */
  1502. break;
  1503. case HV_X64_MSR_GUEST_OS_ID ... HV_X64_MSR_SINT15:
  1504. if (kvm_hv_msr_partition_wide(msr)) {
  1505. int r;
  1506. mutex_lock(&vcpu->kvm->lock);
  1507. r = set_msr_hyperv_pw(vcpu, msr, data);
  1508. mutex_unlock(&vcpu->kvm->lock);
  1509. return r;
  1510. } else
  1511. return set_msr_hyperv(vcpu, msr, data);
  1512. break;
  1513. case MSR_IA32_BBL_CR_CTL3:
  1514. /* Drop writes to this legacy MSR -- see rdmsr
  1515. * counterpart for further detail.
  1516. */
  1517. vcpu_unimpl(vcpu, "ignored wrmsr: 0x%x data %llx\n", msr, data);
  1518. break;
  1519. case MSR_AMD64_OSVW_ID_LENGTH:
  1520. if (!guest_cpuid_has_osvw(vcpu))
  1521. return 1;
  1522. vcpu->arch.osvw.length = data;
  1523. break;
  1524. case MSR_AMD64_OSVW_STATUS:
  1525. if (!guest_cpuid_has_osvw(vcpu))
  1526. return 1;
  1527. vcpu->arch.osvw.status = data;
  1528. break;
  1529. default:
  1530. if (msr && (msr == vcpu->kvm->arch.xen_hvm_config.msr))
  1531. return xen_hvm_config(vcpu, data);
  1532. if (kvm_pmu_msr(vcpu, msr))
  1533. return kvm_pmu_set_msr(vcpu, msr, data);
  1534. if (!ignore_msrs) {
  1535. vcpu_unimpl(vcpu, "unhandled wrmsr: 0x%x data %llx\n",
  1536. msr, data);
  1537. return 1;
  1538. } else {
  1539. vcpu_unimpl(vcpu, "ignored wrmsr: 0x%x data %llx\n",
  1540. msr, data);
  1541. break;
  1542. }
  1543. }
  1544. return 0;
  1545. }
  1546. EXPORT_SYMBOL_GPL(kvm_set_msr_common);
  1547. /*
  1548. * Reads an msr value (of 'msr_index') into 'pdata'.
  1549. * Returns 0 on success, non-0 otherwise.
  1550. * Assumes vcpu_load() was already called.
  1551. */
  1552. int kvm_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
  1553. {
  1554. return kvm_x86_ops->get_msr(vcpu, msr_index, pdata);
  1555. }
  1556. static int get_msr_mtrr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
  1557. {
  1558. u64 *p = (u64 *)&vcpu->arch.mtrr_state.fixed_ranges;
  1559. if (!msr_mtrr_valid(msr))
  1560. return 1;
  1561. if (msr == MSR_MTRRdefType)
  1562. *pdata = vcpu->arch.mtrr_state.def_type +
  1563. (vcpu->arch.mtrr_state.enabled << 10);
  1564. else if (msr == MSR_MTRRfix64K_00000)
  1565. *pdata = p[0];
  1566. else if (msr == MSR_MTRRfix16K_80000 || msr == MSR_MTRRfix16K_A0000)
  1567. *pdata = p[1 + msr - MSR_MTRRfix16K_80000];
  1568. else if (msr >= MSR_MTRRfix4K_C0000 && msr <= MSR_MTRRfix4K_F8000)
  1569. *pdata = p[3 + msr - MSR_MTRRfix4K_C0000];
  1570. else if (msr == MSR_IA32_CR_PAT)
  1571. *pdata = vcpu->arch.pat;
  1572. else { /* Variable MTRRs */
  1573. int idx, is_mtrr_mask;
  1574. u64 *pt;
  1575. idx = (msr - 0x200) / 2;
  1576. is_mtrr_mask = msr - 0x200 - 2 * idx;
  1577. if (!is_mtrr_mask)
  1578. pt =
  1579. (u64 *)&vcpu->arch.mtrr_state.var_ranges[idx].base_lo;
  1580. else
  1581. pt =
  1582. (u64 *)&vcpu->arch.mtrr_state.var_ranges[idx].mask_lo;
  1583. *pdata = *pt;
  1584. }
  1585. return 0;
  1586. }
  1587. static int get_msr_mce(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
  1588. {
  1589. u64 data;
  1590. u64 mcg_cap = vcpu->arch.mcg_cap;
  1591. unsigned bank_num = mcg_cap & 0xff;
  1592. switch (msr) {
  1593. case MSR_IA32_P5_MC_ADDR:
  1594. case MSR_IA32_P5_MC_TYPE:
  1595. data = 0;
  1596. break;
  1597. case MSR_IA32_MCG_CAP:
  1598. data = vcpu->arch.mcg_cap;
  1599. break;
  1600. case MSR_IA32_MCG_CTL:
  1601. if (!(mcg_cap & MCG_CTL_P))
  1602. return 1;
  1603. data = vcpu->arch.mcg_ctl;
  1604. break;
  1605. case MSR_IA32_MCG_STATUS:
  1606. data = vcpu->arch.mcg_status;
  1607. break;
  1608. default:
  1609. if (msr >= MSR_IA32_MC0_CTL &&
  1610. msr < MSR_IA32_MC0_CTL + 4 * bank_num) {
  1611. u32 offset = msr - MSR_IA32_MC0_CTL;
  1612. data = vcpu->arch.mce_banks[offset];
  1613. break;
  1614. }
  1615. return 1;
  1616. }
  1617. *pdata = data;
  1618. return 0;
  1619. }
  1620. static int get_msr_hyperv_pw(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
  1621. {
  1622. u64 data = 0;
  1623. struct kvm *kvm = vcpu->kvm;
  1624. switch (msr) {
  1625. case HV_X64_MSR_GUEST_OS_ID:
  1626. data = kvm->arch.hv_guest_os_id;
  1627. break;
  1628. case HV_X64_MSR_HYPERCALL:
  1629. data = kvm->arch.hv_hypercall;
  1630. break;
  1631. default:
  1632. vcpu_unimpl(vcpu, "Hyper-V unhandled rdmsr: 0x%x\n", msr);
  1633. return 1;
  1634. }
  1635. *pdata = data;
  1636. return 0;
  1637. }
  1638. static int get_msr_hyperv(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
  1639. {
  1640. u64 data = 0;
  1641. switch (msr) {
  1642. case HV_X64_MSR_VP_INDEX: {
  1643. int r;
  1644. struct kvm_vcpu *v;
  1645. kvm_for_each_vcpu(r, v, vcpu->kvm)
  1646. if (v == vcpu)
  1647. data = r;
  1648. break;
  1649. }
  1650. case HV_X64_MSR_EOI:
  1651. return kvm_hv_vapic_msr_read(vcpu, APIC_EOI, pdata);
  1652. case HV_X64_MSR_ICR:
  1653. return kvm_hv_vapic_msr_read(vcpu, APIC_ICR, pdata);
  1654. case HV_X64_MSR_TPR:
  1655. return kvm_hv_vapic_msr_read(vcpu, APIC_TASKPRI, pdata);
  1656. case HV_X64_MSR_APIC_ASSIST_PAGE:
  1657. data = vcpu->arch.hv_vapic;
  1658. break;
  1659. default:
  1660. vcpu_unimpl(vcpu, "Hyper-V unhandled rdmsr: 0x%x\n", msr);
  1661. return 1;
  1662. }
  1663. *pdata = data;
  1664. return 0;
  1665. }
  1666. int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
  1667. {
  1668. u64 data;
  1669. switch (msr) {
  1670. case MSR_IA32_PLATFORM_ID:
  1671. case MSR_IA32_EBL_CR_POWERON:
  1672. case MSR_IA32_DEBUGCTLMSR:
  1673. case MSR_IA32_LASTBRANCHFROMIP:
  1674. case MSR_IA32_LASTBRANCHTOIP:
  1675. case MSR_IA32_LASTINTFROMIP:
  1676. case MSR_IA32_LASTINTTOIP:
  1677. case MSR_K8_SYSCFG:
  1678. case MSR_K7_HWCR:
  1679. case MSR_VM_HSAVE_PA:
  1680. case MSR_K7_EVNTSEL0:
  1681. case MSR_K7_PERFCTR0:
  1682. case MSR_K8_INT_PENDING_MSG:
  1683. case MSR_AMD64_NB_CFG:
  1684. case MSR_FAM10H_MMIO_CONF_BASE:
  1685. data = 0;
  1686. break;
  1687. case MSR_P6_PERFCTR0:
  1688. case MSR_P6_PERFCTR1:
  1689. case MSR_P6_EVNTSEL0:
  1690. case MSR_P6_EVNTSEL1:
  1691. if (kvm_pmu_msr(vcpu, msr))
  1692. return kvm_pmu_get_msr(vcpu, msr, pdata);
  1693. data = 0;
  1694. break;
  1695. case MSR_IA32_UCODE_REV:
  1696. data = 0x100000000ULL;
  1697. break;
  1698. case MSR_MTRRcap:
  1699. data = 0x500 | KVM_NR_VAR_MTRR;
  1700. break;
  1701. case 0x200 ... 0x2ff:
  1702. return get_msr_mtrr(vcpu, msr, pdata);
  1703. case 0xcd: /* fsb frequency */
  1704. data = 3;
  1705. break;
  1706. /*
  1707. * MSR_EBC_FREQUENCY_ID
  1708. * Conservative value valid for even the basic CPU models.
  1709. * Models 0,1: 000 in bits 23:21 indicating a bus speed of
  1710. * 100MHz, model 2 000 in bits 18:16 indicating 100MHz,
  1711. * and 266MHz for model 3, or 4. Set Core Clock
  1712. * Frequency to System Bus Frequency Ratio to 1 (bits
  1713. * 31:24) even though these are only valid for CPU
  1714. * models > 2, however guests may end up dividing or
  1715. * multiplying by zero otherwise.
  1716. */
  1717. case MSR_EBC_FREQUENCY_ID:
  1718. data = 1 << 24;
  1719. break;
  1720. case MSR_IA32_APICBASE:
  1721. data = kvm_get_apic_base(vcpu);
  1722. break;
  1723. case APIC_BASE_MSR ... APIC_BASE_MSR + 0x3ff:
  1724. return kvm_x2apic_msr_read(vcpu, msr, pdata);
  1725. break;
  1726. case MSR_IA32_TSCDEADLINE:
  1727. data = kvm_get_lapic_tscdeadline_msr(vcpu);
  1728. break;
  1729. case MSR_IA32_MISC_ENABLE:
  1730. data = vcpu->arch.ia32_misc_enable_msr;
  1731. break;
  1732. case MSR_IA32_PERF_STATUS:
  1733. /* TSC increment by tick */
  1734. data = 1000ULL;
  1735. /* CPU multiplier */
  1736. data |= (((uint64_t)4ULL) << 40);
  1737. break;
  1738. case MSR_EFER:
  1739. data = vcpu->arch.efer;
  1740. break;
  1741. case MSR_KVM_WALL_CLOCK:
  1742. case MSR_KVM_WALL_CLOCK_NEW:
  1743. data = vcpu->kvm->arch.wall_clock;
  1744. break;
  1745. case MSR_KVM_SYSTEM_TIME:
  1746. case MSR_KVM_SYSTEM_TIME_NEW:
  1747. data = vcpu->arch.time;
  1748. break;
  1749. case MSR_KVM_ASYNC_PF_EN:
  1750. data = vcpu->arch.apf.msr_val;
  1751. break;
  1752. case MSR_KVM_STEAL_TIME:
  1753. data = vcpu->arch.st.msr_val;
  1754. break;
  1755. case MSR_KVM_PV_EOI_EN:
  1756. data = vcpu->arch.pv_eoi.msr_val;
  1757. break;
  1758. case MSR_IA32_P5_MC_ADDR:
  1759. case MSR_IA32_P5_MC_TYPE:
  1760. case MSR_IA32_MCG_CAP:
  1761. case MSR_IA32_MCG_CTL:
  1762. case MSR_IA32_MCG_STATUS:
  1763. case MSR_IA32_MC0_CTL ... MSR_IA32_MC0_CTL + 4 * KVM_MAX_MCE_BANKS - 1:
  1764. return get_msr_mce(vcpu, msr, pdata);
  1765. case MSR_K7_CLK_CTL:
  1766. /*
1767. * Provide expected ramp-up count for K7. All other
1768. * bits are set to zero, indicating minimum divisors for
  1769. * every field.
  1770. *
  1771. * This prevents guest kernels on AMD host with CPU
  1772. * type 6, model 8 and higher from exploding due to
  1773. * the rdmsr failing.
  1774. */
  1775. data = 0x20000000;
  1776. break;
  1777. case HV_X64_MSR_GUEST_OS_ID ... HV_X64_MSR_SINT15:
  1778. if (kvm_hv_msr_partition_wide(msr)) {
  1779. int r;
  1780. mutex_lock(&vcpu->kvm->lock);
  1781. r = get_msr_hyperv_pw(vcpu, msr, pdata);
  1782. mutex_unlock(&vcpu->kvm->lock);
  1783. return r;
  1784. } else
  1785. return get_msr_hyperv(vcpu, msr, pdata);
  1786. break;
  1787. case MSR_IA32_BBL_CR_CTL3:
  1788. /* This legacy MSR exists but isn't fully documented in current
  1789. * silicon. It is however accessed by winxp in very narrow
  1790. * scenarios where it sets bit #19, itself documented as
  1791. * a "reserved" bit. Best effort attempt to source coherent
  1792. * read data here should the balance of the register be
  1793. * interpreted by the guest:
  1794. *
  1795. * L2 cache control register 3: 64GB range, 256KB size,
  1796. * enabled, latency 0x1, configured
  1797. */
  1798. data = 0xbe702111;
  1799. break;
  1800. case MSR_AMD64_OSVW_ID_LENGTH:
  1801. if (!guest_cpuid_has_osvw(vcpu))
  1802. return 1;
  1803. data = vcpu->arch.osvw.length;
  1804. break;
  1805. case MSR_AMD64_OSVW_STATUS:
  1806. if (!guest_cpuid_has_osvw(vcpu))
  1807. return 1;
  1808. data = vcpu->arch.osvw.status;
  1809. break;
  1810. default:
  1811. if (kvm_pmu_msr(vcpu, msr))
  1812. return kvm_pmu_get_msr(vcpu, msr, pdata);
  1813. if (!ignore_msrs) {
  1814. vcpu_unimpl(vcpu, "unhandled rdmsr: 0x%x\n", msr);
  1815. return 1;
  1816. } else {
  1817. vcpu_unimpl(vcpu, "ignored rdmsr: 0x%x\n", msr);
  1818. data = 0;
  1819. }
  1820. break;
  1821. }
  1822. *pdata = data;
  1823. return 0;
  1824. }
  1825. EXPORT_SYMBOL_GPL(kvm_get_msr_common);
  1826. /*
  1827. * Read or write a bunch of msrs. All parameters are kernel addresses.
  1828. *
  1829. * @return number of msrs set successfully.
  1830. */
  1831. static int __msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs *msrs,
  1832. struct kvm_msr_entry *entries,
  1833. int (*do_msr)(struct kvm_vcpu *vcpu,
  1834. unsigned index, u64 *data))
  1835. {
  1836. int i, idx;
  1837. idx = srcu_read_lock(&vcpu->kvm->srcu);
  1838. for (i = 0; i < msrs->nmsrs; ++i)
  1839. if (do_msr(vcpu, entries[i].index, &entries[i].data))
  1840. break;
  1841. srcu_read_unlock(&vcpu->kvm->srcu, idx);
  1842. return i;
  1843. }
  1844. /*
  1845. * Read or write a bunch of msrs. Parameters are user addresses.
  1846. *
  1847. * @return number of msrs set successfully.
  1848. */
  1849. static int msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs __user *user_msrs,
  1850. int (*do_msr)(struct kvm_vcpu *vcpu,
  1851. unsigned index, u64 *data),
  1852. int writeback)
  1853. {
  1854. struct kvm_msrs msrs;
  1855. struct kvm_msr_entry *entries;
  1856. int r, n;
  1857. unsigned size;
  1858. r = -EFAULT;
  1859. if (copy_from_user(&msrs, user_msrs, sizeof msrs))
  1860. goto out;
  1861. r = -E2BIG;
  1862. if (msrs.nmsrs >= MAX_IO_MSRS)
  1863. goto out;
  1864. size = sizeof(struct kvm_msr_entry) * msrs.nmsrs;
  1865. entries = memdup_user(user_msrs->entries, size);
  1866. if (IS_ERR(entries)) {
  1867. r = PTR_ERR(entries);
  1868. goto out;
  1869. }
  1870. r = n = __msr_io(vcpu, &msrs, entries, do_msr);
  1871. if (r < 0)
  1872. goto out_free;
  1873. r = -EFAULT;
  1874. if (writeback && copy_to_user(user_msrs->entries, entries, size))
  1875. goto out_free;
  1876. r = n;
  1877. out_free:
  1878. kfree(entries);
  1879. out:
  1880. return r;
  1881. }
  1882. int kvm_dev_ioctl_check_extension(long ext)
  1883. {
  1884. int r;
  1885. switch (ext) {
  1886. case KVM_CAP_IRQCHIP:
  1887. case KVM_CAP_HLT:
  1888. case KVM_CAP_MMU_SHADOW_CACHE_CONTROL:
  1889. case KVM_CAP_SET_TSS_ADDR:
  1890. case KVM_CAP_EXT_CPUID:
  1891. case KVM_CAP_CLOCKSOURCE:
  1892. case KVM_CAP_PIT:
  1893. case KVM_CAP_NOP_IO_DELAY:
  1894. case KVM_CAP_MP_STATE:
  1895. case KVM_CAP_SYNC_MMU:
  1896. case KVM_CAP_USER_NMI:
  1897. case KVM_CAP_REINJECT_CONTROL:
  1898. case KVM_CAP_IRQ_INJECT_STATUS:
  1899. case KVM_CAP_ASSIGN_DEV_IRQ:
  1900. case KVM_CAP_IRQFD:
  1901. case KVM_CAP_IOEVENTFD:
  1902. case KVM_CAP_PIT2:
  1903. case KVM_CAP_PIT_STATE2:
  1904. case KVM_CAP_SET_IDENTITY_MAP_ADDR:
  1905. case KVM_CAP_XEN_HVM:
  1906. case KVM_CAP_ADJUST_CLOCK:
  1907. case KVM_CAP_VCPU_EVENTS:
  1908. case KVM_CAP_HYPERV:
  1909. case KVM_CAP_HYPERV_VAPIC:
  1910. case KVM_CAP_HYPERV_SPIN:
  1911. case KVM_CAP_PCI_SEGMENT:
  1912. case KVM_CAP_DEBUGREGS:
  1913. case KVM_CAP_X86_ROBUST_SINGLESTEP:
  1914. case KVM_CAP_XSAVE:
  1915. case KVM_CAP_ASYNC_PF:
  1916. case KVM_CAP_GET_TSC_KHZ:
  1917. case KVM_CAP_PCI_2_3:
  1918. case KVM_CAP_KVMCLOCK_CTRL:
  1919. r = 1;
  1920. break;
  1921. case KVM_CAP_COALESCED_MMIO:
  1922. r = KVM_COALESCED_MMIO_PAGE_OFFSET;
  1923. break;
  1924. case KVM_CAP_VAPIC:
  1925. r = !kvm_x86_ops->cpu_has_accelerated_tpr();
  1926. break;
  1927. case KVM_CAP_NR_VCPUS:
  1928. r = KVM_SOFT_MAX_VCPUS;
  1929. break;
  1930. case KVM_CAP_MAX_VCPUS:
  1931. r = KVM_MAX_VCPUS;
  1932. break;
  1933. case KVM_CAP_NR_MEMSLOTS:
  1934. r = KVM_MEMORY_SLOTS;
  1935. break;
  1936. case KVM_CAP_PV_MMU: /* obsolete */
  1937. r = 0;
  1938. break;
  1939. case KVM_CAP_IOMMU:
  1940. r = iommu_present(&pci_bus_type);
  1941. break;
  1942. case KVM_CAP_MCE:
  1943. r = KVM_MAX_MCE_BANKS;
  1944. break;
  1945. case KVM_CAP_XCRS:
  1946. r = cpu_has_xsave;
  1947. break;
  1948. case KVM_CAP_TSC_CONTROL:
  1949. r = kvm_has_tsc_control;
  1950. break;
  1951. case KVM_CAP_TSC_DEADLINE_TIMER:
  1952. r = boot_cpu_has(X86_FEATURE_TSC_DEADLINE_TIMER);
  1953. break;
  1954. default:
  1955. r = 0;
  1956. break;
  1957. }
  1958. return r;
  1959. }
  1960. long kvm_arch_dev_ioctl(struct file *filp,
  1961. unsigned int ioctl, unsigned long arg)
  1962. {
  1963. void __user *argp = (void __user *)arg;
  1964. long r;
  1965. switch (ioctl) {
  1966. case KVM_GET_MSR_INDEX_LIST: {
  1967. struct kvm_msr_list __user *user_msr_list = argp;
  1968. struct kvm_msr_list msr_list;
  1969. unsigned n;
  1970. r = -EFAULT;
  1971. if (copy_from_user(&msr_list, user_msr_list, sizeof msr_list))
  1972. goto out;
  1973. n = msr_list.nmsrs;
  1974. msr_list.nmsrs = num_msrs_to_save + ARRAY_SIZE(emulated_msrs);
  1975. if (copy_to_user(user_msr_list, &msr_list, sizeof msr_list))
  1976. goto out;
  1977. r = -E2BIG;
  1978. if (n < msr_list.nmsrs)
  1979. goto out;
  1980. r = -EFAULT;
  1981. if (copy_to_user(user_msr_list->indices, &msrs_to_save,
  1982. num_msrs_to_save * sizeof(u32)))
  1983. goto out;
  1984. if (copy_to_user(user_msr_list->indices + num_msrs_to_save,
  1985. &emulated_msrs,
  1986. ARRAY_SIZE(emulated_msrs) * sizeof(u32)))
  1987. goto out;
  1988. r = 0;
  1989. break;
  1990. }
  1991. case KVM_GET_SUPPORTED_CPUID: {
  1992. struct kvm_cpuid2 __user *cpuid_arg = argp;
  1993. struct kvm_cpuid2 cpuid;
  1994. r = -EFAULT;
  1995. if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
  1996. goto out;
  1997. r = kvm_dev_ioctl_get_supported_cpuid(&cpuid,
  1998. cpuid_arg->entries);
  1999. if (r)
  2000. goto out;
  2001. r = -EFAULT;
  2002. if (copy_to_user(cpuid_arg, &cpuid, sizeof cpuid))
  2003. goto out;
  2004. r = 0;
  2005. break;
  2006. }
  2007. case KVM_X86_GET_MCE_CAP_SUPPORTED: {
  2008. u64 mce_cap;
  2009. mce_cap = KVM_MCE_CAP_SUPPORTED;
  2010. r = -EFAULT;
  2011. if (copy_to_user(argp, &mce_cap, sizeof mce_cap))
  2012. goto out;
  2013. r = 0;
  2014. break;
  2015. }
  2016. default:
  2017. r = -EINVAL;
  2018. }
  2019. out:
  2020. return r;
  2021. }
  2022. static void wbinvd_ipi(void *garbage)
  2023. {
  2024. wbinvd();
  2025. }
  2026. static bool need_emulate_wbinvd(struct kvm_vcpu *vcpu)
  2027. {
  2028. return vcpu->kvm->arch.iommu_domain &&
  2029. !(vcpu->kvm->arch.iommu_flags & KVM_IOMMU_CACHE_COHERENCY);
  2030. }
  2031. void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
  2032. {
2033. /* Address the fact that WBINVD may be executed by the guest */
  2034. if (need_emulate_wbinvd(vcpu)) {
  2035. if (kvm_x86_ops->has_wbinvd_exit())
  2036. cpumask_set_cpu(cpu, vcpu->arch.wbinvd_dirty_mask);
  2037. else if (vcpu->cpu != -1 && vcpu->cpu != cpu)
  2038. smp_call_function_single(vcpu->cpu,
  2039. wbinvd_ipi, NULL, 1);
  2040. }
  2041. kvm_x86_ops->vcpu_load(vcpu, cpu);
  2042. /* Apply any externally detected TSC adjustments (due to suspend) */
  2043. if (unlikely(vcpu->arch.tsc_offset_adjustment)) {
  2044. adjust_tsc_offset_host(vcpu, vcpu->arch.tsc_offset_adjustment);
  2045. vcpu->arch.tsc_offset_adjustment = 0;
  2046. set_bit(KVM_REQ_CLOCK_UPDATE, &vcpu->requests);
  2047. }
  2048. if (unlikely(vcpu->cpu != cpu) || check_tsc_unstable()) {
  2049. s64 tsc_delta = !vcpu->arch.last_host_tsc ? 0 :
  2050. native_read_tsc() - vcpu->arch.last_host_tsc;
  2051. if (tsc_delta < 0)
  2052. mark_tsc_unstable("KVM discovered backwards TSC");
  2053. if (check_tsc_unstable()) {
  2054. u64 offset = kvm_x86_ops->compute_tsc_offset(vcpu,
  2055. vcpu->arch.last_guest_tsc);
  2056. kvm_x86_ops->write_tsc_offset(vcpu, offset);
  2057. vcpu->arch.tsc_catchup = 1;
  2058. }
  2059. kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
  2060. if (vcpu->cpu != cpu)
  2061. kvm_migrate_timers(vcpu);
  2062. vcpu->cpu = cpu;
  2063. }
  2064. accumulate_steal_time(vcpu);
  2065. kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu);
  2066. }
  2067. void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
  2068. {
  2069. kvm_x86_ops->vcpu_put(vcpu);
  2070. kvm_put_guest_fpu(vcpu);
  2071. vcpu->arch.last_host_tsc = native_read_tsc();
  2072. }
  2073. static int kvm_vcpu_ioctl_get_lapic(struct kvm_vcpu *vcpu,
  2074. struct kvm_lapic_state *s)
  2075. {
  2076. memcpy(s->regs, vcpu->arch.apic->regs, sizeof *s);
  2077. return 0;
  2078. }
  2079. static int kvm_vcpu_ioctl_set_lapic(struct kvm_vcpu *vcpu,
  2080. struct kvm_lapic_state *s)
  2081. {
  2082. memcpy(vcpu->arch.apic->regs, s->regs, sizeof *s);
  2083. kvm_apic_post_state_restore(vcpu);
  2084. update_cr8_intercept(vcpu);
  2085. return 0;
  2086. }
  2087. static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
  2088. struct kvm_interrupt *irq)
  2089. {
  2090. if (irq->irq < 0 || irq->irq >= 256)
  2091. return -EINVAL;
  2092. if (irqchip_in_kernel(vcpu->kvm))
  2093. return -ENXIO;
  2094. kvm_queue_interrupt(vcpu, irq->irq, false);
  2095. kvm_make_request(KVM_REQ_EVENT, vcpu);
  2096. return 0;
  2097. }
  2098. static int kvm_vcpu_ioctl_nmi(struct kvm_vcpu *vcpu)
  2099. {
  2100. kvm_inject_nmi(vcpu);
  2101. return 0;
  2102. }
  2103. static int vcpu_ioctl_tpr_access_reporting(struct kvm_vcpu *vcpu,
  2104. struct kvm_tpr_access_ctl *tac)
  2105. {
  2106. if (tac->flags)
  2107. return -EINVAL;
  2108. vcpu->arch.tpr_access_reporting = !!tac->enabled;
  2109. return 0;
  2110. }
  2111. static int kvm_vcpu_ioctl_x86_setup_mce(struct kvm_vcpu *vcpu,
  2112. u64 mcg_cap)
  2113. {
  2114. int r;
  2115. unsigned bank_num = mcg_cap & 0xff, bank;
  2116. r = -EINVAL;
  2117. if (!bank_num || bank_num >= KVM_MAX_MCE_BANKS)
  2118. goto out;
  2119. if (mcg_cap & ~(KVM_MCE_CAP_SUPPORTED | 0xff | 0xff0000))
  2120. goto out;
  2121. r = 0;
  2122. vcpu->arch.mcg_cap = mcg_cap;
  2123. /* Init IA32_MCG_CTL to all 1s */
  2124. if (mcg_cap & MCG_CTL_P)
  2125. vcpu->arch.mcg_ctl = ~(u64)0;
  2126. /* Init IA32_MCi_CTL to all 1s */
  2127. for (bank = 0; bank < bank_num; bank++)
  2128. vcpu->arch.mce_banks[bank*4] = ~(u64)0;
  2129. out:
  2130. return r;
  2131. }
  2132. static int kvm_vcpu_ioctl_x86_set_mce(struct kvm_vcpu *vcpu,
  2133. struct kvm_x86_mce *mce)
  2134. {
  2135. u64 mcg_cap = vcpu->arch.mcg_cap;
  2136. unsigned bank_num = mcg_cap & 0xff;
  2137. u64 *banks = vcpu->arch.mce_banks;
  2138. if (mce->bank >= bank_num || !(mce->status & MCI_STATUS_VAL))
  2139. return -EINVAL;
  2140. /*
  2141. * if IA32_MCG_CTL is not all 1s, the uncorrected error
  2142. * reporting is disabled
  2143. */
  2144. if ((mce->status & MCI_STATUS_UC) && (mcg_cap & MCG_CTL_P) &&
  2145. vcpu->arch.mcg_ctl != ~(u64)0)
  2146. return 0;
  2147. banks += 4 * mce->bank;
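/*
 * Each MCE bank occupies four consecutive slots in mce_banks, mirroring
 * the MSR layout: banks[0] = CTL, banks[1] = STATUS, banks[2] = ADDR,
 * banks[3] = MISC, which is what the accesses below rely on.
 */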
  2148. /*
  2149. * if IA32_MCi_CTL is not all 1s, the uncorrected error
  2150. * reporting is disabled for the bank
  2151. */
  2152. if ((mce->status & MCI_STATUS_UC) && banks[0] != ~(u64)0)
  2153. return 0;
  2154. if (mce->status & MCI_STATUS_UC) {
  2155. if ((vcpu->arch.mcg_status & MCG_STATUS_MCIP) ||
  2156. !kvm_read_cr4_bits(vcpu, X86_CR4_MCE)) {
  2157. kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
  2158. return 0;
  2159. }
  2160. if (banks[1] & MCI_STATUS_VAL)
  2161. mce->status |= MCI_STATUS_OVER;
  2162. banks[2] = mce->addr;
  2163. banks[3] = mce->misc;
  2164. vcpu->arch.mcg_status = mce->mcg_status;
  2165. banks[1] = mce->status;
  2166. kvm_queue_exception(vcpu, MC_VECTOR);
  2167. } else if (!(banks[1] & MCI_STATUS_VAL)
  2168. || !(banks[1] & MCI_STATUS_UC)) {
  2169. if (banks[1] & MCI_STATUS_VAL)
  2170. mce->status |= MCI_STATUS_OVER;
  2171. banks[2] = mce->addr;
  2172. banks[3] = mce->misc;
  2173. banks[1] = mce->status;
  2174. } else
  2175. banks[1] |= MCI_STATUS_OVER;
  2176. return 0;
  2177. }
  2178. static void kvm_vcpu_ioctl_x86_get_vcpu_events(struct kvm_vcpu *vcpu,
  2179. struct kvm_vcpu_events *events)
  2180. {
  2181. process_nmi(vcpu);
  2182. events->exception.injected =
  2183. vcpu->arch.exception.pending &&
  2184. !kvm_exception_is_soft(vcpu->arch.exception.nr);
  2185. events->exception.nr = vcpu->arch.exception.nr;
  2186. events->exception.has_error_code = vcpu->arch.exception.has_error_code;
  2187. events->exception.pad = 0;
  2188. events->exception.error_code = vcpu->arch.exception.error_code;
  2189. events->interrupt.injected =
  2190. vcpu->arch.interrupt.pending && !vcpu->arch.interrupt.soft;
  2191. events->interrupt.nr = vcpu->arch.interrupt.nr;
  2192. events->interrupt.soft = 0;
  2193. events->interrupt.shadow =
  2194. kvm_x86_ops->get_interrupt_shadow(vcpu,
  2195. KVM_X86_SHADOW_INT_MOV_SS | KVM_X86_SHADOW_INT_STI);
  2196. events->nmi.injected = vcpu->arch.nmi_injected;
  2197. events->nmi.pending = vcpu->arch.nmi_pending != 0;
  2198. events->nmi.masked = kvm_x86_ops->get_nmi_mask(vcpu);
  2199. events->nmi.pad = 0;
  2200. events->sipi_vector = vcpu->arch.sipi_vector;
  2201. events->flags = (KVM_VCPUEVENT_VALID_NMI_PENDING
  2202. | KVM_VCPUEVENT_VALID_SIPI_VECTOR
  2203. | KVM_VCPUEVENT_VALID_SHADOW);
  2204. memset(&events->reserved, 0, sizeof(events->reserved));
  2205. }
  2206. static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu,
  2207. struct kvm_vcpu_events *events)
  2208. {
  2209. if (events->flags & ~(KVM_VCPUEVENT_VALID_NMI_PENDING
  2210. | KVM_VCPUEVENT_VALID_SIPI_VECTOR
  2211. | KVM_VCPUEVENT_VALID_SHADOW))
  2212. return -EINVAL;
  2213. process_nmi(vcpu);
  2214. vcpu->arch.exception.pending = events->exception.injected;
  2215. vcpu->arch.exception.nr = events->exception.nr;
  2216. vcpu->arch.exception.has_error_code = events->exception.has_error_code;
  2217. vcpu->arch.exception.error_code = events->exception.error_code;
  2218. vcpu->arch.interrupt.pending = events->interrupt.injected;
  2219. vcpu->arch.interrupt.nr = events->interrupt.nr;
  2220. vcpu->arch.interrupt.soft = events->interrupt.soft;
  2221. if (events->flags & KVM_VCPUEVENT_VALID_SHADOW)
  2222. kvm_x86_ops->set_interrupt_shadow(vcpu,
  2223. events->interrupt.shadow);
  2224. vcpu->arch.nmi_injected = events->nmi.injected;
  2225. if (events->flags & KVM_VCPUEVENT_VALID_NMI_PENDING)
  2226. vcpu->arch.nmi_pending = events->nmi.pending;
  2227. kvm_x86_ops->set_nmi_mask(vcpu, events->nmi.masked);
  2228. if (events->flags & KVM_VCPUEVENT_VALID_SIPI_VECTOR)
  2229. vcpu->arch.sipi_vector = events->sipi_vector;
  2230. kvm_make_request(KVM_REQ_EVENT, vcpu);
  2231. return 0;
  2232. }
  2233. static void kvm_vcpu_ioctl_x86_get_debugregs(struct kvm_vcpu *vcpu,
  2234. struct kvm_debugregs *dbgregs)
  2235. {
  2236. memcpy(dbgregs->db, vcpu->arch.db, sizeof(vcpu->arch.db));
  2237. dbgregs->dr6 = vcpu->arch.dr6;
  2238. dbgregs->dr7 = vcpu->arch.dr7;
  2239. dbgregs->flags = 0;
  2240. memset(&dbgregs->reserved, 0, sizeof(dbgregs->reserved));
  2241. }
  2242. static int kvm_vcpu_ioctl_x86_set_debugregs(struct kvm_vcpu *vcpu,
  2243. struct kvm_debugregs *dbgregs)
  2244. {
  2245. if (dbgregs->flags)
  2246. return -EINVAL;
  2247. memcpy(vcpu->arch.db, dbgregs->db, sizeof(vcpu->arch.db));
  2248. vcpu->arch.dr6 = dbgregs->dr6;
  2249. vcpu->arch.dr7 = dbgregs->dr7;
  2250. return 0;
  2251. }
  2252. static void kvm_vcpu_ioctl_x86_get_xsave(struct kvm_vcpu *vcpu,
  2253. struct kvm_xsave *guest_xsave)
  2254. {
  2255. if (cpu_has_xsave)
  2256. memcpy(guest_xsave->region,
  2257. &vcpu->arch.guest_fpu.state->xsave,
  2258. xstate_size);
  2259. else {
  2260. memcpy(guest_xsave->region,
  2261. &vcpu->arch.guest_fpu.state->fxsave,
  2262. sizeof(struct i387_fxsave_struct));
  2263. *(u64 *)&guest_xsave->region[XSAVE_HDR_OFFSET / sizeof(u32)] =
  2264. XSTATE_FPSSE;
  2265. }
  2266. }
  2267. static int kvm_vcpu_ioctl_x86_set_xsave(struct kvm_vcpu *vcpu,
  2268. struct kvm_xsave *guest_xsave)
  2269. {
  2270. u64 xstate_bv =
  2271. *(u64 *)&guest_xsave->region[XSAVE_HDR_OFFSET / sizeof(u32)];
  2272. if (cpu_has_xsave)
  2273. memcpy(&vcpu->arch.guest_fpu.state->xsave,
  2274. guest_xsave->region, xstate_size);
  2275. else {
  2276. if (xstate_bv & ~XSTATE_FPSSE)
  2277. return -EINVAL;
  2278. memcpy(&vcpu->arch.guest_fpu.state->fxsave,
  2279. guest_xsave->region, sizeof(struct i387_fxsave_struct));
  2280. }
  2281. return 0;
  2282. }
  2283. static void kvm_vcpu_ioctl_x86_get_xcrs(struct kvm_vcpu *vcpu,
  2284. struct kvm_xcrs *guest_xcrs)
  2285. {
  2286. if (!cpu_has_xsave) {
  2287. guest_xcrs->nr_xcrs = 0;
  2288. return;
  2289. }
  2290. guest_xcrs->nr_xcrs = 1;
  2291. guest_xcrs->flags = 0;
  2292. guest_xcrs->xcrs[0].xcr = XCR_XFEATURE_ENABLED_MASK;
  2293. guest_xcrs->xcrs[0].value = vcpu->arch.xcr0;
  2294. }
  2295. static int kvm_vcpu_ioctl_x86_set_xcrs(struct kvm_vcpu *vcpu,
  2296. struct kvm_xcrs *guest_xcrs)
  2297. {
  2298. int i, r = 0;
  2299. if (!cpu_has_xsave)
  2300. return -EINVAL;
  2301. if (guest_xcrs->nr_xcrs > KVM_MAX_XCRS || guest_xcrs->flags)
  2302. return -EINVAL;
  2303. for (i = 0; i < guest_xcrs->nr_xcrs; i++)
  2304. /* Only support XCR0 currently */
  2305. if (guest_xcrs->xcrs[0].xcr == XCR_XFEATURE_ENABLED_MASK) {
  2306. r = __kvm_set_xcr(vcpu, XCR_XFEATURE_ENABLED_MASK,
  2307. guest_xcrs->xcrs[0].value);
  2308. break;
  2309. }
  2310. if (r)
  2311. r = -EINVAL;
  2312. return r;
  2313. }
  2314. /*
  2315. * kvm_set_guest_paused() indicates to the guest kernel that it has been
  2316. * stopped by the hypervisor. This function will be called from the host only.
  2317. * EINVAL is returned when the host attempts to set the flag for a guest that
  2318. * does not support pv clocks.
  2319. */
  2320. static int kvm_set_guest_paused(struct kvm_vcpu *vcpu)
  2321. {
  2322. struct pvclock_vcpu_time_info *src = &vcpu->arch.hv_clock;
  2323. if (!vcpu->arch.time_page)
  2324. return -EINVAL;
  2325. src->flags |= PVCLOCK_GUEST_STOPPED;
  2326. mark_page_dirty(vcpu->kvm, vcpu->arch.time >> PAGE_SHIFT);
  2327. kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
  2328. return 0;
  2329. }
  2330. long kvm_arch_vcpu_ioctl(struct file *filp,
  2331. unsigned int ioctl, unsigned long arg)
  2332. {
  2333. struct kvm_vcpu *vcpu = filp->private_data;
  2334. void __user *argp = (void __user *)arg;
  2335. int r;
  2336. union {
  2337. struct kvm_lapic_state *lapic;
  2338. struct kvm_xsave *xsave;
  2339. struct kvm_xcrs *xcrs;
  2340. void *buffer;
  2341. } u;
  2342. u.buffer = NULL;
  2343. switch (ioctl) {
  2344. case KVM_GET_LAPIC: {
  2345. r = -EINVAL;
  2346. if (!vcpu->arch.apic)
  2347. goto out;
  2348. u.lapic = kzalloc(sizeof(struct kvm_lapic_state), GFP_KERNEL);
  2349. r = -ENOMEM;
  2350. if (!u.lapic)
  2351. goto out;
  2352. r = kvm_vcpu_ioctl_get_lapic(vcpu, u.lapic);
  2353. if (r)
  2354. goto out;
  2355. r = -EFAULT;
  2356. if (copy_to_user(argp, u.lapic, sizeof(struct kvm_lapic_state)))
  2357. goto out;
  2358. r = 0;
  2359. break;
  2360. }
  2361. case KVM_SET_LAPIC: {
  2362. r = -EINVAL;
  2363. if (!vcpu->arch.apic)
  2364. goto out;
  2365. u.lapic = memdup_user(argp, sizeof(*u.lapic));
  2366. if (IS_ERR(u.lapic)) {
  2367. r = PTR_ERR(u.lapic);
  2368. goto out;
  2369. }
  2370. r = kvm_vcpu_ioctl_set_lapic(vcpu, u.lapic);
  2371. if (r)
  2372. goto out;
  2373. r = 0;
  2374. break;
  2375. }
  2376. case KVM_INTERRUPT: {
  2377. struct kvm_interrupt irq;
  2378. r = -EFAULT;
  2379. if (copy_from_user(&irq, argp, sizeof irq))
  2380. goto out;
  2381. r = kvm_vcpu_ioctl_interrupt(vcpu, &irq);
  2382. if (r)
  2383. goto out;
  2384. r = 0;
  2385. break;
  2386. }
  2387. case KVM_NMI: {
  2388. r = kvm_vcpu_ioctl_nmi(vcpu);
  2389. if (r)
  2390. goto out;
  2391. r = 0;
  2392. break;
  2393. }
  2394. case KVM_SET_CPUID: {
  2395. struct kvm_cpuid __user *cpuid_arg = argp;
  2396. struct kvm_cpuid cpuid;
  2397. r = -EFAULT;
  2398. if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
  2399. goto out;
  2400. r = kvm_vcpu_ioctl_set_cpuid(vcpu, &cpuid, cpuid_arg->entries);
  2401. if (r)
  2402. goto out;
  2403. break;
  2404. }
  2405. case KVM_SET_CPUID2: {
  2406. struct kvm_cpuid2 __user *cpuid_arg = argp;
  2407. struct kvm_cpuid2 cpuid;
  2408. r = -EFAULT;
  2409. if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
  2410. goto out;
  2411. r = kvm_vcpu_ioctl_set_cpuid2(vcpu, &cpuid,
  2412. cpuid_arg->entries);
  2413. if (r)
  2414. goto out;
  2415. break;
  2416. }
  2417. case KVM_GET_CPUID2: {
  2418. struct kvm_cpuid2 __user *cpuid_arg = argp;
  2419. struct kvm_cpuid2 cpuid;
  2420. r = -EFAULT;
  2421. if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
  2422. goto out;
  2423. r = kvm_vcpu_ioctl_get_cpuid2(vcpu, &cpuid,
  2424. cpuid_arg->entries);
  2425. if (r)
  2426. goto out;
  2427. r = -EFAULT;
  2428. if (copy_to_user(cpuid_arg, &cpuid, sizeof cpuid))
  2429. goto out;
  2430. r = 0;
  2431. break;
  2432. }
  2433. case KVM_GET_MSRS:
  2434. r = msr_io(vcpu, argp, kvm_get_msr, 1);
  2435. break;
  2436. case KVM_SET_MSRS:
  2437. r = msr_io(vcpu, argp, do_set_msr, 0);
  2438. break;
  2439. case KVM_TPR_ACCESS_REPORTING: {
  2440. struct kvm_tpr_access_ctl tac;
  2441. r = -EFAULT;
  2442. if (copy_from_user(&tac, argp, sizeof tac))
  2443. goto out;
  2444. r = vcpu_ioctl_tpr_access_reporting(vcpu, &tac);
  2445. if (r)
  2446. goto out;
  2447. r = -EFAULT;
  2448. if (copy_to_user(argp, &tac, sizeof tac))
  2449. goto out;
  2450. r = 0;
  2451. break;
  2452. };
  2453. case KVM_SET_VAPIC_ADDR: {
  2454. struct kvm_vapic_addr va;
  2455. r = -EINVAL;
  2456. if (!irqchip_in_kernel(vcpu->kvm))
  2457. goto out;
  2458. r = -EFAULT;
  2459. if (copy_from_user(&va, argp, sizeof va))
  2460. goto out;
  2461. r = 0;
  2462. kvm_lapic_set_vapic_addr(vcpu, va.vapic_addr);
  2463. break;
  2464. }
  2465. case KVM_X86_SETUP_MCE: {
  2466. u64 mcg_cap;
  2467. r = -EFAULT;
  2468. if (copy_from_user(&mcg_cap, argp, sizeof mcg_cap))
  2469. goto out;
  2470. r = kvm_vcpu_ioctl_x86_setup_mce(vcpu, mcg_cap);
  2471. break;
  2472. }
  2473. case KVM_X86_SET_MCE: {
  2474. struct kvm_x86_mce mce;
  2475. r = -EFAULT;
  2476. if (copy_from_user(&mce, argp, sizeof mce))
  2477. goto out;
  2478. r = kvm_vcpu_ioctl_x86_set_mce(vcpu, &mce);
  2479. break;
  2480. }
  2481. case KVM_GET_VCPU_EVENTS: {
  2482. struct kvm_vcpu_events events;
  2483. kvm_vcpu_ioctl_x86_get_vcpu_events(vcpu, &events);
  2484. r = -EFAULT;
  2485. if (copy_to_user(argp, &events, sizeof(struct kvm_vcpu_events)))
  2486. break;
  2487. r = 0;
  2488. break;
  2489. }
  2490. case KVM_SET_VCPU_EVENTS: {
  2491. struct kvm_vcpu_events events;
  2492. r = -EFAULT;
  2493. if (copy_from_user(&events, argp, sizeof(struct kvm_vcpu_events)))
  2494. break;
  2495. r = kvm_vcpu_ioctl_x86_set_vcpu_events(vcpu, &events);
  2496. break;
  2497. }
  2498. case KVM_GET_DEBUGREGS: {
  2499. struct kvm_debugregs dbgregs;
  2500. kvm_vcpu_ioctl_x86_get_debugregs(vcpu, &dbgregs);
  2501. r = -EFAULT;
  2502. if (copy_to_user(argp, &dbgregs,
  2503. sizeof(struct kvm_debugregs)))
  2504. break;
  2505. r = 0;
  2506. break;
  2507. }
  2508. case KVM_SET_DEBUGREGS: {
  2509. struct kvm_debugregs dbgregs;
  2510. r = -EFAULT;
  2511. if (copy_from_user(&dbgregs, argp,
  2512. sizeof(struct kvm_debugregs)))
  2513. break;
  2514. r = kvm_vcpu_ioctl_x86_set_debugregs(vcpu, &dbgregs);
  2515. break;
  2516. }
  2517. case KVM_GET_XSAVE: {
  2518. u.xsave = kzalloc(sizeof(struct kvm_xsave), GFP_KERNEL);
  2519. r = -ENOMEM;
  2520. if (!u.xsave)
  2521. break;
  2522. kvm_vcpu_ioctl_x86_get_xsave(vcpu, u.xsave);
  2523. r = -EFAULT;
  2524. if (copy_to_user(argp, u.xsave, sizeof(struct kvm_xsave)))
  2525. break;
  2526. r = 0;
  2527. break;
  2528. }
  2529. case KVM_SET_XSAVE: {
  2530. u.xsave = memdup_user(argp, sizeof(*u.xsave));
  2531. if (IS_ERR(u.xsave)) {
  2532. r = PTR_ERR(u.xsave);
  2533. goto out;
  2534. }
  2535. r = kvm_vcpu_ioctl_x86_set_xsave(vcpu, u.xsave);
  2536. break;
  2537. }
  2538. case KVM_GET_XCRS: {
  2539. u.xcrs = kzalloc(sizeof(struct kvm_xcrs), GFP_KERNEL);
  2540. r = -ENOMEM;
  2541. if (!u.xcrs)
  2542. break;
  2543. kvm_vcpu_ioctl_x86_get_xcrs(vcpu, u.xcrs);
  2544. r = -EFAULT;
  2545. if (copy_to_user(argp, u.xcrs,
  2546. sizeof(struct kvm_xcrs)))
  2547. break;
  2548. r = 0;
  2549. break;
  2550. }
  2551. case KVM_SET_XCRS: {
  2552. u.xcrs = memdup_user(argp, sizeof(*u.xcrs));
  2553. if (IS_ERR(u.xcrs)) {
  2554. r = PTR_ERR(u.xcrs);
  2555. goto out;
  2556. }
  2557. r = kvm_vcpu_ioctl_x86_set_xcrs(vcpu, u.xcrs);
  2558. break;
  2559. }
  2560. case KVM_SET_TSC_KHZ: {
  2561. u32 user_tsc_khz;
  2562. r = -EINVAL;
  2563. user_tsc_khz = (u32)arg;
  2564. if (user_tsc_khz >= kvm_max_guest_tsc_khz)
  2565. goto out;
  2566. if (user_tsc_khz == 0)
  2567. user_tsc_khz = tsc_khz;
  2568. kvm_set_tsc_khz(vcpu, user_tsc_khz);
  2569. r = 0;
  2570. goto out;
  2571. }
  2572. case KVM_GET_TSC_KHZ: {
  2573. r = vcpu->arch.virtual_tsc_khz;
  2574. goto out;
  2575. }
  2576. case KVM_KVMCLOCK_CTRL: {
  2577. r = kvm_set_guest_paused(vcpu);
  2578. goto out;
  2579. }
  2580. default:
  2581. r = -EINVAL;
  2582. }
  2583. out:
  2584. kfree(u.buffer);
  2585. return r;
  2586. }
  2587. int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
  2588. {
  2589. return VM_FAULT_SIGBUS;
  2590. }
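/*
 * The three-page TSS region configured by userspace must fit below 4GB,
 * so reject addresses within the last three pages of the 32-bit space.
 */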
  2591. static int kvm_vm_ioctl_set_tss_addr(struct kvm *kvm, unsigned long addr)
  2592. {
  2593. int ret;
  2594. if (addr > (unsigned int)(-3 * PAGE_SIZE))
  2595. return -1;
  2596. ret = kvm_x86_ops->set_tss_addr(kvm, addr);
  2597. return ret;
  2598. }
  2599. static int kvm_vm_ioctl_set_identity_map_addr(struct kvm *kvm,
  2600. u64 ident_addr)
  2601. {
  2602. kvm->arch.ept_identity_map_addr = ident_addr;
  2603. return 0;
  2604. }
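/*
 * Update the MMU page limit requested by userspace; slots_lock and
 * mmu_lock are taken so the change cannot race with memslot updates or
 * page faults.
 */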
  2605. static int kvm_vm_ioctl_set_nr_mmu_pages(struct kvm *kvm,
  2606. u32 kvm_nr_mmu_pages)
  2607. {
  2608. if (kvm_nr_mmu_pages < KVM_MIN_ALLOC_MMU_PAGES)
  2609. return -EINVAL;
  2610. mutex_lock(&kvm->slots_lock);
  2611. spin_lock(&kvm->mmu_lock);
  2612. kvm_mmu_change_mmu_pages(kvm, kvm_nr_mmu_pages);
  2613. kvm->arch.n_requested_mmu_pages = kvm_nr_mmu_pages;
  2614. spin_unlock(&kvm->mmu_lock);
  2615. mutex_unlock(&kvm->slots_lock);
  2616. return 0;
  2617. }
  2618. static int kvm_vm_ioctl_get_nr_mmu_pages(struct kvm *kvm)
  2619. {
  2620. return kvm->arch.n_max_mmu_pages;
  2621. }
  2622. static int kvm_vm_ioctl_get_irqchip(struct kvm *kvm, struct kvm_irqchip *chip)
  2623. {
  2624. int r;
  2625. r = 0;
  2626. switch (chip->chip_id) {
  2627. case KVM_IRQCHIP_PIC_MASTER:
  2628. memcpy(&chip->chip.pic,
  2629. &pic_irqchip(kvm)->pics[0],
  2630. sizeof(struct kvm_pic_state));
  2631. break;
  2632. case KVM_IRQCHIP_PIC_SLAVE:
  2633. memcpy(&chip->chip.pic,
  2634. &pic_irqchip(kvm)->pics[1],
  2635. sizeof(struct kvm_pic_state));
  2636. break;
  2637. case KVM_IRQCHIP_IOAPIC:
  2638. r = kvm_get_ioapic(kvm, &chip->chip.ioapic);
  2639. break;
  2640. default:
  2641. r = -EINVAL;
  2642. break;
  2643. }
  2644. return r;
  2645. }
  2646. static int kvm_vm_ioctl_set_irqchip(struct kvm *kvm, struct kvm_irqchip *chip)
  2647. {
  2648. int r;
  2649. r = 0;
  2650. switch (chip->chip_id) {
  2651. case KVM_IRQCHIP_PIC_MASTER:
  2652. spin_lock(&pic_irqchip(kvm)->lock);
  2653. memcpy(&pic_irqchip(kvm)->pics[0],
  2654. &chip->chip.pic,
  2655. sizeof(struct kvm_pic_state));
  2656. spin_unlock(&pic_irqchip(kvm)->lock);
  2657. break;
  2658. case KVM_IRQCHIP_PIC_SLAVE:
  2659. spin_lock(&pic_irqchip(kvm)->lock);
  2660. memcpy(&pic_irqchip(kvm)->pics[1],
  2661. &chip->chip.pic,
  2662. sizeof(struct kvm_pic_state));
  2663. spin_unlock(&pic_irqchip(kvm)->lock);
  2664. break;
  2665. case KVM_IRQCHIP_IOAPIC:
  2666. r = kvm_set_ioapic(kvm, &chip->chip.ioapic);
  2667. break;
  2668. default:
  2669. r = -EINVAL;
  2670. break;
  2671. }
  2672. kvm_pic_update_irq(pic_irqchip(kvm));
  2673. return r;
  2674. }
  2675. static int kvm_vm_ioctl_get_pit(struct kvm *kvm, struct kvm_pit_state *ps)
  2676. {
  2677. int r = 0;
  2678. mutex_lock(&kvm->arch.vpit->pit_state.lock);
  2679. memcpy(ps, &kvm->arch.vpit->pit_state, sizeof(struct kvm_pit_state));
  2680. mutex_unlock(&kvm->arch.vpit->pit_state.lock);
  2681. return r;
  2682. }
  2683. static int kvm_vm_ioctl_set_pit(struct kvm *kvm, struct kvm_pit_state *ps)
  2684. {
  2685. int r = 0;
  2686. mutex_lock(&kvm->arch.vpit->pit_state.lock);
  2687. memcpy(&kvm->arch.vpit->pit_state, ps, sizeof(struct kvm_pit_state));
  2688. kvm_pit_load_count(kvm, 0, ps->channels[0].count, 0);
  2689. mutex_unlock(&kvm->arch.vpit->pit_state.lock);
  2690. return r;
  2691. }
  2692. static int kvm_vm_ioctl_get_pit2(struct kvm *kvm, struct kvm_pit_state2 *ps)
  2693. {
  2694. int r = 0;
  2695. mutex_lock(&kvm->arch.vpit->pit_state.lock);
  2696. memcpy(ps->channels, &kvm->arch.vpit->pit_state.channels,
  2697. sizeof(ps->channels));
  2698. ps->flags = kvm->arch.vpit->pit_state.flags;
  2699. mutex_unlock(&kvm->arch.vpit->pit_state.lock);
  2700. memset(&ps->reserved, 0, sizeof(ps->reserved));
  2701. return r;
  2702. }
  2703. static int kvm_vm_ioctl_set_pit2(struct kvm *kvm, struct kvm_pit_state2 *ps)
  2704. {
  2705. int r = 0, start = 0;
  2706. u32 prev_legacy, cur_legacy;
  2707. mutex_lock(&kvm->arch.vpit->pit_state.lock);
  2708. prev_legacy = kvm->arch.vpit->pit_state.flags & KVM_PIT_FLAGS_HPET_LEGACY;
  2709. cur_legacy = ps->flags & KVM_PIT_FLAGS_HPET_LEGACY;
  2710. if (!prev_legacy && cur_legacy)
  2711. start = 1;
  2712. memcpy(&kvm->arch.vpit->pit_state.channels, &ps->channels,
  2713. sizeof(kvm->arch.vpit->pit_state.channels));
  2714. kvm->arch.vpit->pit_state.flags = ps->flags;
  2715. kvm_pit_load_count(kvm, 0, kvm->arch.vpit->pit_state.channels[0].count, start);
  2716. mutex_unlock(&kvm->arch.vpit->pit_state.lock);
  2717. return r;
  2718. }
  2719. static int kvm_vm_ioctl_reinject(struct kvm *kvm,
  2720. struct kvm_reinject_control *control)
  2721. {
  2722. if (!kvm->arch.vpit)
  2723. return -ENXIO;
  2724. mutex_lock(&kvm->arch.vpit->pit_state.lock);
  2725. kvm->arch.vpit->pit_state.pit_timer.reinject = control->pit_reinject;
  2726. mutex_unlock(&kvm->arch.vpit->pit_state.lock);
  2727. return 0;
  2728. }
  2729. /**
  2730. * kvm_vm_ioctl_get_dirty_log - get and clear the log of dirty pages in a slot
  2731. * @kvm: kvm instance
  2732. * @log: slot id and address to which we copy the log
  2733. *
2734. * We need to keep in mind that VCPU threads can write to the bitmap
  2735. * concurrently. So, to avoid losing data, we keep the following order for
  2736. * each bit:
  2737. *
  2738. * 1. Take a snapshot of the bit and clear it if needed.
  2739. * 2. Write protect the corresponding page.
2740. * 3. Flush TLBs if needed.
2741. * 4. Copy the snapshot to userspace.
  2742. *
  2743. * Between 2 and 3, the guest may write to the page using the remaining TLB
  2744. * entry. This is not a problem because the page will be reported dirty at
  2745. * step 4 using the snapshot taken before and step 3 ensures that successive
  2746. * writes will be logged for the next call.
  2747. */
  2748. int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
  2749. {
  2750. int r;
  2751. struct kvm_memory_slot *memslot;
  2752. unsigned long n, i;
  2753. unsigned long *dirty_bitmap;
  2754. unsigned long *dirty_bitmap_buffer;
  2755. bool is_dirty = false;
  2756. mutex_lock(&kvm->slots_lock);
  2757. r = -EINVAL;
  2758. if (log->slot >= KVM_MEMORY_SLOTS)
  2759. goto out;
  2760. memslot = id_to_memslot(kvm->memslots, log->slot);
  2761. dirty_bitmap = memslot->dirty_bitmap;
  2762. r = -ENOENT;
  2763. if (!dirty_bitmap)
  2764. goto out;
  2765. n = kvm_dirty_bitmap_bytes(memslot);
  2766. dirty_bitmap_buffer = dirty_bitmap + n / sizeof(long);
  2767. memset(dirty_bitmap_buffer, 0, n);
  2768. spin_lock(&kvm->mmu_lock);
  2769. for (i = 0; i < n / sizeof(long); i++) {
  2770. unsigned long mask;
  2771. gfn_t offset;
  2772. if (!dirty_bitmap[i])
  2773. continue;
  2774. is_dirty = true;
  2775. mask = xchg(&dirty_bitmap[i], 0);
  2776. dirty_bitmap_buffer[i] = mask;
  2777. offset = i * BITS_PER_LONG;
  2778. kvm_mmu_write_protect_pt_masked(kvm, memslot, offset, mask);
  2779. }
  2780. if (is_dirty)
  2781. kvm_flush_remote_tlbs(kvm);
  2782. spin_unlock(&kvm->mmu_lock);
  2783. r = -EFAULT;
  2784. if (copy_to_user(log->dirty_bitmap, dirty_bitmap_buffer, n))
  2785. goto out;
  2786. r = 0;
  2787. out:
  2788. mutex_unlock(&kvm->slots_lock);
  2789. return r;
  2790. }
  2791. long kvm_arch_vm_ioctl(struct file *filp,
  2792. unsigned int ioctl, unsigned long arg)
  2793. {
  2794. struct kvm *kvm = filp->private_data;
  2795. void __user *argp = (void __user *)arg;
  2796. int r = -ENOTTY;
  2797. /*
  2798. * This union makes it completely explicit to gcc-3.x
  2799. * that these two variables' stack usage should be
  2800. * combined, not added together.
  2801. */
  2802. union {
  2803. struct kvm_pit_state ps;
  2804. struct kvm_pit_state2 ps2;
  2805. struct kvm_pit_config pit_config;
  2806. } u;
  2807. switch (ioctl) {
  2808. case KVM_SET_TSS_ADDR:
  2809. r = kvm_vm_ioctl_set_tss_addr(kvm, arg);
  2810. if (r < 0)
  2811. goto out;
  2812. break;
  2813. case KVM_SET_IDENTITY_MAP_ADDR: {
  2814. u64 ident_addr;
  2815. r = -EFAULT;
  2816. if (copy_from_user(&ident_addr, argp, sizeof ident_addr))
  2817. goto out;
  2818. r = kvm_vm_ioctl_set_identity_map_addr(kvm, ident_addr);
  2819. if (r < 0)
  2820. goto out;
  2821. break;
  2822. }
  2823. case KVM_SET_NR_MMU_PAGES:
  2824. r = kvm_vm_ioctl_set_nr_mmu_pages(kvm, arg);
  2825. if (r)
  2826. goto out;
  2827. break;
  2828. case KVM_GET_NR_MMU_PAGES:
  2829. r = kvm_vm_ioctl_get_nr_mmu_pages(kvm);
  2830. break;
  2831. case KVM_CREATE_IRQCHIP: {
  2832. struct kvm_pic *vpic;
  2833. mutex_lock(&kvm->lock);
  2834. r = -EEXIST;
  2835. if (kvm->arch.vpic)
  2836. goto create_irqchip_unlock;
  2837. r = -EINVAL;
  2838. if (atomic_read(&kvm->online_vcpus))
  2839. goto create_irqchip_unlock;
  2840. r = -ENOMEM;
  2841. vpic = kvm_create_pic(kvm);
  2842. if (vpic) {
  2843. r = kvm_ioapic_init(kvm);
  2844. if (r) {
  2845. mutex_lock(&kvm->slots_lock);
  2846. kvm_io_bus_unregister_dev(kvm, KVM_PIO_BUS,
  2847. &vpic->dev_master);
  2848. kvm_io_bus_unregister_dev(kvm, KVM_PIO_BUS,
  2849. &vpic->dev_slave);
  2850. kvm_io_bus_unregister_dev(kvm, KVM_PIO_BUS,
  2851. &vpic->dev_eclr);
  2852. mutex_unlock(&kvm->slots_lock);
  2853. kfree(vpic);
  2854. goto create_irqchip_unlock;
  2855. }
  2856. } else
  2857. goto create_irqchip_unlock;
  2858. smp_wmb();
  2859. kvm->arch.vpic = vpic;
  2860. smp_wmb();
  2861. r = kvm_setup_default_irq_routing(kvm);
  2862. if (r) {
  2863. mutex_lock(&kvm->slots_lock);
  2864. mutex_lock(&kvm->irq_lock);
  2865. kvm_ioapic_destroy(kvm);
  2866. kvm_destroy_pic(kvm);
  2867. mutex_unlock(&kvm->irq_lock);
  2868. mutex_unlock(&kvm->slots_lock);
  2869. }
  2870. create_irqchip_unlock:
  2871. mutex_unlock(&kvm->lock);
  2872. break;
  2873. }
  2874. case KVM_CREATE_PIT:
  2875. u.pit_config.flags = KVM_PIT_SPEAKER_DUMMY;
  2876. goto create_pit;
  2877. case KVM_CREATE_PIT2:
  2878. r = -EFAULT;
  2879. if (copy_from_user(&u.pit_config, argp,
  2880. sizeof(struct kvm_pit_config)))
  2881. goto out;
  2882. create_pit:
  2883. mutex_lock(&kvm->slots_lock);
  2884. r = -EEXIST;
  2885. if (kvm->arch.vpit)
  2886. goto create_pit_unlock;
  2887. r = -ENOMEM;
  2888. kvm->arch.vpit = kvm_create_pit(kvm, u.pit_config.flags);
  2889. if (kvm->arch.vpit)
  2890. r = 0;
  2891. create_pit_unlock:
  2892. mutex_unlock(&kvm->slots_lock);
  2893. break;
  2894. case KVM_IRQ_LINE_STATUS:
  2895. case KVM_IRQ_LINE: {
  2896. struct kvm_irq_level irq_event;
  2897. r = -EFAULT;
  2898. if (copy_from_user(&irq_event, argp, sizeof irq_event))
  2899. goto out;
  2900. r = -ENXIO;
  2901. if (irqchip_in_kernel(kvm)) {
  2902. __s32 status;
  2903. status = kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID,
  2904. irq_event.irq, irq_event.level);
  2905. if (ioctl == KVM_IRQ_LINE_STATUS) {
  2906. r = -EFAULT;
  2907. irq_event.status = status;
  2908. if (copy_to_user(argp, &irq_event,
  2909. sizeof irq_event))
  2910. goto out;
  2911. }
  2912. r = 0;
  2913. }
  2914. break;
  2915. }
  2916. case KVM_GET_IRQCHIP: {
  2917. /* 0: PIC master, 1: PIC slave, 2: IOAPIC */
  2918. struct kvm_irqchip *chip;
  2919. chip = memdup_user(argp, sizeof(*chip));
  2920. if (IS_ERR(chip)) {
  2921. r = PTR_ERR(chip);
  2922. goto out;
  2923. }
  2924. r = -ENXIO;
  2925. if (!irqchip_in_kernel(kvm))
  2926. goto get_irqchip_out;
  2927. r = kvm_vm_ioctl_get_irqchip(kvm, chip);
  2928. if (r)
  2929. goto get_irqchip_out;
  2930. r = -EFAULT;
  2931. if (copy_to_user(argp, chip, sizeof *chip))
  2932. goto get_irqchip_out;
  2933. r = 0;
  2934. get_irqchip_out:
  2935. kfree(chip);
  2936. if (r)
  2937. goto out;
  2938. break;
  2939. }
  2940. case KVM_SET_IRQCHIP: {
  2941. /* 0: PIC master, 1: PIC slave, 2: IOAPIC */
  2942. struct kvm_irqchip *chip;
  2943. chip = memdup_user(argp, sizeof(*chip));
  2944. if (IS_ERR(chip)) {
  2945. r = PTR_ERR(chip);
  2946. goto out;
  2947. }
  2948. r = -ENXIO;
  2949. if (!irqchip_in_kernel(kvm))
  2950. goto set_irqchip_out;
  2951. r = kvm_vm_ioctl_set_irqchip(kvm, chip);
  2952. if (r)
  2953. goto set_irqchip_out;
  2954. r = 0;
  2955. set_irqchip_out:
  2956. kfree(chip);
  2957. if (r)
  2958. goto out;
  2959. break;
  2960. }
  2961. case KVM_GET_PIT: {
  2962. r = -EFAULT;
  2963. if (copy_from_user(&u.ps, argp, sizeof(struct kvm_pit_state)))
  2964. goto out;
  2965. r = -ENXIO;
  2966. if (!kvm->arch.vpit)
  2967. goto out;
  2968. r = kvm_vm_ioctl_get_pit(kvm, &u.ps);
  2969. if (r)
  2970. goto out;
  2971. r = -EFAULT;
  2972. if (copy_to_user(argp, &u.ps, sizeof(struct kvm_pit_state)))
  2973. goto out;
  2974. r = 0;
  2975. break;
  2976. }
  2977. case KVM_SET_PIT: {
  2978. r = -EFAULT;
  2979. if (copy_from_user(&u.ps, argp, sizeof u.ps))
  2980. goto out;
  2981. r = -ENXIO;
  2982. if (!kvm->arch.vpit)
  2983. goto out;
  2984. r = kvm_vm_ioctl_set_pit(kvm, &u.ps);
  2985. if (r)
  2986. goto out;
  2987. r = 0;
  2988. break;
  2989. }
  2990. case KVM_GET_PIT2: {
  2991. r = -ENXIO;
  2992. if (!kvm->arch.vpit)
  2993. goto out;
  2994. r = kvm_vm_ioctl_get_pit2(kvm, &u.ps2);
  2995. if (r)
  2996. goto out;
  2997. r = -EFAULT;
  2998. if (copy_to_user(argp, &u.ps2, sizeof(u.ps2)))
  2999. goto out;
  3000. r = 0;
  3001. break;
  3002. }
  3003. case KVM_SET_PIT2: {
  3004. r = -EFAULT;
  3005. if (copy_from_user(&u.ps2, argp, sizeof(u.ps2)))
  3006. goto out;
  3007. r = -ENXIO;
  3008. if (!kvm->arch.vpit)
  3009. goto out;
  3010. r = kvm_vm_ioctl_set_pit2(kvm, &u.ps2);
  3011. if (r)
  3012. goto out;
  3013. r = 0;
  3014. break;
  3015. }
  3016. case KVM_REINJECT_CONTROL: {
  3017. struct kvm_reinject_control control;
  3018. r = -EFAULT;
  3019. if (copy_from_user(&control, argp, sizeof(control)))
  3020. goto out;
  3021. r = kvm_vm_ioctl_reinject(kvm, &control);
  3022. if (r)
  3023. goto out;
  3024. r = 0;
  3025. break;
  3026. }
  3027. case KVM_XEN_HVM_CONFIG: {
  3028. r = -EFAULT;
  3029. if (copy_from_user(&kvm->arch.xen_hvm_config, argp,
  3030. sizeof(struct kvm_xen_hvm_config)))
  3031. goto out;
  3032. r = -EINVAL;
  3033. if (kvm->arch.xen_hvm_config.flags)
  3034. goto out;
  3035. r = 0;
  3036. break;
  3037. }
  3038. case KVM_SET_CLOCK: {
  3039. struct kvm_clock_data user_ns;
  3040. u64 now_ns;
  3041. s64 delta;
  3042. r = -EFAULT;
  3043. if (copy_from_user(&user_ns, argp, sizeof(user_ns)))
  3044. goto out;
  3045. r = -EINVAL;
  3046. if (user_ns.flags)
  3047. goto out;
  3048. r = 0;
  3049. local_irq_disable();
  3050. now_ns = get_kernel_ns();
  3051. delta = user_ns.clock - now_ns;
  3052. local_irq_enable();
  3053. kvm->arch.kvmclock_offset = delta;
  3054. break;
  3055. }
  3056. case KVM_GET_CLOCK: {
  3057. struct kvm_clock_data user_ns;
  3058. u64 now_ns;
  3059. local_irq_disable();
  3060. now_ns = get_kernel_ns();
  3061. user_ns.clock = kvm->arch.kvmclock_offset + now_ns;
  3062. local_irq_enable();
  3063. user_ns.flags = 0;
  3064. memset(&user_ns.pad, 0, sizeof(user_ns.pad));
  3065. r = -EFAULT;
  3066. if (copy_to_user(argp, &user_ns, sizeof(user_ns)))
  3067. goto out;
  3068. r = 0;
  3069. break;
  3070. }
  3071. default:
  3072. ;
  3073. }
  3074. out:
  3075. return r;
  3076. }
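/*
 * Trim msrs_to_save down to the MSRs that actually exist on this host by
 * probing each one with rdmsr_safe().
 */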
  3077. static void kvm_init_msr_list(void)
  3078. {
  3079. u32 dummy[2];
  3080. unsigned i, j;
3081. /* skip the first MSRs in the list; they are KVM-specific */
  3082. for (i = j = KVM_SAVE_MSRS_BEGIN; i < ARRAY_SIZE(msrs_to_save); i++) {
  3083. if (rdmsr_safe(msrs_to_save[i], &dummy[0], &dummy[1]) < 0)
  3084. continue;
  3085. if (j < i)
  3086. msrs_to_save[j] = msrs_to_save[i];
  3087. j++;
  3088. }
  3089. num_msrs_to_save = j;
  3090. }
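/*
 * Write an MMIO access in chunks of up to 8 bytes, trying the local APIC
 * first and then the in-kernel MMIO bus; returns how many bytes were
 * handled in the kernel.
 */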
  3091. static int vcpu_mmio_write(struct kvm_vcpu *vcpu, gpa_t addr, int len,
  3092. const void *v)
  3093. {
  3094. int handled = 0;
  3095. int n;
  3096. do {
  3097. n = min(len, 8);
  3098. if (!(vcpu->arch.apic &&
  3099. !kvm_iodevice_write(&vcpu->arch.apic->dev, addr, n, v))
  3100. && kvm_io_bus_write(vcpu->kvm, KVM_MMIO_BUS, addr, n, v))
  3101. break;
  3102. handled += n;
  3103. addr += n;
  3104. len -= n;
  3105. v += n;
  3106. } while (len);
  3107. return handled;
  3108. }
  3109. static int vcpu_mmio_read(struct kvm_vcpu *vcpu, gpa_t addr, int len, void *v)
  3110. {
  3111. int handled = 0;
  3112. int n;
  3113. do {
  3114. n = min(len, 8);
  3115. if (!(vcpu->arch.apic &&
  3116. !kvm_iodevice_read(&vcpu->arch.apic->dev, addr, n, v))
  3117. && kvm_io_bus_read(vcpu->kvm, KVM_MMIO_BUS, addr, n, v))
  3118. break;
  3119. trace_kvm_mmio(KVM_TRACE_MMIO_READ, n, addr, *(u64 *)v);
  3120. handled += n;
  3121. addr += n;
  3122. len -= n;
  3123. v += n;
  3124. } while (len);
  3125. return handled;
  3126. }
  3127. static void kvm_set_segment(struct kvm_vcpu *vcpu,
  3128. struct kvm_segment *var, int seg)
  3129. {
  3130. kvm_x86_ops->set_segment(vcpu, var, seg);
  3131. }
  3132. void kvm_get_segment(struct kvm_vcpu *vcpu,
  3133. struct kvm_segment *var, int seg)
  3134. {
  3135. kvm_x86_ops->get_segment(vcpu, var, seg);
  3136. }
  3137. gpa_t translate_nested_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access)
  3138. {
  3139. gpa_t t_gpa;
  3140. struct x86_exception exception;
  3141. BUG_ON(!mmu_is_nested(vcpu));
  3142. /* NPT walks are always user-walks */
  3143. access |= PFERR_USER_MASK;
  3144. t_gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, gpa, access, &exception);
  3145. return t_gpa;
  3146. }
  3147. gpa_t kvm_mmu_gva_to_gpa_read(struct kvm_vcpu *vcpu, gva_t gva,
  3148. struct x86_exception *exception)
  3149. {
  3150. u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
  3151. return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception);
  3152. }
  3153. gpa_t kvm_mmu_gva_to_gpa_fetch(struct kvm_vcpu *vcpu, gva_t gva,
  3154. struct x86_exception *exception)
  3155. {
  3156. u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
  3157. access |= PFERR_FETCH_MASK;
  3158. return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception);
  3159. }
  3160. gpa_t kvm_mmu_gva_to_gpa_write(struct kvm_vcpu *vcpu, gva_t gva,
  3161. struct x86_exception *exception)
  3162. {
  3163. u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
  3164. access |= PFERR_WRITE_MASK;
  3165. return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception);
  3166. }
3167. /* used to access any guest's mapped memory without checking CPL */
  3168. gpa_t kvm_mmu_gva_to_gpa_system(struct kvm_vcpu *vcpu, gva_t gva,
  3169. struct x86_exception *exception)
  3170. {
  3171. return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, 0, exception);
  3172. }
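/*
 * Copy guest-virtual memory one page at a time: translate each gva with
 * the requested access rights, then read the backing guest physical
 * memory.
 */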
  3173. static int kvm_read_guest_virt_helper(gva_t addr, void *val, unsigned int bytes,
  3174. struct kvm_vcpu *vcpu, u32 access,
  3175. struct x86_exception *exception)
  3176. {
  3177. void *data = val;
  3178. int r = X86EMUL_CONTINUE;
  3179. while (bytes) {
  3180. gpa_t gpa = vcpu->arch.walk_mmu->gva_to_gpa(vcpu, addr, access,
  3181. exception);
  3182. unsigned offset = addr & (PAGE_SIZE-1);
  3183. unsigned toread = min(bytes, (unsigned)PAGE_SIZE - offset);
  3184. int ret;
  3185. if (gpa == UNMAPPED_GVA)
  3186. return X86EMUL_PROPAGATE_FAULT;
  3187. ret = kvm_read_guest(vcpu->kvm, gpa, data, toread);
  3188. if (ret < 0) {
  3189. r = X86EMUL_IO_NEEDED;
  3190. goto out;
  3191. }
  3192. bytes -= toread;
  3193. data += toread;
  3194. addr += toread;
  3195. }
  3196. out:
  3197. return r;
  3198. }
  3199. /* used for instruction fetching */
  3200. static int kvm_fetch_guest_virt(struct x86_emulate_ctxt *ctxt,
  3201. gva_t addr, void *val, unsigned int bytes,
  3202. struct x86_exception *exception)
  3203. {
  3204. struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
  3205. u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
  3206. return kvm_read_guest_virt_helper(addr, val, bytes, vcpu,
  3207. access | PFERR_FETCH_MASK,
  3208. exception);
  3209. }
  3210. int kvm_read_guest_virt(struct x86_emulate_ctxt *ctxt,
  3211. gva_t addr, void *val, unsigned int bytes,
  3212. struct x86_exception *exception)
  3213. {
  3214. struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
  3215. u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
  3216. return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, access,
  3217. exception);
  3218. }
  3219. EXPORT_SYMBOL_GPL(kvm_read_guest_virt);
  3220. static int kvm_read_guest_virt_system(struct x86_emulate_ctxt *ctxt,
  3221. gva_t addr, void *val, unsigned int bytes,
  3222. struct x86_exception *exception)
  3223. {
  3224. struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
  3225. return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, 0, exception);
  3226. }
  3227. int kvm_write_guest_virt_system(struct x86_emulate_ctxt *ctxt,
  3228. gva_t addr, void *val,
  3229. unsigned int bytes,
  3230. struct x86_exception *exception)
  3231. {
  3232. struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
  3233. void *data = val;
  3234. int r = X86EMUL_CONTINUE;
  3235. while (bytes) {
  3236. gpa_t gpa = vcpu->arch.walk_mmu->gva_to_gpa(vcpu, addr,
  3237. PFERR_WRITE_MASK,
  3238. exception);
  3239. unsigned offset = addr & (PAGE_SIZE-1);
  3240. unsigned towrite = min(bytes, (unsigned)PAGE_SIZE - offset);
  3241. int ret;
  3242. if (gpa == UNMAPPED_GVA)
  3243. return X86EMUL_PROPAGATE_FAULT;
  3244. ret = kvm_write_guest(vcpu->kvm, gpa, data, towrite);
  3245. if (ret < 0) {
  3246. r = X86EMUL_IO_NEEDED;
  3247. goto out;
  3248. }
  3249. bytes -= towrite;
  3250. data += towrite;
  3251. addr += towrite;
  3252. }
  3253. out:
  3254. return r;
  3255. }
  3256. EXPORT_SYMBOL_GPL(kvm_write_guest_virt_system);
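/*
 * Translate a gva for a possible MMIO access: returns 1 if the access
 * must be treated as MMIO (cached MMIO match or the APIC access page),
 * 0 if it hits ordinary RAM, and -1 if the translation faults.
 */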
  3257. static int vcpu_mmio_gva_to_gpa(struct kvm_vcpu *vcpu, unsigned long gva,
  3258. gpa_t *gpa, struct x86_exception *exception,
  3259. bool write)
  3260. {
  3261. u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
  3262. if (vcpu_match_mmio_gva(vcpu, gva) &&
  3263. check_write_user_access(vcpu, write, access,
  3264. vcpu->arch.access)) {
  3265. *gpa = vcpu->arch.mmio_gfn << PAGE_SHIFT |
  3266. (gva & (PAGE_SIZE - 1));
  3267. trace_vcpu_match_mmio(gva, *gpa, write, false);
  3268. return 1;
  3269. }
  3270. if (write)
  3271. access |= PFERR_WRITE_MASK;
  3272. *gpa = vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception);
  3273. if (*gpa == UNMAPPED_GVA)
  3274. return -1;
  3275. /* For APIC access vmexit */
  3276. if ((*gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE)
  3277. return 1;
  3278. if (vcpu_match_mmio_gpa(vcpu, *gpa)) {
  3279. trace_vcpu_match_mmio(gva, *gpa, write, true);
  3280. return 1;
  3281. }
  3282. return 0;
  3283. }
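/*
 * Write to guest physical memory and notify the MMU so any shadow page
 * tables covering the gpa are updated; returns 1 on success, 0 if the
 * write failed.
 */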
  3284. int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
  3285. const void *val, int bytes)
  3286. {
  3287. int ret;
  3288. ret = kvm_write_guest(vcpu->kvm, gpa, val, bytes);
  3289. if (ret < 0)
  3290. return 0;
  3291. kvm_mmu_pte_write(vcpu, gpa, val, bytes);
  3292. return 1;
  3293. }
  3294. struct read_write_emulator_ops {
  3295. int (*read_write_prepare)(struct kvm_vcpu *vcpu, void *val,
  3296. int bytes);
  3297. int (*read_write_emulate)(struct kvm_vcpu *vcpu, gpa_t gpa,
  3298. void *val, int bytes);
  3299. int (*read_write_mmio)(struct kvm_vcpu *vcpu, gpa_t gpa,
  3300. int bytes, void *val);
  3301. int (*read_write_exit_mmio)(struct kvm_vcpu *vcpu, gpa_t gpa,
  3302. void *val, int bytes);
  3303. bool write;
  3304. };
  3305. static int read_prepare(struct kvm_vcpu *vcpu, void *val, int bytes)
  3306. {
  3307. if (vcpu->mmio_read_completed) {
  3308. trace_kvm_mmio(KVM_TRACE_MMIO_READ, bytes,
  3309. vcpu->mmio_fragments[0].gpa, *(u64 *)val);
  3310. vcpu->mmio_read_completed = 0;
  3311. return 1;
  3312. }
  3313. return 0;
  3314. }
  3315. static int read_emulate(struct kvm_vcpu *vcpu, gpa_t gpa,
  3316. void *val, int bytes)
  3317. {
  3318. return !kvm_read_guest(vcpu->kvm, gpa, val, bytes);
  3319. }
  3320. static int write_emulate(struct kvm_vcpu *vcpu, gpa_t gpa,
  3321. void *val, int bytes)
  3322. {
  3323. return emulator_write_phys(vcpu, gpa, val, bytes);
  3324. }
  3325. static int write_mmio(struct kvm_vcpu *vcpu, gpa_t gpa, int bytes, void *val)
  3326. {
  3327. trace_kvm_mmio(KVM_TRACE_MMIO_WRITE, bytes, gpa, *(u64 *)val);
  3328. return vcpu_mmio_write(vcpu, gpa, bytes, val);
  3329. }
  3330. static int read_exit_mmio(struct kvm_vcpu *vcpu, gpa_t gpa,
  3331. void *val, int bytes)
  3332. {
  3333. trace_kvm_mmio(KVM_TRACE_MMIO_READ_UNSATISFIED, bytes, gpa, 0);
  3334. return X86EMUL_IO_NEEDED;
  3335. }
  3336. static int write_exit_mmio(struct kvm_vcpu *vcpu, gpa_t gpa,
  3337. void *val, int bytes)
  3338. {
  3339. struct kvm_mmio_fragment *frag = &vcpu->mmio_fragments[0];
  3340. memcpy(vcpu->run->mmio.data, frag->data, frag->len);
  3341. return X86EMUL_CONTINUE;
  3342. }
  3343. static struct read_write_emulator_ops read_emultor = {
  3344. .read_write_prepare = read_prepare,
  3345. .read_write_emulate = read_emulate,
  3346. .read_write_mmio = vcpu_mmio_read,
  3347. .read_write_exit_mmio = read_exit_mmio,
  3348. };
  3349. static struct read_write_emulator_ops write_emultor = {
  3350. .read_write_emulate = write_emulate,
  3351. .read_write_mmio = write_mmio,
  3352. .read_write_exit_mmio = write_exit_mmio,
  3353. .write = true,
  3354. };
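/*
 * Emulate a read or write that stays within one page: try ordinary guest
 * memory first, then in-kernel MMIO devices, and queue whatever is left
 * as mmio_fragments for userspace to complete.
 */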
  3355. static int emulator_read_write_onepage(unsigned long addr, void *val,
  3356. unsigned int bytes,
  3357. struct x86_exception *exception,
  3358. struct kvm_vcpu *vcpu,
  3359. struct read_write_emulator_ops *ops)
  3360. {
  3361. gpa_t gpa;
  3362. int handled, ret;
  3363. bool write = ops->write;
  3364. struct kvm_mmio_fragment *frag;
  3365. ret = vcpu_mmio_gva_to_gpa(vcpu, addr, &gpa, exception, write);
  3366. if (ret < 0)
  3367. return X86EMUL_PROPAGATE_FAULT;
  3368. /* For APIC access vmexit */
  3369. if (ret)
  3370. goto mmio;
  3371. if (ops->read_write_emulate(vcpu, gpa, val, bytes))
  3372. return X86EMUL_CONTINUE;
  3373. mmio:
  3374. /*
  3375. * Is this MMIO handled locally?
  3376. */
  3377. handled = ops->read_write_mmio(vcpu, gpa, bytes, val);
  3378. if (handled == bytes)
  3379. return X86EMUL_CONTINUE;
  3380. gpa += handled;
  3381. bytes -= handled;
  3382. val += handled;
  3383. while (bytes) {
  3384. unsigned now = min(bytes, 8U);
  3385. frag = &vcpu->mmio_fragments[vcpu->mmio_nr_fragments++];
  3386. frag->gpa = gpa;
  3387. frag->data = val;
  3388. frag->len = now;
  3389. gpa += now;
  3390. val += now;
  3391. bytes -= now;
  3392. }
  3393. return X86EMUL_CONTINUE;
  3394. }
  3395. int emulator_read_write(struct x86_emulate_ctxt *ctxt, unsigned long addr,
  3396. void *val, unsigned int bytes,
  3397. struct x86_exception *exception,
  3398. struct read_write_emulator_ops *ops)
  3399. {
  3400. struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
  3401. gpa_t gpa;
  3402. int rc;
  3403. if (ops->read_write_prepare &&
  3404. ops->read_write_prepare(vcpu, val, bytes))
  3405. return X86EMUL_CONTINUE;
  3406. vcpu->mmio_nr_fragments = 0;
  3407. /* Crossing a page boundary? */
  3408. if (((addr + bytes - 1) ^ addr) & PAGE_MASK) {
  3409. int now;
  3410. now = -addr & ~PAGE_MASK;
  3411. rc = emulator_read_write_onepage(addr, val, now, exception,
  3412. vcpu, ops);
  3413. if (rc != X86EMUL_CONTINUE)
  3414. return rc;
  3415. addr += now;
  3416. val += now;
  3417. bytes -= now;
  3418. }
  3419. rc = emulator_read_write_onepage(addr, val, bytes, exception,
  3420. vcpu, ops);
  3421. if (rc != X86EMUL_CONTINUE)
  3422. return rc;
  3423. if (!vcpu->mmio_nr_fragments)
  3424. return rc;
  3425. gpa = vcpu->mmio_fragments[0].gpa;
  3426. vcpu->mmio_needed = 1;
  3427. vcpu->mmio_cur_fragment = 0;
  3428. vcpu->run->mmio.len = vcpu->mmio_fragments[0].len;
  3429. vcpu->run->mmio.is_write = vcpu->mmio_is_write = ops->write;
  3430. vcpu->run->exit_reason = KVM_EXIT_MMIO;
  3431. vcpu->run->mmio.phys_addr = gpa;
  3432. return ops->read_write_exit_mmio(vcpu, gpa, val, bytes);
  3433. }
  3434. static int emulator_read_emulated(struct x86_emulate_ctxt *ctxt,
  3435. unsigned long addr,
  3436. void *val,
  3437. unsigned int bytes,
  3438. struct x86_exception *exception)
  3439. {
  3440. return emulator_read_write(ctxt, addr, val, bytes,
  3441. exception, &read_emultor);
  3442. }
  3443. int emulator_write_emulated(struct x86_emulate_ctxt *ctxt,
  3444. unsigned long addr,
  3445. const void *val,
  3446. unsigned int bytes,
  3447. struct x86_exception *exception)
  3448. {
  3449. return emulator_read_write(ctxt, addr, (void *)val, bytes,
  3450. exception, &write_emultor);
  3451. }
  3452. #define CMPXCHG_TYPE(t, ptr, old, new) \
  3453. (cmpxchg((t *)(ptr), *(t *)(old), *(t *)(new)) == *(t *)(old))
  3454. #ifdef CONFIG_X86_64
  3455. # define CMPXCHG64(ptr, old, new) CMPXCHG_TYPE(u64, ptr, old, new)
  3456. #else
  3457. # define CMPXCHG64(ptr, old, new) \
  3458. (cmpxchg64((u64 *)(ptr), *(u64 *)(old), *(u64 *)(new)) == *(u64 *)(old))
  3459. #endif
  3460. static int emulator_cmpxchg_emulated(struct x86_emulate_ctxt *ctxt,
  3461. unsigned long addr,
  3462. const void *old,
  3463. const void *new,
  3464. unsigned int bytes,
  3465. struct x86_exception *exception)
  3466. {
  3467. struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
  3468. gpa_t gpa;
  3469. struct page *page;
  3470. char *kaddr;
  3471. bool exchanged;
3472. /* a guest's cmpxchg8b has to be emulated atomically */
  3473. if (bytes > 8 || (bytes & (bytes - 1)))
  3474. goto emul_write;
  3475. gpa = kvm_mmu_gva_to_gpa_write(vcpu, addr, NULL);
  3476. if (gpa == UNMAPPED_GVA ||
  3477. (gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE)
  3478. goto emul_write;
  3479. if (((gpa + bytes - 1) & PAGE_MASK) != (gpa & PAGE_MASK))
  3480. goto emul_write;
  3481. page = gfn_to_page(vcpu->kvm, gpa >> PAGE_SHIFT);
  3482. if (is_error_page(page)) {
  3483. kvm_release_page_clean(page);
  3484. goto emul_write;
  3485. }
  3486. kaddr = kmap_atomic(page);
  3487. kaddr += offset_in_page(gpa);
  3488. switch (bytes) {
  3489. case 1:
  3490. exchanged = CMPXCHG_TYPE(u8, kaddr, old, new);
  3491. break;
  3492. case 2:
  3493. exchanged = CMPXCHG_TYPE(u16, kaddr, old, new);
  3494. break;
  3495. case 4:
  3496. exchanged = CMPXCHG_TYPE(u32, kaddr, old, new);
  3497. break;
  3498. case 8:
  3499. exchanged = CMPXCHG64(kaddr, old, new);
  3500. break;
  3501. default:
  3502. BUG();
  3503. }
  3504. kunmap_atomic(kaddr);
  3505. kvm_release_page_dirty(page);
  3506. if (!exchanged)
  3507. return X86EMUL_CMPXCHG_FAILED;
  3508. kvm_mmu_pte_write(vcpu, gpa, new, bytes);
  3509. return X86EMUL_CONTINUE;
  3510. emul_write:
  3511. printk_once(KERN_WARNING "kvm: emulating exchange as write\n");
  3512. return emulator_write_emulated(ctxt, addr, new, bytes, exception);
  3513. }
  3514. static int kernel_pio(struct kvm_vcpu *vcpu, void *pd)
  3515. {
3516. /* TODO: String I/O for in-kernel devices */
  3517. int r;
  3518. if (vcpu->arch.pio.in)
  3519. r = kvm_io_bus_read(vcpu->kvm, KVM_PIO_BUS, vcpu->arch.pio.port,
  3520. vcpu->arch.pio.size, pd);
  3521. else
  3522. r = kvm_io_bus_write(vcpu->kvm, KVM_PIO_BUS,
  3523. vcpu->arch.pio.port, vcpu->arch.pio.size,
  3524. pd);
  3525. return r;
  3526. }
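/*
 * Record the PIO state on the vcpu and try the in-kernel PIO bus first;
 * returns 1 if the access was handled in the kernel, 0 if a KVM_EXIT_IO
 * exit to userspace is required.
 */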
  3527. static int emulator_pio_in_out(struct kvm_vcpu *vcpu, int size,
  3528. unsigned short port, void *val,
  3529. unsigned int count, bool in)
  3530. {
  3531. trace_kvm_pio(!in, port, size, count);
  3532. vcpu->arch.pio.port = port;
  3533. vcpu->arch.pio.in = in;
  3534. vcpu->arch.pio.count = count;
  3535. vcpu->arch.pio.size = size;
  3536. if (!kernel_pio(vcpu, vcpu->arch.pio_data)) {
  3537. vcpu->arch.pio.count = 0;
  3538. return 1;
  3539. }
  3540. vcpu->run->exit_reason = KVM_EXIT_IO;
  3541. vcpu->run->io.direction = in ? KVM_EXIT_IO_IN : KVM_EXIT_IO_OUT;
  3542. vcpu->run->io.size = size;
  3543. vcpu->run->io.data_offset = KVM_PIO_PAGE_OFFSET * PAGE_SIZE;
  3544. vcpu->run->io.count = count;
  3545. vcpu->run->io.port = port;
  3546. return 0;
  3547. }
  3548. static int emulator_pio_in_emulated(struct x86_emulate_ctxt *ctxt,
  3549. int size, unsigned short port, void *val,
  3550. unsigned int count)
  3551. {
  3552. struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
  3553. int ret;
  3554. if (vcpu->arch.pio.count)
  3555. goto data_avail;
  3556. ret = emulator_pio_in_out(vcpu, size, port, val, count, true);
  3557. if (ret) {
  3558. data_avail:
  3559. memcpy(val, vcpu->arch.pio_data, size * count);
  3560. vcpu->arch.pio.count = 0;
  3561. return 1;
  3562. }
  3563. return 0;
  3564. }
  3565. static int emulator_pio_out_emulated(struct x86_emulate_ctxt *ctxt,
  3566. int size, unsigned short port,
  3567. const void *val, unsigned int count)
  3568. {
  3569. struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
  3570. memcpy(vcpu->arch.pio_data, val, size * count);
  3571. return emulator_pio_in_out(vcpu, size, port, (void *)val, count, false);
  3572. }
  3573. static unsigned long get_segment_base(struct kvm_vcpu *vcpu, int seg)
  3574. {
  3575. return kvm_x86_ops->get_segment_base(vcpu, seg);
  3576. }
  3577. static void emulator_invlpg(struct x86_emulate_ctxt *ctxt, ulong address)
  3578. {
  3579. kvm_mmu_invlpg(emul_to_vcpu(ctxt), address);
  3580. }
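/*
 * Emulate WBINVD: if the hardware exits on wbinvd, flush caches on every
 * physical CPU the vcpu has dirtied; otherwise just execute wbinvd
 * locally.
 */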
  3581. int kvm_emulate_wbinvd(struct kvm_vcpu *vcpu)
  3582. {
  3583. if (!need_emulate_wbinvd(vcpu))
  3584. return X86EMUL_CONTINUE;
  3585. if (kvm_x86_ops->has_wbinvd_exit()) {
  3586. int cpu = get_cpu();
  3587. cpumask_set_cpu(cpu, vcpu->arch.wbinvd_dirty_mask);
  3588. smp_call_function_many(vcpu->arch.wbinvd_dirty_mask,
  3589. wbinvd_ipi, NULL, 1);
  3590. put_cpu();
  3591. cpumask_clear(vcpu->arch.wbinvd_dirty_mask);
  3592. } else
  3593. wbinvd();
  3594. return X86EMUL_CONTINUE;
  3595. }
  3596. EXPORT_SYMBOL_GPL(kvm_emulate_wbinvd);
  3597. static void emulator_wbinvd(struct x86_emulate_ctxt *ctxt)
  3598. {
  3599. kvm_emulate_wbinvd(emul_to_vcpu(ctxt));
  3600. }
  3601. int emulator_get_dr(struct x86_emulate_ctxt *ctxt, int dr, unsigned long *dest)
  3602. {
  3603. return _kvm_get_dr(emul_to_vcpu(ctxt), dr, dest);
  3604. }
  3605. int emulator_set_dr(struct x86_emulate_ctxt *ctxt, int dr, unsigned long value)
  3606. {
  3607. return __kvm_set_dr(emul_to_vcpu(ctxt), dr, value);
  3608. }
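/* Merge a 32-bit value into the low half of a 64-bit control register. */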
  3609. static u64 mk_cr_64(u64 curr_cr, u32 new_val)
  3610. {
  3611. return (curr_cr & ~((1ULL << 32) - 1)) | new_val;
  3612. }
  3613. static unsigned long emulator_get_cr(struct x86_emulate_ctxt *ctxt, int cr)
  3614. {
  3615. struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
  3616. unsigned long value;
  3617. switch (cr) {
  3618. case 0:
  3619. value = kvm_read_cr0(vcpu);
  3620. break;
  3621. case 2:
  3622. value = vcpu->arch.cr2;
  3623. break;
  3624. case 3:
  3625. value = kvm_read_cr3(vcpu);
  3626. break;
  3627. case 4:
  3628. value = kvm_read_cr4(vcpu);
  3629. break;
  3630. case 8:
  3631. value = kvm_get_cr8(vcpu);
  3632. break;
  3633. default:
  3634. kvm_err("%s: unexpected cr %u\n", __func__, cr);
  3635. return 0;
  3636. }
  3637. return value;
  3638. }
  3639. static int emulator_set_cr(struct x86_emulate_ctxt *ctxt, int cr, ulong val)
  3640. {
  3641. struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
  3642. int res = 0;
  3643. switch (cr) {
  3644. case 0:
  3645. res = kvm_set_cr0(vcpu, mk_cr_64(kvm_read_cr0(vcpu), val));
  3646. break;
  3647. case 2:
  3648. vcpu->arch.cr2 = val;
  3649. break;
  3650. case 3:
  3651. res = kvm_set_cr3(vcpu, val);
  3652. break;
  3653. case 4:
  3654. res = kvm_set_cr4(vcpu, mk_cr_64(kvm_read_cr4(vcpu), val));
  3655. break;
  3656. case 8:
  3657. res = kvm_set_cr8(vcpu, val);
  3658. break;
  3659. default:
  3660. kvm_err("%s: unexpected cr %u\n", __func__, cr);
  3661. res = -1;
  3662. }
  3663. return res;
  3664. }
  3665. static void emulator_set_rflags(struct x86_emulate_ctxt *ctxt, ulong val)
  3666. {
  3667. kvm_set_rflags(emul_to_vcpu(ctxt), val);
  3668. }
  3669. static int emulator_get_cpl(struct x86_emulate_ctxt *ctxt)
  3670. {
  3671. return kvm_x86_ops->get_cpl(emul_to_vcpu(ctxt));
  3672. }
  3673. static void emulator_get_gdt(struct x86_emulate_ctxt *ctxt, struct desc_ptr *dt)
  3674. {
  3675. kvm_x86_ops->get_gdt(emul_to_vcpu(ctxt), dt);
  3676. }
  3677. static void emulator_get_idt(struct x86_emulate_ctxt *ctxt, struct desc_ptr *dt)
  3678. {
  3679. kvm_x86_ops->get_idt(emul_to_vcpu(ctxt), dt);
  3680. }
  3681. static void emulator_set_gdt(struct x86_emulate_ctxt *ctxt, struct desc_ptr *dt)
  3682. {
  3683. kvm_x86_ops->set_gdt(emul_to_vcpu(ctxt), dt);
  3684. }
  3685. static void emulator_set_idt(struct x86_emulate_ctxt *ctxt, struct desc_ptr *dt)
  3686. {
  3687. kvm_x86_ops->set_idt(emul_to_vcpu(ctxt), dt);
  3688. }
  3689. static unsigned long emulator_get_cached_segment_base(
  3690. struct x86_emulate_ctxt *ctxt, int seg)
  3691. {
  3692. return get_segment_base(emul_to_vcpu(ctxt), seg);
  3693. }
  3694. static bool emulator_get_segment(struct x86_emulate_ctxt *ctxt, u16 *selector,
  3695. struct desc_struct *desc, u32 *base3,
  3696. int seg)
  3697. {
  3698. struct kvm_segment var;
  3699. kvm_get_segment(emul_to_vcpu(ctxt), &var, seg);
  3700. *selector = var.selector;
  3701. if (var.unusable)
  3702. return false;
  3703. if (var.g)
  3704. var.limit >>= 12;
  3705. set_desc_limit(desc, var.limit);
  3706. set_desc_base(desc, (unsigned long)var.base);
  3707. #ifdef CONFIG_X86_64
  3708. if (base3)
  3709. *base3 = var.base >> 32;
  3710. #endif
  3711. desc->type = var.type;
  3712. desc->s = var.s;
  3713. desc->dpl = var.dpl;
  3714. desc->p = var.present;
  3715. desc->avl = var.avl;
  3716. desc->l = var.l;
  3717. desc->d = var.db;
  3718. desc->g = var.g;
  3719. return true;
  3720. }
  3721. static void emulator_set_segment(struct x86_emulate_ctxt *ctxt, u16 selector,
  3722. struct desc_struct *desc, u32 base3,
  3723. int seg)
  3724. {
  3725. struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
  3726. struct kvm_segment var;
  3727. var.selector = selector;
  3728. var.base = get_desc_base(desc);
  3729. #ifdef CONFIG_X86_64
  3730. var.base |= ((u64)base3) << 32;
  3731. #endif
  3732. var.limit = get_desc_limit(desc);
  3733. if (desc->g)
  3734. var.limit = (var.limit << 12) | 0xfff;
  3735. var.type = desc->type;
  3736. var.present = desc->p;
  3737. var.dpl = desc->dpl;
  3738. var.db = desc->d;
  3739. var.s = desc->s;
  3740. var.l = desc->l;
  3741. var.g = desc->g;
  3742. var.avl = desc->avl;
  3743. var.present = desc->p;
  3744. var.unusable = !var.present;
  3745. var.padding = 0;
  3746. kvm_set_segment(vcpu, &var, seg);
  3747. return;
  3748. }
  3749. static int emulator_get_msr(struct x86_emulate_ctxt *ctxt,
  3750. u32 msr_index, u64 *pdata)
  3751. {
  3752. return kvm_get_msr(emul_to_vcpu(ctxt), msr_index, pdata);
  3753. }
  3754. static int emulator_set_msr(struct x86_emulate_ctxt *ctxt,
  3755. u32 msr_index, u64 data)
  3756. {
  3757. return kvm_set_msr(emul_to_vcpu(ctxt), msr_index, data);
  3758. }
  3759. static int emulator_read_pmc(struct x86_emulate_ctxt *ctxt,
  3760. u32 pmc, u64 *pdata)
  3761. {
  3762. return kvm_pmu_read_pmc(emul_to_vcpu(ctxt), pmc, pdata);
  3763. }
  3764. static void emulator_halt(struct x86_emulate_ctxt *ctxt)
  3765. {
  3766. emul_to_vcpu(ctxt)->arch.halt_request = 1;
  3767. }
  3768. static void emulator_get_fpu(struct x86_emulate_ctxt *ctxt)
  3769. {
  3770. preempt_disable();
  3771. kvm_load_guest_fpu(emul_to_vcpu(ctxt));
  3772. /*
  3773. * CR0.TS may reference the host fpu state, not the guest fpu state,
  3774. * so it may be clear at this point.
  3775. */
  3776. clts();
  3777. }
  3778. static void emulator_put_fpu(struct x86_emulate_ctxt *ctxt)
  3779. {
  3780. preempt_enable();
  3781. }
  3782. static int emulator_intercept(struct x86_emulate_ctxt *ctxt,
  3783. struct x86_instruction_info *info,
  3784. enum x86_intercept_stage stage)
  3785. {
  3786. return kvm_x86_ops->check_intercept(emul_to_vcpu(ctxt), info, stage);
  3787. }
  3788. static void emulator_get_cpuid(struct x86_emulate_ctxt *ctxt,
  3789. u32 *eax, u32 *ebx, u32 *ecx, u32 *edx)
  3790. {
  3791. kvm_cpuid(emul_to_vcpu(ctxt), eax, ebx, ecx, edx);
  3792. }
  3793. static struct x86_emulate_ops emulate_ops = {
  3794. .read_std = kvm_read_guest_virt_system,
  3795. .write_std = kvm_write_guest_virt_system,
  3796. .fetch = kvm_fetch_guest_virt,
  3797. .read_emulated = emulator_read_emulated,
  3798. .write_emulated = emulator_write_emulated,
  3799. .cmpxchg_emulated = emulator_cmpxchg_emulated,
  3800. .invlpg = emulator_invlpg,
  3801. .pio_in_emulated = emulator_pio_in_emulated,
  3802. .pio_out_emulated = emulator_pio_out_emulated,
  3803. .get_segment = emulator_get_segment,
  3804. .set_segment = emulator_set_segment,
  3805. .get_cached_segment_base = emulator_get_cached_segment_base,
  3806. .get_gdt = emulator_get_gdt,
  3807. .get_idt = emulator_get_idt,
  3808. .set_gdt = emulator_set_gdt,
  3809. .set_idt = emulator_set_idt,
  3810. .get_cr = emulator_get_cr,
  3811. .set_cr = emulator_set_cr,
  3812. .set_rflags = emulator_set_rflags,
  3813. .cpl = emulator_get_cpl,
  3814. .get_dr = emulator_get_dr,
  3815. .set_dr = emulator_set_dr,
  3816. .set_msr = emulator_set_msr,
  3817. .get_msr = emulator_get_msr,
  3818. .read_pmc = emulator_read_pmc,
  3819. .halt = emulator_halt,
  3820. .wbinvd = emulator_wbinvd,
  3821. .fix_hypercall = emulator_fix_hypercall,
  3822. .get_fpu = emulator_get_fpu,
  3823. .put_fpu = emulator_put_fpu,
  3824. .intercept = emulator_intercept,
  3825. .get_cpuid = emulator_get_cpuid,
  3826. };
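/*
 * Pull RAX, RSP and RIP into the register cache and mark all registers
 * dirty so that the emulator's copies are written back to the vcpu.
 */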
  3827. static void cache_all_regs(struct kvm_vcpu *vcpu)
  3828. {
  3829. kvm_register_read(vcpu, VCPU_REGS_RAX);
  3830. kvm_register_read(vcpu, VCPU_REGS_RSP);
  3831. kvm_register_read(vcpu, VCPU_REGS_RIP);
  3832. vcpu->arch.regs_dirty = ~0;
  3833. }
  3834. static void toggle_interruptibility(struct kvm_vcpu *vcpu, u32 mask)
  3835. {
  3836. u32 int_shadow = kvm_x86_ops->get_interrupt_shadow(vcpu, mask);
  3837. /*
3838. * an sti; sti; sequence only disables interrupts for the first
3839. * instruction. So, if the last instruction, be it emulated or
3840. * not, left the system with the INT_STI flag enabled, it
3841. * means that the last instruction is an sti. We should not
3842. * leave the flag on in this case. The same goes for mov ss.
  3843. */
  3844. if (!(int_shadow & mask))
  3845. kvm_x86_ops->set_interrupt_shadow(vcpu, mask);
  3846. }
  3847. static void inject_emulated_exception(struct kvm_vcpu *vcpu)
  3848. {
  3849. struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt;
  3850. if (ctxt->exception.vector == PF_VECTOR)
  3851. kvm_propagate_fault(vcpu, &ctxt->exception);
  3852. else if (ctxt->exception.error_code_valid)
  3853. kvm_queue_exception_e(vcpu, ctxt->exception.vector,
  3854. ctxt->exception.error_code);
  3855. else
  3856. kvm_queue_exception(vcpu, ctxt->exception.vector);
  3857. }
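/*
 * Clear the per-instruction decode state (everything between ->twobyte
 * and ->regs in the context), reload the register copies, and reset the
 * fetch and read caches.
 */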
  3858. static void init_decode_cache(struct x86_emulate_ctxt *ctxt,
  3859. const unsigned long *regs)
  3860. {
  3861. memset(&ctxt->twobyte, 0,
  3862. (void *)&ctxt->regs - (void *)&ctxt->twobyte);
  3863. memcpy(ctxt->regs, regs, sizeof(ctxt->regs));
  3864. ctxt->fetch.start = 0;
  3865. ctxt->fetch.end = 0;
  3866. ctxt->io_read.pos = 0;
  3867. ctxt->io_read.end = 0;
  3868. ctxt->mem_read.pos = 0;
  3869. ctxt->mem_read.end = 0;
  3870. }
  3871. static void init_emulate_ctxt(struct kvm_vcpu *vcpu)
  3872. {
  3873. struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt;
  3874. int cs_db, cs_l;
  3875. /*
  3876. * TODO: fix emulate.c to use guest_read/write_register
3877. * instead of direct ->regs accesses; this can save a hundred cycles
3878. * on Intel for instructions that don't read/change RSP, for
3879. * example.
  3880. */
  3881. cache_all_regs(vcpu);
  3882. kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
  3883. ctxt->eflags = kvm_get_rflags(vcpu);
  3884. ctxt->eip = kvm_rip_read(vcpu);
  3885. ctxt->mode = (!is_protmode(vcpu)) ? X86EMUL_MODE_REAL :
  3886. (ctxt->eflags & X86_EFLAGS_VM) ? X86EMUL_MODE_VM86 :
  3887. cs_l ? X86EMUL_MODE_PROT64 :
  3888. cs_db ? X86EMUL_MODE_PROT32 :
  3889. X86EMUL_MODE_PROT16;
  3890. ctxt->guest_mode = is_guest_mode(vcpu);
  3891. init_decode_cache(ctxt, vcpu->arch.regs);
  3892. vcpu->arch.emulate_regs_need_sync_from_vcpu = false;
  3893. }
  3894. int kvm_inject_realmode_interrupt(struct kvm_vcpu *vcpu, int irq, int inc_eip)
  3895. {
  3896. struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt;
  3897. int ret;
  3898. init_emulate_ctxt(vcpu);
  3899. ctxt->op_bytes = 2;
  3900. ctxt->ad_bytes = 2;
  3901. ctxt->_eip = ctxt->eip + inc_eip;
  3902. ret = emulate_int_real(ctxt, irq);
  3903. if (ret != X86EMUL_CONTINUE)
  3904. return EMULATE_FAIL;
  3905. ctxt->eip = ctxt->_eip;
  3906. memcpy(vcpu->arch.regs, ctxt->regs, sizeof ctxt->regs);
  3907. kvm_rip_write(vcpu, ctxt->eip);
  3908. kvm_set_rflags(vcpu, ctxt->eflags);
  3909. if (irq == NMI_VECTOR)
  3910. vcpu->arch.nmi_pending = 0;
  3911. else
  3912. vcpu->arch.interrupt.pending = false;
  3913. return EMULATE_DONE;
  3914. }
  3915. EXPORT_SYMBOL_GPL(kvm_inject_realmode_interrupt);
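/*
 * Emulation failed: report an internal error to userspace (unless the
 * vcpu is in guest mode, i.e. running a nested guest) and inject #UD so
 * the guest sees a fault as well.
 */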
  3916. static int handle_emulation_failure(struct kvm_vcpu *vcpu)
  3917. {
  3918. int r = EMULATE_DONE;
  3919. ++vcpu->stat.insn_emulation_fail;
  3920. trace_kvm_emulate_insn_failed(vcpu);
  3921. if (!is_guest_mode(vcpu)) {
  3922. vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
  3923. vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
  3924. vcpu->run->internal.ndata = 0;
  3925. r = EMULATE_FAIL;
  3926. }
  3927. kvm_queue_exception(vcpu, UD_VECTOR);
  3928. return r;
  3929. }
  3930. static bool reexecute_instruction(struct kvm_vcpu *vcpu, gva_t gva)
  3931. {
  3932. gpa_t gpa;
  3933. if (tdp_enabled)
  3934. return false;
  3935. /*
3936. * If emulation was due to an access to a shadowed page table
3937. * and it failed, try to unshadow the page and re-enter the
3938. * guest to let the CPU execute the instruction.
  3939. */
  3940. if (kvm_mmu_unprotect_page_virt(vcpu, gva))
  3941. return true;
  3942. gpa = kvm_mmu_gva_to_gpa_system(vcpu, gva, NULL);
  3943. if (gpa == UNMAPPED_GVA)
  3944. return true; /* let cpu generate fault */
  3945. if (!kvm_is_error_hva(gfn_to_hva(vcpu->kvm, gpa >> PAGE_SHIFT)))
  3946. return true;
  3947. return false;
  3948. }
  3949. static bool retry_instruction(struct x86_emulate_ctxt *ctxt,
  3950. unsigned long cr2, int emulation_type)
  3951. {
  3952. struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
  3953. unsigned long last_retry_eip, last_retry_addr, gpa = cr2;
  3954. last_retry_eip = vcpu->arch.last_retry_eip;
  3955. last_retry_addr = vcpu->arch.last_retry_addr;
  3956. /*
3957. * If the emulation is caused by #PF and it is a non-page-table-
3958. * writing instruction, it means the VM-EXIT is caused by shadow
3959. * page protection; we can zap the shadow page and retry this
3960. * instruction directly.
  3961. *
  3962. * Note: if the guest uses a non-page-table modifying instruction
  3963. * on the PDE that points to the instruction, then we will unmap
  3964. * the instruction and go to an infinite loop. So, we cache the
3965. * last retried eip and the last fault address; if we meet the eip
  3966. * and the address again, we can break out of the potential infinite
  3967. * loop.
  3968. */
  3969. vcpu->arch.last_retry_eip = vcpu->arch.last_retry_addr = 0;
  3970. if (!(emulation_type & EMULTYPE_RETRY))
  3971. return false;
  3972. if (x86_page_table_writing_insn(ctxt))
  3973. return false;
  3974. if (ctxt->eip == last_retry_eip && last_retry_addr == cr2)
  3975. return false;
  3976. vcpu->arch.last_retry_eip = ctxt->eip;
  3977. vcpu->arch.last_retry_addr = cr2;
  3978. if (!vcpu->arch.mmu.direct_map)
  3979. gpa = kvm_mmu_gva_to_gpa_write(vcpu, cr2, NULL);
  3980. kvm_mmu_unprotect_page(vcpu->kvm, gpa >> PAGE_SHIFT);
  3981. return true;
  3982. }
  3983. int x86_emulate_instruction(struct kvm_vcpu *vcpu,
  3984. unsigned long cr2,
  3985. int emulation_type,
  3986. void *insn,
  3987. int insn_len)
  3988. {
  3989. int r;
  3990. struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt;
  3991. bool writeback = true;
  3992. kvm_clear_exception_queue(vcpu);
  3993. if (!(emulation_type & EMULTYPE_NO_DECODE)) {
  3994. init_emulate_ctxt(vcpu);
  3995. ctxt->interruptibility = 0;
  3996. ctxt->have_exception = false;
  3997. ctxt->perm_ok = false;
  3998. ctxt->only_vendor_specific_insn
  3999. = emulation_type & EMULTYPE_TRAP_UD;
  4000. r = x86_decode_insn(ctxt, insn, insn_len);
  4001. trace_kvm_emulate_insn_start(vcpu);
  4002. ++vcpu->stat.insn_emulation;
  4003. if (r != EMULATION_OK) {
  4004. if (emulation_type & EMULTYPE_TRAP_UD)
  4005. return EMULATE_FAIL;
  4006. if (reexecute_instruction(vcpu, cr2))
  4007. return EMULATE_DONE;
  4008. if (emulation_type & EMULTYPE_SKIP)
  4009. return EMULATE_FAIL;
  4010. return handle_emulation_failure(vcpu);
  4011. }
  4012. }
  4013. if (emulation_type & EMULTYPE_SKIP) {
  4014. kvm_rip_write(vcpu, ctxt->_eip);
  4015. return EMULATE_DONE;
  4016. }
  4017. if (retry_instruction(ctxt, cr2, emulation_type))
  4018. return EMULATE_DONE;
4019. /* this is needed for the vmware backdoor interface to work since it
4020. changes register values during the IO operation */
  4021. if (vcpu->arch.emulate_regs_need_sync_from_vcpu) {
  4022. vcpu->arch.emulate_regs_need_sync_from_vcpu = false;
  4023. memcpy(ctxt->regs, vcpu->arch.regs, sizeof ctxt->regs);
  4024. }
  4025. restart:
  4026. r = x86_emulate_insn(ctxt);
  4027. if (r == EMULATION_INTERCEPTED)
  4028. return EMULATE_DONE;
  4029. if (r == EMULATION_FAILED) {
  4030. if (reexecute_instruction(vcpu, cr2))
  4031. return EMULATE_DONE;
  4032. return handle_emulation_failure(vcpu);
  4033. }
  4034. if (ctxt->have_exception) {
  4035. inject_emulated_exception(vcpu);
  4036. r = EMULATE_DONE;
  4037. } else if (vcpu->arch.pio.count) {
  4038. if (!vcpu->arch.pio.in)
  4039. vcpu->arch.pio.count = 0;
  4040. else
  4041. writeback = false;
  4042. r = EMULATE_DO_MMIO;
  4043. } else if (vcpu->mmio_needed) {
  4044. if (!vcpu->mmio_is_write)
  4045. writeback = false;
  4046. r = EMULATE_DO_MMIO;
  4047. } else if (r == EMULATION_RESTART)
  4048. goto restart;
  4049. else
  4050. r = EMULATE_DONE;
  4051. if (writeback) {
  4052. toggle_interruptibility(vcpu, ctxt->interruptibility);
  4053. kvm_set_rflags(vcpu, ctxt->eflags);
  4054. kvm_make_request(KVM_REQ_EVENT, vcpu);
  4055. memcpy(vcpu->arch.regs, ctxt->regs, sizeof ctxt->regs);
  4056. vcpu->arch.emulate_regs_need_sync_to_vcpu = false;
  4057. kvm_rip_write(vcpu, ctxt->eip);
  4058. } else
  4059. vcpu->arch.emulate_regs_need_sync_to_vcpu = true;
  4060. return r;
  4061. }
  4062. EXPORT_SYMBOL_GPL(x86_emulate_instruction);
  4063. int kvm_fast_pio_out(struct kvm_vcpu *vcpu, int size, unsigned short port)
  4064. {
  4065. unsigned long val = kvm_register_read(vcpu, VCPU_REGS_RAX);
  4066. int ret = emulator_pio_out_emulated(&vcpu->arch.emulate_ctxt,
  4067. size, port, &val, 1);
  4068. /* do not return to emulator after return from userspace */
  4069. vcpu->arch.pio.count = 0;
  4070. return ret;
  4071. }
  4072. EXPORT_SYMBOL_GPL(kvm_fast_pio_out);
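/*
 * Per-cpu TSC frequency bookkeeping: tsc_bad() marks a CPU's rate as
 * unknown, tsc_khz_changed() records the new rate from the cpufreq data
 * (or re-queries it when no data is supplied).
 */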
  4073. static void tsc_bad(void *info)
  4074. {
  4075. __this_cpu_write(cpu_tsc_khz, 0);
  4076. }
  4077. static void tsc_khz_changed(void *data)
  4078. {
  4079. struct cpufreq_freqs *freq = data;
  4080. unsigned long khz = 0;
  4081. if (data)
  4082. khz = freq->new;
  4083. else if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
  4084. khz = cpufreq_quick_get(raw_smp_processor_id());
  4085. if (!khz)
  4086. khz = tsc_khz;
  4087. __this_cpu_write(cpu_tsc_khz, khz);
  4088. }
  4089. static int kvmclock_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
  4090. void *data)
  4091. {
  4092. struct cpufreq_freqs *freq = data;
  4093. struct kvm *kvm;
  4094. struct kvm_vcpu *vcpu;
  4095. int i, send_ipi = 0;
  4096. /*
  4097. * We allow guests to temporarily run on slowing clocks,
  4098. * provided we notify them after, or to run on accelerating
  4099. * clocks, provided we notify them before. Thus time never
  4100. * goes backwards.
  4101. *
  4102. * However, we have a problem. We can't atomically update
  4103. * the frequency of a given CPU from this function; it is
  4104. * merely a notifier, which can be called from any CPU.
  4105. * Changing the TSC frequency at arbitrary points in time
  4106. * requires a recomputation of local variables related to
  4107. * the TSC for each VCPU. We must flag these local variables
  4108. * to be updated and be sure the update takes place with the
  4109. * new frequency before any guests proceed.
  4110. *
  4111. * Unfortunately, the combination of hotplug CPU and frequency
  4112. * change creates an intractable locking scenario; the order
  4113. * of when these callouts happen is undefined with respect to
  4114. * CPU hotplug, and they can race with each other. As such,
  4115. * merely setting per_cpu(cpu_tsc_khz) = X during a hotadd is
  4116. * undefined; you can actually have a CPU frequency change take
  4117. * place in between the computation of X and the setting of the
  4118. * variable. To protect against this problem, all updates of
  4119. * the per_cpu tsc_khz variable are done in an interrupt
  4120. * protected IPI, and all callers wishing to update the value
  4121. * must wait for a synchronous IPI to complete (which is trivial
  4122. * if the caller is on the CPU already). This establishes the
  4123. * necessary total order on variable updates.
  4124. *
  4125. * Note that because a guest time update may take place
  4126. * anytime after the setting of the VCPU's request bit, the
  4127. * correct TSC value must be set before the request. However,
  4128. * to ensure the update actually makes it to any guest which
  4129. * starts running in hardware virtualization between the set
  4130. * and the acquisition of the spinlock, we must also ping the
  4131. * CPU after setting the request bit.
  4132. *
  4133. */
  4134. if (val == CPUFREQ_PRECHANGE && freq->old > freq->new)
  4135. return 0;
  4136. if (val == CPUFREQ_POSTCHANGE && freq->old < freq->new)
  4137. return 0;
  4138. smp_call_function_single(freq->cpu, tsc_khz_changed, freq, 1);
  4139. raw_spin_lock(&kvm_lock);
  4140. list_for_each_entry(kvm, &vm_list, vm_list) {
  4141. kvm_for_each_vcpu(i, vcpu, kvm) {
  4142. if (vcpu->cpu != freq->cpu)
  4143. continue;
  4144. kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
  4145. if (vcpu->cpu != smp_processor_id())
  4146. send_ipi = 1;
  4147. }
  4148. }
  4149. raw_spin_unlock(&kvm_lock);
  4150. if (freq->old < freq->new && send_ipi) {
  4151. /*
4152. * We upscale the frequency. We must make sure the guest
4153. * doesn't see old kvmclock values while running with
4154. * the new frequency, otherwise we risk the guest seeing
  4155. * time go backwards.
  4156. *
  4157. * In case we update the frequency for another cpu
  4158. * (which might be in guest context) send an interrupt
  4159. * to kick the cpu out of guest context. Next time
  4160. * guest context is entered kvmclock will be updated,
  4161. * so the guest will not see stale values.
  4162. */
  4163. smp_call_function_single(freq->cpu, tsc_khz_changed, freq, 1);
  4164. }
  4165. return 0;
  4166. }
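/*
 * Illustrative worked example (not part of the original file) of the
 * PRECHANGE/POSTCHANGE filtering above: for a slowdown from 2000 MHz to
 * 1000 MHz the PRECHANGE call is ignored and the update happens at
 * POSTCHANGE, i.e. after the clock has slowed; for a speedup from
 * 1000 MHz to 2000 MHz the update happens at PRECHANGE, before the guest
 * can run on the faster clock.  In both cases kvmclock is recomputed on
 * the "safe" side of the transition, so guest time never goes backwards.
 */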
  4167. static struct notifier_block kvmclock_cpufreq_notifier_block = {
  4168. .notifier_call = kvmclock_cpufreq_notifier
  4169. };
  4170. static int kvmclock_cpu_notifier(struct notifier_block *nfb,
  4171. unsigned long action, void *hcpu)
  4172. {
  4173. unsigned int cpu = (unsigned long)hcpu;
  4174. switch (action) {
  4175. case CPU_ONLINE:
  4176. case CPU_DOWN_FAILED:
  4177. smp_call_function_single(cpu, tsc_khz_changed, NULL, 1);
  4178. break;
  4179. case CPU_DOWN_PREPARE:
  4180. smp_call_function_single(cpu, tsc_bad, NULL, 1);
  4181. break;
  4182. }
  4183. return NOTIFY_OK;
  4184. }
  4185. static struct notifier_block kvmclock_cpu_notifier_block = {
  4186. .notifier_call = kvmclock_cpu_notifier,
  4187. .priority = -INT_MAX
  4188. };
  4189. static void kvm_timer_init(void)
  4190. {
  4191. int cpu;
  4192. max_tsc_khz = tsc_khz;
  4193. register_hotcpu_notifier(&kvmclock_cpu_notifier_block);
  4194. if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) {
  4195. #ifdef CONFIG_CPU_FREQ
  4196. struct cpufreq_policy policy;
  4197. memset(&policy, 0, sizeof(policy));
  4198. cpu = get_cpu();
  4199. cpufreq_get_policy(&policy, cpu);
  4200. if (policy.cpuinfo.max_freq)
  4201. max_tsc_khz = policy.cpuinfo.max_freq;
  4202. put_cpu();
  4203. #endif
  4204. cpufreq_register_notifier(&kvmclock_cpufreq_notifier_block,
  4205. CPUFREQ_TRANSITION_NOTIFIER);
  4206. }
  4207. pr_debug("kvm: max_tsc_khz = %ld\n", max_tsc_khz);
  4208. for_each_online_cpu(cpu)
  4209. smp_call_function_single(cpu, tsc_khz_changed, NULL, 1);
  4210. }
  4211. static DEFINE_PER_CPU(struct kvm_vcpu *, current_vcpu);
  4212. int kvm_is_in_guest(void)
  4213. {
  4214. return __this_cpu_read(current_vcpu) != NULL;
  4215. }
  4216. static int kvm_is_user_mode(void)
  4217. {
  4218. int user_mode = 3;
  4219. if (__this_cpu_read(current_vcpu))
  4220. user_mode = kvm_x86_ops->get_cpl(__this_cpu_read(current_vcpu));
  4221. return user_mode != 0;
  4222. }
  4223. static unsigned long kvm_get_guest_ip(void)
  4224. {
  4225. unsigned long ip = 0;
  4226. if (__this_cpu_read(current_vcpu))
  4227. ip = kvm_rip_read(__this_cpu_read(current_vcpu));
  4228. return ip;
  4229. }
  4230. static struct perf_guest_info_callbacks kvm_guest_cbs = {
  4231. .is_in_guest = kvm_is_in_guest,
  4232. .is_user_mode = kvm_is_user_mode,
  4233. .get_guest_ip = kvm_get_guest_ip,
  4234. };
  4235. void kvm_before_handle_nmi(struct kvm_vcpu *vcpu)
  4236. {
  4237. __this_cpu_write(current_vcpu, vcpu);
  4238. }
  4239. EXPORT_SYMBOL_GPL(kvm_before_handle_nmi);
  4240. void kvm_after_handle_nmi(struct kvm_vcpu *vcpu)
  4241. {
  4242. __this_cpu_write(current_vcpu, NULL);
  4243. }
  4244. EXPORT_SYMBOL_GPL(kvm_after_handle_nmi);
  4245. static void kvm_set_mmio_spte_mask(void)
  4246. {
  4247. u64 mask;
  4248. int maxphyaddr = boot_cpu_data.x86_phys_bits;
  4249. /*
4250. * Set the reserved bits and the present bit of a paging-structure
4251. * entry to generate a page fault with PFERR.RSVD = 1.
  4252. */
  4253. mask = ((1ull << (62 - maxphyaddr + 1)) - 1) << maxphyaddr;
  4254. mask |= 1ull;
  4255. #ifdef CONFIG_X86_64
  4256. /*
4257. * If the reserved bit is not supported, clear the present bit to disable
4258. * the MMIO page fault.
  4259. */
  4260. if (maxphyaddr == 52)
  4261. mask &= ~1ull;
  4262. #endif
  4263. kvm_mmu_set_mmio_spte_mask(mask);
  4264. }
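/*
 * Worked example (illustrative, assuming boot_cpu_data.x86_phys_bits == 36):
 * mask = ((1ull << (62 - 36 + 1)) - 1) << 36 sets bits 62:36, all of which
 * are reserved because they sit above the physical-address width, and
 * mask |= 1 sets the present bit.  An access through such an SPTE therefore
 * faults with PFERR.RSVD = 1, which lets the MMU recognize it as an MMIO
 * access without walking the memslots.
 */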
  4265. int kvm_arch_init(void *opaque)
  4266. {
  4267. int r;
  4268. struct kvm_x86_ops *ops = (struct kvm_x86_ops *)opaque;
  4269. if (kvm_x86_ops) {
  4270. printk(KERN_ERR "kvm: already loaded the other module\n");
  4271. r = -EEXIST;
  4272. goto out;
  4273. }
  4274. if (!ops->cpu_has_kvm_support()) {
  4275. printk(KERN_ERR "kvm: no hardware support\n");
  4276. r = -EOPNOTSUPP;
  4277. goto out;
  4278. }
  4279. if (ops->disabled_by_bios()) {
  4280. printk(KERN_ERR "kvm: disabled by bios\n");
  4281. r = -EOPNOTSUPP;
  4282. goto out;
  4283. }
  4284. r = kvm_mmu_module_init();
  4285. if (r)
  4286. goto out;
  4287. kvm_set_mmio_spte_mask();
  4288. kvm_init_msr_list();
  4289. kvm_x86_ops = ops;
  4290. kvm_mmu_set_mask_ptes(PT_USER_MASK, PT_ACCESSED_MASK,
  4291. PT_DIRTY_MASK, PT64_NX_MASK, 0);
  4292. kvm_timer_init();
  4293. perf_register_guest_info_callbacks(&kvm_guest_cbs);
  4294. if (cpu_has_xsave)
  4295. host_xcr0 = xgetbv(XCR_XFEATURE_ENABLED_MASK);
  4296. return 0;
  4297. out:
  4298. return r;
  4299. }
  4300. void kvm_arch_exit(void)
  4301. {
  4302. perf_unregister_guest_info_callbacks(&kvm_guest_cbs);
  4303. if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
  4304. cpufreq_unregister_notifier(&kvmclock_cpufreq_notifier_block,
  4305. CPUFREQ_TRANSITION_NOTIFIER);
  4306. unregister_hotcpu_notifier(&kvmclock_cpu_notifier_block);
  4307. kvm_x86_ops = NULL;
  4308. kvm_mmu_module_exit();
  4309. }
  4310. int kvm_emulate_halt(struct kvm_vcpu *vcpu)
  4311. {
  4312. ++vcpu->stat.halt_exits;
  4313. if (irqchip_in_kernel(vcpu->kvm)) {
  4314. vcpu->arch.mp_state = KVM_MP_STATE_HALTED;
  4315. return 1;
  4316. } else {
  4317. vcpu->run->exit_reason = KVM_EXIT_HLT;
  4318. return 0;
  4319. }
  4320. }
  4321. EXPORT_SYMBOL_GPL(kvm_emulate_halt);
  4322. int kvm_hv_hypercall(struct kvm_vcpu *vcpu)
  4323. {
  4324. u64 param, ingpa, outgpa, ret;
  4325. uint16_t code, rep_idx, rep_cnt, res = HV_STATUS_SUCCESS, rep_done = 0;
  4326. bool fast, longmode;
  4327. int cs_db, cs_l;
  4328. /*
4329. * A hypercall generates #UD from non-zero CPL or real mode,
4330. * per the Hyper-V spec.
  4331. */
  4332. if (kvm_x86_ops->get_cpl(vcpu) != 0 || !is_protmode(vcpu)) {
  4333. kvm_queue_exception(vcpu, UD_VECTOR);
  4334. return 0;
  4335. }
  4336. kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
  4337. longmode = is_long_mode(vcpu) && cs_l == 1;
  4338. if (!longmode) {
  4339. param = ((u64)kvm_register_read(vcpu, VCPU_REGS_RDX) << 32) |
  4340. (kvm_register_read(vcpu, VCPU_REGS_RAX) & 0xffffffff);
  4341. ingpa = ((u64)kvm_register_read(vcpu, VCPU_REGS_RBX) << 32) |
  4342. (kvm_register_read(vcpu, VCPU_REGS_RCX) & 0xffffffff);
  4343. outgpa = ((u64)kvm_register_read(vcpu, VCPU_REGS_RDI) << 32) |
  4344. (kvm_register_read(vcpu, VCPU_REGS_RSI) & 0xffffffff);
  4345. }
  4346. #ifdef CONFIG_X86_64
  4347. else {
  4348. param = kvm_register_read(vcpu, VCPU_REGS_RCX);
  4349. ingpa = kvm_register_read(vcpu, VCPU_REGS_RDX);
  4350. outgpa = kvm_register_read(vcpu, VCPU_REGS_R8);
  4351. }
  4352. #endif
  4353. code = param & 0xffff;
  4354. fast = (param >> 16) & 0x1;
  4355. rep_cnt = (param >> 32) & 0xfff;
  4356. rep_idx = (param >> 48) & 0xfff;
  4357. trace_kvm_hv_hypercall(code, fast, rep_cnt, rep_idx, ingpa, outgpa);
  4358. switch (code) {
  4359. case HV_X64_HV_NOTIFY_LONG_SPIN_WAIT:
  4360. kvm_vcpu_on_spin(vcpu);
  4361. break;
  4362. default:
  4363. res = HV_STATUS_INVALID_HYPERCALL_CODE;
  4364. break;
  4365. }
  4366. ret = res | (((u64)rep_done & 0xfff) << 32);
  4367. if (longmode) {
  4368. kvm_register_write(vcpu, VCPU_REGS_RAX, ret);
  4369. } else {
  4370. kvm_register_write(vcpu, VCPU_REGS_RDX, ret >> 32);
  4371. kvm_register_write(vcpu, VCPU_REGS_RAX, ret & 0xffffffff);
  4372. }
  4373. return 1;
  4374. }
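/*
 * Illustrative example (not part of the original file) of the hypercall
 * input-value decoding above.  A 64-bit input value of 0x0001000200010008
 * (RCX in long mode, EDX:EAX otherwise) decodes as:
 *   code    = bits 15:0  = 0x0008
 *   fast    = bit  16    = 1   (register-based arguments)
 *   rep_cnt = bits 43:32 = 2
 *   rep_idx = bits 59:48 = 1
 * The value 0x0008 is assumed here to be the long-spin-wait notification
 * code, matching the HV_X64_HV_NOTIFY_LONG_SPIN_WAIT case handled above.
 */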
  4375. int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
  4376. {
  4377. unsigned long nr, a0, a1, a2, a3, ret;
  4378. int r = 1;
  4379. if (kvm_hv_hypercall_enabled(vcpu->kvm))
  4380. return kvm_hv_hypercall(vcpu);
  4381. nr = kvm_register_read(vcpu, VCPU_REGS_RAX);
  4382. a0 = kvm_register_read(vcpu, VCPU_REGS_RBX);
  4383. a1 = kvm_register_read(vcpu, VCPU_REGS_RCX);
  4384. a2 = kvm_register_read(vcpu, VCPU_REGS_RDX);
  4385. a3 = kvm_register_read(vcpu, VCPU_REGS_RSI);
  4386. trace_kvm_hypercall(nr, a0, a1, a2, a3);
  4387. if (!is_long_mode(vcpu)) {
  4388. nr &= 0xFFFFFFFF;
  4389. a0 &= 0xFFFFFFFF;
  4390. a1 &= 0xFFFFFFFF;
  4391. a2 &= 0xFFFFFFFF;
  4392. a3 &= 0xFFFFFFFF;
  4393. }
  4394. if (kvm_x86_ops->get_cpl(vcpu) != 0) {
  4395. ret = -KVM_EPERM;
  4396. goto out;
  4397. }
  4398. switch (nr) {
  4399. case KVM_HC_VAPIC_POLL_IRQ:
  4400. ret = 0;
  4401. break;
  4402. default:
  4403. ret = -KVM_ENOSYS;
  4404. break;
  4405. }
  4406. out:
  4407. kvm_register_write(vcpu, VCPU_REGS_RAX, ret);
  4408. ++vcpu->stat.hypercalls;
  4409. return r;
  4410. }
  4411. EXPORT_SYMBOL_GPL(kvm_emulate_hypercall);
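/*
 * Illustrative guest-side sketch (not part of the original file) of the
 * register convention read above: the hypercall number goes in RAX and the
 * arguments in RBX, RCX, RDX and RSI.  "vmcall" is the VMX mnemonic; on
 * SVM the instruction is "vmmcall", which is why emulator_fix_hypercall()
 * below patches the instruction bytes via ->patch_hypercall().
 */
static inline long example_kvm_hypercall1(unsigned int nr, unsigned long p1)
{
	long ret;

	asm volatile("vmcall"
		     : "=a"(ret)
		     : "a"(nr), "b"(p1)
		     : "memory");
	return ret;
}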
  4412. int emulator_fix_hypercall(struct x86_emulate_ctxt *ctxt)
  4413. {
  4414. struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
  4415. char instruction[3];
  4416. unsigned long rip = kvm_rip_read(vcpu);
  4417. /*
4418. * Blow out the MMU so that no other VCPU keeps an active mapping to the
4419. * old instruction bytes; this ensures the updated hypercall appears
4420. * atomically across all VCPUs.
  4421. */
  4422. kvm_mmu_zap_all(vcpu->kvm);
  4423. kvm_x86_ops->patch_hypercall(vcpu, instruction);
  4424. return emulator_write_emulated(ctxt, rip, instruction, 3, NULL);
  4425. }
  4426. /*
  4427. * Check if userspace requested an interrupt window, and that the
  4428. * interrupt window is open.
  4429. *
  4430. * No need to exit to userspace if we already have an interrupt queued.
  4431. */
  4432. static int dm_request_for_irq_injection(struct kvm_vcpu *vcpu)
  4433. {
  4434. return (!irqchip_in_kernel(vcpu->kvm) && !kvm_cpu_has_interrupt(vcpu) &&
  4435. vcpu->run->request_interrupt_window &&
  4436. kvm_arch_interrupt_allowed(vcpu));
  4437. }
  4438. static void post_kvm_run_save(struct kvm_vcpu *vcpu)
  4439. {
  4440. struct kvm_run *kvm_run = vcpu->run;
  4441. kvm_run->if_flag = (kvm_get_rflags(vcpu) & X86_EFLAGS_IF) != 0;
  4442. kvm_run->cr8 = kvm_get_cr8(vcpu);
  4443. kvm_run->apic_base = kvm_get_apic_base(vcpu);
  4444. if (irqchip_in_kernel(vcpu->kvm))
  4445. kvm_run->ready_for_interrupt_injection = 1;
  4446. else
  4447. kvm_run->ready_for_interrupt_injection =
  4448. kvm_arch_interrupt_allowed(vcpu) &&
  4449. !kvm_cpu_has_interrupt(vcpu) &&
  4450. !kvm_event_needs_reinjection(vcpu);
  4451. }
  4452. static int vapic_enter(struct kvm_vcpu *vcpu)
  4453. {
  4454. struct kvm_lapic *apic = vcpu->arch.apic;
  4455. struct page *page;
  4456. if (!apic || !apic->vapic_addr)
  4457. return 0;
  4458. page = gfn_to_page(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT);
  4459. if (is_error_page(page))
  4460. return -EFAULT;
  4461. vcpu->arch.apic->vapic_page = page;
  4462. return 0;
  4463. }
  4464. static void vapic_exit(struct kvm_vcpu *vcpu)
  4465. {
  4466. struct kvm_lapic *apic = vcpu->arch.apic;
  4467. int idx;
  4468. if (!apic || !apic->vapic_addr)
  4469. return;
  4470. idx = srcu_read_lock(&vcpu->kvm->srcu);
  4471. kvm_release_page_dirty(apic->vapic_page);
  4472. mark_page_dirty(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT);
  4473. srcu_read_unlock(&vcpu->kvm->srcu, idx);
  4474. }
  4475. static void update_cr8_intercept(struct kvm_vcpu *vcpu)
  4476. {
  4477. int max_irr, tpr;
  4478. if (!kvm_x86_ops->update_cr8_intercept)
  4479. return;
  4480. if (!vcpu->arch.apic)
  4481. return;
  4482. if (!vcpu->arch.apic->vapic_addr)
  4483. max_irr = kvm_lapic_find_highest_irr(vcpu);
  4484. else
  4485. max_irr = -1;
  4486. if (max_irr != -1)
  4487. max_irr >>= 4;
  4488. tpr = kvm_lapic_get_cr8(vcpu);
  4489. kvm_x86_ops->update_cr8_intercept(vcpu, tpr, max_irr);
  4490. }
  4491. static void inject_pending_event(struct kvm_vcpu *vcpu)
  4492. {
  4493. /* try to reinject previous events if any */
  4494. if (vcpu->arch.exception.pending) {
  4495. trace_kvm_inj_exception(vcpu->arch.exception.nr,
  4496. vcpu->arch.exception.has_error_code,
  4497. vcpu->arch.exception.error_code);
  4498. kvm_x86_ops->queue_exception(vcpu, vcpu->arch.exception.nr,
  4499. vcpu->arch.exception.has_error_code,
  4500. vcpu->arch.exception.error_code,
  4501. vcpu->arch.exception.reinject);
  4502. return;
  4503. }
  4504. if (vcpu->arch.nmi_injected) {
  4505. kvm_x86_ops->set_nmi(vcpu);
  4506. return;
  4507. }
  4508. if (vcpu->arch.interrupt.pending) {
  4509. kvm_x86_ops->set_irq(vcpu);
  4510. return;
  4511. }
  4512. /* try to inject new event if pending */
  4513. if (vcpu->arch.nmi_pending) {
  4514. if (kvm_x86_ops->nmi_allowed(vcpu)) {
  4515. --vcpu->arch.nmi_pending;
  4516. vcpu->arch.nmi_injected = true;
  4517. kvm_x86_ops->set_nmi(vcpu);
  4518. }
  4519. } else if (kvm_cpu_has_interrupt(vcpu)) {
  4520. if (kvm_x86_ops->interrupt_allowed(vcpu)) {
  4521. kvm_queue_interrupt(vcpu, kvm_cpu_get_interrupt(vcpu),
  4522. false);
  4523. kvm_x86_ops->set_irq(vcpu);
  4524. }
  4525. }
  4526. }
  4527. static void kvm_load_guest_xcr0(struct kvm_vcpu *vcpu)
  4528. {
  4529. if (kvm_read_cr4_bits(vcpu, X86_CR4_OSXSAVE) &&
  4530. !vcpu->guest_xcr0_loaded) {
  4531. /* kvm_set_xcr() also depends on this */
  4532. xsetbv(XCR_XFEATURE_ENABLED_MASK, vcpu->arch.xcr0);
  4533. vcpu->guest_xcr0_loaded = 1;
  4534. }
  4535. }
  4536. static void kvm_put_guest_xcr0(struct kvm_vcpu *vcpu)
  4537. {
  4538. if (vcpu->guest_xcr0_loaded) {
  4539. if (vcpu->arch.xcr0 != host_xcr0)
  4540. xsetbv(XCR_XFEATURE_ENABLED_MASK, host_xcr0);
  4541. vcpu->guest_xcr0_loaded = 0;
  4542. }
  4543. }
  4544. static void process_nmi(struct kvm_vcpu *vcpu)
  4545. {
  4546. unsigned limit = 2;
  4547. /*
  4548. * x86 is limited to one NMI running, and one NMI pending after it.
  4549. * If an NMI is already in progress, limit further NMIs to just one.
  4550. * Otherwise, allow two (and we'll inject the first one immediately).
  4551. */
  4552. if (kvm_x86_ops->get_nmi_mask(vcpu) || vcpu->arch.nmi_injected)
  4553. limit = 1;
  4554. vcpu->arch.nmi_pending += atomic_xchg(&vcpu->arch.nmi_queued, 0);
  4555. vcpu->arch.nmi_pending = min(vcpu->arch.nmi_pending, limit);
  4556. kvm_make_request(KVM_REQ_EVENT, vcpu);
  4557. }
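/*
 * Worked example (illustrative, not part of the original file): if the
 * guest is currently handling an NMI (get_nmi_mask() is true) and three
 * more NMIs are queued, nmi_pending becomes min(0 + 3, 1) = 1, so exactly
 * one further NMI is delivered once the current handler finishes --
 * mirroring real hardware, which keeps at most one NMI pending while one
 * is being serviced.
 */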
  4558. static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
  4559. {
  4560. int r;
  4561. bool req_int_win = !irqchip_in_kernel(vcpu->kvm) &&
  4562. vcpu->run->request_interrupt_window;
  4563. bool req_immediate_exit = 0;
  4564. if (vcpu->requests) {
  4565. if (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu))
  4566. kvm_mmu_unload(vcpu);
  4567. if (kvm_check_request(KVM_REQ_MIGRATE_TIMER, vcpu))
  4568. __kvm_migrate_timers(vcpu);
  4569. if (kvm_check_request(KVM_REQ_CLOCK_UPDATE, vcpu)) {
  4570. r = kvm_guest_time_update(vcpu);
  4571. if (unlikely(r))
  4572. goto out;
  4573. }
  4574. if (kvm_check_request(KVM_REQ_MMU_SYNC, vcpu))
  4575. kvm_mmu_sync_roots(vcpu);
  4576. if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu))
  4577. kvm_x86_ops->tlb_flush(vcpu);
  4578. if (kvm_check_request(KVM_REQ_REPORT_TPR_ACCESS, vcpu)) {
  4579. vcpu->run->exit_reason = KVM_EXIT_TPR_ACCESS;
  4580. r = 0;
  4581. goto out;
  4582. }
  4583. if (kvm_check_request(KVM_REQ_TRIPLE_FAULT, vcpu)) {
  4584. vcpu->run->exit_reason = KVM_EXIT_SHUTDOWN;
  4585. r = 0;
  4586. goto out;
  4587. }
  4588. if (kvm_check_request(KVM_REQ_DEACTIVATE_FPU, vcpu)) {
  4589. vcpu->fpu_active = 0;
  4590. kvm_x86_ops->fpu_deactivate(vcpu);
  4591. }
  4592. if (kvm_check_request(KVM_REQ_APF_HALT, vcpu)) {
  4593. /* Page is swapped out. Do synthetic halt */
  4594. vcpu->arch.apf.halted = true;
  4595. r = 1;
  4596. goto out;
  4597. }
  4598. if (kvm_check_request(KVM_REQ_STEAL_UPDATE, vcpu))
  4599. record_steal_time(vcpu);
  4600. if (kvm_check_request(KVM_REQ_NMI, vcpu))
  4601. process_nmi(vcpu);
  4602. req_immediate_exit =
  4603. kvm_check_request(KVM_REQ_IMMEDIATE_EXIT, vcpu);
  4604. if (kvm_check_request(KVM_REQ_PMU, vcpu))
  4605. kvm_handle_pmu_event(vcpu);
  4606. if (kvm_check_request(KVM_REQ_PMI, vcpu))
  4607. kvm_deliver_pmi(vcpu);
  4608. }
  4609. if (kvm_check_request(KVM_REQ_EVENT, vcpu) || req_int_win) {
  4610. inject_pending_event(vcpu);
  4611. /* enable NMI/IRQ window open exits if needed */
  4612. if (vcpu->arch.nmi_pending)
  4613. kvm_x86_ops->enable_nmi_window(vcpu);
  4614. else if (kvm_cpu_has_interrupt(vcpu) || req_int_win)
  4615. kvm_x86_ops->enable_irq_window(vcpu);
  4616. if (kvm_lapic_enabled(vcpu)) {
  4617. update_cr8_intercept(vcpu);
  4618. kvm_lapic_sync_to_vapic(vcpu);
  4619. }
  4620. }
  4621. r = kvm_mmu_reload(vcpu);
  4622. if (unlikely(r)) {
  4623. goto cancel_injection;
  4624. }
  4625. preempt_disable();
  4626. kvm_x86_ops->prepare_guest_switch(vcpu);
  4627. if (vcpu->fpu_active)
  4628. kvm_load_guest_fpu(vcpu);
  4629. kvm_load_guest_xcr0(vcpu);
  4630. vcpu->mode = IN_GUEST_MODE;
4631. /* We should set ->mode before checking ->requests;
  4632. * see the comment in make_all_cpus_request.
  4633. */
  4634. smp_mb();
  4635. local_irq_disable();
  4636. if (vcpu->mode == EXITING_GUEST_MODE || vcpu->requests
  4637. || need_resched() || signal_pending(current)) {
  4638. vcpu->mode = OUTSIDE_GUEST_MODE;
  4639. smp_wmb();
  4640. local_irq_enable();
  4641. preempt_enable();
  4642. r = 1;
  4643. goto cancel_injection;
  4644. }
  4645. srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
  4646. if (req_immediate_exit)
  4647. smp_send_reschedule(vcpu->cpu);
  4648. kvm_guest_enter();
  4649. if (unlikely(vcpu->arch.switch_db_regs)) {
  4650. set_debugreg(0, 7);
  4651. set_debugreg(vcpu->arch.eff_db[0], 0);
  4652. set_debugreg(vcpu->arch.eff_db[1], 1);
  4653. set_debugreg(vcpu->arch.eff_db[2], 2);
  4654. set_debugreg(vcpu->arch.eff_db[3], 3);
  4655. }
  4656. trace_kvm_entry(vcpu->vcpu_id);
  4657. kvm_x86_ops->run(vcpu);
  4658. /*
  4659. * If the guest has used debug registers, at least dr7
  4660. * will be disabled while returning to the host.
  4661. * If we don't have active breakpoints in the host, we don't
  4662. * care about the messed up debug address registers. But if
  4663. * we have some of them active, restore the old state.
  4664. */
  4665. if (hw_breakpoint_active())
  4666. hw_breakpoint_restore();
  4667. vcpu->arch.last_guest_tsc = kvm_x86_ops->read_l1_tsc(vcpu);
  4668. vcpu->mode = OUTSIDE_GUEST_MODE;
  4669. smp_wmb();
  4670. local_irq_enable();
  4671. ++vcpu->stat.exits;
  4672. /*
  4673. * We must have an instruction between local_irq_enable() and
  4674. * kvm_guest_exit(), so the timer interrupt isn't delayed by
  4675. * the interrupt shadow. The stat.exits increment will do nicely.
  4676. * But we need to prevent reordering, hence this barrier():
  4677. */
  4678. barrier();
  4679. kvm_guest_exit();
  4680. preempt_enable();
  4681. vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
  4682. /*
  4683. * Profile KVM exit RIPs:
  4684. */
  4685. if (unlikely(prof_on == KVM_PROFILING)) {
  4686. unsigned long rip = kvm_rip_read(vcpu);
  4687. profile_hit(KVM_PROFILING, (void *)rip);
  4688. }
  4689. if (unlikely(vcpu->arch.tsc_always_catchup))
  4690. kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
  4691. if (vcpu->arch.apic_attention)
  4692. kvm_lapic_sync_from_vapic(vcpu);
  4693. r = kvm_x86_ops->handle_exit(vcpu);
  4694. return r;
  4695. cancel_injection:
  4696. kvm_x86_ops->cancel_injection(vcpu);
  4697. if (unlikely(vcpu->arch.apic_attention))
  4698. kvm_lapic_sync_from_vapic(vcpu);
  4699. out:
  4700. return r;
  4701. }
  4702. static int __vcpu_run(struct kvm_vcpu *vcpu)
  4703. {
  4704. int r;
  4705. struct kvm *kvm = vcpu->kvm;
  4706. if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_SIPI_RECEIVED)) {
  4707. pr_debug("vcpu %d received sipi with vector # %x\n",
  4708. vcpu->vcpu_id, vcpu->arch.sipi_vector);
  4709. kvm_lapic_reset(vcpu);
  4710. r = kvm_arch_vcpu_reset(vcpu);
  4711. if (r)
  4712. return r;
  4713. vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
  4714. }
  4715. vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
  4716. r = vapic_enter(vcpu);
  4717. if (r) {
  4718. srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
  4719. return r;
  4720. }
  4721. r = 1;
  4722. while (r > 0) {
  4723. if (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE &&
  4724. !vcpu->arch.apf.halted)
  4725. r = vcpu_enter_guest(vcpu);
  4726. else {
  4727. srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
  4728. kvm_vcpu_block(vcpu);
  4729. vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
  4730. if (kvm_check_request(KVM_REQ_UNHALT, vcpu))
  4731. {
  4732. switch(vcpu->arch.mp_state) {
  4733. case KVM_MP_STATE_HALTED:
  4734. vcpu->arch.mp_state =
  4735. KVM_MP_STATE_RUNNABLE;
  4736. case KVM_MP_STATE_RUNNABLE:
  4737. vcpu->arch.apf.halted = false;
  4738. break;
  4739. case KVM_MP_STATE_SIPI_RECEIVED:
  4740. default:
  4741. r = -EINTR;
  4742. break;
  4743. }
  4744. }
  4745. }
  4746. if (r <= 0)
  4747. break;
  4748. clear_bit(KVM_REQ_PENDING_TIMER, &vcpu->requests);
  4749. if (kvm_cpu_has_pending_timer(vcpu))
  4750. kvm_inject_pending_timer_irqs(vcpu);
  4751. if (dm_request_for_irq_injection(vcpu)) {
  4752. r = -EINTR;
  4753. vcpu->run->exit_reason = KVM_EXIT_INTR;
  4754. ++vcpu->stat.request_irq_exits;
  4755. }
  4756. kvm_check_async_pf_completion(vcpu);
  4757. if (signal_pending(current)) {
  4758. r = -EINTR;
  4759. vcpu->run->exit_reason = KVM_EXIT_INTR;
  4760. ++vcpu->stat.signal_exits;
  4761. }
  4762. if (need_resched()) {
  4763. srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
  4764. kvm_resched(vcpu);
  4765. vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
  4766. }
  4767. }
  4768. srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
  4769. vapic_exit(vcpu);
  4770. return r;
  4771. }
  4772. /*
  4773. * Implements the following, as a state machine:
  4774. *
  4775. * read:
  4776. * for each fragment
  4777. * write gpa, len
  4778. * exit
  4779. * copy data
  4780. * execute insn
  4781. *
  4782. * write:
  4783. * for each fragment
  4784. * write gpa, len
  4785. * copy data
  4786. * exit
  4787. */
  4788. static int complete_mmio(struct kvm_vcpu *vcpu)
  4789. {
  4790. struct kvm_run *run = vcpu->run;
  4791. struct kvm_mmio_fragment *frag;
  4792. int r;
  4793. if (!(vcpu->arch.pio.count || vcpu->mmio_needed))
  4794. return 1;
  4795. if (vcpu->mmio_needed) {
  4796. /* Complete previous fragment */
  4797. frag = &vcpu->mmio_fragments[vcpu->mmio_cur_fragment++];
  4798. if (!vcpu->mmio_is_write)
  4799. memcpy(frag->data, run->mmio.data, frag->len);
  4800. if (vcpu->mmio_cur_fragment == vcpu->mmio_nr_fragments) {
  4801. vcpu->mmio_needed = 0;
  4802. if (vcpu->mmio_is_write)
  4803. return 1;
  4804. vcpu->mmio_read_completed = 1;
  4805. goto done;
  4806. }
  4807. /* Initiate next fragment */
  4808. ++frag;
  4809. run->exit_reason = KVM_EXIT_MMIO;
  4810. run->mmio.phys_addr = frag->gpa;
  4811. if (vcpu->mmio_is_write)
  4812. memcpy(run->mmio.data, frag->data, frag->len);
  4813. run->mmio.len = frag->len;
  4814. run->mmio.is_write = vcpu->mmio_is_write;
  4815. return 0;
  4816. }
  4817. done:
  4818. vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
  4819. r = emulate_instruction(vcpu, EMULTYPE_NO_DECODE);
  4820. srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
  4821. if (r != EMULATE_DONE)
  4822. return 0;
  4823. return 1;
  4824. }
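/*
 * Illustrative userspace sketch (not part of the original file) of the
 * other half of the state machine documented above complete_mmio(): each
 * fragment of a wide MMIO access is reported to userspace as a separate
 * KVM_EXIT_MMIO exit, and userspace copies data out of (write) or into
 * (read) run->mmio.data before calling KVM_RUN again.  The device-model
 * helpers vmm_mmio_read()/vmm_mmio_write() are hypothetical, and this
 * code would live in a VMM built against <linux/kvm.h>, not in this file.
 */
static void example_handle_mmio_exit(int vcpu_fd, struct kvm_run *run)
{
	if (run->mmio.is_write)
		vmm_mmio_write(run->mmio.phys_addr,	/* hypothetical helper */
			       run->mmio.data, run->mmio.len);
	else
		vmm_mmio_read(run->mmio.phys_addr,	/* hypothetical helper */
			      run->mmio.data, run->mmio.len);

	ioctl(vcpu_fd, KVM_RUN, 0);	/* re-enters complete_mmio() above */
}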
  4825. int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
  4826. {
  4827. int r;
  4828. sigset_t sigsaved;
  4829. if (!tsk_used_math(current) && init_fpu(current))
  4830. return -ENOMEM;
  4831. if (vcpu->sigset_active)
  4832. sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
  4833. if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_UNINITIALIZED)) {
  4834. kvm_vcpu_block(vcpu);
  4835. clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
  4836. r = -EAGAIN;
  4837. goto out;
  4838. }
  4839. /* re-sync apic's tpr */
  4840. if (!irqchip_in_kernel(vcpu->kvm)) {
  4841. if (kvm_set_cr8(vcpu, kvm_run->cr8) != 0) {
  4842. r = -EINVAL;
  4843. goto out;
  4844. }
  4845. }
  4846. r = complete_mmio(vcpu);
  4847. if (r <= 0)
  4848. goto out;
  4849. r = __vcpu_run(vcpu);
  4850. out:
  4851. post_kvm_run_save(vcpu);
  4852. if (vcpu->sigset_active)
  4853. sigprocmask(SIG_SETMASK, &sigsaved, NULL);
  4854. return r;
  4855. }
  4856. int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
  4857. {
  4858. if (vcpu->arch.emulate_regs_need_sync_to_vcpu) {
  4859. /*
4860. * We are here if userspace calls get_regs() in the middle of
4861. * instruction emulation. The register state needs to be copied
4862. * back from the emulation context to the vcpu. Userspace shouldn't
4863. * do that normally, but some badly designed PV devices (the vmware
4864. * backdoor interface) need this to work.
  4865. */
  4866. struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt;
  4867. memcpy(vcpu->arch.regs, ctxt->regs, sizeof ctxt->regs);
  4868. vcpu->arch.emulate_regs_need_sync_to_vcpu = false;
  4869. }
  4870. regs->rax = kvm_register_read(vcpu, VCPU_REGS_RAX);
  4871. regs->rbx = kvm_register_read(vcpu, VCPU_REGS_RBX);
  4872. regs->rcx = kvm_register_read(vcpu, VCPU_REGS_RCX);
  4873. regs->rdx = kvm_register_read(vcpu, VCPU_REGS_RDX);
  4874. regs->rsi = kvm_register_read(vcpu, VCPU_REGS_RSI);
  4875. regs->rdi = kvm_register_read(vcpu, VCPU_REGS_RDI);
  4876. regs->rsp = kvm_register_read(vcpu, VCPU_REGS_RSP);
  4877. regs->rbp = kvm_register_read(vcpu, VCPU_REGS_RBP);
  4878. #ifdef CONFIG_X86_64
  4879. regs->r8 = kvm_register_read(vcpu, VCPU_REGS_R8);
  4880. regs->r9 = kvm_register_read(vcpu, VCPU_REGS_R9);
  4881. regs->r10 = kvm_register_read(vcpu, VCPU_REGS_R10);
  4882. regs->r11 = kvm_register_read(vcpu, VCPU_REGS_R11);
  4883. regs->r12 = kvm_register_read(vcpu, VCPU_REGS_R12);
  4884. regs->r13 = kvm_register_read(vcpu, VCPU_REGS_R13);
  4885. regs->r14 = kvm_register_read(vcpu, VCPU_REGS_R14);
  4886. regs->r15 = kvm_register_read(vcpu, VCPU_REGS_R15);
  4887. #endif
  4888. regs->rip = kvm_rip_read(vcpu);
  4889. regs->rflags = kvm_get_rflags(vcpu);
  4890. return 0;
  4891. }
  4892. int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
  4893. {
  4894. vcpu->arch.emulate_regs_need_sync_from_vcpu = true;
  4895. vcpu->arch.emulate_regs_need_sync_to_vcpu = false;
  4896. kvm_register_write(vcpu, VCPU_REGS_RAX, regs->rax);
  4897. kvm_register_write(vcpu, VCPU_REGS_RBX, regs->rbx);
  4898. kvm_register_write(vcpu, VCPU_REGS_RCX, regs->rcx);
  4899. kvm_register_write(vcpu, VCPU_REGS_RDX, regs->rdx);
  4900. kvm_register_write(vcpu, VCPU_REGS_RSI, regs->rsi);
  4901. kvm_register_write(vcpu, VCPU_REGS_RDI, regs->rdi);
  4902. kvm_register_write(vcpu, VCPU_REGS_RSP, regs->rsp);
  4903. kvm_register_write(vcpu, VCPU_REGS_RBP, regs->rbp);
  4904. #ifdef CONFIG_X86_64
  4905. kvm_register_write(vcpu, VCPU_REGS_R8, regs->r8);
  4906. kvm_register_write(vcpu, VCPU_REGS_R9, regs->r9);
  4907. kvm_register_write(vcpu, VCPU_REGS_R10, regs->r10);
  4908. kvm_register_write(vcpu, VCPU_REGS_R11, regs->r11);
  4909. kvm_register_write(vcpu, VCPU_REGS_R12, regs->r12);
  4910. kvm_register_write(vcpu, VCPU_REGS_R13, regs->r13);
  4911. kvm_register_write(vcpu, VCPU_REGS_R14, regs->r14);
  4912. kvm_register_write(vcpu, VCPU_REGS_R15, regs->r15);
  4913. #endif
  4914. kvm_rip_write(vcpu, regs->rip);
  4915. kvm_set_rflags(vcpu, regs->rflags);
  4916. vcpu->arch.exception.pending = false;
  4917. kvm_make_request(KVM_REQ_EVENT, vcpu);
  4918. return 0;
  4919. }
  4920. void kvm_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l)
  4921. {
  4922. struct kvm_segment cs;
  4923. kvm_get_segment(vcpu, &cs, VCPU_SREG_CS);
  4924. *db = cs.db;
  4925. *l = cs.l;
  4926. }
  4927. EXPORT_SYMBOL_GPL(kvm_get_cs_db_l_bits);
  4928. int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
  4929. struct kvm_sregs *sregs)
  4930. {
  4931. struct desc_ptr dt;
  4932. kvm_get_segment(vcpu, &sregs->cs, VCPU_SREG_CS);
  4933. kvm_get_segment(vcpu, &sregs->ds, VCPU_SREG_DS);
  4934. kvm_get_segment(vcpu, &sregs->es, VCPU_SREG_ES);
  4935. kvm_get_segment(vcpu, &sregs->fs, VCPU_SREG_FS);
  4936. kvm_get_segment(vcpu, &sregs->gs, VCPU_SREG_GS);
  4937. kvm_get_segment(vcpu, &sregs->ss, VCPU_SREG_SS);
  4938. kvm_get_segment(vcpu, &sregs->tr, VCPU_SREG_TR);
  4939. kvm_get_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR);
  4940. kvm_x86_ops->get_idt(vcpu, &dt);
  4941. sregs->idt.limit = dt.size;
  4942. sregs->idt.base = dt.address;
  4943. kvm_x86_ops->get_gdt(vcpu, &dt);
  4944. sregs->gdt.limit = dt.size;
  4945. sregs->gdt.base = dt.address;
  4946. sregs->cr0 = kvm_read_cr0(vcpu);
  4947. sregs->cr2 = vcpu->arch.cr2;
  4948. sregs->cr3 = kvm_read_cr3(vcpu);
  4949. sregs->cr4 = kvm_read_cr4(vcpu);
  4950. sregs->cr8 = kvm_get_cr8(vcpu);
  4951. sregs->efer = vcpu->arch.efer;
  4952. sregs->apic_base = kvm_get_apic_base(vcpu);
  4953. memset(sregs->interrupt_bitmap, 0, sizeof sregs->interrupt_bitmap);
  4954. if (vcpu->arch.interrupt.pending && !vcpu->arch.interrupt.soft)
  4955. set_bit(vcpu->arch.interrupt.nr,
  4956. (unsigned long *)sregs->interrupt_bitmap);
  4957. return 0;
  4958. }
  4959. int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
  4960. struct kvm_mp_state *mp_state)
  4961. {
  4962. mp_state->mp_state = vcpu->arch.mp_state;
  4963. return 0;
  4964. }
  4965. int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
  4966. struct kvm_mp_state *mp_state)
  4967. {
  4968. vcpu->arch.mp_state = mp_state->mp_state;
  4969. kvm_make_request(KVM_REQ_EVENT, vcpu);
  4970. return 0;
  4971. }
  4972. int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int idt_index,
  4973. int reason, bool has_error_code, u32 error_code)
  4974. {
  4975. struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt;
  4976. int ret;
  4977. init_emulate_ctxt(vcpu);
  4978. ret = emulator_task_switch(ctxt, tss_selector, idt_index, reason,
  4979. has_error_code, error_code);
  4980. if (ret)
  4981. return EMULATE_FAIL;
  4982. memcpy(vcpu->arch.regs, ctxt->regs, sizeof ctxt->regs);
  4983. kvm_rip_write(vcpu, ctxt->eip);
  4984. kvm_set_rflags(vcpu, ctxt->eflags);
  4985. kvm_make_request(KVM_REQ_EVENT, vcpu);
  4986. return EMULATE_DONE;
  4987. }
  4988. EXPORT_SYMBOL_GPL(kvm_task_switch);
  4989. int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
  4990. struct kvm_sregs *sregs)
  4991. {
  4992. int mmu_reset_needed = 0;
  4993. int pending_vec, max_bits, idx;
  4994. struct desc_ptr dt;
  4995. dt.size = sregs->idt.limit;
  4996. dt.address = sregs->idt.base;
  4997. kvm_x86_ops->set_idt(vcpu, &dt);
  4998. dt.size = sregs->gdt.limit;
  4999. dt.address = sregs->gdt.base;
  5000. kvm_x86_ops->set_gdt(vcpu, &dt);
  5001. vcpu->arch.cr2 = sregs->cr2;
  5002. mmu_reset_needed |= kvm_read_cr3(vcpu) != sregs->cr3;
  5003. vcpu->arch.cr3 = sregs->cr3;
  5004. __set_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail);
  5005. kvm_set_cr8(vcpu, sregs->cr8);
  5006. mmu_reset_needed |= vcpu->arch.efer != sregs->efer;
  5007. kvm_x86_ops->set_efer(vcpu, sregs->efer);
  5008. kvm_set_apic_base(vcpu, sregs->apic_base);
  5009. mmu_reset_needed |= kvm_read_cr0(vcpu) != sregs->cr0;
  5010. kvm_x86_ops->set_cr0(vcpu, sregs->cr0);
  5011. vcpu->arch.cr0 = sregs->cr0;
  5012. mmu_reset_needed |= kvm_read_cr4(vcpu) != sregs->cr4;
  5013. kvm_x86_ops->set_cr4(vcpu, sregs->cr4);
  5014. if (sregs->cr4 & X86_CR4_OSXSAVE)
  5015. kvm_update_cpuid(vcpu);
  5016. idx = srcu_read_lock(&vcpu->kvm->srcu);
  5017. if (!is_long_mode(vcpu) && is_pae(vcpu)) {
  5018. load_pdptrs(vcpu, vcpu->arch.walk_mmu, kvm_read_cr3(vcpu));
  5019. mmu_reset_needed = 1;
  5020. }
  5021. srcu_read_unlock(&vcpu->kvm->srcu, idx);
  5022. if (mmu_reset_needed)
  5023. kvm_mmu_reset_context(vcpu);
  5024. max_bits = (sizeof sregs->interrupt_bitmap) << 3;
  5025. pending_vec = find_first_bit(
  5026. (const unsigned long *)sregs->interrupt_bitmap, max_bits);
  5027. if (pending_vec < max_bits) {
  5028. kvm_queue_interrupt(vcpu, pending_vec, false);
  5029. pr_debug("Set back pending irq %d\n", pending_vec);
  5030. }
  5031. kvm_set_segment(vcpu, &sregs->cs, VCPU_SREG_CS);
  5032. kvm_set_segment(vcpu, &sregs->ds, VCPU_SREG_DS);
  5033. kvm_set_segment(vcpu, &sregs->es, VCPU_SREG_ES);
  5034. kvm_set_segment(vcpu, &sregs->fs, VCPU_SREG_FS);
  5035. kvm_set_segment(vcpu, &sregs->gs, VCPU_SREG_GS);
  5036. kvm_set_segment(vcpu, &sregs->ss, VCPU_SREG_SS);
  5037. kvm_set_segment(vcpu, &sregs->tr, VCPU_SREG_TR);
  5038. kvm_set_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR);
  5039. update_cr8_intercept(vcpu);
  5040. /* Older userspace won't unhalt the vcpu on reset. */
  5041. if (kvm_vcpu_is_bsp(vcpu) && kvm_rip_read(vcpu) == 0xfff0 &&
  5042. sregs->cs.selector == 0xf000 && sregs->cs.base == 0xffff0000 &&
  5043. !is_protmode(vcpu))
  5044. vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
  5045. kvm_make_request(KVM_REQ_EVENT, vcpu);
  5046. return 0;
  5047. }
  5048. int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
  5049. struct kvm_guest_debug *dbg)
  5050. {
  5051. unsigned long rflags;
  5052. int i, r;
  5053. if (dbg->control & (KVM_GUESTDBG_INJECT_DB | KVM_GUESTDBG_INJECT_BP)) {
  5054. r = -EBUSY;
  5055. if (vcpu->arch.exception.pending)
  5056. goto out;
  5057. if (dbg->control & KVM_GUESTDBG_INJECT_DB)
  5058. kvm_queue_exception(vcpu, DB_VECTOR);
  5059. else
  5060. kvm_queue_exception(vcpu, BP_VECTOR);
  5061. }
  5062. /*
  5063. * Read rflags as long as potentially injected trace flags are still
  5064. * filtered out.
  5065. */
  5066. rflags = kvm_get_rflags(vcpu);
  5067. vcpu->guest_debug = dbg->control;
  5068. if (!(vcpu->guest_debug & KVM_GUESTDBG_ENABLE))
  5069. vcpu->guest_debug = 0;
  5070. if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) {
  5071. for (i = 0; i < KVM_NR_DB_REGS; ++i)
  5072. vcpu->arch.eff_db[i] = dbg->arch.debugreg[i];
  5073. vcpu->arch.switch_db_regs =
  5074. (dbg->arch.debugreg[7] & DR7_BP_EN_MASK);
  5075. } else {
  5076. for (i = 0; i < KVM_NR_DB_REGS; i++)
  5077. vcpu->arch.eff_db[i] = vcpu->arch.db[i];
  5078. vcpu->arch.switch_db_regs = (vcpu->arch.dr7 & DR7_BP_EN_MASK);
  5079. }
  5080. if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
  5081. vcpu->arch.singlestep_rip = kvm_rip_read(vcpu) +
  5082. get_segment_base(vcpu, VCPU_SREG_CS);
  5083. /*
  5084. * Trigger an rflags update that will inject or remove the trace
  5085. * flags.
  5086. */
  5087. kvm_set_rflags(vcpu, rflags);
  5088. kvm_x86_ops->set_guest_debug(vcpu, dbg);
  5089. r = 0;
  5090. out:
  5091. return r;
  5092. }
  5093. /*
  5094. * Translate a guest virtual address to a guest physical address.
  5095. */
  5096. int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
  5097. struct kvm_translation *tr)
  5098. {
  5099. unsigned long vaddr = tr->linear_address;
  5100. gpa_t gpa;
  5101. int idx;
  5102. idx = srcu_read_lock(&vcpu->kvm->srcu);
  5103. gpa = kvm_mmu_gva_to_gpa_system(vcpu, vaddr, NULL);
  5104. srcu_read_unlock(&vcpu->kvm->srcu, idx);
  5105. tr->physical_address = gpa;
  5106. tr->valid = gpa != UNMAPPED_GVA;
  5107. tr->writeable = 1;
  5108. tr->usermode = 0;
  5109. return 0;
  5110. }
  5111. int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
  5112. {
  5113. struct i387_fxsave_struct *fxsave =
  5114. &vcpu->arch.guest_fpu.state->fxsave;
  5115. memcpy(fpu->fpr, fxsave->st_space, 128);
  5116. fpu->fcw = fxsave->cwd;
  5117. fpu->fsw = fxsave->swd;
  5118. fpu->ftwx = fxsave->twd;
  5119. fpu->last_opcode = fxsave->fop;
  5120. fpu->last_ip = fxsave->rip;
  5121. fpu->last_dp = fxsave->rdp;
  5122. memcpy(fpu->xmm, fxsave->xmm_space, sizeof fxsave->xmm_space);
  5123. return 0;
  5124. }
  5125. int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
  5126. {
  5127. struct i387_fxsave_struct *fxsave =
  5128. &vcpu->arch.guest_fpu.state->fxsave;
  5129. memcpy(fxsave->st_space, fpu->fpr, 128);
  5130. fxsave->cwd = fpu->fcw;
  5131. fxsave->swd = fpu->fsw;
  5132. fxsave->twd = fpu->ftwx;
  5133. fxsave->fop = fpu->last_opcode;
  5134. fxsave->rip = fpu->last_ip;
  5135. fxsave->rdp = fpu->last_dp;
  5136. memcpy(fxsave->xmm_space, fpu->xmm, sizeof fxsave->xmm_space);
  5137. return 0;
  5138. }
  5139. int fx_init(struct kvm_vcpu *vcpu)
  5140. {
  5141. int err;
  5142. err = fpu_alloc(&vcpu->arch.guest_fpu);
  5143. if (err)
  5144. return err;
  5145. fpu_finit(&vcpu->arch.guest_fpu);
  5146. /*
  5147. * Ensure guest xcr0 is valid for loading
  5148. */
  5149. vcpu->arch.xcr0 = XSTATE_FP;
  5150. vcpu->arch.cr0 |= X86_CR0_ET;
  5151. return 0;
  5152. }
  5153. EXPORT_SYMBOL_GPL(fx_init);
  5154. static void fx_free(struct kvm_vcpu *vcpu)
  5155. {
  5156. fpu_free(&vcpu->arch.guest_fpu);
  5157. }
  5158. void kvm_load_guest_fpu(struct kvm_vcpu *vcpu)
  5159. {
  5160. if (vcpu->guest_fpu_loaded)
  5161. return;
  5162. /*
5163. * Restore all possible states in the guest,
5164. * and assume the host would use all available bits.
5165. * The guest xcr0 will be loaded later.
  5166. */
  5167. kvm_put_guest_xcr0(vcpu);
  5168. vcpu->guest_fpu_loaded = 1;
  5169. unlazy_fpu(current);
  5170. fpu_restore_checking(&vcpu->arch.guest_fpu);
  5171. trace_kvm_fpu(1);
  5172. }
  5173. void kvm_put_guest_fpu(struct kvm_vcpu *vcpu)
  5174. {
  5175. kvm_put_guest_xcr0(vcpu);
  5176. if (!vcpu->guest_fpu_loaded)
  5177. return;
  5178. vcpu->guest_fpu_loaded = 0;
  5179. fpu_save_init(&vcpu->arch.guest_fpu);
  5180. ++vcpu->stat.fpu_reload;
  5181. kvm_make_request(KVM_REQ_DEACTIVATE_FPU, vcpu);
  5182. trace_kvm_fpu(0);
  5183. }
  5184. void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
  5185. {
  5186. kvmclock_reset(vcpu);
  5187. free_cpumask_var(vcpu->arch.wbinvd_dirty_mask);
  5188. fx_free(vcpu);
  5189. kvm_x86_ops->vcpu_free(vcpu);
  5190. }
  5191. struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
  5192. unsigned int id)
  5193. {
  5194. if (check_tsc_unstable() && atomic_read(&kvm->online_vcpus) != 0)
  5195. printk_once(KERN_WARNING
  5196. "kvm: SMP vm created on host with unstable TSC; "
  5197. "guest TSC will not be reliable\n");
  5198. return kvm_x86_ops->vcpu_create(kvm, id);
  5199. }
  5200. int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
  5201. {
  5202. int r;
  5203. vcpu->arch.mtrr_state.have_fixed = 1;
  5204. vcpu_load(vcpu);
  5205. r = kvm_arch_vcpu_reset(vcpu);
  5206. if (r == 0)
  5207. r = kvm_mmu_setup(vcpu);
  5208. vcpu_put(vcpu);
  5209. return r;
  5210. }
  5211. void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
  5212. {
  5213. vcpu->arch.apf.msr_val = 0;
  5214. vcpu_load(vcpu);
  5215. kvm_mmu_unload(vcpu);
  5216. vcpu_put(vcpu);
  5217. fx_free(vcpu);
  5218. kvm_x86_ops->vcpu_free(vcpu);
  5219. }
  5220. int kvm_arch_vcpu_reset(struct kvm_vcpu *vcpu)
  5221. {
  5222. atomic_set(&vcpu->arch.nmi_queued, 0);
  5223. vcpu->arch.nmi_pending = 0;
  5224. vcpu->arch.nmi_injected = false;
  5225. vcpu->arch.switch_db_regs = 0;
  5226. memset(vcpu->arch.db, 0, sizeof(vcpu->arch.db));
  5227. vcpu->arch.dr6 = DR6_FIXED_1;
  5228. vcpu->arch.dr7 = DR7_FIXED_1;
  5229. kvm_make_request(KVM_REQ_EVENT, vcpu);
  5230. vcpu->arch.apf.msr_val = 0;
  5231. vcpu->arch.st.msr_val = 0;
  5232. kvmclock_reset(vcpu);
  5233. kvm_clear_async_pf_completion_queue(vcpu);
  5234. kvm_async_pf_hash_reset(vcpu);
  5235. vcpu->arch.apf.halted = false;
  5236. kvm_pmu_reset(vcpu);
  5237. return kvm_x86_ops->vcpu_reset(vcpu);
  5238. }
  5239. int kvm_arch_hardware_enable(void *garbage)
  5240. {
  5241. struct kvm *kvm;
  5242. struct kvm_vcpu *vcpu;
  5243. int i;
  5244. int ret;
  5245. u64 local_tsc;
  5246. u64 max_tsc = 0;
  5247. bool stable, backwards_tsc = false;
  5248. kvm_shared_msr_cpu_online();
  5249. ret = kvm_x86_ops->hardware_enable(garbage);
  5250. if (ret != 0)
  5251. return ret;
  5252. local_tsc = native_read_tsc();
  5253. stable = !check_tsc_unstable();
  5254. list_for_each_entry(kvm, &vm_list, vm_list) {
  5255. kvm_for_each_vcpu(i, vcpu, kvm) {
  5256. if (!stable && vcpu->cpu == smp_processor_id())
  5257. set_bit(KVM_REQ_CLOCK_UPDATE, &vcpu->requests);
  5258. if (stable && vcpu->arch.last_host_tsc > local_tsc) {
  5259. backwards_tsc = true;
  5260. if (vcpu->arch.last_host_tsc > max_tsc)
  5261. max_tsc = vcpu->arch.last_host_tsc;
  5262. }
  5263. }
  5264. }
  5265. /*
  5266. * Sometimes, even reliable TSCs go backwards. This happens on
  5267. * platforms that reset TSC during suspend or hibernate actions, but
  5268. * maintain synchronization. We must compensate. Fortunately, we can
  5269. * detect that condition here, which happens early in CPU bringup,
  5270. * before any KVM threads can be running. Unfortunately, we can't
  5271. * bring the TSCs fully up to date with real time, as we aren't yet far
  5272. * enough into CPU bringup that we know how much real time has actually
  5273. * elapsed; our helper function, get_kernel_ns() will be using boot
  5274. * variables that haven't been updated yet.
  5275. *
  5276. * So we simply find the maximum observed TSC above, then record the
  5277. * adjustment to TSC in each VCPU. When the VCPU later gets loaded,
  5278. * the adjustment will be applied. Note that we accumulate
  5279. * adjustments, in case multiple suspend cycles happen before some VCPU
  5280. * gets a chance to run again. In the event that no KVM threads get a
  5281. * chance to run, we will miss the entire elapsed period, as we'll have
  5282. * reset last_host_tsc, so VCPUs will not have the TSC adjusted and may
5283. * lose cycle time. This isn't too big a deal, since the loss will be
  5284. * uniform across all VCPUs (not to mention the scenario is extremely
  5285. * unlikely). It is possible that a second hibernate recovery happens
  5286. * much faster than a first, causing the observed TSC here to be
  5287. * smaller; this would require additional padding adjustment, which is
  5288. * why we set last_host_tsc to the local tsc observed here.
  5289. *
  5290. * N.B. - this code below runs only on platforms with reliable TSC,
  5291. * as that is the only way backwards_tsc is set above. Also note
  5292. * that this runs for ALL vcpus, which is not a bug; all VCPUs should
  5293. * have the same delta_cyc adjustment applied if backwards_tsc
  5294. * is detected. Note further, this adjustment is only done once,
  5295. * as we reset last_host_tsc on all VCPUs to stop this from being
  5296. * called multiple times (one for each physical CPU bringup).
  5297. *
5298. * Platforms with unreliable TSCs don't have to deal with this, they
  5299. * will be compensated by the logic in vcpu_load, which sets the TSC to
5300. * catchup mode. This will catch up all VCPUs to real time, but cannot
  5301. * guarantee that they stay in perfect synchronization.
  5302. */
  5303. if (backwards_tsc) {
  5304. u64 delta_cyc = max_tsc - local_tsc;
  5305. list_for_each_entry(kvm, &vm_list, vm_list) {
  5306. kvm_for_each_vcpu(i, vcpu, kvm) {
  5307. vcpu->arch.tsc_offset_adjustment += delta_cyc;
  5308. vcpu->arch.last_host_tsc = local_tsc;
  5309. }
  5310. /*
5311. * We have to disable TSC offset matching; if you were
5312. * booting a VM while issuing an S4 host suspend,
5313. * you may have a problem. Solving this issue is
5314. * left as an exercise to the reader.
  5315. */
  5316. kvm->arch.last_tsc_nsec = 0;
  5317. kvm->arch.last_tsc_write = 0;
  5318. }
  5319. }
  5320. return 0;
  5321. }
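/*
 * Worked example (illustrative, not part of the original file) of the
 * compensation above: suppose that across an S4 cycle the host TSC is
 * reset, the largest last_host_tsc recorded by any VCPU was 1,000,000,000
 * cycles, and local_tsc reads 50,000,000 on bring-up.  Then backwards_tsc
 * is set, delta_cyc = 950,000,000 is accumulated into every VCPU's
 * tsc_offset_adjustment, and the guest-visible TSC never appears to jump
 * backwards even though the host TSC did.
 */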
  5322. void kvm_arch_hardware_disable(void *garbage)
  5323. {
  5324. kvm_x86_ops->hardware_disable(garbage);
  5325. drop_user_return_notifiers(garbage);
  5326. }
  5327. int kvm_arch_hardware_setup(void)
  5328. {
  5329. return kvm_x86_ops->hardware_setup();
  5330. }
  5331. void kvm_arch_hardware_unsetup(void)
  5332. {
  5333. kvm_x86_ops->hardware_unsetup();
  5334. }
  5335. void kvm_arch_check_processor_compat(void *rtn)
  5336. {
  5337. kvm_x86_ops->check_processor_compatibility(rtn);
  5338. }
  5339. bool kvm_vcpu_compatible(struct kvm_vcpu *vcpu)
  5340. {
  5341. return irqchip_in_kernel(vcpu->kvm) == (vcpu->arch.apic != NULL);
  5342. }
  5343. int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
  5344. {
  5345. struct page *page;
  5346. struct kvm *kvm;
  5347. int r;
  5348. BUG_ON(vcpu->kvm == NULL);
  5349. kvm = vcpu->kvm;
  5350. vcpu->arch.emulate_ctxt.ops = &emulate_ops;
  5351. if (!irqchip_in_kernel(kvm) || kvm_vcpu_is_bsp(vcpu))
  5352. vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
  5353. else
  5354. vcpu->arch.mp_state = KVM_MP_STATE_UNINITIALIZED;
  5355. page = alloc_page(GFP_KERNEL | __GFP_ZERO);
  5356. if (!page) {
  5357. r = -ENOMEM;
  5358. goto fail;
  5359. }
  5360. vcpu->arch.pio_data = page_address(page);
  5361. kvm_set_tsc_khz(vcpu, max_tsc_khz);
  5362. r = kvm_mmu_create(vcpu);
  5363. if (r < 0)
  5364. goto fail_free_pio_data;
  5365. if (irqchip_in_kernel(kvm)) {
  5366. r = kvm_create_lapic(vcpu);
  5367. if (r < 0)
  5368. goto fail_mmu_destroy;
  5369. }
  5370. vcpu->arch.mce_banks = kzalloc(KVM_MAX_MCE_BANKS * sizeof(u64) * 4,
  5371. GFP_KERNEL);
  5372. if (!vcpu->arch.mce_banks) {
  5373. r = -ENOMEM;
  5374. goto fail_free_lapic;
  5375. }
  5376. vcpu->arch.mcg_cap = KVM_MAX_MCE_BANKS;
  5377. if (!zalloc_cpumask_var(&vcpu->arch.wbinvd_dirty_mask, GFP_KERNEL))
  5378. goto fail_free_mce_banks;
  5379. kvm_async_pf_hash_reset(vcpu);
  5380. kvm_pmu_init(vcpu);
  5381. return 0;
  5382. fail_free_mce_banks:
  5383. kfree(vcpu->arch.mce_banks);
  5384. fail_free_lapic:
  5385. kvm_free_lapic(vcpu);
  5386. fail_mmu_destroy:
  5387. kvm_mmu_destroy(vcpu);
  5388. fail_free_pio_data:
  5389. free_page((unsigned long)vcpu->arch.pio_data);
  5390. fail:
  5391. return r;
  5392. }
  5393. void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
  5394. {
  5395. int idx;
  5396. kvm_pmu_destroy(vcpu);
  5397. kfree(vcpu->arch.mce_banks);
  5398. kvm_free_lapic(vcpu);
  5399. idx = srcu_read_lock(&vcpu->kvm->srcu);
  5400. kvm_mmu_destroy(vcpu);
  5401. srcu_read_unlock(&vcpu->kvm->srcu, idx);
  5402. free_page((unsigned long)vcpu->arch.pio_data);
  5403. }
  5404. int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
  5405. {
  5406. if (type)
  5407. return -EINVAL;
  5408. INIT_LIST_HEAD(&kvm->arch.active_mmu_pages);
  5409. INIT_LIST_HEAD(&kvm->arch.assigned_dev_head);
  5410. /* Reserve bit 0 of irq_sources_bitmap for userspace irq source */
  5411. set_bit(KVM_USERSPACE_IRQ_SOURCE_ID, &kvm->arch.irq_sources_bitmap);
  5412. raw_spin_lock_init(&kvm->arch.tsc_write_lock);
  5413. return 0;
  5414. }
  5415. static void kvm_unload_vcpu_mmu(struct kvm_vcpu *vcpu)
  5416. {
  5417. vcpu_load(vcpu);
  5418. kvm_mmu_unload(vcpu);
  5419. vcpu_put(vcpu);
  5420. }
  5421. static void kvm_free_vcpus(struct kvm *kvm)
  5422. {
  5423. unsigned int i;
  5424. struct kvm_vcpu *vcpu;
  5425. /*
  5426. * Unpin any mmu pages first.
  5427. */
  5428. kvm_for_each_vcpu(i, vcpu, kvm) {
  5429. kvm_clear_async_pf_completion_queue(vcpu);
  5430. kvm_unload_vcpu_mmu(vcpu);
  5431. }
  5432. kvm_for_each_vcpu(i, vcpu, kvm)
  5433. kvm_arch_vcpu_free(vcpu);
  5434. mutex_lock(&kvm->lock);
  5435. for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
  5436. kvm->vcpus[i] = NULL;
  5437. atomic_set(&kvm->online_vcpus, 0);
  5438. mutex_unlock(&kvm->lock);
  5439. }
  5440. void kvm_arch_sync_events(struct kvm *kvm)
  5441. {
  5442. kvm_free_all_assigned_devices(kvm);
  5443. kvm_free_pit(kvm);
  5444. }
  5445. void kvm_arch_destroy_vm(struct kvm *kvm)
  5446. {
  5447. kvm_iommu_unmap_guest(kvm);
  5448. kfree(kvm->arch.vpic);
  5449. kfree(kvm->arch.vioapic);
  5450. kvm_free_vcpus(kvm);
  5451. if (kvm->arch.apic_access_page)
  5452. put_page(kvm->arch.apic_access_page);
  5453. if (kvm->arch.ept_identity_pagetable)
  5454. put_page(kvm->arch.ept_identity_pagetable);
  5455. }
  5456. void kvm_arch_free_memslot(struct kvm_memory_slot *free,
  5457. struct kvm_memory_slot *dont)
  5458. {
  5459. int i;
  5460. for (i = 0; i < KVM_NR_PAGE_SIZES - 1; ++i) {
  5461. if (!dont || free->arch.lpage_info[i] != dont->arch.lpage_info[i]) {
  5462. kvm_kvfree(free->arch.lpage_info[i]);
  5463. free->arch.lpage_info[i] = NULL;
  5464. }
  5465. }
  5466. }
  5467. int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages)
  5468. {
  5469. int i;
  5470. for (i = 0; i < KVM_NR_PAGE_SIZES - 1; ++i) {
  5471. unsigned long ugfn;
  5472. int lpages;
  5473. int level = i + 2;
  5474. lpages = gfn_to_index(slot->base_gfn + npages - 1,
  5475. slot->base_gfn, level) + 1;
  5476. slot->arch.lpage_info[i] =
  5477. kvm_kvzalloc(lpages * sizeof(*slot->arch.lpage_info[i]));
  5478. if (!slot->arch.lpage_info[i])
  5479. goto out_free;
  5480. if (slot->base_gfn & (KVM_PAGES_PER_HPAGE(level) - 1))
  5481. slot->arch.lpage_info[i][0].write_count = 1;
  5482. if ((slot->base_gfn + npages) & (KVM_PAGES_PER_HPAGE(level) - 1))
  5483. slot->arch.lpage_info[i][lpages - 1].write_count = 1;
  5484. ugfn = slot->userspace_addr >> PAGE_SHIFT;
  5485. /*
  5486. * If the gfn and userspace address are not aligned wrt each
  5487. * other, or if explicitly asked to, disable large page
  5488. * support for this slot
  5489. */
  5490. if ((slot->base_gfn ^ ugfn) & (KVM_PAGES_PER_HPAGE(level) - 1) ||
  5491. !kvm_largepages_enabled()) {
  5492. unsigned long j;
  5493. for (j = 0; j < lpages; ++j)
  5494. slot->arch.lpage_info[i][j].write_count = 1;
  5495. }
  5496. }
  5497. return 0;
  5498. out_free:
  5499. for (i = 0; i < KVM_NR_PAGE_SIZES - 1; ++i) {
  5500. kvm_kvfree(slot->arch.lpage_info[i]);
  5501. slot->arch.lpage_info[i] = NULL;
  5502. }
  5503. return -ENOMEM;
  5504. }
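/*
 * Worked example (illustrative, not part of the original file) of the
 * large-page bookkeeping above, for the 2MB level (512 gfns per huge
 * page): a slot with base_gfn = 0x1001 and npages = 0x800 spans
 * lpages = 5 huge-page frames.  Since base_gfn is not 512-aligned the
 * first entry gets write_count = 1, and since base_gfn + npages is not
 * 512-aligned either the last entry does too, so only the three fully
 * covered huge pages in the middle may ever be mapped as large pages.
 */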
  5505. int kvm_arch_prepare_memory_region(struct kvm *kvm,
  5506. struct kvm_memory_slot *memslot,
  5507. struct kvm_memory_slot old,
  5508. struct kvm_userspace_memory_region *mem,
  5509. int user_alloc)
  5510. {
  5511. int npages = memslot->npages;
  5512. int map_flags = MAP_PRIVATE | MAP_ANONYMOUS;
  5513. /* Prevent internal slot pages from being moved by fork()/COW. */
  5514. if (memslot->id >= KVM_MEMORY_SLOTS)
  5515. map_flags = MAP_SHARED | MAP_ANONYMOUS;
5516. /* To keep backward compatibility with older userspace,
5517. * x86 needs to handle the !user_alloc case.
  5518. */
  5519. if (!user_alloc) {
  5520. if (npages && !old.rmap) {
  5521. unsigned long userspace_addr;
  5522. userspace_addr = vm_mmap(NULL, 0,
  5523. npages * PAGE_SIZE,
  5524. PROT_READ | PROT_WRITE,
  5525. map_flags,
  5526. 0);
  5527. if (IS_ERR((void *)userspace_addr))
  5528. return PTR_ERR((void *)userspace_addr);
  5529. memslot->userspace_addr = userspace_addr;
  5530. }
  5531. }
  5532. return 0;
  5533. }
  5534. void kvm_arch_commit_memory_region(struct kvm *kvm,
  5535. struct kvm_userspace_memory_region *mem,
  5536. struct kvm_memory_slot old,
  5537. int user_alloc)
  5538. {
  5539. int nr_mmu_pages = 0, npages = mem->memory_size >> PAGE_SHIFT;
  5540. if (!user_alloc && !old.user_alloc && old.rmap && !npages) {
  5541. int ret;
  5542. ret = vm_munmap(old.userspace_addr,
  5543. old.npages * PAGE_SIZE);
  5544. if (ret < 0)
  5545. printk(KERN_WARNING
  5546. "kvm_vm_ioctl_set_memory_region: "
  5547. "failed to munmap memory\n");
  5548. }
  5549. if (!kvm->arch.n_requested_mmu_pages)
  5550. nr_mmu_pages = kvm_mmu_calculate_mmu_pages(kvm);
  5551. spin_lock(&kvm->mmu_lock);
  5552. if (nr_mmu_pages)
  5553. kvm_mmu_change_mmu_pages(kvm, nr_mmu_pages);
  5554. kvm_mmu_slot_remove_write_access(kvm, mem->slot);
  5555. spin_unlock(&kvm->mmu_lock);
  5556. }
  5557. void kvm_arch_flush_shadow(struct kvm *kvm)
  5558. {
  5559. kvm_mmu_zap_all(kvm);
  5560. kvm_reload_remote_mmus(kvm);
  5561. }
  5562. int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
  5563. {
  5564. return (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE &&
  5565. !vcpu->arch.apf.halted)
  5566. || !list_empty_careful(&vcpu->async_pf.done)
  5567. || vcpu->arch.mp_state == KVM_MP_STATE_SIPI_RECEIVED
  5568. || atomic_read(&vcpu->arch.nmi_queued) ||
  5569. (kvm_arch_interrupt_allowed(vcpu) &&
  5570. kvm_cpu_has_interrupt(vcpu));
  5571. }
  5572. int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
  5573. {
  5574. return kvm_vcpu_exiting_guest_mode(vcpu) == IN_GUEST_MODE;
  5575. }
  5576. int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu)
  5577. {
  5578. return kvm_x86_ops->interrupt_allowed(vcpu);
  5579. }
  5580. bool kvm_is_linear_rip(struct kvm_vcpu *vcpu, unsigned long linear_rip)
  5581. {
  5582. unsigned long current_rip = kvm_rip_read(vcpu) +
  5583. get_segment_base(vcpu, VCPU_SREG_CS);
  5584. return current_rip == linear_rip;
  5585. }
  5586. EXPORT_SYMBOL_GPL(kvm_is_linear_rip);
  5587. unsigned long kvm_get_rflags(struct kvm_vcpu *vcpu)
  5588. {
  5589. unsigned long rflags;
  5590. rflags = kvm_x86_ops->get_rflags(vcpu);
  5591. if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
  5592. rflags &= ~X86_EFLAGS_TF;
  5593. return rflags;
  5594. }
  5595. EXPORT_SYMBOL_GPL(kvm_get_rflags);
  5596. void kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
  5597. {
  5598. if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP &&
  5599. kvm_is_linear_rip(vcpu, vcpu->arch.singlestep_rip))
  5600. rflags |= X86_EFLAGS_TF;
  5601. kvm_x86_ops->set_rflags(vcpu, rflags);
  5602. kvm_make_request(KVM_REQ_EVENT, vcpu);
  5603. }
  5604. EXPORT_SYMBOL_GPL(kvm_set_rflags);
  5605. void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu, struct kvm_async_pf *work)
  5606. {
  5607. int r;
  5608. if ((vcpu->arch.mmu.direct_map != work->arch.direct_map) ||
  5609. is_error_page(work->page))
  5610. return;
  5611. r = kvm_mmu_reload(vcpu);
  5612. if (unlikely(r))
  5613. return;
  5614. if (!vcpu->arch.mmu.direct_map &&
  5615. work->arch.cr3 != vcpu->arch.mmu.get_cr3(vcpu))
  5616. return;
  5617. vcpu->arch.mmu.page_fault(vcpu, work->gva, 0, true);
  5618. }
  5619. static inline u32 kvm_async_pf_hash_fn(gfn_t gfn)
  5620. {
  5621. return hash_32(gfn & 0xffffffff, order_base_2(ASYNC_PF_PER_VCPU));
  5622. }
  5623. static inline u32 kvm_async_pf_next_probe(u32 key)
  5624. {
  5625. return (key + 1) & (roundup_pow_of_two(ASYNC_PF_PER_VCPU) - 1);
  5626. }
  5627. static void kvm_add_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
  5628. {
  5629. u32 key = kvm_async_pf_hash_fn(gfn);
  5630. while (vcpu->arch.apf.gfns[key] != ~0)
  5631. key = kvm_async_pf_next_probe(key);
  5632. vcpu->arch.apf.gfns[key] = gfn;
  5633. }
  5634. static u32 kvm_async_pf_gfn_slot(struct kvm_vcpu *vcpu, gfn_t gfn)
  5635. {
  5636. int i;
  5637. u32 key = kvm_async_pf_hash_fn(gfn);
  5638. for (i = 0; i < roundup_pow_of_two(ASYNC_PF_PER_VCPU) &&
  5639. (vcpu->arch.apf.gfns[key] != gfn &&
  5640. vcpu->arch.apf.gfns[key] != ~0); i++)
  5641. key = kvm_async_pf_next_probe(key);
  5642. return key;
  5643. }
  5644. bool kvm_find_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
  5645. {
  5646. return vcpu->arch.apf.gfns[kvm_async_pf_gfn_slot(vcpu, gfn)] == gfn;
  5647. }
  5648. static void kvm_del_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
  5649. {
  5650. u32 i, j, k;
  5651. i = j = kvm_async_pf_gfn_slot(vcpu, gfn);
  5652. while (true) {
  5653. vcpu->arch.apf.gfns[i] = ~0;
  5654. do {
  5655. j = kvm_async_pf_next_probe(j);
  5656. if (vcpu->arch.apf.gfns[j] == ~0)
  5657. return;
  5658. k = kvm_async_pf_hash_fn(vcpu->arch.apf.gfns[j]);
  5659. /*
  5660. * k lies cyclically in ]i,j]
  5661. * | i.k.j |
  5662. * |....j i.k.| or |.k..j i...|
  5663. */
  5664. } while ((i <= j) ? (i < k && k <= j) : (i < k || k <= j));
  5665. vcpu->arch.apf.gfns[i] = vcpu->arch.apf.gfns[j];
  5666. i = j;
  5667. }
  5668. }
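/*
 * Worked example (illustrative, not part of the original file) of the
 * open-addressed deletion above: suppose gfns A and B both hash to slot 5,
 * A was inserted at slot 5 and B probed forward to slot 6.  Deleting A
 * empties slot 5; the loop then inspects j = 6 and finds B, whose home
 * slot k = 5 does not lie cyclically in ]5, 6], so B is moved back into
 * slot 5 and the probe chain stays unbroken; the next slot is empty and
 * the walk terminates.
 */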
  5669. static int apf_put_user(struct kvm_vcpu *vcpu, u32 val)
  5670. {
  5671. return kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.apf.data, &val,
  5672. sizeof(val));
  5673. }
  5674. void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
  5675. struct kvm_async_pf *work)
  5676. {
  5677. struct x86_exception fault;
  5678. trace_kvm_async_pf_not_present(work->arch.token, work->gva);
  5679. kvm_add_async_pf_gfn(vcpu, work->arch.gfn);
  5680. if (!(vcpu->arch.apf.msr_val & KVM_ASYNC_PF_ENABLED) ||
  5681. (vcpu->arch.apf.send_user_only &&
  5682. kvm_x86_ops->get_cpl(vcpu) == 0))
  5683. kvm_make_request(KVM_REQ_APF_HALT, vcpu);
  5684. else if (!apf_put_user(vcpu, KVM_PV_REASON_PAGE_NOT_PRESENT)) {
  5685. fault.vector = PF_VECTOR;
  5686. fault.error_code_valid = true;
  5687. fault.error_code = 0;
  5688. fault.nested_page_fault = false;
  5689. fault.address = work->arch.token;
  5690. kvm_inject_page_fault(vcpu, &fault);
  5691. }
  5692. }
  5693. void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
  5694. struct kvm_async_pf *work)
  5695. {
  5696. struct x86_exception fault;
  5697. trace_kvm_async_pf_ready(work->arch.token, work->gva);
  5698. if (is_error_page(work->page))
  5699. work->arch.token = ~0; /* broadcast wakeup */
  5700. else
  5701. kvm_del_async_pf_gfn(vcpu, work->arch.gfn);
  5702. if ((vcpu->arch.apf.msr_val & KVM_ASYNC_PF_ENABLED) &&
  5703. !apf_put_user(vcpu, KVM_PV_REASON_PAGE_READY)) {
  5704. fault.vector = PF_VECTOR;
  5705. fault.error_code_valid = true;
  5706. fault.error_code = 0;
  5707. fault.nested_page_fault = false;
  5708. fault.address = work->arch.token;
  5709. kvm_inject_page_fault(vcpu, &fault);
  5710. }
  5711. vcpu->arch.apf.halted = false;
  5712. vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
  5713. }
  5714. bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu)
  5715. {
  5716. if (!(vcpu->arch.apf.msr_val & KVM_ASYNC_PF_ENABLED))
  5717. return true;
  5718. else
  5719. return !kvm_event_needs_reinjection(vcpu) &&
  5720. kvm_x86_ops->interrupt_allowed(vcpu);
  5721. }
  5722. EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_exit);
  5723. EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_inj_virq);
  5724. EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_page_fault);
  5725. EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_msr);
  5726. EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_cr);
  5727. EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_vmrun);
  5728. EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_vmexit);
  5729. EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_vmexit_inject);
  5730. EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_intr_vmexit);
  5731. EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_invlpga);
  5732. EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_skinit);
  5733. EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_intercepts);