vmx.c

/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This module enables machines with Intel VT-x extensions to run virtual
 * machines without emulation or binary translation.
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Avi Kivity   <avi@qumranet.com>
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 *
 */

#include "irq.h"
#include "mmu.h"
#include "cpuid.h"

#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/sched.h>
#include <linux/moduleparam.h>
#include <linux/mod_devicetable.h>
#include <linux/ftrace_event.h>
#include <linux/slab.h>
#include <linux/tboot.h>
#include "kvm_cache_regs.h"
#include "x86.h"

#include <asm/io.h>
#include <asm/desc.h>
#include <asm/vmx.h>
#include <asm/virtext.h>
#include <asm/mce.h>
#include <asm/i387.h>
#include <asm/xcr.h>
#include <asm/perf_event.h>
#include <asm/kexec.h>

#include "trace.h"
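/*
 * Roughly speaking, the wrappers below run a VMX instruction under
 * __kvm_handle_fault_on_reboot(), so a fault taken because VMX has already
 * been disabled (for example during reboot or kexec) is handled instead of
 * crashing the host; __ex_clear() additionally XORs the named register with
 * itself on the fault path so the caller reads back zero.
 */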
#define __ex(x) __kvm_handle_fault_on_reboot(x)
#define __ex_clear(x, reg) \
	____kvm_handle_fault_on_reboot(x, "xor " reg " , " reg)
MODULE_AUTHOR("Qumranet");
MODULE_LICENSE("GPL");

static const struct x86_cpu_id vmx_cpu_id[] = {
	X86_FEATURE_MATCH(X86_FEATURE_VMX),
	{}
};
MODULE_DEVICE_TABLE(x86cpu, vmx_cpu_id);

static bool __read_mostly enable_vpid = 1;
module_param_named(vpid, enable_vpid, bool, 0444);

static bool __read_mostly flexpriority_enabled = 1;
module_param_named(flexpriority, flexpriority_enabled, bool, S_IRUGO);

static bool __read_mostly enable_ept = 1;
module_param_named(ept, enable_ept, bool, S_IRUGO);

static bool __read_mostly enable_unrestricted_guest = 1;
module_param_named(unrestricted_guest,
			enable_unrestricted_guest, bool, S_IRUGO);

static bool __read_mostly enable_ept_ad_bits = 1;
module_param_named(eptad, enable_ept_ad_bits, bool, S_IRUGO);

static bool __read_mostly emulate_invalid_guest_state = true;
module_param(emulate_invalid_guest_state, bool, S_IRUGO);

static bool __read_mostly vmm_exclusive = 1;
module_param(vmm_exclusive, bool, S_IRUGO);

static bool __read_mostly fasteoi = 1;
module_param(fasteoi, bool, S_IRUGO);

static bool __read_mostly enable_apicv = 1;
module_param(enable_apicv, bool, S_IRUGO);

static bool __read_mostly enable_shadow_vmcs = 1;
module_param_named(enable_shadow_vmcs, enable_shadow_vmcs, bool, S_IRUGO);
/*
 * If nested=1, nested virtualization is supported, i.e., guests may use
 * VMX and act as hypervisors for their own guests. If nested=0, guests may
 * not use VMX instructions.
 */
static bool __read_mostly nested = 0;
module_param(nested, bool, S_IRUGO);
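/*
 * For example, loading the module with "modprobe kvm_intel nested=1" makes
 * VMX usable inside guests, so an L1 guest can itself run a hypervisor and
 * start L2 guests; the default (nested=0) hides the capability.
 */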
#define KVM_GUEST_CR0_MASK (X86_CR0_NW | X86_CR0_CD)
#define KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST (X86_CR0_WP | X86_CR0_NE)
#define KVM_VM_CR0_ALWAYS_ON \
	(KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST | X86_CR0_PG | X86_CR0_PE)
#define KVM_CR4_GUEST_OWNED_BITS \
	(X86_CR4_PVI | X86_CR4_DE | X86_CR4_PCE | X86_CR4_OSFXSR \
	 | X86_CR4_OSXMMEXCPT)

#define KVM_PMODE_VM_CR4_ALWAYS_ON (X86_CR4_PAE | X86_CR4_VMXE)
#define KVM_RMODE_VM_CR4_ALWAYS_ON (X86_CR4_VME | X86_CR4_PAE | X86_CR4_VMXE)

#define RMODE_GUEST_OWNED_EFLAGS_BITS (~(X86_EFLAGS_IOPL | X86_EFLAGS_VM))
/*
 * These 2 parameters are used to configure the controls for Pause-Loop
 * Exiting:
 * ple_gap:    upper bound on the amount of time between two successive
 *             executions of PAUSE in a loop. It also indicates whether PLE
 *             is enabled. According to tests, this time is usually smaller
 *             than 128 cycles.
 * ple_window: upper bound on the amount of time a guest is allowed to execute
 *             in a PAUSE loop. Tests indicate that most spinlocks are held
 *             for less than 2^12 cycles.
 * Time is measured based on a counter that runs at the same rate as the TSC;
 * refer to SDM volume 3b, sections 21.6.13 & 22.1.3.
 */
#define KVM_VMX_DEFAULT_PLE_GAP    128
#define KVM_VMX_DEFAULT_PLE_WINDOW 4096
static int ple_gap = KVM_VMX_DEFAULT_PLE_GAP;
module_param(ple_gap, int, S_IRUGO);

static int ple_window = KVM_VMX_DEFAULT_PLE_WINDOW;
module_param(ple_window, int, S_IRUGO);
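/*
 * Since ple_gap doubles as the enable flag, PLE can be turned off entirely by
 * loading the module with ple_gap=0 (for example "kvm_intel.ple_gap=0" on the
 * kernel command line); both parameters are read-only (S_IRUGO) once the
 * module is loaded.
 */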
extern const ulong vmx_return;

#define NR_AUTOLOAD_MSRS 8
#define VMCS02_POOL_SIZE 1
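/*
 * The hardware VMCS region: the CPU only defines the layout of the first two
 * fields below; everything after them (data[]) is implementation-specific and
 * is accessed through VMREAD/VMWRITE rather than by direct memory reads or
 * writes.
 */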
struct vmcs {
	u32 revision_id;
	u32 abort;
	char data[0];
};
/*
 * Track a VMCS that may be loaded on a certain CPU. If it is (cpu!=-1), also
 * remember whether it was VMLAUNCHed, and maintain a linked list of all VMCSs
 * loaded on this CPU (so we can clear them if the CPU goes down).
 */
struct loaded_vmcs {
	struct vmcs *vmcs;
	int cpu;
	int launched;
	struct list_head loaded_vmcss_on_cpu_link;
};

struct shared_msr_entry {
	unsigned index;
	u64 data;
	u64 mask;
};

/*
 * struct vmcs12 describes the state that our guest hypervisor (L1) keeps for a
 * single nested guest (L2), hence the name vmcs12. Any VMX implementation has
 * a VMCS structure, and vmcs12 is our emulated VMX's VMCS. This structure is
 * stored in guest memory specified by VMPTRLD, but is opaque to the guest,
 * which must access it using VMREAD/VMWRITE/VMCLEAR instructions.
 * More than one of these structures may exist, if L1 runs multiple L2 guests.
 * nested_vmx_run() will use the data here to build a vmcs02: a VMCS for the
 * underlying hardware which will be used to run L2.
 * This structure is packed to ensure that its layout is identical across
 * machines (necessary for live migration).
 * If there are changes in this struct, VMCS12_REVISION must be changed.
 */
typedef u64 natural_width;
struct __packed vmcs12 {
	/* According to the Intel spec, a VMCS region must start with the
	 * following two fields. Then follow implementation-specific data.
	 */
	u32 revision_id;
	u32 abort;

	u32 launch_state; /* set to 0 by VMCLEAR, to 1 by VMLAUNCH */
	u32 padding[7]; /* room for future expansion */

	u64 io_bitmap_a;
	u64 io_bitmap_b;
	u64 msr_bitmap;
	u64 vm_exit_msr_store_addr;
	u64 vm_exit_msr_load_addr;
	u64 vm_entry_msr_load_addr;
	u64 tsc_offset;
	u64 virtual_apic_page_addr;
	u64 apic_access_addr;
	u64 ept_pointer;
	u64 guest_physical_address;
	u64 vmcs_link_pointer;
	u64 guest_ia32_debugctl;
	u64 guest_ia32_pat;
	u64 guest_ia32_efer;
	u64 guest_ia32_perf_global_ctrl;
	u64 guest_pdptr0;
	u64 guest_pdptr1;
	u64 guest_pdptr2;
	u64 guest_pdptr3;
	u64 host_ia32_pat;
	u64 host_ia32_efer;
	u64 host_ia32_perf_global_ctrl;
	u64 padding64[8]; /* room for future expansion */
	/*
	 * To allow migration of L1 (complete with its L2 guests) between
	 * machines of different natural widths (32 or 64 bit), we cannot have
	 * unsigned long fields with no explicit size. We use u64 (aliased
	 * natural_width) instead. Luckily, x86 is little-endian.
	 */
	natural_width cr0_guest_host_mask;
	natural_width cr4_guest_host_mask;
	natural_width cr0_read_shadow;
	natural_width cr4_read_shadow;
	natural_width cr3_target_value0;
	natural_width cr3_target_value1;
	natural_width cr3_target_value2;
	natural_width cr3_target_value3;
	natural_width exit_qualification;
	natural_width guest_linear_address;
	natural_width guest_cr0;
	natural_width guest_cr3;
	natural_width guest_cr4;
	natural_width guest_es_base;
	natural_width guest_cs_base;
	natural_width guest_ss_base;
	natural_width guest_ds_base;
	natural_width guest_fs_base;
	natural_width guest_gs_base;
	natural_width guest_ldtr_base;
	natural_width guest_tr_base;
	natural_width guest_gdtr_base;
	natural_width guest_idtr_base;
	natural_width guest_dr7;
	natural_width guest_rsp;
	natural_width guest_rip;
	natural_width guest_rflags;
	natural_width guest_pending_dbg_exceptions;
	natural_width guest_sysenter_esp;
	natural_width guest_sysenter_eip;
	natural_width host_cr0;
	natural_width host_cr3;
	natural_width host_cr4;
	natural_width host_fs_base;
	natural_width host_gs_base;
	natural_width host_tr_base;
	natural_width host_gdtr_base;
	natural_width host_idtr_base;
	natural_width host_ia32_sysenter_esp;
	natural_width host_ia32_sysenter_eip;
	natural_width host_rsp;
	natural_width host_rip;
	natural_width paddingl[8]; /* room for future expansion */
	u32 pin_based_vm_exec_control;
	u32 cpu_based_vm_exec_control;
	u32 exception_bitmap;
	u32 page_fault_error_code_mask;
	u32 page_fault_error_code_match;
	u32 cr3_target_count;
	u32 vm_exit_controls;
	u32 vm_exit_msr_store_count;
	u32 vm_exit_msr_load_count;
	u32 vm_entry_controls;
	u32 vm_entry_msr_load_count;
	u32 vm_entry_intr_info_field;
	u32 vm_entry_exception_error_code;
	u32 vm_entry_instruction_len;
	u32 tpr_threshold;
	u32 secondary_vm_exec_control;
	u32 vm_instruction_error;
	u32 vm_exit_reason;
	u32 vm_exit_intr_info;
	u32 vm_exit_intr_error_code;
	u32 idt_vectoring_info_field;
	u32 idt_vectoring_error_code;
	u32 vm_exit_instruction_len;
	u32 vmx_instruction_info;
	u32 guest_es_limit;
	u32 guest_cs_limit;
	u32 guest_ss_limit;
	u32 guest_ds_limit;
	u32 guest_fs_limit;
	u32 guest_gs_limit;
	u32 guest_ldtr_limit;
	u32 guest_tr_limit;
	u32 guest_gdtr_limit;
	u32 guest_idtr_limit;
	u32 guest_es_ar_bytes;
	u32 guest_cs_ar_bytes;
	u32 guest_ss_ar_bytes;
	u32 guest_ds_ar_bytes;
	u32 guest_fs_ar_bytes;
	u32 guest_gs_ar_bytes;
	u32 guest_ldtr_ar_bytes;
	u32 guest_tr_ar_bytes;
	u32 guest_interruptibility_info;
	u32 guest_activity_state;
	u32 guest_sysenter_cs;
	u32 host_ia32_sysenter_cs;
	u32 vmx_preemption_timer_value;
	u32 padding32[7]; /* room for future expansion */
	u16 virtual_processor_id;
	u16 guest_es_selector;
	u16 guest_cs_selector;
	u16 guest_ss_selector;
	u16 guest_ds_selector;
	u16 guest_fs_selector;
	u16 guest_gs_selector;
	u16 guest_ldtr_selector;
	u16 guest_tr_selector;
	u16 host_es_selector;
	u16 host_cs_selector;
	u16 host_ss_selector;
	u16 host_ds_selector;
	u16 host_fs_selector;
	u16 host_gs_selector;
	u16 host_tr_selector;
};

/*
 * VMCS12_REVISION is an arbitrary id that should be changed if the content or
 * layout of struct vmcs12 is changed. MSR_IA32_VMX_BASIC returns this id, and
 * VMPTRLD verifies that the VMCS region that L1 is loading contains this id.
 */
#define VMCS12_REVISION 0x11e57ed0
/*
 * VMCS12_SIZE is the number of bytes L1 should allocate for the VMXON region
 * and any VMCS region. Although only sizeof(struct vmcs12) bytes are used by
 * the current implementation, 4K are reserved to avoid future complications.
 */
#define VMCS12_SIZE 0x1000
/* Used to remember the last vmcs02 used for some recently used vmcs12s */
struct vmcs02_list {
	struct list_head list;
	gpa_t vmptr;
	struct loaded_vmcs vmcs02;
};

/*
 * The nested_vmx structure is part of vcpu_vmx, and holds information we need
 * for correct emulation of VMX (i.e., nested VMX) on this vcpu.
 */
struct nested_vmx {
	/* Has the level1 guest done vmxon? */
	bool vmxon;

	/* The guest-physical address of the current VMCS L1 keeps for L2 */
	gpa_t current_vmptr;
	/* The host-usable pointer to the above */
	struct page *current_vmcs12_page;
	struct vmcs12 *current_vmcs12;
	struct vmcs *current_shadow_vmcs;
	/*
	 * Indicates if the shadow vmcs must be updated with the
	 * data held in vmcs12.
	 */
	bool sync_shadow_vmcs;
	/* vmcs02_list cache of VMCSs recently used to run L2 guests */
	struct list_head vmcs02_pool;
	int vmcs02_num;
	u64 vmcs01_tsc_offset;
	/* L2 must run next, and mustn't decide to exit to L1. */
	bool nested_run_pending;
	/*
	 * Guest pages referred to in vmcs02 with host-physical pointers, so
	 * we must keep them pinned while L2 runs.
	 */
	struct page *apic_access_page;
	u64 msr_ia32_feature_control;
};

#define POSTED_INTR_ON 0
/* Posted-Interrupt Descriptor */
struct pi_desc {
	u32 pir[8];	/* Posted interrupt requested */
	u32 control;	/* bit 0 of control is outstanding notification bit */
	u32 rsvd[7];
} __aligned(64);
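/*
 * The helpers below are thin wrappers around the kernel's atomic bitops:
 * pi_test_and_set_on()/pi_test_and_clear_on() operate on the outstanding
 * notification bit (POSTED_INTR_ON) in the control word, and
 * pi_test_and_set_pir() marks an interrupt vector as requested in the
 * 256-bit pir[] bitmap.
 */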
static bool pi_test_and_set_on(struct pi_desc *pi_desc)
{
	return test_and_set_bit(POSTED_INTR_ON,
			(unsigned long *)&pi_desc->control);
}

static bool pi_test_and_clear_on(struct pi_desc *pi_desc)
{
	return test_and_clear_bit(POSTED_INTR_ON,
			(unsigned long *)&pi_desc->control);
}

static int pi_test_and_set_pir(int vector, struct pi_desc *pi_desc)
{
	return test_and_set_bit(vector, (unsigned long *)pi_desc->pir);
}

struct vcpu_vmx {
	struct kvm_vcpu vcpu;
	unsigned long host_rsp;
	u8 fail;
	u8 cpl;
	bool nmi_known_unmasked;
	u32 exit_intr_info;
	u32 idt_vectoring_info;
	ulong rflags;
	struct shared_msr_entry *guest_msrs;
	int nmsrs;
	int save_nmsrs;
	unsigned long host_idt_base;
#ifdef CONFIG_X86_64
	u64 msr_host_kernel_gs_base;
	u64 msr_guest_kernel_gs_base;
#endif
	/*
	 * loaded_vmcs points to the VMCS currently used in this vcpu. For a
	 * non-nested (L1) guest, it always points to vmcs01. For a nested
	 * guest (L2), it points to a different VMCS.
	 */
	struct loaded_vmcs vmcs01;
	struct loaded_vmcs *loaded_vmcs;
	bool __launched; /* temporary, used in vmx_vcpu_run */
	struct msr_autoload {
		unsigned nr;
		struct vmx_msr_entry guest[NR_AUTOLOAD_MSRS];
		struct vmx_msr_entry host[NR_AUTOLOAD_MSRS];
	} msr_autoload;
	struct {
		int loaded;
		u16 fs_sel, gs_sel, ldt_sel;
#ifdef CONFIG_X86_64
		u16 ds_sel, es_sel;
#endif
		int gs_ldt_reload_needed;
		int fs_reload_needed;
	} host_state;
	struct {
		int vm86_active;
		ulong save_rflags;
		struct kvm_segment segs[8];
	} rmode;
	struct {
		u32 bitmask; /* 4 bits per segment (1 bit per field) */
		struct kvm_save_segment {
			u16 selector;
			unsigned long base;
			u32 limit;
			u32 ar;
		} seg[8];
	} segment_cache;
	int vpid;
	bool emulation_required;

	/* Support for vnmi-less CPUs */
	int soft_vnmi_blocked;
	ktime_t entry_time;
	s64 vnmi_blocked_time;
	u32 exit_reason;

	bool rdtscp_enabled;

	/* Posted interrupt descriptor */
	struct pi_desc pi_desc;

	/* Support for a guest hypervisor (nested VMX) */
	struct nested_vmx nested;
};

enum segment_cache_field {
	SEG_FIELD_SEL = 0,
	SEG_FIELD_BASE = 1,
	SEG_FIELD_LIMIT = 2,
	SEG_FIELD_AR = 3,

	SEG_FIELD_NR = 4
};

static inline struct vcpu_vmx *to_vmx(struct kvm_vcpu *vcpu)
{
	return container_of(vcpu, struct vcpu_vmx, vcpu);
}

#define VMCS12_OFFSET(x) offsetof(struct vmcs12, x)
#define FIELD(number, name)	[number] = VMCS12_OFFSET(name)
#define FIELD64(number, name)	[number] = VMCS12_OFFSET(name), \
				[number##_HIGH] = VMCS12_OFFSET(name)+4
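/*
 * For example, FIELD64(IO_BITMAP_A, io_bitmap_a) expands to two initializers:
 *   [IO_BITMAP_A]      = offsetof(struct vmcs12, io_bitmap_a),
 *   [IO_BITMAP_A_HIGH] = offsetof(struct vmcs12, io_bitmap_a) + 4,
 * so both the full 64-bit field and its "_HIGH" 32-bit half map into the same
 * u64 member of struct vmcs12.
 */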
static const unsigned long shadow_read_only_fields[] = {
	/*
	 * We do NOT shadow fields that are modified when L0
	 * traps and emulates any vmx instruction (e.g. VMPTRLD,
	 * VMXON...) executed by L1.
	 * For example, VM_INSTRUCTION_ERROR is read
	 * by L1 if a vmx instruction fails (part of the error path).
	 * Note the code assumes this logic. If for some reason
	 * we start shadowing these fields then we need to
	 * force a shadow sync when L0 emulates vmx instructions
	 * (e.g. force a sync if VM_INSTRUCTION_ERROR is modified
	 * by nested_vmx_failValid)
	 */
	VM_EXIT_REASON,
	VM_EXIT_INTR_INFO,
	VM_EXIT_INSTRUCTION_LEN,
	IDT_VECTORING_INFO_FIELD,
	IDT_VECTORING_ERROR_CODE,
	VM_EXIT_INTR_ERROR_CODE,
	EXIT_QUALIFICATION,
	GUEST_LINEAR_ADDRESS,
	GUEST_PHYSICAL_ADDRESS
};
static const int max_shadow_read_only_fields =
	ARRAY_SIZE(shadow_read_only_fields);
static const unsigned long shadow_read_write_fields[] = {
	GUEST_RIP,
	GUEST_RSP,
	GUEST_CR0,
	GUEST_CR3,
	GUEST_CR4,
	GUEST_INTERRUPTIBILITY_INFO,
	GUEST_RFLAGS,
	GUEST_CS_SELECTOR,
	GUEST_CS_AR_BYTES,
	GUEST_CS_LIMIT,
	GUEST_CS_BASE,
	GUEST_ES_BASE,
	CR0_GUEST_HOST_MASK,
	CR0_READ_SHADOW,
	CR4_READ_SHADOW,
	TSC_OFFSET,
	EXCEPTION_BITMAP,
	CPU_BASED_VM_EXEC_CONTROL,
	VM_ENTRY_EXCEPTION_ERROR_CODE,
	VM_ENTRY_INTR_INFO_FIELD,
	VM_ENTRY_INSTRUCTION_LEN,
	HOST_FS_BASE,
	HOST_GS_BASE,
	HOST_FS_SELECTOR,
	HOST_GS_SELECTOR
};
static const int max_shadow_read_write_fields =
	ARRAY_SIZE(shadow_read_write_fields);
  499. static const unsigned short vmcs_field_to_offset_table[] = {
  500. FIELD(VIRTUAL_PROCESSOR_ID, virtual_processor_id),
  501. FIELD(GUEST_ES_SELECTOR, guest_es_selector),
  502. FIELD(GUEST_CS_SELECTOR, guest_cs_selector),
  503. FIELD(GUEST_SS_SELECTOR, guest_ss_selector),
  504. FIELD(GUEST_DS_SELECTOR, guest_ds_selector),
  505. FIELD(GUEST_FS_SELECTOR, guest_fs_selector),
  506. FIELD(GUEST_GS_SELECTOR, guest_gs_selector),
  507. FIELD(GUEST_LDTR_SELECTOR, guest_ldtr_selector),
  508. FIELD(GUEST_TR_SELECTOR, guest_tr_selector),
  509. FIELD(HOST_ES_SELECTOR, host_es_selector),
  510. FIELD(HOST_CS_SELECTOR, host_cs_selector),
  511. FIELD(HOST_SS_SELECTOR, host_ss_selector),
  512. FIELD(HOST_DS_SELECTOR, host_ds_selector),
  513. FIELD(HOST_FS_SELECTOR, host_fs_selector),
  514. FIELD(HOST_GS_SELECTOR, host_gs_selector),
  515. FIELD(HOST_TR_SELECTOR, host_tr_selector),
  516. FIELD64(IO_BITMAP_A, io_bitmap_a),
  517. FIELD64(IO_BITMAP_B, io_bitmap_b),
  518. FIELD64(MSR_BITMAP, msr_bitmap),
  519. FIELD64(VM_EXIT_MSR_STORE_ADDR, vm_exit_msr_store_addr),
  520. FIELD64(VM_EXIT_MSR_LOAD_ADDR, vm_exit_msr_load_addr),
  521. FIELD64(VM_ENTRY_MSR_LOAD_ADDR, vm_entry_msr_load_addr),
  522. FIELD64(TSC_OFFSET, tsc_offset),
	FIELD64(VIRTUAL_APIC_PAGE_ADDR, virtual_apic_page_addr),
	FIELD64(APIC_ACCESS_ADDR, apic_access_addr),
	FIELD64(EPT_POINTER, ept_pointer),
	FIELD64(GUEST_PHYSICAL_ADDRESS, guest_physical_address),
	FIELD64(VMCS_LINK_POINTER, vmcs_link_pointer),
	FIELD64(GUEST_IA32_DEBUGCTL, guest_ia32_debugctl),
	FIELD64(GUEST_IA32_PAT, guest_ia32_pat),
	FIELD64(GUEST_IA32_EFER, guest_ia32_efer),
	FIELD64(GUEST_IA32_PERF_GLOBAL_CTRL, guest_ia32_perf_global_ctrl),
	FIELD64(GUEST_PDPTR0, guest_pdptr0),
	FIELD64(GUEST_PDPTR1, guest_pdptr1),
	FIELD64(GUEST_PDPTR2, guest_pdptr2),
	FIELD64(GUEST_PDPTR3, guest_pdptr3),
	FIELD64(HOST_IA32_PAT, host_ia32_pat),
	FIELD64(HOST_IA32_EFER, host_ia32_efer),
	FIELD64(HOST_IA32_PERF_GLOBAL_CTRL, host_ia32_perf_global_ctrl),
	FIELD(PIN_BASED_VM_EXEC_CONTROL, pin_based_vm_exec_control),
	FIELD(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control),
	FIELD(EXCEPTION_BITMAP, exception_bitmap),
	FIELD(PAGE_FAULT_ERROR_CODE_MASK, page_fault_error_code_mask),
	FIELD(PAGE_FAULT_ERROR_CODE_MATCH, page_fault_error_code_match),
	FIELD(CR3_TARGET_COUNT, cr3_target_count),
	FIELD(VM_EXIT_CONTROLS, vm_exit_controls),
	FIELD(VM_EXIT_MSR_STORE_COUNT, vm_exit_msr_store_count),
	FIELD(VM_EXIT_MSR_LOAD_COUNT, vm_exit_msr_load_count),
	FIELD(VM_ENTRY_CONTROLS, vm_entry_controls),
	FIELD(VM_ENTRY_MSR_LOAD_COUNT, vm_entry_msr_load_count),
	FIELD(VM_ENTRY_INTR_INFO_FIELD, vm_entry_intr_info_field),
	FIELD(VM_ENTRY_EXCEPTION_ERROR_CODE, vm_entry_exception_error_code),
	FIELD(VM_ENTRY_INSTRUCTION_LEN, vm_entry_instruction_len),
	FIELD(TPR_THRESHOLD, tpr_threshold),
	FIELD(SECONDARY_VM_EXEC_CONTROL, secondary_vm_exec_control),
	FIELD(VM_INSTRUCTION_ERROR, vm_instruction_error),
	FIELD(VM_EXIT_REASON, vm_exit_reason),
	FIELD(VM_EXIT_INTR_INFO, vm_exit_intr_info),
	FIELD(VM_EXIT_INTR_ERROR_CODE, vm_exit_intr_error_code),
	FIELD(IDT_VECTORING_INFO_FIELD, idt_vectoring_info_field),
	FIELD(IDT_VECTORING_ERROR_CODE, idt_vectoring_error_code),
	FIELD(VM_EXIT_INSTRUCTION_LEN, vm_exit_instruction_len),
	FIELD(VMX_INSTRUCTION_INFO, vmx_instruction_info),
	FIELD(GUEST_ES_LIMIT, guest_es_limit),
	FIELD(GUEST_CS_LIMIT, guest_cs_limit),
	FIELD(GUEST_SS_LIMIT, guest_ss_limit),
	FIELD(GUEST_DS_LIMIT, guest_ds_limit),
	FIELD(GUEST_FS_LIMIT, guest_fs_limit),
	FIELD(GUEST_GS_LIMIT, guest_gs_limit),
	FIELD(GUEST_LDTR_LIMIT, guest_ldtr_limit),
	FIELD(GUEST_TR_LIMIT, guest_tr_limit),
	FIELD(GUEST_GDTR_LIMIT, guest_gdtr_limit),
	FIELD(GUEST_IDTR_LIMIT, guest_idtr_limit),
	FIELD(GUEST_ES_AR_BYTES, guest_es_ar_bytes),
	FIELD(GUEST_CS_AR_BYTES, guest_cs_ar_bytes),
	FIELD(GUEST_SS_AR_BYTES, guest_ss_ar_bytes),
	FIELD(GUEST_DS_AR_BYTES, guest_ds_ar_bytes),
	FIELD(GUEST_FS_AR_BYTES, guest_fs_ar_bytes),
	FIELD(GUEST_GS_AR_BYTES, guest_gs_ar_bytes),
	FIELD(GUEST_LDTR_AR_BYTES, guest_ldtr_ar_bytes),
	FIELD(GUEST_TR_AR_BYTES, guest_tr_ar_bytes),
	FIELD(GUEST_INTERRUPTIBILITY_INFO, guest_interruptibility_info),
	FIELD(GUEST_ACTIVITY_STATE, guest_activity_state),
	FIELD(GUEST_SYSENTER_CS, guest_sysenter_cs),
	FIELD(HOST_IA32_SYSENTER_CS, host_ia32_sysenter_cs),
	FIELD(VMX_PREEMPTION_TIMER_VALUE, vmx_preemption_timer_value),
	FIELD(CR0_GUEST_HOST_MASK, cr0_guest_host_mask),
	FIELD(CR4_GUEST_HOST_MASK, cr4_guest_host_mask),
	FIELD(CR0_READ_SHADOW, cr0_read_shadow),
	FIELD(CR4_READ_SHADOW, cr4_read_shadow),
	FIELD(CR3_TARGET_VALUE0, cr3_target_value0),
	FIELD(CR3_TARGET_VALUE1, cr3_target_value1),
	FIELD(CR3_TARGET_VALUE2, cr3_target_value2),
	FIELD(CR3_TARGET_VALUE3, cr3_target_value3),
	FIELD(EXIT_QUALIFICATION, exit_qualification),
	FIELD(GUEST_LINEAR_ADDRESS, guest_linear_address),
	FIELD(GUEST_CR0, guest_cr0),
	FIELD(GUEST_CR3, guest_cr3),
	FIELD(GUEST_CR4, guest_cr4),
	FIELD(GUEST_ES_BASE, guest_es_base),
	FIELD(GUEST_CS_BASE, guest_cs_base),
	FIELD(GUEST_SS_BASE, guest_ss_base),
	FIELD(GUEST_DS_BASE, guest_ds_base),
	FIELD(GUEST_FS_BASE, guest_fs_base),
	FIELD(GUEST_GS_BASE, guest_gs_base),
	FIELD(GUEST_LDTR_BASE, guest_ldtr_base),
	FIELD(GUEST_TR_BASE, guest_tr_base),
	FIELD(GUEST_GDTR_BASE, guest_gdtr_base),
	FIELD(GUEST_IDTR_BASE, guest_idtr_base),
	FIELD(GUEST_DR7, guest_dr7),
	FIELD(GUEST_RSP, guest_rsp),
	FIELD(GUEST_RIP, guest_rip),
	FIELD(GUEST_RFLAGS, guest_rflags),
	FIELD(GUEST_PENDING_DBG_EXCEPTIONS, guest_pending_dbg_exceptions),
	FIELD(GUEST_SYSENTER_ESP, guest_sysenter_esp),
	FIELD(GUEST_SYSENTER_EIP, guest_sysenter_eip),
	FIELD(HOST_CR0, host_cr0),
	FIELD(HOST_CR3, host_cr3),
	FIELD(HOST_CR4, host_cr4),
	FIELD(HOST_FS_BASE, host_fs_base),
	FIELD(HOST_GS_BASE, host_gs_base),
	FIELD(HOST_TR_BASE, host_tr_base),
	FIELD(HOST_GDTR_BASE, host_gdtr_base),
	FIELD(HOST_IDTR_BASE, host_idtr_base),
	FIELD(HOST_IA32_SYSENTER_ESP, host_ia32_sysenter_esp),
	FIELD(HOST_IA32_SYSENTER_EIP, host_ia32_sysenter_eip),
	FIELD(HOST_RSP, host_rsp),
	FIELD(HOST_RIP, host_rip),
};
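
/*
 * vmcs_field_to_offset() maps a VMCS field encoding to the offset of the
 * corresponding member in struct vmcs12, or returns -1 if the field is not
 * part of the table above.
 */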
static const int max_vmcs_field = ARRAY_SIZE(vmcs_field_to_offset_table);

static inline short vmcs_field_to_offset(unsigned long field)
{
	if (field >= max_vmcs_field || vmcs_field_to_offset_table[field] == 0)
		return -1;

	return vmcs_field_to_offset_table[field];
}

static inline struct vmcs12 *get_vmcs12(struct kvm_vcpu *vcpu)
{
	return to_vmx(vcpu)->nested.current_vmcs12;
}

static struct page *nested_get_page(struct kvm_vcpu *vcpu, gpa_t addr)
{
	struct page *page = gfn_to_page(vcpu->kvm, addr >> PAGE_SHIFT);
	if (is_error_page(page))
		return NULL;

	return page;
}

static void nested_release_page(struct page *page)
{
	kvm_release_page_dirty(page);
}

static void nested_release_page_clean(struct page *page)
{
	kvm_release_page_clean(page);
}
static unsigned long nested_ept_get_cr3(struct kvm_vcpu *vcpu);
static u64 construct_eptp(unsigned long root_hpa);
static void kvm_cpu_vmxon(u64 addr);
static void kvm_cpu_vmxoff(void);
static int vmx_set_tss_addr(struct kvm *kvm, unsigned int addr);
static void vmx_set_segment(struct kvm_vcpu *vcpu,
			    struct kvm_segment *var, int seg);
static void vmx_get_segment(struct kvm_vcpu *vcpu,
			    struct kvm_segment *var, int seg);
static bool guest_state_valid(struct kvm_vcpu *vcpu);
static u32 vmx_segment_access_rights(struct kvm_segment *var);
static void vmx_sync_pir_to_irr_dummy(struct kvm_vcpu *vcpu);
static void copy_vmcs12_to_shadow(struct vcpu_vmx *vmx);
static void copy_shadow_to_vmcs12(struct vcpu_vmx *vmx);

static DEFINE_PER_CPU(struct vmcs *, vmxarea);
static DEFINE_PER_CPU(struct vmcs *, current_vmcs);
/*
 * We maintain a per-CPU linked-list of VMCS loaded on that CPU. This is needed
 * when a CPU is brought down, and we need to VMCLEAR all VMCSs loaded on it.
 */
static DEFINE_PER_CPU(struct list_head, loaded_vmcss_on_cpu);
static DEFINE_PER_CPU(struct desc_ptr, host_gdt);

static unsigned long *vmx_io_bitmap_a;
static unsigned long *vmx_io_bitmap_b;
static unsigned long *vmx_msr_bitmap_legacy;
static unsigned long *vmx_msr_bitmap_longmode;
static unsigned long *vmx_msr_bitmap_legacy_x2apic;
static unsigned long *vmx_msr_bitmap_longmode_x2apic;
static unsigned long *vmx_vmread_bitmap;
static unsigned long *vmx_vmwrite_bitmap;

static bool cpu_has_load_ia32_efer;
static bool cpu_has_load_perf_global_ctrl;

static DECLARE_BITMAP(vmx_vpid_bitmap, VMX_NR_VPIDS);
static DEFINE_SPINLOCK(vmx_vpid_lock);
static struct vmcs_config {
	int size;
	int order;
	u32 revision_id;
	u32 pin_based_exec_ctrl;
	u32 cpu_based_exec_ctrl;
	u32 cpu_based_2nd_exec_ctrl;
	u32 vmexit_ctrl;
	u32 vmentry_ctrl;
} vmcs_config;

static struct vmx_capability {
	u32 ept;
	u32 vpid;
} vmx_capability;

#define VMX_SEGMENT_FIELD(seg)					\
	[VCPU_SREG_##seg] = {					\
		.selector = GUEST_##seg##_SELECTOR,		\
		.base = GUEST_##seg##_BASE,			\
		.limit = GUEST_##seg##_LIMIT,			\
		.ar_bytes = GUEST_##seg##_AR_BYTES,		\
	}

static const struct kvm_vmx_segment_field {
	unsigned selector;
	unsigned base;
	unsigned limit;
	unsigned ar_bytes;
} kvm_vmx_segment_fields[] = {
	VMX_SEGMENT_FIELD(CS),
	VMX_SEGMENT_FIELD(DS),
	VMX_SEGMENT_FIELD(ES),
	VMX_SEGMENT_FIELD(FS),
	VMX_SEGMENT_FIELD(GS),
	VMX_SEGMENT_FIELD(SS),
	VMX_SEGMENT_FIELD(TR),
	VMX_SEGMENT_FIELD(LDTR),
};
static u64 host_efer;

static void ept_save_pdptrs(struct kvm_vcpu *vcpu);

/*
 * Keep MSR_STAR at the end, as setup_msrs() will try to optimize it
 * away by decrementing the array size.
 */
static const u32 vmx_msr_index[] = {
#ifdef CONFIG_X86_64
	MSR_SYSCALL_MASK, MSR_LSTAR, MSR_CSTAR,
#endif
	MSR_EFER, MSR_TSC_AUX, MSR_STAR,
};
#define NR_VMX_MSR ARRAY_SIZE(vmx_msr_index)
static inline bool is_page_fault(u32 intr_info)
{
	return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK |
			     INTR_INFO_VALID_MASK)) ==
		(INTR_TYPE_HARD_EXCEPTION | PF_VECTOR | INTR_INFO_VALID_MASK);
}

static inline bool is_no_device(u32 intr_info)
{
	return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK |
			     INTR_INFO_VALID_MASK)) ==
		(INTR_TYPE_HARD_EXCEPTION | NM_VECTOR | INTR_INFO_VALID_MASK);
}

static inline bool is_invalid_opcode(u32 intr_info)
{
	return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK |
			     INTR_INFO_VALID_MASK)) ==
		(INTR_TYPE_HARD_EXCEPTION | UD_VECTOR | INTR_INFO_VALID_MASK);
}

static inline bool is_external_interrupt(u32 intr_info)
{
	return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VALID_MASK))
		== (INTR_TYPE_EXT_INTR | INTR_INFO_VALID_MASK);
}

static inline bool is_machine_check(u32 intr_info)
{
	return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK |
			     INTR_INFO_VALID_MASK)) ==
		(INTR_TYPE_HARD_EXCEPTION | MC_VECTOR | INTR_INFO_VALID_MASK);
}
  767. static inline bool cpu_has_vmx_msr_bitmap(void)
  768. {
  769. return vmcs_config.cpu_based_exec_ctrl & CPU_BASED_USE_MSR_BITMAPS;
  770. }
  771. static inline bool cpu_has_vmx_tpr_shadow(void)
  772. {
  773. return vmcs_config.cpu_based_exec_ctrl & CPU_BASED_TPR_SHADOW;
  774. }
  775. static inline bool vm_need_tpr_shadow(struct kvm *kvm)
  776. {
  777. return (cpu_has_vmx_tpr_shadow()) && (irqchip_in_kernel(kvm));
  778. }
  779. static inline bool cpu_has_secondary_exec_ctrls(void)
  780. {
  781. return vmcs_config.cpu_based_exec_ctrl &
  782. CPU_BASED_ACTIVATE_SECONDARY_CONTROLS;
  783. }
  784. static inline bool cpu_has_vmx_virtualize_apic_accesses(void)
  785. {
  786. return vmcs_config.cpu_based_2nd_exec_ctrl &
  787. SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
  788. }
  789. static inline bool cpu_has_vmx_virtualize_x2apic_mode(void)
  790. {
  791. return vmcs_config.cpu_based_2nd_exec_ctrl &
  792. SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE;
  793. }
  794. static inline bool cpu_has_vmx_apic_register_virt(void)
  795. {
  796. return vmcs_config.cpu_based_2nd_exec_ctrl &
  797. SECONDARY_EXEC_APIC_REGISTER_VIRT;
  798. }
  799. static inline bool cpu_has_vmx_virtual_intr_delivery(void)
  800. {
  801. return vmcs_config.cpu_based_2nd_exec_ctrl &
  802. SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY;
  803. }
  804. static inline bool cpu_has_vmx_posted_intr(void)
  805. {
  806. return vmcs_config.pin_based_exec_ctrl & PIN_BASED_POSTED_INTR;
  807. }
  808. static inline bool cpu_has_vmx_apicv(void)
  809. {
  810. return cpu_has_vmx_apic_register_virt() &&
  811. cpu_has_vmx_virtual_intr_delivery() &&
  812. cpu_has_vmx_posted_intr();
  813. }
  814. static inline bool cpu_has_vmx_flexpriority(void)
  815. {
  816. return cpu_has_vmx_tpr_shadow() &&
  817. cpu_has_vmx_virtualize_apic_accesses();
  818. }
  819. static inline bool cpu_has_vmx_ept_execute_only(void)
  820. {
  821. return vmx_capability.ept & VMX_EPT_EXECUTE_ONLY_BIT;
  822. }
  823. static inline bool cpu_has_vmx_eptp_uncacheable(void)
  824. {
  825. return vmx_capability.ept & VMX_EPTP_UC_BIT;
  826. }
  827. static inline bool cpu_has_vmx_eptp_writeback(void)
  828. {
  829. return vmx_capability.ept & VMX_EPTP_WB_BIT;
  830. }
  831. static inline bool cpu_has_vmx_ept_2m_page(void)
  832. {
  833. return vmx_capability.ept & VMX_EPT_2MB_PAGE_BIT;
  834. }
  835. static inline bool cpu_has_vmx_ept_1g_page(void)
  836. {
  837. return vmx_capability.ept & VMX_EPT_1GB_PAGE_BIT;
  838. }
  839. static inline bool cpu_has_vmx_ept_4levels(void)
  840. {
  841. return vmx_capability.ept & VMX_EPT_PAGE_WALK_4_BIT;
  842. }
  843. static inline bool cpu_has_vmx_ept_ad_bits(void)
  844. {
  845. return vmx_capability.ept & VMX_EPT_AD_BIT;
  846. }
  847. static inline bool cpu_has_vmx_invept_context(void)
  848. {
  849. return vmx_capability.ept & VMX_EPT_EXTENT_CONTEXT_BIT;
  850. }
  851. static inline bool cpu_has_vmx_invept_global(void)
  852. {
  853. return vmx_capability.ept & VMX_EPT_EXTENT_GLOBAL_BIT;
  854. }
  855. static inline bool cpu_has_vmx_invvpid_single(void)
  856. {
  857. return vmx_capability.vpid & VMX_VPID_EXTENT_SINGLE_CONTEXT_BIT;
  858. }
  859. static inline bool cpu_has_vmx_invvpid_global(void)
  860. {
  861. return vmx_capability.vpid & VMX_VPID_EXTENT_GLOBAL_CONTEXT_BIT;
  862. }
  863. static inline bool cpu_has_vmx_ept(void)
  864. {
  865. return vmcs_config.cpu_based_2nd_exec_ctrl &
  866. SECONDARY_EXEC_ENABLE_EPT;
  867. }
  868. static inline bool cpu_has_vmx_unrestricted_guest(void)
  869. {
  870. return vmcs_config.cpu_based_2nd_exec_ctrl &
  871. SECONDARY_EXEC_UNRESTRICTED_GUEST;
  872. }
  873. static inline bool cpu_has_vmx_ple(void)
  874. {
  875. return vmcs_config.cpu_based_2nd_exec_ctrl &
  876. SECONDARY_EXEC_PAUSE_LOOP_EXITING;
  877. }
  878. static inline bool vm_need_virtualize_apic_accesses(struct kvm *kvm)
  879. {
  880. return flexpriority_enabled && irqchip_in_kernel(kvm);
  881. }
  882. static inline bool cpu_has_vmx_vpid(void)
  883. {
  884. return vmcs_config.cpu_based_2nd_exec_ctrl &
  885. SECONDARY_EXEC_ENABLE_VPID;
  886. }
  887. static inline bool cpu_has_vmx_rdtscp(void)
  888. {
  889. return vmcs_config.cpu_based_2nd_exec_ctrl &
  890. SECONDARY_EXEC_RDTSCP;
  891. }
  892. static inline bool cpu_has_vmx_invpcid(void)
  893. {
  894. return vmcs_config.cpu_based_2nd_exec_ctrl &
  895. SECONDARY_EXEC_ENABLE_INVPCID;
  896. }
  897. static inline bool cpu_has_virtual_nmis(void)
  898. {
  899. return vmcs_config.pin_based_exec_ctrl & PIN_BASED_VIRTUAL_NMIS;
  900. }
  901. static inline bool cpu_has_vmx_wbinvd_exit(void)
  902. {
  903. return vmcs_config.cpu_based_2nd_exec_ctrl &
  904. SECONDARY_EXEC_WBINVD_EXITING;
  905. }
  906. static inline bool cpu_has_vmx_shadow_vmcs(void)
  907. {
  908. u64 vmx_msr;
  909. rdmsrl(MSR_IA32_VMX_MISC, vmx_msr);
  910. /* check if the cpu supports writing r/o exit information fields */
  911. if (!(vmx_msr & MSR_IA32_VMX_MISC_VMWRITE_SHADOW_RO_FIELDS))
  912. return false;
  913. return vmcs_config.cpu_based_2nd_exec_ctrl &
  914. SECONDARY_EXEC_SHADOW_VMCS;
  915. }
  916. static inline bool report_flexpriority(void)
  917. {
  918. return flexpriority_enabled;
  919. }
  920. static inline bool nested_cpu_has(struct vmcs12 *vmcs12, u32 bit)
  921. {
  922. return vmcs12->cpu_based_vm_exec_control & bit;
  923. }
  924. static inline bool nested_cpu_has2(struct vmcs12 *vmcs12, u32 bit)
  925. {
  926. return (vmcs12->cpu_based_vm_exec_control &
  927. CPU_BASED_ACTIVATE_SECONDARY_CONTROLS) &&
  928. (vmcs12->secondary_vm_exec_control & bit);
  929. }
  930. static inline bool nested_cpu_has_virtual_nmis(struct vmcs12 *vmcs12)
  931. {
  932. return vmcs12->pin_based_vm_exec_control & PIN_BASED_VIRTUAL_NMIS;
  933. }
  934. static inline int nested_cpu_has_ept(struct vmcs12 *vmcs12)
  935. {
  936. return nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENABLE_EPT);
  937. }
  938. static inline bool is_exception(u32 intr_info)
  939. {
  940. return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VALID_MASK))
  941. == (INTR_TYPE_HARD_EXCEPTION | INTR_INFO_VALID_MASK);
  942. }
  943. static void nested_vmx_vmexit(struct kvm_vcpu *vcpu);
  944. static void nested_vmx_entry_failure(struct kvm_vcpu *vcpu,
  945. struct vmcs12 *vmcs12,
  946. u32 reason, unsigned long qualification);
  947. static int __find_msr_index(struct vcpu_vmx *vmx, u32 msr)
  948. {
  949. int i;
  950. for (i = 0; i < vmx->nmsrs; ++i)
  951. if (vmx_msr_index[vmx->guest_msrs[i].index] == msr)
  952. return i;
  953. return -1;
  954. }
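
/*
 * __invvpid() and __invept() below issue the INVVPID and INVEPT instructions
 * to invalidate cached guest translations tagged by VPID or by an EPT
 * pointer, respectively.
 */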
static inline void __invvpid(int ext, u16 vpid, gva_t gva)
{
	struct {
		u64 vpid : 16;
		u64 rsvd : 48;
		u64 gva;
	} operand = { vpid, 0, gva };

	asm volatile (__ex(ASM_VMX_INVVPID)
		  /* CF==1 or ZF==1 --> rc = -1 */
		  "; ja 1f ; ud2 ; 1:"
		  : : "a"(&operand), "c"(ext) : "cc", "memory");
}

static inline void __invept(int ext, u64 eptp, gpa_t gpa)
{
	struct {
		u64 eptp, gpa;
	} operand = {eptp, gpa};

	asm volatile (__ex(ASM_VMX_INVEPT)
			/* CF==1 or ZF==1 --> rc = -1 */
			"; ja 1f ; ud2 ; 1:\n"
			: : "a" (&operand), "c" (ext) : "cc", "memory");
}

static struct shared_msr_entry *find_msr_entry(struct vcpu_vmx *vmx, u32 msr)
{
	int i;

	i = __find_msr_index(vmx, msr);
	if (i >= 0)
		return &vmx->guest_msrs[i];
	return NULL;
}
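
/*
 * vmcs_clear() and vmcs_load() wrap the VMCLEAR and VMPTRLD instructions:
 * they flush a VMCS to memory and make it inactive, or make it the current
 * VMCS on this CPU, respectively.
 */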
static void vmcs_clear(struct vmcs *vmcs)
{
	u64 phys_addr = __pa(vmcs);
	u8 error;

	asm volatile (__ex(ASM_VMX_VMCLEAR_RAX) "; setna %0"
		      : "=qm"(error) : "a"(&phys_addr), "m"(phys_addr)
		      : "cc", "memory");
	if (error)
		printk(KERN_ERR "kvm: vmclear fail: %p/%llx\n",
		       vmcs, phys_addr);
}

static inline void loaded_vmcs_init(struct loaded_vmcs *loaded_vmcs)
{
	vmcs_clear(loaded_vmcs->vmcs);
	loaded_vmcs->cpu = -1;
	loaded_vmcs->launched = 0;
}

static void vmcs_load(struct vmcs *vmcs)
{
	u64 phys_addr = __pa(vmcs);
	u8 error;

	asm volatile (__ex(ASM_VMX_VMPTRLD_RAX) "; setna %0"
			: "=qm"(error) : "a"(&phys_addr), "m"(phys_addr)
			: "cc", "memory");
	if (error)
		printk(KERN_ERR "kvm: vmptrld %p/%llx failed\n",
		       vmcs, phys_addr);
}
#ifdef CONFIG_KEXEC
/*
 * This bitmap is used to indicate whether the vmclear
 * operation is enabled on all cpus. All disabled by
 * default.
 */
static cpumask_t crash_vmclear_enabled_bitmap = CPU_MASK_NONE;

static inline void crash_enable_local_vmclear(int cpu)
{
	cpumask_set_cpu(cpu, &crash_vmclear_enabled_bitmap);
}

static inline void crash_disable_local_vmclear(int cpu)
{
	cpumask_clear_cpu(cpu, &crash_vmclear_enabled_bitmap);
}

static inline int crash_local_vmclear_enabled(int cpu)
{
	return cpumask_test_cpu(cpu, &crash_vmclear_enabled_bitmap);
}

static void crash_vmclear_local_loaded_vmcss(void)
{
	int cpu = raw_smp_processor_id();
	struct loaded_vmcs *v;

	if (!crash_local_vmclear_enabled(cpu))
		return;

	list_for_each_entry(v, &per_cpu(loaded_vmcss_on_cpu, cpu),
			    loaded_vmcss_on_cpu_link)
		vmcs_clear(v->vmcs);
}
#else
static inline void crash_enable_local_vmclear(int cpu) { }
static inline void crash_disable_local_vmclear(int cpu) { }
#endif /* CONFIG_KEXEC */
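
/*
 * __loaded_vmcs_clear() VMCLEARs the given VMCS on the CPU where it was last
 * loaded and removes it from that CPU's loaded_vmcss_on_cpu list;
 * loaded_vmcs_clear() invokes it on the right CPU via
 * smp_call_function_single().
 */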
static void __loaded_vmcs_clear(void *arg)
{
	struct loaded_vmcs *loaded_vmcs = arg;
	int cpu = raw_smp_processor_id();

	if (loaded_vmcs->cpu != cpu)
		return; /* vcpu migration can race with cpu offline */
	if (per_cpu(current_vmcs, cpu) == loaded_vmcs->vmcs)
		per_cpu(current_vmcs, cpu) = NULL;
	crash_disable_local_vmclear(cpu);
	list_del(&loaded_vmcs->loaded_vmcss_on_cpu_link);

	/*
	 * Ensure the VMCS is removed from loaded_vmcss_on_cpu_link before
	 * loaded_vmcs->cpu is set to -1 in loaded_vmcs_init(). Otherwise,
	 * another CPU could observe cpu == -1 first and re-add the VMCS to
	 * the per-cpu list before it has been deleted.
	 */
	smp_wmb();

	loaded_vmcs_init(loaded_vmcs);
	crash_enable_local_vmclear(cpu);
}

static void loaded_vmcs_clear(struct loaded_vmcs *loaded_vmcs)
{
	int cpu = loaded_vmcs->cpu;

	if (cpu != -1)
		smp_call_function_single(cpu,
			 __loaded_vmcs_clear, loaded_vmcs, 1);
}
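
/*
 * The vpid_sync_*() and ept_sync_*() helpers invalidate cached translations
 * for a single VPID or EPT context when the hardware supports that, and fall
 * back to a global invalidation otherwise.
 */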
static inline void vpid_sync_vcpu_single(struct vcpu_vmx *vmx)
{
	if (vmx->vpid == 0)
		return;

	if (cpu_has_vmx_invvpid_single())
		__invvpid(VMX_VPID_EXTENT_SINGLE_CONTEXT, vmx->vpid, 0);
}

static inline void vpid_sync_vcpu_global(void)
{
	if (cpu_has_vmx_invvpid_global())
		__invvpid(VMX_VPID_EXTENT_ALL_CONTEXT, 0, 0);
}

static inline void vpid_sync_context(struct vcpu_vmx *vmx)
{
	if (cpu_has_vmx_invvpid_single())
		vpid_sync_vcpu_single(vmx);
	else
		vpid_sync_vcpu_global();
}

static inline void ept_sync_global(void)
{
	if (cpu_has_vmx_invept_global())
		__invept(VMX_EPT_EXTENT_GLOBAL, 0, 0);
}

static inline void ept_sync_context(u64 eptp)
{
	if (enable_ept) {
		if (cpu_has_vmx_invept_context())
			__invept(VMX_EPT_EXTENT_CONTEXT, eptp, 0);
		else
			ept_sync_global();
	}
}
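
/*
 * Low-level VMCS accessors: thin wrappers around the VMREAD and VMWRITE
 * instructions. On 32-bit hosts, 64-bit fields are accessed as two
 * consecutive 32-bit halves.
 */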
static __always_inline unsigned long vmcs_readl(unsigned long field)
{
	unsigned long value;

	asm volatile (__ex_clear(ASM_VMX_VMREAD_RDX_RAX, "%0")
		      : "=a"(value) : "d"(field) : "cc");
	return value;
}

static __always_inline u16 vmcs_read16(unsigned long field)
{
	return vmcs_readl(field);
}

static __always_inline u32 vmcs_read32(unsigned long field)
{
	return vmcs_readl(field);
}

static __always_inline u64 vmcs_read64(unsigned long field)
{
#ifdef CONFIG_X86_64
	return vmcs_readl(field);
#else
	return vmcs_readl(field) | ((u64)vmcs_readl(field+1) << 32);
#endif
}

static noinline void vmwrite_error(unsigned long field, unsigned long value)
{
	printk(KERN_ERR "vmwrite error: reg %lx value %lx (err %d)\n",
	       field, value, vmcs_read32(VM_INSTRUCTION_ERROR));
	dump_stack();
}

static void vmcs_writel(unsigned long field, unsigned long value)
{
	u8 error;

	asm volatile (__ex(ASM_VMX_VMWRITE_RAX_RDX) "; setna %0"
		       : "=q"(error) : "a"(value), "d"(field) : "cc");
	if (unlikely(error))
		vmwrite_error(field, value);
}

static void vmcs_write16(unsigned long field, u16 value)
{
	vmcs_writel(field, value);
}

static void vmcs_write32(unsigned long field, u32 value)
{
	vmcs_writel(field, value);
}

static void vmcs_write64(unsigned long field, u64 value)
{
	vmcs_writel(field, value);
#ifndef CONFIG_X86_64
	asm volatile ("");
	vmcs_writel(field+1, value >> 32);
#endif
}

static void vmcs_clear_bits(unsigned long field, u32 mask)
{
	vmcs_writel(field, vmcs_readl(field) & ~mask);
}

static void vmcs_set_bits(unsigned long field, u32 mask)
{
	vmcs_writel(field, vmcs_readl(field) | mask);
}
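
/*
 * The segment cache avoids redundant VMREADs of guest segment fields: the
 * first access of a field after the cache is cleared reads it from the VMCS,
 * and later accesses return the cached value.
 */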
  1167. static void vmx_segment_cache_clear(struct vcpu_vmx *vmx)
  1168. {
  1169. vmx->segment_cache.bitmask = 0;
  1170. }
  1171. static bool vmx_segment_cache_test_set(struct vcpu_vmx *vmx, unsigned seg,
  1172. unsigned field)
  1173. {
  1174. bool ret;
  1175. u32 mask = 1 << (seg * SEG_FIELD_NR + field);
  1176. if (!(vmx->vcpu.arch.regs_avail & (1 << VCPU_EXREG_SEGMENTS))) {
  1177. vmx->vcpu.arch.regs_avail |= (1 << VCPU_EXREG_SEGMENTS);
  1178. vmx->segment_cache.bitmask = 0;
  1179. }
  1180. ret = vmx->segment_cache.bitmask & mask;
  1181. vmx->segment_cache.bitmask |= mask;
  1182. return ret;
  1183. }
  1184. static u16 vmx_read_guest_seg_selector(struct vcpu_vmx *vmx, unsigned seg)
  1185. {
  1186. u16 *p = &vmx->segment_cache.seg[seg].selector;
  1187. if (!vmx_segment_cache_test_set(vmx, seg, SEG_FIELD_SEL))
  1188. *p = vmcs_read16(kvm_vmx_segment_fields[seg].selector);
  1189. return *p;
  1190. }
  1191. static ulong vmx_read_guest_seg_base(struct vcpu_vmx *vmx, unsigned seg)
  1192. {
  1193. ulong *p = &vmx->segment_cache.seg[seg].base;
  1194. if (!vmx_segment_cache_test_set(vmx, seg, SEG_FIELD_BASE))
  1195. *p = vmcs_readl(kvm_vmx_segment_fields[seg].base);
  1196. return *p;
  1197. }
  1198. static u32 vmx_read_guest_seg_limit(struct vcpu_vmx *vmx, unsigned seg)
  1199. {
  1200. u32 *p = &vmx->segment_cache.seg[seg].limit;
  1201. if (!vmx_segment_cache_test_set(vmx, seg, SEG_FIELD_LIMIT))
  1202. *p = vmcs_read32(kvm_vmx_segment_fields[seg].limit);
  1203. return *p;
  1204. }
  1205. static u32 vmx_read_guest_seg_ar(struct vcpu_vmx *vmx, unsigned seg)
  1206. {
  1207. u32 *p = &vmx->segment_cache.seg[seg].ar;
  1208. if (!vmx_segment_cache_test_set(vmx, seg, SEG_FIELD_AR))
  1209. *p = vmcs_read32(kvm_vmx_segment_fields[seg].ar_bytes);
  1210. return *p;
  1211. }
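/*
 * Recompute which exceptions must cause a VM exit: #PF (unless EPT is used),
 * #UD, #MC, #NM (unless the guest FPU is active) and #DB are always
 * intercepted, #BP when guest debugging uses software breakpoints, every
 * vector while emulating real mode, and whatever L1 requested for a nested
 * L2 guest.
 */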
  1212. static void update_exception_bitmap(struct kvm_vcpu *vcpu)
  1213. {
  1214. u32 eb;
  1215. eb = (1u << PF_VECTOR) | (1u << UD_VECTOR) | (1u << MC_VECTOR) |
  1216. (1u << NM_VECTOR) | (1u << DB_VECTOR);
  1217. if ((vcpu->guest_debug &
  1218. (KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP)) ==
  1219. (KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP))
  1220. eb |= 1u << BP_VECTOR;
  1221. if (to_vmx(vcpu)->rmode.vm86_active)
  1222. eb = ~0;
  1223. if (enable_ept)
  1224. eb &= ~(1u << PF_VECTOR); /* bypass_guest_pf = 0 */
  1225. if (vcpu->fpu_active)
  1226. eb &= ~(1u << NM_VECTOR);
  1227. /* When we are running a nested L2 guest and L1 specified for it a
  1228. * certain exception bitmap, we must trap the same exceptions and pass
  1229. * them to L1. When running L2, we will only handle the exceptions
  1230. * specified above if L1 did not want them.
  1231. */
  1232. if (is_guest_mode(vcpu))
  1233. eb |= get_vmcs12(vcpu)->exception_bitmap;
  1234. vmcs_write32(EXCEPTION_BITMAP, eb);
  1235. }
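/*
 * The following helpers manage the MSR autoload lists, which hardware uses
 * to switch selected MSRs atomically on VM entry and VM exit. EFER and
 * PERF_GLOBAL_CTRL are handled through dedicated VM-entry/VM-exit controls
 * when the CPU supports them, instead of occupying autoload slots.
 */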
  1236. static void clear_atomic_switch_msr_special(unsigned long entry,
  1237. unsigned long exit)
  1238. {
  1239. vmcs_clear_bits(VM_ENTRY_CONTROLS, entry);
  1240. vmcs_clear_bits(VM_EXIT_CONTROLS, exit);
  1241. }
  1242. static void clear_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr)
  1243. {
  1244. unsigned i;
  1245. struct msr_autoload *m = &vmx->msr_autoload;
  1246. switch (msr) {
  1247. case MSR_EFER:
  1248. if (cpu_has_load_ia32_efer) {
  1249. clear_atomic_switch_msr_special(VM_ENTRY_LOAD_IA32_EFER,
  1250. VM_EXIT_LOAD_IA32_EFER);
  1251. return;
  1252. }
  1253. break;
  1254. case MSR_CORE_PERF_GLOBAL_CTRL:
  1255. if (cpu_has_load_perf_global_ctrl) {
  1256. clear_atomic_switch_msr_special(
  1257. VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL,
  1258. VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL);
  1259. return;
  1260. }
  1261. break;
  1262. }
  1263. for (i = 0; i < m->nr; ++i)
  1264. if (m->guest[i].index == msr)
  1265. break;
  1266. if (i == m->nr)
  1267. return;
  1268. --m->nr;
  1269. m->guest[i] = m->guest[m->nr];
  1270. m->host[i] = m->host[m->nr];
  1271. vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, m->nr);
  1272. vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, m->nr);
  1273. }
  1274. static void add_atomic_switch_msr_special(unsigned long entry,
  1275. unsigned long exit, unsigned long guest_val_vmcs,
  1276. unsigned long host_val_vmcs, u64 guest_val, u64 host_val)
  1277. {
  1278. vmcs_write64(guest_val_vmcs, guest_val);
  1279. vmcs_write64(host_val_vmcs, host_val);
  1280. vmcs_set_bits(VM_ENTRY_CONTROLS, entry);
  1281. vmcs_set_bits(VM_EXIT_CONTROLS, exit);
  1282. }
static void add_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr,
				  u64 guest_val, u64 host_val)
{
	unsigned i;
	struct msr_autoload *m = &vmx->msr_autoload;

	switch (msr) {
	case MSR_EFER:
		if (cpu_has_load_ia32_efer) {
			add_atomic_switch_msr_special(VM_ENTRY_LOAD_IA32_EFER,
					VM_EXIT_LOAD_IA32_EFER,
					GUEST_IA32_EFER,
					HOST_IA32_EFER,
					guest_val, host_val);
			return;
		}
		break;
	case MSR_CORE_PERF_GLOBAL_CTRL:
		if (cpu_has_load_perf_global_ctrl) {
			add_atomic_switch_msr_special(
					VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL,
					VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL,
					GUEST_IA32_PERF_GLOBAL_CTRL,
					HOST_IA32_PERF_GLOBAL_CTRL,
					guest_val, host_val);
			return;
		}
		break;
	}

	for (i = 0; i < m->nr; ++i)
		if (m->guest[i].index == msr)
			break;

	if (i == NR_AUTOLOAD_MSRS) {
		printk_once(KERN_WARNING "Not enough msr switch entries. "
				"Can't add msr %x\n", msr);
		return;
	} else if (i == m->nr) {
		++m->nr;
		vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, m->nr);
		vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, m->nr);
	}

	m->guest[i].index = msr;
	m->guest[i].value = guest_val;
	m->host[i].index = msr;
	m->host[i].value = host_val;
}
  1328. static void reload_tss(void)
  1329. {
  1330. /*
  1331. * VT restores TR but not its size. Useless.
  1332. */
  1333. struct desc_ptr *gdt = &__get_cpu_var(host_gdt);
  1334. struct desc_struct *descs;
  1335. descs = (void *)gdt->address;
  1336. descs[GDT_ENTRY_TSS].type = 9; /* available TSS */
  1337. load_TR_desc();
  1338. }
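/*
 * Decide how guest EFER is switched: normally it goes through the shared-MSR
 * mechanism (return true), but with EPT a guest/host NX mismatch forces EFER
 * onto the atomic-switch MSR lists instead (return false).
 */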
  1339. static bool update_transition_efer(struct vcpu_vmx *vmx, int efer_offset)
  1340. {
  1341. u64 guest_efer;
  1342. u64 ignore_bits;
  1343. guest_efer = vmx->vcpu.arch.efer;
  1344. /*
  1345. * NX is emulated; LMA and LME handled by hardware; SCE meaningless
  1346. * outside long mode
  1347. */
  1348. ignore_bits = EFER_NX | EFER_SCE;
  1349. #ifdef CONFIG_X86_64
  1350. ignore_bits |= EFER_LMA | EFER_LME;
  1351. /* SCE is meaningful only in long mode on Intel */
  1352. if (guest_efer & EFER_LMA)
  1353. ignore_bits &= ~(u64)EFER_SCE;
  1354. #endif
  1355. guest_efer &= ~ignore_bits;
  1356. guest_efer |= host_efer & ignore_bits;
  1357. vmx->guest_msrs[efer_offset].data = guest_efer;
  1358. vmx->guest_msrs[efer_offset].mask = ~ignore_bits;
  1359. clear_atomic_switch_msr(vmx, MSR_EFER);
  1360. /* On ept, can't emulate nx, and must switch nx atomically */
  1361. if (enable_ept && ((vmx->vcpu.arch.efer ^ host_efer) & EFER_NX)) {
  1362. guest_efer = vmx->vcpu.arch.efer;
  1363. if (!(guest_efer & EFER_LMA))
  1364. guest_efer &= ~EFER_LME;
  1365. add_atomic_switch_msr(vmx, MSR_EFER, guest_efer, host_efer);
  1366. return false;
  1367. }
  1368. return true;
  1369. }
  1370. static unsigned long segment_base(u16 selector)
  1371. {
  1372. struct desc_ptr *gdt = &__get_cpu_var(host_gdt);
  1373. struct desc_struct *d;
  1374. unsigned long table_base;
  1375. unsigned long v;
  1376. if (!(selector & ~3))
  1377. return 0;
  1378. table_base = gdt->address;
  1379. if (selector & 4) { /* from ldt */
  1380. u16 ldt_selector = kvm_read_ldt();
  1381. if (!(ldt_selector & ~3))
  1382. return 0;
  1383. table_base = segment_base(ldt_selector);
  1384. }
  1385. d = (struct desc_struct *)(table_base + (selector & ~7));
  1386. v = get_desc_base(d);
  1387. #ifdef CONFIG_X86_64
  1388. if (d->s == 0 && (d->type == 2 || d->type == 9 || d->type == 11))
  1389. v |= ((unsigned long)((struct ldttss_desc64 *)d)->base3) << 32;
  1390. #endif
  1391. return v;
  1392. }
  1393. static inline unsigned long kvm_read_tr_base(void)
  1394. {
  1395. u16 tr;
  1396. asm("str %0" : "=g"(tr));
  1397. return segment_base(tr);
  1398. }
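/*
 * Save the host segment selectors, FS/GS bases and shared MSRs that the
 * guest may clobber, and load the guest values; __vmx_load_host_state()
 * undoes this once the vcpu is put.
 */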
  1399. static void vmx_save_host_state(struct kvm_vcpu *vcpu)
  1400. {
  1401. struct vcpu_vmx *vmx = to_vmx(vcpu);
  1402. int i;
  1403. if (vmx->host_state.loaded)
  1404. return;
  1405. vmx->host_state.loaded = 1;
  1406. /*
  1407. * Set host fs and gs selectors. Unfortunately, 22.2.3 does not
  1408. * allow segment selectors with cpl > 0 or ti == 1.
  1409. */
  1410. vmx->host_state.ldt_sel = kvm_read_ldt();
  1411. vmx->host_state.gs_ldt_reload_needed = vmx->host_state.ldt_sel;
  1412. savesegment(fs, vmx->host_state.fs_sel);
  1413. if (!(vmx->host_state.fs_sel & 7)) {
  1414. vmcs_write16(HOST_FS_SELECTOR, vmx->host_state.fs_sel);
  1415. vmx->host_state.fs_reload_needed = 0;
  1416. } else {
  1417. vmcs_write16(HOST_FS_SELECTOR, 0);
  1418. vmx->host_state.fs_reload_needed = 1;
  1419. }
  1420. savesegment(gs, vmx->host_state.gs_sel);
  1421. if (!(vmx->host_state.gs_sel & 7))
  1422. vmcs_write16(HOST_GS_SELECTOR, vmx->host_state.gs_sel);
  1423. else {
  1424. vmcs_write16(HOST_GS_SELECTOR, 0);
  1425. vmx->host_state.gs_ldt_reload_needed = 1;
  1426. }
  1427. #ifdef CONFIG_X86_64
  1428. savesegment(ds, vmx->host_state.ds_sel);
  1429. savesegment(es, vmx->host_state.es_sel);
  1430. #endif
  1431. #ifdef CONFIG_X86_64
  1432. vmcs_writel(HOST_FS_BASE, read_msr(MSR_FS_BASE));
  1433. vmcs_writel(HOST_GS_BASE, read_msr(MSR_GS_BASE));
  1434. #else
  1435. vmcs_writel(HOST_FS_BASE, segment_base(vmx->host_state.fs_sel));
  1436. vmcs_writel(HOST_GS_BASE, segment_base(vmx->host_state.gs_sel));
  1437. #endif
  1438. #ifdef CONFIG_X86_64
  1439. rdmsrl(MSR_KERNEL_GS_BASE, vmx->msr_host_kernel_gs_base);
  1440. if (is_long_mode(&vmx->vcpu))
  1441. wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base);
  1442. #endif
  1443. for (i = 0; i < vmx->save_nmsrs; ++i)
  1444. kvm_set_shared_msr(vmx->guest_msrs[i].index,
  1445. vmx->guest_msrs[i].data,
  1446. vmx->guest_msrs[i].mask);
  1447. }
  1448. static void __vmx_load_host_state(struct vcpu_vmx *vmx)
  1449. {
  1450. if (!vmx->host_state.loaded)
  1451. return;
  1452. ++vmx->vcpu.stat.host_state_reload;
  1453. vmx->host_state.loaded = 0;
  1454. #ifdef CONFIG_X86_64
  1455. if (is_long_mode(&vmx->vcpu))
  1456. rdmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base);
  1457. #endif
  1458. if (vmx->host_state.gs_ldt_reload_needed) {
  1459. kvm_load_ldt(vmx->host_state.ldt_sel);
  1460. #ifdef CONFIG_X86_64
  1461. load_gs_index(vmx->host_state.gs_sel);
  1462. #else
  1463. loadsegment(gs, vmx->host_state.gs_sel);
  1464. #endif
  1465. }
  1466. if (vmx->host_state.fs_reload_needed)
  1467. loadsegment(fs, vmx->host_state.fs_sel);
  1468. #ifdef CONFIG_X86_64
  1469. if (unlikely(vmx->host_state.ds_sel | vmx->host_state.es_sel)) {
  1470. loadsegment(ds, vmx->host_state.ds_sel);
  1471. loadsegment(es, vmx->host_state.es_sel);
  1472. }
  1473. #endif
  1474. reload_tss();
  1475. #ifdef CONFIG_X86_64
  1476. wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_host_kernel_gs_base);
  1477. #endif
  1478. /*
  1479. * If the FPU is not active (through the host task or
  1480. * the guest vcpu), then restore the cr0.TS bit.
  1481. */
  1482. if (!user_has_fpu() && !vmx->vcpu.guest_fpu_loaded)
  1483. stts();
  1484. load_gdt(&__get_cpu_var(host_gdt));
  1485. }
  1486. static void vmx_load_host_state(struct vcpu_vmx *vmx)
  1487. {
  1488. preempt_disable();
  1489. __vmx_load_host_state(vmx);
  1490. preempt_enable();
  1491. }
  1492. /*
  1493. * Switches to specified vcpu, until a matching vcpu_put(), but assumes
  1494. * vcpu mutex is already taken.
  1495. */
  1496. static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
  1497. {
  1498. struct vcpu_vmx *vmx = to_vmx(vcpu);
  1499. u64 phys_addr = __pa(per_cpu(vmxarea, cpu));
  1500. if (!vmm_exclusive)
  1501. kvm_cpu_vmxon(phys_addr);
  1502. else if (vmx->loaded_vmcs->cpu != cpu)
  1503. loaded_vmcs_clear(vmx->loaded_vmcs);
  1504. if (per_cpu(current_vmcs, cpu) != vmx->loaded_vmcs->vmcs) {
  1505. per_cpu(current_vmcs, cpu) = vmx->loaded_vmcs->vmcs;
  1506. vmcs_load(vmx->loaded_vmcs->vmcs);
  1507. }
  1508. if (vmx->loaded_vmcs->cpu != cpu) {
  1509. struct desc_ptr *gdt = &__get_cpu_var(host_gdt);
  1510. unsigned long sysenter_esp;
  1511. kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
  1512. local_irq_disable();
  1513. crash_disable_local_vmclear(cpu);
  1514. /*
  1515. * Read loaded_vmcs->cpu should be before fetching
  1516. * loaded_vmcs->loaded_vmcss_on_cpu_link.
  1517. * See the comments in __loaded_vmcs_clear().
  1518. */
  1519. smp_rmb();
  1520. list_add(&vmx->loaded_vmcs->loaded_vmcss_on_cpu_link,
  1521. &per_cpu(loaded_vmcss_on_cpu, cpu));
  1522. crash_enable_local_vmclear(cpu);
  1523. local_irq_enable();
  1524. /*
  1525. * Linux uses per-cpu TSS and GDT, so set these when switching
  1526. * processors.
  1527. */
  1528. vmcs_writel(HOST_TR_BASE, kvm_read_tr_base()); /* 22.2.4 */
  1529. vmcs_writel(HOST_GDTR_BASE, gdt->address); /* 22.2.4 */
  1530. rdmsrl(MSR_IA32_SYSENTER_ESP, sysenter_esp);
  1531. vmcs_writel(HOST_IA32_SYSENTER_ESP, sysenter_esp); /* 22.2.3 */
  1532. vmx->loaded_vmcs->cpu = cpu;
  1533. }
  1534. }
  1535. static void vmx_vcpu_put(struct kvm_vcpu *vcpu)
  1536. {
  1537. __vmx_load_host_state(to_vmx(vcpu));
  1538. if (!vmm_exclusive) {
  1539. __loaded_vmcs_clear(to_vmx(vcpu)->loaded_vmcs);
  1540. vcpu->cpu = -1;
  1541. kvm_cpu_vmxoff();
  1542. }
  1543. }
  1544. static void vmx_fpu_activate(struct kvm_vcpu *vcpu)
  1545. {
  1546. ulong cr0;
  1547. if (vcpu->fpu_active)
  1548. return;
  1549. vcpu->fpu_active = 1;
  1550. cr0 = vmcs_readl(GUEST_CR0);
  1551. cr0 &= ~(X86_CR0_TS | X86_CR0_MP);
  1552. cr0 |= kvm_read_cr0_bits(vcpu, X86_CR0_TS | X86_CR0_MP);
  1553. vmcs_writel(GUEST_CR0, cr0);
  1554. update_exception_bitmap(vcpu);
  1555. vcpu->arch.cr0_guest_owned_bits = X86_CR0_TS;
  1556. if (is_guest_mode(vcpu))
  1557. vcpu->arch.cr0_guest_owned_bits &=
  1558. ~get_vmcs12(vcpu)->cr0_guest_host_mask;
  1559. vmcs_writel(CR0_GUEST_HOST_MASK, ~vcpu->arch.cr0_guest_owned_bits);
  1560. }
  1561. static void vmx_decache_cr0_guest_bits(struct kvm_vcpu *vcpu);
  1562. /*
  1563. * Return the cr0 value that a nested guest would read. This is a combination
  1564. * of the real cr0 used to run the guest (guest_cr0), and the bits shadowed by
  1565. * its hypervisor (cr0_read_shadow).
  1566. */
  1567. static inline unsigned long nested_read_cr0(struct vmcs12 *fields)
  1568. {
  1569. return (fields->guest_cr0 & ~fields->cr0_guest_host_mask) |
  1570. (fields->cr0_read_shadow & fields->cr0_guest_host_mask);
  1571. }
  1572. static inline unsigned long nested_read_cr4(struct vmcs12 *fields)
  1573. {
  1574. return (fields->guest_cr4 & ~fields->cr4_guest_host_mask) |
  1575. (fields->cr4_read_shadow & fields->cr4_guest_host_mask);
  1576. }
  1577. static void vmx_fpu_deactivate(struct kvm_vcpu *vcpu)
  1578. {
  1579. /* Note that there is no vcpu->fpu_active = 0 here. The caller must
  1580. * set this *before* calling this function.
  1581. */
  1582. vmx_decache_cr0_guest_bits(vcpu);
  1583. vmcs_set_bits(GUEST_CR0, X86_CR0_TS | X86_CR0_MP);
  1584. update_exception_bitmap(vcpu);
  1585. vcpu->arch.cr0_guest_owned_bits = 0;
  1586. vmcs_writel(CR0_GUEST_HOST_MASK, ~vcpu->arch.cr0_guest_owned_bits);
  1587. if (is_guest_mode(vcpu)) {
  1588. /*
  1589. * L1's specified read shadow might not contain the TS bit,
  1590. * so now that we turned on shadowing of this bit, we need to
  1591. * set this bit of the shadow. Like in nested_vmx_run we need
  1592. * nested_read_cr0(vmcs12), but vmcs12->guest_cr0 is not yet
  1593. * up-to-date here because we just decached cr0.TS (and we'll
  1594. * only update vmcs12->guest_cr0 on nested exit).
  1595. */
  1596. struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
  1597. vmcs12->guest_cr0 = (vmcs12->guest_cr0 & ~X86_CR0_TS) |
  1598. (vcpu->arch.cr0 & X86_CR0_TS);
  1599. vmcs_writel(CR0_READ_SHADOW, nested_read_cr0(vmcs12));
  1600. } else
  1601. vmcs_writel(CR0_READ_SHADOW, vcpu->arch.cr0);
  1602. }
  1603. static unsigned long vmx_get_rflags(struct kvm_vcpu *vcpu)
  1604. {
  1605. unsigned long rflags, save_rflags;
  1606. if (!test_bit(VCPU_EXREG_RFLAGS, (ulong *)&vcpu->arch.regs_avail)) {
  1607. __set_bit(VCPU_EXREG_RFLAGS, (ulong *)&vcpu->arch.regs_avail);
  1608. rflags = vmcs_readl(GUEST_RFLAGS);
  1609. if (to_vmx(vcpu)->rmode.vm86_active) {
  1610. rflags &= RMODE_GUEST_OWNED_EFLAGS_BITS;
  1611. save_rflags = to_vmx(vcpu)->rmode.save_rflags;
  1612. rflags |= save_rflags & ~RMODE_GUEST_OWNED_EFLAGS_BITS;
  1613. }
  1614. to_vmx(vcpu)->rflags = rflags;
  1615. }
  1616. return to_vmx(vcpu)->rflags;
  1617. }
  1618. static void vmx_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
  1619. {
  1620. __set_bit(VCPU_EXREG_RFLAGS, (ulong *)&vcpu->arch.regs_avail);
  1621. to_vmx(vcpu)->rflags = rflags;
  1622. if (to_vmx(vcpu)->rmode.vm86_active) {
  1623. to_vmx(vcpu)->rmode.save_rflags = rflags;
  1624. rflags |= X86_EFLAGS_IOPL | X86_EFLAGS_VM;
  1625. }
  1626. vmcs_writel(GUEST_RFLAGS, rflags);
  1627. }
  1628. static u32 vmx_get_interrupt_shadow(struct kvm_vcpu *vcpu, int mask)
  1629. {
  1630. u32 interruptibility = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO);
  1631. int ret = 0;
  1632. if (interruptibility & GUEST_INTR_STATE_STI)
  1633. ret |= KVM_X86_SHADOW_INT_STI;
  1634. if (interruptibility & GUEST_INTR_STATE_MOV_SS)
  1635. ret |= KVM_X86_SHADOW_INT_MOV_SS;
  1636. return ret & mask;
  1637. }
  1638. static void vmx_set_interrupt_shadow(struct kvm_vcpu *vcpu, int mask)
  1639. {
  1640. u32 interruptibility_old = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO);
  1641. u32 interruptibility = interruptibility_old;
  1642. interruptibility &= ~(GUEST_INTR_STATE_STI | GUEST_INTR_STATE_MOV_SS);
  1643. if (mask & KVM_X86_SHADOW_INT_MOV_SS)
  1644. interruptibility |= GUEST_INTR_STATE_MOV_SS;
  1645. else if (mask & KVM_X86_SHADOW_INT_STI)
  1646. interruptibility |= GUEST_INTR_STATE_STI;
  1647. if ((interruptibility != interruptibility_old))
  1648. vmcs_write32(GUEST_INTERRUPTIBILITY_INFO, interruptibility);
  1649. }
  1650. static void skip_emulated_instruction(struct kvm_vcpu *vcpu)
  1651. {
  1652. unsigned long rip;
  1653. rip = kvm_rip_read(vcpu);
  1654. rip += vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
  1655. kvm_rip_write(vcpu, rip);
  1656. /* skipping an emulated instruction also counts */
  1657. vmx_set_interrupt_shadow(vcpu, 0);
  1658. }
/*
 * KVM wants to re-inject into the guest page faults that it intercepted.
 * When running a nested guest, this function decides whether the fault
 * needs to be delivered to L1 or to L2. It assumes it is called with the
 * exit reason in vmcs02 being a #PF exception (this is the only case in
 * which KVM injects a #PF when L2 is running).
 */
static int nested_pf_handled(struct kvm_vcpu *vcpu)
{
	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);

	/* TODO: also check PFEC_MATCH/MASK, not just EB.PF. */
	if (!(vmcs12->exception_bitmap & (1u << PF_VECTOR)))
		return 0;

	nested_vmx_vmexit(vcpu);
	return 1;
}
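
/*
 * Queue an exception for injection into the guest on the next VM entry,
 * handling real-mode emulation and soft exceptions; a #PF raised while L2
 * is running may instead be reflected to L1 via nested_pf_handled().
 */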
  1675. static void vmx_queue_exception(struct kvm_vcpu *vcpu, unsigned nr,
  1676. bool has_error_code, u32 error_code,
  1677. bool reinject)
  1678. {
  1679. struct vcpu_vmx *vmx = to_vmx(vcpu);
  1680. u32 intr_info = nr | INTR_INFO_VALID_MASK;
  1681. if (nr == PF_VECTOR && is_guest_mode(vcpu) &&
  1682. !vmx->nested.nested_run_pending && nested_pf_handled(vcpu))
  1683. return;
  1684. if (has_error_code) {
  1685. vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE, error_code);
  1686. intr_info |= INTR_INFO_DELIVER_CODE_MASK;
  1687. }
  1688. if (vmx->rmode.vm86_active) {
  1689. int inc_eip = 0;
  1690. if (kvm_exception_is_soft(nr))
  1691. inc_eip = vcpu->arch.event_exit_inst_len;
  1692. if (kvm_inject_realmode_interrupt(vcpu, nr, inc_eip) != EMULATE_DONE)
  1693. kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
  1694. return;
  1695. }
  1696. if (kvm_exception_is_soft(nr)) {
  1697. vmcs_write32(VM_ENTRY_INSTRUCTION_LEN,
  1698. vmx->vcpu.arch.event_exit_inst_len);
  1699. intr_info |= INTR_TYPE_SOFT_EXCEPTION;
  1700. } else
  1701. intr_info |= INTR_TYPE_HARD_EXCEPTION;
  1702. vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, intr_info);
  1703. }
  1704. static bool vmx_rdtscp_supported(void)
  1705. {
  1706. return cpu_has_vmx_rdtscp();
  1707. }
  1708. static bool vmx_invpcid_supported(void)
  1709. {
  1710. return cpu_has_vmx_invpcid() && enable_ept;
  1711. }
  1712. /*
  1713. * Swap MSR entry in host/guest MSR entry array.
  1714. */
  1715. static void move_msr_up(struct vcpu_vmx *vmx, int from, int to)
  1716. {
  1717. struct shared_msr_entry tmp;
  1718. tmp = vmx->guest_msrs[to];
  1719. vmx->guest_msrs[to] = vmx->guest_msrs[from];
  1720. vmx->guest_msrs[from] = tmp;
  1721. }
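/*
 * Point the VMCS at the MSR bitmap matching the vcpu's current mode
 * (legacy vs. long mode, with or without x2APIC virtualization).
 */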
  1722. static void vmx_set_msr_bitmap(struct kvm_vcpu *vcpu)
  1723. {
  1724. unsigned long *msr_bitmap;
  1725. if (irqchip_in_kernel(vcpu->kvm) && apic_x2apic_mode(vcpu->arch.apic)) {
  1726. if (is_long_mode(vcpu))
  1727. msr_bitmap = vmx_msr_bitmap_longmode_x2apic;
  1728. else
  1729. msr_bitmap = vmx_msr_bitmap_legacy_x2apic;
  1730. } else {
  1731. if (is_long_mode(vcpu))
  1732. msr_bitmap = vmx_msr_bitmap_longmode;
  1733. else
  1734. msr_bitmap = vmx_msr_bitmap_legacy;
  1735. }
  1736. vmcs_write64(MSR_BITMAP, __pa(msr_bitmap));
  1737. }
  1738. /*
  1739. * Set up the vmcs to automatically save and restore system
  1740. * msrs. Don't touch the 64-bit msrs if the guest is in legacy
  1741. * mode, as fiddling with msrs is very expensive.
  1742. */
  1743. static void setup_msrs(struct vcpu_vmx *vmx)
  1744. {
  1745. int save_nmsrs, index;
  1746. save_nmsrs = 0;
  1747. #ifdef CONFIG_X86_64
  1748. if (is_long_mode(&vmx->vcpu)) {
  1749. index = __find_msr_index(vmx, MSR_SYSCALL_MASK);
  1750. if (index >= 0)
  1751. move_msr_up(vmx, index, save_nmsrs++);
  1752. index = __find_msr_index(vmx, MSR_LSTAR);
  1753. if (index >= 0)
  1754. move_msr_up(vmx, index, save_nmsrs++);
  1755. index = __find_msr_index(vmx, MSR_CSTAR);
  1756. if (index >= 0)
  1757. move_msr_up(vmx, index, save_nmsrs++);
  1758. index = __find_msr_index(vmx, MSR_TSC_AUX);
  1759. if (index >= 0 && vmx->rdtscp_enabled)
  1760. move_msr_up(vmx, index, save_nmsrs++);
  1761. /*
  1762. * MSR_STAR is only needed on long mode guests, and only
  1763. * if efer.sce is enabled.
  1764. */
  1765. index = __find_msr_index(vmx, MSR_STAR);
  1766. if ((index >= 0) && (vmx->vcpu.arch.efer & EFER_SCE))
  1767. move_msr_up(vmx, index, save_nmsrs++);
  1768. }
  1769. #endif
  1770. index = __find_msr_index(vmx, MSR_EFER);
  1771. if (index >= 0 && update_transition_efer(vmx, index))
  1772. move_msr_up(vmx, index, save_nmsrs++);
  1773. vmx->save_nmsrs = save_nmsrs;
  1774. if (cpu_has_vmx_msr_bitmap())
  1775. vmx_set_msr_bitmap(&vmx->vcpu);
  1776. }
  1777. /*
  1778. * reads and returns guest's timestamp counter "register"
  1779. * guest_tsc = host_tsc + tsc_offset -- 21.3
  1780. */
  1781. static u64 guest_read_tsc(void)
  1782. {
  1783. u64 host_tsc, tsc_offset;
  1784. rdtscll(host_tsc);
  1785. tsc_offset = vmcs_read64(TSC_OFFSET);
  1786. return host_tsc + tsc_offset;
  1787. }
  1788. /*
  1789. * Like guest_read_tsc, but always returns L1's notion of the timestamp
  1790. * counter, even if a nested guest (L2) is currently running.
  1791. */
  1792. u64 vmx_read_l1_tsc(struct kvm_vcpu *vcpu, u64 host_tsc)
  1793. {
  1794. u64 tsc_offset;
  1795. tsc_offset = is_guest_mode(vcpu) ?
  1796. to_vmx(vcpu)->nested.vmcs01_tsc_offset :
  1797. vmcs_read64(TSC_OFFSET);
  1798. return host_tsc + tsc_offset;
  1799. }
  1800. /*
  1801. * Engage any workarounds for mis-matched TSC rates. Currently limited to
  1802. * software catchup for faster rates on slower CPUs.
  1803. */
  1804. static void vmx_set_tsc_khz(struct kvm_vcpu *vcpu, u32 user_tsc_khz, bool scale)
  1805. {
  1806. if (!scale)
  1807. return;
  1808. if (user_tsc_khz > tsc_khz) {
  1809. vcpu->arch.tsc_catchup = 1;
  1810. vcpu->arch.tsc_always_catchup = 1;
  1811. } else
  1812. WARN(1, "user requested TSC rate below hardware speed\n");
  1813. }
  1814. static u64 vmx_read_tsc_offset(struct kvm_vcpu *vcpu)
  1815. {
  1816. return vmcs_read64(TSC_OFFSET);
  1817. }
  1818. /*
  1819. * writes 'offset' into guest's timestamp counter offset register
  1820. */
  1821. static void vmx_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
  1822. {
  1823. if (is_guest_mode(vcpu)) {
  1824. /*
  1825. * We're here if L1 chose not to trap WRMSR to TSC. According
  1826. * to the spec, this should set L1's TSC; The offset that L1
  1827. * set for L2 remains unchanged, and still needs to be added
  1828. * to the newly set TSC to get L2's TSC.
  1829. */
  1830. struct vmcs12 *vmcs12;
  1831. to_vmx(vcpu)->nested.vmcs01_tsc_offset = offset;
  1832. /* recalculate vmcs02.TSC_OFFSET: */
  1833. vmcs12 = get_vmcs12(vcpu);
  1834. vmcs_write64(TSC_OFFSET, offset +
  1835. (nested_cpu_has(vmcs12, CPU_BASED_USE_TSC_OFFSETING) ?
  1836. vmcs12->tsc_offset : 0));
  1837. } else {
  1838. trace_kvm_write_tsc_offset(vcpu->vcpu_id,
  1839. vmcs_read64(TSC_OFFSET), offset);
  1840. vmcs_write64(TSC_OFFSET, offset);
  1841. }
  1842. }
  1843. static void vmx_adjust_tsc_offset(struct kvm_vcpu *vcpu, s64 adjustment, bool host)
  1844. {
  1845. u64 offset = vmcs_read64(TSC_OFFSET);
  1846. vmcs_write64(TSC_OFFSET, offset + adjustment);
  1847. if (is_guest_mode(vcpu)) {
  1848. /* Even when running L2, the adjustment needs to apply to L1 */
  1849. to_vmx(vcpu)->nested.vmcs01_tsc_offset += adjustment;
  1850. } else
  1851. trace_kvm_write_tsc_offset(vcpu->vcpu_id, offset,
  1852. offset + adjustment);
  1853. }
  1854. static u64 vmx_compute_tsc_offset(struct kvm_vcpu *vcpu, u64 target_tsc)
  1855. {
  1856. return target_tsc - native_read_tsc();
  1857. }
  1858. static bool guest_cpuid_has_vmx(struct kvm_vcpu *vcpu)
  1859. {
  1860. struct kvm_cpuid_entry2 *best = kvm_find_cpuid_entry(vcpu, 1, 0);
  1861. return best && (best->ecx & (1 << (X86_FEATURE_VMX & 31)));
  1862. }
  1863. /*
  1864. * nested_vmx_allowed() checks whether a guest should be allowed to use VMX
  1865. * instructions and MSRs (i.e., nested VMX). Nested VMX is disabled for
  1866. * all guests if the "nested" module option is off, and can also be disabled
  1867. * for a single guest by disabling its VMX cpuid bit.
  1868. */
  1869. static inline bool nested_vmx_allowed(struct kvm_vcpu *vcpu)
  1870. {
  1871. return nested && guest_cpuid_has_vmx(vcpu);
  1872. }
  1873. /*
  1874. * nested_vmx_setup_ctls_msrs() sets up variables containing the values to be
  1875. * returned for the various VMX controls MSRs when nested VMX is enabled.
  1876. * The same values should also be used to verify that vmcs12 control fields are
  1877. * valid during nested entry from L1 to L2.
  1878. * Each of these control msrs has a low and high 32-bit half: A low bit is on
  1879. * if the corresponding bit in the (32-bit) control field *must* be on, and a
  1880. * bit in the high half is on if the corresponding bit in the control field
  1881. * may be on. See also vmx_control_verify().
  1882. * TODO: allow these variables to be modified (downgraded) by module options
  1883. * or other means.
  1884. */
  1885. static u32 nested_vmx_procbased_ctls_low, nested_vmx_procbased_ctls_high;
  1886. static u32 nested_vmx_secondary_ctls_low, nested_vmx_secondary_ctls_high;
  1887. static u32 nested_vmx_pinbased_ctls_low, nested_vmx_pinbased_ctls_high;
  1888. static u32 nested_vmx_exit_ctls_low, nested_vmx_exit_ctls_high;
  1889. static u32 nested_vmx_entry_ctls_low, nested_vmx_entry_ctls_high;
  1890. static u32 nested_vmx_misc_low, nested_vmx_misc_high;
  1891. static u32 nested_vmx_ept_caps;
  1892. static __init void nested_vmx_setup_ctls_msrs(void)
  1893. {
  1894. /*
  1895. * Note that as a general rule, the high half of the MSRs (bits in
  1896. * the control fields which may be 1) should be initialized by the
  1897. * intersection of the underlying hardware's MSR (i.e., features which
  1898. * can be supported) and the list of features we want to expose -
  1899. * because they are known to be properly supported in our code.
  1900. * Also, usually, the low half of the MSRs (bits which must be 1) can
  1901. * be set to 0, meaning that L1 may turn off any of these bits. The
  1902. * reason is that if one of these bits is necessary, it will appear
  1903. * in vmcs01 and prepare_vmcs02, when it bitwise-or's the control
  1904. * fields of vmcs01 and vmcs02, will turn these bits off - and
  1905. * nested_vmx_exit_handled() will not pass related exits to L1.
  1906. * These rules have exceptions below.
  1907. */
  1908. /* pin-based controls */
  1909. rdmsr(MSR_IA32_VMX_PINBASED_CTLS,
  1910. nested_vmx_pinbased_ctls_low, nested_vmx_pinbased_ctls_high);
  1911. /*
  1912. * According to the Intel spec, if bit 55 of VMX_BASIC is off (as it is
  1913. * in our case), bits 1, 2 and 4 (i.e., 0x16) must be 1 in this MSR.
  1914. */
  1915. nested_vmx_pinbased_ctls_low |= PIN_BASED_ALWAYSON_WITHOUT_TRUE_MSR;
  1916. nested_vmx_pinbased_ctls_high &= PIN_BASED_EXT_INTR_MASK |
  1917. PIN_BASED_NMI_EXITING | PIN_BASED_VIRTUAL_NMIS |
  1918. PIN_BASED_VMX_PREEMPTION_TIMER;
  1919. nested_vmx_pinbased_ctls_high |= PIN_BASED_ALWAYSON_WITHOUT_TRUE_MSR;
  1920. /*
  1921. * Exit controls
  1922. * If bit 55 of VMX_BASIC is off, bits 0-8 and 10, 11, 13, 14, 16 and
  1923. * 17 must be 1.
  1924. */
  1925. rdmsr(MSR_IA32_VMX_EXIT_CTLS,
  1926. nested_vmx_exit_ctls_low, nested_vmx_exit_ctls_high);
  1927. nested_vmx_exit_ctls_low = VM_EXIT_ALWAYSON_WITHOUT_TRUE_MSR;
  1928. /* Note that guest use of VM_EXIT_ACK_INTR_ON_EXIT is not supported. */
  1929. nested_vmx_exit_ctls_high &=
  1930. #ifdef CONFIG_X86_64
  1931. VM_EXIT_HOST_ADDR_SPACE_SIZE |
  1932. #endif
  1933. VM_EXIT_LOAD_IA32_PAT | VM_EXIT_SAVE_IA32_PAT;
  1934. nested_vmx_exit_ctls_high |= (VM_EXIT_ALWAYSON_WITHOUT_TRUE_MSR |
  1935. VM_EXIT_LOAD_IA32_EFER);
  1936. /* entry controls */
  1937. rdmsr(MSR_IA32_VMX_ENTRY_CTLS,
  1938. nested_vmx_entry_ctls_low, nested_vmx_entry_ctls_high);
  1939. /* If bit 55 of VMX_BASIC is off, bits 0-8 and 12 must be 1. */
  1940. nested_vmx_entry_ctls_low = VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR;
  1941. nested_vmx_entry_ctls_high &=
  1942. #ifdef CONFIG_X86_64
  1943. VM_ENTRY_IA32E_MODE |
  1944. #endif
  1945. VM_ENTRY_LOAD_IA32_PAT;
  1946. nested_vmx_entry_ctls_high |= (VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR |
  1947. VM_ENTRY_LOAD_IA32_EFER);
  1948. /* cpu-based controls */
  1949. rdmsr(MSR_IA32_VMX_PROCBASED_CTLS,
  1950. nested_vmx_procbased_ctls_low, nested_vmx_procbased_ctls_high);
  1951. nested_vmx_procbased_ctls_low = 0;
  1952. nested_vmx_procbased_ctls_high &=
  1953. CPU_BASED_VIRTUAL_INTR_PENDING | CPU_BASED_USE_TSC_OFFSETING |
  1954. CPU_BASED_HLT_EXITING | CPU_BASED_INVLPG_EXITING |
  1955. CPU_BASED_MWAIT_EXITING | CPU_BASED_CR3_LOAD_EXITING |
  1956. CPU_BASED_CR3_STORE_EXITING |
  1957. #ifdef CONFIG_X86_64
  1958. CPU_BASED_CR8_LOAD_EXITING | CPU_BASED_CR8_STORE_EXITING |
  1959. #endif
  1960. CPU_BASED_MOV_DR_EXITING | CPU_BASED_UNCOND_IO_EXITING |
  1961. CPU_BASED_USE_IO_BITMAPS | CPU_BASED_MONITOR_EXITING |
  1962. CPU_BASED_RDPMC_EXITING | CPU_BASED_RDTSC_EXITING |
  1963. CPU_BASED_PAUSE_EXITING |
  1964. CPU_BASED_ACTIVATE_SECONDARY_CONTROLS;
  1965. /*
  1966. * We can allow some features even when not supported by the
  1967. * hardware. For example, L1 can specify an MSR bitmap - and we
  1968. * can use it to avoid exits to L1 - even when L0 runs L2
  1969. * without MSR bitmaps.
  1970. */
  1971. nested_vmx_procbased_ctls_high |= CPU_BASED_USE_MSR_BITMAPS;
  1972. /* secondary cpu-based controls */
  1973. rdmsr(MSR_IA32_VMX_PROCBASED_CTLS2,
  1974. nested_vmx_secondary_ctls_low, nested_vmx_secondary_ctls_high);
  1975. nested_vmx_secondary_ctls_low = 0;
  1976. nested_vmx_secondary_ctls_high &=
  1977. SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
  1978. SECONDARY_EXEC_WBINVD_EXITING;
  1979. if (enable_ept) {
  1980. /* nested EPT: emulate EPT also to L1 */
  1981. nested_vmx_secondary_ctls_high |= SECONDARY_EXEC_ENABLE_EPT;
  1982. nested_vmx_ept_caps = VMX_EPT_PAGE_WALK_4_BIT |
  1983. VMX_EPTP_WB_BIT | VMX_EPT_INVEPT_BIT;
  1984. nested_vmx_ept_caps &= vmx_capability.ept;
  1985. /*
  1986. * Since invept is completely emulated we support both global
  1987. * and context invalidation independent of what host cpu
  1988. * supports
  1989. */
  1990. nested_vmx_ept_caps |= VMX_EPT_EXTENT_GLOBAL_BIT |
  1991. VMX_EPT_EXTENT_CONTEXT_BIT;
  1992. } else
  1993. nested_vmx_ept_caps = 0;
  1994. /* miscellaneous data */
  1995. rdmsr(MSR_IA32_VMX_MISC, nested_vmx_misc_low, nested_vmx_misc_high);
  1996. nested_vmx_misc_low &= VMX_MISC_PREEMPTION_TIMER_RATE_MASK |
  1997. VMX_MISC_SAVE_EFER_LMA;
  1998. nested_vmx_misc_high = 0;
  1999. }
static inline bool vmx_control_verify(u32 control, u32 low, u32 high)
{
	/*
	 * Bits that are 0 in 'high' must also be 0 in 'control', and bits
	 * that are 1 in 'low' must also be 1 in 'control'.
	 */
	return ((control & high) | low) == control;
}

static inline u64 vmx_control_msr(u32 low, u32 high)
{
	return low | ((u64)high << 32);
}
  2011. /*
  2012. * If we allow our guest to use VMX instructions (i.e., nested VMX), we should
  2013. * also let it use VMX-specific MSRs.
  2014. * vmx_get_vmx_msr() and vmx_set_vmx_msr() return 1 when we handled a
  2015. * VMX-specific MSR, or 0 when we haven't (and the caller should handle it
  2016. * like all other MSRs).
  2017. */
  2018. static int vmx_get_vmx_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
  2019. {
  2020. if (!nested_vmx_allowed(vcpu) && msr_index >= MSR_IA32_VMX_BASIC &&
  2021. msr_index <= MSR_IA32_VMX_TRUE_ENTRY_CTLS) {
  2022. /*
  2023. * According to the spec, processors which do not support VMX
  2024. * should throw a #GP(0) when VMX capability MSRs are read.
  2025. */
  2026. kvm_queue_exception_e(vcpu, GP_VECTOR, 0);
  2027. return 1;
  2028. }
  2029. switch (msr_index) {
  2030. case MSR_IA32_FEATURE_CONTROL:
  2031. if (nested_vmx_allowed(vcpu)) {
  2032. *pdata = to_vmx(vcpu)->nested.msr_ia32_feature_control;
  2033. break;
  2034. }
  2035. return 0;
  2036. case MSR_IA32_VMX_BASIC:
  2037. /*
  2038. * This MSR reports some information about VMX support. We
  2039. * should return information about the VMX we emulate for the
  2040. * guest, and the VMCS structure we give it - not about the
  2041. * VMX support of the underlying hardware.
  2042. */
  2043. *pdata = VMCS12_REVISION |
  2044. ((u64)VMCS12_SIZE << VMX_BASIC_VMCS_SIZE_SHIFT) |
  2045. (VMX_BASIC_MEM_TYPE_WB << VMX_BASIC_MEM_TYPE_SHIFT);
  2046. break;
  2047. case MSR_IA32_VMX_TRUE_PINBASED_CTLS:
  2048. case MSR_IA32_VMX_PINBASED_CTLS:
  2049. *pdata = vmx_control_msr(nested_vmx_pinbased_ctls_low,
  2050. nested_vmx_pinbased_ctls_high);
  2051. break;
  2052. case MSR_IA32_VMX_TRUE_PROCBASED_CTLS:
  2053. case MSR_IA32_VMX_PROCBASED_CTLS:
  2054. *pdata = vmx_control_msr(nested_vmx_procbased_ctls_low,
  2055. nested_vmx_procbased_ctls_high);
  2056. break;
  2057. case MSR_IA32_VMX_TRUE_EXIT_CTLS:
  2058. case MSR_IA32_VMX_EXIT_CTLS:
  2059. *pdata = vmx_control_msr(nested_vmx_exit_ctls_low,
  2060. nested_vmx_exit_ctls_high);
  2061. break;
  2062. case MSR_IA32_VMX_TRUE_ENTRY_CTLS:
  2063. case MSR_IA32_VMX_ENTRY_CTLS:
  2064. *pdata = vmx_control_msr(nested_vmx_entry_ctls_low,
  2065. nested_vmx_entry_ctls_high);
  2066. break;
  2067. case MSR_IA32_VMX_MISC:
  2068. *pdata = vmx_control_msr(nested_vmx_misc_low,
  2069. nested_vmx_misc_high);
  2070. break;
  2071. /*
  2072. * These MSRs specify bits which the guest must keep fixed (on or off)
  2073. * while L1 is in VMXON mode (in L1's root mode, or running an L2).
  2074. * We picked the standard core2 setting.
  2075. */
  2076. #define VMXON_CR0_ALWAYSON (X86_CR0_PE | X86_CR0_PG | X86_CR0_NE)
  2077. #define VMXON_CR4_ALWAYSON X86_CR4_VMXE
  2078. case MSR_IA32_VMX_CR0_FIXED0:
  2079. *pdata = VMXON_CR0_ALWAYSON;
  2080. break;
  2081. case MSR_IA32_VMX_CR0_FIXED1:
  2082. *pdata = -1ULL;
  2083. break;
  2084. case MSR_IA32_VMX_CR4_FIXED0:
  2085. *pdata = VMXON_CR4_ALWAYSON;
  2086. break;
  2087. case MSR_IA32_VMX_CR4_FIXED1:
  2088. *pdata = -1ULL;
  2089. break;
  2090. case MSR_IA32_VMX_VMCS_ENUM:
  2091. *pdata = 0x1f;
  2092. break;
  2093. case MSR_IA32_VMX_PROCBASED_CTLS2:
  2094. *pdata = vmx_control_msr(nested_vmx_secondary_ctls_low,
  2095. nested_vmx_secondary_ctls_high);
  2096. break;
  2097. case MSR_IA32_VMX_EPT_VPID_CAP:
  2098. /* Currently, no nested vpid support */
  2099. *pdata = nested_vmx_ept_caps;
  2100. break;
  2101. default:
  2102. return 0;
  2103. }
  2104. return 1;
  2105. }
  2106. static int vmx_set_vmx_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
  2107. {
  2108. u32 msr_index = msr_info->index;
  2109. u64 data = msr_info->data;
  2110. bool host_initialized = msr_info->host_initiated;
  2111. if (!nested_vmx_allowed(vcpu))
  2112. return 0;
  2113. if (msr_index == MSR_IA32_FEATURE_CONTROL) {
  2114. if (!host_initialized &&
  2115. to_vmx(vcpu)->nested.msr_ia32_feature_control
  2116. & FEATURE_CONTROL_LOCKED)
  2117. return 0;
  2118. to_vmx(vcpu)->nested.msr_ia32_feature_control = data;
  2119. return 1;
  2120. }
  2121. /*
  2122. * No need to treat VMX capability MSRs specially: If we don't handle
  2123. * them, handle_wrmsr will #GP(0), which is correct (they are readonly)
  2124. */
  2125. return 0;
  2126. }
  2127. /*
  2128. * Reads an msr value (of 'msr_index') into 'pdata'.
  2129. * Returns 0 on success, non-0 otherwise.
  2130. * Assumes vcpu_load() was already called.
  2131. */
  2132. static int vmx_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
  2133. {
  2134. u64 data;
  2135. struct shared_msr_entry *msr;
  2136. if (!pdata) {
  2137. printk(KERN_ERR "BUG: get_msr called with NULL pdata\n");
  2138. return -EINVAL;
  2139. }
  2140. switch (msr_index) {
  2141. #ifdef CONFIG_X86_64
  2142. case MSR_FS_BASE:
  2143. data = vmcs_readl(GUEST_FS_BASE);
  2144. break;
  2145. case MSR_GS_BASE:
  2146. data = vmcs_readl(GUEST_GS_BASE);
  2147. break;
  2148. case MSR_KERNEL_GS_BASE:
  2149. vmx_load_host_state(to_vmx(vcpu));
  2150. data = to_vmx(vcpu)->msr_guest_kernel_gs_base;
  2151. break;
  2152. #endif
  2153. case MSR_EFER:
  2154. return kvm_get_msr_common(vcpu, msr_index, pdata);
  2155. case MSR_IA32_TSC:
  2156. data = guest_read_tsc();
  2157. break;
  2158. case MSR_IA32_SYSENTER_CS:
  2159. data = vmcs_read32(GUEST_SYSENTER_CS);
  2160. break;
  2161. case MSR_IA32_SYSENTER_EIP:
  2162. data = vmcs_readl(GUEST_SYSENTER_EIP);
  2163. break;
  2164. case MSR_IA32_SYSENTER_ESP:
  2165. data = vmcs_readl(GUEST_SYSENTER_ESP);
  2166. break;
  2167. case MSR_TSC_AUX:
  2168. if (!to_vmx(vcpu)->rdtscp_enabled)
  2169. return 1;
  2170. /* Otherwise falls through */
  2171. default:
  2172. if (vmx_get_vmx_msr(vcpu, msr_index, pdata))
  2173. return 0;
  2174. msr = find_msr_entry(to_vmx(vcpu), msr_index);
  2175. if (msr) {
  2176. data = msr->data;
  2177. break;
  2178. }
  2179. return kvm_get_msr_common(vcpu, msr_index, pdata);
  2180. }
  2181. *pdata = data;
  2182. return 0;
  2183. }
  2184. /*
2185. * Writes msr value into the appropriate "register".
  2186. * Returns 0 on success, non-0 otherwise.
  2187. * Assumes vcpu_load() was already called.
  2188. */
  2189. static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
  2190. {
  2191. struct vcpu_vmx *vmx = to_vmx(vcpu);
  2192. struct shared_msr_entry *msr;
  2193. int ret = 0;
  2194. u32 msr_index = msr_info->index;
  2195. u64 data = msr_info->data;
  2196. switch (msr_index) {
  2197. case MSR_EFER:
  2198. ret = kvm_set_msr_common(vcpu, msr_info);
  2199. break;
  2200. #ifdef CONFIG_X86_64
  2201. case MSR_FS_BASE:
  2202. vmx_segment_cache_clear(vmx);
  2203. vmcs_writel(GUEST_FS_BASE, data);
  2204. break;
  2205. case MSR_GS_BASE:
  2206. vmx_segment_cache_clear(vmx);
  2207. vmcs_writel(GUEST_GS_BASE, data);
  2208. break;
  2209. case MSR_KERNEL_GS_BASE:
  2210. vmx_load_host_state(vmx);
  2211. vmx->msr_guest_kernel_gs_base = data;
  2212. break;
  2213. #endif
  2214. case MSR_IA32_SYSENTER_CS:
  2215. vmcs_write32(GUEST_SYSENTER_CS, data);
  2216. break;
  2217. case MSR_IA32_SYSENTER_EIP:
  2218. vmcs_writel(GUEST_SYSENTER_EIP, data);
  2219. break;
  2220. case MSR_IA32_SYSENTER_ESP:
  2221. vmcs_writel(GUEST_SYSENTER_ESP, data);
  2222. break;
  2223. case MSR_IA32_TSC:
  2224. kvm_write_tsc(vcpu, msr_info);
  2225. break;
  2226. case MSR_IA32_CR_PAT:
  2227. if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT) {
  2228. vmcs_write64(GUEST_IA32_PAT, data);
  2229. vcpu->arch.pat = data;
  2230. break;
  2231. }
  2232. ret = kvm_set_msr_common(vcpu, msr_info);
  2233. break;
  2234. case MSR_IA32_TSC_ADJUST:
  2235. ret = kvm_set_msr_common(vcpu, msr_info);
  2236. break;
  2237. case MSR_TSC_AUX:
  2238. if (!vmx->rdtscp_enabled)
  2239. return 1;
  2240. /* Check reserved bit, higher 32 bits should be zero */
  2241. if ((data >> 32) != 0)
  2242. return 1;
  2243. /* Otherwise falls through */
  2244. default:
  2245. if (vmx_set_vmx_msr(vcpu, msr_info))
  2246. break;
  2247. msr = find_msr_entry(vmx, msr_index);
  2248. if (msr) {
  2249. msr->data = data;
  2250. if (msr - vmx->guest_msrs < vmx->save_nmsrs) {
  2251. preempt_disable();
  2252. kvm_set_shared_msr(msr->index, msr->data,
  2253. msr->mask);
  2254. preempt_enable();
  2255. }
  2256. break;
  2257. }
  2258. ret = kvm_set_msr_common(vcpu, msr_info);
  2259. }
  2260. return ret;
  2261. }
  2262. static void vmx_cache_reg(struct kvm_vcpu *vcpu, enum kvm_reg reg)
  2263. {
  2264. __set_bit(reg, (unsigned long *)&vcpu->arch.regs_avail);
  2265. switch (reg) {
  2266. case VCPU_REGS_RSP:
  2267. vcpu->arch.regs[VCPU_REGS_RSP] = vmcs_readl(GUEST_RSP);
  2268. break;
  2269. case VCPU_REGS_RIP:
  2270. vcpu->arch.regs[VCPU_REGS_RIP] = vmcs_readl(GUEST_RIP);
  2271. break;
  2272. case VCPU_EXREG_PDPTR:
  2273. if (enable_ept)
  2274. ept_save_pdptrs(vcpu);
  2275. break;
  2276. default:
  2277. break;
  2278. }
  2279. }
  2280. static __init int cpu_has_kvm_support(void)
  2281. {
  2282. return cpu_has_vmx();
  2283. }
  2284. static __init int vmx_disabled_by_bios(void)
  2285. {
  2286. u64 msr;
  2287. rdmsrl(MSR_IA32_FEATURE_CONTROL, msr);
  2288. if (msr & FEATURE_CONTROL_LOCKED) {
  2289. /* launched w/ TXT and VMX disabled */
  2290. if (!(msr & FEATURE_CONTROL_VMXON_ENABLED_INSIDE_SMX)
  2291. && tboot_enabled())
  2292. return 1;
  2293. /* launched w/o TXT and VMX only enabled w/ TXT */
  2294. if (!(msr & FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX)
  2295. && (msr & FEATURE_CONTROL_VMXON_ENABLED_INSIDE_SMX)
  2296. && !tboot_enabled()) {
  2297. printk(KERN_WARNING "kvm: disable TXT in the BIOS or "
  2298. "activate TXT before enabling KVM\n");
  2299. return 1;
  2300. }
  2301. /* launched w/o TXT and VMX disabled */
  2302. if (!(msr & FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX)
  2303. && !tboot_enabled())
  2304. return 1;
  2305. }
  2306. return 0;
  2307. }
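/*
 * Enter VMX root operation on this cpu.  VMXON takes the physical address of
 * the VMXON region as a 64-bit memory operand; the operand here is the stack
 * slot holding 'addr', with RAX pointing at it (ASM_VMX_VMXON_RAX encodes
 * vmxon with an RAX-based memory operand), hence the "m"(addr) constraint.
 */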
  2308. static void kvm_cpu_vmxon(u64 addr)
  2309. {
  2310. asm volatile (ASM_VMX_VMXON_RAX
  2311. : : "a"(&addr), "m"(addr)
  2312. : "memory", "cc");
  2313. }
  2314. static int hardware_enable(void *garbage)
  2315. {
  2316. int cpu = raw_smp_processor_id();
  2317. u64 phys_addr = __pa(per_cpu(vmxarea, cpu));
  2318. u64 old, test_bits;
  2319. if (read_cr4() & X86_CR4_VMXE)
  2320. return -EBUSY;
  2321. INIT_LIST_HEAD(&per_cpu(loaded_vmcss_on_cpu, cpu));
  2322. /*
  2323. * Now we can enable the vmclear operation in kdump
  2324. * since the loaded_vmcss_on_cpu list on this cpu
  2325. * has been initialized.
  2326. *
2327. * Though the cpu is not in VMX operation now, there
2328. * is no problem in enabling the vmclear operation here,
2329. * since the loaded_vmcss_on_cpu list is still empty.
  2330. */
  2331. crash_enable_local_vmclear(cpu);
  2332. rdmsrl(MSR_IA32_FEATURE_CONTROL, old);
  2333. test_bits = FEATURE_CONTROL_LOCKED;
  2334. test_bits |= FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX;
  2335. if (tboot_enabled())
  2336. test_bits |= FEATURE_CONTROL_VMXON_ENABLED_INSIDE_SMX;
  2337. if ((old & test_bits) != test_bits) {
  2338. /* enable and lock */
  2339. wrmsrl(MSR_IA32_FEATURE_CONTROL, old | test_bits);
  2340. }
  2341. write_cr4(read_cr4() | X86_CR4_VMXE); /* FIXME: not cpu hotplug safe */
  2342. if (vmm_exclusive) {
  2343. kvm_cpu_vmxon(phys_addr);
  2344. ept_sync_global();
  2345. }
  2346. native_store_gdt(&__get_cpu_var(host_gdt));
  2347. return 0;
  2348. }
  2349. static void vmclear_local_loaded_vmcss(void)
  2350. {
  2351. int cpu = raw_smp_processor_id();
  2352. struct loaded_vmcs *v, *n;
  2353. list_for_each_entry_safe(v, n, &per_cpu(loaded_vmcss_on_cpu, cpu),
  2354. loaded_vmcss_on_cpu_link)
  2355. __loaded_vmcs_clear(v);
  2356. }
  2357. /* Just like cpu_vmxoff(), but with the __kvm_handle_fault_on_reboot()
  2358. * tricks.
  2359. */
  2360. static void kvm_cpu_vmxoff(void)
  2361. {
  2362. asm volatile (__ex(ASM_VMX_VMXOFF) : : : "cc");
  2363. }
  2364. static void hardware_disable(void *garbage)
  2365. {
  2366. if (vmm_exclusive) {
  2367. vmclear_local_loaded_vmcss();
  2368. kvm_cpu_vmxoff();
  2369. }
  2370. write_cr4(read_cr4() & ~X86_CR4_VMXE);
  2371. }
  2372. static __init int adjust_vmx_controls(u32 ctl_min, u32 ctl_opt,
  2373. u32 msr, u32 *result)
  2374. {
  2375. u32 vmx_msr_low, vmx_msr_high;
  2376. u32 ctl = ctl_min | ctl_opt;
  2377. rdmsr(msr, vmx_msr_low, vmx_msr_high);
  2378. ctl &= vmx_msr_high; /* bit == 0 in high word ==> must be zero */
  2379. ctl |= vmx_msr_low; /* bit == 1 in low word ==> must be one */
  2380. /* Ensure minimum (required) set of control bits are supported. */
  2381. if (ctl_min & ~ctl)
  2382. return -EIO;
  2383. *result = ctl;
  2384. return 0;
  2385. }
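/*
 * Example: with ctl_min = 0x2 and ctl_opt = 0x4, and a capability MSR whose
 * low word is 0x1 and high word is 0x7, the result is ((0x2|0x4) & 0x7) | 0x1
 * == 0x7.  If instead the high word were 0x5, bit 1 of ctl_min would be lost
 * and we would return -EIO.  (Values are illustrative only.)
 */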
  2386. static __init bool allow_1_setting(u32 msr, u32 ctl)
  2387. {
  2388. u32 vmx_msr_low, vmx_msr_high;
  2389. rdmsr(msr, vmx_msr_low, vmx_msr_high);
  2390. return vmx_msr_high & ctl;
  2391. }
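/*
 * setup_vmcs_config() runs once at module load: it probes the VMX capability
 * MSRs, picks the execution/exit/entry controls KVM wants (and can get), and
 * records the result in the global vmcs_config, which every later VMCS is
 * built from.
 */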
  2392. static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf)
  2393. {
  2394. u32 vmx_msr_low, vmx_msr_high;
  2395. u32 min, opt, min2, opt2;
  2396. u32 _pin_based_exec_control = 0;
  2397. u32 _cpu_based_exec_control = 0;
  2398. u32 _cpu_based_2nd_exec_control = 0;
  2399. u32 _vmexit_control = 0;
  2400. u32 _vmentry_control = 0;
  2401. min = CPU_BASED_HLT_EXITING |
  2402. #ifdef CONFIG_X86_64
  2403. CPU_BASED_CR8_LOAD_EXITING |
  2404. CPU_BASED_CR8_STORE_EXITING |
  2405. #endif
  2406. CPU_BASED_CR3_LOAD_EXITING |
  2407. CPU_BASED_CR3_STORE_EXITING |
  2408. CPU_BASED_USE_IO_BITMAPS |
  2409. CPU_BASED_MOV_DR_EXITING |
  2410. CPU_BASED_USE_TSC_OFFSETING |
  2411. CPU_BASED_MWAIT_EXITING |
  2412. CPU_BASED_MONITOR_EXITING |
  2413. CPU_BASED_INVLPG_EXITING |
  2414. CPU_BASED_RDPMC_EXITING;
  2415. opt = CPU_BASED_TPR_SHADOW |
  2416. CPU_BASED_USE_MSR_BITMAPS |
  2417. CPU_BASED_ACTIVATE_SECONDARY_CONTROLS;
  2418. if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_PROCBASED_CTLS,
  2419. &_cpu_based_exec_control) < 0)
  2420. return -EIO;
  2421. #ifdef CONFIG_X86_64
  2422. if ((_cpu_based_exec_control & CPU_BASED_TPR_SHADOW))
  2423. _cpu_based_exec_control &= ~CPU_BASED_CR8_LOAD_EXITING &
  2424. ~CPU_BASED_CR8_STORE_EXITING;
  2425. #endif
  2426. if (_cpu_based_exec_control & CPU_BASED_ACTIVATE_SECONDARY_CONTROLS) {
  2427. min2 = 0;
  2428. opt2 = SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
  2429. SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
  2430. SECONDARY_EXEC_WBINVD_EXITING |
  2431. SECONDARY_EXEC_ENABLE_VPID |
  2432. SECONDARY_EXEC_ENABLE_EPT |
  2433. SECONDARY_EXEC_UNRESTRICTED_GUEST |
  2434. SECONDARY_EXEC_PAUSE_LOOP_EXITING |
  2435. SECONDARY_EXEC_RDTSCP |
  2436. SECONDARY_EXEC_ENABLE_INVPCID |
  2437. SECONDARY_EXEC_APIC_REGISTER_VIRT |
  2438. SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY |
  2439. SECONDARY_EXEC_SHADOW_VMCS;
  2440. if (adjust_vmx_controls(min2, opt2,
  2441. MSR_IA32_VMX_PROCBASED_CTLS2,
  2442. &_cpu_based_2nd_exec_control) < 0)
  2443. return -EIO;
  2444. }
  2445. #ifndef CONFIG_X86_64
  2446. if (!(_cpu_based_2nd_exec_control &
  2447. SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES))
  2448. _cpu_based_exec_control &= ~CPU_BASED_TPR_SHADOW;
  2449. #endif
  2450. if (!(_cpu_based_exec_control & CPU_BASED_TPR_SHADOW))
  2451. _cpu_based_2nd_exec_control &= ~(
  2452. SECONDARY_EXEC_APIC_REGISTER_VIRT |
  2453. SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
  2454. SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY);
  2455. if (_cpu_based_2nd_exec_control & SECONDARY_EXEC_ENABLE_EPT) {
2456. /* CR3 accesses and invlpg don't need to cause VM exits when EPT
2457. is enabled */
  2458. _cpu_based_exec_control &= ~(CPU_BASED_CR3_LOAD_EXITING |
  2459. CPU_BASED_CR3_STORE_EXITING |
  2460. CPU_BASED_INVLPG_EXITING);
  2461. rdmsr(MSR_IA32_VMX_EPT_VPID_CAP,
  2462. vmx_capability.ept, vmx_capability.vpid);
  2463. }
  2464. min = 0;
  2465. #ifdef CONFIG_X86_64
  2466. min |= VM_EXIT_HOST_ADDR_SPACE_SIZE;
  2467. #endif
  2468. opt = VM_EXIT_SAVE_IA32_PAT | VM_EXIT_LOAD_IA32_PAT |
  2469. VM_EXIT_ACK_INTR_ON_EXIT;
  2470. if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_EXIT_CTLS,
  2471. &_vmexit_control) < 0)
  2472. return -EIO;
  2473. min = PIN_BASED_EXT_INTR_MASK | PIN_BASED_NMI_EXITING;
  2474. opt = PIN_BASED_VIRTUAL_NMIS | PIN_BASED_POSTED_INTR;
  2475. if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_PINBASED_CTLS,
  2476. &_pin_based_exec_control) < 0)
  2477. return -EIO;
  2478. if (!(_cpu_based_2nd_exec_control &
  2479. SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY) ||
  2480. !(_vmexit_control & VM_EXIT_ACK_INTR_ON_EXIT))
  2481. _pin_based_exec_control &= ~PIN_BASED_POSTED_INTR;
  2482. min = 0;
  2483. opt = VM_ENTRY_LOAD_IA32_PAT;
  2484. if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_ENTRY_CTLS,
  2485. &_vmentry_control) < 0)
  2486. return -EIO;
  2487. rdmsr(MSR_IA32_VMX_BASIC, vmx_msr_low, vmx_msr_high);
  2488. /* IA-32 SDM Vol 3B: VMCS size is never greater than 4kB. */
  2489. if ((vmx_msr_high & 0x1fff) > PAGE_SIZE)
  2490. return -EIO;
  2491. #ifdef CONFIG_X86_64
  2492. /* IA-32 SDM Vol 3B: 64-bit CPUs always have VMX_BASIC_MSR[48]==0. */
  2493. if (vmx_msr_high & (1u<<16))
  2494. return -EIO;
  2495. #endif
  2496. /* Require Write-Back (WB) memory type for VMCS accesses. */
  2497. if (((vmx_msr_high >> 18) & 15) != 6)
  2498. return -EIO;
  2499. vmcs_conf->size = vmx_msr_high & 0x1fff;
  2500. vmcs_conf->order = get_order(vmcs_config.size);
  2501. vmcs_conf->revision_id = vmx_msr_low;
  2502. vmcs_conf->pin_based_exec_ctrl = _pin_based_exec_control;
  2503. vmcs_conf->cpu_based_exec_ctrl = _cpu_based_exec_control;
  2504. vmcs_conf->cpu_based_2nd_exec_ctrl = _cpu_based_2nd_exec_control;
  2505. vmcs_conf->vmexit_ctrl = _vmexit_control;
  2506. vmcs_conf->vmentry_ctrl = _vmentry_control;
  2507. cpu_has_load_ia32_efer =
  2508. allow_1_setting(MSR_IA32_VMX_ENTRY_CTLS,
  2509. VM_ENTRY_LOAD_IA32_EFER)
  2510. && allow_1_setting(MSR_IA32_VMX_EXIT_CTLS,
  2511. VM_EXIT_LOAD_IA32_EFER);
  2512. cpu_has_load_perf_global_ctrl =
  2513. allow_1_setting(MSR_IA32_VMX_ENTRY_CTLS,
  2514. VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL)
  2515. && allow_1_setting(MSR_IA32_VMX_EXIT_CTLS,
  2516. VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL);
  2517. /*
  2518. * Some cpus support VM_ENTRY_(LOAD|SAVE)_IA32_PERF_GLOBAL_CTRL
2519. * but due to the errata below it can't be used. The workaround is to use
2520. * the VM-entry/VM-exit MSR-load mechanism to switch IA32_PERF_GLOBAL_CTRL.
  2521. *
  2522. * VM Exit May Incorrectly Clear IA32_PERF_GLOBAL_CTRL [34:32]
  2523. *
  2524. * AAK155 (model 26)
  2525. * AAP115 (model 30)
  2526. * AAT100 (model 37)
  2527. * BC86,AAY89,BD102 (model 44)
  2528. * BA97 (model 46)
  2529. *
  2530. */
  2531. if (cpu_has_load_perf_global_ctrl && boot_cpu_data.x86 == 0x6) {
  2532. switch (boot_cpu_data.x86_model) {
  2533. case 26:
  2534. case 30:
  2535. case 37:
  2536. case 44:
  2537. case 46:
  2538. cpu_has_load_perf_global_ctrl = false;
  2539. printk_once(KERN_WARNING"kvm: VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL "
  2540. "does not work properly. Using workaround\n");
  2541. break;
  2542. default:
  2543. break;
  2544. }
  2545. }
  2546. return 0;
  2547. }
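/*
 * Allocate one VMCS region.  The size and required revision id were read
 * from MSR_IA32_VMX_BASIC in setup_vmcs_config(); the revision id must be
 * written into the first dword of the region before it can be VMPTRLDed.
 */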
  2548. static struct vmcs *alloc_vmcs_cpu(int cpu)
  2549. {
  2550. int node = cpu_to_node(cpu);
  2551. struct page *pages;
  2552. struct vmcs *vmcs;
  2553. pages = alloc_pages_exact_node(node, GFP_KERNEL, vmcs_config.order);
  2554. if (!pages)
  2555. return NULL;
  2556. vmcs = page_address(pages);
  2557. memset(vmcs, 0, vmcs_config.size);
  2558. vmcs->revision_id = vmcs_config.revision_id; /* vmcs revision id */
  2559. return vmcs;
  2560. }
  2561. static struct vmcs *alloc_vmcs(void)
  2562. {
  2563. return alloc_vmcs_cpu(raw_smp_processor_id());
  2564. }
  2565. static void free_vmcs(struct vmcs *vmcs)
  2566. {
  2567. free_pages((unsigned long)vmcs, vmcs_config.order);
  2568. }
  2569. /*
  2570. * Free a VMCS, but before that VMCLEAR it on the CPU where it was last loaded
  2571. */
  2572. static void free_loaded_vmcs(struct loaded_vmcs *loaded_vmcs)
  2573. {
  2574. if (!loaded_vmcs->vmcs)
  2575. return;
  2576. loaded_vmcs_clear(loaded_vmcs);
  2577. free_vmcs(loaded_vmcs->vmcs);
  2578. loaded_vmcs->vmcs = NULL;
  2579. }
  2580. static void free_kvm_area(void)
  2581. {
  2582. int cpu;
  2583. for_each_possible_cpu(cpu) {
  2584. free_vmcs(per_cpu(vmxarea, cpu));
  2585. per_cpu(vmxarea, cpu) = NULL;
  2586. }
  2587. }
  2588. static __init int alloc_kvm_area(void)
  2589. {
  2590. int cpu;
  2591. for_each_possible_cpu(cpu) {
  2592. struct vmcs *vmcs;
  2593. vmcs = alloc_vmcs_cpu(cpu);
  2594. if (!vmcs) {
  2595. free_kvm_area();
  2596. return -ENOMEM;
  2597. }
  2598. per_cpu(vmxarea, cpu) = vmcs;
  2599. }
  2600. return 0;
  2601. }
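/*
 * One-time setup at module init: after setup_vmcs_config() has decided what
 * the cpu supports, clear the module parameters for features that are not
 * actually available (vpid, shadow VMCS, EPT, unrestricted guest, APICv, ...)
 * and allocate the per-cpu VMXON regions.
 */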
  2602. static __init int hardware_setup(void)
  2603. {
  2604. if (setup_vmcs_config(&vmcs_config) < 0)
  2605. return -EIO;
  2606. if (boot_cpu_has(X86_FEATURE_NX))
  2607. kvm_enable_efer_bits(EFER_NX);
  2608. if (!cpu_has_vmx_vpid())
  2609. enable_vpid = 0;
  2610. if (!cpu_has_vmx_shadow_vmcs())
  2611. enable_shadow_vmcs = 0;
  2612. if (!cpu_has_vmx_ept() ||
  2613. !cpu_has_vmx_ept_4levels()) {
  2614. enable_ept = 0;
  2615. enable_unrestricted_guest = 0;
  2616. enable_ept_ad_bits = 0;
  2617. }
  2618. if (!cpu_has_vmx_ept_ad_bits())
  2619. enable_ept_ad_bits = 0;
  2620. if (!cpu_has_vmx_unrestricted_guest())
  2621. enable_unrestricted_guest = 0;
  2622. if (!cpu_has_vmx_flexpriority())
  2623. flexpriority_enabled = 0;
  2624. if (!cpu_has_vmx_tpr_shadow())
  2625. kvm_x86_ops->update_cr8_intercept = NULL;
  2626. if (enable_ept && !cpu_has_vmx_ept_2m_page())
  2627. kvm_disable_largepages();
  2628. if (!cpu_has_vmx_ple())
  2629. ple_gap = 0;
  2630. if (!cpu_has_vmx_apicv())
  2631. enable_apicv = 0;
  2632. if (enable_apicv)
  2633. kvm_x86_ops->update_cr8_intercept = NULL;
  2634. else {
  2635. kvm_x86_ops->hwapic_irr_update = NULL;
  2636. kvm_x86_ops->deliver_posted_interrupt = NULL;
  2637. kvm_x86_ops->sync_pir_to_irr = vmx_sync_pir_to_irr_dummy;
  2638. }
  2639. if (nested)
  2640. nested_vmx_setup_ctls_msrs();
  2641. return alloc_kvm_area();
  2642. }
  2643. static __exit void hardware_unsetup(void)
  2644. {
  2645. free_kvm_area();
  2646. }
  2647. static bool emulation_required(struct kvm_vcpu *vcpu)
  2648. {
  2649. return emulate_invalid_guest_state && !guest_state_valid(vcpu);
  2650. }
  2651. static void fix_pmode_seg(struct kvm_vcpu *vcpu, int seg,
  2652. struct kvm_segment *save)
  2653. {
  2654. if (!emulate_invalid_guest_state) {
  2655. /*
  2656. * CS and SS RPL should be equal during guest entry according
  2657. * to VMX spec, but in reality it is not always so. Since vcpu
  2658. * is in the middle of the transition from real mode to
  2659. * protected mode it is safe to assume that RPL 0 is a good
  2660. * default value.
  2661. */
  2662. if (seg == VCPU_SREG_CS || seg == VCPU_SREG_SS)
  2663. save->selector &= ~SELECTOR_RPL_MASK;
  2664. save->dpl = save->selector & SELECTOR_RPL_MASK;
  2665. save->s = 1;
  2666. }
  2667. vmx_set_segment(vcpu, save, seg);
  2668. }
  2669. static void enter_pmode(struct kvm_vcpu *vcpu)
  2670. {
  2671. unsigned long flags;
  2672. struct vcpu_vmx *vmx = to_vmx(vcpu);
  2673. /*
2674. * Update the real mode segment cache. It may not be up-to-date if a segment
2675. * register was written while the vcpu was in guest mode.
  2676. */
  2677. vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_ES], VCPU_SREG_ES);
  2678. vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_DS], VCPU_SREG_DS);
  2679. vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_FS], VCPU_SREG_FS);
  2680. vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_GS], VCPU_SREG_GS);
  2681. vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_SS], VCPU_SREG_SS);
  2682. vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_CS], VCPU_SREG_CS);
  2683. vmx->rmode.vm86_active = 0;
  2684. vmx_segment_cache_clear(vmx);
  2685. vmx_set_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_TR], VCPU_SREG_TR);
  2686. flags = vmcs_readl(GUEST_RFLAGS);
  2687. flags &= RMODE_GUEST_OWNED_EFLAGS_BITS;
  2688. flags |= vmx->rmode.save_rflags & ~RMODE_GUEST_OWNED_EFLAGS_BITS;
  2689. vmcs_writel(GUEST_RFLAGS, flags);
  2690. vmcs_writel(GUEST_CR4, (vmcs_readl(GUEST_CR4) & ~X86_CR4_VME) |
  2691. (vmcs_readl(CR4_READ_SHADOW) & X86_CR4_VME));
  2692. update_exception_bitmap(vcpu);
  2693. fix_pmode_seg(vcpu, VCPU_SREG_CS, &vmx->rmode.segs[VCPU_SREG_CS]);
  2694. fix_pmode_seg(vcpu, VCPU_SREG_SS, &vmx->rmode.segs[VCPU_SREG_SS]);
  2695. fix_pmode_seg(vcpu, VCPU_SREG_ES, &vmx->rmode.segs[VCPU_SREG_ES]);
  2696. fix_pmode_seg(vcpu, VCPU_SREG_DS, &vmx->rmode.segs[VCPU_SREG_DS]);
  2697. fix_pmode_seg(vcpu, VCPU_SREG_FS, &vmx->rmode.segs[VCPU_SREG_FS]);
  2698. fix_pmode_seg(vcpu, VCPU_SREG_GS, &vmx->rmode.segs[VCPU_SREG_GS]);
  2699. /* CPL is always 0 when CPU enters protected mode */
  2700. __set_bit(VCPU_EXREG_CPL, (ulong *)&vcpu->arch.regs_avail);
  2701. vmx->cpl = 0;
  2702. }
  2703. static void fix_rmode_seg(int seg, struct kvm_segment *save)
  2704. {
  2705. const struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];
  2706. struct kvm_segment var = *save;
  2707. var.dpl = 0x3;
  2708. if (seg == VCPU_SREG_CS)
  2709. var.type = 0x3;
  2710. if (!emulate_invalid_guest_state) {
  2711. var.selector = var.base >> 4;
  2712. var.base = var.base & 0xffff0;
  2713. var.limit = 0xffff;
  2714. var.g = 0;
  2715. var.db = 0;
  2716. var.present = 1;
  2717. var.s = 1;
  2718. var.l = 0;
  2719. var.unusable = 0;
  2720. var.type = 0x3;
  2721. var.avl = 0;
  2722. if (save->base & 0xf)
  2723. printk_once(KERN_WARNING "kvm: segment base is not "
  2724. "paragraph aligned when entering "
  2725. "protected mode (seg=%d)", seg);
  2726. }
  2727. vmcs_write16(sf->selector, var.selector);
  2728. vmcs_write32(sf->base, var.base);
  2729. vmcs_write32(sf->limit, var.limit);
  2730. vmcs_write32(sf->ar_bytes, vmx_segment_access_rights(&var));
  2731. }
  2732. static void enter_rmode(struct kvm_vcpu *vcpu)
  2733. {
  2734. unsigned long flags;
  2735. struct vcpu_vmx *vmx = to_vmx(vcpu);
  2736. vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_TR], VCPU_SREG_TR);
  2737. vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_ES], VCPU_SREG_ES);
  2738. vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_DS], VCPU_SREG_DS);
  2739. vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_FS], VCPU_SREG_FS);
  2740. vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_GS], VCPU_SREG_GS);
  2741. vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_SS], VCPU_SREG_SS);
  2742. vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_CS], VCPU_SREG_CS);
  2743. vmx->rmode.vm86_active = 1;
  2744. /*
  2745. * Very old userspace does not call KVM_SET_TSS_ADDR before entering
  2746. * vcpu. Warn the user that an update is overdue.
  2747. */
  2748. if (!vcpu->kvm->arch.tss_addr)
2749. printk_once(KERN_WARNING "kvm: KVM_SET_TSS_ADDR needs to be "
  2750. "called before entering vcpu\n");
  2751. vmx_segment_cache_clear(vmx);
  2752. vmcs_writel(GUEST_TR_BASE, vcpu->kvm->arch.tss_addr);
  2753. vmcs_write32(GUEST_TR_LIMIT, RMODE_TSS_SIZE - 1);
  2754. vmcs_write32(GUEST_TR_AR_BYTES, 0x008b);
  2755. flags = vmcs_readl(GUEST_RFLAGS);
  2756. vmx->rmode.save_rflags = flags;
  2757. flags |= X86_EFLAGS_IOPL | X86_EFLAGS_VM;
  2758. vmcs_writel(GUEST_RFLAGS, flags);
  2759. vmcs_writel(GUEST_CR4, vmcs_readl(GUEST_CR4) | X86_CR4_VME);
  2760. update_exception_bitmap(vcpu);
  2761. fix_rmode_seg(VCPU_SREG_SS, &vmx->rmode.segs[VCPU_SREG_SS]);
  2762. fix_rmode_seg(VCPU_SREG_CS, &vmx->rmode.segs[VCPU_SREG_CS]);
  2763. fix_rmode_seg(VCPU_SREG_ES, &vmx->rmode.segs[VCPU_SREG_ES]);
  2764. fix_rmode_seg(VCPU_SREG_DS, &vmx->rmode.segs[VCPU_SREG_DS]);
  2765. fix_rmode_seg(VCPU_SREG_GS, &vmx->rmode.segs[VCPU_SREG_GS]);
  2766. fix_rmode_seg(VCPU_SREG_FS, &vmx->rmode.segs[VCPU_SREG_FS]);
  2767. kvm_mmu_reset_context(vcpu);
  2768. }
  2769. static void vmx_set_efer(struct kvm_vcpu *vcpu, u64 efer)
  2770. {
  2771. struct vcpu_vmx *vmx = to_vmx(vcpu);
  2772. struct shared_msr_entry *msr = find_msr_entry(vmx, MSR_EFER);
  2773. if (!msr)
  2774. return;
  2775. /*
  2776. * Force kernel_gs_base reloading before EFER changes, as control
  2777. * of this msr depends on is_long_mode().
  2778. */
  2779. vmx_load_host_state(to_vmx(vcpu));
  2780. vcpu->arch.efer = efer;
  2781. if (efer & EFER_LMA) {
  2782. vmcs_write32(VM_ENTRY_CONTROLS,
  2783. vmcs_read32(VM_ENTRY_CONTROLS) |
  2784. VM_ENTRY_IA32E_MODE);
  2785. msr->data = efer;
  2786. } else {
  2787. vmcs_write32(VM_ENTRY_CONTROLS,
  2788. vmcs_read32(VM_ENTRY_CONTROLS) &
  2789. ~VM_ENTRY_IA32E_MODE);
  2790. msr->data = efer & ~EFER_LME;
  2791. }
  2792. setup_msrs(vmx);
  2793. }
  2794. #ifdef CONFIG_X86_64
  2795. static void enter_lmode(struct kvm_vcpu *vcpu)
  2796. {
  2797. u32 guest_tr_ar;
  2798. vmx_segment_cache_clear(to_vmx(vcpu));
  2799. guest_tr_ar = vmcs_read32(GUEST_TR_AR_BYTES);
  2800. if ((guest_tr_ar & AR_TYPE_MASK) != AR_TYPE_BUSY_64_TSS) {
  2801. pr_debug_ratelimited("%s: tss fixup for long mode. \n",
  2802. __func__);
  2803. vmcs_write32(GUEST_TR_AR_BYTES,
  2804. (guest_tr_ar & ~AR_TYPE_MASK)
  2805. | AR_TYPE_BUSY_64_TSS);
  2806. }
  2807. vmx_set_efer(vcpu, vcpu->arch.efer | EFER_LMA);
  2808. }
  2809. static void exit_lmode(struct kvm_vcpu *vcpu)
  2810. {
  2811. vmcs_write32(VM_ENTRY_CONTROLS,
  2812. vmcs_read32(VM_ENTRY_CONTROLS)
  2813. & ~VM_ENTRY_IA32E_MODE);
  2814. vmx_set_efer(vcpu, vcpu->arch.efer & ~EFER_LMA);
  2815. }
  2816. #endif
  2817. static void vmx_flush_tlb(struct kvm_vcpu *vcpu)
  2818. {
  2819. vpid_sync_context(to_vmx(vcpu));
  2820. if (enable_ept) {
  2821. if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
  2822. return;
  2823. ept_sync_context(construct_eptp(vcpu->arch.mmu.root_hpa));
  2824. }
  2825. }
  2826. static void vmx_decache_cr0_guest_bits(struct kvm_vcpu *vcpu)
  2827. {
  2828. ulong cr0_guest_owned_bits = vcpu->arch.cr0_guest_owned_bits;
  2829. vcpu->arch.cr0 &= ~cr0_guest_owned_bits;
  2830. vcpu->arch.cr0 |= vmcs_readl(GUEST_CR0) & cr0_guest_owned_bits;
  2831. }
  2832. static void vmx_decache_cr3(struct kvm_vcpu *vcpu)
  2833. {
  2834. if (enable_ept && is_paging(vcpu))
  2835. vcpu->arch.cr3 = vmcs_readl(GUEST_CR3);
  2836. __set_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail);
  2837. }
  2838. static void vmx_decache_cr4_guest_bits(struct kvm_vcpu *vcpu)
  2839. {
  2840. ulong cr4_guest_owned_bits = vcpu->arch.cr4_guest_owned_bits;
  2841. vcpu->arch.cr4 &= ~cr4_guest_owned_bits;
  2842. vcpu->arch.cr4 |= vmcs_readl(GUEST_CR4) & cr4_guest_owned_bits;
  2843. }
  2844. static void ept_load_pdptrs(struct kvm_vcpu *vcpu)
  2845. {
  2846. if (!test_bit(VCPU_EXREG_PDPTR,
  2847. (unsigned long *)&vcpu->arch.regs_dirty))
  2848. return;
  2849. if (is_paging(vcpu) && is_pae(vcpu) && !is_long_mode(vcpu)) {
  2850. vmcs_write64(GUEST_PDPTR0, vcpu->arch.mmu.pdptrs[0]);
  2851. vmcs_write64(GUEST_PDPTR1, vcpu->arch.mmu.pdptrs[1]);
  2852. vmcs_write64(GUEST_PDPTR2, vcpu->arch.mmu.pdptrs[2]);
  2853. vmcs_write64(GUEST_PDPTR3, vcpu->arch.mmu.pdptrs[3]);
  2854. }
  2855. }
  2856. static void ept_save_pdptrs(struct kvm_vcpu *vcpu)
  2857. {
  2858. if (is_paging(vcpu) && is_pae(vcpu) && !is_long_mode(vcpu)) {
  2859. vcpu->arch.mmu.pdptrs[0] = vmcs_read64(GUEST_PDPTR0);
  2860. vcpu->arch.mmu.pdptrs[1] = vmcs_read64(GUEST_PDPTR1);
  2861. vcpu->arch.mmu.pdptrs[2] = vmcs_read64(GUEST_PDPTR2);
  2862. vcpu->arch.mmu.pdptrs[3] = vmcs_read64(GUEST_PDPTR3);
  2863. }
  2864. __set_bit(VCPU_EXREG_PDPTR,
  2865. (unsigned long *)&vcpu->arch.regs_avail);
  2866. __set_bit(VCPU_EXREG_PDPTR,
  2867. (unsigned long *)&vcpu->arch.regs_dirty);
  2868. }
  2869. static int vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
  2870. static void ept_update_paging_mode_cr0(unsigned long *hw_cr0,
  2871. unsigned long cr0,
  2872. struct kvm_vcpu *vcpu)
  2873. {
  2874. if (!test_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail))
  2875. vmx_decache_cr3(vcpu);
  2876. if (!(cr0 & X86_CR0_PG)) {
  2877. /* From paging/starting to nonpaging */
  2878. vmcs_write32(CPU_BASED_VM_EXEC_CONTROL,
  2879. vmcs_read32(CPU_BASED_VM_EXEC_CONTROL) |
  2880. (CPU_BASED_CR3_LOAD_EXITING |
  2881. CPU_BASED_CR3_STORE_EXITING));
  2882. vcpu->arch.cr0 = cr0;
  2883. vmx_set_cr4(vcpu, kvm_read_cr4(vcpu));
  2884. } else if (!is_paging(vcpu)) {
  2885. /* From nonpaging to paging */
  2886. vmcs_write32(CPU_BASED_VM_EXEC_CONTROL,
  2887. vmcs_read32(CPU_BASED_VM_EXEC_CONTROL) &
  2888. ~(CPU_BASED_CR3_LOAD_EXITING |
  2889. CPU_BASED_CR3_STORE_EXITING));
  2890. vcpu->arch.cr0 = cr0;
  2891. vmx_set_cr4(vcpu, kvm_read_cr4(vcpu));
  2892. }
  2893. if (!(cr0 & X86_CR0_WP))
  2894. *hw_cr0 &= ~X86_CR0_WP;
  2895. }
  2896. static void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
  2897. {
  2898. struct vcpu_vmx *vmx = to_vmx(vcpu);
  2899. unsigned long hw_cr0;
  2900. hw_cr0 = (cr0 & ~KVM_GUEST_CR0_MASK);
  2901. if (enable_unrestricted_guest)
  2902. hw_cr0 |= KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST;
  2903. else {
  2904. hw_cr0 |= KVM_VM_CR0_ALWAYS_ON;
  2905. if (vmx->rmode.vm86_active && (cr0 & X86_CR0_PE))
  2906. enter_pmode(vcpu);
  2907. if (!vmx->rmode.vm86_active && !(cr0 & X86_CR0_PE))
  2908. enter_rmode(vcpu);
  2909. }
  2910. #ifdef CONFIG_X86_64
  2911. if (vcpu->arch.efer & EFER_LME) {
  2912. if (!is_paging(vcpu) && (cr0 & X86_CR0_PG))
  2913. enter_lmode(vcpu);
  2914. if (is_paging(vcpu) && !(cr0 & X86_CR0_PG))
  2915. exit_lmode(vcpu);
  2916. }
  2917. #endif
  2918. if (enable_ept)
  2919. ept_update_paging_mode_cr0(&hw_cr0, cr0, vcpu);
  2920. if (!vcpu->fpu_active)
  2921. hw_cr0 |= X86_CR0_TS | X86_CR0_MP;
  2922. vmcs_writel(CR0_READ_SHADOW, cr0);
  2923. vmcs_writel(GUEST_CR0, hw_cr0);
  2924. vcpu->arch.cr0 = cr0;
  2925. /* depends on vcpu->arch.cr0 to be set to a new value */
  2926. vmx->emulation_required = emulation_required(vcpu);
  2927. }
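/*
 * Build an EPT pointer: bits 2:0 hold the memory type (write-back here),
 * bits 5:3 hold the page-walk length minus one, bit 6 enables EPT
 * accessed/dirty flags, and bits 51:12 hold the physical address of the
 * top-level EPT paging structure.
 */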
  2928. static u64 construct_eptp(unsigned long root_hpa)
  2929. {
  2930. u64 eptp;
2931. /* TODO: write the value read from the MSR */
  2932. eptp = VMX_EPT_DEFAULT_MT |
  2933. VMX_EPT_DEFAULT_GAW << VMX_EPT_GAW_EPTP_SHIFT;
  2934. if (enable_ept_ad_bits)
  2935. eptp |= VMX_EPT_AD_ENABLE_BIT;
  2936. eptp |= (root_hpa & PAGE_MASK);
  2937. return eptp;
  2938. }
  2939. static void vmx_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
  2940. {
  2941. unsigned long guest_cr3;
  2942. u64 eptp;
  2943. guest_cr3 = cr3;
  2944. if (enable_ept) {
  2945. eptp = construct_eptp(cr3);
  2946. vmcs_write64(EPT_POINTER, eptp);
  2947. guest_cr3 = is_paging(vcpu) ? kvm_read_cr3(vcpu) :
  2948. vcpu->kvm->arch.ept_identity_map_addr;
  2949. ept_load_pdptrs(vcpu);
  2950. }
  2951. vmx_flush_tlb(vcpu);
  2952. vmcs_writel(GUEST_CR3, guest_cr3);
  2953. }
  2954. static int vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
  2955. {
  2956. unsigned long hw_cr4 = cr4 | (to_vmx(vcpu)->rmode.vm86_active ?
  2957. KVM_RMODE_VM_CR4_ALWAYS_ON : KVM_PMODE_VM_CR4_ALWAYS_ON);
  2958. if (cr4 & X86_CR4_VMXE) {
  2959. /*
  2960. * To use VMXON (and later other VMX instructions), a guest
  2961. * must first be able to turn on cr4.VMXE (see handle_vmon()).
  2962. * So basically the check on whether to allow nested VMX
  2963. * is here.
  2964. */
  2965. if (!nested_vmx_allowed(vcpu))
  2966. return 1;
  2967. }
  2968. if (to_vmx(vcpu)->nested.vmxon &&
  2969. ((cr4 & VMXON_CR4_ALWAYSON) != VMXON_CR4_ALWAYSON))
  2970. return 1;
  2971. vcpu->arch.cr4 = cr4;
  2972. if (enable_ept) {
  2973. if (!is_paging(vcpu)) {
  2974. hw_cr4 &= ~X86_CR4_PAE;
  2975. hw_cr4 |= X86_CR4_PSE;
  2976. /*
  2977. * SMEP is disabled if CPU is in non-paging mode in
  2978. * hardware. However KVM always uses paging mode to
  2979. * emulate guest non-paging mode with TDP.
  2980. * To emulate this behavior, SMEP needs to be manually
  2981. * disabled when guest switches to non-paging mode.
  2982. */
  2983. hw_cr4 &= ~X86_CR4_SMEP;
  2984. } else if (!(cr4 & X86_CR4_PAE)) {
  2985. hw_cr4 &= ~X86_CR4_PAE;
  2986. }
  2987. }
  2988. vmcs_writel(CR4_READ_SHADOW, cr4);
  2989. vmcs_writel(GUEST_CR4, hw_cr4);
  2990. return 0;
  2991. }
  2992. static void vmx_get_segment(struct kvm_vcpu *vcpu,
  2993. struct kvm_segment *var, int seg)
  2994. {
  2995. struct vcpu_vmx *vmx = to_vmx(vcpu);
  2996. u32 ar;
  2997. if (vmx->rmode.vm86_active && seg != VCPU_SREG_LDTR) {
  2998. *var = vmx->rmode.segs[seg];
  2999. if (seg == VCPU_SREG_TR
  3000. || var->selector == vmx_read_guest_seg_selector(vmx, seg))
  3001. return;
  3002. var->base = vmx_read_guest_seg_base(vmx, seg);
  3003. var->selector = vmx_read_guest_seg_selector(vmx, seg);
  3004. return;
  3005. }
  3006. var->base = vmx_read_guest_seg_base(vmx, seg);
  3007. var->limit = vmx_read_guest_seg_limit(vmx, seg);
  3008. var->selector = vmx_read_guest_seg_selector(vmx, seg);
  3009. ar = vmx_read_guest_seg_ar(vmx, seg);
  3010. var->unusable = (ar >> 16) & 1;
  3011. var->type = ar & 15;
  3012. var->s = (ar >> 4) & 1;
  3013. var->dpl = (ar >> 5) & 3;
  3014. /*
3015. * Some userspaces do not preserve the unusable property. Since a usable
3016. * segment has to be present according to the VMX spec, we can use the
3017. * present property to work around that userspace bug by making an unusable
3018. * segment always non-present. vmx_segment_access_rights() already marks a
3019. * non-present segment as unusable.
  3020. */
  3021. var->present = !var->unusable;
  3022. var->avl = (ar >> 12) & 1;
  3023. var->l = (ar >> 13) & 1;
  3024. var->db = (ar >> 14) & 1;
  3025. var->g = (ar >> 15) & 1;
  3026. }
  3027. static u64 vmx_get_segment_base(struct kvm_vcpu *vcpu, int seg)
  3028. {
  3029. struct kvm_segment s;
  3030. if (to_vmx(vcpu)->rmode.vm86_active) {
  3031. vmx_get_segment(vcpu, &s, seg);
  3032. return s.base;
  3033. }
  3034. return vmx_read_guest_seg_base(to_vmx(vcpu), seg);
  3035. }
  3036. static int vmx_get_cpl(struct kvm_vcpu *vcpu)
  3037. {
  3038. struct vcpu_vmx *vmx = to_vmx(vcpu);
  3039. if (!is_protmode(vcpu))
  3040. return 0;
  3041. if (!is_long_mode(vcpu)
  3042. && (kvm_get_rflags(vcpu) & X86_EFLAGS_VM)) /* if virtual 8086 */
  3043. return 3;
  3044. if (!test_bit(VCPU_EXREG_CPL, (ulong *)&vcpu->arch.regs_avail)) {
  3045. __set_bit(VCPU_EXREG_CPL, (ulong *)&vcpu->arch.regs_avail);
  3046. vmx->cpl = vmx_read_guest_seg_selector(vmx, VCPU_SREG_CS) & 3;
  3047. }
  3048. return vmx->cpl;
  3049. }
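/*
 * Pack a kvm_segment into the VMX access-rights format used by the
 * GUEST_*_AR_BYTES fields: bits 3:0 type, bit 4 S, bits 6:5 DPL, bit 7
 * present, bit 12 AVL, bit 13 L, bit 14 D/B, bit 15 G, bit 16 unusable.
 * An unusable or non-present segment is encoded simply as "unusable".
 */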
  3050. static u32 vmx_segment_access_rights(struct kvm_segment *var)
  3051. {
  3052. u32 ar;
  3053. if (var->unusable || !var->present)
  3054. ar = 1 << 16;
  3055. else {
  3056. ar = var->type & 15;
  3057. ar |= (var->s & 1) << 4;
  3058. ar |= (var->dpl & 3) << 5;
  3059. ar |= (var->present & 1) << 7;
  3060. ar |= (var->avl & 1) << 12;
  3061. ar |= (var->l & 1) << 13;
  3062. ar |= (var->db & 1) << 14;
  3063. ar |= (var->g & 1) << 15;
  3064. }
  3065. return ar;
  3066. }
  3067. static void vmx_set_segment(struct kvm_vcpu *vcpu,
  3068. struct kvm_segment *var, int seg)
  3069. {
  3070. struct vcpu_vmx *vmx = to_vmx(vcpu);
  3071. const struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];
  3072. vmx_segment_cache_clear(vmx);
  3073. if (seg == VCPU_SREG_CS)
  3074. __clear_bit(VCPU_EXREG_CPL, (ulong *)&vcpu->arch.regs_avail);
  3075. if (vmx->rmode.vm86_active && seg != VCPU_SREG_LDTR) {
  3076. vmx->rmode.segs[seg] = *var;
  3077. if (seg == VCPU_SREG_TR)
  3078. vmcs_write16(sf->selector, var->selector);
  3079. else if (var->s)
  3080. fix_rmode_seg(seg, &vmx->rmode.segs[seg]);
  3081. goto out;
  3082. }
  3083. vmcs_writel(sf->base, var->base);
  3084. vmcs_write32(sf->limit, var->limit);
  3085. vmcs_write16(sf->selector, var->selector);
  3086. /*
  3087. * Fix the "Accessed" bit in AR field of segment registers for older
  3088. * qemu binaries.
  3089. * IA32 arch specifies that at the time of processor reset the
  3090. * "Accessed" bit in the AR field of segment registers is 1. And qemu
  3091. * is setting it to 0 in the userland code. This causes invalid guest
  3092. * state vmexit when "unrestricted guest" mode is turned on.
  3093. * Fix for this setup issue in cpu_reset is being pushed in the qemu
  3094. * tree. Newer qemu binaries with that qemu fix would not need this
  3095. * kvm hack.
  3096. */
  3097. if (enable_unrestricted_guest && (seg != VCPU_SREG_LDTR))
  3098. var->type |= 0x1; /* Accessed */
  3099. vmcs_write32(sf->ar_bytes, vmx_segment_access_rights(var));
  3100. out:
  3101. vmx->emulation_required |= emulation_required(vcpu);
  3102. }
  3103. static void vmx_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l)
  3104. {
  3105. u32 ar = vmx_read_guest_seg_ar(to_vmx(vcpu), VCPU_SREG_CS);
  3106. *db = (ar >> 14) & 1;
  3107. *l = (ar >> 13) & 1;
  3108. }
  3109. static void vmx_get_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
  3110. {
  3111. dt->size = vmcs_read32(GUEST_IDTR_LIMIT);
  3112. dt->address = vmcs_readl(GUEST_IDTR_BASE);
  3113. }
  3114. static void vmx_set_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
  3115. {
  3116. vmcs_write32(GUEST_IDTR_LIMIT, dt->size);
  3117. vmcs_writel(GUEST_IDTR_BASE, dt->address);
  3118. }
  3119. static void vmx_get_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
  3120. {
  3121. dt->size = vmcs_read32(GUEST_GDTR_LIMIT);
  3122. dt->address = vmcs_readl(GUEST_GDTR_BASE);
  3123. }
  3124. static void vmx_set_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
  3125. {
  3126. vmcs_write32(GUEST_GDTR_LIMIT, dt->size);
  3127. vmcs_writel(GUEST_GDTR_BASE, dt->address);
  3128. }
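/*
 * The checks below mirror the guest-state checks the cpu itself performs at
 * VM entry.  For the virtual-8086 case a segment is acceptable only if it
 * looks like a real-mode segment: base == selector << 4, limit 0xffff and
 * access rights 0xf3 (present, DPL 3, read/write data, accessed).
 */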
  3129. static bool rmode_segment_valid(struct kvm_vcpu *vcpu, int seg)
  3130. {
  3131. struct kvm_segment var;
  3132. u32 ar;
  3133. vmx_get_segment(vcpu, &var, seg);
  3134. var.dpl = 0x3;
  3135. if (seg == VCPU_SREG_CS)
  3136. var.type = 0x3;
  3137. ar = vmx_segment_access_rights(&var);
  3138. if (var.base != (var.selector << 4))
  3139. return false;
  3140. if (var.limit != 0xffff)
  3141. return false;
  3142. if (ar != 0xf3)
  3143. return false;
  3144. return true;
  3145. }
  3146. static bool code_segment_valid(struct kvm_vcpu *vcpu)
  3147. {
  3148. struct kvm_segment cs;
  3149. unsigned int cs_rpl;
  3150. vmx_get_segment(vcpu, &cs, VCPU_SREG_CS);
  3151. cs_rpl = cs.selector & SELECTOR_RPL_MASK;
  3152. if (cs.unusable)
  3153. return false;
  3154. if (~cs.type & (AR_TYPE_CODE_MASK|AR_TYPE_ACCESSES_MASK))
  3155. return false;
  3156. if (!cs.s)
  3157. return false;
  3158. if (cs.type & AR_TYPE_WRITEABLE_MASK) {
  3159. if (cs.dpl > cs_rpl)
  3160. return false;
  3161. } else {
  3162. if (cs.dpl != cs_rpl)
  3163. return false;
  3164. }
  3165. if (!cs.present)
  3166. return false;
  3167. /* TODO: Add Reserved field check, this'll require a new member in the kvm_segment_field structure */
  3168. return true;
  3169. }
  3170. static bool stack_segment_valid(struct kvm_vcpu *vcpu)
  3171. {
  3172. struct kvm_segment ss;
  3173. unsigned int ss_rpl;
  3174. vmx_get_segment(vcpu, &ss, VCPU_SREG_SS);
  3175. ss_rpl = ss.selector & SELECTOR_RPL_MASK;
  3176. if (ss.unusable)
  3177. return true;
  3178. if (ss.type != 3 && ss.type != 7)
  3179. return false;
  3180. if (!ss.s)
  3181. return false;
  3182. if (ss.dpl != ss_rpl) /* DPL != RPL */
  3183. return false;
  3184. if (!ss.present)
  3185. return false;
  3186. return true;
  3187. }
  3188. static bool data_segment_valid(struct kvm_vcpu *vcpu, int seg)
  3189. {
  3190. struct kvm_segment var;
  3191. unsigned int rpl;
  3192. vmx_get_segment(vcpu, &var, seg);
  3193. rpl = var.selector & SELECTOR_RPL_MASK;
  3194. if (var.unusable)
  3195. return true;
  3196. if (!var.s)
  3197. return false;
  3198. if (!var.present)
  3199. return false;
  3200. if (~var.type & (AR_TYPE_CODE_MASK|AR_TYPE_WRITEABLE_MASK)) {
  3201. if (var.dpl < rpl) /* DPL < RPL */
  3202. return false;
  3203. }
  3204. /* TODO: Add other members to kvm_segment_field to allow checking for other access
  3205. * rights flags
  3206. */
  3207. return true;
  3208. }
  3209. static bool tr_valid(struct kvm_vcpu *vcpu)
  3210. {
  3211. struct kvm_segment tr;
  3212. vmx_get_segment(vcpu, &tr, VCPU_SREG_TR);
  3213. if (tr.unusable)
  3214. return false;
  3215. if (tr.selector & SELECTOR_TI_MASK) /* TI = 1 */
  3216. return false;
  3217. if (tr.type != 3 && tr.type != 11) /* TODO: Check if guest is in IA32e mode */
  3218. return false;
  3219. if (!tr.present)
  3220. return false;
  3221. return true;
  3222. }
  3223. static bool ldtr_valid(struct kvm_vcpu *vcpu)
  3224. {
  3225. struct kvm_segment ldtr;
  3226. vmx_get_segment(vcpu, &ldtr, VCPU_SREG_LDTR);
  3227. if (ldtr.unusable)
  3228. return true;
  3229. if (ldtr.selector & SELECTOR_TI_MASK) /* TI = 1 */
  3230. return false;
  3231. if (ldtr.type != 2)
  3232. return false;
  3233. if (!ldtr.present)
  3234. return false;
  3235. return true;
  3236. }
  3237. static bool cs_ss_rpl_check(struct kvm_vcpu *vcpu)
  3238. {
  3239. struct kvm_segment cs, ss;
  3240. vmx_get_segment(vcpu, &cs, VCPU_SREG_CS);
  3241. vmx_get_segment(vcpu, &ss, VCPU_SREG_SS);
  3242. return ((cs.selector & SELECTOR_RPL_MASK) ==
  3243. (ss.selector & SELECTOR_RPL_MASK));
  3244. }
  3245. /*
  3246. * Check if guest state is valid. Returns true if valid, false if
  3247. * not.
  3248. * We assume that registers are always usable
  3249. */
  3250. static bool guest_state_valid(struct kvm_vcpu *vcpu)
  3251. {
  3252. if (enable_unrestricted_guest)
  3253. return true;
  3254. /* real mode guest state checks */
  3255. if (!is_protmode(vcpu) || (vmx_get_rflags(vcpu) & X86_EFLAGS_VM)) {
  3256. if (!rmode_segment_valid(vcpu, VCPU_SREG_CS))
  3257. return false;
  3258. if (!rmode_segment_valid(vcpu, VCPU_SREG_SS))
  3259. return false;
  3260. if (!rmode_segment_valid(vcpu, VCPU_SREG_DS))
  3261. return false;
  3262. if (!rmode_segment_valid(vcpu, VCPU_SREG_ES))
  3263. return false;
  3264. if (!rmode_segment_valid(vcpu, VCPU_SREG_FS))
  3265. return false;
  3266. if (!rmode_segment_valid(vcpu, VCPU_SREG_GS))
  3267. return false;
  3268. } else {
  3269. /* protected mode guest state checks */
  3270. if (!cs_ss_rpl_check(vcpu))
  3271. return false;
  3272. if (!code_segment_valid(vcpu))
  3273. return false;
  3274. if (!stack_segment_valid(vcpu))
  3275. return false;
  3276. if (!data_segment_valid(vcpu, VCPU_SREG_DS))
  3277. return false;
  3278. if (!data_segment_valid(vcpu, VCPU_SREG_ES))
  3279. return false;
  3280. if (!data_segment_valid(vcpu, VCPU_SREG_FS))
  3281. return false;
  3282. if (!data_segment_valid(vcpu, VCPU_SREG_GS))
  3283. return false;
  3284. if (!tr_valid(vcpu))
  3285. return false;
  3286. if (!ldtr_valid(vcpu))
  3287. return false;
  3288. }
  3289. /* TODO:
  3290. * - Add checks on RIP
  3291. * - Add checks on RFLAGS
  3292. */
  3293. return true;
  3294. }
  3295. static int init_rmode_tss(struct kvm *kvm)
  3296. {
  3297. gfn_t fn;
  3298. u16 data = 0;
  3299. int r, idx, ret = 0;
  3300. idx = srcu_read_lock(&kvm->srcu);
  3301. fn = kvm->arch.tss_addr >> PAGE_SHIFT;
  3302. r = kvm_clear_guest_page(kvm, fn, 0, PAGE_SIZE);
  3303. if (r < 0)
  3304. goto out;
  3305. data = TSS_BASE_SIZE + TSS_REDIRECTION_SIZE;
  3306. r = kvm_write_guest_page(kvm, fn++, &data,
  3307. TSS_IOPB_BASE_OFFSET, sizeof(u16));
  3308. if (r < 0)
  3309. goto out;
  3310. r = kvm_clear_guest_page(kvm, fn++, 0, PAGE_SIZE);
  3311. if (r < 0)
  3312. goto out;
  3313. r = kvm_clear_guest_page(kvm, fn, 0, PAGE_SIZE);
  3314. if (r < 0)
  3315. goto out;
  3316. data = ~0;
  3317. r = kvm_write_guest_page(kvm, fn, &data,
  3318. RMODE_TSS_SIZE - 2 * PAGE_SIZE - 1,
  3319. sizeof(u8));
  3320. if (r < 0)
  3321. goto out;
  3322. ret = 1;
  3323. out:
  3324. srcu_read_unlock(&kvm->srcu, idx);
  3325. return ret;
  3326. }
  3327. static int init_rmode_identity_map(struct kvm *kvm)
  3328. {
  3329. int i, idx, r, ret;
  3330. pfn_t identity_map_pfn;
  3331. u32 tmp;
  3332. if (!enable_ept)
  3333. return 1;
  3334. if (unlikely(!kvm->arch.ept_identity_pagetable)) {
  3335. printk(KERN_ERR "EPT: identity-mapping pagetable "
  3336. "haven't been allocated!\n");
  3337. return 0;
  3338. }
  3339. if (likely(kvm->arch.ept_identity_pagetable_done))
  3340. return 1;
  3341. ret = 0;
  3342. identity_map_pfn = kvm->arch.ept_identity_map_addr >> PAGE_SHIFT;
  3343. idx = srcu_read_lock(&kvm->srcu);
  3344. r = kvm_clear_guest_page(kvm, identity_map_pfn, 0, PAGE_SIZE);
  3345. if (r < 0)
  3346. goto out;
  3347. /* Set up identity-mapping pagetable for EPT in real mode */
  3348. for (i = 0; i < PT32_ENT_PER_PAGE; i++) {
  3349. tmp = (i << 22) + (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER |
  3350. _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_PSE);
  3351. r = kvm_write_guest_page(kvm, identity_map_pfn,
  3352. &tmp, i * sizeof(tmp), sizeof(tmp));
  3353. if (r < 0)
  3354. goto out;
  3355. }
  3356. kvm->arch.ept_identity_pagetable_done = true;
  3357. ret = 1;
  3358. out:
  3359. srcu_read_unlock(&kvm->srcu, idx);
  3360. return ret;
  3361. }
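/*
 * Reset a guest segment register to its power-on value: selector/base 0,
 * limit 0xffff, access rights 0x93 (present, DPL 0, read/write data,
 * accessed), or 0x9b for CS (execute/read code, accessed).
 */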
  3362. static void seg_setup(int seg)
  3363. {
  3364. const struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];
  3365. unsigned int ar;
  3366. vmcs_write16(sf->selector, 0);
  3367. vmcs_writel(sf->base, 0);
  3368. vmcs_write32(sf->limit, 0xffff);
  3369. ar = 0x93;
  3370. if (seg == VCPU_SREG_CS)
  3371. ar |= 0x08; /* code segment */
  3372. vmcs_write32(sf->ar_bytes, ar);
  3373. }
  3374. static int alloc_apic_access_page(struct kvm *kvm)
  3375. {
  3376. struct page *page;
  3377. struct kvm_userspace_memory_region kvm_userspace_mem;
  3378. int r = 0;
  3379. mutex_lock(&kvm->slots_lock);
  3380. if (kvm->arch.apic_access_page)
  3381. goto out;
  3382. kvm_userspace_mem.slot = APIC_ACCESS_PAGE_PRIVATE_MEMSLOT;
  3383. kvm_userspace_mem.flags = 0;
  3384. kvm_userspace_mem.guest_phys_addr = 0xfee00000ULL;
  3385. kvm_userspace_mem.memory_size = PAGE_SIZE;
  3386. r = __kvm_set_memory_region(kvm, &kvm_userspace_mem);
  3387. if (r)
  3388. goto out;
  3389. page = gfn_to_page(kvm, 0xfee00);
  3390. if (is_error_page(page)) {
  3391. r = -EFAULT;
  3392. goto out;
  3393. }
  3394. kvm->arch.apic_access_page = page;
  3395. out:
  3396. mutex_unlock(&kvm->slots_lock);
  3397. return r;
  3398. }
  3399. static int alloc_identity_pagetable(struct kvm *kvm)
  3400. {
  3401. struct page *page;
  3402. struct kvm_userspace_memory_region kvm_userspace_mem;
  3403. int r = 0;
  3404. mutex_lock(&kvm->slots_lock);
  3405. if (kvm->arch.ept_identity_pagetable)
  3406. goto out;
  3407. kvm_userspace_mem.slot = IDENTITY_PAGETABLE_PRIVATE_MEMSLOT;
  3408. kvm_userspace_mem.flags = 0;
  3409. kvm_userspace_mem.guest_phys_addr =
  3410. kvm->arch.ept_identity_map_addr;
  3411. kvm_userspace_mem.memory_size = PAGE_SIZE;
  3412. r = __kvm_set_memory_region(kvm, &kvm_userspace_mem);
  3413. if (r)
  3414. goto out;
  3415. page = gfn_to_page(kvm, kvm->arch.ept_identity_map_addr >> PAGE_SHIFT);
  3416. if (is_error_page(page)) {
  3417. r = -EFAULT;
  3418. goto out;
  3419. }
  3420. kvm->arch.ept_identity_pagetable = page;
  3421. out:
  3422. mutex_unlock(&kvm->slots_lock);
  3423. return r;
  3424. }
  3425. static void allocate_vpid(struct vcpu_vmx *vmx)
  3426. {
  3427. int vpid;
  3428. vmx->vpid = 0;
  3429. if (!enable_vpid)
  3430. return;
  3431. spin_lock(&vmx_vpid_lock);
  3432. vpid = find_first_zero_bit(vmx_vpid_bitmap, VMX_NR_VPIDS);
  3433. if (vpid < VMX_NR_VPIDS) {
  3434. vmx->vpid = vpid;
  3435. __set_bit(vpid, vmx_vpid_bitmap);
  3436. }
  3437. spin_unlock(&vmx_vpid_lock);
  3438. }
  3439. static void free_vpid(struct vcpu_vmx *vmx)
  3440. {
  3441. if (!enable_vpid)
  3442. return;
  3443. spin_lock(&vmx_vpid_lock);
  3444. if (vmx->vpid != 0)
  3445. __clear_bit(vmx->vpid, vmx_vpid_bitmap);
  3446. spin_unlock(&vmx_vpid_lock);
  3447. }
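/*
 * The MSR bitmap is a single 4KB page split into four 1KB quarters:
 * reads of MSRs 0x00000000-0x00001fff at offset 0x000, reads of
 * 0xc0000000-0xc0001fff at 0x400, and the corresponding write bitmaps at
 * 0x800 and 0xc00.  One bit per MSR; a set bit means "intercept".
 */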
  3448. #define MSR_TYPE_R 1
  3449. #define MSR_TYPE_W 2
  3450. static void __vmx_disable_intercept_for_msr(unsigned long *msr_bitmap,
  3451. u32 msr, int type)
  3452. {
  3453. int f = sizeof(unsigned long);
  3454. if (!cpu_has_vmx_msr_bitmap())
  3455. return;
  3456. /*
  3457. * See Intel PRM Vol. 3, 20.6.9 (MSR-Bitmap Address). Early manuals
  3458. * have the write-low and read-high bitmap offsets the wrong way round.
  3459. * We can control MSRs 0x00000000-0x00001fff and 0xc0000000-0xc0001fff.
  3460. */
  3461. if (msr <= 0x1fff) {
  3462. if (type & MSR_TYPE_R)
  3463. /* read-low */
  3464. __clear_bit(msr, msr_bitmap + 0x000 / f);
  3465. if (type & MSR_TYPE_W)
  3466. /* write-low */
  3467. __clear_bit(msr, msr_bitmap + 0x800 / f);
  3468. } else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff)) {
  3469. msr &= 0x1fff;
  3470. if (type & MSR_TYPE_R)
  3471. /* read-high */
  3472. __clear_bit(msr, msr_bitmap + 0x400 / f);
  3473. if (type & MSR_TYPE_W)
  3474. /* write-high */
  3475. __clear_bit(msr, msr_bitmap + 0xc00 / f);
  3476. }
  3477. }
  3478. static void __vmx_enable_intercept_for_msr(unsigned long *msr_bitmap,
  3479. u32 msr, int type)
  3480. {
  3481. int f = sizeof(unsigned long);
  3482. if (!cpu_has_vmx_msr_bitmap())
  3483. return;
  3484. /*
  3485. * See Intel PRM Vol. 3, 20.6.9 (MSR-Bitmap Address). Early manuals
  3486. * have the write-low and read-high bitmap offsets the wrong way round.
  3487. * We can control MSRs 0x00000000-0x00001fff and 0xc0000000-0xc0001fff.
  3488. */
  3489. if (msr <= 0x1fff) {
  3490. if (type & MSR_TYPE_R)
  3491. /* read-low */
  3492. __set_bit(msr, msr_bitmap + 0x000 / f);
  3493. if (type & MSR_TYPE_W)
  3494. /* write-low */
  3495. __set_bit(msr, msr_bitmap + 0x800 / f);
  3496. } else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff)) {
  3497. msr &= 0x1fff;
  3498. if (type & MSR_TYPE_R)
  3499. /* read-high */
  3500. __set_bit(msr, msr_bitmap + 0x400 / f);
  3501. if (type & MSR_TYPE_W)
  3502. /* write-high */
  3503. __set_bit(msr, msr_bitmap + 0xc00 / f);
  3504. }
  3505. }
  3506. static void vmx_disable_intercept_for_msr(u32 msr, bool longmode_only)
  3507. {
  3508. if (!longmode_only)
  3509. __vmx_disable_intercept_for_msr(vmx_msr_bitmap_legacy,
  3510. msr, MSR_TYPE_R | MSR_TYPE_W);
  3511. __vmx_disable_intercept_for_msr(vmx_msr_bitmap_longmode,
  3512. msr, MSR_TYPE_R | MSR_TYPE_W);
  3513. }
  3514. static void vmx_enable_intercept_msr_read_x2apic(u32 msr)
  3515. {
  3516. __vmx_enable_intercept_for_msr(vmx_msr_bitmap_legacy_x2apic,
  3517. msr, MSR_TYPE_R);
  3518. __vmx_enable_intercept_for_msr(vmx_msr_bitmap_longmode_x2apic,
  3519. msr, MSR_TYPE_R);
  3520. }
  3521. static void vmx_disable_intercept_msr_read_x2apic(u32 msr)
  3522. {
  3523. __vmx_disable_intercept_for_msr(vmx_msr_bitmap_legacy_x2apic,
  3524. msr, MSR_TYPE_R);
  3525. __vmx_disable_intercept_for_msr(vmx_msr_bitmap_longmode_x2apic,
  3526. msr, MSR_TYPE_R);
  3527. }
  3528. static void vmx_disable_intercept_msr_write_x2apic(u32 msr)
  3529. {
  3530. __vmx_disable_intercept_for_msr(vmx_msr_bitmap_legacy_x2apic,
  3531. msr, MSR_TYPE_W);
  3532. __vmx_disable_intercept_for_msr(vmx_msr_bitmap_longmode_x2apic,
  3533. msr, MSR_TYPE_W);
  3534. }
  3535. static int vmx_vm_has_apicv(struct kvm *kvm)
  3536. {
  3537. return enable_apicv && irqchip_in_kernel(kvm);
  3538. }
  3539. /*
  3540. * Send interrupt to vcpu via posted interrupt way.
3541. * 1. If target vcpu is running (non-root mode), send posted interrupt
3542. * notification to vcpu and hardware will sync PIR to vIRR atomically.
3543. * 2. If target vcpu isn't running (root mode), kick it to pick up the
  3544. * interrupt from PIR in next vmentry.
  3545. */
  3546. static void vmx_deliver_posted_interrupt(struct kvm_vcpu *vcpu, int vector)
  3547. {
  3548. struct vcpu_vmx *vmx = to_vmx(vcpu);
  3549. int r;
  3550. if (pi_test_and_set_pir(vector, &vmx->pi_desc))
  3551. return;
  3552. r = pi_test_and_set_on(&vmx->pi_desc);
  3553. kvm_make_request(KVM_REQ_EVENT, vcpu);
  3554. #ifdef CONFIG_SMP
  3555. if (!r && (vcpu->mode == IN_GUEST_MODE))
  3556. apic->send_IPI_mask(get_cpu_mask(vcpu->cpu),
  3557. POSTED_INTR_VECTOR);
  3558. else
  3559. #endif
  3560. kvm_vcpu_kick(vcpu);
  3561. }
  3562. static void vmx_sync_pir_to_irr(struct kvm_vcpu *vcpu)
  3563. {
  3564. struct vcpu_vmx *vmx = to_vmx(vcpu);
  3565. if (!pi_test_and_clear_on(&vmx->pi_desc))
  3566. return;
  3567. kvm_apic_update_irr(vcpu, vmx->pi_desc.pir);
  3568. }
  3569. static void vmx_sync_pir_to_irr_dummy(struct kvm_vcpu *vcpu)
  3570. {
  3571. return;
  3572. }
  3573. /*
  3574. * Set up the vmcs's constant host-state fields, i.e., host-state fields that
  3575. * will not change in the lifetime of the guest.
  3576. * Note that host-state that does change is set elsewhere. E.g., host-state
  3577. * that is set differently for each CPU is set in vmx_vcpu_load(), not here.
  3578. */
  3579. static void vmx_set_constant_host_state(struct vcpu_vmx *vmx)
  3580. {
  3581. u32 low32, high32;
  3582. unsigned long tmpl;
  3583. struct desc_ptr dt;
  3584. vmcs_writel(HOST_CR0, read_cr0() & ~X86_CR0_TS); /* 22.2.3 */
  3585. vmcs_writel(HOST_CR4, read_cr4()); /* 22.2.3, 22.2.5 */
  3586. vmcs_writel(HOST_CR3, read_cr3()); /* 22.2.3 FIXME: shadow tables */
  3587. vmcs_write16(HOST_CS_SELECTOR, __KERNEL_CS); /* 22.2.4 */
  3588. #ifdef CONFIG_X86_64
  3589. /*
  3590. * Load null selectors, so we can avoid reloading them in
  3591. * __vmx_load_host_state(), in case userspace uses the null selectors
  3592. * too (the expected case).
  3593. */
  3594. vmcs_write16(HOST_DS_SELECTOR, 0);
  3595. vmcs_write16(HOST_ES_SELECTOR, 0);
  3596. #else
  3597. vmcs_write16(HOST_DS_SELECTOR, __KERNEL_DS); /* 22.2.4 */
  3598. vmcs_write16(HOST_ES_SELECTOR, __KERNEL_DS); /* 22.2.4 */
  3599. #endif
  3600. vmcs_write16(HOST_SS_SELECTOR, __KERNEL_DS); /* 22.2.4 */
  3601. vmcs_write16(HOST_TR_SELECTOR, GDT_ENTRY_TSS*8); /* 22.2.4 */
  3602. native_store_idt(&dt);
  3603. vmcs_writel(HOST_IDTR_BASE, dt.address); /* 22.2.4 */
  3604. vmx->host_idt_base = dt.address;
  3605. vmcs_writel(HOST_RIP, vmx_return); /* 22.2.5 */
  3606. rdmsr(MSR_IA32_SYSENTER_CS, low32, high32);
  3607. vmcs_write32(HOST_IA32_SYSENTER_CS, low32);
  3608. rdmsrl(MSR_IA32_SYSENTER_EIP, tmpl);
  3609. vmcs_writel(HOST_IA32_SYSENTER_EIP, tmpl); /* 22.2.3 */
  3610. if (vmcs_config.vmexit_ctrl & VM_EXIT_LOAD_IA32_PAT) {
  3611. rdmsr(MSR_IA32_CR_PAT, low32, high32);
  3612. vmcs_write64(HOST_IA32_PAT, low32 | ((u64) high32 << 32));
  3613. }
  3614. }
  3615. static void set_cr4_guest_host_mask(struct vcpu_vmx *vmx)
  3616. {
  3617. vmx->vcpu.arch.cr4_guest_owned_bits = KVM_CR4_GUEST_OWNED_BITS;
  3618. if (enable_ept)
  3619. vmx->vcpu.arch.cr4_guest_owned_bits |= X86_CR4_PGE;
  3620. if (is_guest_mode(&vmx->vcpu))
  3621. vmx->vcpu.arch.cr4_guest_owned_bits &=
  3622. ~get_vmcs12(&vmx->vcpu)->cr4_guest_host_mask;
  3623. vmcs_writel(CR4_GUEST_HOST_MASK, ~vmx->vcpu.arch.cr4_guest_owned_bits);
  3624. }
  3625. static u32 vmx_pin_based_exec_ctrl(struct vcpu_vmx *vmx)
  3626. {
  3627. u32 pin_based_exec_ctrl = vmcs_config.pin_based_exec_ctrl;
  3628. if (!vmx_vm_has_apicv(vmx->vcpu.kvm))
  3629. pin_based_exec_ctrl &= ~PIN_BASED_POSTED_INTR;
  3630. return pin_based_exec_ctrl;
  3631. }
  3632. static u32 vmx_exec_control(struct vcpu_vmx *vmx)
  3633. {
  3634. u32 exec_control = vmcs_config.cpu_based_exec_ctrl;
  3635. if (!vm_need_tpr_shadow(vmx->vcpu.kvm)) {
  3636. exec_control &= ~CPU_BASED_TPR_SHADOW;
  3637. #ifdef CONFIG_X86_64
  3638. exec_control |= CPU_BASED_CR8_STORE_EXITING |
  3639. CPU_BASED_CR8_LOAD_EXITING;
  3640. #endif
  3641. }
  3642. if (!enable_ept)
  3643. exec_control |= CPU_BASED_CR3_STORE_EXITING |
  3644. CPU_BASED_CR3_LOAD_EXITING |
  3645. CPU_BASED_INVLPG_EXITING;
  3646. return exec_control;
  3647. }
  3648. static u32 vmx_secondary_exec_control(struct vcpu_vmx *vmx)
  3649. {
  3650. u32 exec_control = vmcs_config.cpu_based_2nd_exec_ctrl;
  3651. if (!vm_need_virtualize_apic_accesses(vmx->vcpu.kvm))
  3652. exec_control &= ~SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
  3653. if (vmx->vpid == 0)
  3654. exec_control &= ~SECONDARY_EXEC_ENABLE_VPID;
  3655. if (!enable_ept) {
  3656. exec_control &= ~SECONDARY_EXEC_ENABLE_EPT;
  3657. enable_unrestricted_guest = 0;
3658. /* Enabling INVPCID for non-EPT guests may cause a performance regression. */
  3659. exec_control &= ~SECONDARY_EXEC_ENABLE_INVPCID;
  3660. }
  3661. if (!enable_unrestricted_guest)
  3662. exec_control &= ~SECONDARY_EXEC_UNRESTRICTED_GUEST;
  3663. if (!ple_gap)
  3664. exec_control &= ~SECONDARY_EXEC_PAUSE_LOOP_EXITING;
  3665. if (!vmx_vm_has_apicv(vmx->vcpu.kvm))
  3666. exec_control &= ~(SECONDARY_EXEC_APIC_REGISTER_VIRT |
  3667. SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY);
  3668. exec_control &= ~SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE;
  3669. /* SECONDARY_EXEC_SHADOW_VMCS is enabled when L1 executes VMPTRLD
  3670. (handle_vmptrld).
3671. We can NOT enable shadow_vmcs here because we don't yet have
3672. a current VMCS12.
  3673. */
  3674. exec_control &= ~SECONDARY_EXEC_SHADOW_VMCS;
  3675. return exec_control;
  3676. }
  3677. static void ept_set_mmio_spte_mask(void)
  3678. {
  3679. /*
  3680. * EPT Misconfigurations can be generated if the value of bits 2:0
  3681. * of an EPT paging-structure entry is 110b (write/execute).
3682. * Also, the magic bits (0x3ull << 62) are set to quickly identify an mmio
3683. * spte.
  3684. */
  3685. kvm_mmu_set_mmio_spte_mask((0x3ull << 62) | 0x6ull);
  3686. }
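/*
 * Worked example: the mask set up above is (0x3ull << 62) | 0x6ull, i.e.
 * 0xc000000000000006. An MMIO spte therefore carries the "magic" tag in
 * bits 63:62 and has bits 2:0 equal to 110b (write/execute but not read),
 * one of the combinations guaranteed to raise an EPT misconfiguration, so
 * MMIO accesses are reported via handle_ept_misconfig() rather than as
 * ordinary EPT violations.
 */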
  3687. /*
  3688. * Sets up the vmcs for emulated real mode.
  3689. */
  3690. static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
  3691. {
  3692. #ifdef CONFIG_X86_64
  3693. unsigned long a;
  3694. #endif
  3695. int i;
  3696. /* I/O */
  3697. vmcs_write64(IO_BITMAP_A, __pa(vmx_io_bitmap_a));
  3698. vmcs_write64(IO_BITMAP_B, __pa(vmx_io_bitmap_b));
  3699. if (enable_shadow_vmcs) {
  3700. vmcs_write64(VMREAD_BITMAP, __pa(vmx_vmread_bitmap));
  3701. vmcs_write64(VMWRITE_BITMAP, __pa(vmx_vmwrite_bitmap));
  3702. }
  3703. if (cpu_has_vmx_msr_bitmap())
  3704. vmcs_write64(MSR_BITMAP, __pa(vmx_msr_bitmap_legacy));
  3705. vmcs_write64(VMCS_LINK_POINTER, -1ull); /* 22.3.1.5 */
  3706. /* Control */
  3707. vmcs_write32(PIN_BASED_VM_EXEC_CONTROL, vmx_pin_based_exec_ctrl(vmx));
  3708. vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, vmx_exec_control(vmx));
  3709. if (cpu_has_secondary_exec_ctrls()) {
  3710. vmcs_write32(SECONDARY_VM_EXEC_CONTROL,
  3711. vmx_secondary_exec_control(vmx));
  3712. }
  3713. if (vmx_vm_has_apicv(vmx->vcpu.kvm)) {
  3714. vmcs_write64(EOI_EXIT_BITMAP0, 0);
  3715. vmcs_write64(EOI_EXIT_BITMAP1, 0);
  3716. vmcs_write64(EOI_EXIT_BITMAP2, 0);
  3717. vmcs_write64(EOI_EXIT_BITMAP3, 0);
  3718. vmcs_write16(GUEST_INTR_STATUS, 0);
  3719. vmcs_write64(POSTED_INTR_NV, POSTED_INTR_VECTOR);
  3720. vmcs_write64(POSTED_INTR_DESC_ADDR, __pa((&vmx->pi_desc)));
  3721. }
  3722. if (ple_gap) {
  3723. vmcs_write32(PLE_GAP, ple_gap);
  3724. vmcs_write32(PLE_WINDOW, ple_window);
  3725. }
  3726. vmcs_write32(PAGE_FAULT_ERROR_CODE_MASK, 0);
  3727. vmcs_write32(PAGE_FAULT_ERROR_CODE_MATCH, 0);
  3728. vmcs_write32(CR3_TARGET_COUNT, 0); /* 22.2.1 */
  3729. vmcs_write16(HOST_FS_SELECTOR, 0); /* 22.2.4 */
  3730. vmcs_write16(HOST_GS_SELECTOR, 0); /* 22.2.4 */
  3731. vmx_set_constant_host_state(vmx);
  3732. #ifdef CONFIG_X86_64
  3733. rdmsrl(MSR_FS_BASE, a);
  3734. vmcs_writel(HOST_FS_BASE, a); /* 22.2.4 */
  3735. rdmsrl(MSR_GS_BASE, a);
  3736. vmcs_writel(HOST_GS_BASE, a); /* 22.2.4 */
  3737. #else
  3738. vmcs_writel(HOST_FS_BASE, 0); /* 22.2.4 */
  3739. vmcs_writel(HOST_GS_BASE, 0); /* 22.2.4 */
  3740. #endif
  3741. vmcs_write32(VM_EXIT_MSR_STORE_COUNT, 0);
  3742. vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, 0);
  3743. vmcs_write64(VM_EXIT_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.host));
  3744. vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, 0);
  3745. vmcs_write64(VM_ENTRY_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.guest));
  3746. if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT) {
  3747. u32 msr_low, msr_high;
  3748. u64 host_pat;
  3749. rdmsr(MSR_IA32_CR_PAT, msr_low, msr_high);
  3750. host_pat = msr_low | ((u64) msr_high << 32);
3751. /* Write the default value following the host PAT */
  3752. vmcs_write64(GUEST_IA32_PAT, host_pat);
3753. /* Keep arch.pat in sync with GUEST_IA32_PAT */
  3754. vmx->vcpu.arch.pat = host_pat;
  3755. }
  3756. for (i = 0; i < NR_VMX_MSR; ++i) {
  3757. u32 index = vmx_msr_index[i];
  3758. u32 data_low, data_high;
  3759. int j = vmx->nmsrs;
  3760. if (rdmsr_safe(index, &data_low, &data_high) < 0)
  3761. continue;
  3762. if (wrmsr_safe(index, data_low, data_high) < 0)
  3763. continue;
  3764. vmx->guest_msrs[j].index = i;
  3765. vmx->guest_msrs[j].data = 0;
  3766. vmx->guest_msrs[j].mask = -1ull;
  3767. ++vmx->nmsrs;
  3768. }
  3769. vmcs_write32(VM_EXIT_CONTROLS, vmcs_config.vmexit_ctrl);
  3770. /* 22.2.1, 20.8.1 */
  3771. vmcs_write32(VM_ENTRY_CONTROLS, vmcs_config.vmentry_ctrl);
  3772. vmcs_writel(CR0_GUEST_HOST_MASK, ~0UL);
  3773. set_cr4_guest_host_mask(vmx);
  3774. return 0;
  3775. }
  3776. static void vmx_vcpu_reset(struct kvm_vcpu *vcpu)
  3777. {
  3778. struct vcpu_vmx *vmx = to_vmx(vcpu);
  3779. u64 msr;
  3780. vmx->rmode.vm86_active = 0;
  3781. vmx->soft_vnmi_blocked = 0;
  3782. vmx->vcpu.arch.regs[VCPU_REGS_RDX] = get_rdx_init_val();
  3783. kvm_set_cr8(&vmx->vcpu, 0);
  3784. msr = 0xfee00000 | MSR_IA32_APICBASE_ENABLE;
  3785. if (kvm_vcpu_is_bsp(&vmx->vcpu))
  3786. msr |= MSR_IA32_APICBASE_BSP;
  3787. kvm_set_apic_base(&vmx->vcpu, msr);
  3788. vmx_segment_cache_clear(vmx);
  3789. seg_setup(VCPU_SREG_CS);
  3790. vmcs_write16(GUEST_CS_SELECTOR, 0xf000);
  3791. vmcs_write32(GUEST_CS_BASE, 0xffff0000);
  3792. seg_setup(VCPU_SREG_DS);
  3793. seg_setup(VCPU_SREG_ES);
  3794. seg_setup(VCPU_SREG_FS);
  3795. seg_setup(VCPU_SREG_GS);
  3796. seg_setup(VCPU_SREG_SS);
  3797. vmcs_write16(GUEST_TR_SELECTOR, 0);
  3798. vmcs_writel(GUEST_TR_BASE, 0);
  3799. vmcs_write32(GUEST_TR_LIMIT, 0xffff);
  3800. vmcs_write32(GUEST_TR_AR_BYTES, 0x008b);
  3801. vmcs_write16(GUEST_LDTR_SELECTOR, 0);
  3802. vmcs_writel(GUEST_LDTR_BASE, 0);
  3803. vmcs_write32(GUEST_LDTR_LIMIT, 0xffff);
  3804. vmcs_write32(GUEST_LDTR_AR_BYTES, 0x00082);
  3805. vmcs_write32(GUEST_SYSENTER_CS, 0);
  3806. vmcs_writel(GUEST_SYSENTER_ESP, 0);
  3807. vmcs_writel(GUEST_SYSENTER_EIP, 0);
  3808. vmcs_writel(GUEST_RFLAGS, 0x02);
  3809. kvm_rip_write(vcpu, 0xfff0);
  3810. vmcs_writel(GUEST_GDTR_BASE, 0);
  3811. vmcs_write32(GUEST_GDTR_LIMIT, 0xffff);
  3812. vmcs_writel(GUEST_IDTR_BASE, 0);
  3813. vmcs_write32(GUEST_IDTR_LIMIT, 0xffff);
  3814. vmcs_write32(GUEST_ACTIVITY_STATE, GUEST_ACTIVITY_ACTIVE);
  3815. vmcs_write32(GUEST_INTERRUPTIBILITY_INFO, 0);
  3816. vmcs_write32(GUEST_PENDING_DBG_EXCEPTIONS, 0);
  3817. /* Special registers */
  3818. vmcs_write64(GUEST_IA32_DEBUGCTL, 0);
  3819. setup_msrs(vmx);
  3820. vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, 0); /* 22.2.1 */
  3821. if (cpu_has_vmx_tpr_shadow()) {
  3822. vmcs_write64(VIRTUAL_APIC_PAGE_ADDR, 0);
  3823. if (vm_need_tpr_shadow(vmx->vcpu.kvm))
  3824. vmcs_write64(VIRTUAL_APIC_PAGE_ADDR,
  3825. __pa(vmx->vcpu.arch.apic->regs));
  3826. vmcs_write32(TPR_THRESHOLD, 0);
  3827. }
  3828. if (vm_need_virtualize_apic_accesses(vmx->vcpu.kvm))
  3829. vmcs_write64(APIC_ACCESS_ADDR,
  3830. page_to_phys(vmx->vcpu.kvm->arch.apic_access_page));
  3831. if (vmx_vm_has_apicv(vcpu->kvm))
  3832. memset(&vmx->pi_desc, 0, sizeof(struct pi_desc));
  3833. if (vmx->vpid != 0)
  3834. vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->vpid);
  3835. vmx->vcpu.arch.cr0 = X86_CR0_NW | X86_CR0_CD | X86_CR0_ET;
  3836. vmx_set_cr0(&vmx->vcpu, kvm_read_cr0(vcpu)); /* enter rmode */
  3837. vmx_set_cr4(&vmx->vcpu, 0);
  3838. vmx_set_efer(&vmx->vcpu, 0);
  3839. vmx_fpu_activate(&vmx->vcpu);
  3840. update_exception_bitmap(&vmx->vcpu);
  3841. vpid_sync_context(vmx);
  3842. }
  3843. /*
  3844. * In nested virtualization, check if L1 asked to exit on external interrupts.
  3845. * For most existing hypervisors, this will always return true.
  3846. */
  3847. static bool nested_exit_on_intr(struct kvm_vcpu *vcpu)
  3848. {
  3849. return get_vmcs12(vcpu)->pin_based_vm_exec_control &
  3850. PIN_BASED_EXT_INTR_MASK;
  3851. }
  3852. static bool nested_exit_on_nmi(struct kvm_vcpu *vcpu)
  3853. {
  3854. return get_vmcs12(vcpu)->pin_based_vm_exec_control &
  3855. PIN_BASED_NMI_EXITING;
  3856. }
  3857. static int enable_irq_window(struct kvm_vcpu *vcpu)
  3858. {
  3859. u32 cpu_based_vm_exec_control;
  3860. if (is_guest_mode(vcpu) && nested_exit_on_intr(vcpu))
  3861. /*
  3862. * We get here if vmx_interrupt_allowed() said we can't
  3863. * inject to L1 now because L2 must run. The caller will have
  3864. * to make L2 exit right after entry, so we can inject to L1
  3865. * more promptly.
  3866. */
  3867. return -EBUSY;
  3868. cpu_based_vm_exec_control = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL);
  3869. cpu_based_vm_exec_control |= CPU_BASED_VIRTUAL_INTR_PENDING;
  3870. vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control);
  3871. return 0;
  3872. }
  3873. static int enable_nmi_window(struct kvm_vcpu *vcpu)
  3874. {
  3875. u32 cpu_based_vm_exec_control;
  3876. if (!cpu_has_virtual_nmis())
  3877. return enable_irq_window(vcpu);
  3878. if (vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & GUEST_INTR_STATE_STI)
  3879. return enable_irq_window(vcpu);
  3880. cpu_based_vm_exec_control = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL);
  3881. cpu_based_vm_exec_control |= CPU_BASED_VIRTUAL_NMI_PENDING;
  3882. vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control);
  3883. return 0;
  3884. }
  3885. static void vmx_inject_irq(struct kvm_vcpu *vcpu)
  3886. {
  3887. struct vcpu_vmx *vmx = to_vmx(vcpu);
  3888. uint32_t intr;
  3889. int irq = vcpu->arch.interrupt.nr;
  3890. trace_kvm_inj_virq(irq);
  3891. ++vcpu->stat.irq_injections;
  3892. if (vmx->rmode.vm86_active) {
  3893. int inc_eip = 0;
  3894. if (vcpu->arch.interrupt.soft)
  3895. inc_eip = vcpu->arch.event_exit_inst_len;
  3896. if (kvm_inject_realmode_interrupt(vcpu, irq, inc_eip) != EMULATE_DONE)
  3897. kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
  3898. return;
  3899. }
  3900. intr = irq | INTR_INFO_VALID_MASK;
  3901. if (vcpu->arch.interrupt.soft) {
  3902. intr |= INTR_TYPE_SOFT_INTR;
  3903. vmcs_write32(VM_ENTRY_INSTRUCTION_LEN,
  3904. vmx->vcpu.arch.event_exit_inst_len);
  3905. } else
  3906. intr |= INTR_TYPE_EXT_INTR;
  3907. vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, intr);
  3908. }
  3909. static void vmx_inject_nmi(struct kvm_vcpu *vcpu)
  3910. {
  3911. struct vcpu_vmx *vmx = to_vmx(vcpu);
  3912. if (is_guest_mode(vcpu))
  3913. return;
  3914. if (!cpu_has_virtual_nmis()) {
  3915. /*
  3916. * Tracking the NMI-blocked state in software is built upon
  3917. * finding the next open IRQ window. This, in turn, depends on
  3918. * well-behaving guests: They have to keep IRQs disabled at
  3919. * least as long as the NMI handler runs. Otherwise we may
  3920. * cause NMI nesting, maybe breaking the guest. But as this is
  3921. * highly unlikely, we can live with the residual risk.
  3922. */
  3923. vmx->soft_vnmi_blocked = 1;
  3924. vmx->vnmi_blocked_time = 0;
  3925. }
  3926. ++vcpu->stat.nmi_injections;
  3927. vmx->nmi_known_unmasked = false;
  3928. if (vmx->rmode.vm86_active) {
  3929. if (kvm_inject_realmode_interrupt(vcpu, NMI_VECTOR, 0) != EMULATE_DONE)
  3930. kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
  3931. return;
  3932. }
  3933. vmcs_write32(VM_ENTRY_INTR_INFO_FIELD,
  3934. INTR_TYPE_NMI_INTR | INTR_INFO_VALID_MASK | NMI_VECTOR);
  3935. }
  3936. static bool vmx_get_nmi_mask(struct kvm_vcpu *vcpu)
  3937. {
  3938. if (!cpu_has_virtual_nmis())
  3939. return to_vmx(vcpu)->soft_vnmi_blocked;
  3940. if (to_vmx(vcpu)->nmi_known_unmasked)
  3941. return false;
  3942. return vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & GUEST_INTR_STATE_NMI;
  3943. }
  3944. static void vmx_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked)
  3945. {
  3946. struct vcpu_vmx *vmx = to_vmx(vcpu);
  3947. if (!cpu_has_virtual_nmis()) {
  3948. if (vmx->soft_vnmi_blocked != masked) {
  3949. vmx->soft_vnmi_blocked = masked;
  3950. vmx->vnmi_blocked_time = 0;
  3951. }
  3952. } else {
  3953. vmx->nmi_known_unmasked = !masked;
  3954. if (masked)
  3955. vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO,
  3956. GUEST_INTR_STATE_NMI);
  3957. else
  3958. vmcs_clear_bits(GUEST_INTERRUPTIBILITY_INFO,
  3959. GUEST_INTR_STATE_NMI);
  3960. }
  3961. }
  3962. static int vmx_nmi_allowed(struct kvm_vcpu *vcpu)
  3963. {
  3964. if (is_guest_mode(vcpu)) {
  3965. struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
  3966. if (to_vmx(vcpu)->nested.nested_run_pending)
  3967. return 0;
  3968. if (nested_exit_on_nmi(vcpu)) {
  3969. nested_vmx_vmexit(vcpu);
  3970. vmcs12->vm_exit_reason = EXIT_REASON_EXCEPTION_NMI;
  3971. vmcs12->vm_exit_intr_info = NMI_VECTOR |
  3972. INTR_TYPE_NMI_INTR | INTR_INFO_VALID_MASK;
  3973. /*
  3974. * The NMI-triggered VM exit counts as injection:
  3975. * clear this one and block further NMIs.
  3976. */
  3977. vcpu->arch.nmi_pending = 0;
  3978. vmx_set_nmi_mask(vcpu, true);
  3979. return 0;
  3980. }
  3981. }
  3982. if (!cpu_has_virtual_nmis() && to_vmx(vcpu)->soft_vnmi_blocked)
  3983. return 0;
  3984. return !(vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) &
  3985. (GUEST_INTR_STATE_MOV_SS | GUEST_INTR_STATE_STI
  3986. | GUEST_INTR_STATE_NMI));
  3987. }
  3988. static int vmx_interrupt_allowed(struct kvm_vcpu *vcpu)
  3989. {
  3990. if (is_guest_mode(vcpu)) {
  3991. struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
  3992. if (to_vmx(vcpu)->nested.nested_run_pending)
  3993. return 0;
  3994. if (nested_exit_on_intr(vcpu)) {
  3995. nested_vmx_vmexit(vcpu);
  3996. vmcs12->vm_exit_reason =
  3997. EXIT_REASON_EXTERNAL_INTERRUPT;
  3998. vmcs12->vm_exit_intr_info = 0;
  3999. /*
  4000. * fall through to normal code, but now in L1, not L2
  4001. */
  4002. }
  4003. }
  4004. return (vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_IF) &&
  4005. !(vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) &
  4006. (GUEST_INTR_STATE_STI | GUEST_INTR_STATE_MOV_SS));
  4007. }
  4008. static int vmx_set_tss_addr(struct kvm *kvm, unsigned int addr)
  4009. {
  4010. int ret;
  4011. struct kvm_userspace_memory_region tss_mem = {
  4012. .slot = TSS_PRIVATE_MEMSLOT,
  4013. .guest_phys_addr = addr,
  4014. .memory_size = PAGE_SIZE * 3,
  4015. .flags = 0,
  4016. };
  4017. ret = kvm_set_memory_region(kvm, &tss_mem);
  4018. if (ret)
  4019. return ret;
  4020. kvm->arch.tss_addr = addr;
  4021. if (!init_rmode_tss(kvm))
  4022. return -ENOMEM;
  4023. return 0;
  4024. }
  4025. static bool rmode_exception(struct kvm_vcpu *vcpu, int vec)
  4026. {
  4027. switch (vec) {
  4028. case BP_VECTOR:
  4029. /*
  4030. * Update instruction length as we may reinject the exception
  4031. * from user space while in guest debugging mode.
  4032. */
  4033. to_vmx(vcpu)->vcpu.arch.event_exit_inst_len =
  4034. vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
  4035. if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP)
  4036. return false;
  4037. /* fall through */
  4038. case DB_VECTOR:
  4039. if (vcpu->guest_debug &
  4040. (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP))
  4041. return false;
  4042. /* fall through */
  4043. case DE_VECTOR:
  4044. case OF_VECTOR:
  4045. case BR_VECTOR:
  4046. case UD_VECTOR:
  4047. case DF_VECTOR:
  4048. case SS_VECTOR:
  4049. case GP_VECTOR:
  4050. case MF_VECTOR:
  4051. return true;
  4052. break;
  4053. }
  4054. return false;
  4055. }
  4056. static int handle_rmode_exception(struct kvm_vcpu *vcpu,
  4057. int vec, u32 err_code)
  4058. {
  4059. /*
4060. * An instruction with the address-size override prefix (opcode 0x67)
4061. * causes a #SS fault with error code 0 in VM86 mode.
  4062. */
  4063. if (((vec == GP_VECTOR) || (vec == SS_VECTOR)) && err_code == 0) {
  4064. if (emulate_instruction(vcpu, 0) == EMULATE_DONE) {
  4065. if (vcpu->arch.halt_request) {
  4066. vcpu->arch.halt_request = 0;
  4067. return kvm_emulate_halt(vcpu);
  4068. }
  4069. return 1;
  4070. }
  4071. return 0;
  4072. }
  4073. /*
  4074. * Forward all other exceptions that are valid in real mode.
  4075. * FIXME: Breaks guest debugging in real mode, needs to be fixed with
  4076. * the required debugging infrastructure rework.
  4077. */
  4078. kvm_queue_exception(vcpu, vec);
  4079. return 1;
  4080. }
  4081. /*
  4082. * Trigger machine check on the host. We assume all the MSRs are already set up
  4083. * by the CPU and that we still run on the same CPU as the MCE occurred on.
  4084. * We pass a fake environment to the machine check handler because we want
  4085. * the guest to be always treated like user space, no matter what context
  4086. * it used internally.
  4087. */
  4088. static void kvm_machine_check(void)
  4089. {
  4090. #if defined(CONFIG_X86_MCE) && defined(CONFIG_X86_64)
  4091. struct pt_regs regs = {
  4092. .cs = 3, /* Fake ring 3 no matter what the guest ran on */
  4093. .flags = X86_EFLAGS_IF,
  4094. };
  4095. do_machine_check(&regs, 0);
  4096. #endif
  4097. }
  4098. static int handle_machine_check(struct kvm_vcpu *vcpu)
  4099. {
  4100. /* already handled by vcpu_run */
  4101. return 1;
  4102. }
  4103. static int handle_exception(struct kvm_vcpu *vcpu)
  4104. {
  4105. struct vcpu_vmx *vmx = to_vmx(vcpu);
  4106. struct kvm_run *kvm_run = vcpu->run;
  4107. u32 intr_info, ex_no, error_code;
  4108. unsigned long cr2, rip, dr6;
  4109. u32 vect_info;
  4110. enum emulation_result er;
  4111. vect_info = vmx->idt_vectoring_info;
  4112. intr_info = vmx->exit_intr_info;
  4113. if (is_machine_check(intr_info))
  4114. return handle_machine_check(vcpu);
  4115. if ((intr_info & INTR_INFO_INTR_TYPE_MASK) == INTR_TYPE_NMI_INTR)
  4116. return 1; /* already handled by vmx_vcpu_run() */
  4117. if (is_no_device(intr_info)) {
  4118. vmx_fpu_activate(vcpu);
  4119. return 1;
  4120. }
  4121. if (is_invalid_opcode(intr_info)) {
  4122. er = emulate_instruction(vcpu, EMULTYPE_TRAP_UD);
  4123. if (er != EMULATE_DONE)
  4124. kvm_queue_exception(vcpu, UD_VECTOR);
  4125. return 1;
  4126. }
  4127. error_code = 0;
  4128. if (intr_info & INTR_INFO_DELIVER_CODE_MASK)
  4129. error_code = vmcs_read32(VM_EXIT_INTR_ERROR_CODE);
  4130. /*
4131. * A #PF with PFEC.RSVD = 1 indicates the guest is accessing
4132. * MMIO; it is better to report an internal error.
  4133. * See the comments in vmx_handle_exit.
  4134. */
  4135. if ((vect_info & VECTORING_INFO_VALID_MASK) &&
  4136. !(is_page_fault(intr_info) && !(error_code & PFERR_RSVD_MASK))) {
  4137. vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
  4138. vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_SIMUL_EX;
  4139. vcpu->run->internal.ndata = 2;
  4140. vcpu->run->internal.data[0] = vect_info;
  4141. vcpu->run->internal.data[1] = intr_info;
  4142. return 0;
  4143. }
  4144. if (is_page_fault(intr_info)) {
  4145. /* EPT won't cause page fault directly */
  4146. BUG_ON(enable_ept);
  4147. cr2 = vmcs_readl(EXIT_QUALIFICATION);
  4148. trace_kvm_page_fault(cr2, error_code);
  4149. if (kvm_event_needs_reinjection(vcpu))
  4150. kvm_mmu_unprotect_page_virt(vcpu, cr2);
  4151. return kvm_mmu_page_fault(vcpu, cr2, error_code, NULL, 0);
  4152. }
  4153. ex_no = intr_info & INTR_INFO_VECTOR_MASK;
  4154. if (vmx->rmode.vm86_active && rmode_exception(vcpu, ex_no))
  4155. return handle_rmode_exception(vcpu, ex_no, error_code);
  4156. switch (ex_no) {
  4157. case DB_VECTOR:
  4158. dr6 = vmcs_readl(EXIT_QUALIFICATION);
  4159. if (!(vcpu->guest_debug &
  4160. (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP))) {
  4161. vcpu->arch.dr6 = dr6 | DR6_FIXED_1;
  4162. kvm_queue_exception(vcpu, DB_VECTOR);
  4163. return 1;
  4164. }
  4165. kvm_run->debug.arch.dr6 = dr6 | DR6_FIXED_1;
  4166. kvm_run->debug.arch.dr7 = vmcs_readl(GUEST_DR7);
  4167. /* fall through */
  4168. case BP_VECTOR:
  4169. /*
  4170. * Update instruction length as we may reinject #BP from
  4171. * user space while in guest debugging mode. Reading it for
4172. * #DB as well causes no harm; it is not used in that case.
  4173. */
  4174. vmx->vcpu.arch.event_exit_inst_len =
  4175. vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
  4176. kvm_run->exit_reason = KVM_EXIT_DEBUG;
  4177. rip = kvm_rip_read(vcpu);
  4178. kvm_run->debug.arch.pc = vmcs_readl(GUEST_CS_BASE) + rip;
  4179. kvm_run->debug.arch.exception = ex_no;
  4180. break;
  4181. default:
  4182. kvm_run->exit_reason = KVM_EXIT_EXCEPTION;
  4183. kvm_run->ex.exception = ex_no;
  4184. kvm_run->ex.error_code = error_code;
  4185. break;
  4186. }
  4187. return 0;
  4188. }
  4189. static int handle_external_interrupt(struct kvm_vcpu *vcpu)
  4190. {
  4191. ++vcpu->stat.irq_exits;
  4192. return 1;
  4193. }
  4194. static int handle_triple_fault(struct kvm_vcpu *vcpu)
  4195. {
  4196. vcpu->run->exit_reason = KVM_EXIT_SHUTDOWN;
  4197. return 0;
  4198. }
  4199. static int handle_io(struct kvm_vcpu *vcpu)
  4200. {
  4201. unsigned long exit_qualification;
  4202. int size, in, string;
  4203. unsigned port;
  4204. exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
  4205. string = (exit_qualification & 16) != 0;
  4206. in = (exit_qualification & 8) != 0;
  4207. ++vcpu->stat.io_exits;
  4208. if (string || in)
  4209. return emulate_instruction(vcpu, 0) == EMULATE_DONE;
  4210. port = exit_qualification >> 16;
  4211. size = (exit_qualification & 7) + 1;
  4212. skip_emulated_instruction(vcpu);
  4213. return kvm_fast_pio_out(vcpu, size, port);
  4214. }
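/*
 * Worked example of the exit qualification decoded above: "out %al, $0x80"
 * produces an exit qualification of 0x00800000 -- bits 2:0 are 0 (1-byte
 * access), bit 3 is clear (OUT rather than IN), bit 4 is clear (not a
 * string instruction) and bits 31:16 hold the port number, 0x80.
 * handle_io() then computes size = 1, in = 0, string = 0, port = 0x80 and
 * takes the kvm_fast_pio_out() path.
 */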
  4215. static void
  4216. vmx_patch_hypercall(struct kvm_vcpu *vcpu, unsigned char *hypercall)
  4217. {
  4218. /*
  4219. * Patch in the VMCALL instruction:
  4220. */
  4221. hypercall[0] = 0x0f;
  4222. hypercall[1] = 0x01;
  4223. hypercall[2] = 0xc1;
  4224. }
  4225. /* called to set cr0 as appropriate for a mov-to-cr0 exit. */
  4226. static int handle_set_cr0(struct kvm_vcpu *vcpu, unsigned long val)
  4227. {
  4228. if (is_guest_mode(vcpu)) {
  4229. struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
  4230. unsigned long orig_val = val;
  4231. /*
  4232. * We get here when L2 changed cr0 in a way that did not change
  4233. * any of L1's shadowed bits (see nested_vmx_exit_handled_cr),
  4234. * but did change L0 shadowed bits. So we first calculate the
  4235. * effective cr0 value that L1 would like to write into the
  4236. * hardware. It consists of the L2-owned bits from the new
  4237. * value combined with the L1-owned bits from L1's guest_cr0.
  4238. */
  4239. val = (val & ~vmcs12->cr0_guest_host_mask) |
  4240. (vmcs12->guest_cr0 & vmcs12->cr0_guest_host_mask);
  4241. /* TODO: will have to take unrestricted guest mode into
  4242. * account */
  4243. if ((val & VMXON_CR0_ALWAYSON) != VMXON_CR0_ALWAYSON)
  4244. return 1;
  4245. if (kvm_set_cr0(vcpu, val))
  4246. return 1;
  4247. vmcs_writel(CR0_READ_SHADOW, orig_val);
  4248. return 0;
  4249. } else {
  4250. if (to_vmx(vcpu)->nested.vmxon &&
  4251. ((val & VMXON_CR0_ALWAYSON) != VMXON_CR0_ALWAYSON))
  4252. return 1;
  4253. return kvm_set_cr0(vcpu, val);
  4254. }
  4255. }
  4256. static int handle_set_cr4(struct kvm_vcpu *vcpu, unsigned long val)
  4257. {
  4258. if (is_guest_mode(vcpu)) {
  4259. struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
  4260. unsigned long orig_val = val;
  4261. /* analogously to handle_set_cr0 */
  4262. val = (val & ~vmcs12->cr4_guest_host_mask) |
  4263. (vmcs12->guest_cr4 & vmcs12->cr4_guest_host_mask);
  4264. if (kvm_set_cr4(vcpu, val))
  4265. return 1;
  4266. vmcs_writel(CR4_READ_SHADOW, orig_val);
  4267. return 0;
  4268. } else
  4269. return kvm_set_cr4(vcpu, val);
  4270. }
4271. /* called to set cr0 as appropriate for a clts instruction exit. */
  4272. static void handle_clts(struct kvm_vcpu *vcpu)
  4273. {
  4274. if (is_guest_mode(vcpu)) {
  4275. /*
  4276. * We get here when L2 did CLTS, and L1 didn't shadow CR0.TS
  4277. * but we did (!fpu_active). We need to keep GUEST_CR0.TS on,
4278. * but just pretend it's off in CR0_READ_SHADOW (also in arch.cr0, for fpu_activate).
  4279. */
  4280. vmcs_writel(CR0_READ_SHADOW,
  4281. vmcs_readl(CR0_READ_SHADOW) & ~X86_CR0_TS);
  4282. vcpu->arch.cr0 &= ~X86_CR0_TS;
  4283. } else
  4284. vmx_set_cr0(vcpu, kvm_read_cr0_bits(vcpu, ~X86_CR0_TS));
  4285. }
  4286. static int handle_cr(struct kvm_vcpu *vcpu)
  4287. {
  4288. unsigned long exit_qualification, val;
  4289. int cr;
  4290. int reg;
  4291. int err;
  4292. exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
  4293. cr = exit_qualification & 15;
  4294. reg = (exit_qualification >> 8) & 15;
  4295. switch ((exit_qualification >> 4) & 3) {
  4296. case 0: /* mov to cr */
  4297. val = kvm_register_read(vcpu, reg);
  4298. trace_kvm_cr_write(cr, val);
  4299. switch (cr) {
  4300. case 0:
  4301. err = handle_set_cr0(vcpu, val);
  4302. kvm_complete_insn_gp(vcpu, err);
  4303. return 1;
  4304. case 3:
  4305. err = kvm_set_cr3(vcpu, val);
  4306. kvm_complete_insn_gp(vcpu, err);
  4307. return 1;
  4308. case 4:
  4309. err = handle_set_cr4(vcpu, val);
  4310. kvm_complete_insn_gp(vcpu, err);
  4311. return 1;
  4312. case 8: {
  4313. u8 cr8_prev = kvm_get_cr8(vcpu);
  4314. u8 cr8 = kvm_register_read(vcpu, reg);
  4315. err = kvm_set_cr8(vcpu, cr8);
  4316. kvm_complete_insn_gp(vcpu, err);
  4317. if (irqchip_in_kernel(vcpu->kvm))
  4318. return 1;
  4319. if (cr8_prev <= cr8)
  4320. return 1;
  4321. vcpu->run->exit_reason = KVM_EXIT_SET_TPR;
  4322. return 0;
  4323. }
  4324. }
  4325. break;
  4326. case 2: /* clts */
  4327. handle_clts(vcpu);
  4328. trace_kvm_cr_write(0, kvm_read_cr0(vcpu));
  4329. skip_emulated_instruction(vcpu);
  4330. vmx_fpu_activate(vcpu);
  4331. return 1;
  4332. case 1: /*mov from cr*/
  4333. switch (cr) {
  4334. case 3:
  4335. val = kvm_read_cr3(vcpu);
  4336. kvm_register_write(vcpu, reg, val);
  4337. trace_kvm_cr_read(cr, val);
  4338. skip_emulated_instruction(vcpu);
  4339. return 1;
  4340. case 8:
  4341. val = kvm_get_cr8(vcpu);
  4342. kvm_register_write(vcpu, reg, val);
  4343. trace_kvm_cr_read(cr, val);
  4344. skip_emulated_instruction(vcpu);
  4345. return 1;
  4346. }
  4347. break;
  4348. case 3: /* lmsw */
  4349. val = (exit_qualification >> LMSW_SOURCE_DATA_SHIFT) & 0x0f;
  4350. trace_kvm_cr_write(0, (kvm_read_cr0(vcpu) & ~0xful) | val);
  4351. kvm_lmsw(vcpu, val);
  4352. skip_emulated_instruction(vcpu);
  4353. return 1;
  4354. default:
  4355. break;
  4356. }
  4357. vcpu->run->exit_reason = 0;
  4358. vcpu_unimpl(vcpu, "unhandled control register: op %d cr %d\n",
  4359. (int)(exit_qualification >> 4) & 3, cr);
  4360. return 0;
  4361. }
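/*
 * Worked example of the CR-access exit qualification decoded above:
 * "mov %rax, %cr4" yields an exit qualification of 0x4 -- bits 3:0 give
 * the control register (4), bits 5:4 give the access type (0, mov to cr)
 * and bits 11:8 give the source register (0, RAX) -- so handle_cr() reads
 * RAX and hands the value to handle_set_cr4().
 */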
  4362. static int handle_dr(struct kvm_vcpu *vcpu)
  4363. {
  4364. unsigned long exit_qualification;
  4365. int dr, reg;
  4366. /* Do not handle if the CPL > 0, will trigger GP on re-entry */
  4367. if (!kvm_require_cpl(vcpu, 0))
  4368. return 1;
  4369. dr = vmcs_readl(GUEST_DR7);
  4370. if (dr & DR7_GD) {
  4371. /*
  4372. * As the vm-exit takes precedence over the debug trap, we
  4373. * need to emulate the latter, either for the host or the
  4374. * guest debugging itself.
  4375. */
  4376. if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) {
  4377. vcpu->run->debug.arch.dr6 = vcpu->arch.dr6;
  4378. vcpu->run->debug.arch.dr7 = dr;
  4379. vcpu->run->debug.arch.pc =
  4380. vmcs_readl(GUEST_CS_BASE) +
  4381. vmcs_readl(GUEST_RIP);
  4382. vcpu->run->debug.arch.exception = DB_VECTOR;
  4383. vcpu->run->exit_reason = KVM_EXIT_DEBUG;
  4384. return 0;
  4385. } else {
  4386. vcpu->arch.dr7 &= ~DR7_GD;
  4387. vcpu->arch.dr6 |= DR6_BD;
  4388. vmcs_writel(GUEST_DR7, vcpu->arch.dr7);
  4389. kvm_queue_exception(vcpu, DB_VECTOR);
  4390. return 1;
  4391. }
  4392. }
  4393. exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
  4394. dr = exit_qualification & DEBUG_REG_ACCESS_NUM;
  4395. reg = DEBUG_REG_ACCESS_REG(exit_qualification);
  4396. if (exit_qualification & TYPE_MOV_FROM_DR) {
  4397. unsigned long val;
  4398. if (!kvm_get_dr(vcpu, dr, &val))
  4399. kvm_register_write(vcpu, reg, val);
  4400. } else
  4401. kvm_set_dr(vcpu, dr, vcpu->arch.regs[reg]);
  4402. skip_emulated_instruction(vcpu);
  4403. return 1;
  4404. }
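/*
 * Worked example of the debug-register exit qualification decoded above:
 * "mov %dr7, %rbx" yields an exit qualification of 0x317 -- bits 2:0 give
 * the debug register (7), bit 4 is set (TYPE_MOV_FROM_DR) and bits 11:8
 * give the destination register (3, RBX) -- so handle_dr() calls
 * kvm_get_dr() for DR7 and writes the result into RBX.
 */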
  4405. static void vmx_set_dr7(struct kvm_vcpu *vcpu, unsigned long val)
  4406. {
  4407. vmcs_writel(GUEST_DR7, val);
  4408. }
  4409. static int handle_cpuid(struct kvm_vcpu *vcpu)
  4410. {
  4411. kvm_emulate_cpuid(vcpu);
  4412. return 1;
  4413. }
  4414. static int handle_rdmsr(struct kvm_vcpu *vcpu)
  4415. {
  4416. u32 ecx = vcpu->arch.regs[VCPU_REGS_RCX];
  4417. u64 data;
  4418. if (vmx_get_msr(vcpu, ecx, &data)) {
  4419. trace_kvm_msr_read_ex(ecx);
  4420. kvm_inject_gp(vcpu, 0);
  4421. return 1;
  4422. }
  4423. trace_kvm_msr_read(ecx, data);
  4424. /* FIXME: handling of bits 32:63 of rax, rdx */
  4425. vcpu->arch.regs[VCPU_REGS_RAX] = data & -1u;
  4426. vcpu->arch.regs[VCPU_REGS_RDX] = (data >> 32) & -1u;
  4427. skip_emulated_instruction(vcpu);
  4428. return 1;
  4429. }
  4430. static int handle_wrmsr(struct kvm_vcpu *vcpu)
  4431. {
  4432. struct msr_data msr;
  4433. u32 ecx = vcpu->arch.regs[VCPU_REGS_RCX];
  4434. u64 data = (vcpu->arch.regs[VCPU_REGS_RAX] & -1u)
  4435. | ((u64)(vcpu->arch.regs[VCPU_REGS_RDX] & -1u) << 32);
  4436. msr.data = data;
  4437. msr.index = ecx;
  4438. msr.host_initiated = false;
  4439. if (vmx_set_msr(vcpu, &msr) != 0) {
  4440. trace_kvm_msr_write_ex(ecx, data);
  4441. kvm_inject_gp(vcpu, 0);
  4442. return 1;
  4443. }
  4444. trace_kvm_msr_write(ecx, data);
  4445. skip_emulated_instruction(vcpu);
  4446. return 1;
  4447. }
  4448. static int handle_tpr_below_threshold(struct kvm_vcpu *vcpu)
  4449. {
  4450. kvm_make_request(KVM_REQ_EVENT, vcpu);
  4451. return 1;
  4452. }
  4453. static int handle_interrupt_window(struct kvm_vcpu *vcpu)
  4454. {
  4455. u32 cpu_based_vm_exec_control;
  4456. /* clear pending irq */
  4457. cpu_based_vm_exec_control = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL);
  4458. cpu_based_vm_exec_control &= ~CPU_BASED_VIRTUAL_INTR_PENDING;
  4459. vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control);
  4460. kvm_make_request(KVM_REQ_EVENT, vcpu);
  4461. ++vcpu->stat.irq_window_exits;
  4462. /*
4463. * If user space is waiting to inject interrupts, exit as soon as
4464. * possible.
  4465. */
  4466. if (!irqchip_in_kernel(vcpu->kvm) &&
  4467. vcpu->run->request_interrupt_window &&
  4468. !kvm_cpu_has_interrupt(vcpu)) {
  4469. vcpu->run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN;
  4470. return 0;
  4471. }
  4472. return 1;
  4473. }
  4474. static int handle_halt(struct kvm_vcpu *vcpu)
  4475. {
  4476. skip_emulated_instruction(vcpu);
  4477. return kvm_emulate_halt(vcpu);
  4478. }
  4479. static int handle_vmcall(struct kvm_vcpu *vcpu)
  4480. {
  4481. skip_emulated_instruction(vcpu);
  4482. kvm_emulate_hypercall(vcpu);
  4483. return 1;
  4484. }
  4485. static int handle_invd(struct kvm_vcpu *vcpu)
  4486. {
  4487. return emulate_instruction(vcpu, 0) == EMULATE_DONE;
  4488. }
  4489. static int handle_invlpg(struct kvm_vcpu *vcpu)
  4490. {
  4491. unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
  4492. kvm_mmu_invlpg(vcpu, exit_qualification);
  4493. skip_emulated_instruction(vcpu);
  4494. return 1;
  4495. }
  4496. static int handle_rdpmc(struct kvm_vcpu *vcpu)
  4497. {
  4498. int err;
  4499. err = kvm_rdpmc(vcpu);
  4500. kvm_complete_insn_gp(vcpu, err);
  4501. return 1;
  4502. }
  4503. static int handle_wbinvd(struct kvm_vcpu *vcpu)
  4504. {
  4505. skip_emulated_instruction(vcpu);
  4506. kvm_emulate_wbinvd(vcpu);
  4507. return 1;
  4508. }
  4509. static int handle_xsetbv(struct kvm_vcpu *vcpu)
  4510. {
  4511. u64 new_bv = kvm_read_edx_eax(vcpu);
  4512. u32 index = kvm_register_read(vcpu, VCPU_REGS_RCX);
  4513. if (kvm_set_xcr(vcpu, index, new_bv) == 0)
  4514. skip_emulated_instruction(vcpu);
  4515. return 1;
  4516. }
  4517. static int handle_apic_access(struct kvm_vcpu *vcpu)
  4518. {
  4519. if (likely(fasteoi)) {
  4520. unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
  4521. int access_type, offset;
  4522. access_type = exit_qualification & APIC_ACCESS_TYPE;
  4523. offset = exit_qualification & APIC_ACCESS_OFFSET;
  4524. /*
4525. * A sane guest uses MOV to write EOI, and the written value
4526. * does not matter. So take a short-cut here and avoid the
4527. * heavy instruction emulation.
  4528. */
  4529. if ((access_type == TYPE_LINEAR_APIC_INST_WRITE) &&
  4530. (offset == APIC_EOI)) {
  4531. kvm_lapic_set_eoi(vcpu);
  4532. skip_emulated_instruction(vcpu);
  4533. return 1;
  4534. }
  4535. }
  4536. return emulate_instruction(vcpu, 0) == EMULATE_DONE;
  4537. }
  4538. static int handle_apic_eoi_induced(struct kvm_vcpu *vcpu)
  4539. {
  4540. unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
  4541. int vector = exit_qualification & 0xff;
  4542. /* EOI-induced VM exit is trap-like and thus no need to adjust IP */
  4543. kvm_apic_set_eoi_accelerated(vcpu, vector);
  4544. return 1;
  4545. }
  4546. static int handle_apic_write(struct kvm_vcpu *vcpu)
  4547. {
  4548. unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
  4549. u32 offset = exit_qualification & 0xfff;
  4550. /* APIC-write VM exit is trap-like and thus no need to adjust IP */
  4551. kvm_apic_write_nodecode(vcpu, offset);
  4552. return 1;
  4553. }
  4554. static int handle_task_switch(struct kvm_vcpu *vcpu)
  4555. {
  4556. struct vcpu_vmx *vmx = to_vmx(vcpu);
  4557. unsigned long exit_qualification;
  4558. bool has_error_code = false;
  4559. u32 error_code = 0;
  4560. u16 tss_selector;
  4561. int reason, type, idt_v, idt_index;
  4562. idt_v = (vmx->idt_vectoring_info & VECTORING_INFO_VALID_MASK);
  4563. idt_index = (vmx->idt_vectoring_info & VECTORING_INFO_VECTOR_MASK);
  4564. type = (vmx->idt_vectoring_info & VECTORING_INFO_TYPE_MASK);
  4565. exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
  4566. reason = (u32)exit_qualification >> 30;
  4567. if (reason == TASK_SWITCH_GATE && idt_v) {
  4568. switch (type) {
  4569. case INTR_TYPE_NMI_INTR:
  4570. vcpu->arch.nmi_injected = false;
  4571. vmx_set_nmi_mask(vcpu, true);
  4572. break;
  4573. case INTR_TYPE_EXT_INTR:
  4574. case INTR_TYPE_SOFT_INTR:
  4575. kvm_clear_interrupt_queue(vcpu);
  4576. break;
  4577. case INTR_TYPE_HARD_EXCEPTION:
  4578. if (vmx->idt_vectoring_info &
  4579. VECTORING_INFO_DELIVER_CODE_MASK) {
  4580. has_error_code = true;
  4581. error_code =
  4582. vmcs_read32(IDT_VECTORING_ERROR_CODE);
  4583. }
  4584. /* fall through */
  4585. case INTR_TYPE_SOFT_EXCEPTION:
  4586. kvm_clear_exception_queue(vcpu);
  4587. break;
  4588. default:
  4589. break;
  4590. }
  4591. }
  4592. tss_selector = exit_qualification;
  4593. if (!idt_v || (type != INTR_TYPE_HARD_EXCEPTION &&
  4594. type != INTR_TYPE_EXT_INTR &&
  4595. type != INTR_TYPE_NMI_INTR))
  4596. skip_emulated_instruction(vcpu);
  4597. if (kvm_task_switch(vcpu, tss_selector,
  4598. type == INTR_TYPE_SOFT_INTR ? idt_index : -1, reason,
  4599. has_error_code, error_code) == EMULATE_FAIL) {
  4600. vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
  4601. vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
  4602. vcpu->run->internal.ndata = 0;
  4603. return 0;
  4604. }
  4605. /* clear all local breakpoint enable flags */
  4606. vmcs_writel(GUEST_DR7, vmcs_readl(GUEST_DR7) & ~55);
  4607. /*
  4608. * TODO: What about debug traps on tss switch?
  4609. * Are we supposed to inject them and update dr6?
  4610. */
  4611. return 1;
  4612. }
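/*
 * Worked example of the task-switch exit qualification used above: a
 * "jmp" to a TSS selector of 0x48 produces an exit qualification whose
 * low 16 bits hold the selector (0x48) and whose bits 31:30 hold the
 * source of the switch, 2 (TASK_SWITCH_JMP); CALL, IRET and a task gate
 * in the IDT are reported as 0, 1 and 3 respectively.
 */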
  4613. static int handle_ept_violation(struct kvm_vcpu *vcpu)
  4614. {
  4615. unsigned long exit_qualification;
  4616. gpa_t gpa;
  4617. u32 error_code;
  4618. int gla_validity;
  4619. exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
  4620. gla_validity = (exit_qualification >> 7) & 0x3;
  4621. if (gla_validity != 0x3 && gla_validity != 0x1 && gla_validity != 0) {
  4622. printk(KERN_ERR "EPT: Handling EPT violation failed!\n");
  4623. printk(KERN_ERR "EPT: GPA: 0x%lx, GVA: 0x%lx\n",
  4624. (long unsigned int)vmcs_read64(GUEST_PHYSICAL_ADDRESS),
  4625. vmcs_readl(GUEST_LINEAR_ADDRESS));
  4626. printk(KERN_ERR "EPT: Exit qualification is 0x%lx\n",
  4627. (long unsigned int)exit_qualification);
  4628. vcpu->run->exit_reason = KVM_EXIT_UNKNOWN;
  4629. vcpu->run->hw.hardware_exit_reason = EXIT_REASON_EPT_VIOLATION;
  4630. return 0;
  4631. }
  4632. /*
4633. * If the EPT violation happened while executing IRET from an NMI,
4634. * the "blocked by NMI" bit has to be set before the next VM entry.
  4635. * There are errata that may cause this bit to not be set:
  4636. * AAK134, BY25.
  4637. */
  4638. if (!(to_vmx(vcpu)->idt_vectoring_info & VECTORING_INFO_VALID_MASK) &&
  4639. cpu_has_virtual_nmis() &&
  4640. (exit_qualification & INTR_INFO_UNBLOCK_NMI))
  4641. vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO, GUEST_INTR_STATE_NMI);
  4642. gpa = vmcs_read64(GUEST_PHYSICAL_ADDRESS);
  4643. trace_kvm_page_fault(gpa, exit_qualification);
4644. /* Is it a write fault? */
  4645. error_code = exit_qualification & (1U << 1);
4646. /* Is it a fetch fault? */
  4647. error_code |= (exit_qualification & (1U << 2)) << 2;
4648. /* Is the ept page-table entry present? */
  4649. error_code |= (exit_qualification >> 3) & 0x1;
  4650. vcpu->arch.exit_qualification = exit_qualification;
  4651. return kvm_mmu_page_fault(vcpu, gpa, error_code, NULL, 0);
  4652. }
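/*
 * Worked example of the error-code construction above: a guest write to a
 * page whose EPT entry is present but read-only produces an exit
 * qualification with bit 1 set (write access) and bit 3 set (the entry
 * permitted reads), e.g. 0xa. The code then builds error_code = 0x3, i.e.
 * PFERR_WRITE_MASK | PFERR_PRESENT_MASK, which is what kvm_mmu_page_fault()
 * expects for a write fault on a present translation.
 */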
  4653. static u64 ept_rsvd_mask(u64 spte, int level)
  4654. {
  4655. int i;
  4656. u64 mask = 0;
  4657. for (i = 51; i > boot_cpu_data.x86_phys_bits; i--)
  4658. mask |= (1ULL << i);
  4659. if (level > 2)
  4660. /* bits 7:3 reserved */
  4661. mask |= 0xf8;
  4662. else if (level == 2) {
  4663. if (spte & (1ULL << 7))
4664. /* 2MB page, bits 20:12 reserved */
  4665. mask |= 0x1ff000;
  4666. else
  4667. /* bits 6:3 reserved */
  4668. mask |= 0x78;
  4669. }
  4670. return mask;
  4671. }
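/*
 * Worked example: on a CPU with boot_cpu_data.x86_phys_bits == 40 the loop
 * above marks bits 51:41 as reserved at every level. For a level-2 entry
 * with bit 7 set (a 2MB mapping) bits 20:12 are reserved as well, so
 * ept_rsvd_mask() returns 0x000ffe00001ff000 in that case.
 */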
  4672. static void ept_misconfig_inspect_spte(struct kvm_vcpu *vcpu, u64 spte,
  4673. int level)
  4674. {
  4675. printk(KERN_ERR "%s: spte 0x%llx level %d\n", __func__, spte, level);
  4676. /* 010b (write-only) */
  4677. WARN_ON((spte & 0x7) == 0x2);
  4678. /* 110b (write/execute) */
  4679. WARN_ON((spte & 0x7) == 0x6);
  4680. /* 100b (execute-only) and value not supported by logical processor */
  4681. if (!cpu_has_vmx_ept_execute_only())
  4682. WARN_ON((spte & 0x7) == 0x4);
  4683. /* not 000b */
  4684. if ((spte & 0x7)) {
  4685. u64 rsvd_bits = spte & ept_rsvd_mask(spte, level);
  4686. if (rsvd_bits != 0) {
  4687. printk(KERN_ERR "%s: rsvd_bits = 0x%llx\n",
  4688. __func__, rsvd_bits);
  4689. WARN_ON(1);
  4690. }
  4691. if (level == 1 || (level == 2 && (spte & (1ULL << 7)))) {
  4692. u64 ept_mem_type = (spte & 0x38) >> 3;
  4693. if (ept_mem_type == 2 || ept_mem_type == 3 ||
  4694. ept_mem_type == 7) {
  4695. printk(KERN_ERR "%s: ept_mem_type=0x%llx\n",
  4696. __func__, ept_mem_type);
  4697. WARN_ON(1);
  4698. }
  4699. }
  4700. }
  4701. }
  4702. static int handle_ept_misconfig(struct kvm_vcpu *vcpu)
  4703. {
  4704. u64 sptes[4];
  4705. int nr_sptes, i, ret;
  4706. gpa_t gpa;
  4707. gpa = vmcs_read64(GUEST_PHYSICAL_ADDRESS);
  4708. ret = handle_mmio_page_fault_common(vcpu, gpa, true);
  4709. if (likely(ret == RET_MMIO_PF_EMULATE))
  4710. return x86_emulate_instruction(vcpu, gpa, 0, NULL, 0) ==
  4711. EMULATE_DONE;
  4712. if (unlikely(ret == RET_MMIO_PF_INVALID))
  4713. return kvm_mmu_page_fault(vcpu, gpa, 0, NULL, 0);
  4714. if (unlikely(ret == RET_MMIO_PF_RETRY))
  4715. return 1;
  4716. /* It is the real ept misconfig */
  4717. printk(KERN_ERR "EPT: Misconfiguration.\n");
  4718. printk(KERN_ERR "EPT: GPA: 0x%llx\n", gpa);
  4719. nr_sptes = kvm_mmu_get_spte_hierarchy(vcpu, gpa, sptes);
  4720. for (i = PT64_ROOT_LEVEL; i > PT64_ROOT_LEVEL - nr_sptes; --i)
  4721. ept_misconfig_inspect_spte(vcpu, sptes[i-1], i);
  4722. vcpu->run->exit_reason = KVM_EXIT_UNKNOWN;
  4723. vcpu->run->hw.hardware_exit_reason = EXIT_REASON_EPT_MISCONFIG;
  4724. return 0;
  4725. }
  4726. static int handle_nmi_window(struct kvm_vcpu *vcpu)
  4727. {
  4728. u32 cpu_based_vm_exec_control;
  4729. /* clear pending NMI */
  4730. cpu_based_vm_exec_control = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL);
  4731. cpu_based_vm_exec_control &= ~CPU_BASED_VIRTUAL_NMI_PENDING;
  4732. vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control);
  4733. ++vcpu->stat.nmi_window_exits;
  4734. kvm_make_request(KVM_REQ_EVENT, vcpu);
  4735. return 1;
  4736. }
  4737. static int handle_invalid_guest_state(struct kvm_vcpu *vcpu)
  4738. {
  4739. struct vcpu_vmx *vmx = to_vmx(vcpu);
  4740. enum emulation_result err = EMULATE_DONE;
  4741. int ret = 1;
  4742. u32 cpu_exec_ctrl;
  4743. bool intr_window_requested;
  4744. unsigned count = 130;
  4745. cpu_exec_ctrl = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL);
  4746. intr_window_requested = cpu_exec_ctrl & CPU_BASED_VIRTUAL_INTR_PENDING;
  4747. while (!guest_state_valid(vcpu) && count-- != 0) {
  4748. if (intr_window_requested && vmx_interrupt_allowed(vcpu))
  4749. return handle_interrupt_window(&vmx->vcpu);
  4750. if (test_bit(KVM_REQ_EVENT, &vcpu->requests))
  4751. return 1;
  4752. err = emulate_instruction(vcpu, EMULTYPE_NO_REEXECUTE);
  4753. if (err == EMULATE_USER_EXIT) {
  4754. ++vcpu->stat.mmio_exits;
  4755. ret = 0;
  4756. goto out;
  4757. }
  4758. if (err != EMULATE_DONE) {
  4759. vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
  4760. vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
  4761. vcpu->run->internal.ndata = 0;
  4762. return 0;
  4763. }
  4764. if (vcpu->arch.halt_request) {
  4765. vcpu->arch.halt_request = 0;
  4766. ret = kvm_emulate_halt(vcpu);
  4767. goto out;
  4768. }
  4769. if (signal_pending(current))
  4770. goto out;
  4771. if (need_resched())
  4772. schedule();
  4773. }
  4774. vmx->emulation_required = emulation_required(vcpu);
  4775. out:
  4776. return ret;
  4777. }
  4778. /*
4779. * Indicates a vcpu busy-waiting on a spinlock. We do not enable plain PAUSE
4780. * exiting, so we only get here on CPUs with PAUSE-loop exiting.
  4781. */
  4782. static int handle_pause(struct kvm_vcpu *vcpu)
  4783. {
  4784. skip_emulated_instruction(vcpu);
  4785. kvm_vcpu_on_spin(vcpu);
  4786. return 1;
  4787. }
  4788. static int handle_invalid_op(struct kvm_vcpu *vcpu)
  4789. {
  4790. kvm_queue_exception(vcpu, UD_VECTOR);
  4791. return 1;
  4792. }
  4793. /*
  4794. * To run an L2 guest, we need a vmcs02 based on the L1-specified vmcs12.
  4795. * We could reuse a single VMCS for all the L2 guests, but we also want the
  4796. * option to allocate a separate vmcs02 for each separate loaded vmcs12 - this
  4797. * allows keeping them loaded on the processor, and in the future will allow
  4798. * optimizations where prepare_vmcs02 doesn't need to set all the fields on
  4799. * every entry if they never change.
  4800. * So we keep, in vmx->nested.vmcs02_pool, a cache of size VMCS02_POOL_SIZE
4801. * (>=0) with a vmcs02 for each recently loaded vmcs12, most recent first.
  4802. *
  4803. * The following functions allocate and free a vmcs02 in this pool.
  4804. */
  4805. /* Get a VMCS from the pool to use as vmcs02 for the current vmcs12. */
  4806. static struct loaded_vmcs *nested_get_current_vmcs02(struct vcpu_vmx *vmx)
  4807. {
  4808. struct vmcs02_list *item;
  4809. list_for_each_entry(item, &vmx->nested.vmcs02_pool, list)
  4810. if (item->vmptr == vmx->nested.current_vmptr) {
  4811. list_move(&item->list, &vmx->nested.vmcs02_pool);
  4812. return &item->vmcs02;
  4813. }
  4814. if (vmx->nested.vmcs02_num >= max(VMCS02_POOL_SIZE, 1)) {
  4815. /* Recycle the least recently used VMCS. */
  4816. item = list_entry(vmx->nested.vmcs02_pool.prev,
  4817. struct vmcs02_list, list);
  4818. item->vmptr = vmx->nested.current_vmptr;
  4819. list_move(&item->list, &vmx->nested.vmcs02_pool);
  4820. return &item->vmcs02;
  4821. }
  4822. /* Create a new VMCS */
  4823. item = kmalloc(sizeof(struct vmcs02_list), GFP_KERNEL);
  4824. if (!item)
  4825. return NULL;
  4826. item->vmcs02.vmcs = alloc_vmcs();
  4827. if (!item->vmcs02.vmcs) {
  4828. kfree(item);
  4829. return NULL;
  4830. }
  4831. loaded_vmcs_init(&item->vmcs02);
  4832. item->vmptr = vmx->nested.current_vmptr;
  4833. list_add(&(item->list), &(vmx->nested.vmcs02_pool));
  4834. vmx->nested.vmcs02_num++;
  4835. return &item->vmcs02;
  4836. }
  4837. /* Free and remove from pool a vmcs02 saved for a vmcs12 (if there is one) */
  4838. static void nested_free_vmcs02(struct vcpu_vmx *vmx, gpa_t vmptr)
  4839. {
  4840. struct vmcs02_list *item;
  4841. list_for_each_entry(item, &vmx->nested.vmcs02_pool, list)
  4842. if (item->vmptr == vmptr) {
  4843. free_loaded_vmcs(&item->vmcs02);
  4844. list_del(&item->list);
  4845. kfree(item);
  4846. vmx->nested.vmcs02_num--;
  4847. return;
  4848. }
  4849. }
  4850. /*
4851. * Free all VMCSs saved for this vcpu, except the one pointed to by
  4852. * vmx->loaded_vmcs. These include the VMCSs in vmcs02_pool (except the one
  4853. * currently used, if running L2), and vmcs01 when running L2.
  4854. */
  4855. static void nested_free_all_saved_vmcss(struct vcpu_vmx *vmx)
  4856. {
  4857. struct vmcs02_list *item, *n;
  4858. list_for_each_entry_safe(item, n, &vmx->nested.vmcs02_pool, list) {
  4859. if (vmx->loaded_vmcs != &item->vmcs02)
  4860. free_loaded_vmcs(&item->vmcs02);
  4861. list_del(&item->list);
  4862. kfree(item);
  4863. }
  4864. vmx->nested.vmcs02_num = 0;
  4865. if (vmx->loaded_vmcs != &vmx->vmcs01)
  4866. free_loaded_vmcs(&vmx->vmcs01);
  4867. }
  4868. /*
  4869. * The following 3 functions, nested_vmx_succeed()/failValid()/failInvalid(),
  4870. * set the success or error code of an emulated VMX instruction, as specified
  4871. * by Vol 2B, VMX Instruction Reference, "Conventions".
  4872. */
  4873. static void nested_vmx_succeed(struct kvm_vcpu *vcpu)
  4874. {
  4875. vmx_set_rflags(vcpu, vmx_get_rflags(vcpu)
  4876. & ~(X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF |
  4877. X86_EFLAGS_ZF | X86_EFLAGS_SF | X86_EFLAGS_OF));
  4878. }
  4879. static void nested_vmx_failInvalid(struct kvm_vcpu *vcpu)
  4880. {
  4881. vmx_set_rflags(vcpu, (vmx_get_rflags(vcpu)
  4882. & ~(X86_EFLAGS_PF | X86_EFLAGS_AF | X86_EFLAGS_ZF |
  4883. X86_EFLAGS_SF | X86_EFLAGS_OF))
  4884. | X86_EFLAGS_CF);
  4885. }
  4886. static void nested_vmx_failValid(struct kvm_vcpu *vcpu,
  4887. u32 vm_instruction_error)
  4888. {
  4889. if (to_vmx(vcpu)->nested.current_vmptr == -1ull) {
  4890. /*
  4891. * failValid writes the error number to the current VMCS, which
4892. * can't be done if there isn't a current VMCS.
  4893. */
  4894. nested_vmx_failInvalid(vcpu);
  4895. return;
  4896. }
  4897. vmx_set_rflags(vcpu, (vmx_get_rflags(vcpu)
  4898. & ~(X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF |
  4899. X86_EFLAGS_SF | X86_EFLAGS_OF))
  4900. | X86_EFLAGS_ZF);
  4901. get_vmcs12(vcpu)->vm_instruction_error = vm_instruction_error;
  4902. /*
  4903. * We don't need to force a shadow sync because
  4904. * VM_INSTRUCTION_ERROR is not shadowed
  4905. */
  4906. }
  4907. /*
  4908. * Emulate the VMXON instruction.
  4909. * Currently, we just remember that VMX is active, and do not save or even
  4910. * inspect the argument to VMXON (the so-called "VMXON pointer") because we
  4911. * do not currently need to store anything in that guest-allocated memory
4912. * region. Consequently, VMCLEAR and VMPTRLD also do not verify that their
  4913. * argument is different from the VMXON pointer (which the spec says they do).
  4914. */
  4915. static int handle_vmon(struct kvm_vcpu *vcpu)
  4916. {
  4917. struct kvm_segment cs;
  4918. struct vcpu_vmx *vmx = to_vmx(vcpu);
  4919. struct vmcs *shadow_vmcs;
  4920. const u64 VMXON_NEEDED_FEATURES = FEATURE_CONTROL_LOCKED
  4921. | FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX;
  4922. /* The Intel VMX Instruction Reference lists a bunch of bits that
  4923. * are prerequisite to running VMXON, most notably cr4.VMXE must be
  4924. * set to 1 (see vmx_set_cr4() for when we allow the guest to set this).
  4925. * Otherwise, we should fail with #UD. We test these now:
  4926. */
  4927. if (!kvm_read_cr4_bits(vcpu, X86_CR4_VMXE) ||
  4928. !kvm_read_cr0_bits(vcpu, X86_CR0_PE) ||
  4929. (vmx_get_rflags(vcpu) & X86_EFLAGS_VM)) {
  4930. kvm_queue_exception(vcpu, UD_VECTOR);
  4931. return 1;
  4932. }
  4933. vmx_get_segment(vcpu, &cs, VCPU_SREG_CS);
  4934. if (is_long_mode(vcpu) && !cs.l) {
  4935. kvm_queue_exception(vcpu, UD_VECTOR);
  4936. return 1;
  4937. }
  4938. if (vmx_get_cpl(vcpu)) {
  4939. kvm_inject_gp(vcpu, 0);
  4940. return 1;
  4941. }
  4942. if (vmx->nested.vmxon) {
  4943. nested_vmx_failValid(vcpu, VMXERR_VMXON_IN_VMX_ROOT_OPERATION);
  4944. skip_emulated_instruction(vcpu);
  4945. return 1;
  4946. }
  4947. if ((vmx->nested.msr_ia32_feature_control & VMXON_NEEDED_FEATURES)
  4948. != VMXON_NEEDED_FEATURES) {
  4949. kvm_inject_gp(vcpu, 0);
  4950. return 1;
  4951. }
  4952. if (enable_shadow_vmcs) {
  4953. shadow_vmcs = alloc_vmcs();
  4954. if (!shadow_vmcs)
  4955. return -ENOMEM;
  4956. /* mark vmcs as shadow */
  4957. shadow_vmcs->revision_id |= (1u << 31);
  4958. /* init shadow vmcs */
  4959. vmcs_clear(shadow_vmcs);
  4960. vmx->nested.current_shadow_vmcs = shadow_vmcs;
  4961. }
  4962. INIT_LIST_HEAD(&(vmx->nested.vmcs02_pool));
  4963. vmx->nested.vmcs02_num = 0;
  4964. vmx->nested.vmxon = true;
  4965. skip_emulated_instruction(vcpu);
  4966. nested_vmx_succeed(vcpu);
  4967. return 1;
  4968. }
  4969. /*
  4970. * Intel's VMX Instruction Reference specifies a common set of prerequisites
  4971. * for running VMX instructions (except VMXON, whose prerequisites are
  4972. * slightly different). It also specifies what exception to inject otherwise.
  4973. */
  4974. static int nested_vmx_check_permission(struct kvm_vcpu *vcpu)
  4975. {
  4976. struct kvm_segment cs;
  4977. struct vcpu_vmx *vmx = to_vmx(vcpu);
  4978. if (!vmx->nested.vmxon) {
  4979. kvm_queue_exception(vcpu, UD_VECTOR);
  4980. return 0;
  4981. }
  4982. vmx_get_segment(vcpu, &cs, VCPU_SREG_CS);
  4983. if ((vmx_get_rflags(vcpu) & X86_EFLAGS_VM) ||
  4984. (is_long_mode(vcpu) && !cs.l)) {
  4985. kvm_queue_exception(vcpu, UD_VECTOR);
  4986. return 0;
  4987. }
  4988. if (vmx_get_cpl(vcpu)) {
  4989. kvm_inject_gp(vcpu, 0);
  4990. return 0;
  4991. }
  4992. return 1;
  4993. }
  4994. static inline void nested_release_vmcs12(struct vcpu_vmx *vmx)
  4995. {
  4996. u32 exec_control;
  4997. if (enable_shadow_vmcs) {
  4998. if (vmx->nested.current_vmcs12 != NULL) {
  4999. /* copy to memory all shadowed fields in case
  5000. they were modified */
  5001. copy_shadow_to_vmcs12(vmx);
  5002. vmx->nested.sync_shadow_vmcs = false;
  5003. exec_control = vmcs_read32(SECONDARY_VM_EXEC_CONTROL);
  5004. exec_control &= ~SECONDARY_EXEC_SHADOW_VMCS;
  5005. vmcs_write32(SECONDARY_VM_EXEC_CONTROL, exec_control);
  5006. vmcs_write64(VMCS_LINK_POINTER, -1ull);
  5007. }
  5008. }
  5009. kunmap(vmx->nested.current_vmcs12_page);
  5010. nested_release_page(vmx->nested.current_vmcs12_page);
  5011. }
  5012. /*
  5013. * Free whatever needs to be freed from vmx->nested when L1 goes down, or
  5014. * just stops using VMX.
  5015. */
  5016. static void free_nested(struct vcpu_vmx *vmx)
  5017. {
  5018. if (!vmx->nested.vmxon)
  5019. return;
  5020. vmx->nested.vmxon = false;
  5021. if (vmx->nested.current_vmptr != -1ull) {
  5022. nested_release_vmcs12(vmx);
  5023. vmx->nested.current_vmptr = -1ull;
  5024. vmx->nested.current_vmcs12 = NULL;
  5025. }
  5026. if (enable_shadow_vmcs)
  5027. free_vmcs(vmx->nested.current_shadow_vmcs);
  5028. /* Unpin physical memory we referred to in current vmcs02 */
  5029. if (vmx->nested.apic_access_page) {
  5030. nested_release_page(vmx->nested.apic_access_page);
  5031. vmx->nested.apic_access_page = 0;
  5032. }
  5033. nested_free_all_saved_vmcss(vmx);
  5034. }
  5035. /* Emulate the VMXOFF instruction */
  5036. static int handle_vmoff(struct kvm_vcpu *vcpu)
  5037. {
  5038. if (!nested_vmx_check_permission(vcpu))
  5039. return 1;
  5040. free_nested(to_vmx(vcpu));
  5041. skip_emulated_instruction(vcpu);
  5042. nested_vmx_succeed(vcpu);
  5043. return 1;
  5044. }
  5045. /*
  5046. * Decode the memory-address operand of a vmx instruction, as recorded on an
  5047. * exit caused by such an instruction (run by a guest hypervisor).
5048. * On success, returns 0. When the operand is invalid, returns 1 and injects
5049. * a #UD or #GP.
  5050. */
  5051. static int get_vmx_mem_address(struct kvm_vcpu *vcpu,
  5052. unsigned long exit_qualification,
  5053. u32 vmx_instruction_info, gva_t *ret)
  5054. {
  5055. /*
  5056. * According to Vol. 3B, "Information for VM Exits Due to Instruction
  5057. * Execution", on an exit, vmx_instruction_info holds most of the
  5058. * addressing components of the operand. Only the displacement part
  5059. * is put in exit_qualification (see 3B, "Basic VM-Exit Information").
  5060. * For how an actual address is calculated from all these components,
  5061. * refer to Vol. 1, "Operand Addressing".
  5062. */
  5063. int scaling = vmx_instruction_info & 3;
  5064. int addr_size = (vmx_instruction_info >> 7) & 7;
  5065. bool is_reg = vmx_instruction_info & (1u << 10);
  5066. int seg_reg = (vmx_instruction_info >> 15) & 7;
  5067. int index_reg = (vmx_instruction_info >> 18) & 0xf;
  5068. bool index_is_valid = !(vmx_instruction_info & (1u << 22));
  5069. int base_reg = (vmx_instruction_info >> 23) & 0xf;
  5070. bool base_is_valid = !(vmx_instruction_info & (1u << 27));
  5071. if (is_reg) {
  5072. kvm_queue_exception(vcpu, UD_VECTOR);
  5073. return 1;
  5074. }
  5075. /* Addr = segment_base + offset */
  5076. /* offset = base + [index * scale] + displacement */
  5077. *ret = vmx_get_segment_base(vcpu, seg_reg);
  5078. if (base_is_valid)
  5079. *ret += kvm_register_read(vcpu, base_reg);
  5080. if (index_is_valid)
  5081. *ret += kvm_register_read(vcpu, index_reg)<<scaling;
  5082. *ret += exit_qualification; /* holds the displacement */
  5083. if (addr_size == 1) /* 32 bit */
  5084. *ret &= 0xffffffff;
  5085. /*
  5086. * TODO: throw #GP (and return 1) in various cases that the VM*
  5087. * instructions require it - e.g., offset beyond segment limit,
  5088. * unusable or unreadable/unwritable segment, non-canonical 64-bit
  5089. * address, and so on. Currently these are not checked.
  5090. */
  5091. return 0;
  5092. }
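/*
 * Worked example of the decoding above (illustrative only): for a
 * 64-bit-mode "vmptrld (%rax)" the VM-exit instruction-information field
 * has scaling = 0, addr_size = 2 (64 bit), the register-operand bit clear,
 * seg_reg = 3 (DS), the index-invalid bit set, and base_reg = 0 (RAX) with
 * the base-invalid bit clear. The operand address is then computed as
 * DS.base + RAX + displacement, with the displacement taken from the exit
 * qualification.
 */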
  5093. /* Emulate the VMCLEAR instruction */
  5094. static int handle_vmclear(struct kvm_vcpu *vcpu)
  5095. {
  5096. struct vcpu_vmx *vmx = to_vmx(vcpu);
  5097. gva_t gva;
  5098. gpa_t vmptr;
  5099. struct vmcs12 *vmcs12;
  5100. struct page *page;
  5101. struct x86_exception e;
  5102. if (!nested_vmx_check_permission(vcpu))
  5103. return 1;
  5104. if (get_vmx_mem_address(vcpu, vmcs_readl(EXIT_QUALIFICATION),
  5105. vmcs_read32(VMX_INSTRUCTION_INFO), &gva))
  5106. return 1;
  5107. if (kvm_read_guest_virt(&vcpu->arch.emulate_ctxt, gva, &vmptr,
  5108. sizeof(vmptr), &e)) {
  5109. kvm_inject_page_fault(vcpu, &e);
  5110. return 1;
  5111. }
  5112. if (!IS_ALIGNED(vmptr, PAGE_SIZE)) {
  5113. nested_vmx_failValid(vcpu, VMXERR_VMCLEAR_INVALID_ADDRESS);
  5114. skip_emulated_instruction(vcpu);
  5115. return 1;
  5116. }
  5117. if (vmptr == vmx->nested.current_vmptr) {
  5118. nested_release_vmcs12(vmx);
  5119. vmx->nested.current_vmptr = -1ull;
  5120. vmx->nested.current_vmcs12 = NULL;
  5121. }
  5122. page = nested_get_page(vcpu, vmptr);
  5123. if (page == NULL) {
  5124. /*
  5125. * For accurate processor emulation, VMCLEAR beyond available
  5126. * physical memory should do nothing at all. However, it is
  5127. * possible that a nested vmx bug, not a guest hypervisor bug,
  5128. * resulted in this case, so let's shut down before doing any
  5129. * more damage:
  5130. */
  5131. kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
  5132. return 1;
  5133. }
  5134. vmcs12 = kmap(page);
  5135. vmcs12->launch_state = 0;
  5136. kunmap(page);
  5137. nested_release_page(page);
  5138. nested_free_vmcs02(vmx, vmptr);
  5139. skip_emulated_instruction(vcpu);
  5140. nested_vmx_succeed(vcpu);
  5141. return 1;
  5142. }
  5143. static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch);
  5144. /* Emulate the VMLAUNCH instruction */
  5145. static int handle_vmlaunch(struct kvm_vcpu *vcpu)
  5146. {
  5147. return nested_vmx_run(vcpu, true);
  5148. }
  5149. /* Emulate the VMRESUME instruction */
  5150. static int handle_vmresume(struct kvm_vcpu *vcpu)
  5151. {
  5152. return nested_vmx_run(vcpu, false);
  5153. }
  5154. enum vmcs_field_type {
  5155. VMCS_FIELD_TYPE_U16 = 0,
  5156. VMCS_FIELD_TYPE_U64 = 1,
  5157. VMCS_FIELD_TYPE_U32 = 2,
  5158. VMCS_FIELD_TYPE_NATURAL_WIDTH = 3
  5159. };
  5160. static inline int vmcs_field_type(unsigned long field)
  5161. {
  5162. if (0x1 & field) /* the *_HIGH fields are all 32 bit */
  5163. return VMCS_FIELD_TYPE_U32;
5164. return (field >> 13) & 0x3;
  5165. }
  5166. static inline int vmcs_field_readonly(unsigned long field)
  5167. {
  5168. return (((field >> 10) & 0x3) == 1);
  5169. }
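/*
 * Editorial sketch (not part of the original file): the field-encoding bits
 * used by the two helpers above, evaluated for two well-known encodings
 * (GUEST_RIP is 0x681e and VM_EXIT_REASON is 0x4402 in the SDM encoding).
 */
static void example_vmcs_field_encoding(void)
{
	int rip_is_natural_width = ((0x681e >> 13) & 0x3) == 3;	/* true */
	int exit_reason_readonly = ((0x4402 >> 10) & 0x3) == 1;	/* true */

	(void)rip_is_natural_width;
	(void)exit_reason_readonly;
}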
  5170. /*
  5171. * Read a vmcs12 field. Since these can have varying lengths and we return
  5172. * one type, we chose the biggest type (u64) and zero-extend the return value
  5173. * to that size. Note that the caller, handle_vmread, might need to use only
  5174. * some of the bits we return here (e.g., on 32-bit guests, only 32 bits of
  5175. * 64-bit fields are to be returned).
  5176. */
  5177. static inline bool vmcs12_read_any(struct kvm_vcpu *vcpu,
  5178. unsigned long field, u64 *ret)
  5179. {
  5180. short offset = vmcs_field_to_offset(field);
  5181. char *p;
  5182. if (offset < 0)
  5183. return 0;
  5184. p = ((char *)(get_vmcs12(vcpu))) + offset;
  5185. switch (vmcs_field_type(field)) {
  5186. case VMCS_FIELD_TYPE_NATURAL_WIDTH:
  5187. *ret = *((natural_width *)p);
  5188. return 1;
  5189. case VMCS_FIELD_TYPE_U16:
  5190. *ret = *((u16 *)p);
  5191. return 1;
  5192. case VMCS_FIELD_TYPE_U32:
  5193. *ret = *((u32 *)p);
  5194. return 1;
  5195. case VMCS_FIELD_TYPE_U64:
  5196. *ret = *((u64 *)p);
  5197. return 1;
  5198. default:
  5199. return 0; /* can never happen. */
  5200. }
  5201. }
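/*
 * Editorial sketch (not part of the original file): whatever the field's
 * width, vmcs12_read_any() hands the value back zero-extended to 64 bits;
 * the caller decides how many of those bits to actually use.
 */
static unsigned long long example_zero_extend_field(unsigned short sel)
{
	unsigned long long value = sel;	/* e.g. 0x0010 -> 0x0000000000000010 */
	return value;
}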
  5202. static inline bool vmcs12_write_any(struct kvm_vcpu *vcpu,
  5203. unsigned long field, u64 field_value){
  5204. short offset = vmcs_field_to_offset(field);
  5205. char *p = ((char *) get_vmcs12(vcpu)) + offset;
  5206. if (offset < 0)
  5207. return false;
  5208. switch (vmcs_field_type(field)) {
  5209. case VMCS_FIELD_TYPE_U16:
  5210. *(u16 *)p = field_value;
  5211. return true;
  5212. case VMCS_FIELD_TYPE_U32:
  5213. *(u32 *)p = field_value;
  5214. return true;
  5215. case VMCS_FIELD_TYPE_U64:
  5216. *(u64 *)p = field_value;
  5217. return true;
  5218. case VMCS_FIELD_TYPE_NATURAL_WIDTH:
  5219. *(natural_width *)p = field_value;
  5220. return true;
  5221. default:
  5222. return false; /* can never happen. */
  5223. }
  5224. }
  5225. static void copy_shadow_to_vmcs12(struct vcpu_vmx *vmx)
  5226. {
  5227. int i;
  5228. unsigned long field;
  5229. u64 field_value;
  5230. struct vmcs *shadow_vmcs = vmx->nested.current_shadow_vmcs;
  5231. const unsigned long *fields = shadow_read_write_fields;
  5232. const int num_fields = max_shadow_read_write_fields;
  5233. vmcs_load(shadow_vmcs);
  5234. for (i = 0; i < num_fields; i++) {
  5235. field = fields[i];
  5236. switch (vmcs_field_type(field)) {
  5237. case VMCS_FIELD_TYPE_U16:
  5238. field_value = vmcs_read16(field);
  5239. break;
  5240. case VMCS_FIELD_TYPE_U32:
  5241. field_value = vmcs_read32(field);
  5242. break;
  5243. case VMCS_FIELD_TYPE_U64:
  5244. field_value = vmcs_read64(field);
  5245. break;
  5246. case VMCS_FIELD_TYPE_NATURAL_WIDTH:
  5247. field_value = vmcs_readl(field);
  5248. break;
  5249. }
  5250. vmcs12_write_any(&vmx->vcpu, field, field_value);
  5251. }
  5252. vmcs_clear(shadow_vmcs);
  5253. vmcs_load(vmx->loaded_vmcs->vmcs);
  5254. }
  5255. static void copy_vmcs12_to_shadow(struct vcpu_vmx *vmx)
  5256. {
  5257. const unsigned long *fields[] = {
  5258. shadow_read_write_fields,
  5259. shadow_read_only_fields
  5260. };
  5261. const int max_fields[] = {
  5262. max_shadow_read_write_fields,
  5263. max_shadow_read_only_fields
  5264. };
  5265. int i, q;
  5266. unsigned long field;
  5267. u64 field_value = 0;
  5268. struct vmcs *shadow_vmcs = vmx->nested.current_shadow_vmcs;
  5269. vmcs_load(shadow_vmcs);
  5270. for (q = 0; q < ARRAY_SIZE(fields); q++) {
  5271. for (i = 0; i < max_fields[q]; i++) {
  5272. field = fields[q][i];
  5273. vmcs12_read_any(&vmx->vcpu, field, &field_value);
  5274. switch (vmcs_field_type(field)) {
  5275. case VMCS_FIELD_TYPE_U16:
  5276. vmcs_write16(field, (u16)field_value);
  5277. break;
  5278. case VMCS_FIELD_TYPE_U32:
  5279. vmcs_write32(field, (u32)field_value);
  5280. break;
  5281. case VMCS_FIELD_TYPE_U64:
  5282. vmcs_write64(field, (u64)field_value);
  5283. break;
  5284. case VMCS_FIELD_TYPE_NATURAL_WIDTH:
  5285. vmcs_writel(field, (long)field_value);
  5286. break;
  5287. }
  5288. }
  5289. }
  5290. vmcs_clear(shadow_vmcs);
  5291. vmcs_load(vmx->loaded_vmcs->vmcs);
  5292. }
  5293. /*
  5294. * VMX instructions which assume a current vmcs12 (i.e., that VMPTRLD was
  5295. * used before) all generate the same failure when it is missing.
  5296. */
  5297. static int nested_vmx_check_vmcs12(struct kvm_vcpu *vcpu)
  5298. {
  5299. struct vcpu_vmx *vmx = to_vmx(vcpu);
  5300. if (vmx->nested.current_vmptr == -1ull) {
  5301. nested_vmx_failInvalid(vcpu);
  5302. skip_emulated_instruction(vcpu);
  5303. return 0;
  5304. }
  5305. return 1;
  5306. }
  5307. static int handle_vmread(struct kvm_vcpu *vcpu)
  5308. {
  5309. unsigned long field;
  5310. u64 field_value;
  5311. unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
  5312. u32 vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
  5313. gva_t gva = 0;
  5314. if (!nested_vmx_check_permission(vcpu) ||
  5315. !nested_vmx_check_vmcs12(vcpu))
  5316. return 1;
  5317. /* Decode instruction info and find the field to read */
  5318. field = kvm_register_read(vcpu, (((vmx_instruction_info) >> 28) & 0xf));
  5319. /* Read the field, zero-extended to a u64 field_value */
  5320. if (!vmcs12_read_any(vcpu, field, &field_value)) {
  5321. nested_vmx_failValid(vcpu, VMXERR_UNSUPPORTED_VMCS_COMPONENT);
  5322. skip_emulated_instruction(vcpu);
  5323. return 1;
  5324. }
  5325. /*
  5326. * Now copy part of this value to register or memory, as requested.
  5327. * Note that the number of bits actually copied is 32 or 64 depending
  5328. * on the guest's mode (32 or 64 bit), not on the given field's length.
  5329. */
  5330. if (vmx_instruction_info & (1u << 10)) {
  5331. kvm_register_write(vcpu, (((vmx_instruction_info) >> 3) & 0xf),
  5332. field_value);
  5333. } else {
  5334. if (get_vmx_mem_address(vcpu, exit_qualification,
  5335. vmx_instruction_info, &gva))
  5336. return 1;
  5337. /* _system ok, as nested_vmx_check_permission verified cpl=0 */
  5338. kvm_write_guest_virt_system(&vcpu->arch.emulate_ctxt, gva,
  5339. &field_value, (is_long_mode(vcpu) ? 8 : 4), NULL);
  5340. }
  5341. nested_vmx_succeed(vcpu);
  5342. skip_emulated_instruction(vcpu);
  5343. return 1;
  5344. }
  5345. static int handle_vmwrite(struct kvm_vcpu *vcpu)
  5346. {
  5347. unsigned long field;
  5348. gva_t gva;
  5349. unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
  5350. u32 vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
  5351. /* The value to write might be 32 or 64 bits, depending on L1's long
  5352. * mode, and eventually we need to write that into a field of several
  5353. * possible lengths. The code below first zero-extends the value to 64
5354. * bit (field_value), and then copies only the appropriate number of
  5355. * bits into the vmcs12 field.
  5356. */
  5357. u64 field_value = 0;
  5358. struct x86_exception e;
  5359. if (!nested_vmx_check_permission(vcpu) ||
  5360. !nested_vmx_check_vmcs12(vcpu))
  5361. return 1;
  5362. if (vmx_instruction_info & (1u << 10))
  5363. field_value = kvm_register_read(vcpu,
  5364. (((vmx_instruction_info) >> 3) & 0xf));
  5365. else {
  5366. if (get_vmx_mem_address(vcpu, exit_qualification,
  5367. vmx_instruction_info, &gva))
  5368. return 1;
  5369. if (kvm_read_guest_virt(&vcpu->arch.emulate_ctxt, gva,
  5370. &field_value, (is_long_mode(vcpu) ? 8 : 4), &e)) {
  5371. kvm_inject_page_fault(vcpu, &e);
  5372. return 1;
  5373. }
  5374. }
  5375. field = kvm_register_read(vcpu, (((vmx_instruction_info) >> 28) & 0xf));
  5376. if (vmcs_field_readonly(field)) {
  5377. nested_vmx_failValid(vcpu,
  5378. VMXERR_VMWRITE_READ_ONLY_VMCS_COMPONENT);
  5379. skip_emulated_instruction(vcpu);
  5380. return 1;
  5381. }
  5382. if (!vmcs12_write_any(vcpu, field, field_value)) {
  5383. nested_vmx_failValid(vcpu, VMXERR_UNSUPPORTED_VMCS_COMPONENT);
  5384. skip_emulated_instruction(vcpu);
  5385. return 1;
  5386. }
  5387. nested_vmx_succeed(vcpu);
  5388. skip_emulated_instruction(vcpu);
  5389. return 1;
  5390. }
  5391. /* Emulate the VMPTRLD instruction */
  5392. static int handle_vmptrld(struct kvm_vcpu *vcpu)
  5393. {
  5394. struct vcpu_vmx *vmx = to_vmx(vcpu);
  5395. gva_t gva;
  5396. gpa_t vmptr;
  5397. struct x86_exception e;
  5398. u32 exec_control;
  5399. if (!nested_vmx_check_permission(vcpu))
  5400. return 1;
  5401. if (get_vmx_mem_address(vcpu, vmcs_readl(EXIT_QUALIFICATION),
  5402. vmcs_read32(VMX_INSTRUCTION_INFO), &gva))
  5403. return 1;
  5404. if (kvm_read_guest_virt(&vcpu->arch.emulate_ctxt, gva, &vmptr,
  5405. sizeof(vmptr), &e)) {
  5406. kvm_inject_page_fault(vcpu, &e);
  5407. return 1;
  5408. }
  5409. if (!IS_ALIGNED(vmptr, PAGE_SIZE)) {
  5410. nested_vmx_failValid(vcpu, VMXERR_VMPTRLD_INVALID_ADDRESS);
  5411. skip_emulated_instruction(vcpu);
  5412. return 1;
  5413. }
  5414. if (vmx->nested.current_vmptr != vmptr) {
  5415. struct vmcs12 *new_vmcs12;
  5416. struct page *page;
  5417. page = nested_get_page(vcpu, vmptr);
  5418. if (page == NULL) {
  5419. nested_vmx_failInvalid(vcpu);
  5420. skip_emulated_instruction(vcpu);
  5421. return 1;
  5422. }
  5423. new_vmcs12 = kmap(page);
  5424. if (new_vmcs12->revision_id != VMCS12_REVISION) {
  5425. kunmap(page);
  5426. nested_release_page_clean(page);
  5427. nested_vmx_failValid(vcpu,
  5428. VMXERR_VMPTRLD_INCORRECT_VMCS_REVISION_ID);
  5429. skip_emulated_instruction(vcpu);
  5430. return 1;
  5431. }
  5432. if (vmx->nested.current_vmptr != -1ull)
  5433. nested_release_vmcs12(vmx);
  5434. vmx->nested.current_vmptr = vmptr;
  5435. vmx->nested.current_vmcs12 = new_vmcs12;
  5436. vmx->nested.current_vmcs12_page = page;
  5437. if (enable_shadow_vmcs) {
  5438. exec_control = vmcs_read32(SECONDARY_VM_EXEC_CONTROL);
  5439. exec_control |= SECONDARY_EXEC_SHADOW_VMCS;
  5440. vmcs_write32(SECONDARY_VM_EXEC_CONTROL, exec_control);
  5441. vmcs_write64(VMCS_LINK_POINTER,
  5442. __pa(vmx->nested.current_shadow_vmcs));
  5443. vmx->nested.sync_shadow_vmcs = true;
  5444. }
  5445. }
  5446. nested_vmx_succeed(vcpu);
  5447. skip_emulated_instruction(vcpu);
  5448. return 1;
  5449. }
  5450. /* Emulate the VMPTRST instruction */
  5451. static int handle_vmptrst(struct kvm_vcpu *vcpu)
  5452. {
  5453. unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
  5454. u32 vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
  5455. gva_t vmcs_gva;
  5456. struct x86_exception e;
  5457. if (!nested_vmx_check_permission(vcpu))
  5458. return 1;
  5459. if (get_vmx_mem_address(vcpu, exit_qualification,
  5460. vmx_instruction_info, &vmcs_gva))
  5461. return 1;
  5462. /* ok to use *_system, as nested_vmx_check_permission verified cpl=0 */
  5463. if (kvm_write_guest_virt_system(&vcpu->arch.emulate_ctxt, vmcs_gva,
  5464. (void *)&to_vmx(vcpu)->nested.current_vmptr,
  5465. sizeof(u64), &e)) {
  5466. kvm_inject_page_fault(vcpu, &e);
  5467. return 1;
  5468. }
  5469. nested_vmx_succeed(vcpu);
  5470. skip_emulated_instruction(vcpu);
  5471. return 1;
  5472. }
  5473. /* Emulate the INVEPT instruction */
  5474. static int handle_invept(struct kvm_vcpu *vcpu)
  5475. {
  5476. u32 vmx_instruction_info, types;
  5477. unsigned long type;
  5478. gva_t gva;
  5479. struct x86_exception e;
  5480. struct {
  5481. u64 eptp, gpa;
  5482. } operand;
  5483. u64 eptp_mask = ((1ull << 51) - 1) & PAGE_MASK;
  5484. if (!(nested_vmx_secondary_ctls_high & SECONDARY_EXEC_ENABLE_EPT) ||
  5485. !(nested_vmx_ept_caps & VMX_EPT_INVEPT_BIT)) {
  5486. kvm_queue_exception(vcpu, UD_VECTOR);
  5487. return 1;
  5488. }
  5489. if (!nested_vmx_check_permission(vcpu))
  5490. return 1;
  5491. if (!kvm_read_cr0_bits(vcpu, X86_CR0_PE)) {
  5492. kvm_queue_exception(vcpu, UD_VECTOR);
  5493. return 1;
  5494. }
  5495. vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
  5496. type = kvm_register_read(vcpu, (vmx_instruction_info >> 28) & 0xf);
  5497. types = (nested_vmx_ept_caps >> VMX_EPT_EXTENT_SHIFT) & 6;
  5498. if (!(types & (1UL << type))) {
  5499. nested_vmx_failValid(vcpu,
  5500. VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
  5501. return 1;
  5502. }
  5503. /* According to the Intel VMX instruction reference, the memory
  5504. * operand is read even if it isn't needed (e.g., for type==global)
  5505. */
  5506. if (get_vmx_mem_address(vcpu, vmcs_readl(EXIT_QUALIFICATION),
  5507. vmx_instruction_info, &gva))
  5508. return 1;
  5509. if (kvm_read_guest_virt(&vcpu->arch.emulate_ctxt, gva, &operand,
  5510. sizeof(operand), &e)) {
  5511. kvm_inject_page_fault(vcpu, &e);
  5512. return 1;
  5513. }
  5514. switch (type) {
  5515. case VMX_EPT_EXTENT_CONTEXT:
  5516. if ((operand.eptp & eptp_mask) !=
  5517. (nested_ept_get_cr3(vcpu) & eptp_mask))
  5518. break;
  5519. case VMX_EPT_EXTENT_GLOBAL:
  5520. kvm_mmu_sync_roots(vcpu);
  5521. kvm_mmu_flush_tlb(vcpu);
  5522. nested_vmx_succeed(vcpu);
  5523. break;
  5524. default:
  5525. BUG_ON(1);
  5526. break;
  5527. }
  5528. skip_emulated_instruction(vcpu);
  5529. return 1;
  5530. }
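/*
 * Editorial sketch (not part of the original file): how the INVEPT type in
 * the guest register is checked against the EPT capability MSR bits above.
 * The shift of 24 matches the usual VMX_EPT_EXTENT_SHIFT value; treat it
 * and the helper name as illustrative assumptions.
 */
static int example_invept_type_supported(unsigned int ept_caps,
					 unsigned long type)
{
	/* bit 1: single-context invalidation, bit 2: global invalidation */
	unsigned int types = (ept_caps >> 24) & 6;

	return (types & (1UL << type)) != 0;	/* type 1 = context, 2 = global */
}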
  5531. /*
  5532. * The exit handlers return 1 if the exit was handled fully and guest execution
  5533. * may resume. Otherwise they set the kvm_run parameter to indicate what needs
  5534. * to be done to userspace and return 0.
  5535. */
  5536. static int (*const kvm_vmx_exit_handlers[])(struct kvm_vcpu *vcpu) = {
  5537. [EXIT_REASON_EXCEPTION_NMI] = handle_exception,
  5538. [EXIT_REASON_EXTERNAL_INTERRUPT] = handle_external_interrupt,
  5539. [EXIT_REASON_TRIPLE_FAULT] = handle_triple_fault,
  5540. [EXIT_REASON_NMI_WINDOW] = handle_nmi_window,
  5541. [EXIT_REASON_IO_INSTRUCTION] = handle_io,
  5542. [EXIT_REASON_CR_ACCESS] = handle_cr,
  5543. [EXIT_REASON_DR_ACCESS] = handle_dr,
  5544. [EXIT_REASON_CPUID] = handle_cpuid,
  5545. [EXIT_REASON_MSR_READ] = handle_rdmsr,
  5546. [EXIT_REASON_MSR_WRITE] = handle_wrmsr,
  5547. [EXIT_REASON_PENDING_INTERRUPT] = handle_interrupt_window,
  5548. [EXIT_REASON_HLT] = handle_halt,
  5549. [EXIT_REASON_INVD] = handle_invd,
  5550. [EXIT_REASON_INVLPG] = handle_invlpg,
  5551. [EXIT_REASON_RDPMC] = handle_rdpmc,
  5552. [EXIT_REASON_VMCALL] = handle_vmcall,
  5553. [EXIT_REASON_VMCLEAR] = handle_vmclear,
  5554. [EXIT_REASON_VMLAUNCH] = handle_vmlaunch,
  5555. [EXIT_REASON_VMPTRLD] = handle_vmptrld,
  5556. [EXIT_REASON_VMPTRST] = handle_vmptrst,
  5557. [EXIT_REASON_VMREAD] = handle_vmread,
  5558. [EXIT_REASON_VMRESUME] = handle_vmresume,
  5559. [EXIT_REASON_VMWRITE] = handle_vmwrite,
  5560. [EXIT_REASON_VMOFF] = handle_vmoff,
  5561. [EXIT_REASON_VMON] = handle_vmon,
  5562. [EXIT_REASON_TPR_BELOW_THRESHOLD] = handle_tpr_below_threshold,
  5563. [EXIT_REASON_APIC_ACCESS] = handle_apic_access,
  5564. [EXIT_REASON_APIC_WRITE] = handle_apic_write,
  5565. [EXIT_REASON_EOI_INDUCED] = handle_apic_eoi_induced,
  5566. [EXIT_REASON_WBINVD] = handle_wbinvd,
  5567. [EXIT_REASON_XSETBV] = handle_xsetbv,
  5568. [EXIT_REASON_TASK_SWITCH] = handle_task_switch,
  5569. [EXIT_REASON_MCE_DURING_VMENTRY] = handle_machine_check,
  5570. [EXIT_REASON_EPT_VIOLATION] = handle_ept_violation,
  5571. [EXIT_REASON_EPT_MISCONFIG] = handle_ept_misconfig,
  5572. [EXIT_REASON_PAUSE_INSTRUCTION] = handle_pause,
  5573. [EXIT_REASON_MWAIT_INSTRUCTION] = handle_invalid_op,
  5574. [EXIT_REASON_MONITOR_INSTRUCTION] = handle_invalid_op,
  5575. [EXIT_REASON_INVEPT] = handle_invept,
  5576. };
  5577. static const int kvm_vmx_max_exit_handlers =
  5578. ARRAY_SIZE(kvm_vmx_exit_handlers);
  5579. static bool nested_vmx_exit_handled_io(struct kvm_vcpu *vcpu,
  5580. struct vmcs12 *vmcs12)
  5581. {
  5582. unsigned long exit_qualification;
  5583. gpa_t bitmap, last_bitmap;
  5584. unsigned int port;
  5585. int size;
  5586. u8 b;
  5587. if (nested_cpu_has(vmcs12, CPU_BASED_UNCOND_IO_EXITING))
  5588. return 1;
  5589. if (!nested_cpu_has(vmcs12, CPU_BASED_USE_IO_BITMAPS))
  5590. return 0;
  5591. exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
  5592. port = exit_qualification >> 16;
  5593. size = (exit_qualification & 7) + 1;
  5594. last_bitmap = (gpa_t)-1;
  5595. b = -1;
  5596. while (size > 0) {
  5597. if (port < 0x8000)
  5598. bitmap = vmcs12->io_bitmap_a;
  5599. else if (port < 0x10000)
  5600. bitmap = vmcs12->io_bitmap_b;
  5601. else
  5602. return 1;
  5603. bitmap += (port & 0x7fff) / 8;
  5604. if (last_bitmap != bitmap)
  5605. if (kvm_read_guest(vcpu->kvm, bitmap, &b, 1))
  5606. return 1;
  5607. if (b & (1 << (port & 7)))
  5608. return 1;
  5609. port++;
  5610. size--;
  5611. last_bitmap = bitmap;
  5612. }
  5613. return 0;
  5614. }
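/*
 * Editorial sketch (not part of the original file): locating the intercept
 * bit for one sample port in L1's I/O bitmaps.  Ports 0-0x7fff live in
 * bitmap A, ports 0x8000-0xffff in bitmap B, one bit per port.
 */
static unsigned long example_io_bitmap_bit(unsigned int port)
{
	unsigned long byte = (port & 0x7fff) / 8;	/* byte within the 4K bitmap */
	unsigned int  bit  = port & 7;			/* bit within that byte      */

	/* e.g. port 0xcf8 -> byte 0x19f, bit 0 of bitmap A */
	return byte * 8 + bit;
}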
  5615. /*
5616. * Return 1 if we should exit from L2 to L1 to handle an MSR access,
  5617. * rather than handle it ourselves in L0. I.e., check whether L1 expressed
  5618. * disinterest in the current event (read or write a specific MSR) by using an
  5619. * MSR bitmap. This may be the case even when L0 doesn't use MSR bitmaps.
  5620. */
  5621. static bool nested_vmx_exit_handled_msr(struct kvm_vcpu *vcpu,
  5622. struct vmcs12 *vmcs12, u32 exit_reason)
  5623. {
  5624. u32 msr_index = vcpu->arch.regs[VCPU_REGS_RCX];
  5625. gpa_t bitmap;
  5626. if (!nested_cpu_has(vmcs12, CPU_BASED_USE_MSR_BITMAPS))
  5627. return 1;
  5628. /*
  5629. * The MSR_BITMAP page is divided into four 1024-byte bitmaps,
  5630. * for the four combinations of read/write and low/high MSR numbers.
  5631. * First we need to figure out which of the four to use:
  5632. */
  5633. bitmap = vmcs12->msr_bitmap;
  5634. if (exit_reason == EXIT_REASON_MSR_WRITE)
  5635. bitmap += 2048;
  5636. if (msr_index >= 0xc0000000) {
  5637. msr_index -= 0xc0000000;
  5638. bitmap += 1024;
  5639. }
  5640. /* Then read the msr_index'th bit from this bitmap: */
  5641. if (msr_index < 1024*8) {
  5642. unsigned char b;
  5643. if (kvm_read_guest(vcpu->kvm, bitmap + msr_index/8, &b, 1))
  5644. return 1;
  5645. return 1 & (b >> (msr_index & 7));
  5646. } else
  5647. return 1; /* let L1 handle the wrong parameter */
  5648. }
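/*
 * Editorial sketch (not part of the original file): the byte offset inside
 * the 4K MSR bitmap for a sample access.  A write to MSR 0xc0000080 (EFER)
 * lands in the "write, high-range" quarter: 2048 + 1024 + 0x80/8 = 3088.
 */
static unsigned long example_msr_bitmap_offset(unsigned int msr, int is_write)
{
	unsigned long offset = 0;

	if (is_write)
		offset += 2048;		/* write bitmaps occupy the upper half   */
	if (msr >= 0xc0000000) {
		msr -= 0xc0000000;
		offset += 1024;		/* high MSR range (0xc0000000 and up)    */
	}
	return offset + msr / 8;	/* byte holding this MSR's intercept bit */
}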
  5649. /*
  5650. * Return 1 if we should exit from L2 to L1 to handle a CR access exit,
  5651. * rather than handle it ourselves in L0. I.e., check if L1 wanted to
  5652. * intercept (via guest_host_mask etc.) the current event.
  5653. */
  5654. static bool nested_vmx_exit_handled_cr(struct kvm_vcpu *vcpu,
  5655. struct vmcs12 *vmcs12)
  5656. {
  5657. unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
  5658. int cr = exit_qualification & 15;
  5659. int reg = (exit_qualification >> 8) & 15;
  5660. unsigned long val = kvm_register_read(vcpu, reg);
  5661. switch ((exit_qualification >> 4) & 3) {
  5662. case 0: /* mov to cr */
  5663. switch (cr) {
  5664. case 0:
  5665. if (vmcs12->cr0_guest_host_mask &
  5666. (val ^ vmcs12->cr0_read_shadow))
  5667. return 1;
  5668. break;
  5669. case 3:
  5670. if ((vmcs12->cr3_target_count >= 1 &&
  5671. vmcs12->cr3_target_value0 == val) ||
  5672. (vmcs12->cr3_target_count >= 2 &&
  5673. vmcs12->cr3_target_value1 == val) ||
  5674. (vmcs12->cr3_target_count >= 3 &&
  5675. vmcs12->cr3_target_value2 == val) ||
  5676. (vmcs12->cr3_target_count >= 4 &&
  5677. vmcs12->cr3_target_value3 == val))
  5678. return 0;
  5679. if (nested_cpu_has(vmcs12, CPU_BASED_CR3_LOAD_EXITING))
  5680. return 1;
  5681. break;
  5682. case 4:
  5683. if (vmcs12->cr4_guest_host_mask &
  5684. (vmcs12->cr4_read_shadow ^ val))
  5685. return 1;
  5686. break;
  5687. case 8:
  5688. if (nested_cpu_has(vmcs12, CPU_BASED_CR8_LOAD_EXITING))
  5689. return 1;
  5690. break;
  5691. }
  5692. break;
  5693. case 2: /* clts */
  5694. if ((vmcs12->cr0_guest_host_mask & X86_CR0_TS) &&
  5695. (vmcs12->cr0_read_shadow & X86_CR0_TS))
  5696. return 1;
  5697. break;
  5698. case 1: /* mov from cr */
  5699. switch (cr) {
  5700. case 3:
  5701. if (vmcs12->cpu_based_vm_exec_control &
  5702. CPU_BASED_CR3_STORE_EXITING)
  5703. return 1;
  5704. break;
  5705. case 8:
  5706. if (vmcs12->cpu_based_vm_exec_control &
  5707. CPU_BASED_CR8_STORE_EXITING)
  5708. return 1;
  5709. break;
  5710. }
  5711. break;
  5712. case 3: /* lmsw */
  5713. /*
  5714. * lmsw can change bits 1..3 of cr0, and only set bit 0 of
  5715. * cr0. Other attempted changes are ignored, with no exit.
  5716. */
  5717. if (vmcs12->cr0_guest_host_mask & 0xe &
  5718. (val ^ vmcs12->cr0_read_shadow))
  5719. return 1;
  5720. if ((vmcs12->cr0_guest_host_mask & 0x1) &&
  5721. !(vmcs12->cr0_read_shadow & 0x1) &&
  5722. (val & 0x1))
  5723. return 1;
  5724. break;
  5725. }
  5726. return 0;
  5727. }
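/*
 * Editorial sketch (not part of the original file): the exit-qualification
 * layout decoded at the top of nested_vmx_exit_handled_cr().  The sample
 * value is hypothetical.
 */
static void example_decode_cr_exit_qualification(void)
{
	unsigned long qual = 0x803;		/* hypothetical qualification  */
	int cr          = qual & 15;		/* -> 3: CR3                   */
	int access_type = (qual >> 4) & 3;	/* -> 0: mov to cr             */
	int reg         = (qual >> 8) & 15;	/* -> 8: source register is r8 */

	(void)cr; (void)access_type; (void)reg;
}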
  5728. /*
  5729. * Return 1 if we should exit from L2 to L1 to handle an exit, or 0 if we
  5730. * should handle it ourselves in L0 (and then continue L2). Only call this
  5731. * when in is_guest_mode (L2).
  5732. */
  5733. static bool nested_vmx_exit_handled(struct kvm_vcpu *vcpu)
  5734. {
  5735. u32 intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
  5736. struct vcpu_vmx *vmx = to_vmx(vcpu);
  5737. struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
  5738. u32 exit_reason = vmx->exit_reason;
  5739. if (vmx->nested.nested_run_pending)
  5740. return 0;
  5741. if (unlikely(vmx->fail)) {
  5742. pr_info_ratelimited("%s failed vm entry %x\n", __func__,
  5743. vmcs_read32(VM_INSTRUCTION_ERROR));
  5744. return 1;
  5745. }
  5746. switch (exit_reason) {
  5747. case EXIT_REASON_EXCEPTION_NMI:
  5748. if (!is_exception(intr_info))
  5749. return 0;
  5750. else if (is_page_fault(intr_info))
  5751. return enable_ept;
  5752. return vmcs12->exception_bitmap &
  5753. (1u << (intr_info & INTR_INFO_VECTOR_MASK));
  5754. case EXIT_REASON_EXTERNAL_INTERRUPT:
  5755. return 0;
  5756. case EXIT_REASON_TRIPLE_FAULT:
  5757. return 1;
  5758. case EXIT_REASON_PENDING_INTERRUPT:
  5759. return nested_cpu_has(vmcs12, CPU_BASED_VIRTUAL_INTR_PENDING);
  5760. case EXIT_REASON_NMI_WINDOW:
  5761. return nested_cpu_has(vmcs12, CPU_BASED_VIRTUAL_NMI_PENDING);
  5762. case EXIT_REASON_TASK_SWITCH:
  5763. return 1;
  5764. case EXIT_REASON_CPUID:
  5765. return 1;
  5766. case EXIT_REASON_HLT:
  5767. return nested_cpu_has(vmcs12, CPU_BASED_HLT_EXITING);
  5768. case EXIT_REASON_INVD:
  5769. return 1;
  5770. case EXIT_REASON_INVLPG:
  5771. return nested_cpu_has(vmcs12, CPU_BASED_INVLPG_EXITING);
  5772. case EXIT_REASON_RDPMC:
  5773. return nested_cpu_has(vmcs12, CPU_BASED_RDPMC_EXITING);
  5774. case EXIT_REASON_RDTSC:
  5775. return nested_cpu_has(vmcs12, CPU_BASED_RDTSC_EXITING);
  5776. case EXIT_REASON_VMCALL: case EXIT_REASON_VMCLEAR:
  5777. case EXIT_REASON_VMLAUNCH: case EXIT_REASON_VMPTRLD:
  5778. case EXIT_REASON_VMPTRST: case EXIT_REASON_VMREAD:
  5779. case EXIT_REASON_VMRESUME: case EXIT_REASON_VMWRITE:
  5780. case EXIT_REASON_VMOFF: case EXIT_REASON_VMON:
  5781. case EXIT_REASON_INVEPT:
  5782. /*
  5783. * VMX instructions trap unconditionally. This allows L1 to
  5784. * emulate them for its L2 guest, i.e., allows 3-level nesting!
  5785. */
  5786. return 1;
  5787. case EXIT_REASON_CR_ACCESS:
  5788. return nested_vmx_exit_handled_cr(vcpu, vmcs12);
  5789. case EXIT_REASON_DR_ACCESS:
  5790. return nested_cpu_has(vmcs12, CPU_BASED_MOV_DR_EXITING);
  5791. case EXIT_REASON_IO_INSTRUCTION:
  5792. return nested_vmx_exit_handled_io(vcpu, vmcs12);
  5793. case EXIT_REASON_MSR_READ:
  5794. case EXIT_REASON_MSR_WRITE:
  5795. return nested_vmx_exit_handled_msr(vcpu, vmcs12, exit_reason);
  5796. case EXIT_REASON_INVALID_STATE:
  5797. return 1;
  5798. case EXIT_REASON_MWAIT_INSTRUCTION:
  5799. return nested_cpu_has(vmcs12, CPU_BASED_MWAIT_EXITING);
  5800. case EXIT_REASON_MONITOR_INSTRUCTION:
  5801. return nested_cpu_has(vmcs12, CPU_BASED_MONITOR_EXITING);
  5802. case EXIT_REASON_PAUSE_INSTRUCTION:
  5803. return nested_cpu_has(vmcs12, CPU_BASED_PAUSE_EXITING) ||
  5804. nested_cpu_has2(vmcs12,
  5805. SECONDARY_EXEC_PAUSE_LOOP_EXITING);
  5806. case EXIT_REASON_MCE_DURING_VMENTRY:
  5807. return 0;
  5808. case EXIT_REASON_TPR_BELOW_THRESHOLD:
  5809. return 1;
  5810. case EXIT_REASON_APIC_ACCESS:
  5811. return nested_cpu_has2(vmcs12,
  5812. SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES);
  5813. case EXIT_REASON_EPT_VIOLATION:
  5814. /*
  5815. * L0 always deals with the EPT violation. If nested EPT is
  5816. * used, and the nested mmu code discovers that the address is
  5817. * missing in the guest EPT table (EPT12), the EPT violation
  5818. * will be injected with nested_ept_inject_page_fault()
  5819. */
  5820. return 0;
  5821. case EXIT_REASON_EPT_MISCONFIG:
  5822. /*
5823. * L2 never directly uses L1's EPT, but rather L0's own EPT
5824. * table (shadow on EPT) or a merged EPT table that L0 built
5825. * (EPT on EPT). So any problems with the structure of the
5826. * table are L0's fault.
  5827. */
  5828. return 0;
  5829. case EXIT_REASON_PREEMPTION_TIMER:
  5830. return vmcs12->pin_based_vm_exec_control &
  5831. PIN_BASED_VMX_PREEMPTION_TIMER;
  5832. case EXIT_REASON_WBINVD:
  5833. return nested_cpu_has2(vmcs12, SECONDARY_EXEC_WBINVD_EXITING);
  5834. case EXIT_REASON_XSETBV:
  5835. return 1;
  5836. default:
  5837. return 1;
  5838. }
  5839. }
  5840. static void vmx_get_exit_info(struct kvm_vcpu *vcpu, u64 *info1, u64 *info2)
  5841. {
  5842. *info1 = vmcs_readl(EXIT_QUALIFICATION);
  5843. *info2 = vmcs_read32(VM_EXIT_INTR_INFO);
  5844. }
  5845. /*
  5846. * The guest has exited. See if we can fix it or if we need userspace
  5847. * assistance.
  5848. */
  5849. static int vmx_handle_exit(struct kvm_vcpu *vcpu)
  5850. {
  5851. struct vcpu_vmx *vmx = to_vmx(vcpu);
  5852. u32 exit_reason = vmx->exit_reason;
  5853. u32 vectoring_info = vmx->idt_vectoring_info;
  5854. /* If guest state is invalid, start emulating */
  5855. if (vmx->emulation_required)
  5856. return handle_invalid_guest_state(vcpu);
  5857. /*
  5858. * the KVM_REQ_EVENT optimization bit is only on for one entry, and if
  5859. * we did not inject a still-pending event to L1 now because of
  5860. * nested_run_pending, we need to re-enable this bit.
  5861. */
  5862. if (vmx->nested.nested_run_pending)
  5863. kvm_make_request(KVM_REQ_EVENT, vcpu);
  5864. if (!is_guest_mode(vcpu) && (exit_reason == EXIT_REASON_VMLAUNCH ||
  5865. exit_reason == EXIT_REASON_VMRESUME))
  5866. vmx->nested.nested_run_pending = 1;
  5867. else
  5868. vmx->nested.nested_run_pending = 0;
  5869. if (is_guest_mode(vcpu) && nested_vmx_exit_handled(vcpu)) {
  5870. nested_vmx_vmexit(vcpu);
  5871. return 1;
  5872. }
  5873. if (exit_reason & VMX_EXIT_REASONS_FAILED_VMENTRY) {
  5874. vcpu->run->exit_reason = KVM_EXIT_FAIL_ENTRY;
  5875. vcpu->run->fail_entry.hardware_entry_failure_reason
  5876. = exit_reason;
  5877. return 0;
  5878. }
  5879. if (unlikely(vmx->fail)) {
  5880. vcpu->run->exit_reason = KVM_EXIT_FAIL_ENTRY;
  5881. vcpu->run->fail_entry.hardware_entry_failure_reason
  5882. = vmcs_read32(VM_INSTRUCTION_ERROR);
  5883. return 0;
  5884. }
  5885. /*
  5886. * Note:
5887. * Do not try to fix EXIT_REASON_EPT_MISCONFIG if it is caused by an
5888. * event delivery, since that indicates the guest is accessing MMIO.
5889. * The VM exit can be triggered again after returning to the guest,
5890. * which would cause an infinite loop.
  5891. */
  5892. if ((vectoring_info & VECTORING_INFO_VALID_MASK) &&
  5893. (exit_reason != EXIT_REASON_EXCEPTION_NMI &&
  5894. exit_reason != EXIT_REASON_EPT_VIOLATION &&
  5895. exit_reason != EXIT_REASON_TASK_SWITCH)) {
  5896. vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
  5897. vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_DELIVERY_EV;
  5898. vcpu->run->internal.ndata = 2;
  5899. vcpu->run->internal.data[0] = vectoring_info;
  5900. vcpu->run->internal.data[1] = exit_reason;
  5901. return 0;
  5902. }
  5903. if (unlikely(!cpu_has_virtual_nmis() && vmx->soft_vnmi_blocked &&
  5904. !(is_guest_mode(vcpu) && nested_cpu_has_virtual_nmis(
  5905. get_vmcs12(vcpu))))) {
  5906. if (vmx_interrupt_allowed(vcpu)) {
  5907. vmx->soft_vnmi_blocked = 0;
  5908. } else if (vmx->vnmi_blocked_time > 1000000000LL &&
  5909. vcpu->arch.nmi_pending) {
  5910. /*
5911. * This CPU doesn't support us in finding the end of an
  5912. * NMI-blocked window if the guest runs with IRQs
  5913. * disabled. So we pull the trigger after 1 s of
  5914. * futile waiting, but inform the user about this.
  5915. */
  5916. printk(KERN_WARNING "%s: Breaking out of NMI-blocked "
  5917. "state on VCPU %d after 1 s timeout\n",
  5918. __func__, vcpu->vcpu_id);
  5919. vmx->soft_vnmi_blocked = 0;
  5920. }
  5921. }
  5922. if (exit_reason < kvm_vmx_max_exit_handlers
  5923. && kvm_vmx_exit_handlers[exit_reason])
  5924. return kvm_vmx_exit_handlers[exit_reason](vcpu);
  5925. else {
  5926. vcpu->run->exit_reason = KVM_EXIT_UNKNOWN;
  5927. vcpu->run->hw.hardware_exit_reason = exit_reason;
  5928. }
  5929. return 0;
  5930. }
  5931. static void update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr)
  5932. {
  5933. if (irr == -1 || tpr < irr) {
  5934. vmcs_write32(TPR_THRESHOLD, 0);
  5935. return;
  5936. }
  5937. vmcs_write32(TPR_THRESHOLD, irr);
  5938. }
  5939. static void vmx_set_virtual_x2apic_mode(struct kvm_vcpu *vcpu, bool set)
  5940. {
  5941. u32 sec_exec_control;
  5942. /*
5943. * There is no point in enabling virtualized x2APIC mode without
5944. * also enabling APICv
  5945. */
  5946. if (!cpu_has_vmx_virtualize_x2apic_mode() ||
  5947. !vmx_vm_has_apicv(vcpu->kvm))
  5948. return;
  5949. if (!vm_need_tpr_shadow(vcpu->kvm))
  5950. return;
  5951. sec_exec_control = vmcs_read32(SECONDARY_VM_EXEC_CONTROL);
  5952. if (set) {
  5953. sec_exec_control &= ~SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
  5954. sec_exec_control |= SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE;
  5955. } else {
  5956. sec_exec_control &= ~SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE;
  5957. sec_exec_control |= SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
  5958. }
  5959. vmcs_write32(SECONDARY_VM_EXEC_CONTROL, sec_exec_control);
  5960. vmx_set_msr_bitmap(vcpu);
  5961. }
  5962. static void vmx_hwapic_isr_update(struct kvm *kvm, int isr)
  5963. {
  5964. u16 status;
  5965. u8 old;
  5966. if (!vmx_vm_has_apicv(kvm))
  5967. return;
  5968. if (isr == -1)
  5969. isr = 0;
  5970. status = vmcs_read16(GUEST_INTR_STATUS);
  5971. old = status >> 8;
  5972. if (isr != old) {
  5973. status &= 0xff;
  5974. status |= isr << 8;
  5975. vmcs_write16(GUEST_INTR_STATUS, status);
  5976. }
  5977. }
  5978. static void vmx_set_rvi(int vector)
  5979. {
  5980. u16 status;
  5981. u8 old;
  5982. status = vmcs_read16(GUEST_INTR_STATUS);
  5983. old = (u8)status & 0xff;
  5984. if ((u8)vector != old) {
  5985. status &= ~0xff;
  5986. status |= (u8)vector;
  5987. vmcs_write16(GUEST_INTR_STATUS, status);
  5988. }
  5989. }
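/*
 * Editorial sketch (not part of the original file): GUEST_INTR_STATUS is a
 * 16-bit field; vmx_set_rvi() updates the low byte (requesting virtual
 * interrupt) and vmx_hwapic_isr_update() the high byte (servicing virtual
 * interrupt).
 */
static unsigned short example_guest_intr_status(unsigned char rvi,
						unsigned char svi)
{
	return (unsigned short)((svi << 8) | rvi);
}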
  5990. static void vmx_hwapic_irr_update(struct kvm_vcpu *vcpu, int max_irr)
  5991. {
  5992. if (max_irr == -1)
  5993. return;
  5994. vmx_set_rvi(max_irr);
  5995. }
  5996. static void vmx_load_eoi_exitmap(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap)
  5997. {
  5998. if (!vmx_vm_has_apicv(vcpu->kvm))
  5999. return;
  6000. vmcs_write64(EOI_EXIT_BITMAP0, eoi_exit_bitmap[0]);
  6001. vmcs_write64(EOI_EXIT_BITMAP1, eoi_exit_bitmap[1]);
  6002. vmcs_write64(EOI_EXIT_BITMAP2, eoi_exit_bitmap[2]);
  6003. vmcs_write64(EOI_EXIT_BITMAP3, eoi_exit_bitmap[3]);
  6004. }
  6005. static void vmx_complete_atomic_exit(struct vcpu_vmx *vmx)
  6006. {
  6007. u32 exit_intr_info;
  6008. if (!(vmx->exit_reason == EXIT_REASON_MCE_DURING_VMENTRY
  6009. || vmx->exit_reason == EXIT_REASON_EXCEPTION_NMI))
  6010. return;
  6011. vmx->exit_intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
  6012. exit_intr_info = vmx->exit_intr_info;
  6013. /* Handle machine checks before interrupts are enabled */
  6014. if (is_machine_check(exit_intr_info))
  6015. kvm_machine_check();
  6016. /* We need to handle NMIs before interrupts are enabled */
  6017. if ((exit_intr_info & INTR_INFO_INTR_TYPE_MASK) == INTR_TYPE_NMI_INTR &&
  6018. (exit_intr_info & INTR_INFO_VALID_MASK)) {
  6019. kvm_before_handle_nmi(&vmx->vcpu);
  6020. asm("int $2");
  6021. kvm_after_handle_nmi(&vmx->vcpu);
  6022. }
  6023. }
  6024. static void vmx_handle_external_intr(struct kvm_vcpu *vcpu)
  6025. {
  6026. u32 exit_intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
  6027. /*
6028. * If an external interrupt exists, the IF bit is set in rflags/eflags on
6029. * the interrupt stack frame, and interrupts will be re-enabled on return
6030. * from the interrupt handler.
  6031. */
  6032. if ((exit_intr_info & (INTR_INFO_VALID_MASK | INTR_INFO_INTR_TYPE_MASK))
  6033. == (INTR_INFO_VALID_MASK | INTR_TYPE_EXT_INTR)) {
  6034. unsigned int vector;
  6035. unsigned long entry;
  6036. gate_desc *desc;
  6037. struct vcpu_vmx *vmx = to_vmx(vcpu);
  6038. #ifdef CONFIG_X86_64
  6039. unsigned long tmp;
  6040. #endif
  6041. vector = exit_intr_info & INTR_INFO_VECTOR_MASK;
  6042. desc = (gate_desc *)vmx->host_idt_base + vector;
  6043. entry = gate_offset(*desc);
  6044. asm volatile(
  6045. #ifdef CONFIG_X86_64
  6046. "mov %%" _ASM_SP ", %[sp]\n\t"
  6047. "and $0xfffffffffffffff0, %%" _ASM_SP "\n\t"
  6048. "push $%c[ss]\n\t"
  6049. "push %[sp]\n\t"
  6050. #endif
  6051. "pushf\n\t"
  6052. "orl $0x200, (%%" _ASM_SP ")\n\t"
  6053. __ASM_SIZE(push) " $%c[cs]\n\t"
  6054. "call *%[entry]\n\t"
  6055. :
  6056. #ifdef CONFIG_X86_64
  6057. [sp]"=&r"(tmp)
  6058. #endif
  6059. :
  6060. [entry]"r"(entry),
  6061. [ss]"i"(__KERNEL_DS),
  6062. [cs]"i"(__KERNEL_CS)
  6063. );
  6064. } else
  6065. local_irq_enable();
  6066. }
  6067. static void vmx_recover_nmi_blocking(struct vcpu_vmx *vmx)
  6068. {
  6069. u32 exit_intr_info;
  6070. bool unblock_nmi;
  6071. u8 vector;
  6072. bool idtv_info_valid;
  6073. idtv_info_valid = vmx->idt_vectoring_info & VECTORING_INFO_VALID_MASK;
  6074. if (cpu_has_virtual_nmis()) {
  6075. if (vmx->nmi_known_unmasked)
  6076. return;
  6077. /*
  6078. * Can't use vmx->exit_intr_info since we're not sure what
  6079. * the exit reason is.
  6080. */
  6081. exit_intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
  6082. unblock_nmi = (exit_intr_info & INTR_INFO_UNBLOCK_NMI) != 0;
  6083. vector = exit_intr_info & INTR_INFO_VECTOR_MASK;
  6084. /*
  6085. * SDM 3: 27.7.1.2 (September 2008)
  6086. * Re-set bit "block by NMI" before VM entry if vmexit caused by
  6087. * a guest IRET fault.
  6088. * SDM 3: 23.2.2 (September 2008)
  6089. * Bit 12 is undefined in any of the following cases:
  6090. * If the VM exit sets the valid bit in the IDT-vectoring
  6091. * information field.
  6092. * If the VM exit is due to a double fault.
  6093. */
  6094. if ((exit_intr_info & INTR_INFO_VALID_MASK) && unblock_nmi &&
  6095. vector != DF_VECTOR && !idtv_info_valid)
  6096. vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO,
  6097. GUEST_INTR_STATE_NMI);
  6098. else
  6099. vmx->nmi_known_unmasked =
  6100. !(vmcs_read32(GUEST_INTERRUPTIBILITY_INFO)
  6101. & GUEST_INTR_STATE_NMI);
  6102. } else if (unlikely(vmx->soft_vnmi_blocked))
  6103. vmx->vnmi_blocked_time +=
  6104. ktime_to_ns(ktime_sub(ktime_get(), vmx->entry_time));
  6105. }
  6106. static void __vmx_complete_interrupts(struct kvm_vcpu *vcpu,
  6107. u32 idt_vectoring_info,
  6108. int instr_len_field,
  6109. int error_code_field)
  6110. {
  6111. u8 vector;
  6112. int type;
  6113. bool idtv_info_valid;
  6114. idtv_info_valid = idt_vectoring_info & VECTORING_INFO_VALID_MASK;
  6115. vcpu->arch.nmi_injected = false;
  6116. kvm_clear_exception_queue(vcpu);
  6117. kvm_clear_interrupt_queue(vcpu);
  6118. if (!idtv_info_valid)
  6119. return;
  6120. kvm_make_request(KVM_REQ_EVENT, vcpu);
  6121. vector = idt_vectoring_info & VECTORING_INFO_VECTOR_MASK;
  6122. type = idt_vectoring_info & VECTORING_INFO_TYPE_MASK;
  6123. switch (type) {
  6124. case INTR_TYPE_NMI_INTR:
  6125. vcpu->arch.nmi_injected = true;
  6126. /*
  6127. * SDM 3: 27.7.1.2 (September 2008)
  6128. * Clear bit "block by NMI" before VM entry if a NMI
  6129. * delivery faulted.
  6130. */
  6131. vmx_set_nmi_mask(vcpu, false);
  6132. break;
  6133. case INTR_TYPE_SOFT_EXCEPTION:
  6134. vcpu->arch.event_exit_inst_len = vmcs_read32(instr_len_field);
  6135. /* fall through */
  6136. case INTR_TYPE_HARD_EXCEPTION:
  6137. if (idt_vectoring_info & VECTORING_INFO_DELIVER_CODE_MASK) {
  6138. u32 err = vmcs_read32(error_code_field);
  6139. kvm_queue_exception_e(vcpu, vector, err);
  6140. } else
  6141. kvm_queue_exception(vcpu, vector);
  6142. break;
  6143. case INTR_TYPE_SOFT_INTR:
  6144. vcpu->arch.event_exit_inst_len = vmcs_read32(instr_len_field);
  6145. /* fall through */
  6146. case INTR_TYPE_EXT_INTR:
  6147. kvm_queue_interrupt(vcpu, vector, type == INTR_TYPE_SOFT_INTR);
  6148. break;
  6149. default:
  6150. break;
  6151. }
  6152. }
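/*
 * Editorial sketch (not part of the original file): the IDT-vectoring info
 * layout consumed by __vmx_complete_interrupts(), following the standard
 * format: bits 7:0 vector, bits 10:8 type, bit 11 "error code valid",
 * bit 31 "valid".  The sample value is illustrative.
 */
static void example_decode_idt_vectoring_info(void)
{
	unsigned int info = 0x80000b0e;		/* hypothetical sample         */
	int valid   = (info >> 31) & 1;		/* 1: field is valid           */
	int has_err = (info >> 11) & 1;		/* 1: error code was delivered */
	int type    = (info >> 8) & 7;		/* 3: hardware exception       */
	int vector  = info & 0xff;		/* 14: page fault (#PF)        */

	(void)valid; (void)has_err; (void)type; (void)vector;
}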
  6153. static void vmx_complete_interrupts(struct vcpu_vmx *vmx)
  6154. {
  6155. __vmx_complete_interrupts(&vmx->vcpu, vmx->idt_vectoring_info,
  6156. VM_EXIT_INSTRUCTION_LEN,
  6157. IDT_VECTORING_ERROR_CODE);
  6158. }
  6159. static void vmx_cancel_injection(struct kvm_vcpu *vcpu)
  6160. {
  6161. __vmx_complete_interrupts(vcpu,
  6162. vmcs_read32(VM_ENTRY_INTR_INFO_FIELD),
  6163. VM_ENTRY_INSTRUCTION_LEN,
  6164. VM_ENTRY_EXCEPTION_ERROR_CODE);
  6165. vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, 0);
  6166. }
  6167. static void atomic_switch_perf_msrs(struct vcpu_vmx *vmx)
  6168. {
  6169. int i, nr_msrs;
  6170. struct perf_guest_switch_msr *msrs;
  6171. msrs = perf_guest_get_msrs(&nr_msrs);
  6172. if (!msrs)
  6173. return;
  6174. for (i = 0; i < nr_msrs; i++)
  6175. if (msrs[i].host == msrs[i].guest)
  6176. clear_atomic_switch_msr(vmx, msrs[i].msr);
  6177. else
  6178. add_atomic_switch_msr(vmx, msrs[i].msr, msrs[i].guest,
  6179. msrs[i].host);
  6180. }
  6181. static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
  6182. {
  6183. struct vcpu_vmx *vmx = to_vmx(vcpu);
  6184. unsigned long debugctlmsr;
  6185. /* Record the guest's net vcpu time for enforced NMI injections. */
  6186. if (unlikely(!cpu_has_virtual_nmis() && vmx->soft_vnmi_blocked))
  6187. vmx->entry_time = ktime_get();
6188. /* Don't enter VMX if guest state is invalid; let the exit handler
6189. start emulation until we arrive back at a valid state */
  6190. if (vmx->emulation_required)
  6191. return;
  6192. if (vmx->nested.sync_shadow_vmcs) {
  6193. copy_vmcs12_to_shadow(vmx);
  6194. vmx->nested.sync_shadow_vmcs = false;
  6195. }
  6196. if (test_bit(VCPU_REGS_RSP, (unsigned long *)&vcpu->arch.regs_dirty))
  6197. vmcs_writel(GUEST_RSP, vcpu->arch.regs[VCPU_REGS_RSP]);
  6198. if (test_bit(VCPU_REGS_RIP, (unsigned long *)&vcpu->arch.regs_dirty))
  6199. vmcs_writel(GUEST_RIP, vcpu->arch.regs[VCPU_REGS_RIP]);
  6200. /* When single-stepping over STI and MOV SS, we must clear the
  6201. * corresponding interruptibility bits in the guest state. Otherwise
6202. * vmentry fails as it then expects bit 14 (BS) to be set in the pending
6203. * debug exceptions field, but that's not correct for the guest debugging
  6204. * case. */
  6205. if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
  6206. vmx_set_interrupt_shadow(vcpu, 0);
  6207. atomic_switch_perf_msrs(vmx);
  6208. debugctlmsr = get_debugctlmsr();
  6209. vmx->__launched = vmx->loaded_vmcs->launched;
  6210. asm(
  6211. /* Store host registers */
  6212. "push %%" _ASM_DX "; push %%" _ASM_BP ";"
  6213. "push %%" _ASM_CX " \n\t" /* placeholder for guest rcx */
  6214. "push %%" _ASM_CX " \n\t"
  6215. "cmp %%" _ASM_SP ", %c[host_rsp](%0) \n\t"
  6216. "je 1f \n\t"
  6217. "mov %%" _ASM_SP ", %c[host_rsp](%0) \n\t"
  6218. __ex(ASM_VMX_VMWRITE_RSP_RDX) "\n\t"
  6219. "1: \n\t"
  6220. /* Reload cr2 if changed */
  6221. "mov %c[cr2](%0), %%" _ASM_AX " \n\t"
  6222. "mov %%cr2, %%" _ASM_DX " \n\t"
  6223. "cmp %%" _ASM_AX ", %%" _ASM_DX " \n\t"
  6224. "je 2f \n\t"
  6225. "mov %%" _ASM_AX", %%cr2 \n\t"
  6226. "2: \n\t"
6227. /* Check if vmlaunch or vmresume is needed */
  6228. "cmpl $0, %c[launched](%0) \n\t"
  6229. /* Load guest registers. Don't clobber flags. */
  6230. "mov %c[rax](%0), %%" _ASM_AX " \n\t"
  6231. "mov %c[rbx](%0), %%" _ASM_BX " \n\t"
  6232. "mov %c[rdx](%0), %%" _ASM_DX " \n\t"
  6233. "mov %c[rsi](%0), %%" _ASM_SI " \n\t"
  6234. "mov %c[rdi](%0), %%" _ASM_DI " \n\t"
  6235. "mov %c[rbp](%0), %%" _ASM_BP " \n\t"
  6236. #ifdef CONFIG_X86_64
  6237. "mov %c[r8](%0), %%r8 \n\t"
  6238. "mov %c[r9](%0), %%r9 \n\t"
  6239. "mov %c[r10](%0), %%r10 \n\t"
  6240. "mov %c[r11](%0), %%r11 \n\t"
  6241. "mov %c[r12](%0), %%r12 \n\t"
  6242. "mov %c[r13](%0), %%r13 \n\t"
  6243. "mov %c[r14](%0), %%r14 \n\t"
  6244. "mov %c[r15](%0), %%r15 \n\t"
  6245. #endif
  6246. "mov %c[rcx](%0), %%" _ASM_CX " \n\t" /* kills %0 (ecx) */
  6247. /* Enter guest mode */
  6248. "jne 1f \n\t"
  6249. __ex(ASM_VMX_VMLAUNCH) "\n\t"
  6250. "jmp 2f \n\t"
  6251. "1: " __ex(ASM_VMX_VMRESUME) "\n\t"
  6252. "2: "
  6253. /* Save guest registers, load host registers, keep flags */
  6254. "mov %0, %c[wordsize](%%" _ASM_SP ") \n\t"
  6255. "pop %0 \n\t"
  6256. "mov %%" _ASM_AX ", %c[rax](%0) \n\t"
  6257. "mov %%" _ASM_BX ", %c[rbx](%0) \n\t"
  6258. __ASM_SIZE(pop) " %c[rcx](%0) \n\t"
  6259. "mov %%" _ASM_DX ", %c[rdx](%0) \n\t"
  6260. "mov %%" _ASM_SI ", %c[rsi](%0) \n\t"
  6261. "mov %%" _ASM_DI ", %c[rdi](%0) \n\t"
  6262. "mov %%" _ASM_BP ", %c[rbp](%0) \n\t"
  6263. #ifdef CONFIG_X86_64
  6264. "mov %%r8, %c[r8](%0) \n\t"
  6265. "mov %%r9, %c[r9](%0) \n\t"
  6266. "mov %%r10, %c[r10](%0) \n\t"
  6267. "mov %%r11, %c[r11](%0) \n\t"
  6268. "mov %%r12, %c[r12](%0) \n\t"
  6269. "mov %%r13, %c[r13](%0) \n\t"
  6270. "mov %%r14, %c[r14](%0) \n\t"
  6271. "mov %%r15, %c[r15](%0) \n\t"
  6272. #endif
  6273. "mov %%cr2, %%" _ASM_AX " \n\t"
  6274. "mov %%" _ASM_AX ", %c[cr2](%0) \n\t"
  6275. "pop %%" _ASM_BP "; pop %%" _ASM_DX " \n\t"
  6276. "setbe %c[fail](%0) \n\t"
  6277. ".pushsection .rodata \n\t"
  6278. ".global vmx_return \n\t"
  6279. "vmx_return: " _ASM_PTR " 2b \n\t"
  6280. ".popsection"
  6281. : : "c"(vmx), "d"((unsigned long)HOST_RSP),
  6282. [launched]"i"(offsetof(struct vcpu_vmx, __launched)),
  6283. [fail]"i"(offsetof(struct vcpu_vmx, fail)),
  6284. [host_rsp]"i"(offsetof(struct vcpu_vmx, host_rsp)),
  6285. [rax]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RAX])),
  6286. [rbx]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RBX])),
  6287. [rcx]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RCX])),
  6288. [rdx]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RDX])),
  6289. [rsi]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RSI])),
  6290. [rdi]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RDI])),
  6291. [rbp]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RBP])),
  6292. #ifdef CONFIG_X86_64
  6293. [r8]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R8])),
  6294. [r9]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R9])),
  6295. [r10]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R10])),
  6296. [r11]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R11])),
  6297. [r12]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R12])),
  6298. [r13]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R13])),
  6299. [r14]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R14])),
  6300. [r15]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R15])),
  6301. #endif
  6302. [cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2)),
  6303. [wordsize]"i"(sizeof(ulong))
  6304. : "cc", "memory"
  6305. #ifdef CONFIG_X86_64
  6306. , "rax", "rbx", "rdi", "rsi"
  6307. , "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15"
  6308. #else
  6309. , "eax", "ebx", "edi", "esi"
  6310. #endif
  6311. );
  6312. /* MSR_IA32_DEBUGCTLMSR is zeroed on vmexit. Restore it if needed */
  6313. if (debugctlmsr)
  6314. update_debugctlmsr(debugctlmsr);
  6315. #ifndef CONFIG_X86_64
  6316. /*
  6317. * The sysexit path does not restore ds/es, so we must set them to
  6318. * a reasonable value ourselves.
  6319. *
  6320. * We can't defer this to vmx_load_host_state() since that function
6321. * may be executed in interrupt context, which saves and restores segments
  6322. * around it, nullifying its effect.
  6323. */
  6324. loadsegment(ds, __USER_DS);
  6325. loadsegment(es, __USER_DS);
  6326. #endif
  6327. vcpu->arch.regs_avail = ~((1 << VCPU_REGS_RIP) | (1 << VCPU_REGS_RSP)
  6328. | (1 << VCPU_EXREG_RFLAGS)
  6329. | (1 << VCPU_EXREG_CPL)
  6330. | (1 << VCPU_EXREG_PDPTR)
  6331. | (1 << VCPU_EXREG_SEGMENTS)
  6332. | (1 << VCPU_EXREG_CR3));
  6333. vcpu->arch.regs_dirty = 0;
  6334. vmx->idt_vectoring_info = vmcs_read32(IDT_VECTORING_INFO_FIELD);
  6335. vmx->loaded_vmcs->launched = 1;
  6336. vmx->exit_reason = vmcs_read32(VM_EXIT_REASON);
  6337. trace_kvm_exit(vmx->exit_reason, vcpu, KVM_ISA_VMX);
  6338. vmx_complete_atomic_exit(vmx);
  6339. vmx_recover_nmi_blocking(vmx);
  6340. vmx_complete_interrupts(vmx);
  6341. }
  6342. static void vmx_free_vcpu(struct kvm_vcpu *vcpu)
  6343. {
  6344. struct vcpu_vmx *vmx = to_vmx(vcpu);
  6345. free_vpid(vmx);
  6346. free_nested(vmx);
  6347. free_loaded_vmcs(vmx->loaded_vmcs);
  6348. kfree(vmx->guest_msrs);
  6349. kvm_vcpu_uninit(vcpu);
  6350. kmem_cache_free(kvm_vcpu_cache, vmx);
  6351. }
  6352. static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id)
  6353. {
  6354. int err;
  6355. struct vcpu_vmx *vmx = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
  6356. int cpu;
  6357. if (!vmx)
  6358. return ERR_PTR(-ENOMEM);
  6359. allocate_vpid(vmx);
  6360. err = kvm_vcpu_init(&vmx->vcpu, kvm, id);
  6361. if (err)
  6362. goto free_vcpu;
  6363. vmx->guest_msrs = kmalloc(PAGE_SIZE, GFP_KERNEL);
  6364. err = -ENOMEM;
  6365. if (!vmx->guest_msrs) {
  6366. goto uninit_vcpu;
  6367. }
  6368. vmx->loaded_vmcs = &vmx->vmcs01;
  6369. vmx->loaded_vmcs->vmcs = alloc_vmcs();
  6370. if (!vmx->loaded_vmcs->vmcs)
  6371. goto free_msrs;
  6372. if (!vmm_exclusive)
  6373. kvm_cpu_vmxon(__pa(per_cpu(vmxarea, raw_smp_processor_id())));
  6374. loaded_vmcs_init(vmx->loaded_vmcs);
  6375. if (!vmm_exclusive)
  6376. kvm_cpu_vmxoff();
  6377. cpu = get_cpu();
  6378. vmx_vcpu_load(&vmx->vcpu, cpu);
  6379. vmx->vcpu.cpu = cpu;
  6380. err = vmx_vcpu_setup(vmx);
  6381. vmx_vcpu_put(&vmx->vcpu);
  6382. put_cpu();
  6383. if (err)
  6384. goto free_vmcs;
  6385. if (vm_need_virtualize_apic_accesses(kvm)) {
  6386. err = alloc_apic_access_page(kvm);
  6387. if (err)
  6388. goto free_vmcs;
  6389. }
  6390. if (enable_ept) {
  6391. if (!kvm->arch.ept_identity_map_addr)
  6392. kvm->arch.ept_identity_map_addr =
  6393. VMX_EPT_IDENTITY_PAGETABLE_ADDR;
  6394. err = -ENOMEM;
  6395. if (alloc_identity_pagetable(kvm) != 0)
  6396. goto free_vmcs;
  6397. if (!init_rmode_identity_map(kvm))
  6398. goto free_vmcs;
  6399. }
  6400. vmx->nested.current_vmptr = -1ull;
  6401. vmx->nested.current_vmcs12 = NULL;
  6402. return &vmx->vcpu;
  6403. free_vmcs:
  6404. free_loaded_vmcs(vmx->loaded_vmcs);
  6405. free_msrs:
  6406. kfree(vmx->guest_msrs);
  6407. uninit_vcpu:
  6408. kvm_vcpu_uninit(&vmx->vcpu);
  6409. free_vcpu:
  6410. free_vpid(vmx);
  6411. kmem_cache_free(kvm_vcpu_cache, vmx);
  6412. return ERR_PTR(err);
  6413. }
  6414. static void __init vmx_check_processor_compat(void *rtn)
  6415. {
  6416. struct vmcs_config vmcs_conf;
  6417. *(int *)rtn = 0;
  6418. if (setup_vmcs_config(&vmcs_conf) < 0)
  6419. *(int *)rtn = -EIO;
  6420. if (memcmp(&vmcs_config, &vmcs_conf, sizeof(struct vmcs_config)) != 0) {
  6421. printk(KERN_ERR "kvm: CPU %d feature inconsistency!\n",
  6422. smp_processor_id());
  6423. *(int *)rtn = -EIO;
  6424. }
  6425. }
  6426. static int get_ept_level(void)
  6427. {
  6428. return VMX_EPT_DEFAULT_GAW + 1;
  6429. }
  6430. static u64 vmx_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio)
  6431. {
  6432. u64 ret;
  6433. /* For VT-d and EPT combination
  6434. * 1. MMIO: always map as UC
  6435. * 2. EPT with VT-d:
  6436. * a. VT-d without snooping control feature: can't guarantee the
  6437. * result, try to trust guest.
  6438. * b. VT-d with snooping control feature: snooping control feature of
  6439. * VT-d engine can guarantee the cache correctness. Just set it
  6440. * to WB to keep consistent with host. So the same as item 3.
  6441. * 3. EPT without VT-d: always map as WB and set IPAT=1 to keep
  6442. * consistent with host MTRR
  6443. */
  6444. if (is_mmio)
  6445. ret = MTRR_TYPE_UNCACHABLE << VMX_EPT_MT_EPTE_SHIFT;
  6446. else if (vcpu->kvm->arch.iommu_domain &&
  6447. !(vcpu->kvm->arch.iommu_flags & KVM_IOMMU_CACHE_COHERENCY))
  6448. ret = kvm_get_guest_memory_type(vcpu, gfn) <<
  6449. VMX_EPT_MT_EPTE_SHIFT;
  6450. else
  6451. ret = (MTRR_TYPE_WRBACK << VMX_EPT_MT_EPTE_SHIFT)
  6452. | VMX_EPT_IPAT_BIT;
  6453. return ret;
  6454. }
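/*
 * Editorial sketch (not part of the original file): the two common results
 * of vmx_get_mt_mask(), assuming the usual EPT PTE layout (memory type in
 * bits 5:3, "ignore PAT" in bit 6).
 */
static unsigned long long example_ept_memtype(int is_mmio)
{
	if (is_mmio)
		return 0ULL << 3;		/* UC: MTRR type 0 in bits 5:3    */
	return (6ULL << 3) | (1ULL << 6);	/* WB: type 6, plus IPAT in bit 6 */
}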
  6455. static int vmx_get_lpage_level(void)
  6456. {
  6457. if (enable_ept && !cpu_has_vmx_ept_1g_page())
  6458. return PT_DIRECTORY_LEVEL;
  6459. else
6460. /* Both shadow paging and EPT with 1GB-page support allow 1GB pages */
  6461. return PT_PDPE_LEVEL;
  6462. }
  6463. static void vmx_cpuid_update(struct kvm_vcpu *vcpu)
  6464. {
  6465. struct kvm_cpuid_entry2 *best;
  6466. struct vcpu_vmx *vmx = to_vmx(vcpu);
  6467. u32 exec_control;
  6468. vmx->rdtscp_enabled = false;
  6469. if (vmx_rdtscp_supported()) {
  6470. exec_control = vmcs_read32(SECONDARY_VM_EXEC_CONTROL);
  6471. if (exec_control & SECONDARY_EXEC_RDTSCP) {
  6472. best = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
  6473. if (best && (best->edx & bit(X86_FEATURE_RDTSCP)))
  6474. vmx->rdtscp_enabled = true;
  6475. else {
  6476. exec_control &= ~SECONDARY_EXEC_RDTSCP;
  6477. vmcs_write32(SECONDARY_VM_EXEC_CONTROL,
  6478. exec_control);
  6479. }
  6480. }
  6481. }
  6482. /* Exposing INVPCID only when PCID is exposed */
  6483. best = kvm_find_cpuid_entry(vcpu, 0x7, 0);
  6484. if (vmx_invpcid_supported() &&
  6485. best && (best->ebx & bit(X86_FEATURE_INVPCID)) &&
  6486. guest_cpuid_has_pcid(vcpu)) {
  6487. exec_control = vmcs_read32(SECONDARY_VM_EXEC_CONTROL);
  6488. exec_control |= SECONDARY_EXEC_ENABLE_INVPCID;
  6489. vmcs_write32(SECONDARY_VM_EXEC_CONTROL,
  6490. exec_control);
  6491. } else {
  6492. if (cpu_has_secondary_exec_ctrls()) {
  6493. exec_control = vmcs_read32(SECONDARY_VM_EXEC_CONTROL);
  6494. exec_control &= ~SECONDARY_EXEC_ENABLE_INVPCID;
  6495. vmcs_write32(SECONDARY_VM_EXEC_CONTROL,
  6496. exec_control);
  6497. }
  6498. if (best)
  6499. best->ebx &= ~bit(X86_FEATURE_INVPCID);
  6500. }
  6501. }
  6502. static void vmx_set_supported_cpuid(u32 func, struct kvm_cpuid_entry2 *entry)
  6503. {
  6504. if (func == 1 && nested)
  6505. entry->ecx |= bit(X86_FEATURE_VMX);
  6506. }
  6507. static void nested_ept_inject_page_fault(struct kvm_vcpu *vcpu,
  6508. struct x86_exception *fault)
  6509. {
  6510. struct vmcs12 *vmcs12;
  6511. nested_vmx_vmexit(vcpu);
  6512. vmcs12 = get_vmcs12(vcpu);
  6513. if (fault->error_code & PFERR_RSVD_MASK)
  6514. vmcs12->vm_exit_reason = EXIT_REASON_EPT_MISCONFIG;
  6515. else
  6516. vmcs12->vm_exit_reason = EXIT_REASON_EPT_VIOLATION;
  6517. vmcs12->exit_qualification = vcpu->arch.exit_qualification;
  6518. vmcs12->guest_physical_address = fault->address;
  6519. }
  6520. /* Callbacks for nested_ept_init_mmu_context: */
  6521. static unsigned long nested_ept_get_cr3(struct kvm_vcpu *vcpu)
  6522. {
  6523. /* return the page table to be shadowed - in our case, EPT12 */
  6524. return get_vmcs12(vcpu)->ept_pointer;
  6525. }
  6526. static int nested_ept_init_mmu_context(struct kvm_vcpu *vcpu)
  6527. {
  6528. int r = kvm_init_shadow_ept_mmu(vcpu, &vcpu->arch.mmu,
  6529. nested_vmx_ept_caps & VMX_EPT_EXECUTE_ONLY_BIT);
  6530. vcpu->arch.mmu.set_cr3 = vmx_set_cr3;
  6531. vcpu->arch.mmu.get_cr3 = nested_ept_get_cr3;
  6532. vcpu->arch.mmu.inject_page_fault = nested_ept_inject_page_fault;
  6533. vcpu->arch.walk_mmu = &vcpu->arch.nested_mmu;
  6534. return r;
  6535. }
  6536. static void nested_ept_uninit_mmu_context(struct kvm_vcpu *vcpu)
  6537. {
  6538. vcpu->arch.walk_mmu = &vcpu->arch.mmu;
  6539. }
  6540. /*
  6541. * prepare_vmcs02 is called when the L1 guest hypervisor runs its nested
  6542. * L2 guest. L1 has a vmcs for L2 (vmcs12), and this function "merges" it
6543. * with L0's requirements for its guest (a.k.a. vmcs01), so we can run the L2
  6544. * guest in a way that will both be appropriate to L1's requests, and our
  6545. * needs. In addition to modifying the active vmcs (which is vmcs02), this
6546. * function also has some necessary side effects, such as setting various
  6547. * vcpu->arch fields.
  6548. */
static void prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	u32 exec_control;

	vmcs_write16(GUEST_ES_SELECTOR, vmcs12->guest_es_selector);
	vmcs_write16(GUEST_CS_SELECTOR, vmcs12->guest_cs_selector);
	vmcs_write16(GUEST_SS_SELECTOR, vmcs12->guest_ss_selector);
	vmcs_write16(GUEST_DS_SELECTOR, vmcs12->guest_ds_selector);
	vmcs_write16(GUEST_FS_SELECTOR, vmcs12->guest_fs_selector);
	vmcs_write16(GUEST_GS_SELECTOR, vmcs12->guest_gs_selector);
	vmcs_write16(GUEST_LDTR_SELECTOR, vmcs12->guest_ldtr_selector);
	vmcs_write16(GUEST_TR_SELECTOR, vmcs12->guest_tr_selector);
	vmcs_write32(GUEST_ES_LIMIT, vmcs12->guest_es_limit);
	vmcs_write32(GUEST_CS_LIMIT, vmcs12->guest_cs_limit);
	vmcs_write32(GUEST_SS_LIMIT, vmcs12->guest_ss_limit);
	vmcs_write32(GUEST_DS_LIMIT, vmcs12->guest_ds_limit);
	vmcs_write32(GUEST_FS_LIMIT, vmcs12->guest_fs_limit);
	vmcs_write32(GUEST_GS_LIMIT, vmcs12->guest_gs_limit);
	vmcs_write32(GUEST_LDTR_LIMIT, vmcs12->guest_ldtr_limit);
	vmcs_write32(GUEST_TR_LIMIT, vmcs12->guest_tr_limit);
	vmcs_write32(GUEST_GDTR_LIMIT, vmcs12->guest_gdtr_limit);
	vmcs_write32(GUEST_IDTR_LIMIT, vmcs12->guest_idtr_limit);
	vmcs_write32(GUEST_ES_AR_BYTES, vmcs12->guest_es_ar_bytes);
	vmcs_write32(GUEST_CS_AR_BYTES, vmcs12->guest_cs_ar_bytes);
	vmcs_write32(GUEST_SS_AR_BYTES, vmcs12->guest_ss_ar_bytes);
	vmcs_write32(GUEST_DS_AR_BYTES, vmcs12->guest_ds_ar_bytes);
	vmcs_write32(GUEST_FS_AR_BYTES, vmcs12->guest_fs_ar_bytes);
	vmcs_write32(GUEST_GS_AR_BYTES, vmcs12->guest_gs_ar_bytes);
	vmcs_write32(GUEST_LDTR_AR_BYTES, vmcs12->guest_ldtr_ar_bytes);
	vmcs_write32(GUEST_TR_AR_BYTES, vmcs12->guest_tr_ar_bytes);
	vmcs_writel(GUEST_ES_BASE, vmcs12->guest_es_base);
	vmcs_writel(GUEST_CS_BASE, vmcs12->guest_cs_base);
	vmcs_writel(GUEST_SS_BASE, vmcs12->guest_ss_base);
	vmcs_writel(GUEST_DS_BASE, vmcs12->guest_ds_base);
	vmcs_writel(GUEST_FS_BASE, vmcs12->guest_fs_base);
	vmcs_writel(GUEST_GS_BASE, vmcs12->guest_gs_base);
	vmcs_writel(GUEST_LDTR_BASE, vmcs12->guest_ldtr_base);
	vmcs_writel(GUEST_TR_BASE, vmcs12->guest_tr_base);
	vmcs_writel(GUEST_GDTR_BASE, vmcs12->guest_gdtr_base);
	vmcs_writel(GUEST_IDTR_BASE, vmcs12->guest_idtr_base);

	vmcs_write64(GUEST_IA32_DEBUGCTL, vmcs12->guest_ia32_debugctl);
	vmcs_write32(VM_ENTRY_INTR_INFO_FIELD,
		vmcs12->vm_entry_intr_info_field);
	vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE,
		vmcs12->vm_entry_exception_error_code);
	vmcs_write32(VM_ENTRY_INSTRUCTION_LEN,
		vmcs12->vm_entry_instruction_len);
	vmcs_write32(GUEST_INTERRUPTIBILITY_INFO,
		vmcs12->guest_interruptibility_info);
	vmcs_write32(GUEST_SYSENTER_CS, vmcs12->guest_sysenter_cs);
	kvm_set_dr(vcpu, 7, vmcs12->guest_dr7);
	vmx_set_rflags(vcpu, vmcs12->guest_rflags);
	vmcs_writel(GUEST_PENDING_DBG_EXCEPTIONS,
		vmcs12->guest_pending_dbg_exceptions);
	vmcs_writel(GUEST_SYSENTER_ESP, vmcs12->guest_sysenter_esp);
	vmcs_writel(GUEST_SYSENTER_EIP, vmcs12->guest_sysenter_eip);

	vmcs_write64(VMCS_LINK_POINTER, -1ull);

	vmcs_write32(PIN_BASED_VM_EXEC_CONTROL,
		(vmcs_config.pin_based_exec_ctrl |
		 vmcs12->pin_based_vm_exec_control));

	if (vmcs12->pin_based_vm_exec_control & PIN_BASED_VMX_PREEMPTION_TIMER)
		vmcs_write32(VMX_PREEMPTION_TIMER_VALUE,
			     vmcs12->vmx_preemption_timer_value);

	/*
	 * Whether page-faults are trapped is determined by a combination of
	 * 3 settings: PFEC_MASK, PFEC_MATCH and EXCEPTION_BITMAP.PF.
	 * If enable_ept, L0 doesn't care about page faults and we should
	 * set all of these to L1's desires. However, if !enable_ept, L0 does
	 * care about (at least some) page faults, and because it is not easy
	 * (if at all possible?) to merge L0 and L1's desires, we simply ask
	 * to exit on each and every L2 page fault. This is done by setting
	 * MASK=MATCH=0 and (see below) EB.PF=1.
	 * Note that below we don't need special code to set EB.PF beyond the
	 * "or"ing of the EB of vmcs01 and vmcs12, because when enable_ept,
	 * vmcs01's EB.PF is 0 so the "or" will take vmcs12's value, and when
	 * !enable_ept, EB.PF is 1, so the "or" will always be 1.
	 *
	 * A problem with this approach (when !enable_ept) is that L1 may be
	 * injected with more page faults than it asked for. This could have
	 * caused problems, but in practice existing hypervisors don't care.
	 * To fix this, we will need to emulate the PFEC checking (on the L1
	 * page tables), using walk_addr(), when injecting PFs to L1.
	 */
	vmcs_write32(PAGE_FAULT_ERROR_CODE_MASK,
		enable_ept ? vmcs12->page_fault_error_code_mask : 0);
	vmcs_write32(PAGE_FAULT_ERROR_CODE_MATCH,
		enable_ept ? vmcs12->page_fault_error_code_match : 0);
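	/*
	 * Why MASK=MATCH=0 means "every page fault exits": a #PF vmexits
	 * when EB.PF=1 and (PFEC & PFEC_MASK) == PFEC_MATCH; with both set
	 * to 0 that comparison is 0 == 0 and always holds, so with EB.PF=1
	 * every L2 page fault goes to L0.
	 */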
	if (cpu_has_secondary_exec_ctrls()) {
		u32 exec_control = vmx_secondary_exec_control(vmx);
		if (!vmx->rdtscp_enabled)
			exec_control &= ~SECONDARY_EXEC_RDTSCP;
		/* Take the following fields only from vmcs12 */
		exec_control &= ~SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
		if (nested_cpu_has(vmcs12,
				CPU_BASED_ACTIVATE_SECONDARY_CONTROLS))
			exec_control |= vmcs12->secondary_vm_exec_control;

		if (exec_control & SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES) {
			/*
			 * Translate L1 physical address to host physical
			 * address for vmcs02. Keep the page pinned, so this
			 * physical address remains valid. We keep a reference
			 * to it so we can release it later.
			 */
			if (vmx->nested.apic_access_page) /* shouldn't happen */
				nested_release_page(vmx->nested.apic_access_page);
			vmx->nested.apic_access_page =
				nested_get_page(vcpu, vmcs12->apic_access_addr);
			/*
			 * If translation failed, no matter: This feature asks
			 * to exit when accessing the given address, and if it
			 * can never be accessed, this feature won't do
			 * anything anyway.
			 */
			if (!vmx->nested.apic_access_page)
				exec_control &=
				    ~SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
			else
				vmcs_write64(APIC_ACCESS_ADDR,
				    page_to_phys(vmx->nested.apic_access_page));
		}

		vmcs_write32(SECONDARY_VM_EXEC_CONTROL, exec_control);
	}
	/*
	 * Set host-state according to L0's settings (vmcs12 is irrelevant here)
	 * Some constant fields are set here by vmx_set_constant_host_state().
	 * Other fields are different per CPU, and will be set later when
	 * vmx_vcpu_load() is called, and when vmx_save_host_state() is called.
	 */
	vmx_set_constant_host_state(vmx);

	/*
	 * HOST_RSP is normally set correctly in vmx_vcpu_run() just before
	 * entry, but only if the current (host) sp changed from the value
	 * we wrote last (vmx->host_rsp). This cache is no longer relevant
	 * if we switch vmcs, and rather than hold a separate cache per vmcs,
	 * here we just force the write to happen on entry.
	 */
	vmx->host_rsp = 0;

	exec_control = vmx_exec_control(vmx); /* L0's desires */
	exec_control &= ~CPU_BASED_VIRTUAL_INTR_PENDING;
	exec_control &= ~CPU_BASED_VIRTUAL_NMI_PENDING;
	exec_control &= ~CPU_BASED_TPR_SHADOW;
	exec_control |= vmcs12->cpu_based_vm_exec_control;
	/*
	 * Merging of IO and MSR bitmaps not currently supported.
	 * Rather, exit every time.
	 */
	exec_control &= ~CPU_BASED_USE_MSR_BITMAPS;
	exec_control &= ~CPU_BASED_USE_IO_BITMAPS;
	exec_control |= CPU_BASED_UNCOND_IO_EXITING;

	vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, exec_control);

	/* EXCEPTION_BITMAP and CR0_GUEST_HOST_MASK should basically be the
	 * bitwise-or of what L1 wants to trap for L2, and what we want to
	 * trap. Note that CR0.TS also needs updating - we do this later.
	 */
	update_exception_bitmap(vcpu);
	vcpu->arch.cr0_guest_owned_bits &= ~vmcs12->cr0_guest_host_mask;
	vmcs_writel(CR0_GUEST_HOST_MASK, ~vcpu->arch.cr0_guest_owned_bits);

	/* L2->L1 exit controls are emulated - the hardware exit is to L0 so
	 * we should use its exit controls. Note that VM_EXIT_LOAD_IA32_EFER
	 * bits are further modified by vmx_set_efer() below.
	 */
	vmcs_write32(VM_EXIT_CONTROLS, vmcs_config.vmexit_ctrl);

	/* vmcs12's VM_ENTRY_LOAD_IA32_EFER and VM_ENTRY_IA32E_MODE are
	 * emulated by vmx_set_efer(), below.
	 */
	vmcs_write32(VM_ENTRY_CONTROLS,
		(vmcs12->vm_entry_controls & ~VM_ENTRY_LOAD_IA32_EFER &
			~VM_ENTRY_IA32E_MODE) |
		(vmcs_config.vmentry_ctrl & ~VM_ENTRY_IA32E_MODE));

	if (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_PAT) {
		vmcs_write64(GUEST_IA32_PAT, vmcs12->guest_ia32_pat);
		vcpu->arch.pat = vmcs12->guest_ia32_pat;
	} else if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT)
		vmcs_write64(GUEST_IA32_PAT, vmx->vcpu.arch.pat);

	set_cr4_guest_host_mask(vmx);

	if (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETING)
		vmcs_write64(TSC_OFFSET,
			vmx->nested.vmcs01_tsc_offset + vmcs12->tsc_offset);
	else
		vmcs_write64(TSC_OFFSET, vmx->nested.vmcs01_tsc_offset);
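	/*
	 * In other words, the effective offset for vmcs02 is
	 *   offset02 = offset01 + offset12,
	 * so L2 reads TSC_hardware + offset01 + offset12: L1's own offset is
	 * preserved, and L1's requested offset for L2 is applied on top of it.
	 */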
	if (enable_vpid) {
		/*
		 * Trivially support vpid by letting L2s share their parent
		 * L1's vpid. TODO: move to a more elaborate solution, giving
		 * each L2 its own vpid and exposing the vpid feature to L1.
		 */
		vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->vpid);
		vmx_flush_tlb(vcpu);
	}

	if (nested_cpu_has_ept(vmcs12)) {
		kvm_mmu_unload(vcpu);
		nested_ept_init_mmu_context(vcpu);
	}

	if (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_EFER)
		vcpu->arch.efer = vmcs12->guest_ia32_efer;
	else if (vmcs12->vm_entry_controls & VM_ENTRY_IA32E_MODE)
		vcpu->arch.efer |= (EFER_LMA | EFER_LME);
	else
		vcpu->arch.efer &= ~(EFER_LMA | EFER_LME);

	/* Note: modifies VM_ENTRY/EXIT_CONTROLS and GUEST/HOST_IA32_EFER */
	vmx_set_efer(vcpu, vcpu->arch.efer);

	/*
	 * This sets GUEST_CR0 to vmcs12->guest_cr0, with possibly a modified
	 * TS bit (for lazy fpu) and bits which we consider mandatory enabled.
	 * The CR0_READ_SHADOW is what L2 should have expected to read given
	 * the specifications by L1; It's not enough to take
	 * vmcs12->cr0_read_shadow because on our cr0_guest_host_mask we
	 * have more bits than L1 expected.
	 */
	vmx_set_cr0(vcpu, vmcs12->guest_cr0);
	vmcs_writel(CR0_READ_SHADOW, nested_read_cr0(vmcs12));

	vmx_set_cr4(vcpu, vmcs12->guest_cr4);
	vmcs_writel(CR4_READ_SHADOW, nested_read_cr4(vmcs12));

	/* shadow page tables on either EPT or shadow page tables */
	kvm_set_cr3(vcpu, vmcs12->guest_cr3);
	kvm_mmu_reset_context(vcpu);

	/*
	 * L1 may access the L2's PDPTR, so save them to construct vmcs12
	 */
	if (enable_ept) {
		vmcs_write64(GUEST_PDPTR0, vmcs12->guest_pdptr0);
		vmcs_write64(GUEST_PDPTR1, vmcs12->guest_pdptr1);
		vmcs_write64(GUEST_PDPTR2, vmcs12->guest_pdptr2);
		vmcs_write64(GUEST_PDPTR3, vmcs12->guest_pdptr3);
		__clear_bit(VCPU_EXREG_PDPTR,
				(unsigned long *)&vcpu->arch.regs_avail);
		__clear_bit(VCPU_EXREG_PDPTR,
				(unsigned long *)&vcpu->arch.regs_dirty);
	}
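	/*
	 * Clearing VCPU_EXREG_PDPTR in regs_avail/regs_dirty above forces the
	 * next consumer of the PDPTRs to re-read them from the VMCS instead
	 * of reusing (or writing back) values that were cached for vmcs01.
	 */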
	kvm_register_write(vcpu, VCPU_REGS_RSP, vmcs12->guest_rsp);
	kvm_register_write(vcpu, VCPU_REGS_RIP, vmcs12->guest_rip);
}
/*
 * nested_vmx_run() handles a nested entry, i.e., a VMLAUNCH or VMRESUME on L1
 * for running an L2 nested guest.
 */
static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
{
	struct vmcs12 *vmcs12;
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	int cpu;
	struct loaded_vmcs *vmcs02;
	bool ia32e;

	if (!nested_vmx_check_permission(vcpu) ||
	    !nested_vmx_check_vmcs12(vcpu))
		return 1;

	skip_emulated_instruction(vcpu);
	vmcs12 = get_vmcs12(vcpu);

	if (enable_shadow_vmcs)
		copy_shadow_to_vmcs12(vmx);

	/*
	 * The nested entry process starts with enforcing various prerequisites
	 * on vmcs12 as required by the Intel SDM, and acting appropriately when
	 * they fail: As the SDM explains, some conditions should cause the
	 * instruction to fail, while others will cause the instruction to seem
	 * to succeed, but return an EXIT_REASON_INVALID_STATE.
	 * To speed up the normal (success) code path, we should avoid checking
	 * for misconfigurations which will anyway be caught by the processor
	 * when using the merged vmcs02.
	 */
	if (vmcs12->launch_state == launch) {
		nested_vmx_failValid(vcpu,
			launch ? VMXERR_VMLAUNCH_NONCLEAR_VMCS
			       : VMXERR_VMRESUME_NONLAUNCHED_VMCS);
		return 1;
	}
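	/*
	 * launch_state is 0 for a VMCS that has never been launched (clear)
	 * and 1 once it has been launched, so the check above rejects
	 * VMLAUNCH on an already-launched VMCS and VMRESUME on one that was
	 * never launched, with the error codes the SDM prescribes.
	 */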
	if (vmcs12->guest_activity_state != GUEST_ACTIVITY_ACTIVE) {
		nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD);
		return 1;
	}

	if ((vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_MSR_BITMAPS) &&
	    !IS_ALIGNED(vmcs12->msr_bitmap, PAGE_SIZE)) {
		/*TODO: Also verify bits beyond physical address width are 0*/
		nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD);
		return 1;
	}

	if (nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES) &&
	    !IS_ALIGNED(vmcs12->apic_access_addr, PAGE_SIZE)) {
		/*TODO: Also verify bits beyond physical address width are 0*/
		nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD);
		return 1;
	}

	if (vmcs12->vm_entry_msr_load_count > 0 ||
	    vmcs12->vm_exit_msr_load_count > 0 ||
	    vmcs12->vm_exit_msr_store_count > 0) {
		pr_warn_ratelimited("%s: VMCS MSR_{LOAD,STORE} unsupported\n",
				    __func__);
		nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD);
		return 1;
	}

	if (!vmx_control_verify(vmcs12->cpu_based_vm_exec_control,
	      nested_vmx_procbased_ctls_low, nested_vmx_procbased_ctls_high) ||
	    !vmx_control_verify(vmcs12->secondary_vm_exec_control,
	      nested_vmx_secondary_ctls_low, nested_vmx_secondary_ctls_high) ||
	    !vmx_control_verify(vmcs12->pin_based_vm_exec_control,
	      nested_vmx_pinbased_ctls_low, nested_vmx_pinbased_ctls_high) ||
	    !vmx_control_verify(vmcs12->vm_exit_controls,
	      nested_vmx_exit_ctls_low, nested_vmx_exit_ctls_high) ||
	    !vmx_control_verify(vmcs12->vm_entry_controls,
	      nested_vmx_entry_ctls_low, nested_vmx_entry_ctls_high))
	{
		nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD);
		return 1;
	}

	if (((vmcs12->host_cr0 & VMXON_CR0_ALWAYSON) != VMXON_CR0_ALWAYSON) ||
	    ((vmcs12->host_cr4 & VMXON_CR4_ALWAYSON) != VMXON_CR4_ALWAYSON)) {
		nested_vmx_failValid(vcpu,
			VMXERR_ENTRY_INVALID_HOST_STATE_FIELD);
		return 1;
	}

	if (((vmcs12->guest_cr0 & VMXON_CR0_ALWAYSON) != VMXON_CR0_ALWAYSON) ||
	    ((vmcs12->guest_cr4 & VMXON_CR4_ALWAYSON) != VMXON_CR4_ALWAYSON)) {
		nested_vmx_entry_failure(vcpu, vmcs12,
			EXIT_REASON_INVALID_STATE, ENTRY_FAIL_DEFAULT);
		return 1;
	}
	if (vmcs12->vmcs_link_pointer != -1ull) {
		nested_vmx_entry_failure(vcpu, vmcs12,
			EXIT_REASON_INVALID_STATE, ENTRY_FAIL_VMCS_LINK_PTR);
		return 1;
	}

	/*
	 * If the load IA32_EFER VM-entry control is 1, the following checks
	 * are performed on the field for the IA32_EFER MSR:
	 * - Bits reserved in the IA32_EFER MSR must be 0.
	 * - Bit 10 (corresponding to IA32_EFER.LMA) must equal the value of
	 *   the IA-32e mode guest VM-entry control. It must also be identical
	 *   to bit 8 (LME) if bit 31 in the CR0 field (corresponding to
	 *   CR0.PG) is 1.
	 */
	if (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_EFER) {
		ia32e = (vmcs12->vm_entry_controls & VM_ENTRY_IA32E_MODE) != 0;
		if (!kvm_valid_efer(vcpu, vmcs12->guest_ia32_efer) ||
		    ia32e != !!(vmcs12->guest_ia32_efer & EFER_LMA) ||
		    ((vmcs12->guest_cr0 & X86_CR0_PG) &&
		     ia32e != !!(vmcs12->guest_ia32_efer & EFER_LME))) {
			nested_vmx_entry_failure(vcpu, vmcs12,
				EXIT_REASON_INVALID_STATE, ENTRY_FAIL_DEFAULT);
			return 1;
		}
	}

	/*
	 * If the load IA32_EFER VM-exit control is 1, bits reserved in the
	 * IA32_EFER MSR must be 0 in the field for that register. In addition,
	 * the values of the LMA and LME bits in the field must each be that of
	 * the host address-space size VM-exit control.
	 */
	if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_EFER) {
		ia32e = (vmcs12->vm_exit_controls &
			 VM_EXIT_HOST_ADDR_SPACE_SIZE) != 0;
		if (!kvm_valid_efer(vcpu, vmcs12->host_ia32_efer) ||
		    ia32e != !!(vmcs12->host_ia32_efer & EFER_LMA) ||
		    ia32e != !!(vmcs12->host_ia32_efer & EFER_LME)) {
			nested_vmx_entry_failure(vcpu, vmcs12,
				EXIT_REASON_INVALID_STATE, ENTRY_FAIL_DEFAULT);
			return 1;
		}
	}

	/*
	 * We're finally done with prerequisite checking, and can start with
	 * the nested entry.
	 */
	vmcs02 = nested_get_current_vmcs02(vmx);
	if (!vmcs02)
		return -ENOMEM;

	enter_guest_mode(vcpu);

	vmx->nested.vmcs01_tsc_offset = vmcs_read64(TSC_OFFSET);

	cpu = get_cpu();
	vmx->loaded_vmcs = vmcs02;
	vmx_vcpu_put(vcpu);
	vmx_vcpu_load(vcpu, cpu);
	vcpu->cpu = cpu;
	put_cpu();
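	/*
	 * The sequence above switches the active VMCS: loaded_vmcs is pointed
	 * at vmcs02, then vmx_vcpu_put()/vmx_vcpu_load() re-run the unload/
	 * load path so vmcs02 becomes current on this cpu and the per-cpu
	 * host state is set up for it, with preemption disabled so we cannot
	 * migrate in the middle of the switch.
	 */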
	vmx_segment_cache_clear(vmx);

	vmcs12->launch_state = 1;

	prepare_vmcs02(vcpu, vmcs12);

	/*
	 * Note no nested_vmx_succeed or nested_vmx_fail here. At this point
	 * we are no longer running L1, and VMLAUNCH/VMRESUME has not yet
	 * returned as far as L1 is concerned. It will only return (and set
	 * the success flag) when L2 exits (see nested_vmx_vmexit()).
	 */
	return 1;
}
/*
 * On a nested exit from L2 to L1, vmcs12.guest_cr0 might not be up-to-date
 * because L2 may have changed some cr0 bits directly (CR0_GUEST_HOST_MASK).
 * This function returns the new value we should put in vmcs12.guest_cr0.
 * It's not enough to just return the vmcs02 GUEST_CR0. Rather,
 *  1. Bits that neither L0 nor L1 trapped, were set directly by L2 and are now
 *     available in vmcs02 GUEST_CR0. (Note: It's enough to check that L0
 *     didn't trap the bit, because if L1 did, so would L0).
 *  2. Bits that L1 asked to trap (and therefore L0 also did) could not have
 *     been modified by L2, and L1 knows it. So just leave the old value of
 *     the bit from vmcs12.guest_cr0. Note that the bit from vmcs02 GUEST_CR0
 *     isn't relevant, because if L0 traps this bit it can set it to anything.
 *  3. Bits that L1 didn't trap, but L0 did. L1 believes the guest could have
 *     changed these bits, and therefore they need to be updated, but L0
 *     didn't necessarily allow them to be changed in GUEST_CR0 - and rather
 *     put them in vmcs02 CR0_READ_SHADOW. So take these bits from there.
 */
static inline unsigned long
vmcs12_guest_cr0(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
{
	return
	/*1*/	(vmcs_readl(GUEST_CR0) & vcpu->arch.cr0_guest_owned_bits) |
	/*2*/	(vmcs12->guest_cr0 & vmcs12->cr0_guest_host_mask) |
	/*3*/	(vmcs_readl(CR0_READ_SHADOW) & ~(vmcs12->cr0_guest_host_mask |
			vcpu->arch.cr0_guest_owned_bits));
}
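/*
 * Concrete example of case 3 (illustrative, assuming the common lazy-FPU
 * setup): L0 traps CR0.TS while the guest FPU is inactive but L1 chose not
 * to, so TS is neither in cr0_guest_owned_bits nor in L1's
 * cr0_guest_host_mask; the value L2 is supposed to see lives in vmcs02's
 * CR0_READ_SHADOW, which is where the third term picks it up.
 */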
static inline unsigned long
vmcs12_guest_cr4(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
{
	return
	/*1*/	(vmcs_readl(GUEST_CR4) & vcpu->arch.cr4_guest_owned_bits) |
	/*2*/	(vmcs12->guest_cr4 & vmcs12->cr4_guest_host_mask) |
	/*3*/	(vmcs_readl(CR4_READ_SHADOW) & ~(vmcs12->cr4_guest_host_mask |
			vcpu->arch.cr4_guest_owned_bits));
}
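/*
 * vmcs12_save_pending_event() below encodes whichever event was still queued
 * on the vcpu at the time of the nested vmexit - exception, NMI, or
 * external/soft interrupt - into vmcs12's IDT-vectoring info field, using
 * the same layout the hardware would have used, so that L1 can see it and
 * re-inject it on its next entry.
 */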
static void vmcs12_save_pending_event(struct kvm_vcpu *vcpu,
				      struct vmcs12 *vmcs12)
{
	u32 idt_vectoring;
	unsigned int nr;

	if (vcpu->arch.exception.pending) {
		nr = vcpu->arch.exception.nr;
		idt_vectoring = nr | VECTORING_INFO_VALID_MASK;

		if (kvm_exception_is_soft(nr)) {
			vmcs12->vm_exit_instruction_len =
				vcpu->arch.event_exit_inst_len;
			idt_vectoring |= INTR_TYPE_SOFT_EXCEPTION;
		} else
			idt_vectoring |= INTR_TYPE_HARD_EXCEPTION;

		if (vcpu->arch.exception.has_error_code) {
			idt_vectoring |= VECTORING_INFO_DELIVER_CODE_MASK;
			vmcs12->idt_vectoring_error_code =
				vcpu->arch.exception.error_code;
		}

		vmcs12->idt_vectoring_info_field = idt_vectoring;
	} else if (vcpu->arch.nmi_pending) {
		vmcs12->idt_vectoring_info_field =
			INTR_TYPE_NMI_INTR | INTR_INFO_VALID_MASK | NMI_VECTOR;
	} else if (vcpu->arch.interrupt.pending) {
		nr = vcpu->arch.interrupt.nr;
		idt_vectoring = nr | VECTORING_INFO_VALID_MASK;

		if (vcpu->arch.interrupt.soft) {
			idt_vectoring |= INTR_TYPE_SOFT_INTR;
			vmcs12->vm_entry_instruction_len =
				vcpu->arch.event_exit_inst_len;
		} else
			idt_vectoring |= INTR_TYPE_EXT_INTR;

		vmcs12->idt_vectoring_info_field = idt_vectoring;
	}
}
/*
 * prepare_vmcs12 is part of what we need to do when the nested L2 guest exits
 * and we want to prepare to run its L1 parent. L1 keeps a vmcs for L2 (vmcs12),
 * and this function updates it to reflect the changes to the guest state while
 * L2 was running (and perhaps made some exits which were handled directly by L0
 * without going back to L1), and to reflect the exit reason.
 * Note that we do not have to copy here all VMCS fields, just those that
 * could have changed by the L2 guest or the exit - i.e., the guest-state and
 * exit-information fields only. Other fields are modified by L1 with VMWRITE,
 * which already writes to vmcs12 directly.
 */
static void prepare_vmcs12(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
{
	/* update guest state fields: */
	vmcs12->guest_cr0 = vmcs12_guest_cr0(vcpu, vmcs12);
	vmcs12->guest_cr4 = vmcs12_guest_cr4(vcpu, vmcs12);

	kvm_get_dr(vcpu, 7, (unsigned long *)&vmcs12->guest_dr7);
	vmcs12->guest_rsp = kvm_register_read(vcpu, VCPU_REGS_RSP);
	vmcs12->guest_rip = kvm_register_read(vcpu, VCPU_REGS_RIP);
	vmcs12->guest_rflags = vmcs_readl(GUEST_RFLAGS);

	vmcs12->guest_es_selector = vmcs_read16(GUEST_ES_SELECTOR);
	vmcs12->guest_cs_selector = vmcs_read16(GUEST_CS_SELECTOR);
	vmcs12->guest_ss_selector = vmcs_read16(GUEST_SS_SELECTOR);
	vmcs12->guest_ds_selector = vmcs_read16(GUEST_DS_SELECTOR);
	vmcs12->guest_fs_selector = vmcs_read16(GUEST_FS_SELECTOR);
	vmcs12->guest_gs_selector = vmcs_read16(GUEST_GS_SELECTOR);
	vmcs12->guest_ldtr_selector = vmcs_read16(GUEST_LDTR_SELECTOR);
	vmcs12->guest_tr_selector = vmcs_read16(GUEST_TR_SELECTOR);
	vmcs12->guest_es_limit = vmcs_read32(GUEST_ES_LIMIT);
	vmcs12->guest_cs_limit = vmcs_read32(GUEST_CS_LIMIT);
	vmcs12->guest_ss_limit = vmcs_read32(GUEST_SS_LIMIT);
	vmcs12->guest_ds_limit = vmcs_read32(GUEST_DS_LIMIT);
	vmcs12->guest_fs_limit = vmcs_read32(GUEST_FS_LIMIT);
	vmcs12->guest_gs_limit = vmcs_read32(GUEST_GS_LIMIT);
	vmcs12->guest_ldtr_limit = vmcs_read32(GUEST_LDTR_LIMIT);
	vmcs12->guest_tr_limit = vmcs_read32(GUEST_TR_LIMIT);
	vmcs12->guest_gdtr_limit = vmcs_read32(GUEST_GDTR_LIMIT);
	vmcs12->guest_idtr_limit = vmcs_read32(GUEST_IDTR_LIMIT);
	vmcs12->guest_es_ar_bytes = vmcs_read32(GUEST_ES_AR_BYTES);
	vmcs12->guest_cs_ar_bytes = vmcs_read32(GUEST_CS_AR_BYTES);
	vmcs12->guest_ss_ar_bytes = vmcs_read32(GUEST_SS_AR_BYTES);
	vmcs12->guest_ds_ar_bytes = vmcs_read32(GUEST_DS_AR_BYTES);
	vmcs12->guest_fs_ar_bytes = vmcs_read32(GUEST_FS_AR_BYTES);
	vmcs12->guest_gs_ar_bytes = vmcs_read32(GUEST_GS_AR_BYTES);
	vmcs12->guest_ldtr_ar_bytes = vmcs_read32(GUEST_LDTR_AR_BYTES);
	vmcs12->guest_tr_ar_bytes = vmcs_read32(GUEST_TR_AR_BYTES);
	vmcs12->guest_es_base = vmcs_readl(GUEST_ES_BASE);
	vmcs12->guest_cs_base = vmcs_readl(GUEST_CS_BASE);
	vmcs12->guest_ss_base = vmcs_readl(GUEST_SS_BASE);
	vmcs12->guest_ds_base = vmcs_readl(GUEST_DS_BASE);
	vmcs12->guest_fs_base = vmcs_readl(GUEST_FS_BASE);
	vmcs12->guest_gs_base = vmcs_readl(GUEST_GS_BASE);
	vmcs12->guest_ldtr_base = vmcs_readl(GUEST_LDTR_BASE);
	vmcs12->guest_tr_base = vmcs_readl(GUEST_TR_BASE);
	vmcs12->guest_gdtr_base = vmcs_readl(GUEST_GDTR_BASE);
	vmcs12->guest_idtr_base = vmcs_readl(GUEST_IDTR_BASE);

	vmcs12->guest_interruptibility_info =
		vmcs_read32(GUEST_INTERRUPTIBILITY_INFO);
	vmcs12->guest_pending_dbg_exceptions =
		vmcs_readl(GUEST_PENDING_DBG_EXCEPTIONS);

	/*
	 * In some cases (usually, nested EPT), L2 is allowed to change its
	 * own CR3 without exiting. If it has changed it, we must keep it.
	 * Of course, if L0 is using shadow page tables, GUEST_CR3 was defined
	 * by L0, not L1 or L2, so we mustn't unconditionally copy it to vmcs12.
	 *
	 * Additionally, restore L2's PDPTR to vmcs12.
	 */
	if (enable_ept) {
		vmcs12->guest_cr3 = vmcs_read64(GUEST_CR3);
		vmcs12->guest_pdptr0 = vmcs_read64(GUEST_PDPTR0);
		vmcs12->guest_pdptr1 = vmcs_read64(GUEST_PDPTR1);
		vmcs12->guest_pdptr2 = vmcs_read64(GUEST_PDPTR2);
		vmcs12->guest_pdptr3 = vmcs_read64(GUEST_PDPTR3);
	}

	vmcs12->vm_entry_controls =
		(vmcs12->vm_entry_controls & ~VM_ENTRY_IA32E_MODE) |
		(vmcs_read32(VM_ENTRY_CONTROLS) & VM_ENTRY_IA32E_MODE);

	/* TODO: These cannot have changed unless we have MSR bitmaps and
	 * the relevant bit asks not to trap the change */
	vmcs12->guest_ia32_debugctl = vmcs_read64(GUEST_IA32_DEBUGCTL);
	if (vmcs12->vm_exit_controls & VM_EXIT_SAVE_IA32_PAT)
		vmcs12->guest_ia32_pat = vmcs_read64(GUEST_IA32_PAT);
	vmcs12->guest_sysenter_cs = vmcs_read32(GUEST_SYSENTER_CS);
	vmcs12->guest_sysenter_esp = vmcs_readl(GUEST_SYSENTER_ESP);
	vmcs12->guest_sysenter_eip = vmcs_readl(GUEST_SYSENTER_EIP);

	/* update exit information fields: */

	vmcs12->vm_exit_reason = to_vmx(vcpu)->exit_reason;
	vmcs12->exit_qualification = vmcs_readl(EXIT_QUALIFICATION);

	vmcs12->vm_exit_intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
	if ((vmcs12->vm_exit_intr_info &
	     (INTR_INFO_VALID_MASK | INTR_INFO_DELIVER_CODE_MASK)) ==
	    (INTR_INFO_VALID_MASK | INTR_INFO_DELIVER_CODE_MASK))
		vmcs12->vm_exit_intr_error_code =
			vmcs_read32(VM_EXIT_INTR_ERROR_CODE);
	vmcs12->idt_vectoring_info_field = 0;
	vmcs12->vm_exit_instruction_len = vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
	vmcs12->vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
	if (!(vmcs12->vm_exit_reason & VMX_EXIT_REASONS_FAILED_VMENTRY)) {
		/* vm_entry_intr_info_field is cleared on exit. Emulate this
		 * instead of reading the real value. */
		vmcs12->vm_entry_intr_info_field &= ~INTR_INFO_VALID_MASK;

		/*
		 * Transfer the event that L0 or L1 may have wanted to inject
		 * into L2 to IDT_VECTORING_INFO_FIELD.
		 */
		vmcs12_save_pending_event(vcpu, vmcs12);
	}

	/*
	 * Drop what we picked up for L2 via vmx_complete_interrupts. It is
	 * preserved above and would only end up incorrectly in L1.
	 */
	vcpu->arch.nmi_injected = false;
	kvm_clear_exception_queue(vcpu);
	kvm_clear_interrupt_queue(vcpu);
}
/*
 * A part of what we need to do when the nested L2 guest exits and we want to
 * run its L1 parent, is to reset L1's guest state to the host state specified
 * in vmcs12.
 * This function is to be called not only on normal nested exit, but also on
 * a nested entry failure, as explained in Intel's spec, 3B.23.7 ("VM-Entry
 * Failures During or After Loading Guest State").
 * This function should be called when the active VMCS is L1's (vmcs01).
 */
static void load_vmcs12_host_state(struct kvm_vcpu *vcpu,
				   struct vmcs12 *vmcs12)
{
	struct kvm_segment seg;

	if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_EFER)
		vcpu->arch.efer = vmcs12->host_ia32_efer;
	else if (vmcs12->vm_exit_controls & VM_EXIT_HOST_ADDR_SPACE_SIZE)
		vcpu->arch.efer |= (EFER_LMA | EFER_LME);
	else
		vcpu->arch.efer &= ~(EFER_LMA | EFER_LME);
	vmx_set_efer(vcpu, vcpu->arch.efer);

	kvm_register_write(vcpu, VCPU_REGS_RSP, vmcs12->host_rsp);
	kvm_register_write(vcpu, VCPU_REGS_RIP, vmcs12->host_rip);
	vmx_set_rflags(vcpu, X86_EFLAGS_FIXED);
	/*
	 * Note that calling vmx_set_cr0 is important, even if cr0 hasn't
	 * actually changed, because it depends on the current state of
	 * fpu_active (which may have changed).
	 * Note that vmx_set_cr0 refers to efer set above.
	 */
	kvm_set_cr0(vcpu, vmcs12->host_cr0);
	/*
	 * If we did fpu_activate()/fpu_deactivate() during L2's run, we need
	 * to apply the same changes to L1's vmcs. We just set cr0 correctly,
	 * but we also need to update cr0_guest_host_mask and exception_bitmap.
	 */
	update_exception_bitmap(vcpu);
	vcpu->arch.cr0_guest_owned_bits = (vcpu->fpu_active ? X86_CR0_TS : 0);
	vmcs_writel(CR0_GUEST_HOST_MASK, ~vcpu->arch.cr0_guest_owned_bits);

	/*
	 * Note that CR4_GUEST_HOST_MASK is already set in the original vmcs01
	 * (KVM doesn't change it) - no reason to call set_cr4_guest_host_mask().
	 */
	vcpu->arch.cr4_guest_owned_bits = ~vmcs_readl(CR4_GUEST_HOST_MASK);
	kvm_set_cr4(vcpu, vmcs12->host_cr4);

	if (nested_cpu_has_ept(vmcs12))
		nested_ept_uninit_mmu_context(vcpu);

	kvm_set_cr3(vcpu, vmcs12->host_cr3);
	kvm_mmu_reset_context(vcpu);

	if (enable_vpid) {
		/*
		 * Trivially support vpid by letting L2s share their parent
		 * L1's vpid. TODO: move to a more elaborate solution, giving
		 * each L2 its own vpid and exposing the vpid feature to L1.
		 */
		vmx_flush_tlb(vcpu);
	}

	vmcs_write32(GUEST_SYSENTER_CS, vmcs12->host_ia32_sysenter_cs);
	vmcs_writel(GUEST_SYSENTER_ESP, vmcs12->host_ia32_sysenter_esp);
	vmcs_writel(GUEST_SYSENTER_EIP, vmcs12->host_ia32_sysenter_eip);
	vmcs_writel(GUEST_IDTR_BASE, vmcs12->host_idtr_base);
	vmcs_writel(GUEST_GDTR_BASE, vmcs12->host_gdtr_base);

	if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_PAT) {
		vmcs_write64(GUEST_IA32_PAT, vmcs12->host_ia32_pat);
		vcpu->arch.pat = vmcs12->host_ia32_pat;
	}
	if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL)
		vmcs_write64(GUEST_IA32_PERF_GLOBAL_CTRL,
			vmcs12->host_ia32_perf_global_ctrl);

	/* Set L1 segment info according to Intel SDM
	   27.5.2 Loading Host Segment and Descriptor-Table Registers */
	seg = (struct kvm_segment) {
		.base = 0,
		.limit = 0xFFFFFFFF,
		.selector = vmcs12->host_cs_selector,
		.type = 11,
		.present = 1,
		.s = 1,
		.g = 1
	};
	if (vmcs12->vm_exit_controls & VM_EXIT_HOST_ADDR_SPACE_SIZE)
		seg.l = 1;
	else
		seg.db = 1;
	vmx_set_segment(vcpu, &seg, VCPU_SREG_CS);
	seg = (struct kvm_segment) {
		.base = 0,
		.limit = 0xFFFFFFFF,
		.type = 3,
		.present = 1,
		.s = 1,
		.db = 1,
		.g = 1
	};
	seg.selector = vmcs12->host_ds_selector;
	vmx_set_segment(vcpu, &seg, VCPU_SREG_DS);
	seg.selector = vmcs12->host_es_selector;
	vmx_set_segment(vcpu, &seg, VCPU_SREG_ES);
	seg.selector = vmcs12->host_ss_selector;
	vmx_set_segment(vcpu, &seg, VCPU_SREG_SS);
	seg.selector = vmcs12->host_fs_selector;
	seg.base = vmcs12->host_fs_base;
	vmx_set_segment(vcpu, &seg, VCPU_SREG_FS);
	seg.selector = vmcs12->host_gs_selector;
	seg.base = vmcs12->host_gs_base;
	vmx_set_segment(vcpu, &seg, VCPU_SREG_GS);
	seg = (struct kvm_segment) {
		.base = vmcs12->host_tr_base,
		.limit = 0x67,
		.selector = vmcs12->host_tr_selector,
		.type = 11,
		.present = 1
	};
	vmx_set_segment(vcpu, &seg, VCPU_SREG_TR);

	kvm_set_dr(vcpu, 7, 0x400);
	vmcs_write64(GUEST_IA32_DEBUGCTL, 0);
}
/*
 * Emulate an exit from nested guest (L2) to L1, i.e., prepare to run L1
 * and modify vmcs12 to make it see what it would expect to see there if
 * L2 was its real guest. Must only be called when in L2 (is_guest_mode())
 */
static void nested_vmx_vmexit(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	int cpu;
	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);

	/* trying to cancel vmlaunch/vmresume is a bug */
	WARN_ON_ONCE(vmx->nested.nested_run_pending);

	leave_guest_mode(vcpu);
	prepare_vmcs12(vcpu, vmcs12);

	cpu = get_cpu();
	vmx->loaded_vmcs = &vmx->vmcs01;
	vmx_vcpu_put(vcpu);
	vmx_vcpu_load(vcpu, cpu);
	vcpu->cpu = cpu;
	put_cpu();

	vmx_segment_cache_clear(vmx);

	/* if no vmcs02 cache requested, remove the one we used */
	if (VMCS02_POOL_SIZE == 0)
		nested_free_vmcs02(vmx, vmx->nested.current_vmptr);

	load_vmcs12_host_state(vcpu, vmcs12);

	/* Update TSC_OFFSET if TSC was changed while L2 ran */
	vmcs_write64(TSC_OFFSET, vmx->nested.vmcs01_tsc_offset);

	/* This is needed for same reason as it was needed in prepare_vmcs02 */
	vmx->host_rsp = 0;

	/* Unpin physical memory we referred to in vmcs02 */
	if (vmx->nested.apic_access_page) {
		nested_release_page(vmx->nested.apic_access_page);
		vmx->nested.apic_access_page = 0;
	}

	/*
	 * Exiting from L2 to L1, we're now back to L1 which thinks it just
	 * finished a VMLAUNCH or VMRESUME instruction, so we need to set the
	 * success or failure flag accordingly.
	 */
	if (unlikely(vmx->fail)) {
		vmx->fail = 0;
		nested_vmx_failValid(vcpu, vmcs_read32(VM_INSTRUCTION_ERROR));
	} else
		nested_vmx_succeed(vcpu);
	if (enable_shadow_vmcs)
		vmx->nested.sync_shadow_vmcs = true;
}
/*
 * L1's failure to enter L2 is a subset of a normal exit, as explained in
 * 23.7 "VM-entry failures during or after loading guest state" (this also
 * lists the acceptable exit-reason and exit-qualification parameters).
 * It should only be called before L2 has actually started running, and when
 * vmcs01 is current (it doesn't leave_guest_mode() or switch VMCSs).
 */
static void nested_vmx_entry_failure(struct kvm_vcpu *vcpu,
			struct vmcs12 *vmcs12,
			u32 reason, unsigned long qualification)
{
	load_vmcs12_host_state(vcpu, vmcs12);
	vmcs12->vm_exit_reason = reason | VMX_EXIT_REASONS_FAILED_VMENTRY;
	vmcs12->exit_qualification = qualification;
	nested_vmx_succeed(vcpu);
	if (enable_shadow_vmcs)
		to_vmx(vcpu)->nested.sync_shadow_vmcs = true;
}
static int vmx_check_intercept(struct kvm_vcpu *vcpu,
			       struct x86_instruction_info *info,
			       enum x86_intercept_stage stage)
{
	return X86EMUL_CONTINUE;
}
static struct kvm_x86_ops vmx_x86_ops = {
	.cpu_has_kvm_support = cpu_has_kvm_support,
	.disabled_by_bios = vmx_disabled_by_bios,
	.hardware_setup = hardware_setup,
	.hardware_unsetup = hardware_unsetup,
	.check_processor_compatibility = vmx_check_processor_compat,
	.hardware_enable = hardware_enable,
	.hardware_disable = hardware_disable,
	.cpu_has_accelerated_tpr = report_flexpriority,

	.vcpu_create = vmx_create_vcpu,
	.vcpu_free = vmx_free_vcpu,
	.vcpu_reset = vmx_vcpu_reset,

	.prepare_guest_switch = vmx_save_host_state,
	.vcpu_load = vmx_vcpu_load,
	.vcpu_put = vmx_vcpu_put,

	.update_db_bp_intercept = update_exception_bitmap,
	.get_msr = vmx_get_msr,
	.set_msr = vmx_set_msr,
	.get_segment_base = vmx_get_segment_base,
	.get_segment = vmx_get_segment,
	.set_segment = vmx_set_segment,
	.get_cpl = vmx_get_cpl,
	.get_cs_db_l_bits = vmx_get_cs_db_l_bits,
	.decache_cr0_guest_bits = vmx_decache_cr0_guest_bits,
	.decache_cr3 = vmx_decache_cr3,
	.decache_cr4_guest_bits = vmx_decache_cr4_guest_bits,
	.set_cr0 = vmx_set_cr0,
	.set_cr3 = vmx_set_cr3,
	.set_cr4 = vmx_set_cr4,
	.set_efer = vmx_set_efer,
	.get_idt = vmx_get_idt,
	.set_idt = vmx_set_idt,
	.get_gdt = vmx_get_gdt,
	.set_gdt = vmx_set_gdt,
	.set_dr7 = vmx_set_dr7,
	.cache_reg = vmx_cache_reg,
	.get_rflags = vmx_get_rflags,
	.set_rflags = vmx_set_rflags,
	.fpu_activate = vmx_fpu_activate,
	.fpu_deactivate = vmx_fpu_deactivate,

	.tlb_flush = vmx_flush_tlb,

	.run = vmx_vcpu_run,
	.handle_exit = vmx_handle_exit,
	.skip_emulated_instruction = skip_emulated_instruction,
	.set_interrupt_shadow = vmx_set_interrupt_shadow,
	.get_interrupt_shadow = vmx_get_interrupt_shadow,
	.patch_hypercall = vmx_patch_hypercall,
	.set_irq = vmx_inject_irq,
	.set_nmi = vmx_inject_nmi,
	.queue_exception = vmx_queue_exception,
	.cancel_injection = vmx_cancel_injection,
	.interrupt_allowed = vmx_interrupt_allowed,
	.nmi_allowed = vmx_nmi_allowed,
	.get_nmi_mask = vmx_get_nmi_mask,
	.set_nmi_mask = vmx_set_nmi_mask,
	.enable_nmi_window = enable_nmi_window,
	.enable_irq_window = enable_irq_window,
	.update_cr8_intercept = update_cr8_intercept,
	.set_virtual_x2apic_mode = vmx_set_virtual_x2apic_mode,
	.vm_has_apicv = vmx_vm_has_apicv,
	.load_eoi_exitmap = vmx_load_eoi_exitmap,
	.hwapic_irr_update = vmx_hwapic_irr_update,
	.hwapic_isr_update = vmx_hwapic_isr_update,
	.sync_pir_to_irr = vmx_sync_pir_to_irr,
	.deliver_posted_interrupt = vmx_deliver_posted_interrupt,

	.set_tss_addr = vmx_set_tss_addr,
	.get_tdp_level = get_ept_level,
	.get_mt_mask = vmx_get_mt_mask,

	.get_exit_info = vmx_get_exit_info,

	.get_lpage_level = vmx_get_lpage_level,

	.cpuid_update = vmx_cpuid_update,

	.rdtscp_supported = vmx_rdtscp_supported,
	.invpcid_supported = vmx_invpcid_supported,

	.set_supported_cpuid = vmx_set_supported_cpuid,

	.has_wbinvd_exit = cpu_has_vmx_wbinvd_exit,

	.set_tsc_khz = vmx_set_tsc_khz,
	.read_tsc_offset = vmx_read_tsc_offset,
	.write_tsc_offset = vmx_write_tsc_offset,
	.adjust_tsc_offset = vmx_adjust_tsc_offset,
	.compute_tsc_offset = vmx_compute_tsc_offset,
	.read_l1_tsc = vmx_read_l1_tsc,

	.set_tdp_cr3 = vmx_set_cr3,

	.check_intercept = vmx_check_intercept,
	.handle_external_intr = vmx_handle_external_intr,
};
static int __init vmx_init(void)
{
	int r, i, msr;

	rdmsrl_safe(MSR_EFER, &host_efer);

	for (i = 0; i < NR_VMX_MSR; ++i)
		kvm_define_shared_msr(i, vmx_msr_index[i]);

	vmx_io_bitmap_a = (unsigned long *)__get_free_page(GFP_KERNEL);
	if (!vmx_io_bitmap_a)
		return -ENOMEM;

	r = -ENOMEM;

	vmx_io_bitmap_b = (unsigned long *)__get_free_page(GFP_KERNEL);
	if (!vmx_io_bitmap_b)
		goto out;

	vmx_msr_bitmap_legacy = (unsigned long *)__get_free_page(GFP_KERNEL);
	if (!vmx_msr_bitmap_legacy)
		goto out1;

	vmx_msr_bitmap_legacy_x2apic =
		(unsigned long *)__get_free_page(GFP_KERNEL);
	if (!vmx_msr_bitmap_legacy_x2apic)
		goto out2;

	vmx_msr_bitmap_longmode = (unsigned long *)__get_free_page(GFP_KERNEL);
	if (!vmx_msr_bitmap_longmode)
		goto out3;

	vmx_msr_bitmap_longmode_x2apic =
		(unsigned long *)__get_free_page(GFP_KERNEL);
	if (!vmx_msr_bitmap_longmode_x2apic)
		goto out4;

	vmx_vmread_bitmap = (unsigned long *)__get_free_page(GFP_KERNEL);
	if (!vmx_vmread_bitmap)
		goto out5;

	vmx_vmwrite_bitmap = (unsigned long *)__get_free_page(GFP_KERNEL);
	if (!vmx_vmwrite_bitmap)
		goto out6;

	memset(vmx_vmread_bitmap, 0xff, PAGE_SIZE);
	memset(vmx_vmwrite_bitmap, 0xff, PAGE_SIZE);
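	/*
	 * A set bit in the VMREAD/VMWRITE bitmaps causes the corresponding
	 * VMREAD/VMWRITE from L1 to vmexit; clearing the bits below for the
	 * shadowed fields lets those fields be accessed directly in the
	 * shadow VMCS without an exit.
	 */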
	/* shadowed read/write fields */
	for (i = 0; i < max_shadow_read_write_fields; i++) {
		clear_bit(shadow_read_write_fields[i], vmx_vmwrite_bitmap);
		clear_bit(shadow_read_write_fields[i], vmx_vmread_bitmap);
	}
	/* shadowed read only fields */
	for (i = 0; i < max_shadow_read_only_fields; i++)
		clear_bit(shadow_read_only_fields[i], vmx_vmread_bitmap);

	/*
	 * Allow direct access to the PC debug port (it is often used for I/O
	 * delays, but the vmexits simply slow things down).
	 */
	memset(vmx_io_bitmap_a, 0xff, PAGE_SIZE);
	clear_bit(0x80, vmx_io_bitmap_a);

	memset(vmx_io_bitmap_b, 0xff, PAGE_SIZE);

	memset(vmx_msr_bitmap_legacy, 0xff, PAGE_SIZE);
	memset(vmx_msr_bitmap_longmode, 0xff, PAGE_SIZE);

	set_bit(0, vmx_vpid_bitmap); /* 0 is reserved for host */

	r = kvm_init(&vmx_x86_ops, sizeof(struct vcpu_vmx),
		     __alignof__(struct vcpu_vmx), THIS_MODULE);
	if (r)
		goto out7;

#ifdef CONFIG_KEXEC
	rcu_assign_pointer(crash_vmclear_loaded_vmcss,
			   crash_vmclear_local_loaded_vmcss);
#endif
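	/*
	 * The MSRs below are saved/restored either by VMX hardware on
	 * entry/exit or by KVM's host-state switching, so guest accesses to
	 * them don't need to be intercepted; KERNEL_GS_BASE matters only in
	 * long mode, which is why it alone passes true for the long-mode-only
	 * argument.
	 */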
	vmx_disable_intercept_for_msr(MSR_FS_BASE, false);
	vmx_disable_intercept_for_msr(MSR_GS_BASE, false);
	vmx_disable_intercept_for_msr(MSR_KERNEL_GS_BASE, true);
	vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_CS, false);
	vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_ESP, false);
	vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_EIP, false);
	memcpy(vmx_msr_bitmap_legacy_x2apic,
			vmx_msr_bitmap_legacy, PAGE_SIZE);
	memcpy(vmx_msr_bitmap_longmode_x2apic,
			vmx_msr_bitmap_longmode, PAGE_SIZE);

	if (enable_apicv) {
		for (msr = 0x800; msr <= 0x8ff; msr++)
			vmx_disable_intercept_msr_read_x2apic(msr);

		/* According to the SDM, in x2apic mode the whole id register
		 * is used, but KVM only uses the highest eight bits, so we
		 * need to intercept it. */
		vmx_enable_intercept_msr_read_x2apic(0x802);
		/* TMCCT */
		vmx_enable_intercept_msr_read_x2apic(0x839);
		/* TPR */
		vmx_disable_intercept_msr_write_x2apic(0x808);
		/* EOI */
		vmx_disable_intercept_msr_write_x2apic(0x80b);
		/* SELF-IPI */
		vmx_disable_intercept_msr_write_x2apic(0x83f);
	}
	if (enable_ept) {
		kvm_mmu_set_mask_ptes(0ull,
			(enable_ept_ad_bits) ? VMX_EPT_ACCESS_BIT : 0ull,
			(enable_ept_ad_bits) ? VMX_EPT_DIRTY_BIT : 0ull,
			0ull, VMX_EPT_EXECUTABLE_MASK);
		ept_set_mmio_spte_mask();
		kvm_enable_tdp();
	} else
		kvm_disable_tdp();

	return 0;

out7:
	free_page((unsigned long)vmx_vmwrite_bitmap);
out6:
	free_page((unsigned long)vmx_vmread_bitmap);
out5:
	free_page((unsigned long)vmx_msr_bitmap_longmode_x2apic);
out4:
	free_page((unsigned long)vmx_msr_bitmap_longmode);
out3:
	free_page((unsigned long)vmx_msr_bitmap_legacy_x2apic);
out2:
	free_page((unsigned long)vmx_msr_bitmap_legacy);
out1:
	free_page((unsigned long)vmx_io_bitmap_b);
out:
	free_page((unsigned long)vmx_io_bitmap_a);
	return r;
}
static void __exit vmx_exit(void)
{
	free_page((unsigned long)vmx_msr_bitmap_legacy_x2apic);
	free_page((unsigned long)vmx_msr_bitmap_longmode_x2apic);
	free_page((unsigned long)vmx_msr_bitmap_legacy);
	free_page((unsigned long)vmx_msr_bitmap_longmode);
	free_page((unsigned long)vmx_io_bitmap_b);
	free_page((unsigned long)vmx_io_bitmap_a);
	free_page((unsigned long)vmx_vmwrite_bitmap);
	free_page((unsigned long)vmx_vmread_bitmap);

#ifdef CONFIG_KEXEC
	rcu_assign_pointer(crash_vmclear_loaded_vmcss, NULL);
	synchronize_rcu();
#endif

	kvm_exit();
}

module_init(vmx_init)
module_exit(vmx_exit)