x86.c

/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * derived from drivers/kvm/kvm_main.c
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright (C) 2008 Qumranet, Inc.
 * Copyright IBM Corporation, 2008
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Avi Kivity   <avi@qumranet.com>
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Amit Shah    <amit.shah@qumranet.com>
 *   Ben-Ami Yassour <benami@il.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */
#include <linux/kvm_host.h>
#include "irq.h"
#include "mmu.h"
#include "i8254.h"
#include "tss.h"
#include "kvm_cache_regs.h"
#include "x86.h"

#include <linux/clocksource.h>
#include <linux/interrupt.h>
#include <linux/kvm.h>
#include <linux/fs.h>
#include <linux/vmalloc.h>
#include <linux/module.h>
#include <linux/mman.h>
#include <linux/highmem.h>
#include <linux/iommu.h>
#include <linux/intel-iommu.h>
#include <linux/cpufreq.h>
#include <linux/user-return-notifier.h>
#include <linux/srcu.h>
#include <linux/slab.h>
#include <linux/perf_event.h>
#include <linux/uaccess.h>
#include <linux/hash.h>
#include <trace/events/kvm.h>

#define CREATE_TRACE_POINTS
#include "trace.h"

#include <asm/debugreg.h>
#include <asm/msr.h>
#include <asm/desc.h>
#include <asm/mtrr.h>
#include <asm/mce.h>
#include <asm/i387.h>
#include <asm/xcr.h>
#include <asm/pvclock.h>
#include <asm/div64.h>
#define MAX_IO_MSRS 256
#define CR0_RESERVED_BITS \
        (~(unsigned long)(X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS \
                          | X86_CR0_ET | X86_CR0_NE | X86_CR0_WP | X86_CR0_AM \
                          | X86_CR0_NW | X86_CR0_CD | X86_CR0_PG))
#define CR4_RESERVED_BITS \
        (~(unsigned long)(X86_CR4_VME | X86_CR4_PVI | X86_CR4_TSD | X86_CR4_DE\
                          | X86_CR4_PSE | X86_CR4_PAE | X86_CR4_MCE \
                          | X86_CR4_PGE | X86_CR4_PCE | X86_CR4_OSFXSR \
                          | X86_CR4_OSXSAVE \
                          | X86_CR4_OSXMMEXCPT | X86_CR4_VMXE))

#define CR8_RESERVED_BITS (~(unsigned long)X86_CR8_TPR)

#define KVM_MAX_MCE_BANKS 32
#define KVM_MCE_CAP_SUPPORTED (MCG_CTL_P | MCG_SER_P)
/* EFER defaults:
 * - enable syscall per default because it is emulated by KVM
 * - enable LME and LMA per default on 64 bit KVM
 */
#ifdef CONFIG_X86_64
static u64 __read_mostly efer_reserved_bits = 0xfffffffffffffafeULL;
#else
static u64 __read_mostly efer_reserved_bits = 0xfffffffffffffffeULL;
#endif

#define VM_STAT(x) offsetof(struct kvm, stat.x), KVM_STAT_VM
#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU
static void update_cr8_intercept(struct kvm_vcpu *vcpu);
static int kvm_dev_ioctl_get_supported_cpuid(struct kvm_cpuid2 *cpuid,
                                             struct kvm_cpuid_entry2 __user *entries);

struct kvm_x86_ops *kvm_x86_ops;
EXPORT_SYMBOL_GPL(kvm_x86_ops);

int ignore_msrs = 0;
module_param_named(ignore_msrs, ignore_msrs, bool, S_IRUGO | S_IWUSR);
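
/*
 * "Shared" (user-return) MSRs are guest-visible MSRs that the host only
 * needs restored when it returns to userspace: kvm_set_shared_msr() writes
 * the guest value and registers a user-return notifier, and
 * kvm_on_user_return() lazily restores the host value on the first return
 * to userspace instead of on every vmexit.
 */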
#define KVM_NR_SHARED_MSRS 16

struct kvm_shared_msrs_global {
        int nr;
        u32 msrs[KVM_NR_SHARED_MSRS];
};

struct kvm_shared_msrs {
        struct user_return_notifier urn;
        bool registered;
        struct kvm_shared_msr_values {
                u64 host;
                u64 curr;
        } values[KVM_NR_SHARED_MSRS];
};

static struct kvm_shared_msrs_global __read_mostly shared_msrs_global;
static DEFINE_PER_CPU(struct kvm_shared_msrs, shared_msrs);
struct kvm_stats_debugfs_item debugfs_entries[] = {
        { "pf_fixed", VCPU_STAT(pf_fixed) },
        { "pf_guest", VCPU_STAT(pf_guest) },
        { "tlb_flush", VCPU_STAT(tlb_flush) },
        { "invlpg", VCPU_STAT(invlpg) },
        { "exits", VCPU_STAT(exits) },
        { "io_exits", VCPU_STAT(io_exits) },
        { "mmio_exits", VCPU_STAT(mmio_exits) },
        { "signal_exits", VCPU_STAT(signal_exits) },
        { "irq_window", VCPU_STAT(irq_window_exits) },
        { "nmi_window", VCPU_STAT(nmi_window_exits) },
        { "halt_exits", VCPU_STAT(halt_exits) },
        { "halt_wakeup", VCPU_STAT(halt_wakeup) },
        { "hypercalls", VCPU_STAT(hypercalls) },
        { "request_irq", VCPU_STAT(request_irq_exits) },
        { "irq_exits", VCPU_STAT(irq_exits) },
        { "host_state_reload", VCPU_STAT(host_state_reload) },
        { "efer_reload", VCPU_STAT(efer_reload) },
        { "fpu_reload", VCPU_STAT(fpu_reload) },
        { "insn_emulation", VCPU_STAT(insn_emulation) },
        { "insn_emulation_fail", VCPU_STAT(insn_emulation_fail) },
        { "irq_injections", VCPU_STAT(irq_injections) },
        { "nmi_injections", VCPU_STAT(nmi_injections) },
        { "mmu_shadow_zapped", VM_STAT(mmu_shadow_zapped) },
        { "mmu_pte_write", VM_STAT(mmu_pte_write) },
        { "mmu_pte_updated", VM_STAT(mmu_pte_updated) },
        { "mmu_pde_zapped", VM_STAT(mmu_pde_zapped) },
        { "mmu_flooded", VM_STAT(mmu_flooded) },
        { "mmu_recycled", VM_STAT(mmu_recycled) },
        { "mmu_cache_miss", VM_STAT(mmu_cache_miss) },
        { "mmu_unsync", VM_STAT(mmu_unsync) },
        { "remote_tlb_flush", VM_STAT(remote_tlb_flush) },
        { "largepages", VM_STAT(lpages) },
        { NULL }
};
u64 __read_mostly host_xcr0;

static inline void kvm_async_pf_hash_reset(struct kvm_vcpu *vcpu)
{
        int i;

        for (i = 0; i < roundup_pow_of_two(ASYNC_PF_PER_VCPU); i++)
                vcpu->arch.apf.gfns[i] = ~0;
}
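
/*
 * User-return notifier callback: restore every shared MSR whose current
 * value still differs from the host value, then unregister the notifier
 * for this CPU.
 */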
static void kvm_on_user_return(struct user_return_notifier *urn)
{
        unsigned slot;
        struct kvm_shared_msrs *locals
                = container_of(urn, struct kvm_shared_msrs, urn);
        struct kvm_shared_msr_values *values;

        for (slot = 0; slot < shared_msrs_global.nr; ++slot) {
                values = &locals->values[slot];
                if (values->host != values->curr) {
                        wrmsrl(shared_msrs_global.msrs[slot], values->host);
                        values->curr = values->host;
                }
        }
        locals->registered = false;
        user_return_notifier_unregister(urn);
}
static void shared_msr_update(unsigned slot, u32 msr)
{
        struct kvm_shared_msrs *smsr;
        u64 value;

        smsr = &__get_cpu_var(shared_msrs);
        /* only read; nobody should modify it at this time,
         * so no lock is needed */
        if (slot >= shared_msrs_global.nr) {
                printk(KERN_ERR "kvm: invalid MSR slot!");
                return;
        }
        rdmsrl_safe(msr, &value);
        smsr->values[slot].host = value;
        smsr->values[slot].curr = value;
}

void kvm_define_shared_msr(unsigned slot, u32 msr)
{
        if (slot >= shared_msrs_global.nr)
                shared_msrs_global.nr = slot + 1;
        shared_msrs_global.msrs[slot] = msr;
        /* make sure the update to shared_msrs_global is visible before use */
        smp_wmb();
}
EXPORT_SYMBOL_GPL(kvm_define_shared_msr);

static void kvm_shared_msr_cpu_online(void)
{
        unsigned i;

        for (i = 0; i < shared_msrs_global.nr; ++i)
                shared_msr_update(i, shared_msrs_global.msrs[i]);
}
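
/*
 * Write a guest value into a shared MSR.  The write is skipped when no bit
 * covered by @mask actually changes; otherwise the user-return notifier is
 * registered (once per CPU) so the host value can be restored later.
 */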
void kvm_set_shared_msr(unsigned slot, u64 value, u64 mask)
{
        struct kvm_shared_msrs *smsr = &__get_cpu_var(shared_msrs);

        if (((value ^ smsr->values[slot].curr) & mask) == 0)
                return;
        smsr->values[slot].curr = value;
        wrmsrl(shared_msrs_global.msrs[slot], value);
        if (!smsr->registered) {
                smsr->urn.on_user_return = kvm_on_user_return;
                user_return_notifier_register(&smsr->urn);
                smsr->registered = true;
        }
}
EXPORT_SYMBOL_GPL(kvm_set_shared_msr);

static void drop_user_return_notifiers(void *ignore)
{
        struct kvm_shared_msrs *smsr = &__get_cpu_var(shared_msrs);

        if (smsr->registered)
                kvm_on_user_return(&smsr->urn);
}

u64 kvm_get_apic_base(struct kvm_vcpu *vcpu)
{
        if (irqchip_in_kernel(vcpu->kvm))
                return vcpu->arch.apic_base;
        else
                return vcpu->arch.apic_base;
}
EXPORT_SYMBOL_GPL(kvm_get_apic_base);

void kvm_set_apic_base(struct kvm_vcpu *vcpu, u64 data)
{
        /* TODO: reserve bits check */
        if (irqchip_in_kernel(vcpu->kvm))
                kvm_lapic_set_base(vcpu, data);
        else
                vcpu->arch.apic_base = data;
}
EXPORT_SYMBOL_GPL(kvm_set_apic_base);

#define EXCPT_BENIGN            0
#define EXCPT_CONTRIBUTORY      1
#define EXCPT_PF                2
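
/*
 * Classify an exception vector as benign, contributory, or page fault.
 * The (previous, new) class pair decides below whether two back-to-back
 * exceptions merge into a double fault.
 */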
static int exception_class(int vector)
{
        switch (vector) {
        case PF_VECTOR:
                return EXCPT_PF;
        case DE_VECTOR:
        case TS_VECTOR:
        case NP_VECTOR:
        case SS_VECTOR:
        case GP_VECTOR:
                return EXCPT_CONTRIBUTORY;
        default:
                break;
        }
        return EXCPT_BENIGN;
}
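
/*
 * Queue an exception, merging it with one that is already pending:
 * a contributory exception on a pending contributory exception, or any
 * non-benign exception on a pending #PF, becomes #DF; an exception raised
 * while #DF is pending escalates to a triple fault (VM shutdown).
 */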
static void kvm_multiple_exception(struct kvm_vcpu *vcpu,
                unsigned nr, bool has_error, u32 error_code,
                bool reinject)
{
        u32 prev_nr;
        int class1, class2;

        kvm_make_request(KVM_REQ_EVENT, vcpu);

        if (!vcpu->arch.exception.pending) {
        queue:
                vcpu->arch.exception.pending = true;
                vcpu->arch.exception.has_error_code = has_error;
                vcpu->arch.exception.nr = nr;
                vcpu->arch.exception.error_code = error_code;
                vcpu->arch.exception.reinject = reinject;
                return;
        }

        /* to check exception */
        prev_nr = vcpu->arch.exception.nr;
        if (prev_nr == DF_VECTOR) {
                /* triple fault -> shutdown */
                kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
                return;
        }
        class1 = exception_class(prev_nr);
        class2 = exception_class(nr);
        if ((class1 == EXCPT_CONTRIBUTORY && class2 == EXCPT_CONTRIBUTORY)
                || (class1 == EXCPT_PF && class2 != EXCPT_BENIGN)) {
                /* generate double fault per SDM Table 5-5 */
                vcpu->arch.exception.pending = true;
                vcpu->arch.exception.has_error_code = true;
                vcpu->arch.exception.nr = DF_VECTOR;
                vcpu->arch.exception.error_code = 0;
        } else
                /* replace the previous exception with the new one in the
                   hope that instruction re-execution will regenerate the
                   lost exception */
                goto queue;
}
void kvm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr)
{
        kvm_multiple_exception(vcpu, nr, false, 0, false);
}
EXPORT_SYMBOL_GPL(kvm_queue_exception);

void kvm_requeue_exception(struct kvm_vcpu *vcpu, unsigned nr)
{
        kvm_multiple_exception(vcpu, nr, false, 0, true);
}
EXPORT_SYMBOL_GPL(kvm_requeue_exception);

void kvm_inject_page_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault)
{
        ++vcpu->stat.pf_guest;
        vcpu->arch.cr2 = fault->address;
        kvm_queue_exception_e(vcpu, PF_VECTOR, fault->error_code);
}

void kvm_propagate_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault)
{
        if (mmu_is_nested(vcpu) && !fault->nested_page_fault)
                vcpu->arch.nested_mmu.inject_page_fault(vcpu, fault);
        else
                vcpu->arch.mmu.inject_page_fault(vcpu, fault);
}

void kvm_inject_nmi(struct kvm_vcpu *vcpu)
{
        kvm_make_request(KVM_REQ_EVENT, vcpu);
        vcpu->arch.nmi_pending = 1;
}
EXPORT_SYMBOL_GPL(kvm_inject_nmi);

void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code)
{
        kvm_multiple_exception(vcpu, nr, true, error_code, false);
}
EXPORT_SYMBOL_GPL(kvm_queue_exception_e);

void kvm_requeue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code)
{
        kvm_multiple_exception(vcpu, nr, true, error_code, true);
}
EXPORT_SYMBOL_GPL(kvm_requeue_exception_e);

/*
 * Checks if cpl <= required_cpl; if true, return true.  Otherwise queue
 * a #GP and return false.
 */
bool kvm_require_cpl(struct kvm_vcpu *vcpu, int required_cpl)
{
        if (kvm_x86_ops->get_cpl(vcpu) <= required_cpl)
                return true;
        kvm_queue_exception_e(vcpu, GP_VECTOR, 0);
        return false;
}
EXPORT_SYMBOL_GPL(kvm_require_cpl);
/*
 * This function will be used to read from the physical memory of the
 * currently running guest.  The difference to kvm_read_guest_page is that
 * this function can read from guest physical or from the guest's guest
 * physical memory.
 */
int kvm_read_guest_page_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
                            gfn_t ngfn, void *data, int offset, int len,
                            u32 access)
{
        gfn_t real_gfn;
        gpa_t ngpa;

        ngpa     = gfn_to_gpa(ngfn);
        real_gfn = mmu->translate_gpa(vcpu, ngpa, access);
        if (real_gfn == UNMAPPED_GVA)
                return -EFAULT;

        real_gfn = gpa_to_gfn(real_gfn);

        return kvm_read_guest_page(vcpu->kvm, real_gfn, data, offset, len);
}
EXPORT_SYMBOL_GPL(kvm_read_guest_page_mmu);

int kvm_read_nested_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn,
                               void *data, int offset, int len, u32 access)
{
        return kvm_read_guest_page_mmu(vcpu, vcpu->arch.walk_mmu, gfn,
                                       data, offset, len, access);
}
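
/*
 * In PAE paging, CR3 points at a 32-byte-aligned table of four 8-byte
 * PDPTEs.  The offset computed below, ((cr3 & (PAGE_SIZE-1)) >> 5) << 2,
 * is the index (in units of u64) of that 32-byte slot within the page.
 */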
/*
 * Load the pae pdptrs.  Return true if they are all valid.
 */
int load_pdptrs(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, unsigned long cr3)
{
        gfn_t pdpt_gfn = cr3 >> PAGE_SHIFT;
        unsigned offset = ((cr3 & (PAGE_SIZE-1)) >> 5) << 2;
        int i;
        int ret;
        u64 pdpte[ARRAY_SIZE(mmu->pdptrs)];

        ret = kvm_read_guest_page_mmu(vcpu, mmu, pdpt_gfn, pdpte,
                                      offset * sizeof(u64), sizeof(pdpte),
                                      PFERR_USER_MASK|PFERR_WRITE_MASK);
        if (ret < 0) {
                ret = 0;
                goto out;
        }
        for (i = 0; i < ARRAY_SIZE(pdpte); ++i) {
                if (is_present_gpte(pdpte[i]) &&
                    (pdpte[i] & vcpu->arch.mmu.rsvd_bits_mask[0][2])) {
                        ret = 0;
                        goto out;
                }
        }
        ret = 1;

        memcpy(mmu->pdptrs, pdpte, sizeof(mmu->pdptrs));
        __set_bit(VCPU_EXREG_PDPTR,
                  (unsigned long *)&vcpu->arch.regs_avail);
        __set_bit(VCPU_EXREG_PDPTR,
                  (unsigned long *)&vcpu->arch.regs_dirty);
out:

        return ret;
}
EXPORT_SYMBOL_GPL(load_pdptrs);
static bool pdptrs_changed(struct kvm_vcpu *vcpu)
{
        u64 pdpte[ARRAY_SIZE(vcpu->arch.walk_mmu->pdptrs)];
        bool changed = true;
        int offset;
        gfn_t gfn;
        int r;

        if (is_long_mode(vcpu) || !is_pae(vcpu))
                return false;

        if (!test_bit(VCPU_EXREG_PDPTR,
                      (unsigned long *)&vcpu->arch.regs_avail))
                return true;

        gfn = (vcpu->arch.cr3 & ~31u) >> PAGE_SHIFT;
        offset = (vcpu->arch.cr3 & ~31u) & (PAGE_SIZE - 1);
        r = kvm_read_nested_guest_page(vcpu, gfn, pdpte, offset, sizeof(pdpte),
                                       PFERR_USER_MASK | PFERR_WRITE_MASK);
        if (r < 0)
                goto out;
        changed = memcmp(pdpte, vcpu->arch.walk_mmu->pdptrs, sizeof(pdpte)) != 0;
out:

        return changed;
}
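
/*
 * Emulate a guest write to CR0.  Returns 0 on success and 1 when the new
 * value must be rejected: reserved high bits set, NW without CD, PG without
 * PE, or an invalid transition into paging (LME set without PAE or with a
 * 64-bit code segment active, or unloadable PDPTEs in PAE mode).
 */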
int kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
{
        unsigned long old_cr0 = kvm_read_cr0(vcpu);
        unsigned long update_bits = X86_CR0_PG | X86_CR0_WP |
                                    X86_CR0_CD | X86_CR0_NW;

        cr0 |= X86_CR0_ET;

#ifdef CONFIG_X86_64
        if (cr0 & 0xffffffff00000000UL)
                return 1;
#endif

        cr0 &= ~CR0_RESERVED_BITS;

        if ((cr0 & X86_CR0_NW) && !(cr0 & X86_CR0_CD))
                return 1;

        if ((cr0 & X86_CR0_PG) && !(cr0 & X86_CR0_PE))
                return 1;

        if (!is_paging(vcpu) && (cr0 & X86_CR0_PG)) {
#ifdef CONFIG_X86_64
                if ((vcpu->arch.efer & EFER_LME)) {
                        int cs_db, cs_l;

                        if (!is_pae(vcpu))
                                return 1;
                        kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
                        if (cs_l)
                                return 1;
                } else
#endif
                if (is_pae(vcpu) && !load_pdptrs(vcpu, vcpu->arch.walk_mmu,
                                                 vcpu->arch.cr3))
                        return 1;
        }

        kvm_x86_ops->set_cr0(vcpu, cr0);

        if ((cr0 ^ old_cr0) & X86_CR0_PG)
                kvm_clear_async_pf_completion_queue(vcpu);

        if ((cr0 ^ old_cr0) & update_bits)
                kvm_mmu_reset_context(vcpu);
        return 0;
}
EXPORT_SYMBOL_GPL(kvm_set_cr0);

void kvm_lmsw(struct kvm_vcpu *vcpu, unsigned long msw)
{
        (void)kvm_set_cr0(vcpu, kvm_read_cr0_bits(vcpu, ~0x0eul) | (msw & 0x0f));
}
EXPORT_SYMBOL_GPL(kvm_lmsw);
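
/*
 * Validate and set XCR0.  XSETBV is only allowed at CPL 0; architecturally,
 * bit 0 (x87) must always be set and AVX (YMM) state cannot be enabled
 * without SSE state.  The guest is also not allowed to enable any feature
 * the host's XCR0 does not have.
 */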
int __kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
{
        u64 xcr0;

        /* Only support XCR_XFEATURE_ENABLED_MASK(xcr0) now  */
        if (index != XCR_XFEATURE_ENABLED_MASK)
                return 1;
        xcr0 = xcr;
        if (kvm_x86_ops->get_cpl(vcpu) != 0)
                return 1;
        if (!(xcr0 & XSTATE_FP))
                return 1;
        if ((xcr0 & XSTATE_YMM) && !(xcr0 & XSTATE_SSE))
                return 1;
        if (xcr0 & ~host_xcr0)
                return 1;
        vcpu->arch.xcr0 = xcr0;
        vcpu->guest_xcr0_loaded = 0;
        return 0;
}

int kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
{
        if (__kvm_set_xcr(vcpu, index, xcr)) {
                kvm_inject_gp(vcpu, 0);
                return 1;
        }
        return 0;
}
EXPORT_SYMBOL_GPL(kvm_set_xcr);

static bool guest_cpuid_has_xsave(struct kvm_vcpu *vcpu)
{
        struct kvm_cpuid_entry2 *best;

        best = kvm_find_cpuid_entry(vcpu, 1, 0);
        return best && (best->ecx & bit(X86_FEATURE_XSAVE));
}

static void update_cpuid(struct kvm_vcpu *vcpu)
{
        struct kvm_cpuid_entry2 *best;

        best = kvm_find_cpuid_entry(vcpu, 1, 0);
        if (!best)
                return;

        /* Update OSXSAVE bit */
        if (cpu_has_xsave && best->function == 0x1) {
                best->ecx &= ~(bit(X86_FEATURE_OSXSAVE));
                if (kvm_read_cr4_bits(vcpu, X86_CR4_OSXSAVE))
                        best->ecx |= bit(X86_FEATURE_OSXSAVE);
        }
}
int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
{
        unsigned long old_cr4 = kvm_read_cr4(vcpu);
        unsigned long pdptr_bits = X86_CR4_PGE | X86_CR4_PSE | X86_CR4_PAE;

        if (cr4 & CR4_RESERVED_BITS)
                return 1;

        if (!guest_cpuid_has_xsave(vcpu) && (cr4 & X86_CR4_OSXSAVE))
                return 1;

        if (is_long_mode(vcpu)) {
                if (!(cr4 & X86_CR4_PAE))
                        return 1;
        } else if (is_paging(vcpu) && (cr4 & X86_CR4_PAE)
                   && ((cr4 ^ old_cr4) & pdptr_bits)
                   && !load_pdptrs(vcpu, vcpu->arch.walk_mmu, vcpu->arch.cr3))
                return 1;

        if (cr4 & X86_CR4_VMXE)
                return 1;

        kvm_x86_ops->set_cr4(vcpu, cr4);

        if ((cr4 ^ old_cr4) & pdptr_bits)
                kvm_mmu_reset_context(vcpu);

        if ((cr4 ^ old_cr4) & X86_CR4_OSXSAVE)
                update_cpuid(vcpu);

        return 0;
}
EXPORT_SYMBOL_GPL(kvm_set_cr4);

int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
{
        if (cr3 == vcpu->arch.cr3 && !pdptrs_changed(vcpu)) {
                kvm_mmu_sync_roots(vcpu);
                kvm_mmu_flush_tlb(vcpu);
                return 0;
        }

        if (is_long_mode(vcpu)) {
                if (cr3 & CR3_L_MODE_RESERVED_BITS)
                        return 1;
        } else {
                if (is_pae(vcpu)) {
                        if (cr3 & CR3_PAE_RESERVED_BITS)
                                return 1;
                        if (is_paging(vcpu) &&
                            !load_pdptrs(vcpu, vcpu->arch.walk_mmu, cr3))
                                return 1;
                }
                /*
                 * We don't check reserved bits in nonpae mode, because
                 * this isn't enforced, and VMware depends on this.
                 */
        }

        /*
         * Does the new cr3 value map to physical memory? (Note, we
         * catch an invalid cr3 even in real-mode, because it would
         * cause trouble later on when we turn on paging anyway.)
         *
         * A real CPU would silently accept an invalid cr3 and would
         * attempt to use it - with largely undefined (and often hard
         * to debug) behavior on the guest side.
         */
        if (unlikely(!gfn_to_memslot(vcpu->kvm, cr3 >> PAGE_SHIFT)))
                return 1;
        vcpu->arch.cr3 = cr3;
        vcpu->arch.mmu.new_cr3(vcpu);
        return 0;
}
EXPORT_SYMBOL_GPL(kvm_set_cr3);

int __kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8)
{
        if (cr8 & CR8_RESERVED_BITS)
                return 1;
        if (irqchip_in_kernel(vcpu->kvm))
                kvm_lapic_set_tpr(vcpu, cr8);
        else
                vcpu->arch.cr8 = cr8;
        return 0;
}

void kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8)
{
        if (__kvm_set_cr8(vcpu, cr8))
                kvm_inject_gp(vcpu, 0);
}
EXPORT_SYMBOL_GPL(kvm_set_cr8);

unsigned long kvm_get_cr8(struct kvm_vcpu *vcpu)
{
        if (irqchip_in_kernel(vcpu->kvm))
                return kvm_lapic_get_cr8(vcpu);
        else
                return vcpu->arch.cr8;
}
EXPORT_SYMBOL_GPL(kvm_get_cr8);
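
/*
 * Debug register writes.  Return convention for __kvm_set_dr(): 0 on
 * success, 1 if the access should raise #UD (DR4/DR5 with CR4.DE set), and
 * -1 if it should raise #GP (reserved upper 32 bits set); kvm_set_dr()
 * translates these into the corresponding injected exceptions.
 */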
static int __kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val)
{
        switch (dr) {
        case 0 ... 3:
                vcpu->arch.db[dr] = val;
                if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP))
                        vcpu->arch.eff_db[dr] = val;
                break;
        case 4:
                if (kvm_read_cr4_bits(vcpu, X86_CR4_DE))
                        return 1; /* #UD */
                /* fall through */
        case 6:
                if (val & 0xffffffff00000000ULL)
                        return -1; /* #GP */
                vcpu->arch.dr6 = (val & DR6_VOLATILE) | DR6_FIXED_1;
                break;
        case 5:
                if (kvm_read_cr4_bits(vcpu, X86_CR4_DE))
                        return 1; /* #UD */
                /* fall through */
        default: /* 7 */
                if (val & 0xffffffff00000000ULL)
                        return -1; /* #GP */
                vcpu->arch.dr7 = (val & DR7_VOLATILE) | DR7_FIXED_1;
                if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)) {
                        kvm_x86_ops->set_dr7(vcpu, vcpu->arch.dr7);
                        vcpu->arch.switch_db_regs = (val & DR7_BP_EN_MASK);
                }
                break;
        }

        return 0;
}

int kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val)
{
        int res;

        res = __kvm_set_dr(vcpu, dr, val);
        if (res > 0)
                kvm_queue_exception(vcpu, UD_VECTOR);
        else if (res < 0)
                kvm_inject_gp(vcpu, 0);

        return res;
}
EXPORT_SYMBOL_GPL(kvm_set_dr);

static int _kvm_get_dr(struct kvm_vcpu *vcpu, int dr, unsigned long *val)
{
        switch (dr) {
        case 0 ... 3:
                *val = vcpu->arch.db[dr];
                break;
        case 4:
                if (kvm_read_cr4_bits(vcpu, X86_CR4_DE))
                        return 1;
                /* fall through */
        case 6:
                *val = vcpu->arch.dr6;
                break;
        case 5:
                if (kvm_read_cr4_bits(vcpu, X86_CR4_DE))
                        return 1;
                /* fall through */
        default: /* 7 */
                *val = vcpu->arch.dr7;
                break;
        }

        return 0;
}

int kvm_get_dr(struct kvm_vcpu *vcpu, int dr, unsigned long *val)
{
        if (_kvm_get_dr(vcpu, dr, val)) {
                kvm_queue_exception(vcpu, UD_VECTOR);
                return 1;
        }
        return 0;
}
EXPORT_SYMBOL_GPL(kvm_get_dr);
/*
 * List of msr numbers which we expose to userspace through KVM_GET_MSRS
 * and KVM_SET_MSRS, and KVM_GET_MSR_INDEX_LIST.
 *
 * This list is modified at module load time to reflect the
 * capabilities of the host cpu.  This capabilities test skips MSRs that are
 * kvm-specific.  Those are put in the beginning of the list.
 */
#define KVM_SAVE_MSRS_BEGIN     8
static u32 msrs_to_save[] = {
        MSR_KVM_SYSTEM_TIME, MSR_KVM_WALL_CLOCK,
        MSR_KVM_SYSTEM_TIME_NEW, MSR_KVM_WALL_CLOCK_NEW,
        HV_X64_MSR_GUEST_OS_ID, HV_X64_MSR_HYPERCALL,
        HV_X64_MSR_APIC_ASSIST_PAGE, MSR_KVM_ASYNC_PF_EN,
        MSR_IA32_SYSENTER_CS, MSR_IA32_SYSENTER_ESP, MSR_IA32_SYSENTER_EIP,
        MSR_STAR,
#ifdef CONFIG_X86_64
        MSR_CSTAR, MSR_KERNEL_GS_BASE, MSR_SYSCALL_MASK, MSR_LSTAR,
#endif
        MSR_IA32_TSC, MSR_IA32_CR_PAT, MSR_VM_HSAVE_PA
};

static unsigned num_msrs_to_save;

static u32 emulated_msrs[] = {
        MSR_IA32_MISC_ENABLE,
        MSR_IA32_MCG_STATUS,
        MSR_IA32_MCG_CTL,
};
static int set_efer(struct kvm_vcpu *vcpu, u64 efer)
{
        u64 old_efer = vcpu->arch.efer;

        if (efer & efer_reserved_bits)
                return 1;

        if (is_paging(vcpu)
            && (vcpu->arch.efer & EFER_LME) != (efer & EFER_LME))
                return 1;

        if (efer & EFER_FFXSR) {
                struct kvm_cpuid_entry2 *feat;

                feat = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
                if (!feat || !(feat->edx & bit(X86_FEATURE_FXSR_OPT)))
                        return 1;
        }

        if (efer & EFER_SVME) {
                struct kvm_cpuid_entry2 *feat;

                feat = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
                if (!feat || !(feat->ecx & bit(X86_FEATURE_SVM)))
                        return 1;
        }

        efer &= ~EFER_LMA;
        efer |= vcpu->arch.efer & EFER_LMA;

        kvm_x86_ops->set_efer(vcpu, efer);

        vcpu->arch.mmu.base_role.nxe = (efer & EFER_NX) && !tdp_enabled;

        /* Update reserved bits */
        if ((efer ^ old_efer) & EFER_NX)
                kvm_mmu_reset_context(vcpu);

        return 0;
}

void kvm_enable_efer_bits(u64 mask)
{
        efer_reserved_bits &= ~mask;
}
EXPORT_SYMBOL_GPL(kvm_enable_efer_bits);
/*
 * Writes msr value into the appropriate "register".
 * Returns 0 on success, non-0 otherwise.
 * Assumes vcpu_load() was already called.
 */
int kvm_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
{
        return kvm_x86_ops->set_msr(vcpu, msr_index, data);
}

/*
 * Adapt set_msr() to msr_io()'s calling convention
 */
static int do_set_msr(struct kvm_vcpu *vcpu, unsigned index, u64 *data)
{
        return kvm_set_msr(vcpu, index, *data);
}
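
/*
 * Publish the wall-clock base to the guest.  The pvclock "version" field
 * acts as a sequence counter: it is bumped to an odd value before the
 * structure is rewritten and to an even value afterwards, so a guest that
 * reads an odd or changing version knows it must retry.
 */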
static void kvm_write_wall_clock(struct kvm *kvm, gpa_t wall_clock)
{
        int version;
        int r;
        struct pvclock_wall_clock wc;
        struct timespec boot;

        if (!wall_clock)
                return;

        r = kvm_read_guest(kvm, wall_clock, &version, sizeof(version));
        if (r)
                return;

        if (version & 1)
                ++version;  /* first time write, random junk */

        ++version;

        kvm_write_guest(kvm, wall_clock, &version, sizeof(version));

        /*
         * The guest calculates current wall clock time by adding
         * system time (updated by kvm_guest_time_update below) to the
         * wall clock specified here.  guest system time equals host
         * system time for us, thus we must fill in host boot time here.
         */
        getboottime(&boot);

        wc.sec = boot.tv_sec;
        wc.nsec = boot.tv_nsec;
        wc.version = version;

        kvm_write_guest(kvm, wall_clock, &wc, sizeof(wc));

        version++;
        kvm_write_guest(kvm, wall_clock, &version, sizeof(version));
}

static uint32_t div_frac(uint32_t dividend, uint32_t divisor)
{
        uint32_t quotient, remainder;

        /* Don't try to replace with do_div(), this one calculates
         * "(dividend << 32) / divisor" */
        __asm__ ( "divl %4"
                  : "=a" (quotient), "=d" (remainder)
                  : "0" (0), "1" (dividend), "r" (divisor) );
        return quotient;
}
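
/*
 * Compute a (shift, 32-bit multiplier) pair such that a counter running at
 * base_khz can be converted to scaled_khz units via
 * "(value * pmultiplier) >> 32", after value has been shifted left (or
 * right, for a negative pshift) by pshift bits.  This is the fixed-point
 * format consumed by pvclock_scale_delta() and the guest's pvclock code.
 */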
static void kvm_get_time_scale(uint32_t scaled_khz, uint32_t base_khz,
                               s8 *pshift, u32 *pmultiplier)
{
        uint64_t scaled64;
        int32_t  shift = 0;
        uint64_t tps64;
        uint32_t tps32;

        tps64 = base_khz * 1000LL;
        scaled64 = scaled_khz * 1000LL;
        while (tps64 > scaled64*2 || tps64 & 0xffffffff00000000ULL) {
                tps64 >>= 1;
                shift--;
        }

        tps32 = (uint32_t)tps64;
        while (tps32 <= scaled64 || scaled64 & 0xffffffff00000000ULL) {
                if (scaled64 & 0xffffffff00000000ULL || tps32 & 0x80000000)
                        scaled64 >>= 1;
                else
                        tps32 <<= 1;
                shift++;
        }

        *pshift = shift;
        *pmultiplier = div_frac(scaled64, tps32);

        pr_debug("%s: base_khz %u => %u, shift %d, mul %u\n",
                 __func__, base_khz, scaled_khz, shift, *pmultiplier);
}

static inline u64 get_kernel_ns(void)
{
        struct timespec ts;

        WARN_ON(preemptible());
        ktime_get_ts(&ts);
        monotonic_to_bootbased(&ts);
        return timespec_to_ns(&ts);
}

static DEFINE_PER_CPU(unsigned long, cpu_tsc_khz);
unsigned long max_tsc_khz;

static inline int kvm_tsc_changes_freq(void)
{
        int cpu = get_cpu();
        int ret = !boot_cpu_has(X86_FEATURE_CONSTANT_TSC) &&
                  cpufreq_quick_get(cpu) != 0;
        put_cpu();
        return ret;
}
static inline u64 nsec_to_cycles(u64 nsec)
{
        u64 ret;

        WARN_ON(preemptible());
        if (kvm_tsc_changes_freq())
                printk_once(KERN_WARNING
                 "kvm: unreliable cycle conversion on adjustable rate TSC\n");
        ret = nsec * __get_cpu_var(cpu_tsc_khz);
        do_div(ret, USEC_PER_SEC);
        return ret;
}

static void kvm_arch_set_tsc_khz(struct kvm *kvm, u32 this_tsc_khz)
{
        /* Compute a scale to convert nanoseconds in TSC cycles */
        kvm_get_time_scale(this_tsc_khz, NSEC_PER_SEC / 1000,
                           &kvm->arch.virtual_tsc_shift,
                           &kvm->arch.virtual_tsc_mult);
        kvm->arch.virtual_tsc_khz = this_tsc_khz;
}

static u64 compute_guest_tsc(struct kvm_vcpu *vcpu, s64 kernel_ns)
{
        u64 tsc = pvclock_scale_delta(kernel_ns-vcpu->arch.last_tsc_nsec,
                                      vcpu->kvm->arch.virtual_tsc_mult,
                                      vcpu->kvm->arch.virtual_tsc_shift);
        tsc += vcpu->arch.last_tsc_write;
        return tsc;
}

void kvm_write_tsc(struct kvm_vcpu *vcpu, u64 data)
{
        struct kvm *kvm = vcpu->kvm;
        u64 offset, ns, elapsed;
        unsigned long flags;
        s64 sdiff;

        spin_lock_irqsave(&kvm->arch.tsc_write_lock, flags);
        offset = data - native_read_tsc();
        ns = get_kernel_ns();
        elapsed = ns - kvm->arch.last_tsc_nsec;
        sdiff = data - kvm->arch.last_tsc_write;
        if (sdiff < 0)
                sdiff = -sdiff;

        /*
         * Special case: a TSC write close to that of another CPU, within
         * 5 seconds, is interpreted as an attempt to synchronize.
         * The 5 seconds is to accommodate host load / swapping as
         * well as any reset of TSC during the boot process.
         *
         * In that case, for a reliable TSC, we can match TSC offsets,
         * or make a best guess using the elapsed value.
         */
        if (sdiff < nsec_to_cycles(5ULL * NSEC_PER_SEC) &&
            elapsed < 5ULL * NSEC_PER_SEC) {
                if (!check_tsc_unstable()) {
                        offset = kvm->arch.last_tsc_offset;
                        pr_debug("kvm: matched tsc offset for %llu\n", data);
                } else {
                        u64 delta = nsec_to_cycles(elapsed);
                        offset += delta;
                        pr_debug("kvm: adjusted tsc offset by %llu\n", delta);
                }
                ns = kvm->arch.last_tsc_nsec;
        }
        kvm->arch.last_tsc_nsec = ns;
        kvm->arch.last_tsc_write = data;
        kvm->arch.last_tsc_offset = offset;
        kvm_x86_ops->write_tsc_offset(vcpu, offset);
        spin_unlock_irqrestore(&kvm->arch.tsc_write_lock, flags);

        /* Reset of TSC must disable overshoot protection below */
        vcpu->arch.hv_clock.tsc_timestamp = 0;
        vcpu->arch.last_tsc_write = data;
        vcpu->arch.last_tsc_nsec = ns;
}
EXPORT_SYMBOL_GPL(kvm_write_tsc);
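
/*
 * Refresh the pvclock structure in the guest's registered time page:
 * sample the TSC and kernel boot-based time, apply catch-up if this vCPU
 * is in tsc_catchup mode, clamp system_time so it never moves backwards
 * from what the guest may already have observed, and publish the result
 * with an even version number.
 */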
  904. static int kvm_guest_time_update(struct kvm_vcpu *v)
  905. {
  906. unsigned long flags;
  907. struct kvm_vcpu_arch *vcpu = &v->arch;
  908. void *shared_kaddr;
  909. unsigned long this_tsc_khz;
  910. s64 kernel_ns, max_kernel_ns;
  911. u64 tsc_timestamp;
  912. /* Keep irq disabled to prevent changes to the clock */
  913. local_irq_save(flags);
  914. kvm_get_msr(v, MSR_IA32_TSC, &tsc_timestamp);
  915. kernel_ns = get_kernel_ns();
  916. this_tsc_khz = __get_cpu_var(cpu_tsc_khz);
  917. if (unlikely(this_tsc_khz == 0)) {
  918. local_irq_restore(flags);
  919. kvm_make_request(KVM_REQ_CLOCK_UPDATE, v);
  920. return 1;
  921. }
  922. /*
  923. * We may have to catch up the TSC to match elapsed wall clock
  924. * time for two reasons, even if kvmclock is used.
  925. * 1) CPU could have been running below the maximum TSC rate
  926. * 2) Broken TSC compensation resets the base at each VCPU
  927. * entry to avoid unknown leaps of TSC even when running
  928. * again on the same CPU. This may cause apparent elapsed
  929. * time to disappear, and the guest to stand still or run
  930. * very slowly.
  931. */
  932. if (vcpu->tsc_catchup) {
  933. u64 tsc = compute_guest_tsc(v, kernel_ns);
  934. if (tsc > tsc_timestamp) {
  935. kvm_x86_ops->adjust_tsc_offset(v, tsc - tsc_timestamp);
  936. tsc_timestamp = tsc;
  937. }
  938. }
  939. local_irq_restore(flags);
  940. if (!vcpu->time_page)
  941. return 0;
  942. /*
  943. * Time as measured by the TSC may go backwards when resetting the base
  944. * tsc_timestamp. The reason for this is that the TSC resolution is
  945. * higher than the resolution of the other clock scales. Thus, many
946. * possible measurements of the TSC correspond to one measurement of any
947. * other clock, and so a spread of values is possible. This is not a
948. * problem for the computation of the nanosecond clock; with TSC rates
949. * around 1GHz, there can only be a few cycles which correspond to one
  950. * nanosecond value, and any path through this code will inevitably
  951. * take longer than that. However, with the kernel_ns value itself,
  952. * the precision may be much lower, down to HZ granularity. If the
  953. * first sampling of TSC against kernel_ns ends in the low part of the
  954. * range, and the second in the high end of the range, we can get:
  955. *
  956. * (TSC - offset_low) * S + kns_old > (TSC - offset_high) * S + kns_new
  957. *
  958. * As the sampling errors potentially range in the thousands of cycles,
  959. * it is possible such a time value has already been observed by the
  960. * guest. To protect against this, we must compute the system time as
  961. * observed by the guest and ensure the new system time is greater.
  962. */
  963. max_kernel_ns = 0;
  964. if (vcpu->hv_clock.tsc_timestamp && vcpu->last_guest_tsc) {
  965. max_kernel_ns = vcpu->last_guest_tsc -
  966. vcpu->hv_clock.tsc_timestamp;
  967. max_kernel_ns = pvclock_scale_delta(max_kernel_ns,
  968. vcpu->hv_clock.tsc_to_system_mul,
  969. vcpu->hv_clock.tsc_shift);
  970. max_kernel_ns += vcpu->last_kernel_ns;
  971. }
  972. if (unlikely(vcpu->hw_tsc_khz != this_tsc_khz)) {
  973. kvm_get_time_scale(NSEC_PER_SEC / 1000, this_tsc_khz,
  974. &vcpu->hv_clock.tsc_shift,
  975. &vcpu->hv_clock.tsc_to_system_mul);
  976. vcpu->hw_tsc_khz = this_tsc_khz;
  977. }
  978. if (max_kernel_ns > kernel_ns)
  979. kernel_ns = max_kernel_ns;
  980. /* With all the info we got, fill in the values */
  981. vcpu->hv_clock.tsc_timestamp = tsc_timestamp;
  982. vcpu->hv_clock.system_time = kernel_ns + v->kvm->arch.kvmclock_offset;
  983. vcpu->last_kernel_ns = kernel_ns;
  984. vcpu->last_guest_tsc = tsc_timestamp;
  985. vcpu->hv_clock.flags = 0;
  986. /*
  987. * The interface expects us to write an even number signaling that the
  988. * update is finished. Since the guest won't see the intermediate
  989. * state, we just increase by 2 at the end.
  990. */
  991. vcpu->hv_clock.version += 2;
  992. shared_kaddr = kmap_atomic(vcpu->time_page, KM_USER0);
  993. memcpy(shared_kaddr + vcpu->time_offset, &vcpu->hv_clock,
  994. sizeof(vcpu->hv_clock));
  995. kunmap_atomic(shared_kaddr, KM_USER0);
  996. mark_page_dirty(v->kvm, vcpu->time >> PAGE_SHIFT);
  997. return 0;
  998. }
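/*
 * MTRR/PAT emulation. MSRs 0x200 + 2*i and 0x200 + 2*i + 1 are the
 * variable-range base/mask pairs; the fixed-range MSRs, MTRRdefType and
 * IA32_CR_PAT are listed explicitly. Guest values are kept in the
 * per-VCPU mtrr_state (PAT separately in vcpu->arch.pat), and a valid
 * write resets the MMU context so mappings pick up the new memory types.
 */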
  999. static bool msr_mtrr_valid(unsigned msr)
  1000. {
  1001. switch (msr) {
  1002. case 0x200 ... 0x200 + 2 * KVM_NR_VAR_MTRR - 1:
  1003. case MSR_MTRRfix64K_00000:
  1004. case MSR_MTRRfix16K_80000:
  1005. case MSR_MTRRfix16K_A0000:
  1006. case MSR_MTRRfix4K_C0000:
  1007. case MSR_MTRRfix4K_C8000:
  1008. case MSR_MTRRfix4K_D0000:
  1009. case MSR_MTRRfix4K_D8000:
  1010. case MSR_MTRRfix4K_E0000:
  1011. case MSR_MTRRfix4K_E8000:
  1012. case MSR_MTRRfix4K_F0000:
  1013. case MSR_MTRRfix4K_F8000:
  1014. case MSR_MTRRdefType:
  1015. case MSR_IA32_CR_PAT:
  1016. return true;
  1017. case 0x2f8:
  1018. return true;
  1019. }
  1020. return false;
  1021. }
  1022. static bool valid_pat_type(unsigned t)
  1023. {
  1024. return t < 8 && (1 << t) & 0xf3; /* 0, 1, 4, 5, 6, 7 */
  1025. }
  1026. static bool valid_mtrr_type(unsigned t)
  1027. {
  1028. return t < 8 && (1 << t) & 0x73; /* 0, 1, 4, 5, 6 */
  1029. }
  1030. static bool mtrr_valid(struct kvm_vcpu *vcpu, u32 msr, u64 data)
  1031. {
  1032. int i;
  1033. if (!msr_mtrr_valid(msr))
  1034. return false;
  1035. if (msr == MSR_IA32_CR_PAT) {
  1036. for (i = 0; i < 8; i++)
  1037. if (!valid_pat_type((data >> (i * 8)) & 0xff))
  1038. return false;
  1039. return true;
  1040. } else if (msr == MSR_MTRRdefType) {
  1041. if (data & ~0xcff)
  1042. return false;
  1043. return valid_mtrr_type(data & 0xff);
  1044. } else if (msr >= MSR_MTRRfix64K_00000 && msr <= MSR_MTRRfix4K_F8000) {
1045. for (i = 0; i < 8; i++)
  1046. if (!valid_mtrr_type((data >> (i * 8)) & 0xff))
  1047. return false;
  1048. return true;
  1049. }
  1050. /* variable MTRRs */
  1051. return valid_mtrr_type(data & 0xff);
  1052. }
  1053. static int set_msr_mtrr(struct kvm_vcpu *vcpu, u32 msr, u64 data)
  1054. {
  1055. u64 *p = (u64 *)&vcpu->arch.mtrr_state.fixed_ranges;
  1056. if (!mtrr_valid(vcpu, msr, data))
  1057. return 1;
  1058. if (msr == MSR_MTRRdefType) {
  1059. vcpu->arch.mtrr_state.def_type = data;
  1060. vcpu->arch.mtrr_state.enabled = (data & 0xc00) >> 10;
  1061. } else if (msr == MSR_MTRRfix64K_00000)
  1062. p[0] = data;
  1063. else if (msr == MSR_MTRRfix16K_80000 || msr == MSR_MTRRfix16K_A0000)
  1064. p[1 + msr - MSR_MTRRfix16K_80000] = data;
  1065. else if (msr >= MSR_MTRRfix4K_C0000 && msr <= MSR_MTRRfix4K_F8000)
  1066. p[3 + msr - MSR_MTRRfix4K_C0000] = data;
  1067. else if (msr == MSR_IA32_CR_PAT)
  1068. vcpu->arch.pat = data;
  1069. else { /* Variable MTRRs */
  1070. int idx, is_mtrr_mask;
  1071. u64 *pt;
  1072. idx = (msr - 0x200) / 2;
  1073. is_mtrr_mask = msr - 0x200 - 2 * idx;
  1074. if (!is_mtrr_mask)
  1075. pt =
  1076. (u64 *)&vcpu->arch.mtrr_state.var_ranges[idx].base_lo;
  1077. else
  1078. pt =
  1079. (u64 *)&vcpu->arch.mtrr_state.var_ranges[idx].mask_lo;
  1080. *pt = data;
  1081. }
  1082. kvm_mmu_reset_context(vcpu);
  1083. return 0;
  1084. }
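/*
 * Emulate writes to the machine-check MSRs: MCG_STATUS, MCG_CTL (only if
 * advertised in MCG_CAP) and the per-bank MCi_* registers. Returns
 * non-zero for values real hardware would refuse, which the caller turns
 * into a #GP for the guest.
 */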
  1085. static int set_msr_mce(struct kvm_vcpu *vcpu, u32 msr, u64 data)
  1086. {
  1087. u64 mcg_cap = vcpu->arch.mcg_cap;
  1088. unsigned bank_num = mcg_cap & 0xff;
  1089. switch (msr) {
  1090. case MSR_IA32_MCG_STATUS:
  1091. vcpu->arch.mcg_status = data;
  1092. break;
  1093. case MSR_IA32_MCG_CTL:
  1094. if (!(mcg_cap & MCG_CTL_P))
  1095. return 1;
  1096. if (data != 0 && data != ~(u64)0)
  1097. return -1;
  1098. vcpu->arch.mcg_ctl = data;
  1099. break;
  1100. default:
  1101. if (msr >= MSR_IA32_MC0_CTL &&
  1102. msr < MSR_IA32_MC0_CTL + 4 * bank_num) {
  1103. u32 offset = msr - MSR_IA32_MC0_CTL;
1104. /* only 0 or all 1s can be written to IA32_MCi_CTL;
1105. * some Linux kernels, though, clear bit 10 in bank 4 to
1106. * work around a BIOS/GART TBL issue on AMD K8s, so ignore
1107. * this to avoid an uncaught #GP in the guest
1108. */
  1109. if ((offset & 0x3) == 0 &&
  1110. data != 0 && (data | (1 << 10)) != ~(u64)0)
  1111. return -1;
  1112. vcpu->arch.mce_banks[offset] = data;
  1113. break;
  1114. }
  1115. return 1;
  1116. }
  1117. return 0;
  1118. }
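/*
 * Handle a write to the Xen HVM config MSR: copy one page of the
 * userspace-provided hypercall blob into guest memory. The low bits of
 * the written value select the blob page, the page-aligned part is the
 * destination guest physical address.
 */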
  1119. static int xen_hvm_config(struct kvm_vcpu *vcpu, u64 data)
  1120. {
  1121. struct kvm *kvm = vcpu->kvm;
  1122. int lm = is_long_mode(vcpu);
  1123. u8 *blob_addr = lm ? (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_64
  1124. : (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
  1125. u8 blob_size = lm ? kvm->arch.xen_hvm_config.blob_size_64
  1126. : kvm->arch.xen_hvm_config.blob_size_32;
  1127. u32 page_num = data & ~PAGE_MASK;
  1128. u64 page_addr = data & PAGE_MASK;
  1129. u8 *page;
  1130. int r;
  1131. r = -E2BIG;
  1132. if (page_num >= blob_size)
  1133. goto out;
  1134. r = -ENOMEM;
  1135. page = kzalloc(PAGE_SIZE, GFP_KERNEL);
  1136. if (!page)
  1137. goto out;
  1138. r = -EFAULT;
  1139. if (copy_from_user(page, blob_addr + (page_num * PAGE_SIZE), PAGE_SIZE))
  1140. goto out_free;
  1141. if (kvm_write_guest(kvm, page_addr, page, PAGE_SIZE))
  1142. goto out_free;
  1143. r = 0;
  1144. out_free:
  1145. kfree(page);
  1146. out:
  1147. return r;
  1148. }
  1149. static bool kvm_hv_hypercall_enabled(struct kvm *kvm)
  1150. {
  1151. return kvm->arch.hv_hypercall & HV_X64_MSR_HYPERCALL_ENABLE;
  1152. }
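/*
 * Hyper-V MSRs come in two flavours: partition-wide MSRs, which live in
 * struct kvm and are written under kvm->lock, and per-VCPU MSRs handled
 * directly on the VCPU.
 */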
  1153. static bool kvm_hv_msr_partition_wide(u32 msr)
  1154. {
  1155. bool r = false;
  1156. switch (msr) {
  1157. case HV_X64_MSR_GUEST_OS_ID:
  1158. case HV_X64_MSR_HYPERCALL:
  1159. r = true;
  1160. break;
  1161. }
  1162. return r;
  1163. }
  1164. static int set_msr_hyperv_pw(struct kvm_vcpu *vcpu, u32 msr, u64 data)
  1165. {
  1166. struct kvm *kvm = vcpu->kvm;
  1167. switch (msr) {
  1168. case HV_X64_MSR_GUEST_OS_ID:
  1169. kvm->arch.hv_guest_os_id = data;
  1170. /* setting guest os id to zero disables hypercall page */
  1171. if (!kvm->arch.hv_guest_os_id)
  1172. kvm->arch.hv_hypercall &= ~HV_X64_MSR_HYPERCALL_ENABLE;
  1173. break;
  1174. case HV_X64_MSR_HYPERCALL: {
  1175. u64 gfn;
  1176. unsigned long addr;
  1177. u8 instructions[4];
1178. /* if the guest os id is not set, the hypercall page should remain disabled */
  1179. if (!kvm->arch.hv_guest_os_id)
  1180. break;
  1181. if (!(data & HV_X64_MSR_HYPERCALL_ENABLE)) {
  1182. kvm->arch.hv_hypercall = data;
  1183. break;
  1184. }
  1185. gfn = data >> HV_X64_MSR_HYPERCALL_PAGE_ADDRESS_SHIFT;
  1186. addr = gfn_to_hva(kvm, gfn);
  1187. if (kvm_is_error_hva(addr))
  1188. return 1;
  1189. kvm_x86_ops->patch_hypercall(vcpu, instructions);
  1190. ((unsigned char *)instructions)[3] = 0xc3; /* ret */
  1191. if (copy_to_user((void __user *)addr, instructions, 4))
  1192. return 1;
  1193. kvm->arch.hv_hypercall = data;
  1194. break;
  1195. }
  1196. default:
  1197. pr_unimpl(vcpu, "HYPER-V unimplemented wrmsr: 0x%x "
  1198. "data 0x%llx\n", msr, data);
  1199. return 1;
  1200. }
  1201. return 0;
  1202. }
  1203. static int set_msr_hyperv(struct kvm_vcpu *vcpu, u32 msr, u64 data)
  1204. {
  1205. switch (msr) {
  1206. case HV_X64_MSR_APIC_ASSIST_PAGE: {
  1207. unsigned long addr;
  1208. if (!(data & HV_X64_MSR_APIC_ASSIST_PAGE_ENABLE)) {
  1209. vcpu->arch.hv_vapic = data;
  1210. break;
  1211. }
  1212. addr = gfn_to_hva(vcpu->kvm, data >>
  1213. HV_X64_MSR_APIC_ASSIST_PAGE_ADDRESS_SHIFT);
  1214. if (kvm_is_error_hva(addr))
  1215. return 1;
  1216. if (clear_user((void __user *)addr, PAGE_SIZE))
  1217. return 1;
  1218. vcpu->arch.hv_vapic = data;
  1219. break;
  1220. }
  1221. case HV_X64_MSR_EOI:
  1222. return kvm_hv_vapic_msr_write(vcpu, APIC_EOI, data);
  1223. case HV_X64_MSR_ICR:
  1224. return kvm_hv_vapic_msr_write(vcpu, APIC_ICR, data);
  1225. case HV_X64_MSR_TPR:
  1226. return kvm_hv_vapic_msr_write(vcpu, APIC_TASKPRI, data);
  1227. default:
  1228. pr_unimpl(vcpu, "HYPER-V unimplemented wrmsr: 0x%x "
  1229. "data 0x%llx\n", msr, data);
  1230. return 1;
  1231. }
  1232. return 0;
  1233. }
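/*
 * MSR_KVM_ASYNC_PF_EN handler: bit 0 enables asynchronous page faults
 * and the remaining address bits point at the 64-byte shared area used
 * to report "page not present" / "page ready" events to the guest.
 */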
  1234. static int kvm_pv_enable_async_pf(struct kvm_vcpu *vcpu, u64 data)
  1235. {
  1236. gpa_t gpa = data & ~0x3f;
1237. /* Bits 2:5 are reserved, should be zero */
  1238. if (data & 0x3c)
  1239. return 1;
  1240. vcpu->arch.apf.msr_val = data;
  1241. if (!(data & KVM_ASYNC_PF_ENABLED)) {
  1242. kvm_clear_async_pf_completion_queue(vcpu);
  1243. kvm_async_pf_hash_reset(vcpu);
  1244. return 0;
  1245. }
  1246. if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.apf.data, gpa))
  1247. return 1;
  1248. vcpu->arch.apf.send_user_only = !(data & KVM_ASYNC_PF_SEND_ALWAYS);
  1249. kvm_async_pf_wakeup_all(vcpu);
  1250. return 0;
  1251. }
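/*
 * Vendor-independent part of WRMSR emulation. Returns 0 when the write
 * has been handled and non-zero when it should raise #GP in the guest.
 */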
  1252. int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)
  1253. {
  1254. switch (msr) {
  1255. case MSR_EFER:
  1256. return set_efer(vcpu, data);
  1257. case MSR_K7_HWCR:
  1258. data &= ~(u64)0x40; /* ignore flush filter disable */
  1259. data &= ~(u64)0x100; /* ignore ignne emulation enable */
  1260. if (data != 0) {
  1261. pr_unimpl(vcpu, "unimplemented HWCR wrmsr: 0x%llx\n",
  1262. data);
  1263. return 1;
  1264. }
  1265. break;
  1266. case MSR_FAM10H_MMIO_CONF_BASE:
  1267. if (data != 0) {
  1268. pr_unimpl(vcpu, "unimplemented MMIO_CONF_BASE wrmsr: "
  1269. "0x%llx\n", data);
  1270. return 1;
  1271. }
  1272. break;
  1273. case MSR_AMD64_NB_CFG:
  1274. break;
  1275. case MSR_IA32_DEBUGCTLMSR:
  1276. if (!data) {
  1277. /* We support the non-activated case already */
  1278. break;
  1279. } else if (data & ~(DEBUGCTLMSR_LBR | DEBUGCTLMSR_BTF)) {
  1280. /* Values other than LBR and BTF are vendor-specific,
  1281. thus reserved and should throw a #GP */
  1282. return 1;
  1283. }
  1284. pr_unimpl(vcpu, "%s: MSR_IA32_DEBUGCTLMSR 0x%llx, nop\n",
  1285. __func__, data);
  1286. break;
  1287. case MSR_IA32_UCODE_REV:
  1288. case MSR_IA32_UCODE_WRITE:
  1289. case MSR_VM_HSAVE_PA:
  1290. case MSR_AMD64_PATCH_LOADER:
  1291. break;
  1292. case 0x200 ... 0x2ff:
  1293. return set_msr_mtrr(vcpu, msr, data);
  1294. case MSR_IA32_APICBASE:
  1295. kvm_set_apic_base(vcpu, data);
  1296. break;
  1297. case APIC_BASE_MSR ... APIC_BASE_MSR + 0x3ff:
  1298. return kvm_x2apic_msr_write(vcpu, msr, data);
  1299. case MSR_IA32_MISC_ENABLE:
  1300. vcpu->arch.ia32_misc_enable_msr = data;
  1301. break;
  1302. case MSR_KVM_WALL_CLOCK_NEW:
  1303. case MSR_KVM_WALL_CLOCK:
  1304. vcpu->kvm->arch.wall_clock = data;
  1305. kvm_write_wall_clock(vcpu->kvm, data);
  1306. break;
  1307. case MSR_KVM_SYSTEM_TIME_NEW:
  1308. case MSR_KVM_SYSTEM_TIME: {
  1309. if (vcpu->arch.time_page) {
  1310. kvm_release_page_dirty(vcpu->arch.time_page);
  1311. vcpu->arch.time_page = NULL;
  1312. }
  1313. vcpu->arch.time = data;
  1314. kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
  1315. /* we verify if the enable bit is set... */
  1316. if (!(data & 1))
  1317. break;
  1318. /* ...but clean it before doing the actual write */
  1319. vcpu->arch.time_offset = data & ~(PAGE_MASK | 1);
  1320. vcpu->arch.time_page =
  1321. gfn_to_page(vcpu->kvm, data >> PAGE_SHIFT);
  1322. if (is_error_page(vcpu->arch.time_page)) {
  1323. kvm_release_page_clean(vcpu->arch.time_page);
  1324. vcpu->arch.time_page = NULL;
  1325. }
  1326. break;
  1327. }
  1328. case MSR_KVM_ASYNC_PF_EN:
  1329. if (kvm_pv_enable_async_pf(vcpu, data))
  1330. return 1;
  1331. break;
  1332. case MSR_IA32_MCG_CTL:
  1333. case MSR_IA32_MCG_STATUS:
  1334. case MSR_IA32_MC0_CTL ... MSR_IA32_MC0_CTL + 4 * KVM_MAX_MCE_BANKS - 1:
  1335. return set_msr_mce(vcpu, msr, data);
1336. /* Performance counters are not protected by a CPUID bit,
1337. * so we should check all of them in the generic path for the sake of
1338. * cross-vendor migration.
1339. * Writing a zero into the event select MSRs disables them,
1340. * which we perfectly emulate ;-). Any other value should be at least
1341. * reported; some guests depend on them.
1342. */
  1343. case MSR_P6_EVNTSEL0:
  1344. case MSR_P6_EVNTSEL1:
  1345. case MSR_K7_EVNTSEL0:
  1346. case MSR_K7_EVNTSEL1:
  1347. case MSR_K7_EVNTSEL2:
  1348. case MSR_K7_EVNTSEL3:
  1349. if (data != 0)
  1350. pr_unimpl(vcpu, "unimplemented perfctr wrmsr: "
  1351. "0x%x data 0x%llx\n", msr, data);
  1352. break;
  1353. /* at least RHEL 4 unconditionally writes to the perfctr registers,
  1354. * so we ignore writes to make it happy.
  1355. */
  1356. case MSR_P6_PERFCTR0:
  1357. case MSR_P6_PERFCTR1:
  1358. case MSR_K7_PERFCTR0:
  1359. case MSR_K7_PERFCTR1:
  1360. case MSR_K7_PERFCTR2:
  1361. case MSR_K7_PERFCTR3:
  1362. pr_unimpl(vcpu, "unimplemented perfctr wrmsr: "
  1363. "0x%x data 0x%llx\n", msr, data);
  1364. break;
  1365. case MSR_K7_CLK_CTL:
  1366. /*
  1367. * Ignore all writes to this no longer documented MSR.
  1368. * Writes are only relevant for old K7 processors,
1369. * all pre-dating SVM, but writing it is a recommended workaround
1370. * from AMD for these chips. It is possible to specify the
  1371. * affected processor models on the command line, hence
  1372. * the need to ignore the workaround.
  1373. */
  1374. break;
  1375. case HV_X64_MSR_GUEST_OS_ID ... HV_X64_MSR_SINT15:
  1376. if (kvm_hv_msr_partition_wide(msr)) {
  1377. int r;
  1378. mutex_lock(&vcpu->kvm->lock);
  1379. r = set_msr_hyperv_pw(vcpu, msr, data);
  1380. mutex_unlock(&vcpu->kvm->lock);
  1381. return r;
  1382. } else
  1383. return set_msr_hyperv(vcpu, msr, data);
  1384. break;
  1385. default:
  1386. if (msr && (msr == vcpu->kvm->arch.xen_hvm_config.msr))
  1387. return xen_hvm_config(vcpu, data);
  1388. if (!ignore_msrs) {
  1389. pr_unimpl(vcpu, "unhandled wrmsr: 0x%x data %llx\n",
  1390. msr, data);
  1391. return 1;
  1392. } else {
  1393. pr_unimpl(vcpu, "ignored wrmsr: 0x%x data %llx\n",
  1394. msr, data);
  1395. break;
  1396. }
  1397. }
  1398. return 0;
  1399. }
  1400. EXPORT_SYMBOL_GPL(kvm_set_msr_common);
  1401. /*
  1402. * Reads an msr value (of 'msr_index') into 'pdata'.
  1403. * Returns 0 on success, non-0 otherwise.
  1404. * Assumes vcpu_load() was already called.
  1405. */
  1406. int kvm_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
  1407. {
  1408. return kvm_x86_ops->get_msr(vcpu, msr_index, pdata);
  1409. }
  1410. static int get_msr_mtrr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
  1411. {
  1412. u64 *p = (u64 *)&vcpu->arch.mtrr_state.fixed_ranges;
  1413. if (!msr_mtrr_valid(msr))
  1414. return 1;
  1415. if (msr == MSR_MTRRdefType)
  1416. *pdata = vcpu->arch.mtrr_state.def_type +
  1417. (vcpu->arch.mtrr_state.enabled << 10);
  1418. else if (msr == MSR_MTRRfix64K_00000)
  1419. *pdata = p[0];
  1420. else if (msr == MSR_MTRRfix16K_80000 || msr == MSR_MTRRfix16K_A0000)
  1421. *pdata = p[1 + msr - MSR_MTRRfix16K_80000];
  1422. else if (msr >= MSR_MTRRfix4K_C0000 && msr <= MSR_MTRRfix4K_F8000)
  1423. *pdata = p[3 + msr - MSR_MTRRfix4K_C0000];
  1424. else if (msr == MSR_IA32_CR_PAT)
  1425. *pdata = vcpu->arch.pat;
  1426. else { /* Variable MTRRs */
  1427. int idx, is_mtrr_mask;
  1428. u64 *pt;
  1429. idx = (msr - 0x200) / 2;
  1430. is_mtrr_mask = msr - 0x200 - 2 * idx;
  1431. if (!is_mtrr_mask)
  1432. pt =
  1433. (u64 *)&vcpu->arch.mtrr_state.var_ranges[idx].base_lo;
  1434. else
  1435. pt =
  1436. (u64 *)&vcpu->arch.mtrr_state.var_ranges[idx].mask_lo;
  1437. *pdata = *pt;
  1438. }
  1439. return 0;
  1440. }
  1441. static int get_msr_mce(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
  1442. {
  1443. u64 data;
  1444. u64 mcg_cap = vcpu->arch.mcg_cap;
  1445. unsigned bank_num = mcg_cap & 0xff;
  1446. switch (msr) {
  1447. case MSR_IA32_P5_MC_ADDR:
  1448. case MSR_IA32_P5_MC_TYPE:
  1449. data = 0;
  1450. break;
  1451. case MSR_IA32_MCG_CAP:
  1452. data = vcpu->arch.mcg_cap;
  1453. break;
  1454. case MSR_IA32_MCG_CTL:
  1455. if (!(mcg_cap & MCG_CTL_P))
  1456. return 1;
  1457. data = vcpu->arch.mcg_ctl;
  1458. break;
  1459. case MSR_IA32_MCG_STATUS:
  1460. data = vcpu->arch.mcg_status;
  1461. break;
  1462. default:
  1463. if (msr >= MSR_IA32_MC0_CTL &&
  1464. msr < MSR_IA32_MC0_CTL + 4 * bank_num) {
  1465. u32 offset = msr - MSR_IA32_MC0_CTL;
  1466. data = vcpu->arch.mce_banks[offset];
  1467. break;
  1468. }
  1469. return 1;
  1470. }
  1471. *pdata = data;
  1472. return 0;
  1473. }
  1474. static int get_msr_hyperv_pw(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
  1475. {
  1476. u64 data = 0;
  1477. struct kvm *kvm = vcpu->kvm;
  1478. switch (msr) {
  1479. case HV_X64_MSR_GUEST_OS_ID:
  1480. data = kvm->arch.hv_guest_os_id;
  1481. break;
  1482. case HV_X64_MSR_HYPERCALL:
  1483. data = kvm->arch.hv_hypercall;
  1484. break;
  1485. default:
  1486. pr_unimpl(vcpu, "Hyper-V unhandled rdmsr: 0x%x\n", msr);
  1487. return 1;
  1488. }
  1489. *pdata = data;
  1490. return 0;
  1491. }
  1492. static int get_msr_hyperv(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
  1493. {
  1494. u64 data = 0;
  1495. switch (msr) {
  1496. case HV_X64_MSR_VP_INDEX: {
  1497. int r;
  1498. struct kvm_vcpu *v;
  1499. kvm_for_each_vcpu(r, v, vcpu->kvm)
  1500. if (v == vcpu)
  1501. data = r;
  1502. break;
  1503. }
  1504. case HV_X64_MSR_EOI:
  1505. return kvm_hv_vapic_msr_read(vcpu, APIC_EOI, pdata);
  1506. case HV_X64_MSR_ICR:
  1507. return kvm_hv_vapic_msr_read(vcpu, APIC_ICR, pdata);
  1508. case HV_X64_MSR_TPR:
  1509. return kvm_hv_vapic_msr_read(vcpu, APIC_TASKPRI, pdata);
  1510. default:
  1511. pr_unimpl(vcpu, "Hyper-V unhandled rdmsr: 0x%x\n", msr);
  1512. return 1;
  1513. }
  1514. *pdata = data;
  1515. return 0;
  1516. }
  1517. int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
  1518. {
  1519. u64 data;
  1520. switch (msr) {
  1521. case MSR_IA32_PLATFORM_ID:
  1522. case MSR_IA32_UCODE_REV:
  1523. case MSR_IA32_EBL_CR_POWERON:
  1524. case MSR_IA32_DEBUGCTLMSR:
  1525. case MSR_IA32_LASTBRANCHFROMIP:
  1526. case MSR_IA32_LASTBRANCHTOIP:
  1527. case MSR_IA32_LASTINTFROMIP:
  1528. case MSR_IA32_LASTINTTOIP:
  1529. case MSR_K8_SYSCFG:
  1530. case MSR_K7_HWCR:
  1531. case MSR_VM_HSAVE_PA:
  1532. case MSR_P6_PERFCTR0:
  1533. case MSR_P6_PERFCTR1:
  1534. case MSR_P6_EVNTSEL0:
  1535. case MSR_P6_EVNTSEL1:
  1536. case MSR_K7_EVNTSEL0:
  1537. case MSR_K7_PERFCTR0:
  1538. case MSR_K8_INT_PENDING_MSG:
  1539. case MSR_AMD64_NB_CFG:
  1540. case MSR_FAM10H_MMIO_CONF_BASE:
  1541. data = 0;
  1542. break;
  1543. case MSR_MTRRcap:
  1544. data = 0x500 | KVM_NR_VAR_MTRR;
  1545. break;
  1546. case 0x200 ... 0x2ff:
  1547. return get_msr_mtrr(vcpu, msr, pdata);
  1548. case 0xcd: /* fsb frequency */
  1549. data = 3;
  1550. break;
  1551. /*
  1552. * MSR_EBC_FREQUENCY_ID
  1553. * Conservative value valid for even the basic CPU models.
  1554. * Models 0,1: 000 in bits 23:21 indicating a bus speed of
  1555. * 100MHz, model 2 000 in bits 18:16 indicating 100MHz,
  1556. * and 266MHz for model 3, or 4. Set Core Clock
  1557. * Frequency to System Bus Frequency Ratio to 1 (bits
  1558. * 31:24) even though these are only valid for CPU
1559. * models > 2; otherwise guests may end up dividing or
1560. * multiplying by zero.
  1561. */
  1562. case MSR_EBC_FREQUENCY_ID:
  1563. data = 1 << 24;
  1564. break;
  1565. case MSR_IA32_APICBASE:
  1566. data = kvm_get_apic_base(vcpu);
  1567. break;
  1568. case APIC_BASE_MSR ... APIC_BASE_MSR + 0x3ff:
  1569. return kvm_x2apic_msr_read(vcpu, msr, pdata);
  1570. break;
  1571. case MSR_IA32_MISC_ENABLE:
  1572. data = vcpu->arch.ia32_misc_enable_msr;
  1573. break;
  1574. case MSR_IA32_PERF_STATUS:
  1575. /* TSC increment by tick */
  1576. data = 1000ULL;
  1577. /* CPU multiplier */
  1578. data |= (((uint64_t)4ULL) << 40);
  1579. break;
  1580. case MSR_EFER:
  1581. data = vcpu->arch.efer;
  1582. break;
  1583. case MSR_KVM_WALL_CLOCK:
  1584. case MSR_KVM_WALL_CLOCK_NEW:
  1585. data = vcpu->kvm->arch.wall_clock;
  1586. break;
  1587. case MSR_KVM_SYSTEM_TIME:
  1588. case MSR_KVM_SYSTEM_TIME_NEW:
  1589. data = vcpu->arch.time;
  1590. break;
  1591. case MSR_KVM_ASYNC_PF_EN:
  1592. data = vcpu->arch.apf.msr_val;
  1593. break;
  1594. case MSR_IA32_P5_MC_ADDR:
  1595. case MSR_IA32_P5_MC_TYPE:
  1596. case MSR_IA32_MCG_CAP:
  1597. case MSR_IA32_MCG_CTL:
  1598. case MSR_IA32_MCG_STATUS:
  1599. case MSR_IA32_MC0_CTL ... MSR_IA32_MC0_CTL + 4 * KVM_MAX_MCE_BANKS - 1:
  1600. return get_msr_mce(vcpu, msr, pdata);
  1601. case MSR_K7_CLK_CTL:
  1602. /*
1603. * Provide expected ramp-up count for K7. All other bits
1604. * are set to zero, indicating minimum divisors for
  1605. * every field.
  1606. *
  1607. * This prevents guest kernels on AMD host with CPU
  1608. * type 6, model 8 and higher from exploding due to
  1609. * the rdmsr failing.
  1610. */
  1611. data = 0x20000000;
  1612. break;
  1613. case HV_X64_MSR_GUEST_OS_ID ... HV_X64_MSR_SINT15:
  1614. if (kvm_hv_msr_partition_wide(msr)) {
  1615. int r;
  1616. mutex_lock(&vcpu->kvm->lock);
  1617. r = get_msr_hyperv_pw(vcpu, msr, pdata);
  1618. mutex_unlock(&vcpu->kvm->lock);
  1619. return r;
  1620. } else
  1621. return get_msr_hyperv(vcpu, msr, pdata);
  1622. break;
  1623. default:
  1624. if (!ignore_msrs) {
  1625. pr_unimpl(vcpu, "unhandled rdmsr: 0x%x\n", msr);
  1626. return 1;
  1627. } else {
  1628. pr_unimpl(vcpu, "ignored rdmsr: 0x%x\n", msr);
  1629. data = 0;
  1630. }
  1631. break;
  1632. }
  1633. *pdata = data;
  1634. return 0;
  1635. }
  1636. EXPORT_SYMBOL_GPL(kvm_get_msr_common);
  1637. /*
  1638. * Read or write a bunch of msrs. All parameters are kernel addresses.
  1639. *
  1640. * @return number of msrs set successfully.
  1641. */
  1642. static int __msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs *msrs,
  1643. struct kvm_msr_entry *entries,
  1644. int (*do_msr)(struct kvm_vcpu *vcpu,
  1645. unsigned index, u64 *data))
  1646. {
  1647. int i, idx;
  1648. idx = srcu_read_lock(&vcpu->kvm->srcu);
  1649. for (i = 0; i < msrs->nmsrs; ++i)
  1650. if (do_msr(vcpu, entries[i].index, &entries[i].data))
  1651. break;
  1652. srcu_read_unlock(&vcpu->kvm->srcu, idx);
  1653. return i;
  1654. }
  1655. /*
  1656. * Read or write a bunch of msrs. Parameters are user addresses.
  1657. *
  1658. * @return number of msrs set successfully.
  1659. */
  1660. static int msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs __user *user_msrs,
  1661. int (*do_msr)(struct kvm_vcpu *vcpu,
  1662. unsigned index, u64 *data),
  1663. int writeback)
  1664. {
  1665. struct kvm_msrs msrs;
  1666. struct kvm_msr_entry *entries;
  1667. int r, n;
  1668. unsigned size;
  1669. r = -EFAULT;
  1670. if (copy_from_user(&msrs, user_msrs, sizeof msrs))
  1671. goto out;
  1672. r = -E2BIG;
  1673. if (msrs.nmsrs >= MAX_IO_MSRS)
  1674. goto out;
  1675. r = -ENOMEM;
  1676. size = sizeof(struct kvm_msr_entry) * msrs.nmsrs;
  1677. entries = kmalloc(size, GFP_KERNEL);
  1678. if (!entries)
  1679. goto out;
  1680. r = -EFAULT;
  1681. if (copy_from_user(entries, user_msrs->entries, size))
  1682. goto out_free;
  1683. r = n = __msr_io(vcpu, &msrs, entries, do_msr);
  1684. if (r < 0)
  1685. goto out_free;
  1686. r = -EFAULT;
  1687. if (writeback && copy_to_user(user_msrs->entries, entries, size))
  1688. goto out_free;
  1689. r = n;
  1690. out_free:
  1691. kfree(entries);
  1692. out:
  1693. return r;
  1694. }
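/*
 * KVM_CHECK_EXTENSION for x86: report which optional capabilities this
 * kernel supports; for counted capabilities (VCPUs, memory slots, MCE
 * banks) the returned value is the supported limit.
 */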
  1695. int kvm_dev_ioctl_check_extension(long ext)
  1696. {
  1697. int r;
  1698. switch (ext) {
  1699. case KVM_CAP_IRQCHIP:
  1700. case KVM_CAP_HLT:
  1701. case KVM_CAP_MMU_SHADOW_CACHE_CONTROL:
  1702. case KVM_CAP_SET_TSS_ADDR:
  1703. case KVM_CAP_EXT_CPUID:
  1704. case KVM_CAP_CLOCKSOURCE:
  1705. case KVM_CAP_PIT:
  1706. case KVM_CAP_NOP_IO_DELAY:
  1707. case KVM_CAP_MP_STATE:
  1708. case KVM_CAP_SYNC_MMU:
  1709. case KVM_CAP_REINJECT_CONTROL:
  1710. case KVM_CAP_IRQ_INJECT_STATUS:
  1711. case KVM_CAP_ASSIGN_DEV_IRQ:
  1712. case KVM_CAP_IRQFD:
  1713. case KVM_CAP_IOEVENTFD:
  1714. case KVM_CAP_PIT2:
  1715. case KVM_CAP_PIT_STATE2:
  1716. case KVM_CAP_SET_IDENTITY_MAP_ADDR:
  1717. case KVM_CAP_XEN_HVM:
  1718. case KVM_CAP_ADJUST_CLOCK:
  1719. case KVM_CAP_VCPU_EVENTS:
  1720. case KVM_CAP_HYPERV:
  1721. case KVM_CAP_HYPERV_VAPIC:
  1722. case KVM_CAP_HYPERV_SPIN:
  1723. case KVM_CAP_PCI_SEGMENT:
  1724. case KVM_CAP_DEBUGREGS:
  1725. case KVM_CAP_X86_ROBUST_SINGLESTEP:
  1726. case KVM_CAP_XSAVE:
  1727. case KVM_CAP_ASYNC_PF:
  1728. r = 1;
  1729. break;
  1730. case KVM_CAP_COALESCED_MMIO:
  1731. r = KVM_COALESCED_MMIO_PAGE_OFFSET;
  1732. break;
  1733. case KVM_CAP_VAPIC:
  1734. r = !kvm_x86_ops->cpu_has_accelerated_tpr();
  1735. break;
  1736. case KVM_CAP_NR_VCPUS:
  1737. r = KVM_MAX_VCPUS;
  1738. break;
  1739. case KVM_CAP_NR_MEMSLOTS:
  1740. r = KVM_MEMORY_SLOTS;
  1741. break;
  1742. case KVM_CAP_PV_MMU: /* obsolete */
  1743. r = 0;
  1744. break;
  1745. case KVM_CAP_IOMMU:
  1746. r = iommu_found();
  1747. break;
  1748. case KVM_CAP_MCE:
  1749. r = KVM_MAX_MCE_BANKS;
  1750. break;
  1751. case KVM_CAP_XCRS:
  1752. r = cpu_has_xsave;
  1753. break;
  1754. default:
  1755. r = 0;
  1756. break;
  1757. }
  1758. return r;
  1759. }
  1760. long kvm_arch_dev_ioctl(struct file *filp,
  1761. unsigned int ioctl, unsigned long arg)
  1762. {
  1763. void __user *argp = (void __user *)arg;
  1764. long r;
  1765. switch (ioctl) {
  1766. case KVM_GET_MSR_INDEX_LIST: {
  1767. struct kvm_msr_list __user *user_msr_list = argp;
  1768. struct kvm_msr_list msr_list;
  1769. unsigned n;
  1770. r = -EFAULT;
  1771. if (copy_from_user(&msr_list, user_msr_list, sizeof msr_list))
  1772. goto out;
  1773. n = msr_list.nmsrs;
  1774. msr_list.nmsrs = num_msrs_to_save + ARRAY_SIZE(emulated_msrs);
  1775. if (copy_to_user(user_msr_list, &msr_list, sizeof msr_list))
  1776. goto out;
  1777. r = -E2BIG;
  1778. if (n < msr_list.nmsrs)
  1779. goto out;
  1780. r = -EFAULT;
  1781. if (copy_to_user(user_msr_list->indices, &msrs_to_save,
  1782. num_msrs_to_save * sizeof(u32)))
  1783. goto out;
  1784. if (copy_to_user(user_msr_list->indices + num_msrs_to_save,
  1785. &emulated_msrs,
  1786. ARRAY_SIZE(emulated_msrs) * sizeof(u32)))
  1787. goto out;
  1788. r = 0;
  1789. break;
  1790. }
  1791. case KVM_GET_SUPPORTED_CPUID: {
  1792. struct kvm_cpuid2 __user *cpuid_arg = argp;
  1793. struct kvm_cpuid2 cpuid;
  1794. r = -EFAULT;
  1795. if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
  1796. goto out;
  1797. r = kvm_dev_ioctl_get_supported_cpuid(&cpuid,
  1798. cpuid_arg->entries);
  1799. if (r)
  1800. goto out;
  1801. r = -EFAULT;
  1802. if (copy_to_user(cpuid_arg, &cpuid, sizeof cpuid))
  1803. goto out;
  1804. r = 0;
  1805. break;
  1806. }
  1807. case KVM_X86_GET_MCE_CAP_SUPPORTED: {
  1808. u64 mce_cap;
  1809. mce_cap = KVM_MCE_CAP_SUPPORTED;
  1810. r = -EFAULT;
  1811. if (copy_to_user(argp, &mce_cap, sizeof mce_cap))
  1812. goto out;
  1813. r = 0;
  1814. break;
  1815. }
  1816. default:
  1817. r = -EINVAL;
  1818. }
  1819. out:
  1820. return r;
  1821. }
  1822. static void wbinvd_ipi(void *garbage)
  1823. {
  1824. wbinvd();
  1825. }
  1826. static bool need_emulate_wbinvd(struct kvm_vcpu *vcpu)
  1827. {
  1828. return vcpu->kvm->arch.iommu_domain &&
  1829. !(vcpu->kvm->arch.iommu_flags & KVM_IOMMU_CACHE_COHERENCY);
  1830. }
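/*
 * Called when a VCPU is loaded on a physical CPU. If WBINVD has to be
 * honoured for assigned devices, either mark this CPU in the dirty mask
 * or flush the previous CPU directly; then hand over to the vendor code
 * and compensate for a TSC that went backwards across the migration,
 * enabling TSC catch-up mode on hosts with an unstable TSC.
 */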
  1831. void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
  1832. {
1833. /* Handle the case where WBINVD may be executed by the guest */
  1834. if (need_emulate_wbinvd(vcpu)) {
  1835. if (kvm_x86_ops->has_wbinvd_exit())
  1836. cpumask_set_cpu(cpu, vcpu->arch.wbinvd_dirty_mask);
  1837. else if (vcpu->cpu != -1 && vcpu->cpu != cpu)
  1838. smp_call_function_single(vcpu->cpu,
  1839. wbinvd_ipi, NULL, 1);
  1840. }
  1841. kvm_x86_ops->vcpu_load(vcpu, cpu);
  1842. if (unlikely(vcpu->cpu != cpu) || check_tsc_unstable()) {
  1843. /* Make sure TSC doesn't go backwards */
  1844. s64 tsc_delta = !vcpu->arch.last_host_tsc ? 0 :
  1845. native_read_tsc() - vcpu->arch.last_host_tsc;
  1846. if (tsc_delta < 0)
  1847. mark_tsc_unstable("KVM discovered backwards TSC");
  1848. if (check_tsc_unstable()) {
  1849. kvm_x86_ops->adjust_tsc_offset(vcpu, -tsc_delta);
  1850. vcpu->arch.tsc_catchup = 1;
  1851. kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
  1852. }
  1853. if (vcpu->cpu != cpu)
  1854. kvm_migrate_timers(vcpu);
  1855. vcpu->cpu = cpu;
  1856. }
  1857. }
  1858. void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
  1859. {
  1860. kvm_x86_ops->vcpu_put(vcpu);
  1861. kvm_put_guest_fpu(vcpu);
  1862. vcpu->arch.last_host_tsc = native_read_tsc();
  1863. }
  1864. static int is_efer_nx(void)
  1865. {
  1866. unsigned long long efer = 0;
  1867. rdmsrl_safe(MSR_EFER, &efer);
  1868. return efer & EFER_NX;
  1869. }
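/*
 * If the host runs with NX disabled in EFER, hide the NX bit (leaf
 * 0x80000001, EDX bit 20) from the guest so it does not try to use it.
 */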
  1870. static void cpuid_fix_nx_cap(struct kvm_vcpu *vcpu)
  1871. {
  1872. int i;
  1873. struct kvm_cpuid_entry2 *e, *entry;
  1874. entry = NULL;
  1875. for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
  1876. e = &vcpu->arch.cpuid_entries[i];
  1877. if (e->function == 0x80000001) {
  1878. entry = e;
  1879. break;
  1880. }
  1881. }
  1882. if (entry && (entry->edx & (1 << 20)) && !is_efer_nx()) {
  1883. entry->edx &= ~(1 << 20);
  1884. printk(KERN_INFO "kvm: guest NX capability removed\n");
  1885. }
  1886. }
1887. /* legacy KVM_SET_CPUID: an old userspace process feeding cpuid entries to a new kernel module */
  1888. static int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu,
  1889. struct kvm_cpuid *cpuid,
  1890. struct kvm_cpuid_entry __user *entries)
  1891. {
  1892. int r, i;
  1893. struct kvm_cpuid_entry *cpuid_entries;
  1894. r = -E2BIG;
  1895. if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
  1896. goto out;
  1897. r = -ENOMEM;
  1898. cpuid_entries = vmalloc(sizeof(struct kvm_cpuid_entry) * cpuid->nent);
  1899. if (!cpuid_entries)
  1900. goto out;
  1901. r = -EFAULT;
  1902. if (copy_from_user(cpuid_entries, entries,
  1903. cpuid->nent * sizeof(struct kvm_cpuid_entry)))
  1904. goto out_free;
  1905. for (i = 0; i < cpuid->nent; i++) {
  1906. vcpu->arch.cpuid_entries[i].function = cpuid_entries[i].function;
  1907. vcpu->arch.cpuid_entries[i].eax = cpuid_entries[i].eax;
  1908. vcpu->arch.cpuid_entries[i].ebx = cpuid_entries[i].ebx;
  1909. vcpu->arch.cpuid_entries[i].ecx = cpuid_entries[i].ecx;
  1910. vcpu->arch.cpuid_entries[i].edx = cpuid_entries[i].edx;
  1911. vcpu->arch.cpuid_entries[i].index = 0;
  1912. vcpu->arch.cpuid_entries[i].flags = 0;
  1913. vcpu->arch.cpuid_entries[i].padding[0] = 0;
  1914. vcpu->arch.cpuid_entries[i].padding[1] = 0;
  1915. vcpu->arch.cpuid_entries[i].padding[2] = 0;
  1916. }
  1917. vcpu->arch.cpuid_nent = cpuid->nent;
  1918. cpuid_fix_nx_cap(vcpu);
  1919. r = 0;
  1920. kvm_apic_set_version(vcpu);
  1921. kvm_x86_ops->cpuid_update(vcpu);
  1922. update_cpuid(vcpu);
  1923. out_free:
  1924. vfree(cpuid_entries);
  1925. out:
  1926. return r;
  1927. }
  1928. static int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
  1929. struct kvm_cpuid2 *cpuid,
  1930. struct kvm_cpuid_entry2 __user *entries)
  1931. {
  1932. int r;
  1933. r = -E2BIG;
  1934. if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
  1935. goto out;
  1936. r = -EFAULT;
  1937. if (copy_from_user(&vcpu->arch.cpuid_entries, entries,
  1938. cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
  1939. goto out;
  1940. vcpu->arch.cpuid_nent = cpuid->nent;
  1941. kvm_apic_set_version(vcpu);
  1942. kvm_x86_ops->cpuid_update(vcpu);
  1943. update_cpuid(vcpu);
  1944. return 0;
  1945. out:
  1946. return r;
  1947. }
  1948. static int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
  1949. struct kvm_cpuid2 *cpuid,
  1950. struct kvm_cpuid_entry2 __user *entries)
  1951. {
  1952. int r;
  1953. r = -E2BIG;
  1954. if (cpuid->nent < vcpu->arch.cpuid_nent)
  1955. goto out;
  1956. r = -EFAULT;
  1957. if (copy_to_user(entries, &vcpu->arch.cpuid_entries,
  1958. vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
  1959. goto out;
  1960. return 0;
  1961. out:
  1962. cpuid->nent = vcpu->arch.cpuid_nent;
  1963. return r;
  1964. }
  1965. static void cpuid_mask(u32 *word, int wordnum)
  1966. {
  1967. *word &= boot_cpu_data.x86_capability[wordnum];
  1968. }
  1969. static void do_cpuid_1_ent(struct kvm_cpuid_entry2 *entry, u32 function,
  1970. u32 index)
  1971. {
  1972. entry->function = function;
  1973. entry->index = index;
  1974. cpuid_count(entry->function, entry->index,
  1975. &entry->eax, &entry->ebx, &entry->ecx, &entry->edx);
  1976. entry->flags = 0;
  1977. }
  1978. #define F(x) bit(X86_FEATURE_##x)
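/*
 * Fill one (or, for indexed leaves, several) kvm_cpuid_entry2 entries for
 * the given CPUID function: start from the host's values, mask them with
 * the feature words KVM can emulate, and synthesize the KVM paravirt
 * leaves (signature and feature bits).
 */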
  1979. static void do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
  1980. u32 index, int *nent, int maxnent)
  1981. {
  1982. unsigned f_nx = is_efer_nx() ? F(NX) : 0;
  1983. #ifdef CONFIG_X86_64
  1984. unsigned f_gbpages = (kvm_x86_ops->get_lpage_level() == PT_PDPE_LEVEL)
  1985. ? F(GBPAGES) : 0;
  1986. unsigned f_lm = F(LM);
  1987. #else
  1988. unsigned f_gbpages = 0;
  1989. unsigned f_lm = 0;
  1990. #endif
  1991. unsigned f_rdtscp = kvm_x86_ops->rdtscp_supported() ? F(RDTSCP) : 0;
  1992. /* cpuid 1.edx */
  1993. const u32 kvm_supported_word0_x86_features =
  1994. F(FPU) | F(VME) | F(DE) | F(PSE) |
  1995. F(TSC) | F(MSR) | F(PAE) | F(MCE) |
  1996. F(CX8) | F(APIC) | 0 /* Reserved */ | F(SEP) |
  1997. F(MTRR) | F(PGE) | F(MCA) | F(CMOV) |
  1998. F(PAT) | F(PSE36) | 0 /* PSN */ | F(CLFLSH) |
  1999. 0 /* Reserved, DS, ACPI */ | F(MMX) |
  2000. F(FXSR) | F(XMM) | F(XMM2) | F(SELFSNOOP) |
  2001. 0 /* HTT, TM, Reserved, PBE */;
  2002. /* cpuid 0x80000001.edx */
  2003. const u32 kvm_supported_word1_x86_features =
  2004. F(FPU) | F(VME) | F(DE) | F(PSE) |
  2005. F(TSC) | F(MSR) | F(PAE) | F(MCE) |
  2006. F(CX8) | F(APIC) | 0 /* Reserved */ | F(SYSCALL) |
  2007. F(MTRR) | F(PGE) | F(MCA) | F(CMOV) |
  2008. F(PAT) | F(PSE36) | 0 /* Reserved */ |
  2009. f_nx | 0 /* Reserved */ | F(MMXEXT) | F(MMX) |
  2010. F(FXSR) | F(FXSR_OPT) | f_gbpages | f_rdtscp |
  2011. 0 /* Reserved */ | f_lm | F(3DNOWEXT) | F(3DNOW);
  2012. /* cpuid 1.ecx */
  2013. const u32 kvm_supported_word4_x86_features =
  2014. F(XMM3) | F(PCLMULQDQ) | 0 /* DTES64, MONITOR */ |
  2015. 0 /* DS-CPL, VMX, SMX, EST */ |
  2016. 0 /* TM2 */ | F(SSSE3) | 0 /* CNXT-ID */ | 0 /* Reserved */ |
  2017. 0 /* Reserved */ | F(CX16) | 0 /* xTPR Update, PDCM */ |
  2018. 0 /* Reserved, DCA */ | F(XMM4_1) |
  2019. F(XMM4_2) | F(X2APIC) | F(MOVBE) | F(POPCNT) |
  2020. 0 /* Reserved*/ | F(AES) | F(XSAVE) | 0 /* OSXSAVE */ | F(AVX) |
  2021. F(F16C);
  2022. /* cpuid 0x80000001.ecx */
  2023. const u32 kvm_supported_word6_x86_features =
  2024. F(LAHF_LM) | F(CMP_LEGACY) | 0 /*SVM*/ | 0 /* ExtApicSpace */ |
  2025. F(CR8_LEGACY) | F(ABM) | F(SSE4A) | F(MISALIGNSSE) |
  2026. F(3DNOWPREFETCH) | 0 /* OSVW */ | 0 /* IBS */ | F(XOP) |
  2027. 0 /* SKINIT, WDT, LWP */ | F(FMA4) | F(TBM);
  2028. /* all calls to cpuid_count() should be made on the same cpu */
  2029. get_cpu();
  2030. do_cpuid_1_ent(entry, function, index);
  2031. ++*nent;
  2032. switch (function) {
  2033. case 0:
  2034. entry->eax = min(entry->eax, (u32)0xd);
  2035. break;
  2036. case 1:
  2037. entry->edx &= kvm_supported_word0_x86_features;
  2038. cpuid_mask(&entry->edx, 0);
  2039. entry->ecx &= kvm_supported_word4_x86_features;
  2040. cpuid_mask(&entry->ecx, 4);
  2041. /* we support x2apic emulation even if host does not support
  2042. * it since we emulate x2apic in software */
  2043. entry->ecx |= F(X2APIC);
  2044. break;
  2045. /* function 2 entries are STATEFUL. That is, repeated cpuid commands
  2046. * may return different values. This forces us to get_cpu() before
  2047. * issuing the first command, and also to emulate this annoying behavior
  2048. * in kvm_emulate_cpuid() using KVM_CPUID_FLAG_STATE_READ_NEXT */
  2049. case 2: {
  2050. int t, times = entry->eax & 0xff;
  2051. entry->flags |= KVM_CPUID_FLAG_STATEFUL_FUNC;
  2052. entry->flags |= KVM_CPUID_FLAG_STATE_READ_NEXT;
  2053. for (t = 1; t < times && *nent < maxnent; ++t) {
  2054. do_cpuid_1_ent(&entry[t], function, 0);
  2055. entry[t].flags |= KVM_CPUID_FLAG_STATEFUL_FUNC;
  2056. ++*nent;
  2057. }
  2058. break;
  2059. }
2060. /* functions 4 and 0xb have an additional index. */
  2061. case 4: {
  2062. int i, cache_type;
  2063. entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
  2064. /* read more entries until cache_type is zero */
  2065. for (i = 1; *nent < maxnent; ++i) {
  2066. cache_type = entry[i - 1].eax & 0x1f;
  2067. if (!cache_type)
  2068. break;
  2069. do_cpuid_1_ent(&entry[i], function, i);
  2070. entry[i].flags |=
  2071. KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
  2072. ++*nent;
  2073. }
  2074. break;
  2075. }
  2076. case 0xb: {
  2077. int i, level_type;
  2078. entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
  2079. /* read more entries until level_type is zero */
  2080. for (i = 1; *nent < maxnent; ++i) {
  2081. level_type = entry[i - 1].ecx & 0xff00;
  2082. if (!level_type)
  2083. break;
  2084. do_cpuid_1_ent(&entry[i], function, i);
  2085. entry[i].flags |=
  2086. KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
  2087. ++*nent;
  2088. }
  2089. break;
  2090. }
  2091. case 0xd: {
  2092. int i;
  2093. entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
  2094. for (i = 1; *nent < maxnent; ++i) {
  2095. if (entry[i - 1].eax == 0 && i != 2)
  2096. break;
  2097. do_cpuid_1_ent(&entry[i], function, i);
  2098. entry[i].flags |=
  2099. KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
  2100. ++*nent;
  2101. }
  2102. break;
  2103. }
  2104. case KVM_CPUID_SIGNATURE: {
  2105. char signature[12] = "KVMKVMKVM\0\0";
  2106. u32 *sigptr = (u32 *)signature;
  2107. entry->eax = 0;
  2108. entry->ebx = sigptr[0];
  2109. entry->ecx = sigptr[1];
  2110. entry->edx = sigptr[2];
  2111. break;
  2112. }
  2113. case KVM_CPUID_FEATURES:
  2114. entry->eax = (1 << KVM_FEATURE_CLOCKSOURCE) |
  2115. (1 << KVM_FEATURE_NOP_IO_DELAY) |
  2116. (1 << KVM_FEATURE_CLOCKSOURCE2) |
  2117. (1 << KVM_FEATURE_CLOCKSOURCE_STABLE_BIT);
  2118. entry->ebx = 0;
  2119. entry->ecx = 0;
  2120. entry->edx = 0;
  2121. break;
  2122. case 0x80000000:
  2123. entry->eax = min(entry->eax, 0x8000001a);
  2124. break;
  2125. case 0x80000001:
  2126. entry->edx &= kvm_supported_word1_x86_features;
  2127. cpuid_mask(&entry->edx, 1);
  2128. entry->ecx &= kvm_supported_word6_x86_features;
  2129. cpuid_mask(&entry->ecx, 6);
  2130. break;
  2131. }
  2132. kvm_x86_ops->set_supported_cpuid(function, entry);
  2133. put_cpu();
  2134. }
  2135. #undef F
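/*
 * KVM_GET_SUPPORTED_CPUID: enumerate the standard, extended and KVM
 * paravirt CPUID leaves into a caller-supplied array, returning -E2BIG
 * when the array is too small to hold them all.
 */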
  2136. static int kvm_dev_ioctl_get_supported_cpuid(struct kvm_cpuid2 *cpuid,
  2137. struct kvm_cpuid_entry2 __user *entries)
  2138. {
  2139. struct kvm_cpuid_entry2 *cpuid_entries;
  2140. int limit, nent = 0, r = -E2BIG;
  2141. u32 func;
  2142. if (cpuid->nent < 1)
  2143. goto out;
  2144. if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
  2145. cpuid->nent = KVM_MAX_CPUID_ENTRIES;
  2146. r = -ENOMEM;
  2147. cpuid_entries = vmalloc(sizeof(struct kvm_cpuid_entry2) * cpuid->nent);
  2148. if (!cpuid_entries)
  2149. goto out;
  2150. do_cpuid_ent(&cpuid_entries[0], 0, 0, &nent, cpuid->nent);
  2151. limit = cpuid_entries[0].eax;
  2152. for (func = 1; func <= limit && nent < cpuid->nent; ++func)
  2153. do_cpuid_ent(&cpuid_entries[nent], func, 0,
  2154. &nent, cpuid->nent);
  2155. r = -E2BIG;
  2156. if (nent >= cpuid->nent)
  2157. goto out_free;
  2158. do_cpuid_ent(&cpuid_entries[nent], 0x80000000, 0, &nent, cpuid->nent);
  2159. limit = cpuid_entries[nent - 1].eax;
  2160. for (func = 0x80000001; func <= limit && nent < cpuid->nent; ++func)
  2161. do_cpuid_ent(&cpuid_entries[nent], func, 0,
  2162. &nent, cpuid->nent);
  2163. r = -E2BIG;
  2164. if (nent >= cpuid->nent)
  2165. goto out_free;
  2166. do_cpuid_ent(&cpuid_entries[nent], KVM_CPUID_SIGNATURE, 0, &nent,
  2167. cpuid->nent);
  2168. r = -E2BIG;
  2169. if (nent >= cpuid->nent)
  2170. goto out_free;
  2171. do_cpuid_ent(&cpuid_entries[nent], KVM_CPUID_FEATURES, 0, &nent,
  2172. cpuid->nent);
  2173. r = -E2BIG;
  2174. if (nent >= cpuid->nent)
  2175. goto out_free;
  2176. r = -EFAULT;
  2177. if (copy_to_user(entries, cpuid_entries,
  2178. nent * sizeof(struct kvm_cpuid_entry2)))
  2179. goto out_free;
  2180. cpuid->nent = nent;
  2181. r = 0;
  2182. out_free:
  2183. vfree(cpuid_entries);
  2184. out:
  2185. return r;
  2186. }
  2187. static int kvm_vcpu_ioctl_get_lapic(struct kvm_vcpu *vcpu,
  2188. struct kvm_lapic_state *s)
  2189. {
  2190. memcpy(s->regs, vcpu->arch.apic->regs, sizeof *s);
  2191. return 0;
  2192. }
  2193. static int kvm_vcpu_ioctl_set_lapic(struct kvm_vcpu *vcpu,
  2194. struct kvm_lapic_state *s)
  2195. {
  2196. memcpy(vcpu->arch.apic->regs, s->regs, sizeof *s);
  2197. kvm_apic_post_state_restore(vcpu);
  2198. update_cr8_intercept(vcpu);
  2199. return 0;
  2200. }
  2201. static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
  2202. struct kvm_interrupt *irq)
  2203. {
  2204. if (irq->irq < 0 || irq->irq >= 256)
  2205. return -EINVAL;
  2206. if (irqchip_in_kernel(vcpu->kvm))
  2207. return -ENXIO;
  2208. kvm_queue_interrupt(vcpu, irq->irq, false);
  2209. kvm_make_request(KVM_REQ_EVENT, vcpu);
  2210. return 0;
  2211. }
  2212. static int kvm_vcpu_ioctl_nmi(struct kvm_vcpu *vcpu)
  2213. {
  2214. kvm_inject_nmi(vcpu);
  2215. return 0;
  2216. }
  2217. static int vcpu_ioctl_tpr_access_reporting(struct kvm_vcpu *vcpu,
  2218. struct kvm_tpr_access_ctl *tac)
  2219. {
  2220. if (tac->flags)
  2221. return -EINVAL;
  2222. vcpu->arch.tpr_access_reporting = !!tac->enabled;
  2223. return 0;
  2224. }
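/*
 * KVM_X86_SETUP_MCE: validate the requested MCG_CAP (bank count and
 * supported feature bits) and initialize MCG_CTL (if present) and every
 * bank's MCi_CTL to all 1s, i.e. all error reporting enabled.
 */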
  2225. static int kvm_vcpu_ioctl_x86_setup_mce(struct kvm_vcpu *vcpu,
  2226. u64 mcg_cap)
  2227. {
  2228. int r;
  2229. unsigned bank_num = mcg_cap & 0xff, bank;
  2230. r = -EINVAL;
  2231. if (!bank_num || bank_num >= KVM_MAX_MCE_BANKS)
  2232. goto out;
  2233. if (mcg_cap & ~(KVM_MCE_CAP_SUPPORTED | 0xff | 0xff0000))
  2234. goto out;
  2235. r = 0;
  2236. vcpu->arch.mcg_cap = mcg_cap;
  2237. /* Init IA32_MCG_CTL to all 1s */
  2238. if (mcg_cap & MCG_CTL_P)
  2239. vcpu->arch.mcg_ctl = ~(u64)0;
  2240. /* Init IA32_MCi_CTL to all 1s */
  2241. for (bank = 0; bank < bank_num; bank++)
  2242. vcpu->arch.mce_banks[bank*4] = ~(u64)0;
  2243. out:
  2244. return r;
  2245. }
  2246. static int kvm_vcpu_ioctl_x86_set_mce(struct kvm_vcpu *vcpu,
  2247. struct kvm_x86_mce *mce)
  2248. {
  2249. u64 mcg_cap = vcpu->arch.mcg_cap;
  2250. unsigned bank_num = mcg_cap & 0xff;
  2251. u64 *banks = vcpu->arch.mce_banks;
  2252. if (mce->bank >= bank_num || !(mce->status & MCI_STATUS_VAL))
  2253. return -EINVAL;
  2254. /*
  2255. * if IA32_MCG_CTL is not all 1s, the uncorrected error
  2256. * reporting is disabled
  2257. */
  2258. if ((mce->status & MCI_STATUS_UC) && (mcg_cap & MCG_CTL_P) &&
  2259. vcpu->arch.mcg_ctl != ~(u64)0)
  2260. return 0;
  2261. banks += 4 * mce->bank;
  2262. /*
  2263. * if IA32_MCi_CTL is not all 1s, the uncorrected error
  2264. * reporting is disabled for the bank
  2265. */
  2266. if ((mce->status & MCI_STATUS_UC) && banks[0] != ~(u64)0)
  2267. return 0;
  2268. if (mce->status & MCI_STATUS_UC) {
  2269. if ((vcpu->arch.mcg_status & MCG_STATUS_MCIP) ||
  2270. !kvm_read_cr4_bits(vcpu, X86_CR4_MCE)) {
  2271. printk(KERN_DEBUG "kvm: set_mce: "
  2272. "injects mce exception while "
  2273. "previous one is in progress!\n");
  2274. kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
  2275. return 0;
  2276. }
  2277. if (banks[1] & MCI_STATUS_VAL)
  2278. mce->status |= MCI_STATUS_OVER;
  2279. banks[2] = mce->addr;
  2280. banks[3] = mce->misc;
  2281. vcpu->arch.mcg_status = mce->mcg_status;
  2282. banks[1] = mce->status;
  2283. kvm_queue_exception(vcpu, MC_VECTOR);
  2284. } else if (!(banks[1] & MCI_STATUS_VAL)
  2285. || !(banks[1] & MCI_STATUS_UC)) {
  2286. if (banks[1] & MCI_STATUS_VAL)
  2287. mce->status |= MCI_STATUS_OVER;
  2288. banks[2] = mce->addr;
  2289. banks[3] = mce->misc;
  2290. banks[1] = mce->status;
  2291. } else
  2292. banks[1] |= MCI_STATUS_OVER;
  2293. return 0;
  2294. }
  2295. static void kvm_vcpu_ioctl_x86_get_vcpu_events(struct kvm_vcpu *vcpu,
  2296. struct kvm_vcpu_events *events)
  2297. {
  2298. events->exception.injected =
  2299. vcpu->arch.exception.pending &&
  2300. !kvm_exception_is_soft(vcpu->arch.exception.nr);
  2301. events->exception.nr = vcpu->arch.exception.nr;
  2302. events->exception.has_error_code = vcpu->arch.exception.has_error_code;
  2303. events->exception.pad = 0;
  2304. events->exception.error_code = vcpu->arch.exception.error_code;
  2305. events->interrupt.injected =
  2306. vcpu->arch.interrupt.pending && !vcpu->arch.interrupt.soft;
  2307. events->interrupt.nr = vcpu->arch.interrupt.nr;
  2308. events->interrupt.soft = 0;
  2309. events->interrupt.shadow =
  2310. kvm_x86_ops->get_interrupt_shadow(vcpu,
  2311. KVM_X86_SHADOW_INT_MOV_SS | KVM_X86_SHADOW_INT_STI);
  2312. events->nmi.injected = vcpu->arch.nmi_injected;
  2313. events->nmi.pending = vcpu->arch.nmi_pending;
  2314. events->nmi.masked = kvm_x86_ops->get_nmi_mask(vcpu);
  2315. events->nmi.pad = 0;
  2316. events->sipi_vector = vcpu->arch.sipi_vector;
  2317. events->flags = (KVM_VCPUEVENT_VALID_NMI_PENDING
  2318. | KVM_VCPUEVENT_VALID_SIPI_VECTOR
  2319. | KVM_VCPUEVENT_VALID_SHADOW);
  2320. memset(&events->reserved, 0, sizeof(events->reserved));
  2321. }
  2322. static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu,
  2323. struct kvm_vcpu_events *events)
  2324. {
  2325. if (events->flags & ~(KVM_VCPUEVENT_VALID_NMI_PENDING
  2326. | KVM_VCPUEVENT_VALID_SIPI_VECTOR
  2327. | KVM_VCPUEVENT_VALID_SHADOW))
  2328. return -EINVAL;
  2329. vcpu->arch.exception.pending = events->exception.injected;
  2330. vcpu->arch.exception.nr = events->exception.nr;
  2331. vcpu->arch.exception.has_error_code = events->exception.has_error_code;
  2332. vcpu->arch.exception.error_code = events->exception.error_code;
  2333. vcpu->arch.interrupt.pending = events->interrupt.injected;
  2334. vcpu->arch.interrupt.nr = events->interrupt.nr;
  2335. vcpu->arch.interrupt.soft = events->interrupt.soft;
  2336. if (vcpu->arch.interrupt.pending && irqchip_in_kernel(vcpu->kvm))
  2337. kvm_pic_clear_isr_ack(vcpu->kvm);
  2338. if (events->flags & KVM_VCPUEVENT_VALID_SHADOW)
  2339. kvm_x86_ops->set_interrupt_shadow(vcpu,
  2340. events->interrupt.shadow);
  2341. vcpu->arch.nmi_injected = events->nmi.injected;
  2342. if (events->flags & KVM_VCPUEVENT_VALID_NMI_PENDING)
  2343. vcpu->arch.nmi_pending = events->nmi.pending;
  2344. kvm_x86_ops->set_nmi_mask(vcpu, events->nmi.masked);
  2345. if (events->flags & KVM_VCPUEVENT_VALID_SIPI_VECTOR)
  2346. vcpu->arch.sipi_vector = events->sipi_vector;
  2347. kvm_make_request(KVM_REQ_EVENT, vcpu);
  2348. return 0;
  2349. }
  2350. static void kvm_vcpu_ioctl_x86_get_debugregs(struct kvm_vcpu *vcpu,
  2351. struct kvm_debugregs *dbgregs)
  2352. {
  2353. memcpy(dbgregs->db, vcpu->arch.db, sizeof(vcpu->arch.db));
  2354. dbgregs->dr6 = vcpu->arch.dr6;
  2355. dbgregs->dr7 = vcpu->arch.dr7;
  2356. dbgregs->flags = 0;
  2357. memset(&dbgregs->reserved, 0, sizeof(dbgregs->reserved));
  2358. }
  2359. static int kvm_vcpu_ioctl_x86_set_debugregs(struct kvm_vcpu *vcpu,
  2360. struct kvm_debugregs *dbgregs)
  2361. {
  2362. if (dbgregs->flags)
  2363. return -EINVAL;
  2364. memcpy(vcpu->arch.db, dbgregs->db, sizeof(vcpu->arch.db));
  2365. vcpu->arch.dr6 = dbgregs->dr6;
  2366. vcpu->arch.dr7 = dbgregs->dr7;
  2367. return 0;
  2368. }
  2369. static void kvm_vcpu_ioctl_x86_get_xsave(struct kvm_vcpu *vcpu,
  2370. struct kvm_xsave *guest_xsave)
  2371. {
  2372. if (cpu_has_xsave)
  2373. memcpy(guest_xsave->region,
  2374. &vcpu->arch.guest_fpu.state->xsave,
  2375. xstate_size);
  2376. else {
  2377. memcpy(guest_xsave->region,
  2378. &vcpu->arch.guest_fpu.state->fxsave,
  2379. sizeof(struct i387_fxsave_struct));
  2380. *(u64 *)&guest_xsave->region[XSAVE_HDR_OFFSET / sizeof(u32)] =
  2381. XSTATE_FPSSE;
  2382. }
  2383. }
  2384. static int kvm_vcpu_ioctl_x86_set_xsave(struct kvm_vcpu *vcpu,
  2385. struct kvm_xsave *guest_xsave)
  2386. {
  2387. u64 xstate_bv =
  2388. *(u64 *)&guest_xsave->region[XSAVE_HDR_OFFSET / sizeof(u32)];
  2389. if (cpu_has_xsave)
  2390. memcpy(&vcpu->arch.guest_fpu.state->xsave,
  2391. guest_xsave->region, xstate_size);
  2392. else {
  2393. if (xstate_bv & ~XSTATE_FPSSE)
  2394. return -EINVAL;
  2395. memcpy(&vcpu->arch.guest_fpu.state->fxsave,
  2396. guest_xsave->region, sizeof(struct i387_fxsave_struct));
  2397. }
  2398. return 0;
  2399. }
  2400. static void kvm_vcpu_ioctl_x86_get_xcrs(struct kvm_vcpu *vcpu,
  2401. struct kvm_xcrs *guest_xcrs)
  2402. {
  2403. if (!cpu_has_xsave) {
  2404. guest_xcrs->nr_xcrs = 0;
  2405. return;
  2406. }
  2407. guest_xcrs->nr_xcrs = 1;
  2408. guest_xcrs->flags = 0;
  2409. guest_xcrs->xcrs[0].xcr = XCR_XFEATURE_ENABLED_MASK;
  2410. guest_xcrs->xcrs[0].value = vcpu->arch.xcr0;
  2411. }
  2412. static int kvm_vcpu_ioctl_x86_set_xcrs(struct kvm_vcpu *vcpu,
  2413. struct kvm_xcrs *guest_xcrs)
  2414. {
  2415. int i, r = 0;
  2416. if (!cpu_has_xsave)
  2417. return -EINVAL;
  2418. if (guest_xcrs->nr_xcrs > KVM_MAX_XCRS || guest_xcrs->flags)
  2419. return -EINVAL;
  2420. for (i = 0; i < guest_xcrs->nr_xcrs; i++)
  2421. /* Only support XCR0 currently */
  2422. if (guest_xcrs->xcrs[0].xcr == XCR_XFEATURE_ENABLED_MASK) {
  2423. r = __kvm_set_xcr(vcpu, XCR_XFEATURE_ENABLED_MASK,
  2424. guest_xcrs->xcrs[0].value);
  2425. break;
  2426. }
  2427. if (r)
  2428. r = -EINVAL;
  2429. return r;
  2430. }
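/*
 * Dispatcher for x86-specific VCPU ioctls: LAPIC state, CPUID tables,
 * MSR batches, MCE setup/injection, VCPU events, debug registers and
 * XSAVE/XCR state. Potentially large buffers are allocated per command
 * and released through u.buffer on exit.
 */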
  2431. long kvm_arch_vcpu_ioctl(struct file *filp,
  2432. unsigned int ioctl, unsigned long arg)
  2433. {
  2434. struct kvm_vcpu *vcpu = filp->private_data;
  2435. void __user *argp = (void __user *)arg;
  2436. int r;
  2437. union {
  2438. struct kvm_lapic_state *lapic;
  2439. struct kvm_xsave *xsave;
  2440. struct kvm_xcrs *xcrs;
  2441. void *buffer;
  2442. } u;
  2443. u.buffer = NULL;
  2444. switch (ioctl) {
  2445. case KVM_GET_LAPIC: {
  2446. r = -EINVAL;
  2447. if (!vcpu->arch.apic)
  2448. goto out;
  2449. u.lapic = kzalloc(sizeof(struct kvm_lapic_state), GFP_KERNEL);
  2450. r = -ENOMEM;
  2451. if (!u.lapic)
  2452. goto out;
  2453. r = kvm_vcpu_ioctl_get_lapic(vcpu, u.lapic);
  2454. if (r)
  2455. goto out;
  2456. r = -EFAULT;
  2457. if (copy_to_user(argp, u.lapic, sizeof(struct kvm_lapic_state)))
  2458. goto out;
  2459. r = 0;
  2460. break;
  2461. }
  2462. case KVM_SET_LAPIC: {
  2463. r = -EINVAL;
  2464. if (!vcpu->arch.apic)
  2465. goto out;
  2466. u.lapic = kmalloc(sizeof(struct kvm_lapic_state), GFP_KERNEL);
  2467. r = -ENOMEM;
  2468. if (!u.lapic)
  2469. goto out;
  2470. r = -EFAULT;
  2471. if (copy_from_user(u.lapic, argp, sizeof(struct kvm_lapic_state)))
  2472. goto out;
  2473. r = kvm_vcpu_ioctl_set_lapic(vcpu, u.lapic);
  2474. if (r)
  2475. goto out;
  2476. r = 0;
  2477. break;
  2478. }
  2479. case KVM_INTERRUPT: {
  2480. struct kvm_interrupt irq;
  2481. r = -EFAULT;
  2482. if (copy_from_user(&irq, argp, sizeof irq))
  2483. goto out;
  2484. r = kvm_vcpu_ioctl_interrupt(vcpu, &irq);
  2485. if (r)
  2486. goto out;
  2487. r = 0;
  2488. break;
  2489. }
  2490. case KVM_NMI: {
  2491. r = kvm_vcpu_ioctl_nmi(vcpu);
  2492. if (r)
  2493. goto out;
  2494. r = 0;
  2495. break;
  2496. }
  2497. case KVM_SET_CPUID: {
  2498. struct kvm_cpuid __user *cpuid_arg = argp;
  2499. struct kvm_cpuid cpuid;
  2500. r = -EFAULT;
  2501. if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
  2502. goto out;
  2503. r = kvm_vcpu_ioctl_set_cpuid(vcpu, &cpuid, cpuid_arg->entries);
  2504. if (r)
  2505. goto out;
  2506. break;
  2507. }
  2508. case KVM_SET_CPUID2: {
  2509. struct kvm_cpuid2 __user *cpuid_arg = argp;
  2510. struct kvm_cpuid2 cpuid;
  2511. r = -EFAULT;
  2512. if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
  2513. goto out;
  2514. r = kvm_vcpu_ioctl_set_cpuid2(vcpu, &cpuid,
  2515. cpuid_arg->entries);
  2516. if (r)
  2517. goto out;
  2518. break;
  2519. }
  2520. case KVM_GET_CPUID2: {
  2521. struct kvm_cpuid2 __user *cpuid_arg = argp;
  2522. struct kvm_cpuid2 cpuid;
  2523. r = -EFAULT;
  2524. if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
  2525. goto out;
  2526. r = kvm_vcpu_ioctl_get_cpuid2(vcpu, &cpuid,
  2527. cpuid_arg->entries);
  2528. if (r)
  2529. goto out;
  2530. r = -EFAULT;
  2531. if (copy_to_user(cpuid_arg, &cpuid, sizeof cpuid))
  2532. goto out;
  2533. r = 0;
  2534. break;
  2535. }
  2536. case KVM_GET_MSRS:
  2537. r = msr_io(vcpu, argp, kvm_get_msr, 1);
  2538. break;
  2539. case KVM_SET_MSRS:
  2540. r = msr_io(vcpu, argp, do_set_msr, 0);
  2541. break;
  2542. case KVM_TPR_ACCESS_REPORTING: {
  2543. struct kvm_tpr_access_ctl tac;
  2544. r = -EFAULT;
  2545. if (copy_from_user(&tac, argp, sizeof tac))
  2546. goto out;
  2547. r = vcpu_ioctl_tpr_access_reporting(vcpu, &tac);
  2548. if (r)
  2549. goto out;
  2550. r = -EFAULT;
  2551. if (copy_to_user(argp, &tac, sizeof tac))
  2552. goto out;
  2553. r = 0;
  2554. break;
2555. }
  2556. case KVM_SET_VAPIC_ADDR: {
  2557. struct kvm_vapic_addr va;
  2558. r = -EINVAL;
  2559. if (!irqchip_in_kernel(vcpu->kvm))
  2560. goto out;
  2561. r = -EFAULT;
  2562. if (copy_from_user(&va, argp, sizeof va))
  2563. goto out;
  2564. r = 0;
  2565. kvm_lapic_set_vapic_addr(vcpu, va.vapic_addr);
  2566. break;
  2567. }
  2568. case KVM_X86_SETUP_MCE: {
  2569. u64 mcg_cap;
  2570. r = -EFAULT;
  2571. if (copy_from_user(&mcg_cap, argp, sizeof mcg_cap))
  2572. goto out;
  2573. r = kvm_vcpu_ioctl_x86_setup_mce(vcpu, mcg_cap);
  2574. break;
  2575. }
  2576. case KVM_X86_SET_MCE: {
  2577. struct kvm_x86_mce mce;
  2578. r = -EFAULT;
  2579. if (copy_from_user(&mce, argp, sizeof mce))
  2580. goto out;
  2581. r = kvm_vcpu_ioctl_x86_set_mce(vcpu, &mce);
  2582. break;
  2583. }
  2584. case KVM_GET_VCPU_EVENTS: {
  2585. struct kvm_vcpu_events events;
  2586. kvm_vcpu_ioctl_x86_get_vcpu_events(vcpu, &events);
  2587. r = -EFAULT;
  2588. if (copy_to_user(argp, &events, sizeof(struct kvm_vcpu_events)))
  2589. break;
  2590. r = 0;
  2591. break;
  2592. }
  2593. case KVM_SET_VCPU_EVENTS: {
  2594. struct kvm_vcpu_events events;
  2595. r = -EFAULT;
  2596. if (copy_from_user(&events, argp, sizeof(struct kvm_vcpu_events)))
  2597. break;
  2598. r = kvm_vcpu_ioctl_x86_set_vcpu_events(vcpu, &events);
  2599. break;
  2600. }
  2601. case KVM_GET_DEBUGREGS: {
  2602. struct kvm_debugregs dbgregs;
  2603. kvm_vcpu_ioctl_x86_get_debugregs(vcpu, &dbgregs);
  2604. r = -EFAULT;
  2605. if (copy_to_user(argp, &dbgregs,
  2606. sizeof(struct kvm_debugregs)))
  2607. break;
  2608. r = 0;
  2609. break;
  2610. }
  2611. case KVM_SET_DEBUGREGS: {
  2612. struct kvm_debugregs dbgregs;
  2613. r = -EFAULT;
  2614. if (copy_from_user(&dbgregs, argp,
  2615. sizeof(struct kvm_debugregs)))
  2616. break;
  2617. r = kvm_vcpu_ioctl_x86_set_debugregs(vcpu, &dbgregs);
  2618. break;
  2619. }
  2620. case KVM_GET_XSAVE: {
  2621. u.xsave = kzalloc(sizeof(struct kvm_xsave), GFP_KERNEL);
  2622. r = -ENOMEM;
  2623. if (!u.xsave)
  2624. break;
  2625. kvm_vcpu_ioctl_x86_get_xsave(vcpu, u.xsave);
  2626. r = -EFAULT;
  2627. if (copy_to_user(argp, u.xsave, sizeof(struct kvm_xsave)))
  2628. break;
  2629. r = 0;
  2630. break;
  2631. }
  2632. case KVM_SET_XSAVE: {
  2633. u.xsave = kzalloc(sizeof(struct kvm_xsave), GFP_KERNEL);
  2634. r = -ENOMEM;
  2635. if (!u.xsave)
  2636. break;
  2637. r = -EFAULT;
  2638. if (copy_from_user(u.xsave, argp, sizeof(struct kvm_xsave)))
  2639. break;
  2640. r = kvm_vcpu_ioctl_x86_set_xsave(vcpu, u.xsave);
  2641. break;
  2642. }
  2643. case KVM_GET_XCRS: {
  2644. u.xcrs = kzalloc(sizeof(struct kvm_xcrs), GFP_KERNEL);
  2645. r = -ENOMEM;
  2646. if (!u.xcrs)
  2647. break;
  2648. kvm_vcpu_ioctl_x86_get_xcrs(vcpu, u.xcrs);
  2649. r = -EFAULT;
  2650. if (copy_to_user(argp, u.xcrs,
  2651. sizeof(struct kvm_xcrs)))
  2652. break;
  2653. r = 0;
  2654. break;
  2655. }
  2656. case KVM_SET_XCRS: {
  2657. u.xcrs = kzalloc(sizeof(struct kvm_xcrs), GFP_KERNEL);
  2658. r = -ENOMEM;
  2659. if (!u.xcrs)
  2660. break;
  2661. r = -EFAULT;
  2662. if (copy_from_user(u.xcrs, argp,
  2663. sizeof(struct kvm_xcrs)))
  2664. break;
  2665. r = kvm_vcpu_ioctl_x86_set_xcrs(vcpu, u.xcrs);
  2666. break;
  2667. }
  2668. default:
  2669. r = -EINVAL;
  2670. }
  2671. out:
  2672. kfree(u.buffer);
  2673. return r;
  2674. }
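/*
 * KVM_SET_TSS_ADDR defines a three-page region in guest physical memory;
 * the check below only requires that all three pages fit below the 4 GB
 * boundary, hence the "(unsigned int)(-3 * PAGE_SIZE)" upper bound.
 */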
  2675. static int kvm_vm_ioctl_set_tss_addr(struct kvm *kvm, unsigned long addr)
  2676. {
  2677. int ret;
  2678. if (addr > (unsigned int)(-3 * PAGE_SIZE))
  2679. return -1;
  2680. ret = kvm_x86_ops->set_tss_addr(kvm, addr);
  2681. return ret;
  2682. }
  2683. static int kvm_vm_ioctl_set_identity_map_addr(struct kvm *kvm,
  2684. u64 ident_addr)
  2685. {
  2686. kvm->arch.ept_identity_map_addr = ident_addr;
  2687. return 0;
  2688. }
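/*
 * Resizing the MMU page pool takes slots_lock before mmu_lock, the same
 * ordering the dirty-log path below uses.
 */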
  2689. static int kvm_vm_ioctl_set_nr_mmu_pages(struct kvm *kvm,
  2690. u32 kvm_nr_mmu_pages)
  2691. {
  2692. if (kvm_nr_mmu_pages < KVM_MIN_ALLOC_MMU_PAGES)
  2693. return -EINVAL;
  2694. mutex_lock(&kvm->slots_lock);
  2695. spin_lock(&kvm->mmu_lock);
  2696. kvm_mmu_change_mmu_pages(kvm, kvm_nr_mmu_pages);
  2697. kvm->arch.n_requested_mmu_pages = kvm_nr_mmu_pages;
  2698. spin_unlock(&kvm->mmu_lock);
  2699. mutex_unlock(&kvm->slots_lock);
  2700. return 0;
  2701. }
  2702. static int kvm_vm_ioctl_get_nr_mmu_pages(struct kvm *kvm)
  2703. {
  2704. return kvm->arch.n_max_mmu_pages;
  2705. }
  2706. static int kvm_vm_ioctl_get_irqchip(struct kvm *kvm, struct kvm_irqchip *chip)
  2707. {
  2708. int r;
  2709. r = 0;
  2710. switch (chip->chip_id) {
  2711. case KVM_IRQCHIP_PIC_MASTER:
  2712. memcpy(&chip->chip.pic,
  2713. &pic_irqchip(kvm)->pics[0],
  2714. sizeof(struct kvm_pic_state));
  2715. break;
  2716. case KVM_IRQCHIP_PIC_SLAVE:
  2717. memcpy(&chip->chip.pic,
  2718. &pic_irqchip(kvm)->pics[1],
  2719. sizeof(struct kvm_pic_state));
  2720. break;
  2721. case KVM_IRQCHIP_IOAPIC:
  2722. r = kvm_get_ioapic(kvm, &chip->chip.ioapic);
  2723. break;
  2724. default:
  2725. r = -EINVAL;
  2726. break;
  2727. }
  2728. return r;
  2729. }
  2730. static int kvm_vm_ioctl_set_irqchip(struct kvm *kvm, struct kvm_irqchip *chip)
  2731. {
  2732. int r;
  2733. r = 0;
  2734. switch (chip->chip_id) {
  2735. case KVM_IRQCHIP_PIC_MASTER:
  2736. spin_lock(&pic_irqchip(kvm)->lock);
  2737. memcpy(&pic_irqchip(kvm)->pics[0],
  2738. &chip->chip.pic,
  2739. sizeof(struct kvm_pic_state));
  2740. spin_unlock(&pic_irqchip(kvm)->lock);
  2741. break;
  2742. case KVM_IRQCHIP_PIC_SLAVE:
  2743. spin_lock(&pic_irqchip(kvm)->lock);
  2744. memcpy(&pic_irqchip(kvm)->pics[1],
  2745. &chip->chip.pic,
  2746. sizeof(struct kvm_pic_state));
  2747. spin_unlock(&pic_irqchip(kvm)->lock);
  2748. break;
  2749. case KVM_IRQCHIP_IOAPIC:
  2750. r = kvm_set_ioapic(kvm, &chip->chip.ioapic);
  2751. break;
  2752. default:
  2753. r = -EINVAL;
  2754. break;
  2755. }
  2756. kvm_pic_update_irq(pic_irqchip(kvm));
  2757. return r;
  2758. }
  2759. static int kvm_vm_ioctl_get_pit(struct kvm *kvm, struct kvm_pit_state *ps)
  2760. {
  2761. int r = 0;
  2762. mutex_lock(&kvm->arch.vpit->pit_state.lock);
  2763. memcpy(ps, &kvm->arch.vpit->pit_state, sizeof(struct kvm_pit_state));
  2764. mutex_unlock(&kvm->arch.vpit->pit_state.lock);
  2765. return r;
  2766. }
  2767. static int kvm_vm_ioctl_set_pit(struct kvm *kvm, struct kvm_pit_state *ps)
  2768. {
  2769. int r = 0;
  2770. mutex_lock(&kvm->arch.vpit->pit_state.lock);
  2771. memcpy(&kvm->arch.vpit->pit_state, ps, sizeof(struct kvm_pit_state));
  2772. kvm_pit_load_count(kvm, 0, ps->channels[0].count, 0);
  2773. mutex_unlock(&kvm->arch.vpit->pit_state.lock);
  2774. return r;
  2775. }
  2776. static int kvm_vm_ioctl_get_pit2(struct kvm *kvm, struct kvm_pit_state2 *ps)
  2777. {
  2778. int r = 0;
  2779. mutex_lock(&kvm->arch.vpit->pit_state.lock);
  2780. memcpy(ps->channels, &kvm->arch.vpit->pit_state.channels,
  2781. sizeof(ps->channels));
  2782. ps->flags = kvm->arch.vpit->pit_state.flags;
  2783. mutex_unlock(&kvm->arch.vpit->pit_state.lock);
  2784. memset(&ps->reserved, 0, sizeof(ps->reserved));
  2785. return r;
  2786. }
  2787. static int kvm_vm_ioctl_set_pit2(struct kvm *kvm, struct kvm_pit_state2 *ps)
  2788. {
  2789. int r = 0, start = 0;
  2790. u32 prev_legacy, cur_legacy;
  2791. mutex_lock(&kvm->arch.vpit->pit_state.lock);
  2792. prev_legacy = kvm->arch.vpit->pit_state.flags & KVM_PIT_FLAGS_HPET_LEGACY;
  2793. cur_legacy = ps->flags & KVM_PIT_FLAGS_HPET_LEGACY;
  2794. if (!prev_legacy && cur_legacy)
  2795. start = 1;
  2796. memcpy(&kvm->arch.vpit->pit_state.channels, &ps->channels,
  2797. sizeof(kvm->arch.vpit->pit_state.channels));
  2798. kvm->arch.vpit->pit_state.flags = ps->flags;
  2799. kvm_pit_load_count(kvm, 0, kvm->arch.vpit->pit_state.channels[0].count, start);
  2800. mutex_unlock(&kvm->arch.vpit->pit_state.lock);
  2801. return r;
  2802. }
  2803. static int kvm_vm_ioctl_reinject(struct kvm *kvm,
  2804. struct kvm_reinject_control *control)
  2805. {
  2806. if (!kvm->arch.vpit)
  2807. return -ENXIO;
  2808. mutex_lock(&kvm->arch.vpit->pit_state.lock);
  2809. kvm->arch.vpit->pit_state.pit_timer.reinject = control->pit_reinject;
  2810. mutex_unlock(&kvm->arch.vpit->pit_state.lock);
  2811. return 0;
  2812. }
  2813. /*
  2814. * Get (and clear) the dirty memory log for a memory slot.
  2815. */
  2816. int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
  2817. struct kvm_dirty_log *log)
  2818. {
  2819. int r, i;
  2820. struct kvm_memory_slot *memslot;
  2821. unsigned long n;
  2822. unsigned long is_dirty = 0;
  2823. mutex_lock(&kvm->slots_lock);
  2824. r = -EINVAL;
  2825. if (log->slot >= KVM_MEMORY_SLOTS)
  2826. goto out;
  2827. memslot = &kvm->memslots->memslots[log->slot];
  2828. r = -ENOENT;
  2829. if (!memslot->dirty_bitmap)
  2830. goto out;
  2831. n = kvm_dirty_bitmap_bytes(memslot);
  2832. for (i = 0; !is_dirty && i < n/sizeof(long); i++)
  2833. is_dirty = memslot->dirty_bitmap[i];
  2834. /* If nothing is dirty, don't bother messing with page tables. */
  2835. if (is_dirty) {
  2836. struct kvm_memslots *slots, *old_slots;
  2837. unsigned long *dirty_bitmap;
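		/*
		 * The slot keeps two bitmaps back to back starting at
		 * dirty_bitmap_head: flip to the one that is not currently
		 * published, clear it, install it via RCU, and copy the old
		 * (now quiescent) bitmap out to userspace.
		 */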
  2838. dirty_bitmap = memslot->dirty_bitmap_head;
  2839. if (memslot->dirty_bitmap == dirty_bitmap)
  2840. dirty_bitmap += n / sizeof(long);
  2841. memset(dirty_bitmap, 0, n);
  2842. r = -ENOMEM;
  2843. slots = kzalloc(sizeof(struct kvm_memslots), GFP_KERNEL);
  2844. if (!slots)
  2845. goto out;
  2846. memcpy(slots, kvm->memslots, sizeof(struct kvm_memslots));
  2847. slots->memslots[log->slot].dirty_bitmap = dirty_bitmap;
  2848. slots->generation++;
  2849. old_slots = kvm->memslots;
  2850. rcu_assign_pointer(kvm->memslots, slots);
  2851. synchronize_srcu_expedited(&kvm->srcu);
  2852. dirty_bitmap = old_slots->memslots[log->slot].dirty_bitmap;
  2853. kfree(old_slots);
  2854. spin_lock(&kvm->mmu_lock);
  2855. kvm_mmu_slot_remove_write_access(kvm, log->slot);
  2856. spin_unlock(&kvm->mmu_lock);
  2857. r = -EFAULT;
  2858. if (copy_to_user(log->dirty_bitmap, dirty_bitmap, n))
  2859. goto out;
  2860. } else {
  2861. r = -EFAULT;
  2862. if (clear_user(log->dirty_bitmap, n))
  2863. goto out;
  2864. }
  2865. r = 0;
  2866. out:
  2867. mutex_unlock(&kvm->slots_lock);
  2868. return r;
  2869. }
  2870. long kvm_arch_vm_ioctl(struct file *filp,
  2871. unsigned int ioctl, unsigned long arg)
  2872. {
  2873. struct kvm *kvm = filp->private_data;
  2874. void __user *argp = (void __user *)arg;
  2875. int r = -ENOTTY;
	/*
	 * This union makes it completely explicit to gcc-3.x
	 * that these variables' stack usage should be
	 * combined, not added together.
	 */
  2881. union {
  2882. struct kvm_pit_state ps;
  2883. struct kvm_pit_state2 ps2;
  2884. struct kvm_pit_config pit_config;
  2885. } u;
  2886. switch (ioctl) {
  2887. case KVM_SET_TSS_ADDR:
  2888. r = kvm_vm_ioctl_set_tss_addr(kvm, arg);
  2889. if (r < 0)
  2890. goto out;
  2891. break;
  2892. case KVM_SET_IDENTITY_MAP_ADDR: {
  2893. u64 ident_addr;
  2894. r = -EFAULT;
  2895. if (copy_from_user(&ident_addr, argp, sizeof ident_addr))
  2896. goto out;
  2897. r = kvm_vm_ioctl_set_identity_map_addr(kvm, ident_addr);
  2898. if (r < 0)
  2899. goto out;
  2900. break;
  2901. }
  2902. case KVM_SET_NR_MMU_PAGES:
  2903. r = kvm_vm_ioctl_set_nr_mmu_pages(kvm, arg);
  2904. if (r)
  2905. goto out;
  2906. break;
  2907. case KVM_GET_NR_MMU_PAGES:
  2908. r = kvm_vm_ioctl_get_nr_mmu_pages(kvm);
  2909. break;
  2910. case KVM_CREATE_IRQCHIP: {
  2911. struct kvm_pic *vpic;
  2912. mutex_lock(&kvm->lock);
  2913. r = -EEXIST;
  2914. if (kvm->arch.vpic)
  2915. goto create_irqchip_unlock;
  2916. r = -ENOMEM;
  2917. vpic = kvm_create_pic(kvm);
  2918. if (vpic) {
  2919. r = kvm_ioapic_init(kvm);
  2920. if (r) {
  2921. kvm_io_bus_unregister_dev(kvm, KVM_PIO_BUS,
  2922. &vpic->dev);
  2923. kfree(vpic);
  2924. goto create_irqchip_unlock;
  2925. }
  2926. } else
  2927. goto create_irqchip_unlock;
  2928. smp_wmb();
  2929. kvm->arch.vpic = vpic;
  2930. smp_wmb();
  2931. r = kvm_setup_default_irq_routing(kvm);
  2932. if (r) {
  2933. mutex_lock(&kvm->irq_lock);
  2934. kvm_ioapic_destroy(kvm);
  2935. kvm_destroy_pic(kvm);
  2936. mutex_unlock(&kvm->irq_lock);
  2937. }
  2938. create_irqchip_unlock:
  2939. mutex_unlock(&kvm->lock);
  2940. break;
  2941. }
  2942. case KVM_CREATE_PIT:
  2943. u.pit_config.flags = KVM_PIT_SPEAKER_DUMMY;
  2944. goto create_pit;
  2945. case KVM_CREATE_PIT2:
  2946. r = -EFAULT;
  2947. if (copy_from_user(&u.pit_config, argp,
  2948. sizeof(struct kvm_pit_config)))
  2949. goto out;
  2950. create_pit:
  2951. mutex_lock(&kvm->slots_lock);
  2952. r = -EEXIST;
  2953. if (kvm->arch.vpit)
  2954. goto create_pit_unlock;
  2955. r = -ENOMEM;
  2956. kvm->arch.vpit = kvm_create_pit(kvm, u.pit_config.flags);
  2957. if (kvm->arch.vpit)
  2958. r = 0;
  2959. create_pit_unlock:
  2960. mutex_unlock(&kvm->slots_lock);
  2961. break;
  2962. case KVM_IRQ_LINE_STATUS:
  2963. case KVM_IRQ_LINE: {
  2964. struct kvm_irq_level irq_event;
  2965. r = -EFAULT;
  2966. if (copy_from_user(&irq_event, argp, sizeof irq_event))
  2967. goto out;
  2968. r = -ENXIO;
  2969. if (irqchip_in_kernel(kvm)) {
  2970. __s32 status;
  2971. status = kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID,
  2972. irq_event.irq, irq_event.level);
  2973. if (ioctl == KVM_IRQ_LINE_STATUS) {
  2974. r = -EFAULT;
  2975. irq_event.status = status;
  2976. if (copy_to_user(argp, &irq_event,
  2977. sizeof irq_event))
  2978. goto out;
  2979. }
  2980. r = 0;
  2981. }
  2982. break;
  2983. }
  2984. case KVM_GET_IRQCHIP: {
  2985. /* 0: PIC master, 1: PIC slave, 2: IOAPIC */
  2986. struct kvm_irqchip *chip = kmalloc(sizeof(*chip), GFP_KERNEL);
  2987. r = -ENOMEM;
  2988. if (!chip)
  2989. goto out;
  2990. r = -EFAULT;
  2991. if (copy_from_user(chip, argp, sizeof *chip))
  2992. goto get_irqchip_out;
  2993. r = -ENXIO;
  2994. if (!irqchip_in_kernel(kvm))
  2995. goto get_irqchip_out;
  2996. r = kvm_vm_ioctl_get_irqchip(kvm, chip);
  2997. if (r)
  2998. goto get_irqchip_out;
  2999. r = -EFAULT;
  3000. if (copy_to_user(argp, chip, sizeof *chip))
  3001. goto get_irqchip_out;
  3002. r = 0;
  3003. get_irqchip_out:
  3004. kfree(chip);
  3005. if (r)
  3006. goto out;
  3007. break;
  3008. }
  3009. case KVM_SET_IRQCHIP: {
  3010. /* 0: PIC master, 1: PIC slave, 2: IOAPIC */
  3011. struct kvm_irqchip *chip = kmalloc(sizeof(*chip), GFP_KERNEL);
  3012. r = -ENOMEM;
  3013. if (!chip)
  3014. goto out;
  3015. r = -EFAULT;
  3016. if (copy_from_user(chip, argp, sizeof *chip))
  3017. goto set_irqchip_out;
  3018. r = -ENXIO;
  3019. if (!irqchip_in_kernel(kvm))
  3020. goto set_irqchip_out;
  3021. r = kvm_vm_ioctl_set_irqchip(kvm, chip);
  3022. if (r)
  3023. goto set_irqchip_out;
  3024. r = 0;
  3025. set_irqchip_out:
  3026. kfree(chip);
  3027. if (r)
  3028. goto out;
  3029. break;
  3030. }
  3031. case KVM_GET_PIT: {
  3032. r = -EFAULT;
  3033. if (copy_from_user(&u.ps, argp, sizeof(struct kvm_pit_state)))
  3034. goto out;
  3035. r = -ENXIO;
  3036. if (!kvm->arch.vpit)
  3037. goto out;
  3038. r = kvm_vm_ioctl_get_pit(kvm, &u.ps);
  3039. if (r)
  3040. goto out;
  3041. r = -EFAULT;
  3042. if (copy_to_user(argp, &u.ps, sizeof(struct kvm_pit_state)))
  3043. goto out;
  3044. r = 0;
  3045. break;
  3046. }
  3047. case KVM_SET_PIT: {
  3048. r = -EFAULT;
  3049. if (copy_from_user(&u.ps, argp, sizeof u.ps))
  3050. goto out;
  3051. r = -ENXIO;
  3052. if (!kvm->arch.vpit)
  3053. goto out;
  3054. r = kvm_vm_ioctl_set_pit(kvm, &u.ps);
  3055. if (r)
  3056. goto out;
  3057. r = 0;
  3058. break;
  3059. }
  3060. case KVM_GET_PIT2: {
  3061. r = -ENXIO;
  3062. if (!kvm->arch.vpit)
  3063. goto out;
  3064. r = kvm_vm_ioctl_get_pit2(kvm, &u.ps2);
  3065. if (r)
  3066. goto out;
  3067. r = -EFAULT;
  3068. if (copy_to_user(argp, &u.ps2, sizeof(u.ps2)))
  3069. goto out;
  3070. r = 0;
  3071. break;
  3072. }
  3073. case KVM_SET_PIT2: {
  3074. r = -EFAULT;
  3075. if (copy_from_user(&u.ps2, argp, sizeof(u.ps2)))
  3076. goto out;
  3077. r = -ENXIO;
  3078. if (!kvm->arch.vpit)
  3079. goto out;
  3080. r = kvm_vm_ioctl_set_pit2(kvm, &u.ps2);
  3081. if (r)
  3082. goto out;
  3083. r = 0;
  3084. break;
  3085. }
  3086. case KVM_REINJECT_CONTROL: {
  3087. struct kvm_reinject_control control;
  3088. r = -EFAULT;
  3089. if (copy_from_user(&control, argp, sizeof(control)))
  3090. goto out;
  3091. r = kvm_vm_ioctl_reinject(kvm, &control);
  3092. if (r)
  3093. goto out;
  3094. r = 0;
  3095. break;
  3096. }
  3097. case KVM_XEN_HVM_CONFIG: {
  3098. r = -EFAULT;
  3099. if (copy_from_user(&kvm->arch.xen_hvm_config, argp,
  3100. sizeof(struct kvm_xen_hvm_config)))
  3101. goto out;
  3102. r = -EINVAL;
  3103. if (kvm->arch.xen_hvm_config.flags)
  3104. goto out;
  3105. r = 0;
  3106. break;
  3107. }
  3108. case KVM_SET_CLOCK: {
  3109. struct kvm_clock_data user_ns;
  3110. u64 now_ns;
  3111. s64 delta;
  3112. r = -EFAULT;
  3113. if (copy_from_user(&user_ns, argp, sizeof(user_ns)))
  3114. goto out;
  3115. r = -EINVAL;
  3116. if (user_ns.flags)
  3117. goto out;
  3118. r = 0;
  3119. local_irq_disable();
  3120. now_ns = get_kernel_ns();
  3121. delta = user_ns.clock - now_ns;
  3122. local_irq_enable();
  3123. kvm->arch.kvmclock_offset = delta;
  3124. break;
  3125. }
  3126. case KVM_GET_CLOCK: {
  3127. struct kvm_clock_data user_ns;
  3128. u64 now_ns;
  3129. local_irq_disable();
  3130. now_ns = get_kernel_ns();
  3131. user_ns.clock = kvm->arch.kvmclock_offset + now_ns;
  3132. local_irq_enable();
  3133. user_ns.flags = 0;
  3134. memset(&user_ns.pad, 0, sizeof(user_ns.pad));
  3135. r = -EFAULT;
  3136. if (copy_to_user(argp, &user_ns, sizeof(user_ns)))
  3137. goto out;
  3138. r = 0;
  3139. break;
  3140. }
  3141. default:
  3142. ;
  3143. }
  3144. out:
  3145. return r;
  3146. }
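/*
 * Userspace sketch (not part of this file): adjusting the VM's kvmclock
 * through the KVM_GET_CLOCK/KVM_SET_CLOCK ioctls handled above.  Assumes
 * "vm_fd" is an open VM file descriptor.
 *
 *	struct kvm_clock_data data;
 *
 *	if (ioctl(vm_fd, KVM_GET_CLOCK, &data) == 0) {
 *		data.clock += 1000000;		(advance by 1 ms)
 *		ioctl(vm_fd, KVM_SET_CLOCK, &data);
 *	}
 */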
  3147. static void kvm_init_msr_list(void)
  3148. {
  3149. u32 dummy[2];
  3150. unsigned i, j;
	/*
	 * Skip the KVM-specific MSRs at the head of the list, then drop
	 * any MSR the host CPU faults on when read.
	 */
  3152. for (i = j = KVM_SAVE_MSRS_BEGIN; i < ARRAY_SIZE(msrs_to_save); i++) {
  3153. if (rdmsr_safe(msrs_to_save[i], &dummy[0], &dummy[1]) < 0)
  3154. continue;
  3155. if (j < i)
  3156. msrs_to_save[j] = msrs_to_save[i];
  3157. j++;
  3158. }
  3159. num_msrs_to_save = j;
  3160. }
  3161. static int vcpu_mmio_write(struct kvm_vcpu *vcpu, gpa_t addr, int len,
  3162. const void *v)
  3163. {
  3164. if (vcpu->arch.apic &&
  3165. !kvm_iodevice_write(&vcpu->arch.apic->dev, addr, len, v))
  3166. return 0;
  3167. return kvm_io_bus_write(vcpu->kvm, KVM_MMIO_BUS, addr, len, v);
  3168. }
  3169. static int vcpu_mmio_read(struct kvm_vcpu *vcpu, gpa_t addr, int len, void *v)
  3170. {
  3171. if (vcpu->arch.apic &&
  3172. !kvm_iodevice_read(&vcpu->arch.apic->dev, addr, len, v))
  3173. return 0;
  3174. return kvm_io_bus_read(vcpu->kvm, KVM_MMIO_BUS, addr, len, v);
  3175. }
  3176. static void kvm_set_segment(struct kvm_vcpu *vcpu,
  3177. struct kvm_segment *var, int seg)
  3178. {
  3179. kvm_x86_ops->set_segment(vcpu, var, seg);
  3180. }
  3181. void kvm_get_segment(struct kvm_vcpu *vcpu,
  3182. struct kvm_segment *var, int seg)
  3183. {
  3184. kvm_x86_ops->get_segment(vcpu, var, seg);
  3185. }
  3186. static gpa_t translate_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access)
  3187. {
  3188. return gpa;
  3189. }
  3190. static gpa_t translate_nested_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access)
  3191. {
  3192. gpa_t t_gpa;
  3193. struct x86_exception exception;
  3194. BUG_ON(!mmu_is_nested(vcpu));
  3195. /* NPT walks are always user-walks */
  3196. access |= PFERR_USER_MASK;
  3197. t_gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, gpa, access, &exception);
  3198. return t_gpa;
  3199. }
  3200. gpa_t kvm_mmu_gva_to_gpa_read(struct kvm_vcpu *vcpu, gva_t gva,
  3201. struct x86_exception *exception)
  3202. {
  3203. u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
  3204. return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception);
  3205. }
  3206. gpa_t kvm_mmu_gva_to_gpa_fetch(struct kvm_vcpu *vcpu, gva_t gva,
  3207. struct x86_exception *exception)
  3208. {
  3209. u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
  3210. access |= PFERR_FETCH_MASK;
  3211. return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception);
  3212. }
  3213. gpa_t kvm_mmu_gva_to_gpa_write(struct kvm_vcpu *vcpu, gva_t gva,
  3214. struct x86_exception *exception)
  3215. {
  3216. u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
  3217. access |= PFERR_WRITE_MASK;
  3218. return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception);
  3219. }
  3220. /* uses this to access any guest's mapped memory without checking CPL */
  3221. gpa_t kvm_mmu_gva_to_gpa_system(struct kvm_vcpu *vcpu, gva_t gva,
  3222. struct x86_exception *exception)
  3223. {
  3224. return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, 0, exception);
  3225. }
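/*
 * Copy guest-virtual memory into a host buffer, splitting the access at
 * page boundaries since each guest page may translate to an unrelated
 * gpa and may fault independently.
 */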
  3226. static int kvm_read_guest_virt_helper(gva_t addr, void *val, unsigned int bytes,
  3227. struct kvm_vcpu *vcpu, u32 access,
  3228. struct x86_exception *exception)
  3229. {
  3230. void *data = val;
  3231. int r = X86EMUL_CONTINUE;
  3232. while (bytes) {
  3233. gpa_t gpa = vcpu->arch.walk_mmu->gva_to_gpa(vcpu, addr, access,
  3234. exception);
  3235. unsigned offset = addr & (PAGE_SIZE-1);
  3236. unsigned toread = min(bytes, (unsigned)PAGE_SIZE - offset);
  3237. int ret;
  3238. if (gpa == UNMAPPED_GVA)
  3239. return X86EMUL_PROPAGATE_FAULT;
  3240. ret = kvm_read_guest(vcpu->kvm, gpa, data, toread);
  3241. if (ret < 0) {
  3242. r = X86EMUL_IO_NEEDED;
  3243. goto out;
  3244. }
  3245. bytes -= toread;
  3246. data += toread;
  3247. addr += toread;
  3248. }
  3249. out:
  3250. return r;
  3251. }
  3252. /* used for instruction fetching */
  3253. static int kvm_fetch_guest_virt(gva_t addr, void *val, unsigned int bytes,
  3254. struct kvm_vcpu *vcpu,
  3255. struct x86_exception *exception)
  3256. {
  3257. u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
  3258. return kvm_read_guest_virt_helper(addr, val, bytes, vcpu,
  3259. access | PFERR_FETCH_MASK,
  3260. exception);
  3261. }
  3262. static int kvm_read_guest_virt(gva_t addr, void *val, unsigned int bytes,
  3263. struct kvm_vcpu *vcpu,
  3264. struct x86_exception *exception)
  3265. {
  3266. u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
  3267. return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, access,
  3268. exception);
  3269. }
  3270. static int kvm_read_guest_virt_system(gva_t addr, void *val, unsigned int bytes,
  3271. struct kvm_vcpu *vcpu,
  3272. struct x86_exception *exception)
  3273. {
  3274. return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, 0, exception);
  3275. }
  3276. static int kvm_write_guest_virt_system(gva_t addr, void *val,
  3277. unsigned int bytes,
  3278. struct kvm_vcpu *vcpu,
  3279. struct x86_exception *exception)
  3280. {
  3281. void *data = val;
  3282. int r = X86EMUL_CONTINUE;
  3283. while (bytes) {
  3284. gpa_t gpa = vcpu->arch.walk_mmu->gva_to_gpa(vcpu, addr,
  3285. PFERR_WRITE_MASK,
  3286. exception);
  3287. unsigned offset = addr & (PAGE_SIZE-1);
  3288. unsigned towrite = min(bytes, (unsigned)PAGE_SIZE - offset);
  3289. int ret;
  3290. if (gpa == UNMAPPED_GVA)
  3291. return X86EMUL_PROPAGATE_FAULT;
  3292. ret = kvm_write_guest(vcpu->kvm, gpa, data, towrite);
  3293. if (ret < 0) {
  3294. r = X86EMUL_IO_NEEDED;
  3295. goto out;
  3296. }
  3297. bytes -= towrite;
  3298. data += towrite;
  3299. addr += towrite;
  3300. }
  3301. out:
  3302. return r;
  3303. }
  3304. static int emulator_read_emulated(unsigned long addr,
  3305. void *val,
  3306. unsigned int bytes,
  3307. struct x86_exception *exception,
  3308. struct kvm_vcpu *vcpu)
  3309. {
  3310. gpa_t gpa;
  3311. if (vcpu->mmio_read_completed) {
  3312. memcpy(val, vcpu->mmio_data, bytes);
  3313. trace_kvm_mmio(KVM_TRACE_MMIO_READ, bytes,
  3314. vcpu->mmio_phys_addr, *(u64 *)val);
  3315. vcpu->mmio_read_completed = 0;
  3316. return X86EMUL_CONTINUE;
  3317. }
  3318. gpa = kvm_mmu_gva_to_gpa_read(vcpu, addr, exception);
  3319. if (gpa == UNMAPPED_GVA)
  3320. return X86EMUL_PROPAGATE_FAULT;
  3321. /* For APIC access vmexit */
  3322. if ((gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE)
  3323. goto mmio;
  3324. if (kvm_read_guest_virt(addr, val, bytes, vcpu, exception)
  3325. == X86EMUL_CONTINUE)
  3326. return X86EMUL_CONTINUE;
  3327. mmio:
  3328. /*
  3329. * Is this MMIO handled locally?
  3330. */
  3331. if (!vcpu_mmio_read(vcpu, gpa, bytes, val)) {
  3332. trace_kvm_mmio(KVM_TRACE_MMIO_READ, bytes, gpa, *(u64 *)val);
  3333. return X86EMUL_CONTINUE;
  3334. }
  3335. trace_kvm_mmio(KVM_TRACE_MMIO_READ_UNSATISFIED, bytes, gpa, 0);
  3336. vcpu->mmio_needed = 1;
  3337. vcpu->run->exit_reason = KVM_EXIT_MMIO;
  3338. vcpu->run->mmio.phys_addr = vcpu->mmio_phys_addr = gpa;
  3339. vcpu->run->mmio.len = vcpu->mmio_size = bytes;
  3340. vcpu->run->mmio.is_write = vcpu->mmio_is_write = 0;
  3341. return X86EMUL_IO_NEEDED;
  3342. }
  3343. int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
  3344. const void *val, int bytes)
  3345. {
  3346. int ret;
  3347. ret = kvm_write_guest(vcpu->kvm, gpa, val, bytes);
  3348. if (ret < 0)
  3349. return 0;
  3350. kvm_mmu_pte_write(vcpu, gpa, val, bytes, 1);
  3351. return 1;
  3352. }
  3353. static int emulator_write_emulated_onepage(unsigned long addr,
  3354. const void *val,
  3355. unsigned int bytes,
  3356. struct x86_exception *exception,
  3357. struct kvm_vcpu *vcpu)
  3358. {
  3359. gpa_t gpa;
  3360. gpa = kvm_mmu_gva_to_gpa_write(vcpu, addr, exception);
  3361. if (gpa == UNMAPPED_GVA)
  3362. return X86EMUL_PROPAGATE_FAULT;
  3363. /* For APIC access vmexit */
  3364. if ((gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE)
  3365. goto mmio;
  3366. if (emulator_write_phys(vcpu, gpa, val, bytes))
  3367. return X86EMUL_CONTINUE;
  3368. mmio:
  3369. trace_kvm_mmio(KVM_TRACE_MMIO_WRITE, bytes, gpa, *(u64 *)val);
  3370. /*
  3371. * Is this MMIO handled locally?
  3372. */
  3373. if (!vcpu_mmio_write(vcpu, gpa, bytes, val))
  3374. return X86EMUL_CONTINUE;
  3375. vcpu->mmio_needed = 1;
  3376. vcpu->run->exit_reason = KVM_EXIT_MMIO;
  3377. vcpu->run->mmio.phys_addr = vcpu->mmio_phys_addr = gpa;
  3378. vcpu->run->mmio.len = vcpu->mmio_size = bytes;
  3379. vcpu->run->mmio.is_write = vcpu->mmio_is_write = 1;
  3380. memcpy(vcpu->run->mmio.data, val, bytes);
  3381. return X86EMUL_CONTINUE;
  3382. }
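/*
 * A write that straddles a page boundary is split into two single-page
 * writes, since the two halves may map to unrelated physical addresses.
 */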
  3383. int emulator_write_emulated(unsigned long addr,
  3384. const void *val,
  3385. unsigned int bytes,
  3386. struct x86_exception *exception,
  3387. struct kvm_vcpu *vcpu)
  3388. {
  3389. /* Crossing a page boundary? */
  3390. if (((addr + bytes - 1) ^ addr) & PAGE_MASK) {
  3391. int rc, now;
  3392. now = -addr & ~PAGE_MASK;
  3393. rc = emulator_write_emulated_onepage(addr, val, now, exception,
  3394. vcpu);
  3395. if (rc != X86EMUL_CONTINUE)
  3396. return rc;
  3397. addr += now;
  3398. val += now;
  3399. bytes -= now;
  3400. }
  3401. return emulator_write_emulated_onepage(addr, val, bytes, exception,
  3402. vcpu);
  3403. }
  3404. #define CMPXCHG_TYPE(t, ptr, old, new) \
  3405. (cmpxchg((t *)(ptr), *(t *)(old), *(t *)(new)) == *(t *)(old))
  3406. #ifdef CONFIG_X86_64
  3407. # define CMPXCHG64(ptr, old, new) CMPXCHG_TYPE(u64, ptr, old, new)
  3408. #else
  3409. # define CMPXCHG64(ptr, old, new) \
  3410. (cmpxchg64((u64 *)(ptr), *(u64 *)(old), *(u64 *)(new)) == *(u64 *)(old))
  3411. #endif
  3412. static int emulator_cmpxchg_emulated(unsigned long addr,
  3413. const void *old,
  3414. const void *new,
  3415. unsigned int bytes,
  3416. struct x86_exception *exception,
  3417. struct kvm_vcpu *vcpu)
  3418. {
  3419. gpa_t gpa;
  3420. struct page *page;
  3421. char *kaddr;
  3422. bool exchanged;
	/* A guest cmpxchg8b has to be emulated atomically. */
  3424. if (bytes > 8 || (bytes & (bytes - 1)))
  3425. goto emul_write;
  3426. gpa = kvm_mmu_gva_to_gpa_write(vcpu, addr, NULL);
  3427. if (gpa == UNMAPPED_GVA ||
  3428. (gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE)
  3429. goto emul_write;
  3430. if (((gpa + bytes - 1) & PAGE_MASK) != (gpa & PAGE_MASK))
  3431. goto emul_write;
  3432. page = gfn_to_page(vcpu->kvm, gpa >> PAGE_SHIFT);
  3433. if (is_error_page(page)) {
  3434. kvm_release_page_clean(page);
  3435. goto emul_write;
  3436. }
  3437. kaddr = kmap_atomic(page, KM_USER0);
  3438. kaddr += offset_in_page(gpa);
  3439. switch (bytes) {
  3440. case 1:
  3441. exchanged = CMPXCHG_TYPE(u8, kaddr, old, new);
  3442. break;
  3443. case 2:
  3444. exchanged = CMPXCHG_TYPE(u16, kaddr, old, new);
  3445. break;
  3446. case 4:
  3447. exchanged = CMPXCHG_TYPE(u32, kaddr, old, new);
  3448. break;
  3449. case 8:
  3450. exchanged = CMPXCHG64(kaddr, old, new);
  3451. break;
  3452. default:
  3453. BUG();
  3454. }
  3455. kunmap_atomic(kaddr, KM_USER0);
  3456. kvm_release_page_dirty(page);
  3457. if (!exchanged)
  3458. return X86EMUL_CMPXCHG_FAILED;
  3459. kvm_mmu_pte_write(vcpu, gpa, new, bytes, 1);
  3460. return X86EMUL_CONTINUE;
  3461. emul_write:
  3462. printk_once(KERN_WARNING "kvm: emulating exchange as write\n");
  3463. return emulator_write_emulated(addr, new, bytes, exception, vcpu);
  3464. }
  3465. static int kernel_pio(struct kvm_vcpu *vcpu, void *pd)
  3466. {
	/* TODO: string I/O for in-kernel devices */
  3468. int r;
  3469. if (vcpu->arch.pio.in)
  3470. r = kvm_io_bus_read(vcpu->kvm, KVM_PIO_BUS, vcpu->arch.pio.port,
  3471. vcpu->arch.pio.size, pd);
  3472. else
  3473. r = kvm_io_bus_write(vcpu->kvm, KVM_PIO_BUS,
  3474. vcpu->arch.pio.port, vcpu->arch.pio.size,
  3475. pd);
  3476. return r;
  3477. }
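/*
 * PIO "in": if an in-kernel device claims the port, the data is returned
 * immediately; otherwise a KVM_EXIT_IO exit is set up so userspace can
 * satisfy the access, and the buffered data is consumed on the next call.
 */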
  3478. static int emulator_pio_in_emulated(int size, unsigned short port, void *val,
  3479. unsigned int count, struct kvm_vcpu *vcpu)
  3480. {
  3481. if (vcpu->arch.pio.count)
  3482. goto data_avail;
  3483. trace_kvm_pio(0, port, size, 1);
  3484. vcpu->arch.pio.port = port;
  3485. vcpu->arch.pio.in = 1;
  3486. vcpu->arch.pio.count = count;
  3487. vcpu->arch.pio.size = size;
  3488. if (!kernel_pio(vcpu, vcpu->arch.pio_data)) {
  3489. data_avail:
  3490. memcpy(val, vcpu->arch.pio_data, size * count);
  3491. vcpu->arch.pio.count = 0;
  3492. return 1;
  3493. }
  3494. vcpu->run->exit_reason = KVM_EXIT_IO;
  3495. vcpu->run->io.direction = KVM_EXIT_IO_IN;
  3496. vcpu->run->io.size = size;
  3497. vcpu->run->io.data_offset = KVM_PIO_PAGE_OFFSET * PAGE_SIZE;
  3498. vcpu->run->io.count = count;
  3499. vcpu->run->io.port = port;
  3500. return 0;
  3501. }
  3502. static int emulator_pio_out_emulated(int size, unsigned short port,
  3503. const void *val, unsigned int count,
  3504. struct kvm_vcpu *vcpu)
  3505. {
  3506. trace_kvm_pio(1, port, size, 1);
  3507. vcpu->arch.pio.port = port;
  3508. vcpu->arch.pio.in = 0;
  3509. vcpu->arch.pio.count = count;
  3510. vcpu->arch.pio.size = size;
  3511. memcpy(vcpu->arch.pio_data, val, size * count);
  3512. if (!kernel_pio(vcpu, vcpu->arch.pio_data)) {
  3513. vcpu->arch.pio.count = 0;
  3514. return 1;
  3515. }
  3516. vcpu->run->exit_reason = KVM_EXIT_IO;
  3517. vcpu->run->io.direction = KVM_EXIT_IO_OUT;
  3518. vcpu->run->io.size = size;
  3519. vcpu->run->io.data_offset = KVM_PIO_PAGE_OFFSET * PAGE_SIZE;
  3520. vcpu->run->io.count = count;
  3521. vcpu->run->io.port = port;
  3522. return 0;
  3523. }
  3524. static unsigned long get_segment_base(struct kvm_vcpu *vcpu, int seg)
  3525. {
  3526. return kvm_x86_ops->get_segment_base(vcpu, seg);
  3527. }
  3528. int emulate_invlpg(struct kvm_vcpu *vcpu, gva_t address)
  3529. {
  3530. kvm_mmu_invlpg(vcpu, address);
  3531. return X86EMUL_CONTINUE;
  3532. }
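/*
 * WBINVD has to take effect on every physical CPU the vcpu may have
 * dirtied.  When the hardware traps WBINVD we broadcast it via IPI to
 * the dirty mask; otherwise it is simply executed locally.
 */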
  3533. int kvm_emulate_wbinvd(struct kvm_vcpu *vcpu)
  3534. {
  3535. if (!need_emulate_wbinvd(vcpu))
  3536. return X86EMUL_CONTINUE;
  3537. if (kvm_x86_ops->has_wbinvd_exit()) {
  3538. int cpu = get_cpu();
  3539. cpumask_set_cpu(cpu, vcpu->arch.wbinvd_dirty_mask);
  3540. smp_call_function_many(vcpu->arch.wbinvd_dirty_mask,
  3541. wbinvd_ipi, NULL, 1);
  3542. put_cpu();
  3543. cpumask_clear(vcpu->arch.wbinvd_dirty_mask);
  3544. } else
  3545. wbinvd();
  3546. return X86EMUL_CONTINUE;
  3547. }
  3548. EXPORT_SYMBOL_GPL(kvm_emulate_wbinvd);
  3549. int emulate_clts(struct kvm_vcpu *vcpu)
  3550. {
  3551. kvm_x86_ops->set_cr0(vcpu, kvm_read_cr0_bits(vcpu, ~X86_CR0_TS));
  3552. kvm_x86_ops->fpu_activate(vcpu);
  3553. return X86EMUL_CONTINUE;
  3554. }
  3555. int emulator_get_dr(int dr, unsigned long *dest, struct kvm_vcpu *vcpu)
  3556. {
  3557. return _kvm_get_dr(vcpu, dr, dest);
  3558. }
  3559. int emulator_set_dr(int dr, unsigned long value, struct kvm_vcpu *vcpu)
  3560. {
  3561. return __kvm_set_dr(vcpu, dr, value);
  3562. }
  3563. static u64 mk_cr_64(u64 curr_cr, u32 new_val)
  3564. {
  3565. return (curr_cr & ~((1ULL << 32) - 1)) | new_val;
  3566. }
  3567. static unsigned long emulator_get_cr(int cr, struct kvm_vcpu *vcpu)
  3568. {
  3569. unsigned long value;
  3570. switch (cr) {
  3571. case 0:
  3572. value = kvm_read_cr0(vcpu);
  3573. break;
  3574. case 2:
  3575. value = vcpu->arch.cr2;
  3576. break;
  3577. case 3:
  3578. value = vcpu->arch.cr3;
  3579. break;
  3580. case 4:
  3581. value = kvm_read_cr4(vcpu);
  3582. break;
  3583. case 8:
  3584. value = kvm_get_cr8(vcpu);
  3585. break;
  3586. default:
  3587. vcpu_printf(vcpu, "%s: unexpected cr %u\n", __func__, cr);
  3588. return 0;
  3589. }
  3590. return value;
  3591. }
  3592. static int emulator_set_cr(int cr, unsigned long val, struct kvm_vcpu *vcpu)
  3593. {
  3594. int res = 0;
  3595. switch (cr) {
  3596. case 0:
  3597. res = kvm_set_cr0(vcpu, mk_cr_64(kvm_read_cr0(vcpu), val));
  3598. break;
  3599. case 2:
  3600. vcpu->arch.cr2 = val;
  3601. break;
  3602. case 3:
  3603. res = kvm_set_cr3(vcpu, val);
  3604. break;
  3605. case 4:
  3606. res = kvm_set_cr4(vcpu, mk_cr_64(kvm_read_cr4(vcpu), val));
  3607. break;
  3608. case 8:
  3609. res = __kvm_set_cr8(vcpu, val & 0xfUL);
  3610. break;
  3611. default:
  3612. vcpu_printf(vcpu, "%s: unexpected cr %u\n", __func__, cr);
  3613. res = -1;
  3614. }
  3615. return res;
  3616. }
  3617. static int emulator_get_cpl(struct kvm_vcpu *vcpu)
  3618. {
  3619. return kvm_x86_ops->get_cpl(vcpu);
  3620. }
  3621. static void emulator_get_gdt(struct desc_ptr *dt, struct kvm_vcpu *vcpu)
  3622. {
  3623. kvm_x86_ops->get_gdt(vcpu, dt);
  3624. }
  3625. static void emulator_get_idt(struct desc_ptr *dt, struct kvm_vcpu *vcpu)
  3626. {
  3627. kvm_x86_ops->get_idt(vcpu, dt);
  3628. }
  3629. static unsigned long emulator_get_cached_segment_base(int seg,
  3630. struct kvm_vcpu *vcpu)
  3631. {
  3632. return get_segment_base(vcpu, seg);
  3633. }
  3634. static bool emulator_get_cached_descriptor(struct desc_struct *desc, int seg,
  3635. struct kvm_vcpu *vcpu)
  3636. {
  3637. struct kvm_segment var;
  3638. kvm_get_segment(vcpu, &var, seg);
  3639. if (var.unusable)
  3640. return false;
  3641. if (var.g)
  3642. var.limit >>= 12;
  3643. set_desc_limit(desc, var.limit);
  3644. set_desc_base(desc, (unsigned long)var.base);
  3645. desc->type = var.type;
  3646. desc->s = var.s;
  3647. desc->dpl = var.dpl;
  3648. desc->p = var.present;
  3649. desc->avl = var.avl;
  3650. desc->l = var.l;
  3651. desc->d = var.db;
  3652. desc->g = var.g;
  3653. return true;
  3654. }
  3655. static void emulator_set_cached_descriptor(struct desc_struct *desc, int seg,
  3656. struct kvm_vcpu *vcpu)
  3657. {
  3658. struct kvm_segment var;
  3659. /* needed to preserve selector */
  3660. kvm_get_segment(vcpu, &var, seg);
  3661. var.base = get_desc_base(desc);
  3662. var.limit = get_desc_limit(desc);
  3663. if (desc->g)
  3664. var.limit = (var.limit << 12) | 0xfff;
	var.type = desc->type;
	var.dpl = desc->dpl;
	var.db = desc->d;
	var.s = desc->s;
	var.l = desc->l;
	var.g = desc->g;
	var.avl = desc->avl;
	var.present = desc->p;
	var.unusable = !var.present;
	var.padding = 0;
  3676. kvm_set_segment(vcpu, &var, seg);
  3677. return;
  3678. }
  3679. static u16 emulator_get_segment_selector(int seg, struct kvm_vcpu *vcpu)
  3680. {
  3681. struct kvm_segment kvm_seg;
  3682. kvm_get_segment(vcpu, &kvm_seg, seg);
  3683. return kvm_seg.selector;
  3684. }
  3685. static void emulator_set_segment_selector(u16 sel, int seg,
  3686. struct kvm_vcpu *vcpu)
  3687. {
  3688. struct kvm_segment kvm_seg;
  3689. kvm_get_segment(vcpu, &kvm_seg, seg);
  3690. kvm_seg.selector = sel;
  3691. kvm_set_segment(vcpu, &kvm_seg, seg);
  3692. }
  3693. static struct x86_emulate_ops emulate_ops = {
  3694. .read_std = kvm_read_guest_virt_system,
  3695. .write_std = kvm_write_guest_virt_system,
  3696. .fetch = kvm_fetch_guest_virt,
  3697. .read_emulated = emulator_read_emulated,
  3698. .write_emulated = emulator_write_emulated,
  3699. .cmpxchg_emulated = emulator_cmpxchg_emulated,
  3700. .pio_in_emulated = emulator_pio_in_emulated,
  3701. .pio_out_emulated = emulator_pio_out_emulated,
  3702. .get_cached_descriptor = emulator_get_cached_descriptor,
  3703. .set_cached_descriptor = emulator_set_cached_descriptor,
  3704. .get_segment_selector = emulator_get_segment_selector,
  3705. .set_segment_selector = emulator_set_segment_selector,
  3706. .get_cached_segment_base = emulator_get_cached_segment_base,
  3707. .get_gdt = emulator_get_gdt,
  3708. .get_idt = emulator_get_idt,
  3709. .get_cr = emulator_get_cr,
  3710. .set_cr = emulator_set_cr,
  3711. .cpl = emulator_get_cpl,
  3712. .get_dr = emulator_get_dr,
  3713. .set_dr = emulator_set_dr,
  3714. .set_msr = kvm_set_msr,
  3715. .get_msr = kvm_get_msr,
  3716. };
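/*
 * Force the lazily-cached registers to be read from hardware and mark
 * the whole register file dirty, so the emulator sees current values
 * and everything is written back afterwards.
 */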
  3717. static void cache_all_regs(struct kvm_vcpu *vcpu)
  3718. {
  3719. kvm_register_read(vcpu, VCPU_REGS_RAX);
  3720. kvm_register_read(vcpu, VCPU_REGS_RSP);
  3721. kvm_register_read(vcpu, VCPU_REGS_RIP);
  3722. vcpu->arch.regs_dirty = ~0;
  3723. }
  3724. static void toggle_interruptibility(struct kvm_vcpu *vcpu, u32 mask)
  3725. {
  3726. u32 int_shadow = kvm_x86_ops->get_interrupt_shadow(vcpu, mask);
	/*
	 * An "sti; sti" sequence only disables interrupts for the first
	 * instruction.  So, if the last instruction, be it emulated or
	 * not, left the system with the INT_STI flag enabled, it
	 * means that the last instruction was an sti.  We should not
	 * leave the flag on in this case.  The same goes for mov ss.
	 */
  3734. if (!(int_shadow & mask))
  3735. kvm_x86_ops->set_interrupt_shadow(vcpu, mask);
  3736. }
  3737. static void inject_emulated_exception(struct kvm_vcpu *vcpu)
  3738. {
  3739. struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt;
  3740. if (ctxt->exception.vector == PF_VECTOR)
  3741. kvm_propagate_fault(vcpu, &ctxt->exception);
  3742. else if (ctxt->exception.error_code_valid)
  3743. kvm_queue_exception_e(vcpu, ctxt->exception.vector,
  3744. ctxt->exception.error_code);
  3745. else
  3746. kvm_queue_exception(vcpu, ctxt->exception.vector);
  3747. }
  3748. static void init_emulate_ctxt(struct kvm_vcpu *vcpu)
  3749. {
  3750. struct decode_cache *c = &vcpu->arch.emulate_ctxt.decode;
  3751. int cs_db, cs_l;
  3752. cache_all_regs(vcpu);
  3753. kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
  3754. vcpu->arch.emulate_ctxt.vcpu = vcpu;
  3755. vcpu->arch.emulate_ctxt.eflags = kvm_x86_ops->get_rflags(vcpu);
  3756. vcpu->arch.emulate_ctxt.eip = kvm_rip_read(vcpu);
	vcpu->arch.emulate_ctxt.mode =
		(!is_protmode(vcpu)) ? X86EMUL_MODE_REAL :
		(vcpu->arch.emulate_ctxt.eflags & X86_EFLAGS_VM) ? X86EMUL_MODE_VM86 :
		cs_l ? X86EMUL_MODE_PROT64 :
		cs_db ? X86EMUL_MODE_PROT32 : X86EMUL_MODE_PROT16;
  3763. memset(c, 0, sizeof(struct decode_cache));
  3764. memcpy(c->regs, vcpu->arch.regs, sizeof c->regs);
  3765. }
  3766. int kvm_inject_realmode_interrupt(struct kvm_vcpu *vcpu, int irq)
  3767. {
  3768. struct decode_cache *c = &vcpu->arch.emulate_ctxt.decode;
  3769. int ret;
  3770. init_emulate_ctxt(vcpu);
  3771. vcpu->arch.emulate_ctxt.decode.op_bytes = 2;
  3772. vcpu->arch.emulate_ctxt.decode.ad_bytes = 2;
  3773. vcpu->arch.emulate_ctxt.decode.eip = vcpu->arch.emulate_ctxt.eip;
  3774. ret = emulate_int_real(&vcpu->arch.emulate_ctxt, &emulate_ops, irq);
  3775. if (ret != X86EMUL_CONTINUE)
  3776. return EMULATE_FAIL;
  3777. vcpu->arch.emulate_ctxt.eip = c->eip;
  3778. memcpy(vcpu->arch.regs, c->regs, sizeof c->regs);
  3779. kvm_rip_write(vcpu, vcpu->arch.emulate_ctxt.eip);
  3780. kvm_x86_ops->set_rflags(vcpu, vcpu->arch.emulate_ctxt.eflags);
  3781. if (irq == NMI_VECTOR)
  3782. vcpu->arch.nmi_pending = false;
  3783. else
  3784. vcpu->arch.interrupt.pending = false;
  3785. return EMULATE_DONE;
  3786. }
  3787. EXPORT_SYMBOL_GPL(kvm_inject_realmode_interrupt);
  3788. static int handle_emulation_failure(struct kvm_vcpu *vcpu)
  3789. {
  3790. ++vcpu->stat.insn_emulation_fail;
  3791. trace_kvm_emulate_insn_failed(vcpu);
  3792. vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
  3793. vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
  3794. vcpu->run->internal.ndata = 0;
  3795. kvm_queue_exception(vcpu, UD_VECTOR);
  3796. return EMULATE_FAIL;
  3797. }
  3798. static bool reexecute_instruction(struct kvm_vcpu *vcpu, gva_t gva)
  3799. {
  3800. gpa_t gpa;
  3801. if (tdp_enabled)
  3802. return false;
	/*
	 * If emulation was due to access to a shadowed page table
	 * and it failed, try to unshadow the page and re-enter the
	 * guest to let the CPU execute the instruction.
	 */
  3808. if (kvm_mmu_unprotect_page_virt(vcpu, gva))
  3809. return true;
  3810. gpa = kvm_mmu_gva_to_gpa_system(vcpu, gva, NULL);
  3811. if (gpa == UNMAPPED_GVA)
  3812. return true; /* let cpu generate fault */
  3813. if (!kvm_is_error_hva(gfn_to_hva(vcpu->kvm, gpa >> PAGE_SHIFT)))
  3814. return true;
  3815. return false;
  3816. }
  3817. int emulate_instruction(struct kvm_vcpu *vcpu,
  3818. unsigned long cr2,
  3819. u16 error_code,
  3820. int emulation_type)
  3821. {
  3822. int r;
  3823. struct decode_cache *c = &vcpu->arch.emulate_ctxt.decode;
  3824. kvm_clear_exception_queue(vcpu);
  3825. vcpu->arch.mmio_fault_cr2 = cr2;
	/*
	 * TODO: fix emulate.c to use guest_read/write_register
	 * instead of direct ->regs accesses; this can save a hundred
	 * cycles on Intel for instructions that don't read/change RSP,
	 * for example.
	 */
  3832. cache_all_regs(vcpu);
  3833. if (!(emulation_type & EMULTYPE_NO_DECODE)) {
  3834. init_emulate_ctxt(vcpu);
  3835. vcpu->arch.emulate_ctxt.interruptibility = 0;
  3836. vcpu->arch.emulate_ctxt.have_exception = false;
  3837. vcpu->arch.emulate_ctxt.perm_ok = false;
  3838. r = x86_decode_insn(&vcpu->arch.emulate_ctxt);
  3839. if (r == X86EMUL_PROPAGATE_FAULT)
  3840. goto done;
  3841. trace_kvm_emulate_insn_start(vcpu);
		/*
		 * Only allow emulation of specific instructions on #UD
		 * (namely VMMCALL, sysenter, sysexit, syscall).
		 */
  3844. if (emulation_type & EMULTYPE_TRAP_UD) {
  3845. if (!c->twobyte)
  3846. return EMULATE_FAIL;
  3847. switch (c->b) {
  3848. case 0x01: /* VMMCALL */
  3849. if (c->modrm_mod != 3 || c->modrm_rm != 1)
  3850. return EMULATE_FAIL;
  3851. break;
  3852. case 0x34: /* sysenter */
  3853. case 0x35: /* sysexit */
  3854. if (c->modrm_mod != 0 || c->modrm_rm != 0)
  3855. return EMULATE_FAIL;
  3856. break;
  3857. case 0x05: /* syscall */
  3858. if (c->modrm_mod != 0 || c->modrm_rm != 0)
  3859. return EMULATE_FAIL;
  3860. break;
  3861. default:
  3862. return EMULATE_FAIL;
  3863. }
  3864. if (!(c->modrm_reg == 0 || c->modrm_reg == 3))
  3865. return EMULATE_FAIL;
  3866. }
  3867. ++vcpu->stat.insn_emulation;
  3868. if (r) {
  3869. if (reexecute_instruction(vcpu, cr2))
  3870. return EMULATE_DONE;
  3871. if (emulation_type & EMULTYPE_SKIP)
  3872. return EMULATE_FAIL;
  3873. return handle_emulation_failure(vcpu);
  3874. }
  3875. }
  3876. if (emulation_type & EMULTYPE_SKIP) {
  3877. kvm_rip_write(vcpu, vcpu->arch.emulate_ctxt.decode.eip);
  3878. return EMULATE_DONE;
  3879. }
	/*
	 * This is needed for the VMware backdoor interface to work, since
	 * it changes register values during the I/O operation.
	 */
  3882. memcpy(c->regs, vcpu->arch.regs, sizeof c->regs);
  3883. restart:
  3884. r = x86_emulate_insn(&vcpu->arch.emulate_ctxt);
  3885. if (r == EMULATION_FAILED) {
  3886. if (reexecute_instruction(vcpu, cr2))
  3887. return EMULATE_DONE;
  3888. return handle_emulation_failure(vcpu);
  3889. }
  3890. done:
  3891. if (vcpu->arch.emulate_ctxt.have_exception) {
  3892. inject_emulated_exception(vcpu);
  3893. r = EMULATE_DONE;
  3894. } else if (vcpu->arch.pio.count) {
  3895. if (!vcpu->arch.pio.in)
  3896. vcpu->arch.pio.count = 0;
  3897. r = EMULATE_DO_MMIO;
  3898. } else if (vcpu->mmio_needed) {
  3899. if (vcpu->mmio_is_write)
  3900. vcpu->mmio_needed = 0;
  3901. r = EMULATE_DO_MMIO;
  3902. } else if (r == EMULATION_RESTART)
  3903. goto restart;
  3904. else
  3905. r = EMULATE_DONE;
  3906. toggle_interruptibility(vcpu, vcpu->arch.emulate_ctxt.interruptibility);
  3907. kvm_x86_ops->set_rflags(vcpu, vcpu->arch.emulate_ctxt.eflags);
  3908. kvm_make_request(KVM_REQ_EVENT, vcpu);
  3909. memcpy(vcpu->arch.regs, c->regs, sizeof c->regs);
  3910. kvm_rip_write(vcpu, vcpu->arch.emulate_ctxt.eip);
  3911. return r;
  3912. }
  3913. EXPORT_SYMBOL_GPL(emulate_instruction);
  3914. int kvm_fast_pio_out(struct kvm_vcpu *vcpu, int size, unsigned short port)
  3915. {
  3916. unsigned long val = kvm_register_read(vcpu, VCPU_REGS_RAX);
  3917. int ret = emulator_pio_out_emulated(size, port, &val, 1, vcpu);
  3918. /* do not return to emulator after return from userspace */
  3919. vcpu->arch.pio.count = 0;
  3920. return ret;
  3921. }
  3922. EXPORT_SYMBOL_GPL(kvm_fast_pio_out);
  3923. static void tsc_bad(void *info)
  3924. {
  3925. __get_cpu_var(cpu_tsc_khz) = 0;
  3926. }
  3927. static void tsc_khz_changed(void *data)
  3928. {
  3929. struct cpufreq_freqs *freq = data;
  3930. unsigned long khz = 0;
  3931. if (data)
  3932. khz = freq->new;
  3933. else if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
  3934. khz = cpufreq_quick_get(raw_smp_processor_id());
  3935. if (!khz)
  3936. khz = tsc_khz;
  3937. __get_cpu_var(cpu_tsc_khz) = khz;
  3938. }
  3939. static int kvmclock_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
  3940. void *data)
  3941. {
  3942. struct cpufreq_freqs *freq = data;
  3943. struct kvm *kvm;
  3944. struct kvm_vcpu *vcpu;
  3945. int i, send_ipi = 0;
  3946. /*
  3947. * We allow guests to temporarily run on slowing clocks,
  3948. * provided we notify them after, or to run on accelerating
  3949. * clocks, provided we notify them before. Thus time never
  3950. * goes backwards.
  3951. *
  3952. * However, we have a problem. We can't atomically update
  3953. * the frequency of a given CPU from this function; it is
  3954. * merely a notifier, which can be called from any CPU.
  3955. * Changing the TSC frequency at arbitrary points in time
  3956. * requires a recomputation of local variables related to
  3957. * the TSC for each VCPU. We must flag these local variables
  3958. * to be updated and be sure the update takes place with the
  3959. * new frequency before any guests proceed.
  3960. *
  3961. * Unfortunately, the combination of hotplug CPU and frequency
  3962. * change creates an intractable locking scenario; the order
  3963. * of when these callouts happen is undefined with respect to
  3964. * CPU hotplug, and they can race with each other. As such,
  3965. * merely setting per_cpu(cpu_tsc_khz) = X during a hotadd is
  3966. * undefined; you can actually have a CPU frequency change take
  3967. * place in between the computation of X and the setting of the
  3968. * variable. To protect against this problem, all updates of
  3969. * the per_cpu tsc_khz variable are done in an interrupt
  3970. * protected IPI, and all callers wishing to update the value
  3971. * must wait for a synchronous IPI to complete (which is trivial
  3972. * if the caller is on the CPU already). This establishes the
  3973. * necessary total order on variable updates.
  3974. *
  3975. * Note that because a guest time update may take place
  3976. * anytime after the setting of the VCPU's request bit, the
  3977. * correct TSC value must be set before the request. However,
  3978. * to ensure the update actually makes it to any guest which
  3979. * starts running in hardware virtualization between the set
  3980. * and the acquisition of the spinlock, we must also ping the
  3981. * CPU after setting the request bit.
  3982. *
  3983. */
  3984. if (val == CPUFREQ_PRECHANGE && freq->old > freq->new)
  3985. return 0;
  3986. if (val == CPUFREQ_POSTCHANGE && freq->old < freq->new)
  3987. return 0;
  3988. smp_call_function_single(freq->cpu, tsc_khz_changed, freq, 1);
  3989. spin_lock(&kvm_lock);
  3990. list_for_each_entry(kvm, &vm_list, vm_list) {
  3991. kvm_for_each_vcpu(i, vcpu, kvm) {
  3992. if (vcpu->cpu != freq->cpu)
  3993. continue;
  3994. kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
  3995. if (vcpu->cpu != smp_processor_id())
  3996. send_ipi = 1;
  3997. }
  3998. }
  3999. spin_unlock(&kvm_lock);
  4000. if (freq->old < freq->new && send_ipi) {
		/*
		 * We upscale the frequency.  We must make sure the guest
		 * doesn't see old kvmclock values while running with the
		 * new frequency; otherwise we risk the guest seeing time
		 * go backwards.
		 *
		 * In case we update the frequency for another cpu
		 * (which might be in guest context) send an interrupt
		 * to kick the cpu out of guest context.  Next time
		 * guest context is entered kvmclock will be updated,
		 * so the guest will not see stale values.
		 */
  4013. smp_call_function_single(freq->cpu, tsc_khz_changed, freq, 1);
  4014. }
  4015. return 0;
  4016. }
  4017. static struct notifier_block kvmclock_cpufreq_notifier_block = {
  4018. .notifier_call = kvmclock_cpufreq_notifier
  4019. };
  4020. static int kvmclock_cpu_notifier(struct notifier_block *nfb,
  4021. unsigned long action, void *hcpu)
  4022. {
  4023. unsigned int cpu = (unsigned long)hcpu;
  4024. switch (action) {
  4025. case CPU_ONLINE:
  4026. case CPU_DOWN_FAILED:
  4027. smp_call_function_single(cpu, tsc_khz_changed, NULL, 1);
  4028. break;
  4029. case CPU_DOWN_PREPARE:
  4030. smp_call_function_single(cpu, tsc_bad, NULL, 1);
  4031. break;
  4032. }
  4033. return NOTIFY_OK;
  4034. }
  4035. static struct notifier_block kvmclock_cpu_notifier_block = {
  4036. .notifier_call = kvmclock_cpu_notifier,
  4037. .priority = -INT_MAX
  4038. };
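/*
 * Record the host's maximum TSC frequency and, on hosts without a
 * constant TSC, register for cpufreq transitions so the per-cpu
 * cpu_tsc_khz value tracks frequency changes.
 */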
  4039. static void kvm_timer_init(void)
  4040. {
  4041. int cpu;
  4042. max_tsc_khz = tsc_khz;
  4043. register_hotcpu_notifier(&kvmclock_cpu_notifier_block);
  4044. if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) {
  4045. #ifdef CONFIG_CPU_FREQ
  4046. struct cpufreq_policy policy;
  4047. memset(&policy, 0, sizeof(policy));
  4048. cpu = get_cpu();
  4049. cpufreq_get_policy(&policy, cpu);
  4050. if (policy.cpuinfo.max_freq)
  4051. max_tsc_khz = policy.cpuinfo.max_freq;
  4052. put_cpu();
  4053. #endif
  4054. cpufreq_register_notifier(&kvmclock_cpufreq_notifier_block,
  4055. CPUFREQ_TRANSITION_NOTIFIER);
  4056. }
  4057. pr_debug("kvm: max_tsc_khz = %ld\n", max_tsc_khz);
  4058. for_each_online_cpu(cpu)
  4059. smp_call_function_single(cpu, tsc_khz_changed, NULL, 1);
  4060. }
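/*
 * Callbacks that let perf attribute samples taken while a vcpu is in
 * guest mode; current_vcpu is set around NMI handling further below.
 */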
  4061. static DEFINE_PER_CPU(struct kvm_vcpu *, current_vcpu);
  4062. static int kvm_is_in_guest(void)
  4063. {
  4064. return percpu_read(current_vcpu) != NULL;
  4065. }
  4066. static int kvm_is_user_mode(void)
  4067. {
  4068. int user_mode = 3;
  4069. if (percpu_read(current_vcpu))
  4070. user_mode = kvm_x86_ops->get_cpl(percpu_read(current_vcpu));
  4071. return user_mode != 0;
  4072. }
  4073. static unsigned long kvm_get_guest_ip(void)
  4074. {
  4075. unsigned long ip = 0;
  4076. if (percpu_read(current_vcpu))
  4077. ip = kvm_rip_read(percpu_read(current_vcpu));
  4078. return ip;
  4079. }
  4080. static struct perf_guest_info_callbacks kvm_guest_cbs = {
  4081. .is_in_guest = kvm_is_in_guest,
  4082. .is_user_mode = kvm_is_user_mode,
  4083. .get_guest_ip = kvm_get_guest_ip,
  4084. };
  4085. void kvm_before_handle_nmi(struct kvm_vcpu *vcpu)
  4086. {
  4087. percpu_write(current_vcpu, vcpu);
  4088. }
  4089. EXPORT_SYMBOL_GPL(kvm_before_handle_nmi);
  4090. void kvm_after_handle_nmi(struct kvm_vcpu *vcpu)
  4091. {
  4092. percpu_write(current_vcpu, NULL);
  4093. }
  4094. EXPORT_SYMBOL_GPL(kvm_after_handle_nmi);
  4095. int kvm_arch_init(void *opaque)
  4096. {
  4097. int r;
  4098. struct kvm_x86_ops *ops = (struct kvm_x86_ops *)opaque;
  4099. if (kvm_x86_ops) {
  4100. printk(KERN_ERR "kvm: already loaded the other module\n");
  4101. r = -EEXIST;
  4102. goto out;
  4103. }
  4104. if (!ops->cpu_has_kvm_support()) {
  4105. printk(KERN_ERR "kvm: no hardware support\n");
  4106. r = -EOPNOTSUPP;
  4107. goto out;
  4108. }
  4109. if (ops->disabled_by_bios()) {
  4110. printk(KERN_ERR "kvm: disabled by bios\n");
  4111. r = -EOPNOTSUPP;
  4112. goto out;
  4113. }
  4114. r = kvm_mmu_module_init();
  4115. if (r)
  4116. goto out;
  4117. kvm_init_msr_list();
  4118. kvm_x86_ops = ops;
  4119. kvm_mmu_set_nonpresent_ptes(0ull, 0ull);
  4120. kvm_mmu_set_mask_ptes(PT_USER_MASK, PT_ACCESSED_MASK,
  4121. PT_DIRTY_MASK, PT64_NX_MASK, 0);
  4122. kvm_timer_init();
  4123. perf_register_guest_info_callbacks(&kvm_guest_cbs);
  4124. if (cpu_has_xsave)
  4125. host_xcr0 = xgetbv(XCR_XFEATURE_ENABLED_MASK);
  4126. return 0;
  4127. out:
  4128. return r;
  4129. }
  4130. void kvm_arch_exit(void)
  4131. {
  4132. perf_unregister_guest_info_callbacks(&kvm_guest_cbs);
  4133. if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
  4134. cpufreq_unregister_notifier(&kvmclock_cpufreq_notifier_block,
  4135. CPUFREQ_TRANSITION_NOTIFIER);
  4136. unregister_hotcpu_notifier(&kvmclock_cpu_notifier_block);
  4137. kvm_x86_ops = NULL;
  4138. kvm_mmu_module_exit();
  4139. }
  4140. int kvm_emulate_halt(struct kvm_vcpu *vcpu)
  4141. {
  4142. ++vcpu->stat.halt_exits;
  4143. if (irqchip_in_kernel(vcpu->kvm)) {
  4144. vcpu->arch.mp_state = KVM_MP_STATE_HALTED;
  4145. return 1;
  4146. } else {
  4147. vcpu->run->exit_reason = KVM_EXIT_HLT;
  4148. return 0;
  4149. }
  4150. }
  4151. EXPORT_SYMBOL_GPL(kvm_emulate_halt);
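/* Combine the two 32-bit hypercall arguments into one gpa for 32-bit guests. */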
  4152. static inline gpa_t hc_gpa(struct kvm_vcpu *vcpu, unsigned long a0,
  4153. unsigned long a1)
  4154. {
  4155. if (is_long_mode(vcpu))
  4156. return a0;
  4157. else
  4158. return a0 | ((gpa_t)a1 << 32);
  4159. }
  4160. int kvm_hv_hypercall(struct kvm_vcpu *vcpu)
  4161. {
  4162. u64 param, ingpa, outgpa, ret;
  4163. uint16_t code, rep_idx, rep_cnt, res = HV_STATUS_SUCCESS, rep_done = 0;
  4164. bool fast, longmode;
  4165. int cs_db, cs_l;
	/*
	 * A hypercall generates #UD from non-zero CPL or real mode,
	 * per the Hyper-V spec.
	 */
  4170. if (kvm_x86_ops->get_cpl(vcpu) != 0 || !is_protmode(vcpu)) {
  4171. kvm_queue_exception(vcpu, UD_VECTOR);
  4172. return 0;
  4173. }
  4174. kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
  4175. longmode = is_long_mode(vcpu) && cs_l == 1;
  4176. if (!longmode) {
  4177. param = ((u64)kvm_register_read(vcpu, VCPU_REGS_RDX) << 32) |
  4178. (kvm_register_read(vcpu, VCPU_REGS_RAX) & 0xffffffff);
  4179. ingpa = ((u64)kvm_register_read(vcpu, VCPU_REGS_RBX) << 32) |
  4180. (kvm_register_read(vcpu, VCPU_REGS_RCX) & 0xffffffff);
  4181. outgpa = ((u64)kvm_register_read(vcpu, VCPU_REGS_RDI) << 32) |
  4182. (kvm_register_read(vcpu, VCPU_REGS_RSI) & 0xffffffff);
  4183. }
  4184. #ifdef CONFIG_X86_64
  4185. else {
  4186. param = kvm_register_read(vcpu, VCPU_REGS_RCX);
  4187. ingpa = kvm_register_read(vcpu, VCPU_REGS_RDX);
  4188. outgpa = kvm_register_read(vcpu, VCPU_REGS_R8);
  4189. }
  4190. #endif
  4191. code = param & 0xffff;
  4192. fast = (param >> 16) & 0x1;
  4193. rep_cnt = (param >> 32) & 0xfff;
  4194. rep_idx = (param >> 48) & 0xfff;
  4195. trace_kvm_hv_hypercall(code, fast, rep_cnt, rep_idx, ingpa, outgpa);
  4196. switch (code) {
  4197. case HV_X64_HV_NOTIFY_LONG_SPIN_WAIT:
  4198. kvm_vcpu_on_spin(vcpu);
  4199. break;
  4200. default:
  4201. res = HV_STATUS_INVALID_HYPERCALL_CODE;
  4202. break;
  4203. }
  4204. ret = res | (((u64)rep_done & 0xfff) << 32);
  4205. if (longmode) {
  4206. kvm_register_write(vcpu, VCPU_REGS_RAX, ret);
  4207. } else {
  4208. kvm_register_write(vcpu, VCPU_REGS_RDX, ret >> 32);
  4209. kvm_register_write(vcpu, VCPU_REGS_RAX, ret & 0xffffffff);
  4210. }
  4211. return 1;
  4212. }
int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
{
	unsigned long nr, a0, a1, a2, a3, ret;
	int r = 1;

	if (kvm_hv_hypercall_enabled(vcpu->kvm))
		return kvm_hv_hypercall(vcpu);

	nr = kvm_register_read(vcpu, VCPU_REGS_RAX);
	a0 = kvm_register_read(vcpu, VCPU_REGS_RBX);
	a1 = kvm_register_read(vcpu, VCPU_REGS_RCX);
	a2 = kvm_register_read(vcpu, VCPU_REGS_RDX);
	a3 = kvm_register_read(vcpu, VCPU_REGS_RSI);

	trace_kvm_hypercall(nr, a0, a1, a2, a3);

	if (!is_long_mode(vcpu)) {
		nr &= 0xFFFFFFFF;
		a0 &= 0xFFFFFFFF;
		a1 &= 0xFFFFFFFF;
		a2 &= 0xFFFFFFFF;
		a3 &= 0xFFFFFFFF;
	}

	if (kvm_x86_ops->get_cpl(vcpu) != 0) {
		ret = -KVM_EPERM;
		goto out;
	}

	switch (nr) {
	case KVM_HC_VAPIC_POLL_IRQ:
		ret = 0;
		break;
	case KVM_HC_MMU_OP:
		r = kvm_pv_mmu_op(vcpu, a0, hc_gpa(vcpu, a1, a2), &ret);
		break;
	default:
		ret = -KVM_ENOSYS;
		break;
	}
out:
	kvm_register_write(vcpu, VCPU_REGS_RAX, ret);
	++vcpu->stat.hypercalls;
	return r;
}
EXPORT_SYMBOL_GPL(kvm_emulate_hypercall);
int kvm_fix_hypercall(struct kvm_vcpu *vcpu)
{
	char instruction[3];
	unsigned long rip = kvm_rip_read(vcpu);

	/*
	 * Blow out the MMU so that no other VCPU keeps an active mapping of
	 * the old instruction; the patched hypercall then appears atomically
	 * across all VCPUs.
	 */
	kvm_mmu_zap_all(vcpu->kvm);

	kvm_x86_ops->patch_hypercall(vcpu, instruction);

	return emulator_write_emulated(rip, instruction, 3, NULL, vcpu);
}
void realmode_lgdt(struct kvm_vcpu *vcpu, u16 limit, unsigned long base)
{
	struct desc_ptr dt = { limit, base };

	kvm_x86_ops->set_gdt(vcpu, &dt);
}

void realmode_lidt(struct kvm_vcpu *vcpu, u16 limit, unsigned long base)
{
	struct desc_ptr dt = { limit, base };

	kvm_x86_ops->set_idt(vcpu, &dt);
}
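
/*
 * Some CPUID leaves are "stateful": successive reads of the same leaf return
 * different entries.  KVM_CPUID_FLAG_STATE_READ_NEXT marks the entry to be
 * returned next; once consumed, the flag is rotated to the following entry
 * with the same function number.
 */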
static int move_to_next_stateful_cpuid_entry(struct kvm_vcpu *vcpu, int i)
{
	struct kvm_cpuid_entry2 *e = &vcpu->arch.cpuid_entries[i];
	int j, nent = vcpu->arch.cpuid_nent;

	e->flags &= ~KVM_CPUID_FLAG_STATE_READ_NEXT;
	/* when no next entry is found, the current entry[i] is reselected */
	for (j = i + 1; ; j = (j + 1) % nent) {
		struct kvm_cpuid_entry2 *ej = &vcpu->arch.cpuid_entries[j];
		if (ej->function == e->function) {
			ej->flags |= KVM_CPUID_FLAG_STATE_READ_NEXT;
			return j;
		}
	}
	return 0; /* silence gcc, even though control never reaches here */
}

/* find an entry with matching function, matching index (if needed), and that
 * should be read next (if it's stateful) */
static int is_matching_cpuid_entry(struct kvm_cpuid_entry2 *e,
				   u32 function, u32 index)
{
	if (e->function != function)
		return 0;
	if ((e->flags & KVM_CPUID_FLAG_SIGNIFCANT_INDEX) && e->index != index)
		return 0;
	if ((e->flags & KVM_CPUID_FLAG_STATEFUL_FUNC) &&
	    !(e->flags & KVM_CPUID_FLAG_STATE_READ_NEXT))
		return 0;
	return 1;
}

struct kvm_cpuid_entry2 *kvm_find_cpuid_entry(struct kvm_vcpu *vcpu,
					      u32 function, u32 index)
{
	int i;
	struct kvm_cpuid_entry2 *best = NULL;

	for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
		struct kvm_cpuid_entry2 *e;

		e = &vcpu->arch.cpuid_entries[i];
		if (is_matching_cpuid_entry(e, function, index)) {
			if (e->flags & KVM_CPUID_FLAG_STATEFUL_FUNC)
				move_to_next_stateful_cpuid_entry(vcpu, i);
			best = e;
			break;
		}
		/*
		 * Both basic or both extended?
		 */
		if (((e->function ^ function) & 0x80000000) == 0)
			if (!best || e->function > best->function)
				best = e;
	}
	return best;
}
EXPORT_SYMBOL_GPL(kvm_find_cpuid_entry);
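
/*
 * Guest MAXPHYADDR comes from CPUID leaf 0x80000008, EAX[7:0]; if the guest's
 * CPUID does not expose that leaf, fall back to the traditional 36 bits.
 */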
int cpuid_maxphyaddr(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 0x80000000, 0);
	if (!best || best->eax < 0x80000008)
		goto not_found;
	best = kvm_find_cpuid_entry(vcpu, 0x80000008, 0);
	if (best)
		return best->eax & 0xff;
not_found:
	return 36;
}

void kvm_emulate_cpuid(struct kvm_vcpu *vcpu)
{
	u32 function, index;
	struct kvm_cpuid_entry2 *best;

	function = kvm_register_read(vcpu, VCPU_REGS_RAX);
	index = kvm_register_read(vcpu, VCPU_REGS_RCX);
	kvm_register_write(vcpu, VCPU_REGS_RAX, 0);
	kvm_register_write(vcpu, VCPU_REGS_RBX, 0);
	kvm_register_write(vcpu, VCPU_REGS_RCX, 0);
	kvm_register_write(vcpu, VCPU_REGS_RDX, 0);
	best = kvm_find_cpuid_entry(vcpu, function, index);
	if (best) {
		kvm_register_write(vcpu, VCPU_REGS_RAX, best->eax);
		kvm_register_write(vcpu, VCPU_REGS_RBX, best->ebx);
		kvm_register_write(vcpu, VCPU_REGS_RCX, best->ecx);
		kvm_register_write(vcpu, VCPU_REGS_RDX, best->edx);
	}
	kvm_x86_ops->skip_emulated_instruction(vcpu);
	trace_kvm_cpuid(function,
			kvm_register_read(vcpu, VCPU_REGS_RAX),
			kvm_register_read(vcpu, VCPU_REGS_RBX),
			kvm_register_read(vcpu, VCPU_REGS_RCX),
			kvm_register_read(vcpu, VCPU_REGS_RDX));
}
EXPORT_SYMBOL_GPL(kvm_emulate_cpuid);

/*
 * Check if userspace requested an interrupt window, and that the
 * interrupt window is open.
 *
 * No need to exit to userspace if we already have an interrupt queued.
 */
static int dm_request_for_irq_injection(struct kvm_vcpu *vcpu)
{
	return (!irqchip_in_kernel(vcpu->kvm) && !kvm_cpu_has_interrupt(vcpu) &&
		vcpu->run->request_interrupt_window &&
		kvm_arch_interrupt_allowed(vcpu));
}

static void post_kvm_run_save(struct kvm_vcpu *vcpu)
{
	struct kvm_run *kvm_run = vcpu->run;

	kvm_run->if_flag = (kvm_get_rflags(vcpu) & X86_EFLAGS_IF) != 0;
	kvm_run->cr8 = kvm_get_cr8(vcpu);
	kvm_run->apic_base = kvm_get_apic_base(vcpu);
	if (irqchip_in_kernel(vcpu->kvm))
		kvm_run->ready_for_interrupt_injection = 1;
	else
		kvm_run->ready_for_interrupt_injection =
			kvm_arch_interrupt_allowed(vcpu) &&
			!kvm_cpu_has_interrupt(vcpu) &&
			!kvm_event_needs_reinjection(vcpu);
}
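
/*
 * The virtual-APIC page is pinned while the vcpu runs so it can be accessed
 * without faulting; vapic_exit() releases the page and marks it dirty again.
 */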
static void vapic_enter(struct kvm_vcpu *vcpu)
{
	struct kvm_lapic *apic = vcpu->arch.apic;
	struct page *page;

	if (!apic || !apic->vapic_addr)
		return;

	page = gfn_to_page(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT);

	vcpu->arch.apic->vapic_page = page;
}

static void vapic_exit(struct kvm_vcpu *vcpu)
{
	struct kvm_lapic *apic = vcpu->arch.apic;
	int idx;

	if (!apic || !apic->vapic_addr)
		return;

	idx = srcu_read_lock(&vcpu->kvm->srcu);
	kvm_release_page_dirty(apic->vapic_page);
	mark_page_dirty(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT);
	srcu_read_unlock(&vcpu->kvm->srcu, idx);
}

static void update_cr8_intercept(struct kvm_vcpu *vcpu)
{
	int max_irr, tpr;

	if (!kvm_x86_ops->update_cr8_intercept)
		return;

	if (!vcpu->arch.apic)
		return;

	if (!vcpu->arch.apic->vapic_addr)
		max_irr = kvm_lapic_find_highest_irr(vcpu);
	else
		max_irr = -1;

	if (max_irr != -1)
		max_irr >>= 4;

	tpr = kvm_lapic_get_cr8(vcpu);

	kvm_x86_ops->update_cr8_intercept(vcpu, tpr, max_irr);
}
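
/*
 * Event injection order: a pending exception is (re)queued first, then a
 * previously interrupted NMI or interrupt is reinjected, and only then are
 * new NMIs and interrupts delivered, provided the guest can accept them.
 */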
static void inject_pending_event(struct kvm_vcpu *vcpu)
{
	/* try to reinject previous events if any */
	if (vcpu->arch.exception.pending) {
		trace_kvm_inj_exception(vcpu->arch.exception.nr,
					vcpu->arch.exception.has_error_code,
					vcpu->arch.exception.error_code);
		kvm_x86_ops->queue_exception(vcpu, vcpu->arch.exception.nr,
					vcpu->arch.exception.has_error_code,
					vcpu->arch.exception.error_code,
					vcpu->arch.exception.reinject);
		return;
	}

	if (vcpu->arch.nmi_injected) {
		kvm_x86_ops->set_nmi(vcpu);
		return;
	}

	if (vcpu->arch.interrupt.pending) {
		kvm_x86_ops->set_irq(vcpu);
		return;
	}

	/* try to inject new event if pending */
	if (vcpu->arch.nmi_pending) {
		if (kvm_x86_ops->nmi_allowed(vcpu)) {
			vcpu->arch.nmi_pending = false;
			vcpu->arch.nmi_injected = true;
			kvm_x86_ops->set_nmi(vcpu);
		}
	} else if (kvm_cpu_has_interrupt(vcpu)) {
		if (kvm_x86_ops->interrupt_allowed(vcpu)) {
			kvm_queue_interrupt(vcpu, kvm_cpu_get_interrupt(vcpu),
					    false);
			kvm_x86_ops->set_irq(vcpu);
		}
	}
}

static void kvm_load_guest_xcr0(struct kvm_vcpu *vcpu)
{
	if (kvm_read_cr4_bits(vcpu, X86_CR4_OSXSAVE) &&
			!vcpu->guest_xcr0_loaded) {
		/* kvm_set_xcr() also depends on this */
		xsetbv(XCR_XFEATURE_ENABLED_MASK, vcpu->arch.xcr0);
		vcpu->guest_xcr0_loaded = 1;
	}
}

static void kvm_put_guest_xcr0(struct kvm_vcpu *vcpu)
{
	if (vcpu->guest_xcr0_loaded) {
		if (vcpu->arch.xcr0 != host_xcr0)
			xsetbv(XCR_XFEATURE_ENABLED_MASK, host_xcr0);
		vcpu->guest_xcr0_loaded = 0;
	}
}
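
/*
 * One pass of the run loop: handle queued requests, inject pending events,
 * switch to guest context for a single VM entry and hand the resulting exit
 * to kvm_x86_ops->handle_exit().  A return value > 0 means "keep running in
 * the kernel", 0 means "exit to userspace", and < 0 is an error.
 */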
static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
{
	int r;
	bool req_int_win = !irqchip_in_kernel(vcpu->kvm) &&
		vcpu->run->request_interrupt_window;

	if (vcpu->requests) {
		if (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu))
			kvm_mmu_unload(vcpu);
		if (kvm_check_request(KVM_REQ_MIGRATE_TIMER, vcpu))
			__kvm_migrate_timers(vcpu);
		if (kvm_check_request(KVM_REQ_CLOCK_UPDATE, vcpu)) {
			r = kvm_guest_time_update(vcpu);
			if (unlikely(r))
				goto out;
		}
		if (kvm_check_request(KVM_REQ_MMU_SYNC, vcpu))
			kvm_mmu_sync_roots(vcpu);
		if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu))
			kvm_x86_ops->tlb_flush(vcpu);
		if (kvm_check_request(KVM_REQ_REPORT_TPR_ACCESS, vcpu)) {
			vcpu->run->exit_reason = KVM_EXIT_TPR_ACCESS;
			r = 0;
			goto out;
		}
		if (kvm_check_request(KVM_REQ_TRIPLE_FAULT, vcpu)) {
			vcpu->run->exit_reason = KVM_EXIT_SHUTDOWN;
			r = 0;
			goto out;
		}
		if (kvm_check_request(KVM_REQ_DEACTIVATE_FPU, vcpu)) {
			vcpu->fpu_active = 0;
			kvm_x86_ops->fpu_deactivate(vcpu);
		}
		if (kvm_check_request(KVM_REQ_APF_HALT, vcpu)) {
			/* Page is swapped out. Do synthetic halt */
			vcpu->arch.apf.halted = true;
			r = 1;
			goto out;
		}
	}

	r = kvm_mmu_reload(vcpu);
	if (unlikely(r))
		goto out;

	if (kvm_check_request(KVM_REQ_EVENT, vcpu) || req_int_win) {
		inject_pending_event(vcpu);

		/* enable NMI/IRQ window open exits if needed */
		if (vcpu->arch.nmi_pending)
			kvm_x86_ops->enable_nmi_window(vcpu);
		else if (kvm_cpu_has_interrupt(vcpu) || req_int_win)
			kvm_x86_ops->enable_irq_window(vcpu);

		if (kvm_lapic_enabled(vcpu)) {
			update_cr8_intercept(vcpu);
			kvm_lapic_sync_to_vapic(vcpu);
		}
	}

	preempt_disable();

	kvm_x86_ops->prepare_guest_switch(vcpu);
	if (vcpu->fpu_active)
		kvm_load_guest_fpu(vcpu);
	kvm_load_guest_xcr0(vcpu);

	atomic_set(&vcpu->guest_mode, 1);
	smp_wmb();

	local_irq_disable();

	if (!atomic_read(&vcpu->guest_mode) || vcpu->requests
	    || need_resched() || signal_pending(current)) {
		atomic_set(&vcpu->guest_mode, 0);
		smp_wmb();
		local_irq_enable();
		preempt_enable();
		kvm_x86_ops->cancel_injection(vcpu);
		r = 1;
		goto out;
	}

	srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);

	kvm_guest_enter();

	if (unlikely(vcpu->arch.switch_db_regs)) {
		set_debugreg(0, 7);
		set_debugreg(vcpu->arch.eff_db[0], 0);
		set_debugreg(vcpu->arch.eff_db[1], 1);
		set_debugreg(vcpu->arch.eff_db[2], 2);
		set_debugreg(vcpu->arch.eff_db[3], 3);
	}

	trace_kvm_entry(vcpu->vcpu_id);
	kvm_x86_ops->run(vcpu);

	/*
	 * If the guest has used debug registers, at least dr7
	 * will be disabled while returning to the host.
	 * If we don't have active breakpoints in the host, we don't
	 * care about the messed up debug address registers. But if
	 * we have some of them active, restore the old state.
	 */
	if (hw_breakpoint_active())
		hw_breakpoint_restore();

	kvm_get_msr(vcpu, MSR_IA32_TSC, &vcpu->arch.last_guest_tsc);

	atomic_set(&vcpu->guest_mode, 0);
	smp_wmb();
	local_irq_enable();

	++vcpu->stat.exits;

	/*
	 * We must have an instruction between local_irq_enable() and
	 * kvm_guest_exit(), so the timer interrupt isn't delayed by
	 * the interrupt shadow. The stat.exits increment will do nicely.
	 * But we need to prevent reordering, hence this barrier():
	 */
	barrier();

	kvm_guest_exit();

	preempt_enable();

	vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

	/*
	 * Profile KVM exit RIPs:
	 */
	if (unlikely(prof_on == KVM_PROFILING)) {
		unsigned long rip = kvm_rip_read(vcpu);
		profile_hit(KVM_PROFILING, (void *)rip);
	}

	kvm_lapic_sync_from_vapic(vcpu);

	r = kvm_x86_ops->handle_exit(vcpu);
out:
	return r;
}
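
/*
 * Outer vcpu run loop: keep re-entering the guest while vcpu_enter_guest()
 * returns a positive value, block while the vcpu is halted, and drop back to
 * userspace on signals, userspace interrupt-window requests or other exit
 * conditions.
 */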
static int __vcpu_run(struct kvm_vcpu *vcpu)
{
	int r;
	struct kvm *kvm = vcpu->kvm;

	if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_SIPI_RECEIVED)) {
		pr_debug("vcpu %d received sipi with vector # %x\n",
			 vcpu->vcpu_id, vcpu->arch.sipi_vector);
		kvm_lapic_reset(vcpu);
		r = kvm_arch_vcpu_reset(vcpu);
		if (r)
			return r;
		vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
	}

	vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
	vapic_enter(vcpu);

	r = 1;
	while (r > 0) {
		if (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE &&
		    !vcpu->arch.apf.halted)
			r = vcpu_enter_guest(vcpu);
		else {
			srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
			kvm_vcpu_block(vcpu);
			vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
			if (kvm_check_request(KVM_REQ_UNHALT, vcpu)) {
				switch (vcpu->arch.mp_state) {
				case KVM_MP_STATE_HALTED:
					vcpu->arch.mp_state =
						KVM_MP_STATE_RUNNABLE;
					/* fall through */
				case KVM_MP_STATE_RUNNABLE:
					vcpu->arch.apf.halted = false;
					break;
				case KVM_MP_STATE_SIPI_RECEIVED:
				default:
					r = -EINTR;
					break;
				}
			}
		}

		if (r <= 0)
			break;

		clear_bit(KVM_REQ_PENDING_TIMER, &vcpu->requests);
		if (kvm_cpu_has_pending_timer(vcpu))
			kvm_inject_pending_timer_irqs(vcpu);

		if (dm_request_for_irq_injection(vcpu)) {
			r = -EINTR;
			vcpu->run->exit_reason = KVM_EXIT_INTR;
			++vcpu->stat.request_irq_exits;
		}

		kvm_check_async_pf_completion(vcpu);

		if (signal_pending(current)) {
			r = -EINTR;
			vcpu->run->exit_reason = KVM_EXIT_INTR;
			++vcpu->stat.signal_exits;
		}
		if (need_resched()) {
			srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
			kvm_resched(vcpu);
			vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
		}
	}

	srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
	vapic_exit(vcpu);

	return r;
}
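
/*
 * Entry point for the KVM_RUN ioctl: finish any interrupted mmio/pio
 * emulation, resync the TPR for a userspace irqchip, then run the vcpu and
 * save run state back to the shared kvm_run structure on the way out.
 */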
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	int r;
	sigset_t sigsaved;

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

	if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_UNINITIALIZED)) {
		kvm_vcpu_block(vcpu);
		clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
		r = -EAGAIN;
		goto out;
	}

	/* re-sync apic's tpr */
	if (!irqchip_in_kernel(vcpu->kvm))
		kvm_set_cr8(vcpu, kvm_run->cr8);

	if (vcpu->arch.pio.count || vcpu->mmio_needed) {
		if (vcpu->mmio_needed) {
			memcpy(vcpu->mmio_data, kvm_run->mmio.data, 8);
			vcpu->mmio_read_completed = 1;
			vcpu->mmio_needed = 0;
		}
		vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
		r = emulate_instruction(vcpu, 0, 0, EMULTYPE_NO_DECODE);
		srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
		if (r != EMULATE_DONE) {
			r = 0;
			goto out;
		}
	}
	if (kvm_run->exit_reason == KVM_EXIT_HYPERCALL)
		kvm_register_write(vcpu, VCPU_REGS_RAX,
				   kvm_run->hypercall.ret);

	r = __vcpu_run(vcpu);

out:
	post_kvm_run_save(vcpu);
	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

	return r;
}
int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	regs->rax = kvm_register_read(vcpu, VCPU_REGS_RAX);
	regs->rbx = kvm_register_read(vcpu, VCPU_REGS_RBX);
	regs->rcx = kvm_register_read(vcpu, VCPU_REGS_RCX);
	regs->rdx = kvm_register_read(vcpu, VCPU_REGS_RDX);
	regs->rsi = kvm_register_read(vcpu, VCPU_REGS_RSI);
	regs->rdi = kvm_register_read(vcpu, VCPU_REGS_RDI);
	regs->rsp = kvm_register_read(vcpu, VCPU_REGS_RSP);
	regs->rbp = kvm_register_read(vcpu, VCPU_REGS_RBP);
#ifdef CONFIG_X86_64
	regs->r8 = kvm_register_read(vcpu, VCPU_REGS_R8);
	regs->r9 = kvm_register_read(vcpu, VCPU_REGS_R9);
	regs->r10 = kvm_register_read(vcpu, VCPU_REGS_R10);
	regs->r11 = kvm_register_read(vcpu, VCPU_REGS_R11);
	regs->r12 = kvm_register_read(vcpu, VCPU_REGS_R12);
	regs->r13 = kvm_register_read(vcpu, VCPU_REGS_R13);
	regs->r14 = kvm_register_read(vcpu, VCPU_REGS_R14);
	regs->r15 = kvm_register_read(vcpu, VCPU_REGS_R15);
#endif

	regs->rip = kvm_rip_read(vcpu);
	regs->rflags = kvm_get_rflags(vcpu);

	return 0;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	kvm_register_write(vcpu, VCPU_REGS_RAX, regs->rax);
	kvm_register_write(vcpu, VCPU_REGS_RBX, regs->rbx);
	kvm_register_write(vcpu, VCPU_REGS_RCX, regs->rcx);
	kvm_register_write(vcpu, VCPU_REGS_RDX, regs->rdx);
	kvm_register_write(vcpu, VCPU_REGS_RSI, regs->rsi);
	kvm_register_write(vcpu, VCPU_REGS_RDI, regs->rdi);
	kvm_register_write(vcpu, VCPU_REGS_RSP, regs->rsp);
	kvm_register_write(vcpu, VCPU_REGS_RBP, regs->rbp);
#ifdef CONFIG_X86_64
	kvm_register_write(vcpu, VCPU_REGS_R8, regs->r8);
	kvm_register_write(vcpu, VCPU_REGS_R9, regs->r9);
	kvm_register_write(vcpu, VCPU_REGS_R10, regs->r10);
	kvm_register_write(vcpu, VCPU_REGS_R11, regs->r11);
	kvm_register_write(vcpu, VCPU_REGS_R12, regs->r12);
	kvm_register_write(vcpu, VCPU_REGS_R13, regs->r13);
	kvm_register_write(vcpu, VCPU_REGS_R14, regs->r14);
	kvm_register_write(vcpu, VCPU_REGS_R15, regs->r15);
#endif

	kvm_rip_write(vcpu, regs->rip);
	kvm_set_rflags(vcpu, regs->rflags);

	vcpu->arch.exception.pending = false;

	kvm_make_request(KVM_REQ_EVENT, vcpu);

	return 0;
}

void kvm_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l)
{
	struct kvm_segment cs;

	kvm_get_segment(vcpu, &cs, VCPU_SREG_CS);
	*db = cs.db;
	*l = cs.l;
}
EXPORT_SYMBOL_GPL(kvm_get_cs_db_l_bits);
int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	struct desc_ptr dt;

	kvm_get_segment(vcpu, &sregs->cs, VCPU_SREG_CS);
	kvm_get_segment(vcpu, &sregs->ds, VCPU_SREG_DS);
	kvm_get_segment(vcpu, &sregs->es, VCPU_SREG_ES);
	kvm_get_segment(vcpu, &sregs->fs, VCPU_SREG_FS);
	kvm_get_segment(vcpu, &sregs->gs, VCPU_SREG_GS);
	kvm_get_segment(vcpu, &sregs->ss, VCPU_SREG_SS);

	kvm_get_segment(vcpu, &sregs->tr, VCPU_SREG_TR);
	kvm_get_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR);

	kvm_x86_ops->get_idt(vcpu, &dt);
	sregs->idt.limit = dt.size;
	sregs->idt.base = dt.address;
	kvm_x86_ops->get_gdt(vcpu, &dt);
	sregs->gdt.limit = dt.size;
	sregs->gdt.base = dt.address;

	sregs->cr0 = kvm_read_cr0(vcpu);
	sregs->cr2 = vcpu->arch.cr2;
	sregs->cr3 = vcpu->arch.cr3;
	sregs->cr4 = kvm_read_cr4(vcpu);
	sregs->cr8 = kvm_get_cr8(vcpu);
	sregs->efer = vcpu->arch.efer;
	sregs->apic_base = kvm_get_apic_base(vcpu);

	memset(sregs->interrupt_bitmap, 0, sizeof sregs->interrupt_bitmap);

	if (vcpu->arch.interrupt.pending && !vcpu->arch.interrupt.soft)
		set_bit(vcpu->arch.interrupt.nr,
			(unsigned long *)sregs->interrupt_bitmap);

	return 0;
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	mp_state->mp_state = vcpu->arch.mp_state;
	return 0;
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	vcpu->arch.mp_state = mp_state->mp_state;
	kvm_make_request(KVM_REQ_EVENT, vcpu);
	return 0;
}
int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason,
		    bool has_error_code, u32 error_code)
{
	struct decode_cache *c = &vcpu->arch.emulate_ctxt.decode;
	int ret;

	init_emulate_ctxt(vcpu);

	ret = emulator_task_switch(&vcpu->arch.emulate_ctxt,
				   tss_selector, reason, has_error_code,
				   error_code);

	if (ret)
		return EMULATE_FAIL;

	memcpy(vcpu->arch.regs, c->regs, sizeof c->regs);
	kvm_rip_write(vcpu, vcpu->arch.emulate_ctxt.eip);
	kvm_x86_ops->set_rflags(vcpu, vcpu->arch.emulate_ctxt.eflags);
	kvm_make_request(KVM_REQ_EVENT, vcpu);
	return EMULATE_DONE;
}
EXPORT_SYMBOL_GPL(kvm_task_switch);
int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	int mmu_reset_needed = 0;
	int pending_vec, max_bits;
	struct desc_ptr dt;

	dt.size = sregs->idt.limit;
	dt.address = sregs->idt.base;
	kvm_x86_ops->set_idt(vcpu, &dt);
	dt.size = sregs->gdt.limit;
	dt.address = sregs->gdt.base;
	kvm_x86_ops->set_gdt(vcpu, &dt);

	vcpu->arch.cr2 = sregs->cr2;
	mmu_reset_needed |= vcpu->arch.cr3 != sregs->cr3;
	vcpu->arch.cr3 = sregs->cr3;

	kvm_set_cr8(vcpu, sregs->cr8);

	mmu_reset_needed |= vcpu->arch.efer != sregs->efer;
	kvm_x86_ops->set_efer(vcpu, sregs->efer);
	kvm_set_apic_base(vcpu, sregs->apic_base);

	mmu_reset_needed |= kvm_read_cr0(vcpu) != sregs->cr0;
	kvm_x86_ops->set_cr0(vcpu, sregs->cr0);
	vcpu->arch.cr0 = sregs->cr0;

	mmu_reset_needed |= kvm_read_cr4(vcpu) != sregs->cr4;
	kvm_x86_ops->set_cr4(vcpu, sregs->cr4);
	if (sregs->cr4 & X86_CR4_OSXSAVE)
		update_cpuid(vcpu);
	if (!is_long_mode(vcpu) && is_pae(vcpu)) {
		load_pdptrs(vcpu, vcpu->arch.walk_mmu, vcpu->arch.cr3);
		mmu_reset_needed = 1;
	}

	if (mmu_reset_needed)
		kvm_mmu_reset_context(vcpu);

	max_bits = (sizeof sregs->interrupt_bitmap) << 3;
	pending_vec = find_first_bit(
		(const unsigned long *)sregs->interrupt_bitmap, max_bits);
	if (pending_vec < max_bits) {
		kvm_queue_interrupt(vcpu, pending_vec, false);
		pr_debug("Set back pending irq %d\n", pending_vec);
		if (irqchip_in_kernel(vcpu->kvm))
			kvm_pic_clear_isr_ack(vcpu->kvm);
	}

	kvm_set_segment(vcpu, &sregs->cs, VCPU_SREG_CS);
	kvm_set_segment(vcpu, &sregs->ds, VCPU_SREG_DS);
	kvm_set_segment(vcpu, &sregs->es, VCPU_SREG_ES);
	kvm_set_segment(vcpu, &sregs->fs, VCPU_SREG_FS);
	kvm_set_segment(vcpu, &sregs->gs, VCPU_SREG_GS);
	kvm_set_segment(vcpu, &sregs->ss, VCPU_SREG_SS);

	kvm_set_segment(vcpu, &sregs->tr, VCPU_SREG_TR);
	kvm_set_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR);

	update_cr8_intercept(vcpu);

	/* Older userspace won't unhalt the vcpu on reset. */
	if (kvm_vcpu_is_bsp(vcpu) && kvm_rip_read(vcpu) == 0xfff0 &&
	    sregs->cs.selector == 0xf000 && sregs->cs.base == 0xffff0000 &&
	    !is_protmode(vcpu))
		vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;

	kvm_make_request(KVM_REQ_EVENT, vcpu);

	return 0;
}
int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	unsigned long rflags;
	int i, r;

	if (dbg->control & (KVM_GUESTDBG_INJECT_DB | KVM_GUESTDBG_INJECT_BP)) {
		r = -EBUSY;
		if (vcpu->arch.exception.pending)
			goto out;
		if (dbg->control & KVM_GUESTDBG_INJECT_DB)
			kvm_queue_exception(vcpu, DB_VECTOR);
		else
			kvm_queue_exception(vcpu, BP_VECTOR);
	}

	/*
	 * Read rflags as long as potentially injected trace flags are still
	 * filtered out.
	 */
	rflags = kvm_get_rflags(vcpu);

	vcpu->guest_debug = dbg->control;
	if (!(vcpu->guest_debug & KVM_GUESTDBG_ENABLE))
		vcpu->guest_debug = 0;

	if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) {
		for (i = 0; i < KVM_NR_DB_REGS; ++i)
			vcpu->arch.eff_db[i] = dbg->arch.debugreg[i];
		vcpu->arch.switch_db_regs =
			(dbg->arch.debugreg[7] & DR7_BP_EN_MASK);
	} else {
		for (i = 0; i < KVM_NR_DB_REGS; i++)
			vcpu->arch.eff_db[i] = vcpu->arch.db[i];
		vcpu->arch.switch_db_regs = (vcpu->arch.dr7 & DR7_BP_EN_MASK);
	}

	if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
		vcpu->arch.singlestep_rip = kvm_rip_read(vcpu) +
			get_segment_base(vcpu, VCPU_SREG_CS);

	/*
	 * Trigger an rflags update that will inject or remove the trace
	 * flags.
	 */
	kvm_set_rflags(vcpu, rflags);

	kvm_x86_ops->set_guest_debug(vcpu, dbg);

	r = 0;

out:
	return r;
}
/*
 * Translate a guest virtual address to a guest physical address.
 */
int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	unsigned long vaddr = tr->linear_address;
	gpa_t gpa;
	int idx;

	idx = srcu_read_lock(&vcpu->kvm->srcu);
	gpa = kvm_mmu_gva_to_gpa_system(vcpu, vaddr, NULL);
	srcu_read_unlock(&vcpu->kvm->srcu, idx);
	tr->physical_address = gpa;
	tr->valid = gpa != UNMAPPED_GVA;
	tr->writeable = 1;
	tr->usermode = 0;

	return 0;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	struct i387_fxsave_struct *fxsave =
			&vcpu->arch.guest_fpu.state->fxsave;

	memcpy(fpu->fpr, fxsave->st_space, 128);
	fpu->fcw = fxsave->cwd;
	fpu->fsw = fxsave->swd;
	fpu->ftwx = fxsave->twd;
	fpu->last_opcode = fxsave->fop;
	fpu->last_ip = fxsave->rip;
	fpu->last_dp = fxsave->rdp;
	memcpy(fpu->xmm, fxsave->xmm_space, sizeof fxsave->xmm_space);

	return 0;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	struct i387_fxsave_struct *fxsave =
			&vcpu->arch.guest_fpu.state->fxsave;

	memcpy(fxsave->st_space, fpu->fpr, 128);
	fxsave->cwd = fpu->fcw;
	fxsave->swd = fpu->fsw;
	fxsave->twd = fpu->ftwx;
	fxsave->fop = fpu->last_opcode;
	fxsave->rip = fpu->last_ip;
	fxsave->rdp = fpu->last_dp;
	memcpy(fxsave->xmm_space, fpu->xmm, sizeof fxsave->xmm_space);

	return 0;
}
int fx_init(struct kvm_vcpu *vcpu)
{
	int err;

	err = fpu_alloc(&vcpu->arch.guest_fpu);
	if (err)
		return err;

	fpu_finit(&vcpu->arch.guest_fpu);

	/*
	 * Ensure guest xcr0 is valid for loading
	 */
	vcpu->arch.xcr0 = XSTATE_FP;

	vcpu->arch.cr0 |= X86_CR0_ET;

	return 0;
}
EXPORT_SYMBOL_GPL(fx_init);

static void fx_free(struct kvm_vcpu *vcpu)
{
	fpu_free(&vcpu->arch.guest_fpu);
}

void kvm_load_guest_fpu(struct kvm_vcpu *vcpu)
{
	if (vcpu->guest_fpu_loaded)
		return;

	/*
	 * Restore all possible states in the guest,
	 * and assume host would use all available bits.
	 * Guest xcr0 would be loaded later.
	 */
	kvm_put_guest_xcr0(vcpu);
	vcpu->guest_fpu_loaded = 1;
	unlazy_fpu(current);
	fpu_restore_checking(&vcpu->arch.guest_fpu);
	trace_kvm_fpu(1);
}

void kvm_put_guest_fpu(struct kvm_vcpu *vcpu)
{
	kvm_put_guest_xcr0(vcpu);

	if (!vcpu->guest_fpu_loaded)
		return;

	vcpu->guest_fpu_loaded = 0;
	fpu_save_init(&vcpu->arch.guest_fpu);
	++vcpu->stat.fpu_reload;
	kvm_make_request(KVM_REQ_DEACTIVATE_FPU, vcpu);
	trace_kvm_fpu(0);
}
void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
{
	if (vcpu->arch.time_page) {
		kvm_release_page_dirty(vcpu->arch.time_page);
		vcpu->arch.time_page = NULL;
	}

	free_cpumask_var(vcpu->arch.wbinvd_dirty_mask);
	fx_free(vcpu);
	kvm_x86_ops->vcpu_free(vcpu);
}

struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
				      unsigned int id)
{
	if (check_tsc_unstable() && atomic_read(&kvm->online_vcpus) != 0)
		printk_once(KERN_WARNING
			    "kvm: SMP vm created on host with unstable TSC; "
			    "guest TSC will not be reliable\n");
	return kvm_x86_ops->vcpu_create(kvm, id);
}

int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
	int r;

	vcpu->arch.mtrr_state.have_fixed = 1;
	vcpu_load(vcpu);
	r = kvm_arch_vcpu_reset(vcpu);
	if (r == 0)
		r = kvm_mmu_setup(vcpu);
	vcpu_put(vcpu);
	if (r < 0)
		goto free_vcpu;

	return 0;
free_vcpu:
	kvm_x86_ops->vcpu_free(vcpu);
	return r;
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	vcpu->arch.apf.msr_val = 0;

	vcpu_load(vcpu);
	kvm_mmu_unload(vcpu);
	vcpu_put(vcpu);

	fx_free(vcpu);
	kvm_x86_ops->vcpu_free(vcpu);
}

int kvm_arch_vcpu_reset(struct kvm_vcpu *vcpu)
{
	vcpu->arch.nmi_pending = false;
	vcpu->arch.nmi_injected = false;

	vcpu->arch.switch_db_regs = 0;
	memset(vcpu->arch.db, 0, sizeof(vcpu->arch.db));
	vcpu->arch.dr6 = DR6_FIXED_1;
	vcpu->arch.dr7 = DR7_FIXED_1;

	kvm_make_request(KVM_REQ_EVENT, vcpu);
	vcpu->arch.apf.msr_val = 0;

	kvm_clear_async_pf_completion_queue(vcpu);
	kvm_async_pf_hash_reset(vcpu);
	vcpu->arch.apf.halted = false;

	return kvm_x86_ops->vcpu_reset(vcpu);
}
int kvm_arch_hardware_enable(void *garbage)
{
	struct kvm *kvm;
	struct kvm_vcpu *vcpu;
	int i;

	kvm_shared_msr_cpu_online();
	list_for_each_entry(kvm, &vm_list, vm_list)
		kvm_for_each_vcpu(i, vcpu, kvm)
			if (vcpu->cpu == smp_processor_id())
				kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
	return kvm_x86_ops->hardware_enable(garbage);
}

void kvm_arch_hardware_disable(void *garbage)
{
	kvm_x86_ops->hardware_disable(garbage);
	drop_user_return_notifiers(garbage);
}

int kvm_arch_hardware_setup(void)
{
	return kvm_x86_ops->hardware_setup();
}

void kvm_arch_hardware_unsetup(void)
{
	kvm_x86_ops->hardware_unsetup();
}

void kvm_arch_check_processor_compat(void *rtn)
{
	kvm_x86_ops->check_processor_compatibility(rtn);
}
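
/*
 * Per-vcpu initialisation: allocate the pio bounce page, the MMU state, the
 * in-kernel local APIC (when the irqchip lives in the kernel) and the MCE
 * bank array, unwinding each allocation on failure.
 */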
int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
	struct page *page;
	struct kvm *kvm;
	int r;

	BUG_ON(vcpu->kvm == NULL);
	kvm = vcpu->kvm;

	vcpu->arch.emulate_ctxt.ops = &emulate_ops;
	vcpu->arch.walk_mmu = &vcpu->arch.mmu;
	vcpu->arch.mmu.root_hpa = INVALID_PAGE;
	vcpu->arch.mmu.translate_gpa = translate_gpa;
	vcpu->arch.nested_mmu.translate_gpa = translate_nested_gpa;
	if (!irqchip_in_kernel(kvm) || kvm_vcpu_is_bsp(vcpu))
		vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
	else
		vcpu->arch.mp_state = KVM_MP_STATE_UNINITIALIZED;

	page = alloc_page(GFP_KERNEL | __GFP_ZERO);
	if (!page) {
		r = -ENOMEM;
		goto fail;
	}
	vcpu->arch.pio_data = page_address(page);

	if (!kvm->arch.virtual_tsc_khz)
		kvm_arch_set_tsc_khz(kvm, max_tsc_khz);

	r = kvm_mmu_create(vcpu);
	if (r < 0)
		goto fail_free_pio_data;

	if (irqchip_in_kernel(kvm)) {
		r = kvm_create_lapic(vcpu);
		if (r < 0)
			goto fail_mmu_destroy;
	}

	vcpu->arch.mce_banks = kzalloc(KVM_MAX_MCE_BANKS * sizeof(u64) * 4,
				       GFP_KERNEL);
	if (!vcpu->arch.mce_banks) {
		r = -ENOMEM;
		goto fail_free_lapic;
	}
	vcpu->arch.mcg_cap = KVM_MAX_MCE_BANKS;

	if (!zalloc_cpumask_var(&vcpu->arch.wbinvd_dirty_mask, GFP_KERNEL))
		goto fail_free_mce_banks;

	kvm_async_pf_hash_reset(vcpu);

	return 0;
fail_free_mce_banks:
	kfree(vcpu->arch.mce_banks);
fail_free_lapic:
	kvm_free_lapic(vcpu);
fail_mmu_destroy:
	kvm_mmu_destroy(vcpu);
fail_free_pio_data:
	free_page((unsigned long)vcpu->arch.pio_data);
fail:
	return r;
}

void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	int idx;

	kfree(vcpu->arch.mce_banks);
	kvm_free_lapic(vcpu);
	idx = srcu_read_lock(&vcpu->kvm->srcu);
	kvm_mmu_destroy(vcpu);
	srcu_read_unlock(&vcpu->kvm->srcu, idx);
	free_page((unsigned long)vcpu->arch.pio_data);
}

int kvm_arch_init_vm(struct kvm *kvm)
{
	INIT_LIST_HEAD(&kvm->arch.active_mmu_pages);
	INIT_LIST_HEAD(&kvm->arch.assigned_dev_head);

	/* Reserve bit 0 of irq_sources_bitmap for userspace irq source */
	set_bit(KVM_USERSPACE_IRQ_SOURCE_ID, &kvm->arch.irq_sources_bitmap);

	spin_lock_init(&kvm->arch.tsc_write_lock);

	return 0;
}
static void kvm_unload_vcpu_mmu(struct kvm_vcpu *vcpu)
{
	vcpu_load(vcpu);
	kvm_mmu_unload(vcpu);
	vcpu_put(vcpu);
}

static void kvm_free_vcpus(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

	/*
	 * Unpin any mmu pages first.
	 */
	kvm_for_each_vcpu(i, vcpu, kvm) {
		kvm_clear_async_pf_completion_queue(vcpu);
		kvm_unload_vcpu_mmu(vcpu);
	}
	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_arch_vcpu_free(vcpu);

	mutex_lock(&kvm->lock);
	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
		kvm->vcpus[i] = NULL;

	atomic_set(&kvm->online_vcpus, 0);
	mutex_unlock(&kvm->lock);
}

void kvm_arch_sync_events(struct kvm *kvm)
{
	kvm_free_all_assigned_devices(kvm);
	kvm_free_pit(kvm);
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
	kvm_iommu_unmap_guest(kvm);
	kfree(kvm->arch.vpic);
	kfree(kvm->arch.vioapic);
	kvm_free_vcpus(kvm);
	if (kvm->arch.apic_access_page)
		put_page(kvm->arch.apic_access_page);
	if (kvm->arch.ept_identity_pagetable)
		put_page(kvm->arch.ept_identity_pagetable);
}
int kvm_arch_prepare_memory_region(struct kvm *kvm,
				struct kvm_memory_slot *memslot,
				struct kvm_memory_slot old,
				struct kvm_userspace_memory_region *mem,
				int user_alloc)
{
	int npages = memslot->npages;
	int map_flags = MAP_PRIVATE | MAP_ANONYMOUS;

	/* Prevent internal slot pages from being moved by fork()/COW. */
	if (memslot->id >= KVM_MEMORY_SLOTS)
		map_flags = MAP_SHARED | MAP_ANONYMOUS;

	/*
	 * To keep backward compatibility with older userspace,
	 * x86 needs to handle the !user_alloc case.
	 */
	if (!user_alloc) {
		if (npages && !old.rmap) {
			unsigned long userspace_addr;

			down_write(&current->mm->mmap_sem);
			userspace_addr = do_mmap(NULL, 0,
						 npages * PAGE_SIZE,
						 PROT_READ | PROT_WRITE,
						 map_flags,
						 0);
			up_write(&current->mm->mmap_sem);

			if (IS_ERR((void *)userspace_addr))
				return PTR_ERR((void *)userspace_addr);

			memslot->userspace_addr = userspace_addr;
		}
	}

	return 0;
}
void kvm_arch_commit_memory_region(struct kvm *kvm,
				struct kvm_userspace_memory_region *mem,
				struct kvm_memory_slot old,
				int user_alloc)
{
	int npages = mem->memory_size >> PAGE_SHIFT;

	if (!user_alloc && !old.user_alloc && old.rmap && !npages) {
		int ret;

		down_write(&current->mm->mmap_sem);
		ret = do_munmap(current->mm, old.userspace_addr,
				old.npages * PAGE_SIZE);
		up_write(&current->mm->mmap_sem);
		if (ret < 0)
			printk(KERN_WARNING
			       "kvm_vm_ioctl_set_memory_region: "
			       "failed to munmap memory\n");
	}

	spin_lock(&kvm->mmu_lock);
	if (!kvm->arch.n_requested_mmu_pages) {
		unsigned int nr_mmu_pages = kvm_mmu_calculate_mmu_pages(kvm);
		kvm_mmu_change_mmu_pages(kvm, nr_mmu_pages);
	}

	kvm_mmu_slot_remove_write_access(kvm, mem->slot);
	spin_unlock(&kvm->mmu_lock);
}

void kvm_arch_flush_shadow(struct kvm *kvm)
{
	kvm_mmu_zap_all(kvm);
	kvm_reload_remote_mmus(kvm);
}
int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
	return (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE &&
		!vcpu->arch.apf.halted)
		|| !list_empty_careful(&vcpu->async_pf.done)
		|| vcpu->arch.mp_state == KVM_MP_STATE_SIPI_RECEIVED
		|| vcpu->arch.nmi_pending ||
		(kvm_arch_interrupt_allowed(vcpu) &&
		 kvm_cpu_has_interrupt(vcpu));
}

void kvm_vcpu_kick(struct kvm_vcpu *vcpu)
{
	int me;
	int cpu = vcpu->cpu;

	if (waitqueue_active(&vcpu->wq)) {
		wake_up_interruptible(&vcpu->wq);
		++vcpu->stat.halt_wakeup;
	}

	me = get_cpu();
	if (cpu != me && (unsigned)cpu < nr_cpu_ids && cpu_online(cpu))
		if (atomic_xchg(&vcpu->guest_mode, 0))
			smp_send_reschedule(cpu);
	put_cpu();
}

int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu)
{
	return kvm_x86_ops->interrupt_allowed(vcpu);
}

bool kvm_is_linear_rip(struct kvm_vcpu *vcpu, unsigned long linear_rip)
{
	unsigned long current_rip = kvm_rip_read(vcpu) +
		get_segment_base(vcpu, VCPU_SREG_CS);

	return current_rip == linear_rip;
}
EXPORT_SYMBOL_GPL(kvm_is_linear_rip);
unsigned long kvm_get_rflags(struct kvm_vcpu *vcpu)
{
	unsigned long rflags;

	rflags = kvm_x86_ops->get_rflags(vcpu);
	if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
		rflags &= ~X86_EFLAGS_TF;
	return rflags;
}
EXPORT_SYMBOL_GPL(kvm_get_rflags);

void kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
{
	if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP &&
	    kvm_is_linear_rip(vcpu, vcpu->arch.singlestep_rip))
		rflags |= X86_EFLAGS_TF;
	kvm_x86_ops->set_rflags(vcpu, rflags);
	kvm_make_request(KVM_REQ_EVENT, vcpu);
}
EXPORT_SYMBOL_GPL(kvm_set_rflags);

void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu, struct kvm_async_pf *work)
{
	int r;

	if (!vcpu->arch.mmu.direct_map || !work->arch.direct_map ||
	    is_error_page(work->page))
		return;

	r = kvm_mmu_reload(vcpu);
	if (unlikely(r))
		return;

	vcpu->arch.mmu.page_fault(vcpu, work->gva, 0, true);
}
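
/*
 * The async page-fault gfn table is a small open-addressed hash with linear
 * probing; ~0 marks an empty slot.  It records gfns for which a "page not
 * present" notification has been delivered, so the matching "page ready"
 * event can later be paired with (and removed from) the same entry.
 */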
static inline u32 kvm_async_pf_hash_fn(gfn_t gfn)
{
	return hash_32(gfn & 0xffffffff, order_base_2(ASYNC_PF_PER_VCPU));
}

static inline u32 kvm_async_pf_next_probe(u32 key)
{
	return (key + 1) & (roundup_pow_of_two(ASYNC_PF_PER_VCPU) - 1);
}

static void kvm_add_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
{
	u32 key = kvm_async_pf_hash_fn(gfn);

	while (vcpu->arch.apf.gfns[key] != ~0)
		key = kvm_async_pf_next_probe(key);

	vcpu->arch.apf.gfns[key] = gfn;
}

static u32 kvm_async_pf_gfn_slot(struct kvm_vcpu *vcpu, gfn_t gfn)
{
	int i;
	u32 key = kvm_async_pf_hash_fn(gfn);

	for (i = 0; i < roundup_pow_of_two(ASYNC_PF_PER_VCPU) &&
		     (vcpu->arch.apf.gfns[key] != gfn &&
		      vcpu->arch.apf.gfns[key] != ~0); i++)
		key = kvm_async_pf_next_probe(key);

	return key;
}

bool kvm_find_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
{
	return vcpu->arch.apf.gfns[kvm_async_pf_gfn_slot(vcpu, gfn)] == gfn;
}

static void kvm_del_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
{
	u32 i, j, k;

	i = j = kvm_async_pf_gfn_slot(vcpu, gfn);
	while (true) {
		vcpu->arch.apf.gfns[i] = ~0;
		do {
			j = kvm_async_pf_next_probe(j);
			if (vcpu->arch.apf.gfns[j] == ~0)
				return;
			k = kvm_async_pf_hash_fn(vcpu->arch.apf.gfns[j]);
			/*
			 * k lies cyclically in ]i,j]
			 * |    i.k.j |
			 * |....j i.k.| or |.k..j i...|
			 */
		} while ((i <= j) ? (i < k && k <= j) : (i < k || k <= j));
		vcpu->arch.apf.gfns[i] = vcpu->arch.apf.gfns[j];
		i = j;
	}
}

static int apf_put_user(struct kvm_vcpu *vcpu, u32 val)
{
	return kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.apf.data, &val,
				      sizeof(val));
}
void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
				     struct kvm_async_pf *work)
{
	struct x86_exception fault;

	trace_kvm_async_pf_not_present(work->arch.token, work->gva);
	kvm_add_async_pf_gfn(vcpu, work->arch.gfn);

	if (!(vcpu->arch.apf.msr_val & KVM_ASYNC_PF_ENABLED) ||
	    (vcpu->arch.apf.send_user_only &&
	     kvm_x86_ops->get_cpl(vcpu) == 0))
		kvm_make_request(KVM_REQ_APF_HALT, vcpu);
	else if (!apf_put_user(vcpu, KVM_PV_REASON_PAGE_NOT_PRESENT)) {
		fault.vector = PF_VECTOR;
		fault.error_code_valid = true;
		fault.error_code = 0;
		fault.nested_page_fault = false;
		fault.address = work->arch.token;
		kvm_inject_page_fault(vcpu, &fault);
	}
}

void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
				 struct kvm_async_pf *work)
{
	struct x86_exception fault;

	trace_kvm_async_pf_ready(work->arch.token, work->gva);
	if (is_error_page(work->page))
		work->arch.token = ~0; /* broadcast wakeup */
	else
		kvm_del_async_pf_gfn(vcpu, work->arch.gfn);

	if ((vcpu->arch.apf.msr_val & KVM_ASYNC_PF_ENABLED) &&
	    !apf_put_user(vcpu, KVM_PV_REASON_PAGE_READY)) {
		fault.vector = PF_VECTOR;
		fault.error_code_valid = true;
		fault.error_code = 0;
		fault.nested_page_fault = false;
		fault.address = work->arch.token;
		kvm_inject_page_fault(vcpu, &fault);
	}
	vcpu->arch.apf.halted = false;
}

bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu)
{
	if (!(vcpu->arch.apf.msr_val & KVM_ASYNC_PF_ENABLED))
		return true;
	else
		return !kvm_event_needs_reinjection(vcpu) &&
			kvm_x86_ops->interrupt_allowed(vcpu);
}
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_exit);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_inj_virq);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_page_fault);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_msr);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_cr);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_vmrun);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_vmexit);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_vmexit_inject);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_intr_vmexit);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_invlpga);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_skinit);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_intercepts);