/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * derived from drivers/kvm/kvm_main.c
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright (C) 2008 Qumranet, Inc.
 * Copyright IBM Corporation, 2008
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Avi Kivity   <avi@qumranet.com>
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Amit Shah    <amit.shah@qumranet.com>
 *   Ben-Ami Yassour <benami@il.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#include <linux/kvm_host.h>
#include "irq.h"
#include "mmu.h"
#include "i8254.h"
#include "tss.h"
#include "kvm_cache_regs.h"
#include "x86.h"
#include "cpuid.h"

#include <linux/clocksource.h>
#include <linux/interrupt.h>
#include <linux/kvm.h>
#include <linux/fs.h>
#include <linux/vmalloc.h>
#include <linux/module.h>
#include <linux/mman.h>
#include <linux/highmem.h>
#include <linux/iommu.h>
#include <linux/intel-iommu.h>
#include <linux/cpufreq.h>
#include <linux/user-return-notifier.h>
#include <linux/srcu.h>
#include <linux/slab.h>
#include <linux/perf_event.h>
#include <linux/uaccess.h>
#include <linux/hash.h>
#include <linux/pci.h>
#include <linux/timekeeper_internal.h>
#include <linux/pvclock_gtod.h>
#include <trace/events/kvm.h>

#define CREATE_TRACE_POINTS
#include "trace.h"

#include <asm/debugreg.h>
#include <asm/msr.h>
#include <asm/desc.h>
#include <asm/mtrr.h>
#include <asm/mce.h>
#include <asm/i387.h>
#include <asm/fpu-internal.h> /* Ugh! */
#include <asm/xcr.h>
#include <asm/pvclock.h>
#include <asm/div64.h>

#define MAX_IO_MSRS 256
#define KVM_MAX_MCE_BANKS 32
#define KVM_MCE_CAP_SUPPORTED (MCG_CTL_P | MCG_SER_P)

#define emul_to_vcpu(ctxt) \
	container_of(ctxt, struct kvm_vcpu, arch.emulate_ctxt)
/* EFER defaults:
 * - enable syscall by default because it's emulated by KVM
 * - enable LME and LMA by default on 64-bit KVM
 */
#ifdef CONFIG_X86_64
static
u64 __read_mostly efer_reserved_bits = ~((u64)(EFER_SCE | EFER_LME | EFER_LMA));
#else
static u64 __read_mostly efer_reserved_bits = ~((u64)EFER_SCE);
#endif

#define VM_STAT(x) offsetof(struct kvm, stat.x), KVM_STAT_VM
#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU

static void update_cr8_intercept(struct kvm_vcpu *vcpu);
static void process_nmi(struct kvm_vcpu *vcpu);

struct kvm_x86_ops *kvm_x86_ops;
EXPORT_SYMBOL_GPL(kvm_x86_ops);

static bool ignore_msrs = 0;
module_param(ignore_msrs, bool, S_IRUGO | S_IWUSR);

bool kvm_has_tsc_control;
EXPORT_SYMBOL_GPL(kvm_has_tsc_control);
u32 kvm_max_guest_tsc_khz;
EXPORT_SYMBOL_GPL(kvm_max_guest_tsc_khz);

/* tsc tolerance in parts per million - default to 1/2 of the NTP threshold */
static u32 tsc_tolerance_ppm = 250;
module_param(tsc_tolerance_ppm, uint, S_IRUGO | S_IWUSR);

#define KVM_NR_SHARED_MSRS 16

struct kvm_shared_msrs_global {
	int nr;
	u32 msrs[KVM_NR_SHARED_MSRS];
};

struct kvm_shared_msrs {
	struct user_return_notifier urn;
	bool registered;
	struct kvm_shared_msr_values {
		u64 host;
		u64 curr;
	} values[KVM_NR_SHARED_MSRS];
};

static struct kvm_shared_msrs_global __read_mostly shared_msrs_global;
static struct kvm_shared_msrs __percpu *shared_msrs;

struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "pf_fixed", VCPU_STAT(pf_fixed) },
	{ "pf_guest", VCPU_STAT(pf_guest) },
	{ "tlb_flush", VCPU_STAT(tlb_flush) },
	{ "invlpg", VCPU_STAT(invlpg) },
	{ "exits", VCPU_STAT(exits) },
	{ "io_exits", VCPU_STAT(io_exits) },
	{ "mmio_exits", VCPU_STAT(mmio_exits) },
	{ "signal_exits", VCPU_STAT(signal_exits) },
	{ "irq_window", VCPU_STAT(irq_window_exits) },
	{ "nmi_window", VCPU_STAT(nmi_window_exits) },
	{ "halt_exits", VCPU_STAT(halt_exits) },
	{ "halt_wakeup", VCPU_STAT(halt_wakeup) },
	{ "hypercalls", VCPU_STAT(hypercalls) },
	{ "request_irq", VCPU_STAT(request_irq_exits) },
	{ "irq_exits", VCPU_STAT(irq_exits) },
	{ "host_state_reload", VCPU_STAT(host_state_reload) },
	{ "efer_reload", VCPU_STAT(efer_reload) },
	{ "fpu_reload", VCPU_STAT(fpu_reload) },
	{ "insn_emulation", VCPU_STAT(insn_emulation) },
	{ "insn_emulation_fail", VCPU_STAT(insn_emulation_fail) },
	{ "irq_injections", VCPU_STAT(irq_injections) },
	{ "nmi_injections", VCPU_STAT(nmi_injections) },
	{ "mmu_shadow_zapped", VM_STAT(mmu_shadow_zapped) },
	{ "mmu_pte_write", VM_STAT(mmu_pte_write) },
	{ "mmu_pte_updated", VM_STAT(mmu_pte_updated) },
	{ "mmu_pde_zapped", VM_STAT(mmu_pde_zapped) },
	{ "mmu_flooded", VM_STAT(mmu_flooded) },
	{ "mmu_recycled", VM_STAT(mmu_recycled) },
	{ "mmu_cache_miss", VM_STAT(mmu_cache_miss) },
	{ "mmu_unsync", VM_STAT(mmu_unsync) },
	{ "remote_tlb_flush", VM_STAT(remote_tlb_flush) },
	{ "largepages", VM_STAT(lpages) },
	{ NULL }
};

u64 __read_mostly host_xcr0;

static int emulator_fix_hypercall(struct x86_emulate_ctxt *ctxt);

static inline void kvm_async_pf_hash_reset(struct kvm_vcpu *vcpu)
{
	int i;
	for (i = 0; i < roundup_pow_of_two(ASYNC_PF_PER_VCPU); i++)
		vcpu->arch.apf.gfns[i] = ~0;
}
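
/*
 * "Shared" (user-return) MSRs are host MSRs that KVM rewrites while running
 * a guest (on typical configurations, e.g. the SYSCALL-related MSRs) but that
 * only need their host values back before returning to userspace, not on
 * every exit to the host kernel.  kvm_set_shared_msr() registers a
 * user_return_notifier the first time a value diverges from the host's, and
 * kvm_on_user_return() below performs the deferred restore.
 */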
static void kvm_on_user_return(struct user_return_notifier *urn)
{
	unsigned slot;
	struct kvm_shared_msrs *locals
		= container_of(urn, struct kvm_shared_msrs, urn);
	struct kvm_shared_msr_values *values;

	for (slot = 0; slot < shared_msrs_global.nr; ++slot) {
		values = &locals->values[slot];
		if (values->host != values->curr) {
			wrmsrl(shared_msrs_global.msrs[slot], values->host);
			values->curr = values->host;
		}
	}
	locals->registered = false;
	user_return_notifier_unregister(urn);
}
static void shared_msr_update(unsigned slot, u32 msr)
{
	u64 value;
	unsigned int cpu = smp_processor_id();
	struct kvm_shared_msrs *smsr = per_cpu_ptr(shared_msrs, cpu);

	/* only read; nobody should modify it at this time, so no lock is
	 * needed */
	if (slot >= shared_msrs_global.nr) {
		printk(KERN_ERR "kvm: invalid MSR slot!");
		return;
	}
	rdmsrl_safe(msr, &value);
	smsr->values[slot].host = value;
	smsr->values[slot].curr = value;
}

void kvm_define_shared_msr(unsigned slot, u32 msr)
{
	if (slot >= shared_msrs_global.nr)
		shared_msrs_global.nr = slot + 1;
	shared_msrs_global.msrs[slot] = msr;
	/* make sure shared_msrs_global has been updated before it is read */
	smp_wmb();
}
EXPORT_SYMBOL_GPL(kvm_define_shared_msr);
static void kvm_shared_msr_cpu_online(void)
{
	unsigned i;

	for (i = 0; i < shared_msrs_global.nr; ++i)
		shared_msr_update(i, shared_msrs_global.msrs[i]);
}

void kvm_set_shared_msr(unsigned slot, u64 value, u64 mask)
{
	unsigned int cpu = smp_processor_id();
	struct kvm_shared_msrs *smsr = per_cpu_ptr(shared_msrs, cpu);

	if (((value ^ smsr->values[slot].curr) & mask) == 0)
		return;
	smsr->values[slot].curr = value;
	wrmsrl(shared_msrs_global.msrs[slot], value);
	if (!smsr->registered) {
		smsr->urn.on_user_return = kvm_on_user_return;
		user_return_notifier_register(&smsr->urn);
		smsr->registered = true;
	}
}
EXPORT_SYMBOL_GPL(kvm_set_shared_msr);

static void drop_user_return_notifiers(void *ignore)
{
	unsigned int cpu = smp_processor_id();
	struct kvm_shared_msrs *smsr = per_cpu_ptr(shared_msrs, cpu);

	if (smsr->registered)
		kvm_on_user_return(&smsr->urn);
}

u64 kvm_get_apic_base(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.apic_base;
}
EXPORT_SYMBOL_GPL(kvm_get_apic_base);

void kvm_set_apic_base(struct kvm_vcpu *vcpu, u64 data)
{
	/* TODO: reserve bits check */
	kvm_lapic_set_base(vcpu, data);
}
EXPORT_SYMBOL_GPL(kvm_set_apic_base);

asmlinkage void kvm_spurious_fault(void)
{
	/* Fault while not rebooting.  We want the trace. */
	BUG();
}
EXPORT_SYMBOL_GPL(kvm_spurious_fault);

#define EXCPT_BENIGN		0
#define EXCPT_CONTRIBUTORY	1
#define EXCPT_PF		2

static int exception_class(int vector)
{
	switch (vector) {
	case PF_VECTOR:
		return EXCPT_PF;
	case DE_VECTOR:
	case TS_VECTOR:
	case NP_VECTOR:
	case SS_VECTOR:
	case GP_VECTOR:
		return EXCPT_CONTRIBUTORY;
	default:
		break;
	}
	return EXCPT_BENIGN;
}
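
/*
 * The exception classes above feed kvm_multiple_exception(), which decides,
 * roughly following the SDM's double-fault rules (Table 5-5), what happens
 * when a second exception is raised while one is still pending:
 *  - anything raised while #DF is pending        -> triple fault (VM shutdown)
 *  - contributory followed by contributory       -> #DF
 *  - page fault followed by a non-benign fault   -> #DF
 *  - otherwise the new exception replaces the pending one.
 */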
static void kvm_multiple_exception(struct kvm_vcpu *vcpu,
		unsigned nr, bool has_error, u32 error_code,
		bool reinject)
{
	u32 prev_nr;
	int class1, class2;

	kvm_make_request(KVM_REQ_EVENT, vcpu);

	if (!vcpu->arch.exception.pending) {
	queue:
		vcpu->arch.exception.pending = true;
		vcpu->arch.exception.has_error_code = has_error;
		vcpu->arch.exception.nr = nr;
		vcpu->arch.exception.error_code = error_code;
		vcpu->arch.exception.reinject = reinject;
		return;
	}

	/* an exception is already pending; see how the new one combines with it */
	prev_nr = vcpu->arch.exception.nr;
	if (prev_nr == DF_VECTOR) {
		/* triple fault -> shutdown */
		kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
		return;
	}
	class1 = exception_class(prev_nr);
	class2 = exception_class(nr);
	if ((class1 == EXCPT_CONTRIBUTORY && class2 == EXCPT_CONTRIBUTORY)
		|| (class1 == EXCPT_PF && class2 != EXCPT_BENIGN)) {
		/* generate double fault per SDM Table 5-5 */
		vcpu->arch.exception.pending = true;
		vcpu->arch.exception.has_error_code = true;
		vcpu->arch.exception.nr = DF_VECTOR;
		vcpu->arch.exception.error_code = 0;
	} else
		/* replace previous exception with a new one in the hope
		   that instruction re-execution will regenerate the lost
		   exception */
		goto queue;
}
void kvm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr)
{
	kvm_multiple_exception(vcpu, nr, false, 0, false);
}
EXPORT_SYMBOL_GPL(kvm_queue_exception);

void kvm_requeue_exception(struct kvm_vcpu *vcpu, unsigned nr)
{
	kvm_multiple_exception(vcpu, nr, false, 0, true);
}
EXPORT_SYMBOL_GPL(kvm_requeue_exception);

void kvm_complete_insn_gp(struct kvm_vcpu *vcpu, int err)
{
	if (err)
		kvm_inject_gp(vcpu, 0);
	else
		kvm_x86_ops->skip_emulated_instruction(vcpu);
}
EXPORT_SYMBOL_GPL(kvm_complete_insn_gp);

void kvm_inject_page_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault)
{
	++vcpu->stat.pf_guest;
	vcpu->arch.cr2 = fault->address;
	kvm_queue_exception_e(vcpu, PF_VECTOR, fault->error_code);
}
EXPORT_SYMBOL_GPL(kvm_inject_page_fault);

void kvm_propagate_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault)
{
	if (mmu_is_nested(vcpu) && !fault->nested_page_fault)
		vcpu->arch.nested_mmu.inject_page_fault(vcpu, fault);
	else
		vcpu->arch.mmu.inject_page_fault(vcpu, fault);
}

void kvm_inject_nmi(struct kvm_vcpu *vcpu)
{
	atomic_inc(&vcpu->arch.nmi_queued);
	kvm_make_request(KVM_REQ_NMI, vcpu);
}
EXPORT_SYMBOL_GPL(kvm_inject_nmi);

void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code)
{
	kvm_multiple_exception(vcpu, nr, true, error_code, false);
}
EXPORT_SYMBOL_GPL(kvm_queue_exception_e);

void kvm_requeue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code)
{
	kvm_multiple_exception(vcpu, nr, true, error_code, true);
}
EXPORT_SYMBOL_GPL(kvm_requeue_exception_e);

/*
 * Checks if cpl <= required_cpl; if true, return true.  Otherwise queue
 * a #GP and return false.
 */
bool kvm_require_cpl(struct kvm_vcpu *vcpu, int required_cpl)
{
	if (kvm_x86_ops->get_cpl(vcpu) <= required_cpl)
		return true;
	kvm_queue_exception_e(vcpu, GP_VECTOR, 0);
	return false;
}
EXPORT_SYMBOL_GPL(kvm_require_cpl);
/*
 * This function will be used to read from the physical memory of the currently
 * running guest.  The difference from kvm_read_guest_page is that this function
 * can read from guest physical or from the guest's guest physical memory.
 */
int kvm_read_guest_page_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
			    gfn_t ngfn, void *data, int offset, int len,
			    u32 access)
{
	gfn_t real_gfn;
	gpa_t ngpa;

	ngpa = gfn_to_gpa(ngfn);
	real_gfn = mmu->translate_gpa(vcpu, ngpa, access);
	if (real_gfn == UNMAPPED_GVA)
		return -EFAULT;

	real_gfn = gpa_to_gfn(real_gfn);

	return kvm_read_guest_page(vcpu->kvm, real_gfn, data, offset, len);
}
EXPORT_SYMBOL_GPL(kvm_read_guest_page_mmu);

int kvm_read_nested_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn,
			       void *data, int offset, int len, u32 access)
{
	return kvm_read_guest_page_mmu(vcpu, vcpu->arch.walk_mmu, gfn,
				       data, offset, len, access);
}
/*
 * Load the PAE PDPTRs.  Returns true if they are all valid.
 */
int load_pdptrs(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, unsigned long cr3)
{
	gfn_t pdpt_gfn = cr3 >> PAGE_SHIFT;
	unsigned offset = ((cr3 & (PAGE_SIZE-1)) >> 5) << 2;
	int i;
	int ret;
	u64 pdpte[ARRAY_SIZE(mmu->pdptrs)];

	ret = kvm_read_guest_page_mmu(vcpu, mmu, pdpt_gfn, pdpte,
				      offset * sizeof(u64), sizeof(pdpte),
				      PFERR_USER_MASK|PFERR_WRITE_MASK);
	if (ret < 0) {
		ret = 0;
		goto out;
	}
	for (i = 0; i < ARRAY_SIZE(pdpte); ++i) {
		if (is_present_gpte(pdpte[i]) &&
		    (pdpte[i] & vcpu->arch.mmu.rsvd_bits_mask[0][2])) {
			ret = 0;
			goto out;
		}
	}
	ret = 1;

	memcpy(mmu->pdptrs, pdpte, sizeof(mmu->pdptrs));
	__set_bit(VCPU_EXREG_PDPTR,
		  (unsigned long *)&vcpu->arch.regs_avail);
	__set_bit(VCPU_EXREG_PDPTR,
		  (unsigned long *)&vcpu->arch.regs_dirty);
out:

	return ret;
}
EXPORT_SYMBOL_GPL(load_pdptrs);
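
/*
 * Check whether the in-memory PDPTEs that the guest's CR3 points at still
 * match the values cached in walk_mmu->pdptrs.  Returns true if they may have
 * changed (including when they cannot be read back), in which case the cached
 * copies need to be reloaded.
 */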
static bool pdptrs_changed(struct kvm_vcpu *vcpu)
{
	u64 pdpte[ARRAY_SIZE(vcpu->arch.walk_mmu->pdptrs)];
	bool changed = true;
	int offset;
	gfn_t gfn;
	int r;

	if (is_long_mode(vcpu) || !is_pae(vcpu))
		return false;

	if (!test_bit(VCPU_EXREG_PDPTR,
		      (unsigned long *)&vcpu->arch.regs_avail))
		return true;

	gfn = (kvm_read_cr3(vcpu) & ~31u) >> PAGE_SHIFT;
	offset = (kvm_read_cr3(vcpu) & ~31u) & (PAGE_SIZE - 1);
	r = kvm_read_nested_guest_page(vcpu, gfn, pdpte, offset, sizeof(pdpte),
				       PFERR_USER_MASK | PFERR_WRITE_MASK);
	if (r < 0)
		goto out;
	changed = memcmp(pdpte, vcpu->arch.walk_mmu->pdptrs, sizeof(pdpte)) != 0;
out:

	return changed;
}

int kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
{
	unsigned long old_cr0 = kvm_read_cr0(vcpu);
	unsigned long update_bits = X86_CR0_PG | X86_CR0_WP |
				    X86_CR0_CD | X86_CR0_NW;

	cr0 |= X86_CR0_ET;

#ifdef CONFIG_X86_64
	if (cr0 & 0xffffffff00000000UL)
		return 1;
#endif

	cr0 &= ~CR0_RESERVED_BITS;

	if ((cr0 & X86_CR0_NW) && !(cr0 & X86_CR0_CD))
		return 1;

	if ((cr0 & X86_CR0_PG) && !(cr0 & X86_CR0_PE))
		return 1;

	if (!is_paging(vcpu) && (cr0 & X86_CR0_PG)) {
#ifdef CONFIG_X86_64
		if ((vcpu->arch.efer & EFER_LME)) {
			int cs_db, cs_l;

			if (!is_pae(vcpu))
				return 1;
			kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
			if (cs_l)
				return 1;
		} else
#endif
		if (is_pae(vcpu) && !load_pdptrs(vcpu, vcpu->arch.walk_mmu,
						 kvm_read_cr3(vcpu)))
			return 1;
	}

	if (!(cr0 & X86_CR0_PG) && kvm_read_cr4_bits(vcpu, X86_CR4_PCIDE))
		return 1;

	kvm_x86_ops->set_cr0(vcpu, cr0);

	if ((cr0 ^ old_cr0) & X86_CR0_PG) {
		kvm_clear_async_pf_completion_queue(vcpu);
		kvm_async_pf_hash_reset(vcpu);
	}

	if ((cr0 ^ old_cr0) & update_bits)
		kvm_mmu_reset_context(vcpu);
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_set_cr0);
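
/*
 * LMSW writes only the low four bits of CR0 (PE, MP, EM, TS); per the
 * architectural behaviour it can set PE but never clear it, which is why the
 * current PE bit is preserved through the ~0x0e read mask below.
 */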
void kvm_lmsw(struct kvm_vcpu *vcpu, unsigned long msw)
{
	(void)kvm_set_cr0(vcpu, kvm_read_cr0_bits(vcpu, ~0x0eul) | (msw & 0x0f));
}
EXPORT_SYMBOL_GPL(kvm_lmsw);

static void kvm_load_guest_xcr0(struct kvm_vcpu *vcpu)
{
	if (kvm_read_cr4_bits(vcpu, X86_CR4_OSXSAVE) &&
			!vcpu->guest_xcr0_loaded) {
		/* kvm_set_xcr() also depends on this */
		xsetbv(XCR_XFEATURE_ENABLED_MASK, vcpu->arch.xcr0);
		vcpu->guest_xcr0_loaded = 1;
	}
}

static void kvm_put_guest_xcr0(struct kvm_vcpu *vcpu)
{
	if (vcpu->guest_xcr0_loaded) {
		if (vcpu->arch.xcr0 != host_xcr0)
			xsetbv(XCR_XFEATURE_ENABLED_MASK, host_xcr0);
		vcpu->guest_xcr0_loaded = 0;
	}
}

int __kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
{
	u64 xcr0;

	/* Only support XCR_XFEATURE_ENABLED_MASK(xcr0) now */
	if (index != XCR_XFEATURE_ENABLED_MASK)
		return 1;
	xcr0 = xcr;
	if (kvm_x86_ops->get_cpl(vcpu) != 0)
		return 1;
	if (!(xcr0 & XSTATE_FP))
		return 1;
	if ((xcr0 & XSTATE_YMM) && !(xcr0 & XSTATE_SSE))
		return 1;
	if (xcr0 & ~host_xcr0)
		return 1;
	kvm_put_guest_xcr0(vcpu);
	vcpu->arch.xcr0 = xcr0;
	return 0;
}

int kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
{
	if (__kvm_set_xcr(vcpu, index, xcr)) {
		kvm_inject_gp(vcpu, 0);
		return 1;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_set_xcr);
int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
{
	unsigned long old_cr4 = kvm_read_cr4(vcpu);
	unsigned long pdptr_bits = X86_CR4_PGE | X86_CR4_PSE |
				   X86_CR4_PAE | X86_CR4_SMEP;

	if (cr4 & CR4_RESERVED_BITS)
		return 1;

	if (!guest_cpuid_has_xsave(vcpu) && (cr4 & X86_CR4_OSXSAVE))
		return 1;

	if (!guest_cpuid_has_smep(vcpu) && (cr4 & X86_CR4_SMEP))
		return 1;

	if (!guest_cpuid_has_fsgsbase(vcpu) && (cr4 & X86_CR4_RDWRGSFS))
		return 1;

	if (is_long_mode(vcpu)) {
		if (!(cr4 & X86_CR4_PAE))
			return 1;
	} else if (is_paging(vcpu) && (cr4 & X86_CR4_PAE)
		   && ((cr4 ^ old_cr4) & pdptr_bits)
		   && !load_pdptrs(vcpu, vcpu->arch.walk_mmu,
				   kvm_read_cr3(vcpu)))
		return 1;

	if ((cr4 & X86_CR4_PCIDE) && !(old_cr4 & X86_CR4_PCIDE)) {
		if (!guest_cpuid_has_pcid(vcpu))
			return 1;

		/* PCID cannot be enabled when cr3[11:0] != 000H or EFER.LMA = 0 */
		if ((kvm_read_cr3(vcpu) & X86_CR3_PCID_MASK) || !is_long_mode(vcpu))
			return 1;
	}

	if (kvm_x86_ops->set_cr4(vcpu, cr4))
		return 1;

	if (((cr4 ^ old_cr4) & pdptr_bits) ||
	    (!(cr4 & X86_CR4_PCIDE) && (old_cr4 & X86_CR4_PCIDE)))
		kvm_mmu_reset_context(vcpu);

	if ((cr4 ^ old_cr4) & X86_CR4_OSXSAVE)
		kvm_update_cpuid(vcpu);

	return 0;
}
EXPORT_SYMBOL_GPL(kvm_set_cr4);
int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
{
	if (cr3 == kvm_read_cr3(vcpu) && !pdptrs_changed(vcpu)) {
		kvm_mmu_sync_roots(vcpu);
		kvm_mmu_flush_tlb(vcpu);
		return 0;
	}

	if (is_long_mode(vcpu)) {
		if (kvm_read_cr4_bits(vcpu, X86_CR4_PCIDE)) {
			if (cr3 & CR3_PCID_ENABLED_RESERVED_BITS)
				return 1;
		} else
			if (cr3 & CR3_L_MODE_RESERVED_BITS)
				return 1;
	} else {
		if (is_pae(vcpu)) {
			if (cr3 & CR3_PAE_RESERVED_BITS)
				return 1;
			if (is_paging(vcpu) &&
			    !load_pdptrs(vcpu, vcpu->arch.walk_mmu, cr3))
				return 1;
		}
		/*
		 * We don't check reserved bits in nonpae mode, because
		 * this isn't enforced, and VMware depends on this.
		 */
	}

	/*
	 * Does the new cr3 value map to physical memory? (Note, we
	 * catch an invalid cr3 even in real-mode, because it would
	 * cause trouble later on when we turn on paging anyway.)
	 *
	 * A real CPU would silently accept an invalid cr3 and would
	 * attempt to use it - with largely undefined (and often hard
	 * to debug) behavior on the guest side.
	 */
	if (unlikely(!gfn_to_memslot(vcpu->kvm, cr3 >> PAGE_SHIFT)))
		return 1;
	vcpu->arch.cr3 = cr3;
	__set_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail);
	vcpu->arch.mmu.new_cr3(vcpu);
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_set_cr3);
int kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8)
{
	if (cr8 & CR8_RESERVED_BITS)
		return 1;
	if (irqchip_in_kernel(vcpu->kvm))
		kvm_lapic_set_tpr(vcpu, cr8);
	else
		vcpu->arch.cr8 = cr8;
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_set_cr8);

unsigned long kvm_get_cr8(struct kvm_vcpu *vcpu)
{
	if (irqchip_in_kernel(vcpu->kvm))
		return kvm_lapic_get_cr8(vcpu);
	else
		return vcpu->arch.cr8;
}
EXPORT_SYMBOL_GPL(kvm_get_cr8);

static void kvm_update_dr7(struct kvm_vcpu *vcpu)
{
	unsigned long dr7;

	if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)
		dr7 = vcpu->arch.guest_debug_dr7;
	else
		dr7 = vcpu->arch.dr7;
	kvm_x86_ops->set_dr7(vcpu, dr7);
	vcpu->arch.switch_db_regs = (dr7 & DR7_BP_EN_MASK);
}

static int __kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val)
{
	switch (dr) {
	case 0 ... 3:
		vcpu->arch.db[dr] = val;
		if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP))
			vcpu->arch.eff_db[dr] = val;
		break;
	case 4:
		if (kvm_read_cr4_bits(vcpu, X86_CR4_DE))
			return 1; /* #UD */
		/* fall through */
	case 6:
		if (val & 0xffffffff00000000ULL)
			return -1; /* #GP */
		vcpu->arch.dr6 = (val & DR6_VOLATILE) | DR6_FIXED_1;
		break;
	case 5:
		if (kvm_read_cr4_bits(vcpu, X86_CR4_DE))
			return 1; /* #UD */
		/* fall through */
	default: /* 7 */
		if (val & 0xffffffff00000000ULL)
			return -1; /* #GP */
		vcpu->arch.dr7 = (val & DR7_VOLATILE) | DR7_FIXED_1;
		kvm_update_dr7(vcpu);
		break;
	}

	return 0;
}
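
/*
 * __kvm_set_dr() returns 0 on success, 1 when the access should raise #UD
 * (DR4/DR5 with CR4.DE set) and -1 when it should raise #GP (reserved high
 * bits set); kvm_set_dr() below turns those results into injected exceptions.
 */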
int kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val)
{
	int res;

	res = __kvm_set_dr(vcpu, dr, val);
	if (res > 0)
		kvm_queue_exception(vcpu, UD_VECTOR);
	else if (res < 0)
		kvm_inject_gp(vcpu, 0);

	return res;
}
EXPORT_SYMBOL_GPL(kvm_set_dr);

static int _kvm_get_dr(struct kvm_vcpu *vcpu, int dr, unsigned long *val)
{
	switch (dr) {
	case 0 ... 3:
		*val = vcpu->arch.db[dr];
		break;
	case 4:
		if (kvm_read_cr4_bits(vcpu, X86_CR4_DE))
			return 1;
		/* fall through */
	case 6:
		*val = vcpu->arch.dr6;
		break;
	case 5:
		if (kvm_read_cr4_bits(vcpu, X86_CR4_DE))
			return 1;
		/* fall through */
	default: /* 7 */
		*val = vcpu->arch.dr7;
		break;
	}

	return 0;
}

int kvm_get_dr(struct kvm_vcpu *vcpu, int dr, unsigned long *val)
{
	if (_kvm_get_dr(vcpu, dr, val)) {
		kvm_queue_exception(vcpu, UD_VECTOR);
		return 1;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_get_dr);

bool kvm_rdpmc(struct kvm_vcpu *vcpu)
{
	u32 ecx = kvm_register_read(vcpu, VCPU_REGS_RCX);
	u64 data;
	int err;

	err = kvm_pmu_read_pmc(vcpu, ecx, &data);
	if (err)
		return err;
	kvm_register_write(vcpu, VCPU_REGS_RAX, (u32)data);
	kvm_register_write(vcpu, VCPU_REGS_RDX, data >> 32);
	return err;
}
EXPORT_SYMBOL_GPL(kvm_rdpmc);
/*
 * List of msr numbers which we expose to userspace through KVM_GET_MSRS,
 * KVM_SET_MSRS, and KVM_GET_MSR_INDEX_LIST.
 *
 * This list is modified at module load time to reflect the
 * capabilities of the host cpu.  This capabilities test skips MSRs that are
 * kvm-specific.  Those are put at the beginning of the list.
 */

#define KVM_SAVE_MSRS_BEGIN	10
static u32 msrs_to_save[] = {
	MSR_KVM_SYSTEM_TIME, MSR_KVM_WALL_CLOCK,
	MSR_KVM_SYSTEM_TIME_NEW, MSR_KVM_WALL_CLOCK_NEW,
	HV_X64_MSR_GUEST_OS_ID, HV_X64_MSR_HYPERCALL,
	HV_X64_MSR_APIC_ASSIST_PAGE, MSR_KVM_ASYNC_PF_EN, MSR_KVM_STEAL_TIME,
	MSR_KVM_PV_EOI_EN,
	MSR_IA32_SYSENTER_CS, MSR_IA32_SYSENTER_ESP, MSR_IA32_SYSENTER_EIP,
	MSR_STAR,
#ifdef CONFIG_X86_64
	MSR_CSTAR, MSR_KERNEL_GS_BASE, MSR_SYSCALL_MASK, MSR_LSTAR,
#endif
	MSR_IA32_TSC, MSR_IA32_CR_PAT, MSR_VM_HSAVE_PA
};

static unsigned num_msrs_to_save;

static const u32 emulated_msrs[] = {
	MSR_IA32_TSC_ADJUST,
	MSR_IA32_TSCDEADLINE,
	MSR_IA32_MISC_ENABLE,
	MSR_IA32_MCG_STATUS,
	MSR_IA32_MCG_CTL,
};
bool kvm_valid_efer(struct kvm_vcpu *vcpu, u64 efer)
{
	if (efer & efer_reserved_bits)
		return false;

	if (efer & EFER_FFXSR) {
		struct kvm_cpuid_entry2 *feat;

		feat = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
		if (!feat || !(feat->edx & bit(X86_FEATURE_FXSR_OPT)))
			return false;
	}

	if (efer & EFER_SVME) {
		struct kvm_cpuid_entry2 *feat;

		feat = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
		if (!feat || !(feat->ecx & bit(X86_FEATURE_SVM)))
			return false;
	}

	return true;
}
EXPORT_SYMBOL_GPL(kvm_valid_efer);

static int set_efer(struct kvm_vcpu *vcpu, u64 efer)
{
	u64 old_efer = vcpu->arch.efer;

	if (!kvm_valid_efer(vcpu, efer))
		return 1;

	if (is_paging(vcpu)
	    && (vcpu->arch.efer & EFER_LME) != (efer & EFER_LME))
		return 1;

	efer &= ~EFER_LMA;
	efer |= vcpu->arch.efer & EFER_LMA;

	kvm_x86_ops->set_efer(vcpu, efer);

	/* Update reserved bits */
	if ((efer ^ old_efer) & EFER_NX)
		kvm_mmu_reset_context(vcpu);

	return 0;
}

void kvm_enable_efer_bits(u64 mask)
{
	efer_reserved_bits &= ~mask;
}
EXPORT_SYMBOL_GPL(kvm_enable_efer_bits);
/*
 * Writes msr value into the appropriate "register".
 * Returns 0 on success, non-0 otherwise.
 * Assumes vcpu_load() was already called.
 */
int kvm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
{
	return kvm_x86_ops->set_msr(vcpu, msr);
}

/*
 * Adapt set_msr() to msr_io()'s calling convention
 */
static int do_set_msr(struct kvm_vcpu *vcpu, unsigned index, u64 *data)
{
	struct msr_data msr;

	msr.data = *data;
	msr.index = index;
	msr.host_initiated = true;
	return kvm_set_msr(vcpu, &msr);
}
#ifdef CONFIG_X86_64
struct pvclock_gtod_data {
	seqcount_t	seq;

	struct { /* extract of a clocksource struct */
		int	vclock_mode;
		cycle_t	cycle_last;
		cycle_t	mask;
		u32	mult;
		u32	shift;
	} clock;

	/* open coded 'struct timespec' */
	u64		monotonic_time_snsec;
	time_t		monotonic_time_sec;
};

static struct pvclock_gtod_data pvclock_gtod_data;
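
/*
 * Note that the timekeeper stores xtime_nsec left-shifted by tk->shift, so
 * update_pvclock_gtod() below accumulates monotonic_time_snsec in the same
 * shifted units and folds whole seconds over into monotonic_time_sec.
 */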
static void update_pvclock_gtod(struct timekeeper *tk)
{
	struct pvclock_gtod_data *vdata = &pvclock_gtod_data;

	write_seqcount_begin(&vdata->seq);

	/* copy pvclock gtod data */
	vdata->clock.vclock_mode	= tk->clock->archdata.vclock_mode;
	vdata->clock.cycle_last		= tk->clock->cycle_last;
	vdata->clock.mask		= tk->clock->mask;
	vdata->clock.mult		= tk->mult;
	vdata->clock.shift		= tk->shift;

	vdata->monotonic_time_sec	= tk->xtime_sec
					+ tk->wall_to_monotonic.tv_sec;
	vdata->monotonic_time_snsec	= tk->xtime_nsec
					+ (tk->wall_to_monotonic.tv_nsec
						<< tk->shift);
	while (vdata->monotonic_time_snsec >=
					(((u64)NSEC_PER_SEC) << tk->shift)) {
		vdata->monotonic_time_snsec -=
					((u64)NSEC_PER_SEC) << tk->shift;
		vdata->monotonic_time_sec++;
	}

	write_seqcount_end(&vdata->seq);
}
#endif
  851. static void kvm_write_wall_clock(struct kvm *kvm, gpa_t wall_clock)
  852. {
  853. int version;
  854. int r;
  855. struct pvclock_wall_clock wc;
  856. struct timespec boot;
  857. if (!wall_clock)
  858. return;
  859. r = kvm_read_guest(kvm, wall_clock, &version, sizeof(version));
  860. if (r)
  861. return;
  862. if (version & 1)
  863. ++version; /* first time write, random junk */
  864. ++version;
  865. kvm_write_guest(kvm, wall_clock, &version, sizeof(version));
	/*
	 * The guest calculates current wall clock time by adding
	 * system time (updated by kvm_guest_time_update below) to the
	 * wall clock specified here.  Guest system time equals host
	 * system time for us, so we must fill in the host boot time here.
	 */
  872. getboottime(&boot);
  873. if (kvm->arch.kvmclock_offset) {
  874. struct timespec ts = ns_to_timespec(kvm->arch.kvmclock_offset);
  875. boot = timespec_sub(boot, ts);
  876. }
  877. wc.sec = boot.tv_sec;
  878. wc.nsec = boot.tv_nsec;
  879. wc.version = version;
  880. kvm_write_guest(kvm, wall_clock, &wc, sizeof(wc));
  881. version++;
  882. kvm_write_guest(kvm, wall_clock, &version, sizeof(version));
  883. }
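/*
 * Consumer-side sketch (an assumption about the guest, not code from this
 * file): the version field written above follows a seqlock-like protocol,
 * so a guest reading the shared pvclock_wall_clock retries until it sees
 * the same even version before and after copying the payload:
 *
 *	do {
 *		version = wc->version;
 *		rmb();
 *		sec  = wc->sec;
 *		nsec = wc->nsec;
 *		rmb();
 *	} while ((wc->version & 1) || wc->version != version);
 */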
  884. static uint32_t div_frac(uint32_t dividend, uint32_t divisor)
  885. {
  886. uint32_t quotient, remainder;
	/* Don't try to replace this with do_div(); it calculates
	 * "(dividend << 32) / divisor" */
  889. __asm__ ( "divl %4"
  890. : "=a" (quotient), "=d" (remainder)
  891. : "0" (0), "1" (dividend), "r" (divisor) );
  892. return quotient;
  893. }
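/*
 * Equivalently, on a host where 64-bit division is acceptable, the same
 * value could be computed as (illustrative only):
 *
 *	return (u32)(((u64)dividend << 32) / divisor);
 *
 * i.e. div_frac() returns dividend/divisor as a 0.32 fixed-point fraction;
 * the result fits in 32 bits because callers guarantee dividend < divisor.
 */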
  894. static void kvm_get_time_scale(uint32_t scaled_khz, uint32_t base_khz,
  895. s8 *pshift, u32 *pmultiplier)
  896. {
  897. uint64_t scaled64;
  898. int32_t shift = 0;
  899. uint64_t tps64;
  900. uint32_t tps32;
  901. tps64 = base_khz * 1000LL;
  902. scaled64 = scaled_khz * 1000LL;
  903. while (tps64 > scaled64*2 || tps64 & 0xffffffff00000000ULL) {
  904. tps64 >>= 1;
  905. shift--;
  906. }
  907. tps32 = (uint32_t)tps64;
  908. while (tps32 <= scaled64 || scaled64 & 0xffffffff00000000ULL) {
  909. if (scaled64 & 0xffffffff00000000ULL || tps32 & 0x80000000)
  910. scaled64 >>= 1;
  911. else
  912. tps32 <<= 1;
  913. shift++;
  914. }
  915. *pshift = shift;
  916. *pmultiplier = div_frac(scaled64, tps32);
  917. pr_debug("%s: base_khz %u => %u, shift %d, mul %u\n",
  918. __func__, base_khz, scaled_khz, shift, *pmultiplier);
  919. }
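/*
 * Worked example (illustrative numbers): for the cycles-to-nanoseconds
 * direction used by kvm_guest_time_update(), scaled_khz = NSEC_PER_SEC / 1000
 * = 1000000 and base_khz = tsc_khz.  With a 2.6 GHz TSC (base_khz = 2600000)
 * the loops settle on shift = -1 and a *pmultiplier of roughly 0.769 * 2^32,
 * so pvclock_scale_delta(delta, mult, shift) ~= (delta >> 1) * 0.769
 * ~= delta / 2.6, converting TSC cycles into nanoseconds as intended.
 */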
  920. static inline u64 get_kernel_ns(void)
  921. {
  922. struct timespec ts;
  923. WARN_ON(preemptible());
  924. ktime_get_ts(&ts);
  925. monotonic_to_bootbased(&ts);
  926. return timespec_to_ns(&ts);
  927. }
  928. #ifdef CONFIG_X86_64
  929. static atomic_t kvm_guest_has_master_clock = ATOMIC_INIT(0);
  930. #endif
  931. static DEFINE_PER_CPU(unsigned long, cpu_tsc_khz);
  932. unsigned long max_tsc_khz;
  933. static inline u64 nsec_to_cycles(struct kvm_vcpu *vcpu, u64 nsec)
  934. {
  935. return pvclock_scale_delta(nsec, vcpu->arch.virtual_tsc_mult,
  936. vcpu->arch.virtual_tsc_shift);
  937. }
  938. static u32 adjust_tsc_khz(u32 khz, s32 ppm)
  939. {
  940. u64 v = (u64)khz * (1000000 + ppm);
  941. do_div(v, 1000000);
  942. return v;
  943. }
  944. static void kvm_set_tsc_khz(struct kvm_vcpu *vcpu, u32 this_tsc_khz)
  945. {
  946. u32 thresh_lo, thresh_hi;
  947. int use_scaling = 0;
  948. /* tsc_khz can be zero if TSC calibration fails */
  949. if (this_tsc_khz == 0)
  950. return;
  951. /* Compute a scale to convert nanoseconds in TSC cycles */
  952. kvm_get_time_scale(this_tsc_khz, NSEC_PER_SEC / 1000,
  953. &vcpu->arch.virtual_tsc_shift,
  954. &vcpu->arch.virtual_tsc_mult);
  955. vcpu->arch.virtual_tsc_khz = this_tsc_khz;
	/*
	 * Compute the variation in TSC rate that is acceptable
	 * within the tolerance range and decide whether the
	 * rate being requested is within those bounds of the hardware
	 * rate.  If so, no scaling or compensation need be done.
	 */
  962. thresh_lo = adjust_tsc_khz(tsc_khz, -tsc_tolerance_ppm);
  963. thresh_hi = adjust_tsc_khz(tsc_khz, tsc_tolerance_ppm);
  964. if (this_tsc_khz < thresh_lo || this_tsc_khz > thresh_hi) {
  965. pr_debug("kvm: requested TSC rate %u falls outside tolerance [%u,%u]\n", this_tsc_khz, thresh_lo, thresh_hi);
  966. use_scaling = 1;
  967. }
  968. kvm_x86_ops->set_tsc_khz(vcpu, this_tsc_khz, use_scaling);
  969. }
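/*
 * Illustrative numbers (assuming the default tsc_tolerance_ppm of 250): on a
 * host with tsc_khz = 2600000, thresh_lo/thresh_hi above come out to
 * 2599350/2600650, so a requested guest rate within +/-650 kHz of the host
 * rate runs without scaling or catchup compensation.
 */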
  970. static u64 compute_guest_tsc(struct kvm_vcpu *vcpu, s64 kernel_ns)
  971. {
  972. u64 tsc = pvclock_scale_delta(kernel_ns-vcpu->arch.this_tsc_nsec,
  973. vcpu->arch.virtual_tsc_mult,
  974. vcpu->arch.virtual_tsc_shift);
  975. tsc += vcpu->arch.this_tsc_write;
  976. return tsc;
  977. }
  978. void kvm_track_tsc_matching(struct kvm_vcpu *vcpu)
  979. {
  980. #ifdef CONFIG_X86_64
  981. bool vcpus_matched;
  982. bool do_request = false;
  983. struct kvm_arch *ka = &vcpu->kvm->arch;
  984. struct pvclock_gtod_data *gtod = &pvclock_gtod_data;
  985. vcpus_matched = (ka->nr_vcpus_matched_tsc + 1 ==
  986. atomic_read(&vcpu->kvm->online_vcpus));
	if (vcpus_matched && gtod->clock.vclock_mode == VCLOCK_TSC)
		if (!ka->use_master_clock)
			do_request = true;
	if (!vcpus_matched && ka->use_master_clock)
		do_request = true;
  992. if (do_request)
  993. kvm_make_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu);
  994. trace_kvm_track_tsc(vcpu->vcpu_id, ka->nr_vcpus_matched_tsc,
  995. atomic_read(&vcpu->kvm->online_vcpus),
  996. ka->use_master_clock, gtod->clock.vclock_mode);
  997. #endif
  998. }
  999. static void update_ia32_tsc_adjust_msr(struct kvm_vcpu *vcpu, s64 offset)
  1000. {
  1001. u64 curr_offset = kvm_x86_ops->read_tsc_offset(vcpu);
  1002. vcpu->arch.ia32_tsc_adjust_msr += offset - curr_offset;
  1003. }
  1004. void kvm_write_tsc(struct kvm_vcpu *vcpu, struct msr_data *msr)
  1005. {
  1006. struct kvm *kvm = vcpu->kvm;
  1007. u64 offset, ns, elapsed;
  1008. unsigned long flags;
  1009. s64 usdiff;
  1010. bool matched;
  1011. u64 data = msr->data;
  1012. raw_spin_lock_irqsave(&kvm->arch.tsc_write_lock, flags);
  1013. offset = kvm_x86_ops->compute_tsc_offset(vcpu, data);
  1014. ns = get_kernel_ns();
  1015. elapsed = ns - kvm->arch.last_tsc_nsec;
  1016. if (vcpu->arch.virtual_tsc_khz) {
  1017. int faulted = 0;
  1018. /* n.b - signed multiplication and division required */
  1019. usdiff = data - kvm->arch.last_tsc_write;
  1020. #ifdef CONFIG_X86_64
  1021. usdiff = (usdiff * 1000) / vcpu->arch.virtual_tsc_khz;
  1022. #else
  1023. /* do_div() only does unsigned */
  1024. asm("1: idivl %[divisor]\n"
  1025. "2: xor %%edx, %%edx\n"
  1026. " movl $0, %[faulted]\n"
  1027. "3:\n"
  1028. ".section .fixup,\"ax\"\n"
  1029. "4: movl $1, %[faulted]\n"
  1030. " jmp 3b\n"
  1031. ".previous\n"
  1032. _ASM_EXTABLE(1b, 4b)
  1033. : "=A"(usdiff), [faulted] "=r" (faulted)
  1034. : "A"(usdiff * 1000), [divisor] "rm"(vcpu->arch.virtual_tsc_khz));
  1035. #endif
  1036. do_div(elapsed, 1000);
  1037. usdiff -= elapsed;
  1038. if (usdiff < 0)
  1039. usdiff = -usdiff;
  1040. /* idivl overflow => difference is larger than USEC_PER_SEC */
  1041. if (faulted)
  1042. usdiff = USEC_PER_SEC;
  1043. } else
  1044. usdiff = USEC_PER_SEC; /* disable TSC match window below */
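	/*
	 * Sanity check of the units above (illustrative): with
	 * virtual_tsc_khz == 2600000 (a 2.6 GHz guest TSC), a write that
	 * differs from the last one by 1,300,000,000 cycles gives
	 * usdiff = 1.3e9 * 1000 / 2.6e6 = 500000 microseconds; if the host
	 * clock advanced by the same half second, subtracting "elapsed"
	 * leaves usdiff near zero and the write is treated as a
	 * synchronization attempt below.
	 */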
  1045. /*
  1046. * Special case: TSC write with a small delta (1 second) of virtual
  1047. * cycle time against real time is interpreted as an attempt to
  1048. * synchronize the CPU.
  1049. *
  1050. * For a reliable TSC, we can match TSC offsets, and for an unstable
  1051. * TSC, we add elapsed time in this computation. We could let the
  1052. * compensation code attempt to catch up if we fall behind, but
  1053. * it's better to try to match offsets from the beginning.
  1054. */
  1055. if (usdiff < USEC_PER_SEC &&
  1056. vcpu->arch.virtual_tsc_khz == kvm->arch.last_tsc_khz) {
  1057. if (!check_tsc_unstable()) {
  1058. offset = kvm->arch.cur_tsc_offset;
  1059. pr_debug("kvm: matched tsc offset for %llu\n", data);
  1060. } else {
  1061. u64 delta = nsec_to_cycles(vcpu, elapsed);
  1062. data += delta;
  1063. offset = kvm_x86_ops->compute_tsc_offset(vcpu, data);
  1064. pr_debug("kvm: adjusted tsc offset by %llu\n", delta);
  1065. }
  1066. matched = true;
  1067. } else {
  1068. /*
  1069. * We split periods of matched TSC writes into generations.
  1070. * For each generation, we track the original measured
  1071. * nanosecond time, offset, and write, so if TSCs are in
  1072. * sync, we can match exact offset, and if not, we can match
  1073. * exact software computation in compute_guest_tsc()
  1074. *
  1075. * These values are tracked in kvm->arch.cur_xxx variables.
  1076. */
  1077. kvm->arch.cur_tsc_generation++;
  1078. kvm->arch.cur_tsc_nsec = ns;
  1079. kvm->arch.cur_tsc_write = data;
  1080. kvm->arch.cur_tsc_offset = offset;
  1081. matched = false;
  1082. pr_debug("kvm: new tsc generation %u, clock %llu\n",
  1083. kvm->arch.cur_tsc_generation, data);
  1084. }
	/*
	 * We also track the most recent recorded kHz, write and time to
	 * allow the matching interval to be extended at each write.
	 */
  1089. kvm->arch.last_tsc_nsec = ns;
  1090. kvm->arch.last_tsc_write = data;
  1091. kvm->arch.last_tsc_khz = vcpu->arch.virtual_tsc_khz;
  1092. /* Reset of TSC must disable overshoot protection below */
  1093. vcpu->arch.hv_clock.tsc_timestamp = 0;
  1094. vcpu->arch.last_guest_tsc = data;
  1095. /* Keep track of which generation this VCPU has synchronized to */
  1096. vcpu->arch.this_tsc_generation = kvm->arch.cur_tsc_generation;
  1097. vcpu->arch.this_tsc_nsec = kvm->arch.cur_tsc_nsec;
  1098. vcpu->arch.this_tsc_write = kvm->arch.cur_tsc_write;
  1099. if (guest_cpuid_has_tsc_adjust(vcpu) && !msr->host_initiated)
  1100. update_ia32_tsc_adjust_msr(vcpu, offset);
  1101. kvm_x86_ops->write_tsc_offset(vcpu, offset);
  1102. raw_spin_unlock_irqrestore(&kvm->arch.tsc_write_lock, flags);
  1103. spin_lock(&kvm->arch.pvclock_gtod_sync_lock);
  1104. if (matched)
  1105. kvm->arch.nr_vcpus_matched_tsc++;
  1106. else
  1107. kvm->arch.nr_vcpus_matched_tsc = 0;
  1108. kvm_track_tsc_matching(vcpu);
  1109. spin_unlock(&kvm->arch.pvclock_gtod_sync_lock);
  1110. }
  1111. EXPORT_SYMBOL_GPL(kvm_write_tsc);
  1112. #ifdef CONFIG_X86_64
  1113. static cycle_t read_tsc(void)
  1114. {
  1115. cycle_t ret;
  1116. u64 last;
  1117. /*
  1118. * Empirically, a fence (of type that depends on the CPU)
  1119. * before rdtsc is enough to ensure that rdtsc is ordered
  1120. * with respect to loads. The various CPU manuals are unclear
  1121. * as to whether rdtsc can be reordered with later loads,
  1122. * but no one has ever seen it happen.
  1123. */
  1124. rdtsc_barrier();
  1125. ret = (cycle_t)vget_cycles();
  1126. last = pvclock_gtod_data.clock.cycle_last;
  1127. if (likely(ret >= last))
  1128. return ret;
	/*
	 * GCC likes to generate cmov here, but this branch is extremely
	 * predictable (it's just a function of time and the likely is
	 * very likely) and there's a data dependence, so force GCC
	 * to generate a branch instead.  I don't barrier() because
	 * we don't actually need a barrier, and if this function
	 * ever gets inlined it will generate worse code.
	 */
  1137. asm volatile ("");
  1138. return last;
  1139. }
  1140. static inline u64 vgettsc(cycle_t *cycle_now)
  1141. {
  1142. long v;
  1143. struct pvclock_gtod_data *gtod = &pvclock_gtod_data;
  1144. *cycle_now = read_tsc();
  1145. v = (*cycle_now - gtod->clock.cycle_last) & gtod->clock.mask;
  1146. return v * gtod->clock.mult;
  1147. }
  1148. static int do_monotonic(struct timespec *ts, cycle_t *cycle_now)
  1149. {
  1150. unsigned long seq;
  1151. u64 ns;
  1152. int mode;
  1153. struct pvclock_gtod_data *gtod = &pvclock_gtod_data;
  1154. ts->tv_nsec = 0;
  1155. do {
  1156. seq = read_seqcount_begin(&gtod->seq);
  1157. mode = gtod->clock.vclock_mode;
  1158. ts->tv_sec = gtod->monotonic_time_sec;
  1159. ns = gtod->monotonic_time_snsec;
  1160. ns += vgettsc(cycle_now);
  1161. ns >>= gtod->clock.shift;
  1162. } while (unlikely(read_seqcount_retry(&gtod->seq, seq)));
  1163. timespec_add_ns(ts, ns);
  1164. return mode;
  1165. }
  1166. /* returns true if host is using tsc clocksource */
  1167. static bool kvm_get_time_and_clockread(s64 *kernel_ns, cycle_t *cycle_now)
  1168. {
  1169. struct timespec ts;
  1170. /* checked again under seqlock below */
  1171. if (pvclock_gtod_data.clock.vclock_mode != VCLOCK_TSC)
  1172. return false;
  1173. if (do_monotonic(&ts, cycle_now) != VCLOCK_TSC)
  1174. return false;
  1175. monotonic_to_bootbased(&ts);
  1176. *kernel_ns = timespec_to_ns(&ts);
  1177. return true;
  1178. }
  1179. #endif
  1180. /*
  1181. *
 * Assuming a stable TSC across physical CPUs, and a stable TSC
  1183. * across virtual CPUs, the following condition is possible.
  1184. * Each numbered line represents an event visible to both
  1185. * CPUs at the next numbered event.
  1186. *
  1187. * "timespecX" represents host monotonic time. "tscX" represents
  1188. * RDTSC value.
  1189. *
  1190. * VCPU0 on CPU0 | VCPU1 on CPU1
  1191. *
  1192. * 1. read timespec0,tsc0
  1193. * 2. | timespec1 = timespec0 + N
  1194. * | tsc1 = tsc0 + M
  1195. * 3. transition to guest | transition to guest
  1196. * 4. ret0 = timespec0 + (rdtsc - tsc0) |
  1197. * 5. | ret1 = timespec1 + (rdtsc - tsc1)
  1198. * | ret1 = timespec0 + N + (rdtsc - (tsc0 + M))
  1199. *
  1200. * Since ret0 update is visible to VCPU1 at time 5, to obey monotonicity:
  1201. *
  1202. * - ret0 < ret1
  1203. * - timespec0 + (rdtsc - tsc0) < timespec0 + N + (rdtsc - (tsc0 + M))
  1204. * ...
  1205. * - 0 < N - M => M < N
  1206. *
 * That is, when timespec0 != timespec1, M < N.  Unfortunately that is not
 * always the case (the difference between two distinct xtime instances
 * might be smaller than the difference between corresponding TSC reads,
 * when updating guest vcpus' pvclock areas).
  1211. *
  1212. * To avoid that problem, do not allow visibility of distinct
  1213. * system_timestamp/tsc_timestamp values simultaneously: use a master
  1214. * copy of host monotonic time values. Update that master copy
  1215. * in lockstep.
  1216. *
  1217. * Rely on synchronization of host TSCs and guest TSCs for monotonicity.
  1218. *
  1219. */
  1220. static void pvclock_update_vm_gtod_copy(struct kvm *kvm)
  1221. {
  1222. #ifdef CONFIG_X86_64
  1223. struct kvm_arch *ka = &kvm->arch;
  1224. int vclock_mode;
  1225. bool host_tsc_clocksource, vcpus_matched;
  1226. vcpus_matched = (ka->nr_vcpus_matched_tsc + 1 ==
  1227. atomic_read(&kvm->online_vcpus));
  1228. /*
  1229. * If the host uses TSC clock, then passthrough TSC as stable
  1230. * to the guest.
  1231. */
  1232. host_tsc_clocksource = kvm_get_time_and_clockread(
  1233. &ka->master_kernel_ns,
  1234. &ka->master_cycle_now);
  1235. ka->use_master_clock = host_tsc_clocksource & vcpus_matched;
  1236. if (ka->use_master_clock)
  1237. atomic_set(&kvm_guest_has_master_clock, 1);
  1238. vclock_mode = pvclock_gtod_data.clock.vclock_mode;
  1239. trace_kvm_update_master_clock(ka->use_master_clock, vclock_mode,
  1240. vcpus_matched);
  1241. #endif
  1242. }
  1243. static int kvm_guest_time_update(struct kvm_vcpu *v)
  1244. {
  1245. unsigned long flags, this_tsc_khz;
  1246. struct kvm_vcpu_arch *vcpu = &v->arch;
  1247. struct kvm_arch *ka = &v->kvm->arch;
  1248. s64 kernel_ns, max_kernel_ns;
  1249. u64 tsc_timestamp, host_tsc;
  1250. struct pvclock_vcpu_time_info guest_hv_clock;
  1251. u8 pvclock_flags;
  1252. bool use_master_clock;
  1253. kernel_ns = 0;
  1254. host_tsc = 0;
  1255. /*
  1256. * If the host uses TSC clock, then passthrough TSC as stable
  1257. * to the guest.
  1258. */
  1259. spin_lock(&ka->pvclock_gtod_sync_lock);
  1260. use_master_clock = ka->use_master_clock;
  1261. if (use_master_clock) {
  1262. host_tsc = ka->master_cycle_now;
  1263. kernel_ns = ka->master_kernel_ns;
  1264. }
  1265. spin_unlock(&ka->pvclock_gtod_sync_lock);
  1266. /* Keep irq disabled to prevent changes to the clock */
  1267. local_irq_save(flags);
  1268. this_tsc_khz = __get_cpu_var(cpu_tsc_khz);
  1269. if (unlikely(this_tsc_khz == 0)) {
  1270. local_irq_restore(flags);
  1271. kvm_make_request(KVM_REQ_CLOCK_UPDATE, v);
  1272. return 1;
  1273. }
  1274. if (!use_master_clock) {
  1275. host_tsc = native_read_tsc();
  1276. kernel_ns = get_kernel_ns();
  1277. }
  1278. tsc_timestamp = kvm_x86_ops->read_l1_tsc(v, host_tsc);
  1279. /*
  1280. * We may have to catch up the TSC to match elapsed wall clock
  1281. * time for two reasons, even if kvmclock is used.
  1282. * 1) CPU could have been running below the maximum TSC rate
  1283. * 2) Broken TSC compensation resets the base at each VCPU
  1284. * entry to avoid unknown leaps of TSC even when running
  1285. * again on the same CPU. This may cause apparent elapsed
  1286. * time to disappear, and the guest to stand still or run
  1287. * very slowly.
  1288. */
  1289. if (vcpu->tsc_catchup) {
  1290. u64 tsc = compute_guest_tsc(v, kernel_ns);
  1291. if (tsc > tsc_timestamp) {
  1292. adjust_tsc_offset_guest(v, tsc - tsc_timestamp);
  1293. tsc_timestamp = tsc;
  1294. }
  1295. }
  1296. local_irq_restore(flags);
  1297. if (!vcpu->pv_time_enabled)
  1298. return 0;
  1299. /*
  1300. * Time as measured by the TSC may go backwards when resetting the base
  1301. * tsc_timestamp. The reason for this is that the TSC resolution is
  1302. * higher than the resolution of the other clock scales. Thus, many
	 * possible measurements of the TSC correspond to one measurement of any
	 * other clock, and so a spread of values is possible.  This is not a
	 * problem for the computation of the nanosecond clock; with TSC rates
	 * around 1GHz, there can only be a few cycles which correspond to one
  1307. * nanosecond value, and any path through this code will inevitably
  1308. * take longer than that. However, with the kernel_ns value itself,
  1309. * the precision may be much lower, down to HZ granularity. If the
  1310. * first sampling of TSC against kernel_ns ends in the low part of the
  1311. * range, and the second in the high end of the range, we can get:
  1312. *
  1313. * (TSC - offset_low) * S + kns_old > (TSC - offset_high) * S + kns_new
  1314. *
  1315. * As the sampling errors potentially range in the thousands of cycles,
  1316. * it is possible such a time value has already been observed by the
  1317. * guest. To protect against this, we must compute the system time as
  1318. * observed by the guest and ensure the new system time is greater.
  1319. */
  1320. max_kernel_ns = 0;
  1321. if (vcpu->hv_clock.tsc_timestamp) {
  1322. max_kernel_ns = vcpu->last_guest_tsc -
  1323. vcpu->hv_clock.tsc_timestamp;
  1324. max_kernel_ns = pvclock_scale_delta(max_kernel_ns,
  1325. vcpu->hv_clock.tsc_to_system_mul,
  1326. vcpu->hv_clock.tsc_shift);
  1327. max_kernel_ns += vcpu->last_kernel_ns;
  1328. }
  1329. if (unlikely(vcpu->hw_tsc_khz != this_tsc_khz)) {
  1330. kvm_get_time_scale(NSEC_PER_SEC / 1000, this_tsc_khz,
  1331. &vcpu->hv_clock.tsc_shift,
  1332. &vcpu->hv_clock.tsc_to_system_mul);
  1333. vcpu->hw_tsc_khz = this_tsc_khz;
  1334. }
	/*
	 * With a master <monotonic time, tsc value> tuple,
	 * pvclock clock reads always increase at the (scaled) rate
	 * of guest TSC - no need to deal with sampling errors.
	 */
  1339. if (!use_master_clock) {
  1340. if (max_kernel_ns > kernel_ns)
  1341. kernel_ns = max_kernel_ns;
  1342. }
  1343. /* With all the info we got, fill in the values */
  1344. vcpu->hv_clock.tsc_timestamp = tsc_timestamp;
  1345. vcpu->hv_clock.system_time = kernel_ns + v->kvm->arch.kvmclock_offset;
  1346. vcpu->last_kernel_ns = kernel_ns;
  1347. vcpu->last_guest_tsc = tsc_timestamp;
  1348. /*
  1349. * The interface expects us to write an even number signaling that the
  1350. * update is finished. Since the guest won't see the intermediate
  1351. * state, we just increase by 2 at the end.
  1352. */
  1353. vcpu->hv_clock.version += 2;
  1354. if (unlikely(kvm_read_guest_cached(v->kvm, &vcpu->pv_time,
  1355. &guest_hv_clock, sizeof(guest_hv_clock))))
  1356. return 0;
  1357. /* retain PVCLOCK_GUEST_STOPPED if set in guest copy */
  1358. pvclock_flags = (guest_hv_clock.flags & PVCLOCK_GUEST_STOPPED);
  1359. if (vcpu->pvclock_set_guest_stopped_request) {
  1360. pvclock_flags |= PVCLOCK_GUEST_STOPPED;
  1361. vcpu->pvclock_set_guest_stopped_request = false;
  1362. }
  1363. /* If the host uses TSC clocksource, then it is stable */
  1364. if (use_master_clock)
  1365. pvclock_flags |= PVCLOCK_TSC_STABLE_BIT;
  1366. vcpu->hv_clock.flags = pvclock_flags;
  1367. kvm_write_guest_cached(v->kvm, &vcpu->pv_time,
  1368. &vcpu->hv_clock,
  1369. sizeof(vcpu->hv_clock));
  1370. return 0;
  1371. }
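/*
 * Consumer-side sketch (an assumption about the guest's pvclock read
 * sequence, not code from this file).  The even/odd version handshake
 * written above is what lets the guest read the structure locklessly:
 *
 *	do {
 *		version = src->version;
 *		rdtsc_barrier();
 *		delta = rdtsc() - src->tsc_timestamp;
 *		ns = src->system_time +
 *		     pvclock_scale_delta(delta, src->tsc_to_system_mul,
 *					 src->tsc_shift);
 *		rdtsc_barrier();
 *	} while ((src->version & 1) || version != src->version);
 */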
  1372. /*
  1373. * kvmclock updates which are isolated to a given vcpu, such as
  1374. * vcpu->cpu migration, should not allow system_timestamp from
  1375. * the rest of the vcpus to remain static. Otherwise ntp frequency
  1376. * correction applies to one vcpu's system_timestamp but not
  1377. * the others.
  1378. *
  1379. * So in those cases, request a kvmclock update for all vcpus.
  1380. * The worst case for a remote vcpu to update its kvmclock
  1381. * is then bounded by maximum nohz sleep latency.
  1382. */
  1383. static void kvm_gen_kvmclock_update(struct kvm_vcpu *v)
  1384. {
  1385. int i;
  1386. struct kvm *kvm = v->kvm;
  1387. struct kvm_vcpu *vcpu;
  1388. kvm_for_each_vcpu(i, vcpu, kvm) {
  1389. set_bit(KVM_REQ_CLOCK_UPDATE, &vcpu->requests);
  1390. kvm_vcpu_kick(vcpu);
  1391. }
  1392. }
  1393. static bool msr_mtrr_valid(unsigned msr)
  1394. {
  1395. switch (msr) {
  1396. case 0x200 ... 0x200 + 2 * KVM_NR_VAR_MTRR - 1:
  1397. case MSR_MTRRfix64K_00000:
  1398. case MSR_MTRRfix16K_80000:
  1399. case MSR_MTRRfix16K_A0000:
  1400. case MSR_MTRRfix4K_C0000:
  1401. case MSR_MTRRfix4K_C8000:
  1402. case MSR_MTRRfix4K_D0000:
  1403. case MSR_MTRRfix4K_D8000:
  1404. case MSR_MTRRfix4K_E0000:
  1405. case MSR_MTRRfix4K_E8000:
  1406. case MSR_MTRRfix4K_F0000:
  1407. case MSR_MTRRfix4K_F8000:
  1408. case MSR_MTRRdefType:
  1409. case MSR_IA32_CR_PAT:
  1410. return true;
  1411. case 0x2f8:
  1412. return true;
  1413. }
  1414. return false;
  1415. }
  1416. static bool valid_pat_type(unsigned t)
  1417. {
  1418. return t < 8 && (1 << t) & 0xf3; /* 0, 1, 4, 5, 6, 7 */
  1419. }
  1420. static bool valid_mtrr_type(unsigned t)
  1421. {
  1422. return t < 8 && (1 << t) & 0x73; /* 0, 1, 4, 5, 6 */
  1423. }
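/*
 * Example of the bitmask trick above (illustrative): memory type 2 is
 * reserved, and (1 << 2) & 0x73 == 0x04 & 0x73 == 0, so valid_mtrr_type(2)
 * returns false, while type 6 (write-back) gives 0x40 & 0x73 != 0.
 */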
  1424. static bool mtrr_valid(struct kvm_vcpu *vcpu, u32 msr, u64 data)
  1425. {
  1426. int i;
  1427. if (!msr_mtrr_valid(msr))
  1428. return false;
  1429. if (msr == MSR_IA32_CR_PAT) {
  1430. for (i = 0; i < 8; i++)
  1431. if (!valid_pat_type((data >> (i * 8)) & 0xff))
  1432. return false;
  1433. return true;
  1434. } else if (msr == MSR_MTRRdefType) {
  1435. if (data & ~0xcff)
  1436. return false;
  1437. return valid_mtrr_type(data & 0xff);
  1438. } else if (msr >= MSR_MTRRfix64K_00000 && msr <= MSR_MTRRfix4K_F8000) {
  1439. for (i = 0; i < 8 ; i++)
  1440. if (!valid_mtrr_type((data >> (i * 8)) & 0xff))
  1441. return false;
  1442. return true;
  1443. }
  1444. /* variable MTRRs */
  1445. return valid_mtrr_type(data & 0xff);
  1446. }
  1447. static int set_msr_mtrr(struct kvm_vcpu *vcpu, u32 msr, u64 data)
  1448. {
  1449. u64 *p = (u64 *)&vcpu->arch.mtrr_state.fixed_ranges;
  1450. if (!mtrr_valid(vcpu, msr, data))
  1451. return 1;
  1452. if (msr == MSR_MTRRdefType) {
  1453. vcpu->arch.mtrr_state.def_type = data;
  1454. vcpu->arch.mtrr_state.enabled = (data & 0xc00) >> 10;
  1455. } else if (msr == MSR_MTRRfix64K_00000)
  1456. p[0] = data;
  1457. else if (msr == MSR_MTRRfix16K_80000 || msr == MSR_MTRRfix16K_A0000)
  1458. p[1 + msr - MSR_MTRRfix16K_80000] = data;
  1459. else if (msr >= MSR_MTRRfix4K_C0000 && msr <= MSR_MTRRfix4K_F8000)
  1460. p[3 + msr - MSR_MTRRfix4K_C0000] = data;
  1461. else if (msr == MSR_IA32_CR_PAT)
  1462. vcpu->arch.pat = data;
  1463. else { /* Variable MTRRs */
  1464. int idx, is_mtrr_mask;
  1465. u64 *pt;
  1466. idx = (msr - 0x200) / 2;
  1467. is_mtrr_mask = msr - 0x200 - 2 * idx;
  1468. if (!is_mtrr_mask)
  1469. pt =
  1470. (u64 *)&vcpu->arch.mtrr_state.var_ranges[idx].base_lo;
  1471. else
  1472. pt =
  1473. (u64 *)&vcpu->arch.mtrr_state.var_ranges[idx].mask_lo;
  1474. *pt = data;
  1475. }
  1476. kvm_mmu_reset_context(vcpu);
  1477. return 0;
  1478. }
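/*
 * Index math for the variable-range MTRRs above (illustrative): MSR 0x200 is
 * MTRRphysBase0 and 0x201 is MTRRphysMask0, so for msr == 0x205,
 * idx = (0x205 - 0x200) / 2 = 2 and is_mtrr_mask = 1, i.e. the write lands in
 * var_ranges[2].mask_lo.
 */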
  1479. static int set_msr_mce(struct kvm_vcpu *vcpu, u32 msr, u64 data)
  1480. {
  1481. u64 mcg_cap = vcpu->arch.mcg_cap;
  1482. unsigned bank_num = mcg_cap & 0xff;
  1483. switch (msr) {
  1484. case MSR_IA32_MCG_STATUS:
  1485. vcpu->arch.mcg_status = data;
  1486. break;
  1487. case MSR_IA32_MCG_CTL:
  1488. if (!(mcg_cap & MCG_CTL_P))
  1489. return 1;
  1490. if (data != 0 && data != ~(u64)0)
  1491. return -1;
  1492. vcpu->arch.mcg_ctl = data;
  1493. break;
  1494. default:
  1495. if (msr >= MSR_IA32_MC0_CTL &&
  1496. msr < MSR_IA32_MC0_CTL + 4 * bank_num) {
  1497. u32 offset = msr - MSR_IA32_MC0_CTL;
			/*
			 * Only 0 or all 1s can be written to IA32_MCi_CTL.
			 * Some Linux kernels, though, clear bit 10 in bank 4 to
			 * work around a BIOS/GART TLB issue on AMD K8s; ignore
			 * this to avoid an uncaught #GP in the guest.
			 */
  1503. if ((offset & 0x3) == 0 &&
  1504. data != 0 && (data | (1 << 10)) != ~(u64)0)
  1505. return -1;
  1506. vcpu->arch.mce_banks[offset] = data;
  1507. break;
  1508. }
  1509. return 1;
  1510. }
  1511. return 0;
  1512. }
  1513. static int xen_hvm_config(struct kvm_vcpu *vcpu, u64 data)
  1514. {
  1515. struct kvm *kvm = vcpu->kvm;
  1516. int lm = is_long_mode(vcpu);
  1517. u8 *blob_addr = lm ? (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_64
  1518. : (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
  1519. u8 blob_size = lm ? kvm->arch.xen_hvm_config.blob_size_64
  1520. : kvm->arch.xen_hvm_config.blob_size_32;
  1521. u32 page_num = data & ~PAGE_MASK;
  1522. u64 page_addr = data & PAGE_MASK;
  1523. u8 *page;
  1524. int r;
  1525. r = -E2BIG;
  1526. if (page_num >= blob_size)
  1527. goto out;
  1528. r = -ENOMEM;
  1529. page = memdup_user(blob_addr + (page_num * PAGE_SIZE), PAGE_SIZE);
  1530. if (IS_ERR(page)) {
  1531. r = PTR_ERR(page);
  1532. goto out;
  1533. }
  1534. if (kvm_write_guest(kvm, page_addr, page, PAGE_SIZE))
  1535. goto out_free;
  1536. r = 0;
  1537. out_free:
  1538. kfree(page);
  1539. out:
  1540. return r;
  1541. }
  1542. static bool kvm_hv_hypercall_enabled(struct kvm *kvm)
  1543. {
  1544. return kvm->arch.hv_hypercall & HV_X64_MSR_HYPERCALL_ENABLE;
  1545. }
  1546. static bool kvm_hv_msr_partition_wide(u32 msr)
  1547. {
  1548. bool r = false;
  1549. switch (msr) {
  1550. case HV_X64_MSR_GUEST_OS_ID:
  1551. case HV_X64_MSR_HYPERCALL:
  1552. r = true;
  1553. break;
  1554. }
  1555. return r;
  1556. }
  1557. static int set_msr_hyperv_pw(struct kvm_vcpu *vcpu, u32 msr, u64 data)
  1558. {
  1559. struct kvm *kvm = vcpu->kvm;
  1560. switch (msr) {
  1561. case HV_X64_MSR_GUEST_OS_ID:
  1562. kvm->arch.hv_guest_os_id = data;
  1563. /* setting guest os id to zero disables hypercall page */
  1564. if (!kvm->arch.hv_guest_os_id)
  1565. kvm->arch.hv_hypercall &= ~HV_X64_MSR_HYPERCALL_ENABLE;
  1566. break;
  1567. case HV_X64_MSR_HYPERCALL: {
  1568. u64 gfn;
  1569. unsigned long addr;
  1570. u8 instructions[4];
  1571. /* if guest os id is not set hypercall should remain disabled */
  1572. if (!kvm->arch.hv_guest_os_id)
  1573. break;
  1574. if (!(data & HV_X64_MSR_HYPERCALL_ENABLE)) {
  1575. kvm->arch.hv_hypercall = data;
  1576. break;
  1577. }
  1578. gfn = data >> HV_X64_MSR_HYPERCALL_PAGE_ADDRESS_SHIFT;
  1579. addr = gfn_to_hva(kvm, gfn);
  1580. if (kvm_is_error_hva(addr))
  1581. return 1;
  1582. kvm_x86_ops->patch_hypercall(vcpu, instructions);
  1583. ((unsigned char *)instructions)[3] = 0xc3; /* ret */
  1584. if (__copy_to_user((void __user *)addr, instructions, 4))
  1585. return 1;
  1586. kvm->arch.hv_hypercall = data;
  1587. break;
  1588. }
  1589. default:
  1590. vcpu_unimpl(vcpu, "HYPER-V unimplemented wrmsr: 0x%x "
  1591. "data 0x%llx\n", msr, data);
  1592. return 1;
  1593. }
  1594. return 0;
  1595. }
  1596. static int set_msr_hyperv(struct kvm_vcpu *vcpu, u32 msr, u64 data)
  1597. {
  1598. switch (msr) {
  1599. case HV_X64_MSR_APIC_ASSIST_PAGE: {
  1600. unsigned long addr;
  1601. if (!(data & HV_X64_MSR_APIC_ASSIST_PAGE_ENABLE)) {
  1602. vcpu->arch.hv_vapic = data;
  1603. break;
  1604. }
  1605. addr = gfn_to_hva(vcpu->kvm, data >>
  1606. HV_X64_MSR_APIC_ASSIST_PAGE_ADDRESS_SHIFT);
  1607. if (kvm_is_error_hva(addr))
  1608. return 1;
  1609. if (__clear_user((void __user *)addr, PAGE_SIZE))
  1610. return 1;
  1611. vcpu->arch.hv_vapic = data;
  1612. break;
  1613. }
  1614. case HV_X64_MSR_EOI:
  1615. return kvm_hv_vapic_msr_write(vcpu, APIC_EOI, data);
  1616. case HV_X64_MSR_ICR:
  1617. return kvm_hv_vapic_msr_write(vcpu, APIC_ICR, data);
  1618. case HV_X64_MSR_TPR:
  1619. return kvm_hv_vapic_msr_write(vcpu, APIC_TASKPRI, data);
  1620. default:
  1621. vcpu_unimpl(vcpu, "HYPER-V unimplemented wrmsr: 0x%x "
  1622. "data 0x%llx\n", msr, data);
  1623. return 1;
  1624. }
  1625. return 0;
  1626. }
  1627. static int kvm_pv_enable_async_pf(struct kvm_vcpu *vcpu, u64 data)
  1628. {
  1629. gpa_t gpa = data & ~0x3f;
	/* Bits 2:5 are reserved; they should be zero */
  1631. if (data & 0x3c)
  1632. return 1;
  1633. vcpu->arch.apf.msr_val = data;
  1634. if (!(data & KVM_ASYNC_PF_ENABLED)) {
  1635. kvm_clear_async_pf_completion_queue(vcpu);
  1636. kvm_async_pf_hash_reset(vcpu);
  1637. return 0;
  1638. }
  1639. if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.apf.data, gpa,
  1640. sizeof(u32)))
  1641. return 1;
  1642. vcpu->arch.apf.send_user_only = !(data & KVM_ASYNC_PF_SEND_ALWAYS);
  1643. kvm_async_pf_wakeup_all(vcpu);
  1644. return 0;
  1645. }
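/*
 * Layout reminder for MSR_KVM_ASYNC_PF_EN (illustrative, matching the checks
 * above): bit 0 enables async page faults, bit 1 (KVM_ASYNC_PF_SEND_ALWAYS)
 * requests them even while the vcpu is in kernel mode, bits 2:5 must be zero,
 * and bits 6:63 give the 64-byte-aligned guest physical address of the 32-bit
 * token word; a guest would typically write (gpa | KVM_ASYNC_PF_ENABLED).
 */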
  1646. static void kvmclock_reset(struct kvm_vcpu *vcpu)
  1647. {
  1648. vcpu->arch.pv_time_enabled = false;
  1649. }
  1650. static void accumulate_steal_time(struct kvm_vcpu *vcpu)
  1651. {
  1652. u64 delta;
  1653. if (!(vcpu->arch.st.msr_val & KVM_MSR_ENABLED))
  1654. return;
  1655. delta = current->sched_info.run_delay - vcpu->arch.st.last_steal;
  1656. vcpu->arch.st.last_steal = current->sched_info.run_delay;
  1657. vcpu->arch.st.accum_steal = delta;
  1658. }
  1659. static void record_steal_time(struct kvm_vcpu *vcpu)
  1660. {
  1661. if (!(vcpu->arch.st.msr_val & KVM_MSR_ENABLED))
  1662. return;
  1663. if (unlikely(kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.st.stime,
  1664. &vcpu->arch.st.steal, sizeof(struct kvm_steal_time))))
  1665. return;
  1666. vcpu->arch.st.steal.steal += vcpu->arch.st.accum_steal;
  1667. vcpu->arch.st.steal.version += 2;
  1668. vcpu->arch.st.accum_steal = 0;
  1669. kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.st.stime,
  1670. &vcpu->arch.st.steal, sizeof(struct kvm_steal_time));
  1671. }
  1672. int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
  1673. {
  1674. bool pr = false;
  1675. u32 msr = msr_info->index;
  1676. u64 data = msr_info->data;
  1677. switch (msr) {
  1678. case MSR_AMD64_NB_CFG:
  1679. case MSR_IA32_UCODE_REV:
  1680. case MSR_IA32_UCODE_WRITE:
  1681. case MSR_VM_HSAVE_PA:
  1682. case MSR_AMD64_PATCH_LOADER:
  1683. case MSR_AMD64_BU_CFG2:
  1684. break;
  1685. case MSR_EFER:
  1686. return set_efer(vcpu, data);
  1687. case MSR_K7_HWCR:
  1688. data &= ~(u64)0x40; /* ignore flush filter disable */
  1689. data &= ~(u64)0x100; /* ignore ignne emulation enable */
  1690. data &= ~(u64)0x8; /* ignore TLB cache disable */
  1691. if (data != 0) {
  1692. vcpu_unimpl(vcpu, "unimplemented HWCR wrmsr: 0x%llx\n",
  1693. data);
  1694. return 1;
  1695. }
  1696. break;
  1697. case MSR_FAM10H_MMIO_CONF_BASE:
  1698. if (data != 0) {
  1699. vcpu_unimpl(vcpu, "unimplemented MMIO_CONF_BASE wrmsr: "
  1700. "0x%llx\n", data);
  1701. return 1;
  1702. }
  1703. break;
  1704. case MSR_IA32_DEBUGCTLMSR:
  1705. if (!data) {
  1706. /* We support the non-activated case already */
  1707. break;
  1708. } else if (data & ~(DEBUGCTLMSR_LBR | DEBUGCTLMSR_BTF)) {
  1709. /* Values other than LBR and BTF are vendor-specific,
  1710. thus reserved and should throw a #GP */
  1711. return 1;
  1712. }
  1713. vcpu_unimpl(vcpu, "%s: MSR_IA32_DEBUGCTLMSR 0x%llx, nop\n",
  1714. __func__, data);
  1715. break;
  1716. case 0x200 ... 0x2ff:
  1717. return set_msr_mtrr(vcpu, msr, data);
  1718. case MSR_IA32_APICBASE:
  1719. kvm_set_apic_base(vcpu, data);
  1720. break;
  1721. case APIC_BASE_MSR ... APIC_BASE_MSR + 0x3ff:
  1722. return kvm_x2apic_msr_write(vcpu, msr, data);
  1723. case MSR_IA32_TSCDEADLINE:
  1724. kvm_set_lapic_tscdeadline_msr(vcpu, data);
  1725. break;
  1726. case MSR_IA32_TSC_ADJUST:
  1727. if (guest_cpuid_has_tsc_adjust(vcpu)) {
  1728. if (!msr_info->host_initiated) {
  1729. u64 adj = data - vcpu->arch.ia32_tsc_adjust_msr;
  1730. kvm_x86_ops->adjust_tsc_offset(vcpu, adj, true);
  1731. }
  1732. vcpu->arch.ia32_tsc_adjust_msr = data;
  1733. }
  1734. break;
  1735. case MSR_IA32_MISC_ENABLE:
  1736. vcpu->arch.ia32_misc_enable_msr = data;
  1737. break;
  1738. case MSR_KVM_WALL_CLOCK_NEW:
  1739. case MSR_KVM_WALL_CLOCK:
  1740. vcpu->kvm->arch.wall_clock = data;
  1741. kvm_write_wall_clock(vcpu->kvm, data);
  1742. break;
  1743. case MSR_KVM_SYSTEM_TIME_NEW:
  1744. case MSR_KVM_SYSTEM_TIME: {
  1745. u64 gpa_offset;
  1746. kvmclock_reset(vcpu);
  1747. vcpu->arch.time = data;
  1748. kvm_make_request(KVM_REQ_GLOBAL_CLOCK_UPDATE, vcpu);
  1749. /* we verify if the enable bit is set... */
  1750. if (!(data & 1))
  1751. break;
  1752. gpa_offset = data & ~(PAGE_MASK | 1);
  1753. if (kvm_gfn_to_hva_cache_init(vcpu->kvm,
  1754. &vcpu->arch.pv_time, data & ~1ULL,
  1755. sizeof(struct pvclock_vcpu_time_info)))
  1756. vcpu->arch.pv_time_enabled = false;
  1757. else
  1758. vcpu->arch.pv_time_enabled = true;
  1759. break;
  1760. }
  1761. case MSR_KVM_ASYNC_PF_EN:
  1762. if (kvm_pv_enable_async_pf(vcpu, data))
  1763. return 1;
  1764. break;
  1765. case MSR_KVM_STEAL_TIME:
  1766. if (unlikely(!sched_info_on()))
  1767. return 1;
  1768. if (data & KVM_STEAL_RESERVED_MASK)
  1769. return 1;
  1770. if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.st.stime,
  1771. data & KVM_STEAL_VALID_BITS,
  1772. sizeof(struct kvm_steal_time)))
  1773. return 1;
  1774. vcpu->arch.st.msr_val = data;
  1775. if (!(data & KVM_MSR_ENABLED))
  1776. break;
  1777. vcpu->arch.st.last_steal = current->sched_info.run_delay;
  1778. preempt_disable();
  1779. accumulate_steal_time(vcpu);
  1780. preempt_enable();
  1781. kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu);
  1782. break;
  1783. case MSR_KVM_PV_EOI_EN:
  1784. if (kvm_lapic_enable_pv_eoi(vcpu, data))
  1785. return 1;
  1786. break;
  1787. case MSR_IA32_MCG_CTL:
  1788. case MSR_IA32_MCG_STATUS:
  1789. case MSR_IA32_MC0_CTL ... MSR_IA32_MC0_CTL + 4 * KVM_MAX_MCE_BANKS - 1:
  1790. return set_msr_mce(vcpu, msr, data);
	/*
	 * Performance counters are not protected by a CPUID bit,
	 * so we should check all of them in the generic path for the sake of
	 * cross-vendor migration.
	 * Writing a zero into the event select MSRs disables them,
	 * which we perfectly emulate ;-).  Any other value should at least be
	 * reported; some guests depend on them.
	 */
  1798. case MSR_K7_EVNTSEL0:
  1799. case MSR_K7_EVNTSEL1:
  1800. case MSR_K7_EVNTSEL2:
  1801. case MSR_K7_EVNTSEL3:
  1802. if (data != 0)
  1803. vcpu_unimpl(vcpu, "unimplemented perfctr wrmsr: "
  1804. "0x%x data 0x%llx\n", msr, data);
  1805. break;
  1806. /* at least RHEL 4 unconditionally writes to the perfctr registers,
  1807. * so we ignore writes to make it happy.
  1808. */
  1809. case MSR_K7_PERFCTR0:
  1810. case MSR_K7_PERFCTR1:
  1811. case MSR_K7_PERFCTR2:
  1812. case MSR_K7_PERFCTR3:
  1813. vcpu_unimpl(vcpu, "unimplemented perfctr wrmsr: "
  1814. "0x%x data 0x%llx\n", msr, data);
  1815. break;
  1816. case MSR_P6_PERFCTR0:
  1817. case MSR_P6_PERFCTR1:
  1818. pr = true;
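		/* fall through */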
  1819. case MSR_P6_EVNTSEL0:
  1820. case MSR_P6_EVNTSEL1:
  1821. if (kvm_pmu_msr(vcpu, msr))
  1822. return kvm_pmu_set_msr(vcpu, msr_info);
  1823. if (pr || data != 0)
  1824. vcpu_unimpl(vcpu, "disabled perfctr wrmsr: "
  1825. "0x%x data 0x%llx\n", msr, data);
  1826. break;
  1827. case MSR_K7_CLK_CTL:
  1828. /*
  1829. * Ignore all writes to this no longer documented MSR.
  1830. * Writes are only relevant for old K7 processors,
  1831. * all pre-dating SVM, but a recommended workaround from
  1832. * AMD for these chips. It is possible to specify the
  1833. * affected processor models on the command line, hence
  1834. * the need to ignore the workaround.
  1835. */
  1836. break;
  1837. case HV_X64_MSR_GUEST_OS_ID ... HV_X64_MSR_SINT15:
  1838. if (kvm_hv_msr_partition_wide(msr)) {
  1839. int r;
  1840. mutex_lock(&vcpu->kvm->lock);
  1841. r = set_msr_hyperv_pw(vcpu, msr, data);
  1842. mutex_unlock(&vcpu->kvm->lock);
  1843. return r;
  1844. } else
  1845. return set_msr_hyperv(vcpu, msr, data);
  1846. break;
  1847. case MSR_IA32_BBL_CR_CTL3:
  1848. /* Drop writes to this legacy MSR -- see rdmsr
  1849. * counterpart for further detail.
  1850. */
  1851. vcpu_unimpl(vcpu, "ignored wrmsr: 0x%x data %llx\n", msr, data);
  1852. break;
  1853. case MSR_AMD64_OSVW_ID_LENGTH:
  1854. if (!guest_cpuid_has_osvw(vcpu))
  1855. return 1;
  1856. vcpu->arch.osvw.length = data;
  1857. break;
  1858. case MSR_AMD64_OSVW_STATUS:
  1859. if (!guest_cpuid_has_osvw(vcpu))
  1860. return 1;
  1861. vcpu->arch.osvw.status = data;
  1862. break;
  1863. default:
  1864. if (msr && (msr == vcpu->kvm->arch.xen_hvm_config.msr))
  1865. return xen_hvm_config(vcpu, data);
  1866. if (kvm_pmu_msr(vcpu, msr))
  1867. return kvm_pmu_set_msr(vcpu, msr_info);
  1868. if (!ignore_msrs) {
  1869. vcpu_unimpl(vcpu, "unhandled wrmsr: 0x%x data %llx\n",
  1870. msr, data);
  1871. return 1;
  1872. } else {
  1873. vcpu_unimpl(vcpu, "ignored wrmsr: 0x%x data %llx\n",
  1874. msr, data);
  1875. break;
  1876. }
  1877. }
  1878. return 0;
  1879. }
  1880. EXPORT_SYMBOL_GPL(kvm_set_msr_common);
  1881. /*
  1882. * Reads an msr value (of 'msr_index') into 'pdata'.
  1883. * Returns 0 on success, non-0 otherwise.
  1884. * Assumes vcpu_load() was already called.
  1885. */
  1886. int kvm_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
  1887. {
  1888. return kvm_x86_ops->get_msr(vcpu, msr_index, pdata);
  1889. }
  1890. static int get_msr_mtrr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
  1891. {
  1892. u64 *p = (u64 *)&vcpu->arch.mtrr_state.fixed_ranges;
  1893. if (!msr_mtrr_valid(msr))
  1894. return 1;
  1895. if (msr == MSR_MTRRdefType)
  1896. *pdata = vcpu->arch.mtrr_state.def_type +
  1897. (vcpu->arch.mtrr_state.enabled << 10);
  1898. else if (msr == MSR_MTRRfix64K_00000)
  1899. *pdata = p[0];
  1900. else if (msr == MSR_MTRRfix16K_80000 || msr == MSR_MTRRfix16K_A0000)
  1901. *pdata = p[1 + msr - MSR_MTRRfix16K_80000];
  1902. else if (msr >= MSR_MTRRfix4K_C0000 && msr <= MSR_MTRRfix4K_F8000)
  1903. *pdata = p[3 + msr - MSR_MTRRfix4K_C0000];
  1904. else if (msr == MSR_IA32_CR_PAT)
  1905. *pdata = vcpu->arch.pat;
  1906. else { /* Variable MTRRs */
  1907. int idx, is_mtrr_mask;
  1908. u64 *pt;
  1909. idx = (msr - 0x200) / 2;
  1910. is_mtrr_mask = msr - 0x200 - 2 * idx;
  1911. if (!is_mtrr_mask)
  1912. pt =
  1913. (u64 *)&vcpu->arch.mtrr_state.var_ranges[idx].base_lo;
  1914. else
  1915. pt =
  1916. (u64 *)&vcpu->arch.mtrr_state.var_ranges[idx].mask_lo;
  1917. *pdata = *pt;
  1918. }
  1919. return 0;
  1920. }
  1921. static int get_msr_mce(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
  1922. {
  1923. u64 data;
  1924. u64 mcg_cap = vcpu->arch.mcg_cap;
  1925. unsigned bank_num = mcg_cap & 0xff;
  1926. switch (msr) {
  1927. case MSR_IA32_P5_MC_ADDR:
  1928. case MSR_IA32_P5_MC_TYPE:
  1929. data = 0;
  1930. break;
  1931. case MSR_IA32_MCG_CAP:
  1932. data = vcpu->arch.mcg_cap;
  1933. break;
  1934. case MSR_IA32_MCG_CTL:
  1935. if (!(mcg_cap & MCG_CTL_P))
  1936. return 1;
  1937. data = vcpu->arch.mcg_ctl;
  1938. break;
  1939. case MSR_IA32_MCG_STATUS:
  1940. data = vcpu->arch.mcg_status;
  1941. break;
  1942. default:
  1943. if (msr >= MSR_IA32_MC0_CTL &&
  1944. msr < MSR_IA32_MC0_CTL + 4 * bank_num) {
  1945. u32 offset = msr - MSR_IA32_MC0_CTL;
  1946. data = vcpu->arch.mce_banks[offset];
  1947. break;
  1948. }
  1949. return 1;
  1950. }
  1951. *pdata = data;
  1952. return 0;
  1953. }
  1954. static int get_msr_hyperv_pw(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
  1955. {
  1956. u64 data = 0;
  1957. struct kvm *kvm = vcpu->kvm;
  1958. switch (msr) {
  1959. case HV_X64_MSR_GUEST_OS_ID:
  1960. data = kvm->arch.hv_guest_os_id;
  1961. break;
  1962. case HV_X64_MSR_HYPERCALL:
  1963. data = kvm->arch.hv_hypercall;
  1964. break;
  1965. default:
  1966. vcpu_unimpl(vcpu, "Hyper-V unhandled rdmsr: 0x%x\n", msr);
  1967. return 1;
  1968. }
  1969. *pdata = data;
  1970. return 0;
  1971. }
  1972. static int get_msr_hyperv(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
  1973. {
  1974. u64 data = 0;
  1975. switch (msr) {
  1976. case HV_X64_MSR_VP_INDEX: {
  1977. int r;
  1978. struct kvm_vcpu *v;
  1979. kvm_for_each_vcpu(r, v, vcpu->kvm)
  1980. if (v == vcpu)
  1981. data = r;
  1982. break;
  1983. }
  1984. case HV_X64_MSR_EOI:
  1985. return kvm_hv_vapic_msr_read(vcpu, APIC_EOI, pdata);
  1986. case HV_X64_MSR_ICR:
  1987. return kvm_hv_vapic_msr_read(vcpu, APIC_ICR, pdata);
  1988. case HV_X64_MSR_TPR:
  1989. return kvm_hv_vapic_msr_read(vcpu, APIC_TASKPRI, pdata);
  1990. case HV_X64_MSR_APIC_ASSIST_PAGE:
  1991. data = vcpu->arch.hv_vapic;
  1992. break;
  1993. default:
  1994. vcpu_unimpl(vcpu, "Hyper-V unhandled rdmsr: 0x%x\n", msr);
  1995. return 1;
  1996. }
  1997. *pdata = data;
  1998. return 0;
  1999. }
  2000. int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
  2001. {
  2002. u64 data;
  2003. switch (msr) {
  2004. case MSR_IA32_PLATFORM_ID:
  2005. case MSR_IA32_EBL_CR_POWERON:
  2006. case MSR_IA32_DEBUGCTLMSR:
  2007. case MSR_IA32_LASTBRANCHFROMIP:
  2008. case MSR_IA32_LASTBRANCHTOIP:
  2009. case MSR_IA32_LASTINTFROMIP:
  2010. case MSR_IA32_LASTINTTOIP:
  2011. case MSR_K8_SYSCFG:
  2012. case MSR_K7_HWCR:
  2013. case MSR_VM_HSAVE_PA:
  2014. case MSR_K7_EVNTSEL0:
  2015. case MSR_K7_PERFCTR0:
  2016. case MSR_K8_INT_PENDING_MSG:
  2017. case MSR_AMD64_NB_CFG:
  2018. case MSR_FAM10H_MMIO_CONF_BASE:
  2019. case MSR_AMD64_BU_CFG2:
  2020. data = 0;
  2021. break;
  2022. case MSR_P6_PERFCTR0:
  2023. case MSR_P6_PERFCTR1:
  2024. case MSR_P6_EVNTSEL0:
  2025. case MSR_P6_EVNTSEL1:
  2026. if (kvm_pmu_msr(vcpu, msr))
  2027. return kvm_pmu_get_msr(vcpu, msr, pdata);
  2028. data = 0;
  2029. break;
  2030. case MSR_IA32_UCODE_REV:
  2031. data = 0x100000000ULL;
  2032. break;
  2033. case MSR_MTRRcap:
  2034. data = 0x500 | KVM_NR_VAR_MTRR;
  2035. break;
  2036. case 0x200 ... 0x2ff:
  2037. return get_msr_mtrr(vcpu, msr, pdata);
  2038. case 0xcd: /* fsb frequency */
  2039. data = 3;
  2040. break;
	/*
	 * MSR_EBC_FREQUENCY_ID
	 * Conservative value valid for even the basic CPU models.
	 * Models 0,1: 000 in bits 23:21 indicating a bus speed of
	 * 100MHz; model 2: 000 in bits 18:16 indicating 100MHz,
	 * and 266MHz for models 3 and 4.  Set Core Clock
	 * Frequency to System Bus Frequency Ratio to 1 (bits
	 * 31:24) even though these are only valid for CPU
	 * models > 2; otherwise guests may end up dividing or
	 * multiplying by zero.
	 */
  2052. case MSR_EBC_FREQUENCY_ID:
  2053. data = 1 << 24;
  2054. break;
  2055. case MSR_IA32_APICBASE:
  2056. data = kvm_get_apic_base(vcpu);
  2057. break;
  2058. case APIC_BASE_MSR ... APIC_BASE_MSR + 0x3ff:
  2059. return kvm_x2apic_msr_read(vcpu, msr, pdata);
  2060. break;
  2061. case MSR_IA32_TSCDEADLINE:
  2062. data = kvm_get_lapic_tscdeadline_msr(vcpu);
  2063. break;
  2064. case MSR_IA32_TSC_ADJUST:
  2065. data = (u64)vcpu->arch.ia32_tsc_adjust_msr;
  2066. break;
  2067. case MSR_IA32_MISC_ENABLE:
  2068. data = vcpu->arch.ia32_misc_enable_msr;
  2069. break;
  2070. case MSR_IA32_PERF_STATUS:
  2071. /* TSC increment by tick */
  2072. data = 1000ULL;
  2073. /* CPU multiplier */
  2074. data |= (((uint64_t)4ULL) << 40);
  2075. break;
  2076. case MSR_EFER:
  2077. data = vcpu->arch.efer;
  2078. break;
  2079. case MSR_KVM_WALL_CLOCK:
  2080. case MSR_KVM_WALL_CLOCK_NEW:
  2081. data = vcpu->kvm->arch.wall_clock;
  2082. break;
  2083. case MSR_KVM_SYSTEM_TIME:
  2084. case MSR_KVM_SYSTEM_TIME_NEW:
  2085. data = vcpu->arch.time;
  2086. break;
  2087. case MSR_KVM_ASYNC_PF_EN:
  2088. data = vcpu->arch.apf.msr_val;
  2089. break;
  2090. case MSR_KVM_STEAL_TIME:
  2091. data = vcpu->arch.st.msr_val;
  2092. break;
  2093. case MSR_KVM_PV_EOI_EN:
  2094. data = vcpu->arch.pv_eoi.msr_val;
  2095. break;
  2096. case MSR_IA32_P5_MC_ADDR:
  2097. case MSR_IA32_P5_MC_TYPE:
  2098. case MSR_IA32_MCG_CAP:
  2099. case MSR_IA32_MCG_CTL:
  2100. case MSR_IA32_MCG_STATUS:
  2101. case MSR_IA32_MC0_CTL ... MSR_IA32_MC0_CTL + 4 * KVM_MAX_MCE_BANKS - 1:
  2102. return get_msr_mce(vcpu, msr, pdata);
  2103. case MSR_K7_CLK_CTL:
		/*
		 * Provide expected ramp-up count for K7.  All other
		 * fields are set to zero, indicating minimum divisors for
		 * every field.
		 *
		 * This prevents guest kernels on an AMD host with CPU
		 * type 6, model 8 and higher from exploding due to
		 * the rdmsr failing.
		 */
  2113. data = 0x20000000;
  2114. break;
  2115. case HV_X64_MSR_GUEST_OS_ID ... HV_X64_MSR_SINT15:
  2116. if (kvm_hv_msr_partition_wide(msr)) {
  2117. int r;
  2118. mutex_lock(&vcpu->kvm->lock);
  2119. r = get_msr_hyperv_pw(vcpu, msr, pdata);
  2120. mutex_unlock(&vcpu->kvm->lock);
  2121. return r;
  2122. } else
  2123. return get_msr_hyperv(vcpu, msr, pdata);
  2124. break;
  2125. case MSR_IA32_BBL_CR_CTL3:
  2126. /* This legacy MSR exists but isn't fully documented in current
  2127. * silicon. It is however accessed by winxp in very narrow
  2128. * scenarios where it sets bit #19, itself documented as
  2129. * a "reserved" bit. Best effort attempt to source coherent
  2130. * read data here should the balance of the register be
  2131. * interpreted by the guest:
  2132. *
  2133. * L2 cache control register 3: 64GB range, 256KB size,
  2134. * enabled, latency 0x1, configured
  2135. */
  2136. data = 0xbe702111;
  2137. break;
  2138. case MSR_AMD64_OSVW_ID_LENGTH:
  2139. if (!guest_cpuid_has_osvw(vcpu))
  2140. return 1;
  2141. data = vcpu->arch.osvw.length;
  2142. break;
  2143. case MSR_AMD64_OSVW_STATUS:
  2144. if (!guest_cpuid_has_osvw(vcpu))
  2145. return 1;
  2146. data = vcpu->arch.osvw.status;
  2147. break;
  2148. default:
  2149. if (kvm_pmu_msr(vcpu, msr))
  2150. return kvm_pmu_get_msr(vcpu, msr, pdata);
  2151. if (!ignore_msrs) {
  2152. vcpu_unimpl(vcpu, "unhandled rdmsr: 0x%x\n", msr);
  2153. return 1;
  2154. } else {
  2155. vcpu_unimpl(vcpu, "ignored rdmsr: 0x%x\n", msr);
  2156. data = 0;
  2157. }
  2158. break;
  2159. }
  2160. *pdata = data;
  2161. return 0;
  2162. }
  2163. EXPORT_SYMBOL_GPL(kvm_get_msr_common);
  2164. /*
  2165. * Read or write a bunch of msrs. All parameters are kernel addresses.
  2166. *
  2167. * @return number of msrs set successfully.
  2168. */
  2169. static int __msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs *msrs,
  2170. struct kvm_msr_entry *entries,
  2171. int (*do_msr)(struct kvm_vcpu *vcpu,
  2172. unsigned index, u64 *data))
  2173. {
  2174. int i, idx;
  2175. idx = srcu_read_lock(&vcpu->kvm->srcu);
  2176. for (i = 0; i < msrs->nmsrs; ++i)
  2177. if (do_msr(vcpu, entries[i].index, &entries[i].data))
  2178. break;
  2179. srcu_read_unlock(&vcpu->kvm->srcu, idx);
  2180. return i;
  2181. }
  2182. /*
  2183. * Read or write a bunch of msrs. Parameters are user addresses.
  2184. *
  2185. * @return number of msrs set successfully.
  2186. */
  2187. static int msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs __user *user_msrs,
  2188. int (*do_msr)(struct kvm_vcpu *vcpu,
  2189. unsigned index, u64 *data),
  2190. int writeback)
  2191. {
  2192. struct kvm_msrs msrs;
  2193. struct kvm_msr_entry *entries;
  2194. int r, n;
  2195. unsigned size;
  2196. r = -EFAULT;
  2197. if (copy_from_user(&msrs, user_msrs, sizeof msrs))
  2198. goto out;
  2199. r = -E2BIG;
  2200. if (msrs.nmsrs >= MAX_IO_MSRS)
  2201. goto out;
  2202. size = sizeof(struct kvm_msr_entry) * msrs.nmsrs;
  2203. entries = memdup_user(user_msrs->entries, size);
  2204. if (IS_ERR(entries)) {
  2205. r = PTR_ERR(entries);
  2206. goto out;
  2207. }
  2208. r = n = __msr_io(vcpu, &msrs, entries, do_msr);
  2209. if (r < 0)
  2210. goto out_free;
  2211. r = -EFAULT;
  2212. if (writeback && copy_to_user(user_msrs->entries, entries, size))
  2213. goto out_free;
  2214. r = n;
  2215. out_free:
  2216. kfree(entries);
  2217. out:
  2218. return r;
  2219. }
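/*
 * Illustrative userspace counterpart (an assumption, not part of this file):
 * a VMM reading one MSR through the vcpu ioctl that funnels into msr_io().
 * "vcpu_fd" is assumed to come from KVM_CREATE_VCPU:
 *
 *	struct { struct kvm_msrs hdr; struct kvm_msr_entry e[1]; } req = {
 *		.hdr.nmsrs = 1,
 *		.e[0].index = MSR_IA32_TSC,
 *	};
 *
 *	if (ioctl(vcpu_fd, KVM_GET_MSRS, &req) == 1)
 *		printf("guest TSC = %llu\n",
 *		       (unsigned long long)req.e[0].data);
 *
 * The ioctl return value is the number of entries processed, as computed by
 * __msr_io() above.
 */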
  2220. int kvm_dev_ioctl_check_extension(long ext)
  2221. {
  2222. int r;
  2223. switch (ext) {
  2224. case KVM_CAP_IRQCHIP:
  2225. case KVM_CAP_HLT:
  2226. case KVM_CAP_MMU_SHADOW_CACHE_CONTROL:
  2227. case KVM_CAP_SET_TSS_ADDR:
  2228. case KVM_CAP_EXT_CPUID:
  2229. case KVM_CAP_CLOCKSOURCE:
  2230. case KVM_CAP_PIT:
  2231. case KVM_CAP_NOP_IO_DELAY:
  2232. case KVM_CAP_MP_STATE:
  2233. case KVM_CAP_SYNC_MMU:
  2234. case KVM_CAP_USER_NMI:
  2235. case KVM_CAP_REINJECT_CONTROL:
  2236. case KVM_CAP_IRQ_INJECT_STATUS:
  2237. case KVM_CAP_IRQFD:
  2238. case KVM_CAP_IOEVENTFD:
  2239. case KVM_CAP_PIT2:
  2240. case KVM_CAP_PIT_STATE2:
  2241. case KVM_CAP_SET_IDENTITY_MAP_ADDR:
  2242. case KVM_CAP_XEN_HVM:
  2243. case KVM_CAP_ADJUST_CLOCK:
  2244. case KVM_CAP_VCPU_EVENTS:
  2245. case KVM_CAP_HYPERV:
  2246. case KVM_CAP_HYPERV_VAPIC:
  2247. case KVM_CAP_HYPERV_SPIN:
  2248. case KVM_CAP_PCI_SEGMENT:
  2249. case KVM_CAP_DEBUGREGS:
  2250. case KVM_CAP_X86_ROBUST_SINGLESTEP:
  2251. case KVM_CAP_XSAVE:
  2252. case KVM_CAP_ASYNC_PF:
  2253. case KVM_CAP_GET_TSC_KHZ:
  2254. case KVM_CAP_KVMCLOCK_CTRL:
  2255. case KVM_CAP_READONLY_MEM:
  2256. #ifdef CONFIG_KVM_DEVICE_ASSIGNMENT
  2257. case KVM_CAP_ASSIGN_DEV_IRQ:
  2258. case KVM_CAP_PCI_2_3:
  2259. #endif
  2260. r = 1;
  2261. break;
  2262. case KVM_CAP_COALESCED_MMIO:
  2263. r = KVM_COALESCED_MMIO_PAGE_OFFSET;
  2264. break;
  2265. case KVM_CAP_VAPIC:
  2266. r = !kvm_x86_ops->cpu_has_accelerated_tpr();
  2267. break;
  2268. case KVM_CAP_NR_VCPUS:
  2269. r = KVM_SOFT_MAX_VCPUS;
  2270. break;
  2271. case KVM_CAP_MAX_VCPUS:
  2272. r = KVM_MAX_VCPUS;
  2273. break;
  2274. case KVM_CAP_NR_MEMSLOTS:
  2275. r = KVM_USER_MEM_SLOTS;
  2276. break;
  2277. case KVM_CAP_PV_MMU: /* obsolete */
  2278. r = 0;
  2279. break;
  2280. #ifdef CONFIG_KVM_DEVICE_ASSIGNMENT
  2281. case KVM_CAP_IOMMU:
  2282. r = iommu_present(&pci_bus_type);
  2283. break;
  2284. #endif
  2285. case KVM_CAP_MCE:
  2286. r = KVM_MAX_MCE_BANKS;
  2287. break;
  2288. case KVM_CAP_XCRS:
  2289. r = cpu_has_xsave;
  2290. break;
  2291. case KVM_CAP_TSC_CONTROL:
  2292. r = kvm_has_tsc_control;
  2293. break;
  2294. case KVM_CAP_TSC_DEADLINE_TIMER:
  2295. r = boot_cpu_has(X86_FEATURE_TSC_DEADLINE_TIMER);
  2296. break;
  2297. default:
  2298. r = 0;
  2299. break;
  2300. }
  2301. return r;
  2302. }
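/*
 * Illustrative sketch (not from this file): userspace typically probes these
 * capabilities with KVM_CHECK_EXTENSION before relying on them, e.g.:
 *
 *	int nr_slots = ioctl(kvm_fd, KVM_CHECK_EXTENSION, KVM_CAP_NR_MEMSLOTS);
 *
 *	if (ioctl(kvm_fd, KVM_CHECK_EXTENSION, KVM_CAP_TSC_CONTROL) > 0)
 *		ioctl(vcpu_fd, KVM_SET_TSC_KHZ, desired_khz);
 *
 * A return value of 0 means "not supported"; positive values carry the
 * capability-specific answer computed above.
 */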
  2303. long kvm_arch_dev_ioctl(struct file *filp,
  2304. unsigned int ioctl, unsigned long arg)
  2305. {
  2306. void __user *argp = (void __user *)arg;
  2307. long r;
  2308. switch (ioctl) {
  2309. case KVM_GET_MSR_INDEX_LIST: {
  2310. struct kvm_msr_list __user *user_msr_list = argp;
  2311. struct kvm_msr_list msr_list;
  2312. unsigned n;
  2313. r = -EFAULT;
  2314. if (copy_from_user(&msr_list, user_msr_list, sizeof msr_list))
  2315. goto out;
  2316. n = msr_list.nmsrs;
  2317. msr_list.nmsrs = num_msrs_to_save + ARRAY_SIZE(emulated_msrs);
  2318. if (copy_to_user(user_msr_list, &msr_list, sizeof msr_list))
  2319. goto out;
  2320. r = -E2BIG;
  2321. if (n < msr_list.nmsrs)
  2322. goto out;
  2323. r = -EFAULT;
  2324. if (copy_to_user(user_msr_list->indices, &msrs_to_save,
  2325. num_msrs_to_save * sizeof(u32)))
  2326. goto out;
  2327. if (copy_to_user(user_msr_list->indices + num_msrs_to_save,
  2328. &emulated_msrs,
  2329. ARRAY_SIZE(emulated_msrs) * sizeof(u32)))
  2330. goto out;
  2331. r = 0;
  2332. break;
  2333. }
  2334. case KVM_GET_SUPPORTED_CPUID: {
  2335. struct kvm_cpuid2 __user *cpuid_arg = argp;
  2336. struct kvm_cpuid2 cpuid;
  2337. r = -EFAULT;
  2338. if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
  2339. goto out;
  2340. r = kvm_dev_ioctl_get_supported_cpuid(&cpuid,
  2341. cpuid_arg->entries);
  2342. if (r)
  2343. goto out;
  2344. r = -EFAULT;
  2345. if (copy_to_user(cpuid_arg, &cpuid, sizeof cpuid))
  2346. goto out;
  2347. r = 0;
  2348. break;
  2349. }
  2350. case KVM_X86_GET_MCE_CAP_SUPPORTED: {
  2351. u64 mce_cap;
  2352. mce_cap = KVM_MCE_CAP_SUPPORTED;
  2353. r = -EFAULT;
  2354. if (copy_to_user(argp, &mce_cap, sizeof mce_cap))
  2355. goto out;
  2356. r = 0;
  2357. break;
  2358. }
  2359. default:
  2360. r = -EINVAL;
  2361. }
  2362. out:
  2363. return r;
  2364. }
  2365. static void wbinvd_ipi(void *garbage)
  2366. {
  2367. wbinvd();
  2368. }
  2369. static bool need_emulate_wbinvd(struct kvm_vcpu *vcpu)
  2370. {
  2371. return vcpu->kvm->arch.iommu_domain &&
  2372. !(vcpu->kvm->arch.iommu_flags & KVM_IOMMU_CACHE_COHERENCY);
  2373. }
  2374. void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
  2375. {
	/* WBINVD may be executed by the guest; address that here. */
  2377. if (need_emulate_wbinvd(vcpu)) {
  2378. if (kvm_x86_ops->has_wbinvd_exit())
  2379. cpumask_set_cpu(cpu, vcpu->arch.wbinvd_dirty_mask);
  2380. else if (vcpu->cpu != -1 && vcpu->cpu != cpu)
  2381. smp_call_function_single(vcpu->cpu,
  2382. wbinvd_ipi, NULL, 1);
  2383. }
  2384. kvm_x86_ops->vcpu_load(vcpu, cpu);
  2385. /* Apply any externally detected TSC adjustments (due to suspend) */
  2386. if (unlikely(vcpu->arch.tsc_offset_adjustment)) {
  2387. adjust_tsc_offset_host(vcpu, vcpu->arch.tsc_offset_adjustment);
  2388. vcpu->arch.tsc_offset_adjustment = 0;
  2389. set_bit(KVM_REQ_CLOCK_UPDATE, &vcpu->requests);
  2390. }
  2391. if (unlikely(vcpu->cpu != cpu) || check_tsc_unstable()) {
  2392. s64 tsc_delta = !vcpu->arch.last_host_tsc ? 0 :
  2393. native_read_tsc() - vcpu->arch.last_host_tsc;
  2394. if (tsc_delta < 0)
  2395. mark_tsc_unstable("KVM discovered backwards TSC");
  2396. if (check_tsc_unstable()) {
  2397. u64 offset = kvm_x86_ops->compute_tsc_offset(vcpu,
  2398. vcpu->arch.last_guest_tsc);
  2399. kvm_x86_ops->write_tsc_offset(vcpu, offset);
  2400. vcpu->arch.tsc_catchup = 1;
  2401. }
  2402. /*
  2403. * On a host with synchronized TSC, there is no need to update
  2404. * kvmclock on vcpu->cpu migration
  2405. */
  2406. if (!vcpu->kvm->arch.use_master_clock || vcpu->cpu == -1)
  2407. kvm_make_request(KVM_REQ_GLOBAL_CLOCK_UPDATE, vcpu);
  2408. if (vcpu->cpu != cpu)
  2409. kvm_migrate_timers(vcpu);
  2410. vcpu->cpu = cpu;
  2411. }
  2412. accumulate_steal_time(vcpu);
  2413. kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu);
  2414. }
  2415. void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
  2416. {
  2417. kvm_x86_ops->vcpu_put(vcpu);
  2418. kvm_put_guest_fpu(vcpu);
  2419. vcpu->arch.last_host_tsc = native_read_tsc();
  2420. }
  2421. static int kvm_vcpu_ioctl_get_lapic(struct kvm_vcpu *vcpu,
  2422. struct kvm_lapic_state *s)
  2423. {
  2424. kvm_x86_ops->sync_pir_to_irr(vcpu);
  2425. memcpy(s->regs, vcpu->arch.apic->regs, sizeof *s);
  2426. return 0;
  2427. }
  2428. static int kvm_vcpu_ioctl_set_lapic(struct kvm_vcpu *vcpu,
  2429. struct kvm_lapic_state *s)
  2430. {
  2431. kvm_apic_post_state_restore(vcpu, s);
  2432. update_cr8_intercept(vcpu);
  2433. return 0;
  2434. }
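/*
 * Inject an external interrupt requested by userspace (KVM_INTERRUPT).
 * Only valid when the interrupt controller is emulated in userspace;
 * with an in-kernel irqchip this returns -ENXIO.
 */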
  2435. static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
  2436. struct kvm_interrupt *irq)
  2437. {
  2438. if (irq->irq >= KVM_NR_INTERRUPTS)
  2439. return -EINVAL;
  2440. if (irqchip_in_kernel(vcpu->kvm))
  2441. return -ENXIO;
  2442. kvm_queue_interrupt(vcpu, irq->irq, false);
  2443. kvm_make_request(KVM_REQ_EVENT, vcpu);
  2444. return 0;
  2445. }
  2446. static int kvm_vcpu_ioctl_nmi(struct kvm_vcpu *vcpu)
  2447. {
  2448. kvm_inject_nmi(vcpu);
  2449. return 0;
  2450. }
  2451. static int vcpu_ioctl_tpr_access_reporting(struct kvm_vcpu *vcpu,
  2452. struct kvm_tpr_access_ctl *tac)
  2453. {
  2454. if (tac->flags)
  2455. return -EINVAL;
  2456. vcpu->arch.tpr_access_reporting = !!tac->enabled;
  2457. return 0;
  2458. }
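/*
 * KVM_X86_SETUP_MCE: validate the requested MCG_CAP value (bank count and
 * supported capability bits) and initialize MCG_CTL/MCi_CTL to all 1s.
 */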
  2459. static int kvm_vcpu_ioctl_x86_setup_mce(struct kvm_vcpu *vcpu,
  2460. u64 mcg_cap)
  2461. {
  2462. int r;
  2463. unsigned bank_num = mcg_cap & 0xff, bank;
  2464. r = -EINVAL;
  2465. if (!bank_num || bank_num >= KVM_MAX_MCE_BANKS)
  2466. goto out;
  2467. if (mcg_cap & ~(KVM_MCE_CAP_SUPPORTED | 0xff | 0xff0000))
  2468. goto out;
  2469. r = 0;
  2470. vcpu->arch.mcg_cap = mcg_cap;
  2471. /* Init IA32_MCG_CTL to all 1s */
  2472. if (mcg_cap & MCG_CTL_P)
  2473. vcpu->arch.mcg_ctl = ~(u64)0;
  2474. /* Init IA32_MCi_CTL to all 1s */
  2475. for (bank = 0; bank < bank_num; bank++)
  2476. vcpu->arch.mce_banks[bank*4] = ~(u64)0;
  2477. out:
  2478. return r;
  2479. }
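/*
 * KVM_X86_SET_MCE: inject a machine-check event into the vCPU's MCE banks.
 * Uncorrected errors raise #MC (or a triple fault if MCIP is already set or
 * CR4.MCE is clear); corrected errors are only logged in the bank registers.
 */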
  2480. static int kvm_vcpu_ioctl_x86_set_mce(struct kvm_vcpu *vcpu,
  2481. struct kvm_x86_mce *mce)
  2482. {
  2483. u64 mcg_cap = vcpu->arch.mcg_cap;
  2484. unsigned bank_num = mcg_cap & 0xff;
  2485. u64 *banks = vcpu->arch.mce_banks;
  2486. if (mce->bank >= bank_num || !(mce->status & MCI_STATUS_VAL))
  2487. return -EINVAL;
  2488. /*
  2489. * if IA32_MCG_CTL is not all 1s, the uncorrected error
  2490. * reporting is disabled
  2491. */
  2492. if ((mce->status & MCI_STATUS_UC) && (mcg_cap & MCG_CTL_P) &&
  2493. vcpu->arch.mcg_ctl != ~(u64)0)
  2494. return 0;
  2495. banks += 4 * mce->bank;
  2496. /*
  2497. * if IA32_MCi_CTL is not all 1s, the uncorrected error
  2498. * reporting is disabled for the bank
  2499. */
  2500. if ((mce->status & MCI_STATUS_UC) && banks[0] != ~(u64)0)
  2501. return 0;
  2502. if (mce->status & MCI_STATUS_UC) {
  2503. if ((vcpu->arch.mcg_status & MCG_STATUS_MCIP) ||
  2504. !kvm_read_cr4_bits(vcpu, X86_CR4_MCE)) {
  2505. kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
  2506. return 0;
  2507. }
  2508. if (banks[1] & MCI_STATUS_VAL)
  2509. mce->status |= MCI_STATUS_OVER;
  2510. banks[2] = mce->addr;
  2511. banks[3] = mce->misc;
  2512. vcpu->arch.mcg_status = mce->mcg_status;
  2513. banks[1] = mce->status;
  2514. kvm_queue_exception(vcpu, MC_VECTOR);
  2515. } else if (!(banks[1] & MCI_STATUS_VAL)
  2516. || !(banks[1] & MCI_STATUS_UC)) {
  2517. if (banks[1] & MCI_STATUS_VAL)
  2518. mce->status |= MCI_STATUS_OVER;
  2519. banks[2] = mce->addr;
  2520. banks[3] = mce->misc;
  2521. banks[1] = mce->status;
  2522. } else
  2523. banks[1] |= MCI_STATUS_OVER;
  2524. return 0;
  2525. }
  2526. static void kvm_vcpu_ioctl_x86_get_vcpu_events(struct kvm_vcpu *vcpu,
  2527. struct kvm_vcpu_events *events)
  2528. {
  2529. process_nmi(vcpu);
  2530. events->exception.injected =
  2531. vcpu->arch.exception.pending &&
  2532. !kvm_exception_is_soft(vcpu->arch.exception.nr);
  2533. events->exception.nr = vcpu->arch.exception.nr;
  2534. events->exception.has_error_code = vcpu->arch.exception.has_error_code;
  2535. events->exception.pad = 0;
  2536. events->exception.error_code = vcpu->arch.exception.error_code;
  2537. events->interrupt.injected =
  2538. vcpu->arch.interrupt.pending && !vcpu->arch.interrupt.soft;
  2539. events->interrupt.nr = vcpu->arch.interrupt.nr;
  2540. events->interrupt.soft = 0;
  2541. events->interrupt.shadow =
  2542. kvm_x86_ops->get_interrupt_shadow(vcpu,
  2543. KVM_X86_SHADOW_INT_MOV_SS | KVM_X86_SHADOW_INT_STI);
  2544. events->nmi.injected = vcpu->arch.nmi_injected;
  2545. events->nmi.pending = vcpu->arch.nmi_pending != 0;
  2546. events->nmi.masked = kvm_x86_ops->get_nmi_mask(vcpu);
  2547. events->nmi.pad = 0;
  2548. events->sipi_vector = 0; /* never valid when reporting to user space */
  2549. events->flags = (KVM_VCPUEVENT_VALID_NMI_PENDING
  2550. | KVM_VCPUEVENT_VALID_SHADOW);
  2551. memset(&events->reserved, 0, sizeof(events->reserved));
  2552. }
  2553. static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu,
  2554. struct kvm_vcpu_events *events)
  2555. {
  2556. if (events->flags & ~(KVM_VCPUEVENT_VALID_NMI_PENDING
  2557. | KVM_VCPUEVENT_VALID_SIPI_VECTOR
  2558. | KVM_VCPUEVENT_VALID_SHADOW))
  2559. return -EINVAL;
  2560. process_nmi(vcpu);
  2561. vcpu->arch.exception.pending = events->exception.injected;
  2562. vcpu->arch.exception.nr = events->exception.nr;
  2563. vcpu->arch.exception.has_error_code = events->exception.has_error_code;
  2564. vcpu->arch.exception.error_code = events->exception.error_code;
  2565. vcpu->arch.interrupt.pending = events->interrupt.injected;
  2566. vcpu->arch.interrupt.nr = events->interrupt.nr;
  2567. vcpu->arch.interrupt.soft = events->interrupt.soft;
  2568. if (events->flags & KVM_VCPUEVENT_VALID_SHADOW)
  2569. kvm_x86_ops->set_interrupt_shadow(vcpu,
  2570. events->interrupt.shadow);
  2571. vcpu->arch.nmi_injected = events->nmi.injected;
  2572. if (events->flags & KVM_VCPUEVENT_VALID_NMI_PENDING)
  2573. vcpu->arch.nmi_pending = events->nmi.pending;
  2574. kvm_x86_ops->set_nmi_mask(vcpu, events->nmi.masked);
  2575. if (events->flags & KVM_VCPUEVENT_VALID_SIPI_VECTOR &&
  2576. kvm_vcpu_has_lapic(vcpu))
  2577. vcpu->arch.apic->sipi_vector = events->sipi_vector;
  2578. kvm_make_request(KVM_REQ_EVENT, vcpu);
  2579. return 0;
  2580. }
  2581. static void kvm_vcpu_ioctl_x86_get_debugregs(struct kvm_vcpu *vcpu,
  2582. struct kvm_debugregs *dbgregs)
  2583. {
  2584. memcpy(dbgregs->db, vcpu->arch.db, sizeof(vcpu->arch.db));
  2585. dbgregs->dr6 = vcpu->arch.dr6;
  2586. dbgregs->dr7 = vcpu->arch.dr7;
  2587. dbgregs->flags = 0;
  2588. memset(&dbgregs->reserved, 0, sizeof(dbgregs->reserved));
  2589. }
  2590. static int kvm_vcpu_ioctl_x86_set_debugregs(struct kvm_vcpu *vcpu,
  2591. struct kvm_debugregs *dbgregs)
  2592. {
  2593. if (dbgregs->flags)
  2594. return -EINVAL;
  2595. memcpy(vcpu->arch.db, dbgregs->db, sizeof(vcpu->arch.db));
  2596. vcpu->arch.dr6 = dbgregs->dr6;
  2597. vcpu->arch.dr7 = dbgregs->dr7;
  2598. return 0;
  2599. }
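/*
 * Export the guest FPU state in KVM_GET_XSAVE layout.  On hosts without
 * XSAVE only the FXSAVE region is copied, and the header reports FP/SSE.
 */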
  2600. static void kvm_vcpu_ioctl_x86_get_xsave(struct kvm_vcpu *vcpu,
  2601. struct kvm_xsave *guest_xsave)
  2602. {
  2603. if (cpu_has_xsave)
  2604. memcpy(guest_xsave->region,
  2605. &vcpu->arch.guest_fpu.state->xsave,
  2606. xstate_size);
  2607. else {
  2608. memcpy(guest_xsave->region,
  2609. &vcpu->arch.guest_fpu.state->fxsave,
  2610. sizeof(struct i387_fxsave_struct));
  2611. *(u64 *)&guest_xsave->region[XSAVE_HDR_OFFSET / sizeof(u32)] =
  2612. XSTATE_FPSSE;
  2613. }
  2614. }
  2615. static int kvm_vcpu_ioctl_x86_set_xsave(struct kvm_vcpu *vcpu,
  2616. struct kvm_xsave *guest_xsave)
  2617. {
  2618. u64 xstate_bv =
  2619. *(u64 *)&guest_xsave->region[XSAVE_HDR_OFFSET / sizeof(u32)];
  2620. if (cpu_has_xsave)
  2621. memcpy(&vcpu->arch.guest_fpu.state->xsave,
  2622. guest_xsave->region, xstate_size);
  2623. else {
  2624. if (xstate_bv & ~XSTATE_FPSSE)
  2625. return -EINVAL;
  2626. memcpy(&vcpu->arch.guest_fpu.state->fxsave,
  2627. guest_xsave->region, sizeof(struct i387_fxsave_struct));
  2628. }
  2629. return 0;
  2630. }
  2631. static void kvm_vcpu_ioctl_x86_get_xcrs(struct kvm_vcpu *vcpu,
  2632. struct kvm_xcrs *guest_xcrs)
  2633. {
  2634. if (!cpu_has_xsave) {
  2635. guest_xcrs->nr_xcrs = 0;
  2636. return;
  2637. }
  2638. guest_xcrs->nr_xcrs = 1;
  2639. guest_xcrs->flags = 0;
  2640. guest_xcrs->xcrs[0].xcr = XCR_XFEATURE_ENABLED_MASK;
  2641. guest_xcrs->xcrs[0].value = vcpu->arch.xcr0;
  2642. }
  2643. static int kvm_vcpu_ioctl_x86_set_xcrs(struct kvm_vcpu *vcpu,
  2644. struct kvm_xcrs *guest_xcrs)
  2645. {
  2646. int i, r = 0;
  2647. if (!cpu_has_xsave)
  2648. return -EINVAL;
  2649. if (guest_xcrs->nr_xcrs > KVM_MAX_XCRS || guest_xcrs->flags)
  2650. return -EINVAL;
  2651. for (i = 0; i < guest_xcrs->nr_xcrs; i++)
  2652. /* Only support XCR0 currently */
  2653. if (guest_xcrs->xcrs[0].xcr == XCR_XFEATURE_ENABLED_MASK) {
  2654. r = __kvm_set_xcr(vcpu, XCR_XFEATURE_ENABLED_MASK,
  2655. guest_xcrs->xcrs[0].value);
  2656. break;
  2657. }
  2658. if (r)
  2659. r = -EINVAL;
  2660. return r;
  2661. }
  2662. /*
  2663. * kvm_set_guest_paused() indicates to the guest kernel that it has been
  2664. * stopped by the hypervisor. This function will be called from the host only.
  2665. * EINVAL is returned when the host attempts to set the flag for a guest that
  2666. * does not support pv clocks.
  2667. */
  2668. static int kvm_set_guest_paused(struct kvm_vcpu *vcpu)
  2669. {
  2670. if (!vcpu->arch.pv_time_enabled)
  2671. return -EINVAL;
  2672. vcpu->arch.pvclock_set_guest_stopped_request = true;
  2673. kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
  2674. return 0;
  2675. }
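/*
 * Dispatcher for the x86-specific per-vCPU ioctls.  A minimal userspace
 * sketch (illustrative only; vcpu_fd is assumed to be a vCPU file
 * descriptor obtained via KVM_CREATE_VCPU):
 *
 *	struct kvm_interrupt irq = { .irq = 4 };
 *	ioctl(vcpu_fd, KVM_INTERRUPT, &irq);	/* userspace irqchip only */
 *	ioctl(vcpu_fd, KVM_NMI, 0);		/* queue an NMI */
 */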
  2676. long kvm_arch_vcpu_ioctl(struct file *filp,
  2677. unsigned int ioctl, unsigned long arg)
  2678. {
  2679. struct kvm_vcpu *vcpu = filp->private_data;
  2680. void __user *argp = (void __user *)arg;
  2681. int r;
  2682. union {
  2683. struct kvm_lapic_state *lapic;
  2684. struct kvm_xsave *xsave;
  2685. struct kvm_xcrs *xcrs;
  2686. void *buffer;
  2687. } u;
  2688. u.buffer = NULL;
  2689. switch (ioctl) {
  2690. case KVM_GET_LAPIC: {
  2691. r = -EINVAL;
  2692. if (!vcpu->arch.apic)
  2693. goto out;
  2694. u.lapic = kzalloc(sizeof(struct kvm_lapic_state), GFP_KERNEL);
  2695. r = -ENOMEM;
  2696. if (!u.lapic)
  2697. goto out;
  2698. r = kvm_vcpu_ioctl_get_lapic(vcpu, u.lapic);
  2699. if (r)
  2700. goto out;
  2701. r = -EFAULT;
  2702. if (copy_to_user(argp, u.lapic, sizeof(struct kvm_lapic_state)))
  2703. goto out;
  2704. r = 0;
  2705. break;
  2706. }
  2707. case KVM_SET_LAPIC: {
  2708. r = -EINVAL;
  2709. if (!vcpu->arch.apic)
  2710. goto out;
  2711. u.lapic = memdup_user(argp, sizeof(*u.lapic));
  2712. if (IS_ERR(u.lapic))
  2713. return PTR_ERR(u.lapic);
  2714. r = kvm_vcpu_ioctl_set_lapic(vcpu, u.lapic);
  2715. break;
  2716. }
  2717. case KVM_INTERRUPT: {
  2718. struct kvm_interrupt irq;
  2719. r = -EFAULT;
  2720. if (copy_from_user(&irq, argp, sizeof irq))
  2721. goto out;
  2722. r = kvm_vcpu_ioctl_interrupt(vcpu, &irq);
  2723. break;
  2724. }
  2725. case KVM_NMI: {
  2726. r = kvm_vcpu_ioctl_nmi(vcpu);
  2727. break;
  2728. }
  2729. case KVM_SET_CPUID: {
  2730. struct kvm_cpuid __user *cpuid_arg = argp;
  2731. struct kvm_cpuid cpuid;
  2732. r = -EFAULT;
  2733. if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
  2734. goto out;
  2735. r = kvm_vcpu_ioctl_set_cpuid(vcpu, &cpuid, cpuid_arg->entries);
  2736. break;
  2737. }
  2738. case KVM_SET_CPUID2: {
  2739. struct kvm_cpuid2 __user *cpuid_arg = argp;
  2740. struct kvm_cpuid2 cpuid;
  2741. r = -EFAULT;
  2742. if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
  2743. goto out;
  2744. r = kvm_vcpu_ioctl_set_cpuid2(vcpu, &cpuid,
  2745. cpuid_arg->entries);
  2746. break;
  2747. }
  2748. case KVM_GET_CPUID2: {
  2749. struct kvm_cpuid2 __user *cpuid_arg = argp;
  2750. struct kvm_cpuid2 cpuid;
  2751. r = -EFAULT;
  2752. if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
  2753. goto out;
  2754. r = kvm_vcpu_ioctl_get_cpuid2(vcpu, &cpuid,
  2755. cpuid_arg->entries);
  2756. if (r)
  2757. goto out;
  2758. r = -EFAULT;
  2759. if (copy_to_user(cpuid_arg, &cpuid, sizeof cpuid))
  2760. goto out;
  2761. r = 0;
  2762. break;
  2763. }
  2764. case KVM_GET_MSRS:
  2765. r = msr_io(vcpu, argp, kvm_get_msr, 1);
  2766. break;
  2767. case KVM_SET_MSRS:
  2768. r = msr_io(vcpu, argp, do_set_msr, 0);
  2769. break;
  2770. case KVM_TPR_ACCESS_REPORTING: {
  2771. struct kvm_tpr_access_ctl tac;
  2772. r = -EFAULT;
  2773. if (copy_from_user(&tac, argp, sizeof tac))
  2774. goto out;
  2775. r = vcpu_ioctl_tpr_access_reporting(vcpu, &tac);
  2776. if (r)
  2777. goto out;
  2778. r = -EFAULT;
  2779. if (copy_to_user(argp, &tac, sizeof tac))
  2780. goto out;
  2781. r = 0;
  2782. break;
2783. }
  2784. case KVM_SET_VAPIC_ADDR: {
  2785. struct kvm_vapic_addr va;
  2786. r = -EINVAL;
  2787. if (!irqchip_in_kernel(vcpu->kvm))
  2788. goto out;
  2789. r = -EFAULT;
  2790. if (copy_from_user(&va, argp, sizeof va))
  2791. goto out;
  2792. r = 0;
  2793. kvm_lapic_set_vapic_addr(vcpu, va.vapic_addr);
  2794. break;
  2795. }
  2796. case KVM_X86_SETUP_MCE: {
  2797. u64 mcg_cap;
  2798. r = -EFAULT;
  2799. if (copy_from_user(&mcg_cap, argp, sizeof mcg_cap))
  2800. goto out;
  2801. r = kvm_vcpu_ioctl_x86_setup_mce(vcpu, mcg_cap);
  2802. break;
  2803. }
  2804. case KVM_X86_SET_MCE: {
  2805. struct kvm_x86_mce mce;
  2806. r = -EFAULT;
  2807. if (copy_from_user(&mce, argp, sizeof mce))
  2808. goto out;
  2809. r = kvm_vcpu_ioctl_x86_set_mce(vcpu, &mce);
  2810. break;
  2811. }
  2812. case KVM_GET_VCPU_EVENTS: {
  2813. struct kvm_vcpu_events events;
  2814. kvm_vcpu_ioctl_x86_get_vcpu_events(vcpu, &events);
  2815. r = -EFAULT;
  2816. if (copy_to_user(argp, &events, sizeof(struct kvm_vcpu_events)))
  2817. break;
  2818. r = 0;
  2819. break;
  2820. }
  2821. case KVM_SET_VCPU_EVENTS: {
  2822. struct kvm_vcpu_events events;
  2823. r = -EFAULT;
  2824. if (copy_from_user(&events, argp, sizeof(struct kvm_vcpu_events)))
  2825. break;
  2826. r = kvm_vcpu_ioctl_x86_set_vcpu_events(vcpu, &events);
  2827. break;
  2828. }
  2829. case KVM_GET_DEBUGREGS: {
  2830. struct kvm_debugregs dbgregs;
  2831. kvm_vcpu_ioctl_x86_get_debugregs(vcpu, &dbgregs);
  2832. r = -EFAULT;
  2833. if (copy_to_user(argp, &dbgregs,
  2834. sizeof(struct kvm_debugregs)))
  2835. break;
  2836. r = 0;
  2837. break;
  2838. }
  2839. case KVM_SET_DEBUGREGS: {
  2840. struct kvm_debugregs dbgregs;
  2841. r = -EFAULT;
  2842. if (copy_from_user(&dbgregs, argp,
  2843. sizeof(struct kvm_debugregs)))
  2844. break;
  2845. r = kvm_vcpu_ioctl_x86_set_debugregs(vcpu, &dbgregs);
  2846. break;
  2847. }
  2848. case KVM_GET_XSAVE: {
  2849. u.xsave = kzalloc(sizeof(struct kvm_xsave), GFP_KERNEL);
  2850. r = -ENOMEM;
  2851. if (!u.xsave)
  2852. break;
  2853. kvm_vcpu_ioctl_x86_get_xsave(vcpu, u.xsave);
  2854. r = -EFAULT;
  2855. if (copy_to_user(argp, u.xsave, sizeof(struct kvm_xsave)))
  2856. break;
  2857. r = 0;
  2858. break;
  2859. }
  2860. case KVM_SET_XSAVE: {
  2861. u.xsave = memdup_user(argp, sizeof(*u.xsave));
  2862. if (IS_ERR(u.xsave))
  2863. return PTR_ERR(u.xsave);
  2864. r = kvm_vcpu_ioctl_x86_set_xsave(vcpu, u.xsave);
  2865. break;
  2866. }
  2867. case KVM_GET_XCRS: {
  2868. u.xcrs = kzalloc(sizeof(struct kvm_xcrs), GFP_KERNEL);
  2869. r = -ENOMEM;
  2870. if (!u.xcrs)
  2871. break;
  2872. kvm_vcpu_ioctl_x86_get_xcrs(vcpu, u.xcrs);
  2873. r = -EFAULT;
  2874. if (copy_to_user(argp, u.xcrs,
  2875. sizeof(struct kvm_xcrs)))
  2876. break;
  2877. r = 0;
  2878. break;
  2879. }
  2880. case KVM_SET_XCRS: {
  2881. u.xcrs = memdup_user(argp, sizeof(*u.xcrs));
  2882. if (IS_ERR(u.xcrs))
  2883. return PTR_ERR(u.xcrs);
  2884. r = kvm_vcpu_ioctl_x86_set_xcrs(vcpu, u.xcrs);
  2885. break;
  2886. }
  2887. case KVM_SET_TSC_KHZ: {
  2888. u32 user_tsc_khz;
  2889. r = -EINVAL;
  2890. user_tsc_khz = (u32)arg;
  2891. if (user_tsc_khz >= kvm_max_guest_tsc_khz)
  2892. goto out;
  2893. if (user_tsc_khz == 0)
  2894. user_tsc_khz = tsc_khz;
  2895. kvm_set_tsc_khz(vcpu, user_tsc_khz);
  2896. r = 0;
  2897. goto out;
  2898. }
  2899. case KVM_GET_TSC_KHZ: {
  2900. r = vcpu->arch.virtual_tsc_khz;
  2901. goto out;
  2902. }
  2903. case KVM_KVMCLOCK_CTRL: {
  2904. r = kvm_set_guest_paused(vcpu);
  2905. goto out;
  2906. }
  2907. default:
  2908. r = -EINVAL;
  2909. }
  2910. out:
  2911. kfree(u.buffer);
  2912. return r;
  2913. }
  2914. int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
  2915. {
  2916. return VM_FAULT_SIGBUS;
  2917. }
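/*
 * KVM_SET_TSS_ADDR: record where the 3-page TSS lives in guest physical
 * memory.  The address must leave room for all three pages within the
 * 32-bit physical address space.
 */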
  2918. static int kvm_vm_ioctl_set_tss_addr(struct kvm *kvm, unsigned long addr)
  2919. {
  2920. int ret;
  2921. if (addr > (unsigned int)(-3 * PAGE_SIZE))
  2922. return -EINVAL;
  2923. ret = kvm_x86_ops->set_tss_addr(kvm, addr);
  2924. return ret;
  2925. }
  2926. static int kvm_vm_ioctl_set_identity_map_addr(struct kvm *kvm,
  2927. u64 ident_addr)
  2928. {
  2929. kvm->arch.ept_identity_map_addr = ident_addr;
  2930. return 0;
  2931. }
  2932. static int kvm_vm_ioctl_set_nr_mmu_pages(struct kvm *kvm,
  2933. u32 kvm_nr_mmu_pages)
  2934. {
  2935. if (kvm_nr_mmu_pages < KVM_MIN_ALLOC_MMU_PAGES)
  2936. return -EINVAL;
  2937. mutex_lock(&kvm->slots_lock);
  2938. kvm_mmu_change_mmu_pages(kvm, kvm_nr_mmu_pages);
  2939. kvm->arch.n_requested_mmu_pages = kvm_nr_mmu_pages;
  2940. mutex_unlock(&kvm->slots_lock);
  2941. return 0;
  2942. }
  2943. static int kvm_vm_ioctl_get_nr_mmu_pages(struct kvm *kvm)
  2944. {
  2945. return kvm->arch.n_max_mmu_pages;
  2946. }
  2947. static int kvm_vm_ioctl_get_irqchip(struct kvm *kvm, struct kvm_irqchip *chip)
  2948. {
  2949. int r;
  2950. r = 0;
  2951. switch (chip->chip_id) {
  2952. case KVM_IRQCHIP_PIC_MASTER:
  2953. memcpy(&chip->chip.pic,
  2954. &pic_irqchip(kvm)->pics[0],
  2955. sizeof(struct kvm_pic_state));
  2956. break;
  2957. case KVM_IRQCHIP_PIC_SLAVE:
  2958. memcpy(&chip->chip.pic,
  2959. &pic_irqchip(kvm)->pics[1],
  2960. sizeof(struct kvm_pic_state));
  2961. break;
  2962. case KVM_IRQCHIP_IOAPIC:
  2963. r = kvm_get_ioapic(kvm, &chip->chip.ioapic);
  2964. break;
  2965. default:
  2966. r = -EINVAL;
  2967. break;
  2968. }
  2969. return r;
  2970. }
  2971. static int kvm_vm_ioctl_set_irqchip(struct kvm *kvm, struct kvm_irqchip *chip)
  2972. {
  2973. int r;
  2974. r = 0;
  2975. switch (chip->chip_id) {
  2976. case KVM_IRQCHIP_PIC_MASTER:
  2977. spin_lock(&pic_irqchip(kvm)->lock);
  2978. memcpy(&pic_irqchip(kvm)->pics[0],
  2979. &chip->chip.pic,
  2980. sizeof(struct kvm_pic_state));
  2981. spin_unlock(&pic_irqchip(kvm)->lock);
  2982. break;
  2983. case KVM_IRQCHIP_PIC_SLAVE:
  2984. spin_lock(&pic_irqchip(kvm)->lock);
  2985. memcpy(&pic_irqchip(kvm)->pics[1],
  2986. &chip->chip.pic,
  2987. sizeof(struct kvm_pic_state));
  2988. spin_unlock(&pic_irqchip(kvm)->lock);
  2989. break;
  2990. case KVM_IRQCHIP_IOAPIC:
  2991. r = kvm_set_ioapic(kvm, &chip->chip.ioapic);
  2992. break;
  2993. default:
  2994. r = -EINVAL;
  2995. break;
  2996. }
  2997. kvm_pic_update_irq(pic_irqchip(kvm));
  2998. return r;
  2999. }
  3000. static int kvm_vm_ioctl_get_pit(struct kvm *kvm, struct kvm_pit_state *ps)
  3001. {
  3002. int r = 0;
  3003. mutex_lock(&kvm->arch.vpit->pit_state.lock);
  3004. memcpy(ps, &kvm->arch.vpit->pit_state, sizeof(struct kvm_pit_state));
  3005. mutex_unlock(&kvm->arch.vpit->pit_state.lock);
  3006. return r;
  3007. }
  3008. static int kvm_vm_ioctl_set_pit(struct kvm *kvm, struct kvm_pit_state *ps)
  3009. {
  3010. int r = 0;
  3011. mutex_lock(&kvm->arch.vpit->pit_state.lock);
  3012. memcpy(&kvm->arch.vpit->pit_state, ps, sizeof(struct kvm_pit_state));
  3013. kvm_pit_load_count(kvm, 0, ps->channels[0].count, 0);
  3014. mutex_unlock(&kvm->arch.vpit->pit_state.lock);
  3015. return r;
  3016. }
  3017. static int kvm_vm_ioctl_get_pit2(struct kvm *kvm, struct kvm_pit_state2 *ps)
  3018. {
  3019. int r = 0;
  3020. mutex_lock(&kvm->arch.vpit->pit_state.lock);
  3021. memcpy(ps->channels, &kvm->arch.vpit->pit_state.channels,
  3022. sizeof(ps->channels));
  3023. ps->flags = kvm->arch.vpit->pit_state.flags;
  3024. mutex_unlock(&kvm->arch.vpit->pit_state.lock);
  3025. memset(&ps->reserved, 0, sizeof(ps->reserved));
  3026. return r;
  3027. }
  3028. static int kvm_vm_ioctl_set_pit2(struct kvm *kvm, struct kvm_pit_state2 *ps)
  3029. {
  3030. int r = 0, start = 0;
  3031. u32 prev_legacy, cur_legacy;
  3032. mutex_lock(&kvm->arch.vpit->pit_state.lock);
  3033. prev_legacy = kvm->arch.vpit->pit_state.flags & KVM_PIT_FLAGS_HPET_LEGACY;
  3034. cur_legacy = ps->flags & KVM_PIT_FLAGS_HPET_LEGACY;
  3035. if (!prev_legacy && cur_legacy)
  3036. start = 1;
  3037. memcpy(&kvm->arch.vpit->pit_state.channels, &ps->channels,
  3038. sizeof(kvm->arch.vpit->pit_state.channels));
  3039. kvm->arch.vpit->pit_state.flags = ps->flags;
  3040. kvm_pit_load_count(kvm, 0, kvm->arch.vpit->pit_state.channels[0].count, start);
  3041. mutex_unlock(&kvm->arch.vpit->pit_state.lock);
  3042. return r;
  3043. }
  3044. static int kvm_vm_ioctl_reinject(struct kvm *kvm,
  3045. struct kvm_reinject_control *control)
  3046. {
  3047. if (!kvm->arch.vpit)
  3048. return -ENXIO;
  3049. mutex_lock(&kvm->arch.vpit->pit_state.lock);
  3050. kvm->arch.vpit->pit_state.reinject = control->pit_reinject;
  3051. mutex_unlock(&kvm->arch.vpit->pit_state.lock);
  3052. return 0;
  3053. }
  3054. /**
  3055. * kvm_vm_ioctl_get_dirty_log - get and clear the log of dirty pages in a slot
  3056. * @kvm: kvm instance
  3057. * @log: slot id and address to which we copy the log
  3058. *
  3059. * We need to keep it in mind that VCPU threads can write to the bitmap
  3060. * concurrently. So, to avoid losing data, we keep the following order for
  3061. * each bit:
  3062. *
  3063. * 1. Take a snapshot of the bit and clear it if needed.
  3064. * 2. Write protect the corresponding page.
3065. * 3. Flush TLBs if needed.
3066. * 4. Copy the snapshot to userspace.
  3067. *
  3068. * Between 2 and 3, the guest may write to the page using the remaining TLB
  3069. * entry. This is not a problem because the page will be reported dirty at
  3070. * step 4 using the snapshot taken before and step 3 ensures that successive
  3071. * writes will be logged for the next call.
  3072. */
  3073. int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
  3074. {
  3075. int r;
  3076. struct kvm_memory_slot *memslot;
  3077. unsigned long n, i;
  3078. unsigned long *dirty_bitmap;
  3079. unsigned long *dirty_bitmap_buffer;
  3080. bool is_dirty = false;
  3081. mutex_lock(&kvm->slots_lock);
  3082. r = -EINVAL;
  3083. if (log->slot >= KVM_USER_MEM_SLOTS)
  3084. goto out;
  3085. memslot = id_to_memslot(kvm->memslots, log->slot);
  3086. dirty_bitmap = memslot->dirty_bitmap;
  3087. r = -ENOENT;
  3088. if (!dirty_bitmap)
  3089. goto out;
  3090. n = kvm_dirty_bitmap_bytes(memslot);
  3091. dirty_bitmap_buffer = dirty_bitmap + n / sizeof(long);
  3092. memset(dirty_bitmap_buffer, 0, n);
  3093. spin_lock(&kvm->mmu_lock);
  3094. for (i = 0; i < n / sizeof(long); i++) {
  3095. unsigned long mask;
  3096. gfn_t offset;
  3097. if (!dirty_bitmap[i])
  3098. continue;
  3099. is_dirty = true;
  3100. mask = xchg(&dirty_bitmap[i], 0);
  3101. dirty_bitmap_buffer[i] = mask;
  3102. offset = i * BITS_PER_LONG;
  3103. kvm_mmu_write_protect_pt_masked(kvm, memslot, offset, mask);
  3104. }
  3105. if (is_dirty)
  3106. kvm_flush_remote_tlbs(kvm);
  3107. spin_unlock(&kvm->mmu_lock);
  3108. r = -EFAULT;
  3109. if (copy_to_user(log->dirty_bitmap, dirty_bitmap_buffer, n))
  3110. goto out;
  3111. r = 0;
  3112. out:
  3113. mutex_unlock(&kvm->slots_lock);
  3114. return r;
  3115. }
  3116. int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_event,
  3117. bool line_status)
  3118. {
  3119. if (!irqchip_in_kernel(kvm))
  3120. return -ENXIO;
  3121. irq_event->status = kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID,
  3122. irq_event->irq, irq_event->level,
  3123. line_status);
  3124. return 0;
  3125. }
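/*
 * Dispatcher for the x86-specific VM ioctls (irqchip, PIT, clock, ...).
 * A minimal userspace sketch (illustrative only; vm_fd is assumed to be
 * a VM file descriptor obtained via KVM_CREATE_VM):
 *
 *	ioctl(vm_fd, KVM_CREATE_IRQCHIP, 0);
 *	ioctl(vm_fd, KVM_CREATE_PIT2, &pit_config);
 */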
  3126. long kvm_arch_vm_ioctl(struct file *filp,
  3127. unsigned int ioctl, unsigned long arg)
  3128. {
  3129. struct kvm *kvm = filp->private_data;
  3130. void __user *argp = (void __user *)arg;
  3131. int r = -ENOTTY;
  3132. /*
  3133. * This union makes it completely explicit to gcc-3.x
  3134. * that these two variables' stack usage should be
  3135. * combined, not added together.
  3136. */
  3137. union {
  3138. struct kvm_pit_state ps;
  3139. struct kvm_pit_state2 ps2;
  3140. struct kvm_pit_config pit_config;
  3141. } u;
  3142. switch (ioctl) {
  3143. case KVM_SET_TSS_ADDR:
  3144. r = kvm_vm_ioctl_set_tss_addr(kvm, arg);
  3145. break;
  3146. case KVM_SET_IDENTITY_MAP_ADDR: {
  3147. u64 ident_addr;
  3148. r = -EFAULT;
  3149. if (copy_from_user(&ident_addr, argp, sizeof ident_addr))
  3150. goto out;
  3151. r = kvm_vm_ioctl_set_identity_map_addr(kvm, ident_addr);
  3152. break;
  3153. }
  3154. case KVM_SET_NR_MMU_PAGES:
  3155. r = kvm_vm_ioctl_set_nr_mmu_pages(kvm, arg);
  3156. break;
  3157. case KVM_GET_NR_MMU_PAGES:
  3158. r = kvm_vm_ioctl_get_nr_mmu_pages(kvm);
  3159. break;
  3160. case KVM_CREATE_IRQCHIP: {
  3161. struct kvm_pic *vpic;
  3162. mutex_lock(&kvm->lock);
  3163. r = -EEXIST;
  3164. if (kvm->arch.vpic)
  3165. goto create_irqchip_unlock;
  3166. r = -EINVAL;
  3167. if (atomic_read(&kvm->online_vcpus))
  3168. goto create_irqchip_unlock;
  3169. r = -ENOMEM;
  3170. vpic = kvm_create_pic(kvm);
  3171. if (vpic) {
  3172. r = kvm_ioapic_init(kvm);
  3173. if (r) {
  3174. mutex_lock(&kvm->slots_lock);
  3175. kvm_io_bus_unregister_dev(kvm, KVM_PIO_BUS,
  3176. &vpic->dev_master);
  3177. kvm_io_bus_unregister_dev(kvm, KVM_PIO_BUS,
  3178. &vpic->dev_slave);
  3179. kvm_io_bus_unregister_dev(kvm, KVM_PIO_BUS,
  3180. &vpic->dev_eclr);
  3181. mutex_unlock(&kvm->slots_lock);
  3182. kfree(vpic);
  3183. goto create_irqchip_unlock;
  3184. }
  3185. } else
  3186. goto create_irqchip_unlock;
  3187. smp_wmb();
  3188. kvm->arch.vpic = vpic;
  3189. smp_wmb();
  3190. r = kvm_setup_default_irq_routing(kvm);
  3191. if (r) {
  3192. mutex_lock(&kvm->slots_lock);
  3193. mutex_lock(&kvm->irq_lock);
  3194. kvm_ioapic_destroy(kvm);
  3195. kvm_destroy_pic(kvm);
  3196. mutex_unlock(&kvm->irq_lock);
  3197. mutex_unlock(&kvm->slots_lock);
  3198. }
  3199. create_irqchip_unlock:
  3200. mutex_unlock(&kvm->lock);
  3201. break;
  3202. }
  3203. case KVM_CREATE_PIT:
  3204. u.pit_config.flags = KVM_PIT_SPEAKER_DUMMY;
  3205. goto create_pit;
  3206. case KVM_CREATE_PIT2:
  3207. r = -EFAULT;
  3208. if (copy_from_user(&u.pit_config, argp,
  3209. sizeof(struct kvm_pit_config)))
  3210. goto out;
  3211. create_pit:
  3212. mutex_lock(&kvm->slots_lock);
  3213. r = -EEXIST;
  3214. if (kvm->arch.vpit)
  3215. goto create_pit_unlock;
  3216. r = -ENOMEM;
  3217. kvm->arch.vpit = kvm_create_pit(kvm, u.pit_config.flags);
  3218. if (kvm->arch.vpit)
  3219. r = 0;
  3220. create_pit_unlock:
  3221. mutex_unlock(&kvm->slots_lock);
  3222. break;
  3223. case KVM_GET_IRQCHIP: {
  3224. /* 0: PIC master, 1: PIC slave, 2: IOAPIC */
  3225. struct kvm_irqchip *chip;
  3226. chip = memdup_user(argp, sizeof(*chip));
  3227. if (IS_ERR(chip)) {
  3228. r = PTR_ERR(chip);
  3229. goto out;
  3230. }
  3231. r = -ENXIO;
  3232. if (!irqchip_in_kernel(kvm))
  3233. goto get_irqchip_out;
  3234. r = kvm_vm_ioctl_get_irqchip(kvm, chip);
  3235. if (r)
  3236. goto get_irqchip_out;
  3237. r = -EFAULT;
  3238. if (copy_to_user(argp, chip, sizeof *chip))
  3239. goto get_irqchip_out;
  3240. r = 0;
  3241. get_irqchip_out:
  3242. kfree(chip);
  3243. break;
  3244. }
  3245. case KVM_SET_IRQCHIP: {
  3246. /* 0: PIC master, 1: PIC slave, 2: IOAPIC */
  3247. struct kvm_irqchip *chip;
  3248. chip = memdup_user(argp, sizeof(*chip));
  3249. if (IS_ERR(chip)) {
  3250. r = PTR_ERR(chip);
  3251. goto out;
  3252. }
  3253. r = -ENXIO;
  3254. if (!irqchip_in_kernel(kvm))
  3255. goto set_irqchip_out;
  3256. r = kvm_vm_ioctl_set_irqchip(kvm, chip);
  3257. if (r)
  3258. goto set_irqchip_out;
  3259. r = 0;
  3260. set_irqchip_out:
  3261. kfree(chip);
  3262. break;
  3263. }
  3264. case KVM_GET_PIT: {
  3265. r = -EFAULT;
  3266. if (copy_from_user(&u.ps, argp, sizeof(struct kvm_pit_state)))
  3267. goto out;
  3268. r = -ENXIO;
  3269. if (!kvm->arch.vpit)
  3270. goto out;
  3271. r = kvm_vm_ioctl_get_pit(kvm, &u.ps);
  3272. if (r)
  3273. goto out;
  3274. r = -EFAULT;
  3275. if (copy_to_user(argp, &u.ps, sizeof(struct kvm_pit_state)))
  3276. goto out;
  3277. r = 0;
  3278. break;
  3279. }
  3280. case KVM_SET_PIT: {
  3281. r = -EFAULT;
  3282. if (copy_from_user(&u.ps, argp, sizeof u.ps))
  3283. goto out;
  3284. r = -ENXIO;
  3285. if (!kvm->arch.vpit)
  3286. goto out;
  3287. r = kvm_vm_ioctl_set_pit(kvm, &u.ps);
  3288. break;
  3289. }
  3290. case KVM_GET_PIT2: {
  3291. r = -ENXIO;
  3292. if (!kvm->arch.vpit)
  3293. goto out;
  3294. r = kvm_vm_ioctl_get_pit2(kvm, &u.ps2);
  3295. if (r)
  3296. goto out;
  3297. r = -EFAULT;
  3298. if (copy_to_user(argp, &u.ps2, sizeof(u.ps2)))
  3299. goto out;
  3300. r = 0;
  3301. break;
  3302. }
  3303. case KVM_SET_PIT2: {
  3304. r = -EFAULT;
  3305. if (copy_from_user(&u.ps2, argp, sizeof(u.ps2)))
  3306. goto out;
  3307. r = -ENXIO;
  3308. if (!kvm->arch.vpit)
  3309. goto out;
  3310. r = kvm_vm_ioctl_set_pit2(kvm, &u.ps2);
  3311. break;
  3312. }
  3313. case KVM_REINJECT_CONTROL: {
  3314. struct kvm_reinject_control control;
  3315. r = -EFAULT;
  3316. if (copy_from_user(&control, argp, sizeof(control)))
  3317. goto out;
  3318. r = kvm_vm_ioctl_reinject(kvm, &control);
  3319. break;
  3320. }
  3321. case KVM_XEN_HVM_CONFIG: {
  3322. r = -EFAULT;
  3323. if (copy_from_user(&kvm->arch.xen_hvm_config, argp,
  3324. sizeof(struct kvm_xen_hvm_config)))
  3325. goto out;
  3326. r = -EINVAL;
  3327. if (kvm->arch.xen_hvm_config.flags)
  3328. goto out;
  3329. r = 0;
  3330. break;
  3331. }
  3332. case KVM_SET_CLOCK: {
  3333. struct kvm_clock_data user_ns;
  3334. u64 now_ns;
  3335. s64 delta;
  3336. r = -EFAULT;
  3337. if (copy_from_user(&user_ns, argp, sizeof(user_ns)))
  3338. goto out;
  3339. r = -EINVAL;
  3340. if (user_ns.flags)
  3341. goto out;
  3342. r = 0;
  3343. local_irq_disable();
  3344. now_ns = get_kernel_ns();
  3345. delta = user_ns.clock - now_ns;
  3346. local_irq_enable();
  3347. kvm->arch.kvmclock_offset = delta;
  3348. break;
  3349. }
  3350. case KVM_GET_CLOCK: {
  3351. struct kvm_clock_data user_ns;
  3352. u64 now_ns;
  3353. local_irq_disable();
  3354. now_ns = get_kernel_ns();
  3355. user_ns.clock = kvm->arch.kvmclock_offset + now_ns;
  3356. local_irq_enable();
  3357. user_ns.flags = 0;
  3358. memset(&user_ns.pad, 0, sizeof(user_ns.pad));
  3359. r = -EFAULT;
  3360. if (copy_to_user(argp, &user_ns, sizeof(user_ns)))
  3361. goto out;
  3362. r = 0;
  3363. break;
  3364. }
  3365. default:
  3366. ;
  3367. }
  3368. out:
  3369. return r;
  3370. }
  3371. static void kvm_init_msr_list(void)
  3372. {
  3373. u32 dummy[2];
  3374. unsigned i, j;
3375. /* Skip the first MSRs in the list; they are KVM-specific and always kept. */
  3376. for (i = j = KVM_SAVE_MSRS_BEGIN; i < ARRAY_SIZE(msrs_to_save); i++) {
  3377. if (rdmsr_safe(msrs_to_save[i], &dummy[0], &dummy[1]) < 0)
  3378. continue;
  3379. if (j < i)
  3380. msrs_to_save[j] = msrs_to_save[i];
  3381. j++;
  3382. }
  3383. num_msrs_to_save = j;
  3384. }
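/*
 * Write 'len' bytes of an MMIO access, up to 8 bytes at a time, trying the
 * in-kernel local APIC first and then the MMIO bus.  Returns the number of
 * bytes handled in the kernel; the caller forwards any remainder to userspace.
 */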
  3385. static int vcpu_mmio_write(struct kvm_vcpu *vcpu, gpa_t addr, int len,
  3386. const void *v)
  3387. {
  3388. int handled = 0;
  3389. int n;
  3390. do {
  3391. n = min(len, 8);
  3392. if (!(vcpu->arch.apic &&
  3393. !kvm_iodevice_write(&vcpu->arch.apic->dev, addr, n, v))
  3394. && kvm_io_bus_write(vcpu->kvm, KVM_MMIO_BUS, addr, n, v))
  3395. break;
  3396. handled += n;
  3397. addr += n;
  3398. len -= n;
  3399. v += n;
  3400. } while (len);
  3401. return handled;
  3402. }
  3403. static int vcpu_mmio_read(struct kvm_vcpu *vcpu, gpa_t addr, int len, void *v)
  3404. {
  3405. int handled = 0;
  3406. int n;
  3407. do {
  3408. n = min(len, 8);
  3409. if (!(vcpu->arch.apic &&
  3410. !kvm_iodevice_read(&vcpu->arch.apic->dev, addr, n, v))
  3411. && kvm_io_bus_read(vcpu->kvm, KVM_MMIO_BUS, addr, n, v))
  3412. break;
  3413. trace_kvm_mmio(KVM_TRACE_MMIO_READ, n, addr, *(u64 *)v);
  3414. handled += n;
  3415. addr += n;
  3416. len -= n;
  3417. v += n;
  3418. } while (len);
  3419. return handled;
  3420. }
  3421. static void kvm_set_segment(struct kvm_vcpu *vcpu,
  3422. struct kvm_segment *var, int seg)
  3423. {
  3424. kvm_x86_ops->set_segment(vcpu, var, seg);
  3425. }
  3426. void kvm_get_segment(struct kvm_vcpu *vcpu,
  3427. struct kvm_segment *var, int seg)
  3428. {
  3429. kvm_x86_ops->get_segment(vcpu, var, seg);
  3430. }
  3431. gpa_t translate_nested_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access)
  3432. {
  3433. gpa_t t_gpa;
  3434. struct x86_exception exception;
  3435. BUG_ON(!mmu_is_nested(vcpu));
  3436. /* NPT walks are always user-walks */
  3437. access |= PFERR_USER_MASK;
  3438. t_gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, gpa, access, &exception);
  3439. return t_gpa;
  3440. }
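/*
 * The gva_to_gpa helpers below translate a guest virtual address through the
 * active MMU with the access rights implied by the operation (read, fetch or
 * write) and the current CPL; translation faults are reported via *exception.
 */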
  3441. gpa_t kvm_mmu_gva_to_gpa_read(struct kvm_vcpu *vcpu, gva_t gva,
  3442. struct x86_exception *exception)
  3443. {
  3444. u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
  3445. return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception);
  3446. }
  3447. gpa_t kvm_mmu_gva_to_gpa_fetch(struct kvm_vcpu *vcpu, gva_t gva,
  3448. struct x86_exception *exception)
  3449. {
  3450. u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
  3451. access |= PFERR_FETCH_MASK;
  3452. return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception);
  3453. }
  3454. gpa_t kvm_mmu_gva_to_gpa_write(struct kvm_vcpu *vcpu, gva_t gva,
  3455. struct x86_exception *exception)
  3456. {
  3457. u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
  3458. access |= PFERR_WRITE_MASK;
  3459. return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception);
  3460. }
3461. /* Used to access any guest's mapped memory without checking CPL. */
  3462. gpa_t kvm_mmu_gva_to_gpa_system(struct kvm_vcpu *vcpu, gva_t gva,
  3463. struct x86_exception *exception)
  3464. {
  3465. return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, 0, exception);
  3466. }
  3467. static int kvm_read_guest_virt_helper(gva_t addr, void *val, unsigned int bytes,
  3468. struct kvm_vcpu *vcpu, u32 access,
  3469. struct x86_exception *exception)
  3470. {
  3471. void *data = val;
  3472. int r = X86EMUL_CONTINUE;
  3473. while (bytes) {
  3474. gpa_t gpa = vcpu->arch.walk_mmu->gva_to_gpa(vcpu, addr, access,
  3475. exception);
  3476. unsigned offset = addr & (PAGE_SIZE-1);
  3477. unsigned toread = min(bytes, (unsigned)PAGE_SIZE - offset);
  3478. int ret;
  3479. if (gpa == UNMAPPED_GVA)
  3480. return X86EMUL_PROPAGATE_FAULT;
  3481. ret = kvm_read_guest(vcpu->kvm, gpa, data, toread);
  3482. if (ret < 0) {
  3483. r = X86EMUL_IO_NEEDED;
  3484. goto out;
  3485. }
  3486. bytes -= toread;
  3487. data += toread;
  3488. addr += toread;
  3489. }
  3490. out:
  3491. return r;
  3492. }
  3493. /* used for instruction fetching */
  3494. static int kvm_fetch_guest_virt(struct x86_emulate_ctxt *ctxt,
  3495. gva_t addr, void *val, unsigned int bytes,
  3496. struct x86_exception *exception)
  3497. {
  3498. struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
  3499. u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
  3500. return kvm_read_guest_virt_helper(addr, val, bytes, vcpu,
  3501. access | PFERR_FETCH_MASK,
  3502. exception);
  3503. }
  3504. int kvm_read_guest_virt(struct x86_emulate_ctxt *ctxt,
  3505. gva_t addr, void *val, unsigned int bytes,
  3506. struct x86_exception *exception)
  3507. {
  3508. struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
  3509. u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
  3510. return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, access,
  3511. exception);
  3512. }
  3513. EXPORT_SYMBOL_GPL(kvm_read_guest_virt);
  3514. static int kvm_read_guest_virt_system(struct x86_emulate_ctxt *ctxt,
  3515. gva_t addr, void *val, unsigned int bytes,
  3516. struct x86_exception *exception)
  3517. {
  3518. struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
  3519. return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, 0, exception);
  3520. }
  3521. int kvm_write_guest_virt_system(struct x86_emulate_ctxt *ctxt,
  3522. gva_t addr, void *val,
  3523. unsigned int bytes,
  3524. struct x86_exception *exception)
  3525. {
  3526. struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
  3527. void *data = val;
  3528. int r = X86EMUL_CONTINUE;
  3529. while (bytes) {
  3530. gpa_t gpa = vcpu->arch.walk_mmu->gva_to_gpa(vcpu, addr,
  3531. PFERR_WRITE_MASK,
  3532. exception);
  3533. unsigned offset = addr & (PAGE_SIZE-1);
  3534. unsigned towrite = min(bytes, (unsigned)PAGE_SIZE - offset);
  3535. int ret;
  3536. if (gpa == UNMAPPED_GVA)
  3537. return X86EMUL_PROPAGATE_FAULT;
  3538. ret = kvm_write_guest(vcpu->kvm, gpa, data, towrite);
  3539. if (ret < 0) {
  3540. r = X86EMUL_IO_NEEDED;
  3541. goto out;
  3542. }
  3543. bytes -= towrite;
  3544. data += towrite;
  3545. addr += towrite;
  3546. }
  3547. out:
  3548. return r;
  3549. }
  3550. EXPORT_SYMBOL_GPL(kvm_write_guest_virt_system);
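/*
 * Translate a GVA for an emulated memory access and classify the target:
 * returns 1 if the access must be treated as MMIO (including APIC-page
 * accesses), 0 if it hits ordinary RAM, and -1 on a translation fault.
 */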
  3551. static int vcpu_mmio_gva_to_gpa(struct kvm_vcpu *vcpu, unsigned long gva,
  3552. gpa_t *gpa, struct x86_exception *exception,
  3553. bool write)
  3554. {
  3555. u32 access = ((kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0)
  3556. | (write ? PFERR_WRITE_MASK : 0);
  3557. if (vcpu_match_mmio_gva(vcpu, gva)
  3558. && !permission_fault(vcpu->arch.walk_mmu, vcpu->arch.access, access)) {
  3559. *gpa = vcpu->arch.mmio_gfn << PAGE_SHIFT |
  3560. (gva & (PAGE_SIZE - 1));
  3561. trace_vcpu_match_mmio(gva, *gpa, write, false);
  3562. return 1;
  3563. }
  3564. *gpa = vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception);
  3565. if (*gpa == UNMAPPED_GVA)
  3566. return -1;
  3567. /* For APIC access vmexit */
  3568. if ((*gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE)
  3569. return 1;
  3570. if (vcpu_match_mmio_gpa(vcpu, *gpa)) {
  3571. trace_vcpu_match_mmio(gva, *gpa, write, true);
  3572. return 1;
  3573. }
  3574. return 0;
  3575. }
  3576. int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
  3577. const void *val, int bytes)
  3578. {
  3579. int ret;
  3580. ret = kvm_write_guest(vcpu->kvm, gpa, val, bytes);
  3581. if (ret < 0)
  3582. return 0;
  3583. kvm_mmu_pte_write(vcpu, gpa, val, bytes);
  3584. return 1;
  3585. }
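/*
 * Read and write emulation share one code path; this ops structure supplies
 * the direction-specific callbacks (RAM-backed emulation, in-kernel MMIO,
 * and the exit-to-userspace fallback).
 */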
  3586. struct read_write_emulator_ops {
  3587. int (*read_write_prepare)(struct kvm_vcpu *vcpu, void *val,
  3588. int bytes);
  3589. int (*read_write_emulate)(struct kvm_vcpu *vcpu, gpa_t gpa,
  3590. void *val, int bytes);
  3591. int (*read_write_mmio)(struct kvm_vcpu *vcpu, gpa_t gpa,
  3592. int bytes, void *val);
  3593. int (*read_write_exit_mmio)(struct kvm_vcpu *vcpu, gpa_t gpa,
  3594. void *val, int bytes);
  3595. bool write;
  3596. };
  3597. static int read_prepare(struct kvm_vcpu *vcpu, void *val, int bytes)
  3598. {
  3599. if (vcpu->mmio_read_completed) {
  3600. trace_kvm_mmio(KVM_TRACE_MMIO_READ, bytes,
  3601. vcpu->mmio_fragments[0].gpa, *(u64 *)val);
  3602. vcpu->mmio_read_completed = 0;
  3603. return 1;
  3604. }
  3605. return 0;
  3606. }
  3607. static int read_emulate(struct kvm_vcpu *vcpu, gpa_t gpa,
  3608. void *val, int bytes)
  3609. {
  3610. return !kvm_read_guest(vcpu->kvm, gpa, val, bytes);
  3611. }
  3612. static int write_emulate(struct kvm_vcpu *vcpu, gpa_t gpa,
  3613. void *val, int bytes)
  3614. {
  3615. return emulator_write_phys(vcpu, gpa, val, bytes);
  3616. }
  3617. static int write_mmio(struct kvm_vcpu *vcpu, gpa_t gpa, int bytes, void *val)
  3618. {
  3619. trace_kvm_mmio(KVM_TRACE_MMIO_WRITE, bytes, gpa, *(u64 *)val);
  3620. return vcpu_mmio_write(vcpu, gpa, bytes, val);
  3621. }
  3622. static int read_exit_mmio(struct kvm_vcpu *vcpu, gpa_t gpa,
  3623. void *val, int bytes)
  3624. {
  3625. trace_kvm_mmio(KVM_TRACE_MMIO_READ_UNSATISFIED, bytes, gpa, 0);
  3626. return X86EMUL_IO_NEEDED;
  3627. }
  3628. static int write_exit_mmio(struct kvm_vcpu *vcpu, gpa_t gpa,
  3629. void *val, int bytes)
  3630. {
  3631. struct kvm_mmio_fragment *frag = &vcpu->mmio_fragments[0];
  3632. memcpy(vcpu->run->mmio.data, frag->data, min(8u, frag->len));
  3633. return X86EMUL_CONTINUE;
  3634. }
  3635. static const struct read_write_emulator_ops read_emultor = {
  3636. .read_write_prepare = read_prepare,
  3637. .read_write_emulate = read_emulate,
  3638. .read_write_mmio = vcpu_mmio_read,
  3639. .read_write_exit_mmio = read_exit_mmio,
  3640. };
  3641. static const struct read_write_emulator_ops write_emultor = {
  3642. .read_write_emulate = write_emulate,
  3643. .read_write_mmio = write_mmio,
  3644. .read_write_exit_mmio = write_exit_mmio,
  3645. .write = true,
  3646. };
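/*
 * Emulate one page's worth of a memory access: fast path through guest RAM
 * when possible, otherwise hand as much as possible to in-kernel MMIO
 * devices and queue the rest as an MMIO fragment for userspace.
 */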
  3647. static int emulator_read_write_onepage(unsigned long addr, void *val,
  3648. unsigned int bytes,
  3649. struct x86_exception *exception,
  3650. struct kvm_vcpu *vcpu,
  3651. const struct read_write_emulator_ops *ops)
  3652. {
  3653. gpa_t gpa;
  3654. int handled, ret;
  3655. bool write = ops->write;
  3656. struct kvm_mmio_fragment *frag;
  3657. ret = vcpu_mmio_gva_to_gpa(vcpu, addr, &gpa, exception, write);
  3658. if (ret < 0)
  3659. return X86EMUL_PROPAGATE_FAULT;
  3660. /* For APIC access vmexit */
  3661. if (ret)
  3662. goto mmio;
  3663. if (ops->read_write_emulate(vcpu, gpa, val, bytes))
  3664. return X86EMUL_CONTINUE;
  3665. mmio:
  3666. /*
  3667. * Is this MMIO handled locally?
  3668. */
  3669. handled = ops->read_write_mmio(vcpu, gpa, bytes, val);
  3670. if (handled == bytes)
  3671. return X86EMUL_CONTINUE;
  3672. gpa += handled;
  3673. bytes -= handled;
  3674. val += handled;
  3675. WARN_ON(vcpu->mmio_nr_fragments >= KVM_MAX_MMIO_FRAGMENTS);
  3676. frag = &vcpu->mmio_fragments[vcpu->mmio_nr_fragments++];
  3677. frag->gpa = gpa;
  3678. frag->data = val;
  3679. frag->len = bytes;
  3680. return X86EMUL_CONTINUE;
  3681. }
  3682. int emulator_read_write(struct x86_emulate_ctxt *ctxt, unsigned long addr,
  3683. void *val, unsigned int bytes,
  3684. struct x86_exception *exception,
  3685. const struct read_write_emulator_ops *ops)
  3686. {
  3687. struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
  3688. gpa_t gpa;
  3689. int rc;
  3690. if (ops->read_write_prepare &&
  3691. ops->read_write_prepare(vcpu, val, bytes))
  3692. return X86EMUL_CONTINUE;
  3693. vcpu->mmio_nr_fragments = 0;
  3694. /* Crossing a page boundary? */
  3695. if (((addr + bytes - 1) ^ addr) & PAGE_MASK) {
  3696. int now;
  3697. now = -addr & ~PAGE_MASK;
  3698. rc = emulator_read_write_onepage(addr, val, now, exception,
  3699. vcpu, ops);
  3700. if (rc != X86EMUL_CONTINUE)
  3701. return rc;
  3702. addr += now;
  3703. val += now;
  3704. bytes -= now;
  3705. }
  3706. rc = emulator_read_write_onepage(addr, val, bytes, exception,
  3707. vcpu, ops);
  3708. if (rc != X86EMUL_CONTINUE)
  3709. return rc;
  3710. if (!vcpu->mmio_nr_fragments)
  3711. return rc;
  3712. gpa = vcpu->mmio_fragments[0].gpa;
  3713. vcpu->mmio_needed = 1;
  3714. vcpu->mmio_cur_fragment = 0;
  3715. vcpu->run->mmio.len = min(8u, vcpu->mmio_fragments[0].len);
  3716. vcpu->run->mmio.is_write = vcpu->mmio_is_write = ops->write;
  3717. vcpu->run->exit_reason = KVM_EXIT_MMIO;
  3718. vcpu->run->mmio.phys_addr = gpa;
  3719. return ops->read_write_exit_mmio(vcpu, gpa, val, bytes);
  3720. }
  3721. static int emulator_read_emulated(struct x86_emulate_ctxt *ctxt,
  3722. unsigned long addr,
  3723. void *val,
  3724. unsigned int bytes,
  3725. struct x86_exception *exception)
  3726. {
  3727. return emulator_read_write(ctxt, addr, val, bytes,
  3728. exception, &read_emultor);
  3729. }
  3730. int emulator_write_emulated(struct x86_emulate_ctxt *ctxt,
  3731. unsigned long addr,
  3732. const void *val,
  3733. unsigned int bytes,
  3734. struct x86_exception *exception)
  3735. {
  3736. return emulator_read_write(ctxt, addr, (void *)val, bytes,
  3737. exception, &write_emultor);
  3738. }
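/*
 * Emulated CMPXCHG: performed atomically with a real cmpxchg on the mapped
 * guest page when the operand is a power-of-two size up to 8 bytes and does
 * not cross a page boundary; otherwise it falls back to a plain emulated
 * write (see emul_write below).
 */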
  3739. #define CMPXCHG_TYPE(t, ptr, old, new) \
  3740. (cmpxchg((t *)(ptr), *(t *)(old), *(t *)(new)) == *(t *)(old))
  3741. #ifdef CONFIG_X86_64
  3742. # define CMPXCHG64(ptr, old, new) CMPXCHG_TYPE(u64, ptr, old, new)
  3743. #else
  3744. # define CMPXCHG64(ptr, old, new) \
  3745. (cmpxchg64((u64 *)(ptr), *(u64 *)(old), *(u64 *)(new)) == *(u64 *)(old))
  3746. #endif
  3747. static int emulator_cmpxchg_emulated(struct x86_emulate_ctxt *ctxt,
  3748. unsigned long addr,
  3749. const void *old,
  3750. const void *new,
  3751. unsigned int bytes,
  3752. struct x86_exception *exception)
  3753. {
  3754. struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
  3755. gpa_t gpa;
  3756. struct page *page;
  3757. char *kaddr;
  3758. bool exchanged;
3759. /* A guest's cmpxchg8b has to be emulated atomically */
  3760. if (bytes > 8 || (bytes & (bytes - 1)))
  3761. goto emul_write;
  3762. gpa = kvm_mmu_gva_to_gpa_write(vcpu, addr, NULL);
  3763. if (gpa == UNMAPPED_GVA ||
  3764. (gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE)
  3765. goto emul_write;
  3766. if (((gpa + bytes - 1) & PAGE_MASK) != (gpa & PAGE_MASK))
  3767. goto emul_write;
  3768. page = gfn_to_page(vcpu->kvm, gpa >> PAGE_SHIFT);
  3769. if (is_error_page(page))
  3770. goto emul_write;
  3771. kaddr = kmap_atomic(page);
  3772. kaddr += offset_in_page(gpa);
  3773. switch (bytes) {
  3774. case 1:
  3775. exchanged = CMPXCHG_TYPE(u8, kaddr, old, new);
  3776. break;
  3777. case 2:
  3778. exchanged = CMPXCHG_TYPE(u16, kaddr, old, new);
  3779. break;
  3780. case 4:
  3781. exchanged = CMPXCHG_TYPE(u32, kaddr, old, new);
  3782. break;
  3783. case 8:
  3784. exchanged = CMPXCHG64(kaddr, old, new);
  3785. break;
  3786. default:
  3787. BUG();
  3788. }
  3789. kunmap_atomic(kaddr);
  3790. kvm_release_page_dirty(page);
  3791. if (!exchanged)
  3792. return X86EMUL_CMPXCHG_FAILED;
  3793. kvm_mmu_pte_write(vcpu, gpa, new, bytes);
  3794. return X86EMUL_CONTINUE;
  3795. emul_write:
  3796. printk_once(KERN_WARNING "kvm: emulating exchange as write\n");
  3797. return emulator_write_emulated(ctxt, addr, new, bytes, exception);
  3798. }
  3799. static int kernel_pio(struct kvm_vcpu *vcpu, void *pd)
  3800. {
3801. /* TODO: String I/O for in-kernel devices */
  3802. int r;
  3803. if (vcpu->arch.pio.in)
  3804. r = kvm_io_bus_read(vcpu->kvm, KVM_PIO_BUS, vcpu->arch.pio.port,
  3805. vcpu->arch.pio.size, pd);
  3806. else
  3807. r = kvm_io_bus_write(vcpu->kvm, KVM_PIO_BUS,
  3808. vcpu->arch.pio.port, vcpu->arch.pio.size,
  3809. pd);
  3810. return r;
  3811. }
  3812. static int emulator_pio_in_out(struct kvm_vcpu *vcpu, int size,
  3813. unsigned short port, void *val,
  3814. unsigned int count, bool in)
  3815. {
  3816. trace_kvm_pio(!in, port, size, count);
  3817. vcpu->arch.pio.port = port;
  3818. vcpu->arch.pio.in = in;
  3819. vcpu->arch.pio.count = count;
  3820. vcpu->arch.pio.size = size;
  3821. if (!kernel_pio(vcpu, vcpu->arch.pio_data)) {
  3822. vcpu->arch.pio.count = 0;
  3823. return 1;
  3824. }
  3825. vcpu->run->exit_reason = KVM_EXIT_IO;
  3826. vcpu->run->io.direction = in ? KVM_EXIT_IO_IN : KVM_EXIT_IO_OUT;
  3827. vcpu->run->io.size = size;
  3828. vcpu->run->io.data_offset = KVM_PIO_PAGE_OFFSET * PAGE_SIZE;
  3829. vcpu->run->io.count = count;
  3830. vcpu->run->io.port = port;
  3831. return 0;
  3832. }
  3833. static int emulator_pio_in_emulated(struct x86_emulate_ctxt *ctxt,
  3834. int size, unsigned short port, void *val,
  3835. unsigned int count)
  3836. {
  3837. struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
  3838. int ret;
  3839. if (vcpu->arch.pio.count)
  3840. goto data_avail;
  3841. ret = emulator_pio_in_out(vcpu, size, port, val, count, true);
  3842. if (ret) {
  3843. data_avail:
  3844. memcpy(val, vcpu->arch.pio_data, size * count);
  3845. vcpu->arch.pio.count = 0;
  3846. return 1;
  3847. }
  3848. return 0;
  3849. }
  3850. static int emulator_pio_out_emulated(struct x86_emulate_ctxt *ctxt,
  3851. int size, unsigned short port,
  3852. const void *val, unsigned int count)
  3853. {
  3854. struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
  3855. memcpy(vcpu->arch.pio_data, val, size * count);
  3856. return emulator_pio_in_out(vcpu, size, port, (void *)val, count, false);
  3857. }
  3858. static unsigned long get_segment_base(struct kvm_vcpu *vcpu, int seg)
  3859. {
  3860. return kvm_x86_ops->get_segment_base(vcpu, seg);
  3861. }
  3862. static void emulator_invlpg(struct x86_emulate_ctxt *ctxt, ulong address)
  3863. {
  3864. kvm_mmu_invlpg(emul_to_vcpu(ctxt), address);
  3865. }
  3866. int kvm_emulate_wbinvd(struct kvm_vcpu *vcpu)
  3867. {
  3868. if (!need_emulate_wbinvd(vcpu))
  3869. return X86EMUL_CONTINUE;
  3870. if (kvm_x86_ops->has_wbinvd_exit()) {
  3871. int cpu = get_cpu();
  3872. cpumask_set_cpu(cpu, vcpu->arch.wbinvd_dirty_mask);
  3873. smp_call_function_many(vcpu->arch.wbinvd_dirty_mask,
  3874. wbinvd_ipi, NULL, 1);
  3875. put_cpu();
  3876. cpumask_clear(vcpu->arch.wbinvd_dirty_mask);
  3877. } else
  3878. wbinvd();
  3879. return X86EMUL_CONTINUE;
  3880. }
  3881. EXPORT_SYMBOL_GPL(kvm_emulate_wbinvd);
  3882. static void emulator_wbinvd(struct x86_emulate_ctxt *ctxt)
  3883. {
  3884. kvm_emulate_wbinvd(emul_to_vcpu(ctxt));
  3885. }
  3886. int emulator_get_dr(struct x86_emulate_ctxt *ctxt, int dr, unsigned long *dest)
  3887. {
  3888. return _kvm_get_dr(emul_to_vcpu(ctxt), dr, dest);
  3889. }
  3890. int emulator_set_dr(struct x86_emulate_ctxt *ctxt, int dr, unsigned long value)
  3891. {
  3892. return __kvm_set_dr(emul_to_vcpu(ctxt), dr, value);
  3893. }
  3894. static u64 mk_cr_64(u64 curr_cr, u32 new_val)
  3895. {
  3896. return (curr_cr & ~((1ULL << 32) - 1)) | new_val;
  3897. }
  3898. static unsigned long emulator_get_cr(struct x86_emulate_ctxt *ctxt, int cr)
  3899. {
  3900. struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
  3901. unsigned long value;
  3902. switch (cr) {
  3903. case 0:
  3904. value = kvm_read_cr0(vcpu);
  3905. break;
  3906. case 2:
  3907. value = vcpu->arch.cr2;
  3908. break;
  3909. case 3:
  3910. value = kvm_read_cr3(vcpu);
  3911. break;
  3912. case 4:
  3913. value = kvm_read_cr4(vcpu);
  3914. break;
  3915. case 8:
  3916. value = kvm_get_cr8(vcpu);
  3917. break;
  3918. default:
  3919. kvm_err("%s: unexpected cr %u\n", __func__, cr);
  3920. return 0;
  3921. }
  3922. return value;
  3923. }
  3924. static int emulator_set_cr(struct x86_emulate_ctxt *ctxt, int cr, ulong val)
  3925. {
  3926. struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
  3927. int res = 0;
  3928. switch (cr) {
  3929. case 0:
  3930. res = kvm_set_cr0(vcpu, mk_cr_64(kvm_read_cr0(vcpu), val));
  3931. break;
  3932. case 2:
  3933. vcpu->arch.cr2 = val;
  3934. break;
  3935. case 3:
  3936. res = kvm_set_cr3(vcpu, val);
  3937. break;
  3938. case 4:
  3939. res = kvm_set_cr4(vcpu, mk_cr_64(kvm_read_cr4(vcpu), val));
  3940. break;
  3941. case 8:
  3942. res = kvm_set_cr8(vcpu, val);
  3943. break;
  3944. default:
  3945. kvm_err("%s: unexpected cr %u\n", __func__, cr);
  3946. res = -1;
  3947. }
  3948. return res;
  3949. }
  3950. static void emulator_set_rflags(struct x86_emulate_ctxt *ctxt, ulong val)
  3951. {
  3952. kvm_set_rflags(emul_to_vcpu(ctxt), val);
  3953. }
  3954. static int emulator_get_cpl(struct x86_emulate_ctxt *ctxt)
  3955. {
  3956. return kvm_x86_ops->get_cpl(emul_to_vcpu(ctxt));
  3957. }
  3958. static void emulator_get_gdt(struct x86_emulate_ctxt *ctxt, struct desc_ptr *dt)
  3959. {
  3960. kvm_x86_ops->get_gdt(emul_to_vcpu(ctxt), dt);
  3961. }
  3962. static void emulator_get_idt(struct x86_emulate_ctxt *ctxt, struct desc_ptr *dt)
  3963. {
  3964. kvm_x86_ops->get_idt(emul_to_vcpu(ctxt), dt);
  3965. }
  3966. static void emulator_set_gdt(struct x86_emulate_ctxt *ctxt, struct desc_ptr *dt)
  3967. {
  3968. kvm_x86_ops->set_gdt(emul_to_vcpu(ctxt), dt);
  3969. }
  3970. static void emulator_set_idt(struct x86_emulate_ctxt *ctxt, struct desc_ptr *dt)
  3971. {
  3972. kvm_x86_ops->set_idt(emul_to_vcpu(ctxt), dt);
  3973. }
  3974. static unsigned long emulator_get_cached_segment_base(
  3975. struct x86_emulate_ctxt *ctxt, int seg)
  3976. {
  3977. return get_segment_base(emul_to_vcpu(ctxt), seg);
  3978. }
  3979. static bool emulator_get_segment(struct x86_emulate_ctxt *ctxt, u16 *selector,
  3980. struct desc_struct *desc, u32 *base3,
  3981. int seg)
  3982. {
  3983. struct kvm_segment var;
  3984. kvm_get_segment(emul_to_vcpu(ctxt), &var, seg);
  3985. *selector = var.selector;
  3986. if (var.unusable) {
  3987. memset(desc, 0, sizeof(*desc));
  3988. return false;
  3989. }
  3990. if (var.g)
  3991. var.limit >>= 12;
  3992. set_desc_limit(desc, var.limit);
  3993. set_desc_base(desc, (unsigned long)var.base);
  3994. #ifdef CONFIG_X86_64
  3995. if (base3)
  3996. *base3 = var.base >> 32;
  3997. #endif
  3998. desc->type = var.type;
  3999. desc->s = var.s;
  4000. desc->dpl = var.dpl;
  4001. desc->p = var.present;
  4002. desc->avl = var.avl;
  4003. desc->l = var.l;
  4004. desc->d = var.db;
  4005. desc->g = var.g;
  4006. return true;
  4007. }
  4008. static void emulator_set_segment(struct x86_emulate_ctxt *ctxt, u16 selector,
  4009. struct desc_struct *desc, u32 base3,
  4010. int seg)
  4011. {
  4012. struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
  4013. struct kvm_segment var;
  4014. var.selector = selector;
  4015. var.base = get_desc_base(desc);
  4016. #ifdef CONFIG_X86_64
  4017. var.base |= ((u64)base3) << 32;
  4018. #endif
  4019. var.limit = get_desc_limit(desc);
  4020. if (desc->g)
  4021. var.limit = (var.limit << 12) | 0xfff;
  4022. var.type = desc->type;
  4023. var.present = desc->p;
  4024. var.dpl = desc->dpl;
  4025. var.db = desc->d;
  4026. var.s = desc->s;
  4027. var.l = desc->l;
  4028. var.g = desc->g;
  4029. var.avl = desc->avl;
  4030. var.present = desc->p;
  4031. var.unusable = !var.present;
  4032. var.padding = 0;
  4033. kvm_set_segment(vcpu, &var, seg);
  4034. return;
  4035. }
  4036. static int emulator_get_msr(struct x86_emulate_ctxt *ctxt,
  4037. u32 msr_index, u64 *pdata)
  4038. {
  4039. return kvm_get_msr(emul_to_vcpu(ctxt), msr_index, pdata);
  4040. }
  4041. static int emulator_set_msr(struct x86_emulate_ctxt *ctxt,
  4042. u32 msr_index, u64 data)
  4043. {
  4044. struct msr_data msr;
  4045. msr.data = data;
  4046. msr.index = msr_index;
  4047. msr.host_initiated = false;
  4048. return kvm_set_msr(emul_to_vcpu(ctxt), &msr);
  4049. }
  4050. static int emulator_read_pmc(struct x86_emulate_ctxt *ctxt,
  4051. u32 pmc, u64 *pdata)
  4052. {
  4053. return kvm_pmu_read_pmc(emul_to_vcpu(ctxt), pmc, pdata);
  4054. }
  4055. static void emulator_halt(struct x86_emulate_ctxt *ctxt)
  4056. {
  4057. emul_to_vcpu(ctxt)->arch.halt_request = 1;
  4058. }
  4059. static void emulator_get_fpu(struct x86_emulate_ctxt *ctxt)
  4060. {
  4061. preempt_disable();
  4062. kvm_load_guest_fpu(emul_to_vcpu(ctxt));
  4063. /*
  4064. * CR0.TS may reference the host fpu state, not the guest fpu state,
  4065. * so it may be clear at this point.
  4066. */
  4067. clts();
  4068. }
  4069. static void emulator_put_fpu(struct x86_emulate_ctxt *ctxt)
  4070. {
  4071. preempt_enable();
  4072. }
  4073. static int emulator_intercept(struct x86_emulate_ctxt *ctxt,
  4074. struct x86_instruction_info *info,
  4075. enum x86_intercept_stage stage)
  4076. {
  4077. return kvm_x86_ops->check_intercept(emul_to_vcpu(ctxt), info, stage);
  4078. }
  4079. static void emulator_get_cpuid(struct x86_emulate_ctxt *ctxt,
  4080. u32 *eax, u32 *ebx, u32 *ecx, u32 *edx)
  4081. {
  4082. kvm_cpuid(emul_to_vcpu(ctxt), eax, ebx, ecx, edx);
  4083. }
  4084. static ulong emulator_read_gpr(struct x86_emulate_ctxt *ctxt, unsigned reg)
  4085. {
  4086. return kvm_register_read(emul_to_vcpu(ctxt), reg);
  4087. }
  4088. static void emulator_write_gpr(struct x86_emulate_ctxt *ctxt, unsigned reg, ulong val)
  4089. {
  4090. kvm_register_write(emul_to_vcpu(ctxt), reg, val);
  4091. }
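/*
 * Callback table wiring the generic x86 instruction emulator to KVM: memory,
 * port I/O, segment, control/debug register and MSR accesses are routed
 * through the emulator_* helpers above.
 */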
  4092. static const struct x86_emulate_ops emulate_ops = {
  4093. .read_gpr = emulator_read_gpr,
  4094. .write_gpr = emulator_write_gpr,
  4095. .read_std = kvm_read_guest_virt_system,
  4096. .write_std = kvm_write_guest_virt_system,
  4097. .fetch = kvm_fetch_guest_virt,
  4098. .read_emulated = emulator_read_emulated,
  4099. .write_emulated = emulator_write_emulated,
  4100. .cmpxchg_emulated = emulator_cmpxchg_emulated,
  4101. .invlpg = emulator_invlpg,
  4102. .pio_in_emulated = emulator_pio_in_emulated,
  4103. .pio_out_emulated = emulator_pio_out_emulated,
  4104. .get_segment = emulator_get_segment,
  4105. .set_segment = emulator_set_segment,
  4106. .get_cached_segment_base = emulator_get_cached_segment_base,
  4107. .get_gdt = emulator_get_gdt,
  4108. .get_idt = emulator_get_idt,
  4109. .set_gdt = emulator_set_gdt,
  4110. .set_idt = emulator_set_idt,
  4111. .get_cr = emulator_get_cr,
  4112. .set_cr = emulator_set_cr,
  4113. .set_rflags = emulator_set_rflags,
  4114. .cpl = emulator_get_cpl,
  4115. .get_dr = emulator_get_dr,
  4116. .set_dr = emulator_set_dr,
  4117. .set_msr = emulator_set_msr,
  4118. .get_msr = emulator_get_msr,
  4119. .read_pmc = emulator_read_pmc,
  4120. .halt = emulator_halt,
  4121. .wbinvd = emulator_wbinvd,
  4122. .fix_hypercall = emulator_fix_hypercall,
  4123. .get_fpu = emulator_get_fpu,
  4124. .put_fpu = emulator_put_fpu,
  4125. .intercept = emulator_intercept,
  4126. .get_cpuid = emulator_get_cpuid,
  4127. };
  4128. static void toggle_interruptibility(struct kvm_vcpu *vcpu, u32 mask)
  4129. {
  4130. u32 int_shadow = kvm_x86_ops->get_interrupt_shadow(vcpu, mask);
  4131. /*
4132. * an sti; sti; sequence only disables interrupts for the first
4133. * instruction. So, if the last instruction, be it emulated or
4134. * not, left the system with the INT_STI flag enabled, it
4135. * means that the last instruction was an sti. We should not
4136. * leave the flag on in this case. The same goes for mov ss.
4137. */
  4138. if (!(int_shadow & mask))
  4139. kvm_x86_ops->set_interrupt_shadow(vcpu, mask);
  4140. }
  4141. static void inject_emulated_exception(struct kvm_vcpu *vcpu)
  4142. {
  4143. struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt;
  4144. if (ctxt->exception.vector == PF_VECTOR)
  4145. kvm_propagate_fault(vcpu, &ctxt->exception);
  4146. else if (ctxt->exception.error_code_valid)
  4147. kvm_queue_exception_e(vcpu, ctxt->exception.vector,
  4148. ctxt->exception.error_code);
  4149. else
  4150. kvm_queue_exception(vcpu, ctxt->exception.vector);
  4151. }
  4152. static void init_decode_cache(struct x86_emulate_ctxt *ctxt)
  4153. {
  4154. memset(&ctxt->twobyte, 0,
  4155. (void *)&ctxt->_regs - (void *)&ctxt->twobyte);
  4156. ctxt->fetch.start = 0;
  4157. ctxt->fetch.end = 0;
  4158. ctxt->io_read.pos = 0;
  4159. ctxt->io_read.end = 0;
  4160. ctxt->mem_read.pos = 0;
  4161. ctxt->mem_read.end = 0;
  4162. }
  4163. static void init_emulate_ctxt(struct kvm_vcpu *vcpu)
  4164. {
  4165. struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt;
  4166. int cs_db, cs_l;
  4167. kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
  4168. ctxt->eflags = kvm_get_rflags(vcpu);
  4169. ctxt->eip = kvm_rip_read(vcpu);
  4170. ctxt->mode = (!is_protmode(vcpu)) ? X86EMUL_MODE_REAL :
  4171. (ctxt->eflags & X86_EFLAGS_VM) ? X86EMUL_MODE_VM86 :
  4172. cs_l ? X86EMUL_MODE_PROT64 :
  4173. cs_db ? X86EMUL_MODE_PROT32 :
  4174. X86EMUL_MODE_PROT16;
  4175. ctxt->guest_mode = is_guest_mode(vcpu);
  4176. init_decode_cache(ctxt);
  4177. vcpu->arch.emulate_regs_need_sync_from_vcpu = false;
  4178. }
  4179. int kvm_inject_realmode_interrupt(struct kvm_vcpu *vcpu, int irq, int inc_eip)
  4180. {
  4181. struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt;
  4182. int ret;
  4183. init_emulate_ctxt(vcpu);
  4184. ctxt->op_bytes = 2;
  4185. ctxt->ad_bytes = 2;
  4186. ctxt->_eip = ctxt->eip + inc_eip;
  4187. ret = emulate_int_real(ctxt, irq);
  4188. if (ret != X86EMUL_CONTINUE)
  4189. return EMULATE_FAIL;
  4190. ctxt->eip = ctxt->_eip;
  4191. kvm_rip_write(vcpu, ctxt->eip);
  4192. kvm_set_rflags(vcpu, ctxt->eflags);
  4193. if (irq == NMI_VECTOR)
  4194. vcpu->arch.nmi_pending = 0;
  4195. else
  4196. vcpu->arch.interrupt.pending = false;
  4197. return EMULATE_DONE;
  4198. }
  4199. EXPORT_SYMBOL_GPL(kvm_inject_realmode_interrupt);
  4200. static int handle_emulation_failure(struct kvm_vcpu *vcpu)
  4201. {
  4202. int r = EMULATE_DONE;
  4203. ++vcpu->stat.insn_emulation_fail;
  4204. trace_kvm_emulate_insn_failed(vcpu);
  4205. if (!is_guest_mode(vcpu)) {
  4206. vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
  4207. vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
  4208. vcpu->run->internal.ndata = 0;
  4209. r = EMULATE_FAIL;
  4210. }
  4211. kvm_queue_exception(vcpu, UD_VECTOR);
  4212. return r;
  4213. }
  4214. static bool reexecute_instruction(struct kvm_vcpu *vcpu, gva_t cr2,
  4215. bool write_fault_to_shadow_pgtable,
  4216. int emulation_type)
  4217. {
  4218. gpa_t gpa = cr2;
  4219. pfn_t pfn;
  4220. if (emulation_type & EMULTYPE_NO_REEXECUTE)
  4221. return false;
  4222. if (!vcpu->arch.mmu.direct_map) {
  4223. /*
  4224. * Write permission should be allowed since only
4225. * write accesses need to be emulated.
  4226. */
  4227. gpa = kvm_mmu_gva_to_gpa_write(vcpu, cr2, NULL);
  4228. /*
4229. * If the mapping is invalid in the guest, let the cpu retry
4230. * it to generate the fault.
  4231. */
  4232. if (gpa == UNMAPPED_GVA)
  4233. return true;
  4234. }
  4235. /*
  4236. * Do not retry the unhandleable instruction if it faults on the
4237. * readonly host memory, otherwise it will go into an infinite loop:
  4238. * retry instruction -> write #PF -> emulation fail -> retry
  4239. * instruction -> ...
  4240. */
  4241. pfn = gfn_to_pfn(vcpu->kvm, gpa_to_gfn(gpa));
  4242. /*
4243. * If the instruction failed on the error pfn, it cannot be fixed;
  4244. * report the error to userspace.
  4245. */
  4246. if (is_error_noslot_pfn(pfn))
  4247. return false;
  4248. kvm_release_pfn_clean(pfn);
  4249. /* The instructions are well-emulated on direct mmu. */
  4250. if (vcpu->arch.mmu.direct_map) {
  4251. unsigned int indirect_shadow_pages;
  4252. spin_lock(&vcpu->kvm->mmu_lock);
  4253. indirect_shadow_pages = vcpu->kvm->arch.indirect_shadow_pages;
  4254. spin_unlock(&vcpu->kvm->mmu_lock);
  4255. if (indirect_shadow_pages)
  4256. kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(gpa));
  4257. return true;
  4258. }
  4259. /*
4260. * If emulation was due to an access to a shadowed page table
4261. * and it failed, try to unshadow the page and re-enter the
4262. * guest to let the CPU execute the instruction.
  4263. */
  4264. kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(gpa));
  4265. /*
4266. * If the access faults on its page table, it cannot
4267. * be fixed by unprotecting the shadow page, and it should
4268. * be reported to userspace.
  4269. */
  4270. return !write_fault_to_shadow_pgtable;
  4271. }
  4272. static bool retry_instruction(struct x86_emulate_ctxt *ctxt,
  4273. unsigned long cr2, int emulation_type)
  4274. {
  4275. struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
  4276. unsigned long last_retry_eip, last_retry_addr, gpa = cr2;
  4277. last_retry_eip = vcpu->arch.last_retry_eip;
  4278. last_retry_addr = vcpu->arch.last_retry_addr;
  4279. /*
4280. * If the emulation is caused by #PF and it is a non-page-table-
4281. * writing instruction, it means the VM-EXIT was caused by shadow
4282. * page protection; we can zap the shadow page and retry this
4283. * instruction directly.
  4284. *
  4285. * Note: if the guest uses a non-page-table modifying instruction
  4286. * on the PDE that points to the instruction, then we will unmap
  4287. * the instruction and go to an infinite loop. So, we cache the
4288. * last retried eip and the last fault address; if we see the eip
4289. * and the address again, we can break out of the potential infinite
  4290. * loop.
  4291. */
  4292. vcpu->arch.last_retry_eip = vcpu->arch.last_retry_addr = 0;
  4293. if (!(emulation_type & EMULTYPE_RETRY))
  4294. return false;
  4295. if (x86_page_table_writing_insn(ctxt))
  4296. return false;
  4297. if (ctxt->eip == last_retry_eip && last_retry_addr == cr2)
  4298. return false;
  4299. vcpu->arch.last_retry_eip = ctxt->eip;
  4300. vcpu->arch.last_retry_addr = cr2;
  4301. if (!vcpu->arch.mmu.direct_map)
  4302. gpa = kvm_mmu_gva_to_gpa_write(vcpu, cr2, NULL);
  4303. kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(gpa));
  4304. return true;
  4305. }
  4306. static int complete_emulated_mmio(struct kvm_vcpu *vcpu);
  4307. static int complete_emulated_pio(struct kvm_vcpu *vcpu);
  4308. int x86_emulate_instruction(struct kvm_vcpu *vcpu,
  4309. unsigned long cr2,
  4310. int emulation_type,
  4311. void *insn,
  4312. int insn_len)
  4313. {
  4314. int r;
  4315. struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt;
  4316. bool writeback = true;
  4317. bool write_fault_to_spt = vcpu->arch.write_fault_to_shadow_pgtable;
  4318. /*
  4319. * Clear write_fault_to_shadow_pgtable here to ensure it is
  4320. * never reused.
  4321. */
  4322. vcpu->arch.write_fault_to_shadow_pgtable = false;
  4323. kvm_clear_exception_queue(vcpu);
  4324. if (!(emulation_type & EMULTYPE_NO_DECODE)) {
  4325. init_emulate_ctxt(vcpu);
  4326. ctxt->interruptibility = 0;
  4327. ctxt->have_exception = false;
  4328. ctxt->perm_ok = false;
  4329. ctxt->only_vendor_specific_insn
  4330. = emulation_type & EMULTYPE_TRAP_UD;
  4331. r = x86_decode_insn(ctxt, insn, insn_len);
  4332. trace_kvm_emulate_insn_start(vcpu);
  4333. ++vcpu->stat.insn_emulation;
  4334. if (r != EMULATION_OK) {
  4335. if (emulation_type & EMULTYPE_TRAP_UD)
  4336. return EMULATE_FAIL;
  4337. if (reexecute_instruction(vcpu, cr2, write_fault_to_spt,
  4338. emulation_type))
  4339. return EMULATE_DONE;
  4340. if (emulation_type & EMULTYPE_SKIP)
  4341. return EMULATE_FAIL;
  4342. return handle_emulation_failure(vcpu);
  4343. }
  4344. }
  4345. if (emulation_type & EMULTYPE_SKIP) {
  4346. kvm_rip_write(vcpu, ctxt->_eip);
  4347. return EMULATE_DONE;
  4348. }
  4349. if (retry_instruction(ctxt, cr2, emulation_type))
  4350. return EMULATE_DONE;
4351. /* this is needed for the vmware backdoor interface to work since it
4352. changes register values during the IO operation */
  4353. if (vcpu->arch.emulate_regs_need_sync_from_vcpu) {
  4354. vcpu->arch.emulate_regs_need_sync_from_vcpu = false;
  4355. emulator_invalidate_register_cache(ctxt);
  4356. }
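/*
 * Re-entered when the emulator returns EMULATION_RESTART, i.e. a string
 * instruction completed only partially and wants to continue.
 */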
  4357. restart:
  4358. r = x86_emulate_insn(ctxt);
  4359. if (r == EMULATION_INTERCEPTED)
  4360. return EMULATE_DONE;
  4361. if (r == EMULATION_FAILED) {
  4362. if (reexecute_instruction(vcpu, cr2, write_fault_to_spt,
  4363. emulation_type))
  4364. return EMULATE_DONE;
  4365. return handle_emulation_failure(vcpu);
  4366. }
  4367. if (ctxt->have_exception) {
  4368. inject_emulated_exception(vcpu);
  4369. r = EMULATE_DONE;
  4370. } else if (vcpu->arch.pio.count) {
  4371. if (!vcpu->arch.pio.in)
  4372. vcpu->arch.pio.count = 0;
  4373. else {
  4374. writeback = false;
  4375. vcpu->arch.complete_userspace_io = complete_emulated_pio;
  4376. }
  4377. r = EMULATE_DO_MMIO;
  4378. } else if (vcpu->mmio_needed) {
  4379. if (!vcpu->mmio_is_write)
  4380. writeback = false;
  4381. r = EMULATE_DO_MMIO;
  4382. vcpu->arch.complete_userspace_io = complete_emulated_mmio;
  4383. } else if (r == EMULATION_RESTART)
  4384. goto restart;
  4385. else
  4386. r = EMULATE_DONE;
  4387. if (writeback) {
  4388. toggle_interruptibility(vcpu, ctxt->interruptibility);
  4389. kvm_set_rflags(vcpu, ctxt->eflags);
  4390. kvm_make_request(KVM_REQ_EVENT, vcpu);
  4391. vcpu->arch.emulate_regs_need_sync_to_vcpu = false;
  4392. kvm_rip_write(vcpu, ctxt->eip);
  4393. } else
  4394. vcpu->arch.emulate_regs_need_sync_to_vcpu = true;
  4395. return r;
  4396. }
  4397. EXPORT_SYMBOL_GPL(x86_emulate_instruction);
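/*
 * Fast path for an OUT instruction: emulate the port write directly from
 * RAX without going through full instruction emulation.
 */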
  4398. int kvm_fast_pio_out(struct kvm_vcpu *vcpu, int size, unsigned short port)
  4399. {
  4400. unsigned long val = kvm_register_read(vcpu, VCPU_REGS_RAX);
  4401. int ret = emulator_pio_out_emulated(&vcpu->arch.emulate_ctxt,
  4402. size, port, &val, 1);
  4403. /* do not return to emulator after return from userspace */
  4404. vcpu->arch.pio.count = 0;
  4405. return ret;
  4406. }
  4407. EXPORT_SYMBOL_GPL(kvm_fast_pio_out);
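/* Mark this CPU's cached TSC frequency as invalid (called when the CPU is going down). */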
  4408. static void tsc_bad(void *info)
  4409. {
  4410. __this_cpu_write(cpu_tsc_khz, 0);
  4411. }
  4412. static void tsc_khz_changed(void *data)
  4413. {
  4414. struct cpufreq_freqs *freq = data;
  4415. unsigned long khz = 0;
  4416. if (data)
  4417. khz = freq->new;
  4418. else if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
  4419. khz = cpufreq_quick_get(raw_smp_processor_id());
  4420. if (!khz)
  4421. khz = tsc_khz;
  4422. __this_cpu_write(cpu_tsc_khz, khz);
  4423. }
  4424. static int kvmclock_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
  4425. void *data)
  4426. {
  4427. struct cpufreq_freqs *freq = data;
  4428. struct kvm *kvm;
  4429. struct kvm_vcpu *vcpu;
  4430. int i, send_ipi = 0;
  4431. /*
  4432. * We allow guests to temporarily run on slowing clocks,
  4433. * provided we notify them after, or to run on accelerating
  4434. * clocks, provided we notify them before. Thus time never
  4435. * goes backwards.
  4436. *
  4437. * However, we have a problem. We can't atomically update
  4438. * the frequency of a given CPU from this function; it is
  4439. * merely a notifier, which can be called from any CPU.
  4440. * Changing the TSC frequency at arbitrary points in time
  4441. * requires a recomputation of local variables related to
  4442. * the TSC for each VCPU. We must flag these local variables
  4443. * to be updated and be sure the update takes place with the
  4444. * new frequency before any guests proceed.
  4445. *
  4446. * Unfortunately, the combination of hotplug CPU and frequency
  4447. * change creates an intractable locking scenario; the order
  4448. * of when these callouts happen is undefined with respect to
  4449. * CPU hotplug, and they can race with each other. As such,
  4450. * merely setting per_cpu(cpu_tsc_khz) = X during a hotadd is
  4451. * undefined; you can actually have a CPU frequency change take
  4452. * place in between the computation of X and the setting of the
  4453. * variable. To protect against this problem, all updates of
  4454. * the per_cpu tsc_khz variable are done in an interrupt
  4455. * protected IPI, and all callers wishing to update the value
  4456. * must wait for a synchronous IPI to complete (which is trivial
  4457. * if the caller is on the CPU already). This establishes the
  4458. * necessary total order on variable updates.
  4459. *
  4460. * Note that because a guest time update may take place
  4461. * anytime after the setting of the VCPU's request bit, the
  4462. * correct TSC value must be set before the request. However,
  4463. * to ensure the update actually makes it to any guest which
  4464. * starts running in hardware virtualization between the set
  4465. * and the acquisition of the spinlock, we must also ping the
  4466. * CPU after setting the request bit.
  4467. *
  4468. */
  4469. if (val == CPUFREQ_PRECHANGE && freq->old > freq->new)
  4470. return 0;
  4471. if (val == CPUFREQ_POSTCHANGE && freq->old < freq->new)
  4472. return 0;
  4473. smp_call_function_single(freq->cpu, tsc_khz_changed, freq, 1);
  4474. raw_spin_lock(&kvm_lock);
  4475. list_for_each_entry(kvm, &vm_list, vm_list) {
  4476. kvm_for_each_vcpu(i, vcpu, kvm) {
  4477. if (vcpu->cpu != freq->cpu)
  4478. continue;
  4479. kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
  4480. if (vcpu->cpu != smp_processor_id())
  4481. send_ipi = 1;
  4482. }
  4483. }
  4484. raw_spin_unlock(&kvm_lock);
  4485. if (freq->old < freq->new && send_ipi) {
  4486. /*
4487. * We are scaling the frequency up. We must make sure the guest
4488. * doesn't see old kvmclock values while running with
4489. * the new frequency, otherwise we risk the guest seeing
4490. * time go backwards.
  4491. *
  4492. * In case we update the frequency for another cpu
4493. * (which might be in guest context), send an interrupt
  4494. * to kick the cpu out of guest context. Next time
  4495. * guest context is entered kvmclock will be updated,
  4496. * so the guest will not see stale values.
  4497. */
  4498. smp_call_function_single(freq->cpu, tsc_khz_changed, freq, 1);
  4499. }
  4500. return 0;
  4501. }
  4502. static struct notifier_block kvmclock_cpufreq_notifier_block = {
  4503. .notifier_call = kvmclock_cpufreq_notifier
  4504. };
  4505. static int kvmclock_cpu_notifier(struct notifier_block *nfb,
  4506. unsigned long action, void *hcpu)
  4507. {
  4508. unsigned int cpu = (unsigned long)hcpu;
  4509. switch (action) {
  4510. case CPU_ONLINE:
  4511. case CPU_DOWN_FAILED:
  4512. smp_call_function_single(cpu, tsc_khz_changed, NULL, 1);
  4513. break;
  4514. case CPU_DOWN_PREPARE:
  4515. smp_call_function_single(cpu, tsc_bad, NULL, 1);
  4516. break;
  4517. }
  4518. return NOTIFY_OK;
  4519. }
  4520. static struct notifier_block kvmclock_cpu_notifier_block = {
  4521. .notifier_call = kvmclock_cpu_notifier,
  4522. .priority = -INT_MAX
  4523. };
  4524. static void kvm_timer_init(void)
  4525. {
  4526. int cpu;
  4527. max_tsc_khz = tsc_khz;
  4528. register_hotcpu_notifier(&kvmclock_cpu_notifier_block);
  4529. if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) {
  4530. #ifdef CONFIG_CPU_FREQ
  4531. struct cpufreq_policy policy;
  4532. memset(&policy, 0, sizeof(policy));
  4533. cpu = get_cpu();
  4534. cpufreq_get_policy(&policy, cpu);
  4535. if (policy.cpuinfo.max_freq)
  4536. max_tsc_khz = policy.cpuinfo.max_freq;
  4537. put_cpu();
  4538. #endif
  4539. cpufreq_register_notifier(&kvmclock_cpufreq_notifier_block,
  4540. CPUFREQ_TRANSITION_NOTIFIER);
  4541. }
  4542. pr_debug("kvm: max_tsc_khz = %ld\n", max_tsc_khz);
  4543. for_each_online_cpu(cpu)
  4544. smp_call_function_single(cpu, tsc_khz_changed, NULL, 1);
  4545. }
  4546. static DEFINE_PER_CPU(struct kvm_vcpu *, current_vcpu);
  4547. int kvm_is_in_guest(void)
  4548. {
  4549. return __this_cpu_read(current_vcpu) != NULL;
  4550. }
  4551. static int kvm_is_user_mode(void)
  4552. {
  4553. int user_mode = 3;
  4554. if (__this_cpu_read(current_vcpu))
  4555. user_mode = kvm_x86_ops->get_cpl(__this_cpu_read(current_vcpu));
  4556. return user_mode != 0;
  4557. }
  4558. static unsigned long kvm_get_guest_ip(void)
  4559. {
  4560. unsigned long ip = 0;
  4561. if (__this_cpu_read(current_vcpu))
  4562. ip = kvm_rip_read(__this_cpu_read(current_vcpu));
  4563. return ip;
  4564. }
  4565. static struct perf_guest_info_callbacks kvm_guest_cbs = {
  4566. .is_in_guest = kvm_is_in_guest,
  4567. .is_user_mode = kvm_is_user_mode,
  4568. .get_guest_ip = kvm_get_guest_ip,
  4569. };
  4570. void kvm_before_handle_nmi(struct kvm_vcpu *vcpu)
  4571. {
  4572. __this_cpu_write(current_vcpu, vcpu);
  4573. }
  4574. EXPORT_SYMBOL_GPL(kvm_before_handle_nmi);
  4575. void kvm_after_handle_nmi(struct kvm_vcpu *vcpu)
  4576. {
  4577. __this_cpu_write(current_vcpu, NULL);
  4578. }
  4579. EXPORT_SYMBOL_GPL(kvm_after_handle_nmi);
  4580. static void kvm_set_mmio_spte_mask(void)
  4581. {
  4582. u64 mask;
  4583. int maxphyaddr = boot_cpu_data.x86_phys_bits;
  4584. /*
4585. * Set the reserved bits and the present bit of a paging-structure
4586. * entry to generate a page fault with PFER.RSV = 1.
  4587. */
  4588. /* Mask the reserved physical address bits. */
  4589. mask = ((1ull << (51 - maxphyaddr + 1)) - 1) << maxphyaddr;
  4590. /* Bit 62 is always reserved for 32bit host. */
  4591. mask |= 0x3ull << 62;
  4592. /* Set the present bit. */
  4593. mask |= 1ull;
  4594. #ifdef CONFIG_X86_64
  4595. /*
  4596. * If reserved bit is not supported, clear the present bit to disable
  4597. * mmio page fault.
  4598. */
  4599. if (maxphyaddr == 52)
  4600. mask &= ~1ull;
  4601. #endif
  4602. kvm_mmu_set_mmio_spte_mask(mask);
  4603. }
  4604. #ifdef CONFIG_X86_64
  4605. static void pvclock_gtod_update_fn(struct work_struct *work)
  4606. {
  4607. struct kvm *kvm;
  4608. struct kvm_vcpu *vcpu;
  4609. int i;
  4610. raw_spin_lock(&kvm_lock);
  4611. list_for_each_entry(kvm, &vm_list, vm_list)
  4612. kvm_for_each_vcpu(i, vcpu, kvm)
  4613. set_bit(KVM_REQ_MASTERCLOCK_UPDATE, &vcpu->requests);
  4614. atomic_set(&kvm_guest_has_master_clock, 0);
  4615. raw_spin_unlock(&kvm_lock);
  4616. }
  4617. static DECLARE_WORK(pvclock_gtod_work, pvclock_gtod_update_fn);
  4618. /*
  4619. * Notification about pvclock gtod data update.
  4620. */
  4621. static int pvclock_gtod_notify(struct notifier_block *nb, unsigned long unused,
  4622. void *priv)
  4623. {
  4624. struct pvclock_gtod_data *gtod = &pvclock_gtod_data;
  4625. struct timekeeper *tk = priv;
  4626. update_pvclock_gtod(tk);
  4627. /* disable master clock if host does not trust, or does not
  4628. * use, TSC clocksource
  4629. */
  4630. if (gtod->clock.vclock_mode != VCLOCK_TSC &&
  4631. atomic_read(&kvm_guest_has_master_clock) != 0)
  4632. queue_work(system_long_wq, &pvclock_gtod_work);
  4633. return 0;
  4634. }
  4635. static struct notifier_block pvclock_gtod_notifier = {
  4636. .notifier_call = pvclock_gtod_notify,
  4637. };
  4638. #endif
  4639. int kvm_arch_init(void *opaque)
  4640. {
  4641. int r;
  4642. struct kvm_x86_ops *ops = (struct kvm_x86_ops *)opaque;
  4643. if (kvm_x86_ops) {
  4644. printk(KERN_ERR "kvm: already loaded the other module\n");
  4645. r = -EEXIST;
  4646. goto out;
  4647. }
  4648. if (!ops->cpu_has_kvm_support()) {
  4649. printk(KERN_ERR "kvm: no hardware support\n");
  4650. r = -EOPNOTSUPP;
  4651. goto out;
  4652. }
  4653. if (ops->disabled_by_bios()) {
  4654. printk(KERN_ERR "kvm: disabled by bios\n");
  4655. r = -EOPNOTSUPP;
  4656. goto out;
  4657. }
  4658. r = -ENOMEM;
  4659. shared_msrs = alloc_percpu(struct kvm_shared_msrs);
  4660. if (!shared_msrs) {
  4661. printk(KERN_ERR "kvm: failed to allocate percpu kvm_shared_msrs\n");
  4662. goto out;
  4663. }
  4664. r = kvm_mmu_module_init();
  4665. if (r)
  4666. goto out_free_percpu;
  4667. kvm_set_mmio_spte_mask();
  4668. kvm_init_msr_list();
  4669. kvm_x86_ops = ops;
  4670. kvm_mmu_set_mask_ptes(PT_USER_MASK, PT_ACCESSED_MASK,
  4671. PT_DIRTY_MASK, PT64_NX_MASK, 0);
  4672. kvm_timer_init();
  4673. perf_register_guest_info_callbacks(&kvm_guest_cbs);
  4674. if (cpu_has_xsave)
  4675. host_xcr0 = xgetbv(XCR_XFEATURE_ENABLED_MASK);
  4676. kvm_lapic_init();
  4677. #ifdef CONFIG_X86_64
  4678. pvclock_gtod_register_notifier(&pvclock_gtod_notifier);
  4679. #endif
  4680. return 0;
  4681. out_free_percpu:
  4682. free_percpu(shared_msrs);
  4683. out:
  4684. return r;
  4685. }
  4686. void kvm_arch_exit(void)
  4687. {
  4688. perf_unregister_guest_info_callbacks(&kvm_guest_cbs);
  4689. if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
  4690. cpufreq_unregister_notifier(&kvmclock_cpufreq_notifier_block,
  4691. CPUFREQ_TRANSITION_NOTIFIER);
  4692. unregister_hotcpu_notifier(&kvmclock_cpu_notifier_block);
  4693. #ifdef CONFIG_X86_64
  4694. pvclock_gtod_unregister_notifier(&pvclock_gtod_notifier);
  4695. #endif
  4696. kvm_x86_ops = NULL;
  4697. kvm_mmu_module_exit();
  4698. free_percpu(shared_msrs);
  4699. }
  4700. int kvm_emulate_halt(struct kvm_vcpu *vcpu)
  4701. {
  4702. ++vcpu->stat.halt_exits;
  4703. if (irqchip_in_kernel(vcpu->kvm)) {
  4704. vcpu->arch.mp_state = KVM_MP_STATE_HALTED;
  4705. return 1;
  4706. } else {
  4707. vcpu->run->exit_reason = KVM_EXIT_HLT;
  4708. return 0;
  4709. }
  4710. }
  4711. EXPORT_SYMBOL_GPL(kvm_emulate_halt);
  4712. int kvm_hv_hypercall(struct kvm_vcpu *vcpu)
  4713. {
  4714. u64 param, ingpa, outgpa, ret;
  4715. uint16_t code, rep_idx, rep_cnt, res = HV_STATUS_SUCCESS, rep_done = 0;
  4716. bool fast, longmode;
  4717. int cs_db, cs_l;
  4718. /*
4719. * A hypercall generates #UD from non-zero cpl or real mode,
4720. * per the HYPER-V spec.
  4721. */
  4722. if (kvm_x86_ops->get_cpl(vcpu) != 0 || !is_protmode(vcpu)) {
  4723. kvm_queue_exception(vcpu, UD_VECTOR);
  4724. return 0;
  4725. }
  4726. kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
  4727. longmode = is_long_mode(vcpu) && cs_l == 1;
  4728. if (!longmode) {
  4729. param = ((u64)kvm_register_read(vcpu, VCPU_REGS_RDX) << 32) |
  4730. (kvm_register_read(vcpu, VCPU_REGS_RAX) & 0xffffffff);
  4731. ingpa = ((u64)kvm_register_read(vcpu, VCPU_REGS_RBX) << 32) |
  4732. (kvm_register_read(vcpu, VCPU_REGS_RCX) & 0xffffffff);
  4733. outgpa = ((u64)kvm_register_read(vcpu, VCPU_REGS_RDI) << 32) |
  4734. (kvm_register_read(vcpu, VCPU_REGS_RSI) & 0xffffffff);
  4735. }
  4736. #ifdef CONFIG_X86_64
  4737. else {
  4738. param = kvm_register_read(vcpu, VCPU_REGS_RCX);
  4739. ingpa = kvm_register_read(vcpu, VCPU_REGS_RDX);
  4740. outgpa = kvm_register_read(vcpu, VCPU_REGS_R8);
  4741. }
  4742. #endif
  4743. code = param & 0xffff;
  4744. fast = (param >> 16) & 0x1;
  4745. rep_cnt = (param >> 32) & 0xfff;
  4746. rep_idx = (param >> 48) & 0xfff;
  4747. trace_kvm_hv_hypercall(code, fast, rep_cnt, rep_idx, ingpa, outgpa);
  4748. switch (code) {
  4749. case HV_X64_HV_NOTIFY_LONG_SPIN_WAIT:
  4750. kvm_vcpu_on_spin(vcpu);
  4751. break;
  4752. default:
  4753. res = HV_STATUS_INVALID_HYPERCALL_CODE;
  4754. break;
  4755. }
  4756. ret = res | (((u64)rep_done & 0xfff) << 32);
  4757. if (longmode) {
  4758. kvm_register_write(vcpu, VCPU_REGS_RAX, ret);
  4759. } else {
  4760. kvm_register_write(vcpu, VCPU_REGS_RDX, ret >> 32);
  4761. kvm_register_write(vcpu, VCPU_REGS_RAX, ret & 0xffffffff);
  4762. }
  4763. return 1;
  4764. }
  4765. int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
  4766. {
  4767. unsigned long nr, a0, a1, a2, a3, ret;
  4768. int r = 1;
  4769. if (kvm_hv_hypercall_enabled(vcpu->kvm))
  4770. return kvm_hv_hypercall(vcpu);
  4771. nr = kvm_register_read(vcpu, VCPU_REGS_RAX);
  4772. a0 = kvm_register_read(vcpu, VCPU_REGS_RBX);
  4773. a1 = kvm_register_read(vcpu, VCPU_REGS_RCX);
  4774. a2 = kvm_register_read(vcpu, VCPU_REGS_RDX);
  4775. a3 = kvm_register_read(vcpu, VCPU_REGS_RSI);
  4776. trace_kvm_hypercall(nr, a0, a1, a2, a3);
  4777. if (!is_long_mode(vcpu)) {
  4778. nr &= 0xFFFFFFFF;
  4779. a0 &= 0xFFFFFFFF;
  4780. a1 &= 0xFFFFFFFF;
  4781. a2 &= 0xFFFFFFFF;
  4782. a3 &= 0xFFFFFFFF;
  4783. }
  4784. if (kvm_x86_ops->get_cpl(vcpu) != 0) {
  4785. ret = -KVM_EPERM;
  4786. goto out;
  4787. }
  4788. switch (nr) {
  4789. case KVM_HC_VAPIC_POLL_IRQ:
  4790. ret = 0;
  4791. break;
  4792. default:
  4793. ret = -KVM_ENOSYS;
  4794. break;
  4795. }
  4796. out:
  4797. kvm_register_write(vcpu, VCPU_REGS_RAX, ret);
  4798. ++vcpu->stat.hypercalls;
  4799. return r;
  4800. }
  4801. EXPORT_SYMBOL_GPL(kvm_emulate_hypercall);
  4802. static int emulator_fix_hypercall(struct x86_emulate_ctxt *ctxt)
  4803. {
  4804. struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
  4805. char instruction[3];
  4806. unsigned long rip = kvm_rip_read(vcpu);
  4807. kvm_x86_ops->patch_hypercall(vcpu, instruction);
  4808. return emulator_write_emulated(ctxt, rip, instruction, 3, NULL);
  4809. }
  4810. /*
  4811. * Check if userspace requested an interrupt window, and that the
  4812. * interrupt window is open.
  4813. *
  4814. * No need to exit to userspace if we already have an interrupt queued.
  4815. */
  4816. static int dm_request_for_irq_injection(struct kvm_vcpu *vcpu)
  4817. {
  4818. return (!irqchip_in_kernel(vcpu->kvm) && !kvm_cpu_has_interrupt(vcpu) &&
  4819. vcpu->run->request_interrupt_window &&
  4820. kvm_arch_interrupt_allowed(vcpu));
  4821. }
  4822. static void post_kvm_run_save(struct kvm_vcpu *vcpu)
  4823. {
  4824. struct kvm_run *kvm_run = vcpu->run;
  4825. kvm_run->if_flag = (kvm_get_rflags(vcpu) & X86_EFLAGS_IF) != 0;
  4826. kvm_run->cr8 = kvm_get_cr8(vcpu);
  4827. kvm_run->apic_base = kvm_get_apic_base(vcpu);
  4828. if (irqchip_in_kernel(vcpu->kvm))
  4829. kvm_run->ready_for_interrupt_injection = 1;
  4830. else
  4831. kvm_run->ready_for_interrupt_injection =
  4832. kvm_arch_interrupt_allowed(vcpu) &&
  4833. !kvm_cpu_has_interrupt(vcpu) &&
  4834. !kvm_event_needs_reinjection(vcpu);
  4835. }
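/* Pin the guest page backing the virtual-APIC area while the vcpu runs. */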
  4836. static int vapic_enter(struct kvm_vcpu *vcpu)
  4837. {
  4838. struct kvm_lapic *apic = vcpu->arch.apic;
  4839. struct page *page;
  4840. if (!apic || !apic->vapic_addr)
  4841. return 0;
  4842. page = gfn_to_page(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT);
  4843. if (is_error_page(page))
  4844. return -EFAULT;
  4845. vcpu->arch.apic->vapic_page = page;
  4846. return 0;
  4847. }
  4848. static void vapic_exit(struct kvm_vcpu *vcpu)
  4849. {
  4850. struct kvm_lapic *apic = vcpu->arch.apic;
  4851. int idx;
  4852. if (!apic || !apic->vapic_addr)
  4853. return;
  4854. idx = srcu_read_lock(&vcpu->kvm->srcu);
  4855. kvm_release_page_dirty(apic->vapic_page);
  4856. mark_page_dirty(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT);
  4857. srcu_read_unlock(&vcpu->kvm->srcu, idx);
  4858. }
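/*
 * Let the vendor code adjust the CR8/TPR intercept threshold based on the
 * highest pending interrupt in the local APIC.
 */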
  4859. static void update_cr8_intercept(struct kvm_vcpu *vcpu)
  4860. {
  4861. int max_irr, tpr;
  4862. if (!kvm_x86_ops->update_cr8_intercept)
  4863. return;
  4864. if (!vcpu->arch.apic)
  4865. return;
  4866. if (!vcpu->arch.apic->vapic_addr)
  4867. max_irr = kvm_lapic_find_highest_irr(vcpu);
  4868. else
  4869. max_irr = -1;
  4870. if (max_irr != -1)
  4871. max_irr >>= 4;
  4872. tpr = kvm_lapic_get_cr8(vcpu);
  4873. kvm_x86_ops->update_cr8_intercept(vcpu, tpr, max_irr);
  4874. }
  4875. static void inject_pending_event(struct kvm_vcpu *vcpu)
  4876. {
  4877. /* try to reinject previous events if any */
  4878. if (vcpu->arch.exception.pending) {
  4879. trace_kvm_inj_exception(vcpu->arch.exception.nr,
  4880. vcpu->arch.exception.has_error_code,
  4881. vcpu->arch.exception.error_code);
  4882. kvm_x86_ops->queue_exception(vcpu, vcpu->arch.exception.nr,
  4883. vcpu->arch.exception.has_error_code,
  4884. vcpu->arch.exception.error_code,
  4885. vcpu->arch.exception.reinject);
  4886. return;
  4887. }
  4888. if (vcpu->arch.nmi_injected) {
  4889. kvm_x86_ops->set_nmi(vcpu);
  4890. return;
  4891. }
  4892. if (vcpu->arch.interrupt.pending) {
  4893. kvm_x86_ops->set_irq(vcpu);
  4894. return;
  4895. }
  4896. /* try to inject new event if pending */
  4897. if (vcpu->arch.nmi_pending) {
  4898. if (kvm_x86_ops->nmi_allowed(vcpu)) {
  4899. --vcpu->arch.nmi_pending;
  4900. vcpu->arch.nmi_injected = true;
  4901. kvm_x86_ops->set_nmi(vcpu);
  4902. }
  4903. } else if (kvm_cpu_has_injectable_intr(vcpu)) {
  4904. if (kvm_x86_ops->interrupt_allowed(vcpu)) {
  4905. kvm_queue_interrupt(vcpu, kvm_cpu_get_interrupt(vcpu),
  4906. false);
  4907. kvm_x86_ops->set_irq(vcpu);
  4908. }
  4909. }
  4910. }
  4911. static void process_nmi(struct kvm_vcpu *vcpu)
  4912. {
  4913. unsigned limit = 2;
  4914. /*
  4915. * x86 is limited to one NMI running, and one NMI pending after it.
  4916. * If an NMI is already in progress, limit further NMIs to just one.
  4917. * Otherwise, allow two (and we'll inject the first one immediately).
  4918. */
  4919. if (kvm_x86_ops->get_nmi_mask(vcpu) || vcpu->arch.nmi_injected)
  4920. limit = 1;
  4921. vcpu->arch.nmi_pending += atomic_xchg(&vcpu->arch.nmi_queued, 0);
  4922. vcpu->arch.nmi_pending = min(vcpu->arch.nmi_pending, limit);
  4923. kvm_make_request(KVM_REQ_EVENT, vcpu);
  4924. }
  4925. static void kvm_gen_update_masterclock(struct kvm *kvm)
  4926. {
  4927. #ifdef CONFIG_X86_64
  4928. int i;
  4929. struct kvm_vcpu *vcpu;
  4930. struct kvm_arch *ka = &kvm->arch;
  4931. spin_lock(&ka->pvclock_gtod_sync_lock);
  4932. kvm_make_mclock_inprogress_request(kvm);
  4933. /* no guest entries from this point */
  4934. pvclock_update_vm_gtod_copy(kvm);
  4935. kvm_for_each_vcpu(i, vcpu, kvm)
  4936. set_bit(KVM_REQ_CLOCK_UPDATE, &vcpu->requests);
  4937. /* guest entries allowed */
  4938. kvm_for_each_vcpu(i, vcpu, kvm)
  4939. clear_bit(KVM_REQ_MCLOCK_INPROGRESS, &vcpu->requests);
  4940. spin_unlock(&ka->pvclock_gtod_sync_lock);
  4941. #endif
  4942. }
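/*
 * Rebuild the EOI-exit bitmap and the trigger-mode register from the
 * in-kernel ioapic and hand them to the vendor code and the local APIC.
 */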
  4943. static void vcpu_scan_ioapic(struct kvm_vcpu *vcpu)
  4944. {
  4945. u64 eoi_exit_bitmap[4];
  4946. u32 tmr[8];
  4947. if (!kvm_apic_hw_enabled(vcpu->arch.apic))
  4948. return;
  4949. memset(eoi_exit_bitmap, 0, 32);
  4950. memset(tmr, 0, 32);
  4951. kvm_ioapic_scan_entry(vcpu, eoi_exit_bitmap, tmr);
  4952. kvm_x86_ops->load_eoi_exitmap(vcpu, eoi_exit_bitmap);
  4953. kvm_apic_update_tmr(vcpu, tmr);
  4954. }
  4955. static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
  4956. {
  4957. int r;
  4958. bool req_int_win = !irqchip_in_kernel(vcpu->kvm) &&
  4959. vcpu->run->request_interrupt_window;
  4960. bool req_immediate_exit = false;
  4961. if (vcpu->requests) {
  4962. if (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu))
  4963. kvm_mmu_unload(vcpu);
  4964. if (kvm_check_request(KVM_REQ_MIGRATE_TIMER, vcpu))
  4965. __kvm_migrate_timers(vcpu);
  4966. if (kvm_check_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu))
  4967. kvm_gen_update_masterclock(vcpu->kvm);
  4968. if (kvm_check_request(KVM_REQ_GLOBAL_CLOCK_UPDATE, vcpu))
  4969. kvm_gen_kvmclock_update(vcpu);
  4970. if (kvm_check_request(KVM_REQ_CLOCK_UPDATE, vcpu)) {
  4971. r = kvm_guest_time_update(vcpu);
  4972. if (unlikely(r))
  4973. goto out;
  4974. }
  4975. if (kvm_check_request(KVM_REQ_MMU_SYNC, vcpu))
  4976. kvm_mmu_sync_roots(vcpu);
  4977. if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu))
  4978. kvm_x86_ops->tlb_flush(vcpu);
  4979. if (kvm_check_request(KVM_REQ_REPORT_TPR_ACCESS, vcpu)) {
  4980. vcpu->run->exit_reason = KVM_EXIT_TPR_ACCESS;
  4981. r = 0;
  4982. goto out;
  4983. }
  4984. if (kvm_check_request(KVM_REQ_TRIPLE_FAULT, vcpu)) {
  4985. vcpu->run->exit_reason = KVM_EXIT_SHUTDOWN;
  4986. r = 0;
  4987. goto out;
  4988. }
  4989. if (kvm_check_request(KVM_REQ_DEACTIVATE_FPU, vcpu)) {
  4990. vcpu->fpu_active = 0;
  4991. kvm_x86_ops->fpu_deactivate(vcpu);
  4992. }
  4993. if (kvm_check_request(KVM_REQ_APF_HALT, vcpu)) {
  4994. /* Page is swapped out. Do synthetic halt */
  4995. vcpu->arch.apf.halted = true;
  4996. r = 1;
  4997. goto out;
  4998. }
  4999. if (kvm_check_request(KVM_REQ_STEAL_UPDATE, vcpu))
  5000. record_steal_time(vcpu);
  5001. if (kvm_check_request(KVM_REQ_NMI, vcpu))
  5002. process_nmi(vcpu);
  5003. if (kvm_check_request(KVM_REQ_PMU, vcpu))
  5004. kvm_handle_pmu_event(vcpu);
  5005. if (kvm_check_request(KVM_REQ_PMI, vcpu))
  5006. kvm_deliver_pmi(vcpu);
  5007. if (kvm_check_request(KVM_REQ_SCAN_IOAPIC, vcpu))
  5008. vcpu_scan_ioapic(vcpu);
  5009. }
  5010. if (kvm_check_request(KVM_REQ_EVENT, vcpu) || req_int_win) {
  5011. kvm_apic_accept_events(vcpu);
  5012. if (vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED) {
  5013. r = 1;
  5014. goto out;
  5015. }
  5016. inject_pending_event(vcpu);
  5017. /* enable NMI/IRQ window open exits if needed */
  5018. if (vcpu->arch.nmi_pending)
  5019. req_immediate_exit =
  5020. kvm_x86_ops->enable_nmi_window(vcpu) != 0;
  5021. else if (kvm_cpu_has_injectable_intr(vcpu) || req_int_win)
  5022. req_immediate_exit =
  5023. kvm_x86_ops->enable_irq_window(vcpu) != 0;
  5024. if (kvm_lapic_enabled(vcpu)) {
  5025. /*
  5026. * Update architecture specific hints for APIC
  5027. * virtual interrupt delivery.
  5028. */
  5029. if (kvm_x86_ops->hwapic_irr_update)
  5030. kvm_x86_ops->hwapic_irr_update(vcpu,
  5031. kvm_lapic_find_highest_irr(vcpu));
  5032. update_cr8_intercept(vcpu);
  5033. kvm_lapic_sync_to_vapic(vcpu);
  5034. }
  5035. }
  5036. r = kvm_mmu_reload(vcpu);
  5037. if (unlikely(r)) {
  5038. goto cancel_injection;
  5039. }
  5040. preempt_disable();
  5041. kvm_x86_ops->prepare_guest_switch(vcpu);
  5042. if (vcpu->fpu_active)
  5043. kvm_load_guest_fpu(vcpu);
  5044. kvm_load_guest_xcr0(vcpu);
  5045. vcpu->mode = IN_GUEST_MODE;
5046. /* We should set ->mode before checking ->requests;
  5047. * see the comment in make_all_cpus_request.
  5048. */
  5049. smp_mb();
  5050. local_irq_disable();
  5051. if (vcpu->mode == EXITING_GUEST_MODE || vcpu->requests
  5052. || need_resched() || signal_pending(current)) {
  5053. vcpu->mode = OUTSIDE_GUEST_MODE;
  5054. smp_wmb();
  5055. local_irq_enable();
  5056. preempt_enable();
  5057. r = 1;
  5058. goto cancel_injection;
  5059. }
  5060. srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
  5061. if (req_immediate_exit)
  5062. smp_send_reschedule(vcpu->cpu);
  5063. kvm_guest_enter();
  5064. if (unlikely(vcpu->arch.switch_db_regs)) {
  5065. set_debugreg(0, 7);
  5066. set_debugreg(vcpu->arch.eff_db[0], 0);
  5067. set_debugreg(vcpu->arch.eff_db[1], 1);
  5068. set_debugreg(vcpu->arch.eff_db[2], 2);
  5069. set_debugreg(vcpu->arch.eff_db[3], 3);
  5070. }
  5071. trace_kvm_entry(vcpu->vcpu_id);
  5072. kvm_x86_ops->run(vcpu);
  5073. /*
  5074. * If the guest has used debug registers, at least dr7
  5075. * will be disabled while returning to the host.
  5076. * If we don't have active breakpoints in the host, we don't
  5077. * care about the messed up debug address registers. But if
  5078. * we have some of them active, restore the old state.
  5079. */
  5080. if (hw_breakpoint_active())
  5081. hw_breakpoint_restore();
  5082. vcpu->arch.last_guest_tsc = kvm_x86_ops->read_l1_tsc(vcpu,
  5083. native_read_tsc());
  5084. vcpu->mode = OUTSIDE_GUEST_MODE;
  5085. smp_wmb();
5086. /* Interrupts are enabled by handle_external_intr() */
  5087. kvm_x86_ops->handle_external_intr(vcpu);
  5088. ++vcpu->stat.exits;
  5089. /*
  5090. * We must have an instruction between local_irq_enable() and
  5091. * kvm_guest_exit(), so the timer interrupt isn't delayed by
  5092. * the interrupt shadow. The stat.exits increment will do nicely.
  5093. * But we need to prevent reordering, hence this barrier():
  5094. */
  5095. barrier();
  5096. kvm_guest_exit();
  5097. preempt_enable();
  5098. vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
  5099. /*
  5100. * Profile KVM exit RIPs:
  5101. */
  5102. if (unlikely(prof_on == KVM_PROFILING)) {
  5103. unsigned long rip = kvm_rip_read(vcpu);
  5104. profile_hit(KVM_PROFILING, (void *)rip);
  5105. }
  5106. if (unlikely(vcpu->arch.tsc_always_catchup))
  5107. kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
  5108. if (vcpu->arch.apic_attention)
  5109. kvm_lapic_sync_from_vapic(vcpu);
  5110. r = kvm_x86_ops->handle_exit(vcpu);
  5111. return r;
  5112. cancel_injection:
  5113. kvm_x86_ops->cancel_injection(vcpu);
  5114. if (unlikely(vcpu->arch.apic_attention))
  5115. kvm_lapic_sync_from_vapic(vcpu);
  5116. out:
  5117. return r;
  5118. }
  5119. static int __vcpu_run(struct kvm_vcpu *vcpu)
  5120. {
  5121. int r;
  5122. struct kvm *kvm = vcpu->kvm;
  5123. vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
  5124. r = vapic_enter(vcpu);
  5125. if (r) {
  5126. srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
  5127. return r;
  5128. }
  5129. r = 1;
  5130. while (r > 0) {
  5131. if (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE &&
  5132. !vcpu->arch.apf.halted)
  5133. r = vcpu_enter_guest(vcpu);
  5134. else {
  5135. srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
  5136. kvm_vcpu_block(vcpu);
  5137. vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
  5138. if (kvm_check_request(KVM_REQ_UNHALT, vcpu)) {
  5139. kvm_apic_accept_events(vcpu);
  5140. switch(vcpu->arch.mp_state) {
  5141. case KVM_MP_STATE_HALTED:
  5142. vcpu->arch.mp_state =
  5143. KVM_MP_STATE_RUNNABLE;
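/* fall through */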
  5144. case KVM_MP_STATE_RUNNABLE:
  5145. vcpu->arch.apf.halted = false;
  5146. break;
  5147. case KVM_MP_STATE_INIT_RECEIVED:
  5148. break;
  5149. default:
  5150. r = -EINTR;
  5151. break;
  5152. }
  5153. }
  5154. }
  5155. if (r <= 0)
  5156. break;
  5157. clear_bit(KVM_REQ_PENDING_TIMER, &vcpu->requests);
  5158. if (kvm_cpu_has_pending_timer(vcpu))
  5159. kvm_inject_pending_timer_irqs(vcpu);
  5160. if (dm_request_for_irq_injection(vcpu)) {
  5161. r = -EINTR;
  5162. vcpu->run->exit_reason = KVM_EXIT_INTR;
  5163. ++vcpu->stat.request_irq_exits;
  5164. }
  5165. kvm_check_async_pf_completion(vcpu);
  5166. if (signal_pending(current)) {
  5167. r = -EINTR;
  5168. vcpu->run->exit_reason = KVM_EXIT_INTR;
  5169. ++vcpu->stat.signal_exits;
  5170. }
  5171. if (need_resched()) {
  5172. srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
  5173. kvm_resched(vcpu);
  5174. vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
  5175. }
  5176. }
  5177. srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
  5178. vapic_exit(vcpu);
  5179. return r;
  5180. }
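/*
 * Resume an in-progress emulation (no re-decode) after userspace has
 * completed the outstanding PIO or MMIO.
 */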
  5181. static inline int complete_emulated_io(struct kvm_vcpu *vcpu)
  5182. {
  5183. int r;
  5184. vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
  5185. r = emulate_instruction(vcpu, EMULTYPE_NO_DECODE);
  5186. srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
  5187. if (r != EMULATE_DONE)
  5188. return 0;
  5189. return 1;
  5190. }
  5191. static int complete_emulated_pio(struct kvm_vcpu *vcpu)
  5192. {
  5193. BUG_ON(!vcpu->arch.pio.count);
  5194. return complete_emulated_io(vcpu);
  5195. }
  5196. /*
  5197. * Implements the following, as a state machine:
  5198. *
  5199. * read:
  5200. * for each fragment
  5201. * for each mmio piece in the fragment
  5202. * write gpa, len
  5203. * exit
  5204. * copy data
  5205. * execute insn
  5206. *
  5207. * write:
  5208. * for each fragment
  5209. * for each mmio piece in the fragment
  5210. * write gpa, len
  5211. * copy data
  5212. * exit
  5213. */
  5214. static int complete_emulated_mmio(struct kvm_vcpu *vcpu)
  5215. {
  5216. struct kvm_run *run = vcpu->run;
  5217. struct kvm_mmio_fragment *frag;
  5218. unsigned len;
  5219. BUG_ON(!vcpu->mmio_needed);
  5220. /* Complete previous fragment */
  5221. frag = &vcpu->mmio_fragments[vcpu->mmio_cur_fragment];
  5222. len = min(8u, frag->len);
  5223. if (!vcpu->mmio_is_write)
  5224. memcpy(frag->data, run->mmio.data, len);
  5225. if (frag->len <= 8) {
  5226. /* Switch to the next fragment. */
  5227. frag++;
  5228. vcpu->mmio_cur_fragment++;
  5229. } else {
  5230. /* Go forward to the next mmio piece. */
  5231. frag->data += len;
  5232. frag->gpa += len;
  5233. frag->len -= len;
  5234. }
  5235. if (vcpu->mmio_cur_fragment == vcpu->mmio_nr_fragments) {
  5236. vcpu->mmio_needed = 0;
  5237. if (vcpu->mmio_is_write)
  5238. return 1;
  5239. vcpu->mmio_read_completed = 1;
  5240. return complete_emulated_io(vcpu);
  5241. }
  5242. run->exit_reason = KVM_EXIT_MMIO;
  5243. run->mmio.phys_addr = frag->gpa;
  5244. if (vcpu->mmio_is_write)
  5245. memcpy(run->mmio.data, frag->data, min(8u, frag->len));
  5246. run->mmio.len = min(8u, frag->len);
  5247. run->mmio.is_write = vcpu->mmio_is_write;
  5248. vcpu->arch.complete_userspace_io = complete_emulated_mmio;
  5249. return 0;
  5250. }
  5251. int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
  5252. {
  5253. int r;
  5254. sigset_t sigsaved;
  5255. if (!tsk_used_math(current) && init_fpu(current))
  5256. return -ENOMEM;
  5257. if (vcpu->sigset_active)
  5258. sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
  5259. if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_UNINITIALIZED)) {
  5260. kvm_vcpu_block(vcpu);
  5261. kvm_apic_accept_events(vcpu);
  5262. clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
  5263. r = -EAGAIN;
  5264. goto out;
  5265. }
  5266. /* re-sync apic's tpr */
  5267. if (!irqchip_in_kernel(vcpu->kvm)) {
  5268. if (kvm_set_cr8(vcpu, kvm_run->cr8) != 0) {
  5269. r = -EINVAL;
  5270. goto out;
  5271. }
  5272. }
  5273. if (unlikely(vcpu->arch.complete_userspace_io)) {
  5274. int (*cui)(struct kvm_vcpu *) = vcpu->arch.complete_userspace_io;
  5275. vcpu->arch.complete_userspace_io = NULL;
  5276. r = cui(vcpu);
  5277. if (r <= 0)
  5278. goto out;
  5279. } else
  5280. WARN_ON(vcpu->arch.pio.count || vcpu->mmio_needed);
  5281. r = __vcpu_run(vcpu);
  5282. out:
  5283. post_kvm_run_save(vcpu);
  5284. if (vcpu->sigset_active)
  5285. sigprocmask(SIG_SETMASK, &sigsaved, NULL);
  5286. return r;
  5287. }
  5288. int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
  5289. {
  5290. if (vcpu->arch.emulate_regs_need_sync_to_vcpu) {
  5291. /*
  5292. * We are here if userspace calls get_regs() in the middle of
5293. * instruction emulation. Register state needs to be copied
5294. * back from the emulation context to the vcpu. Userspace shouldn't
5295. * normally do that, but some badly designed PV devices (the vmware
5296. * backdoor interface) need this to work.
  5297. */
  5298. emulator_writeback_register_cache(&vcpu->arch.emulate_ctxt);
  5299. vcpu->arch.emulate_regs_need_sync_to_vcpu = false;
  5300. }
  5301. regs->rax = kvm_register_read(vcpu, VCPU_REGS_RAX);
  5302. regs->rbx = kvm_register_read(vcpu, VCPU_REGS_RBX);
  5303. regs->rcx = kvm_register_read(vcpu, VCPU_REGS_RCX);
  5304. regs->rdx = kvm_register_read(vcpu, VCPU_REGS_RDX);
  5305. regs->rsi = kvm_register_read(vcpu, VCPU_REGS_RSI);
  5306. regs->rdi = kvm_register_read(vcpu, VCPU_REGS_RDI);
  5307. regs->rsp = kvm_register_read(vcpu, VCPU_REGS_RSP);
  5308. regs->rbp = kvm_register_read(vcpu, VCPU_REGS_RBP);
  5309. #ifdef CONFIG_X86_64
  5310. regs->r8 = kvm_register_read(vcpu, VCPU_REGS_R8);
  5311. regs->r9 = kvm_register_read(vcpu, VCPU_REGS_R9);
  5312. regs->r10 = kvm_register_read(vcpu, VCPU_REGS_R10);
  5313. regs->r11 = kvm_register_read(vcpu, VCPU_REGS_R11);
  5314. regs->r12 = kvm_register_read(vcpu, VCPU_REGS_R12);
  5315. regs->r13 = kvm_register_read(vcpu, VCPU_REGS_R13);
  5316. regs->r14 = kvm_register_read(vcpu, VCPU_REGS_R14);
  5317. regs->r15 = kvm_register_read(vcpu, VCPU_REGS_R15);
  5318. #endif
  5319. regs->rip = kvm_rip_read(vcpu);
  5320. regs->rflags = kvm_get_rflags(vcpu);
  5321. return 0;
  5322. }
  5323. int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
  5324. {
  5325. vcpu->arch.emulate_regs_need_sync_from_vcpu = true;
  5326. vcpu->arch.emulate_regs_need_sync_to_vcpu = false;
  5327. kvm_register_write(vcpu, VCPU_REGS_RAX, regs->rax);
  5328. kvm_register_write(vcpu, VCPU_REGS_RBX, regs->rbx);
  5329. kvm_register_write(vcpu, VCPU_REGS_RCX, regs->rcx);
  5330. kvm_register_write(vcpu, VCPU_REGS_RDX, regs->rdx);
  5331. kvm_register_write(vcpu, VCPU_REGS_RSI, regs->rsi);
  5332. kvm_register_write(vcpu, VCPU_REGS_RDI, regs->rdi);
  5333. kvm_register_write(vcpu, VCPU_REGS_RSP, regs->rsp);
  5334. kvm_register_write(vcpu, VCPU_REGS_RBP, regs->rbp);
  5335. #ifdef CONFIG_X86_64
  5336. kvm_register_write(vcpu, VCPU_REGS_R8, regs->r8);
  5337. kvm_register_write(vcpu, VCPU_REGS_R9, regs->r9);
  5338. kvm_register_write(vcpu, VCPU_REGS_R10, regs->r10);
  5339. kvm_register_write(vcpu, VCPU_REGS_R11, regs->r11);
  5340. kvm_register_write(vcpu, VCPU_REGS_R12, regs->r12);
  5341. kvm_register_write(vcpu, VCPU_REGS_R13, regs->r13);
  5342. kvm_register_write(vcpu, VCPU_REGS_R14, regs->r14);
  5343. kvm_register_write(vcpu, VCPU_REGS_R15, regs->r15);
  5344. #endif
  5345. kvm_rip_write(vcpu, regs->rip);
  5346. kvm_set_rflags(vcpu, regs->rflags);
  5347. vcpu->arch.exception.pending = false;
  5348. kvm_make_request(KVM_REQ_EVENT, vcpu);
  5349. return 0;
  5350. }
  5351. void kvm_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l)
  5352. {
  5353. struct kvm_segment cs;
  5354. kvm_get_segment(vcpu, &cs, VCPU_SREG_CS);
  5355. *db = cs.db;
  5356. *l = cs.l;
  5357. }
  5358. EXPORT_SYMBOL_GPL(kvm_get_cs_db_l_bits);
  5359. int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
  5360. struct kvm_sregs *sregs)
  5361. {
  5362. struct desc_ptr dt;
  5363. kvm_get_segment(vcpu, &sregs->cs, VCPU_SREG_CS);
  5364. kvm_get_segment(vcpu, &sregs->ds, VCPU_SREG_DS);
  5365. kvm_get_segment(vcpu, &sregs->es, VCPU_SREG_ES);
  5366. kvm_get_segment(vcpu, &sregs->fs, VCPU_SREG_FS);
  5367. kvm_get_segment(vcpu, &sregs->gs, VCPU_SREG_GS);
  5368. kvm_get_segment(vcpu, &sregs->ss, VCPU_SREG_SS);
  5369. kvm_get_segment(vcpu, &sregs->tr, VCPU_SREG_TR);
  5370. kvm_get_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR);
  5371. kvm_x86_ops->get_idt(vcpu, &dt);
  5372. sregs->idt.limit = dt.size;
  5373. sregs->idt.base = dt.address;
  5374. kvm_x86_ops->get_gdt(vcpu, &dt);
  5375. sregs->gdt.limit = dt.size;
  5376. sregs->gdt.base = dt.address;
  5377. sregs->cr0 = kvm_read_cr0(vcpu);
  5378. sregs->cr2 = vcpu->arch.cr2;
  5379. sregs->cr3 = kvm_read_cr3(vcpu);
  5380. sregs->cr4 = kvm_read_cr4(vcpu);
  5381. sregs->cr8 = kvm_get_cr8(vcpu);
  5382. sregs->efer = vcpu->arch.efer;
  5383. sregs->apic_base = kvm_get_apic_base(vcpu);
  5384. memset(sregs->interrupt_bitmap, 0, sizeof sregs->interrupt_bitmap);
  5385. if (vcpu->arch.interrupt.pending && !vcpu->arch.interrupt.soft)
  5386. set_bit(vcpu->arch.interrupt.nr,
  5387. (unsigned long *)sregs->interrupt_bitmap);
  5388. return 0;
  5389. }
  5390. int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
  5391. struct kvm_mp_state *mp_state)
  5392. {
  5393. kvm_apic_accept_events(vcpu);
  5394. mp_state->mp_state = vcpu->arch.mp_state;
  5395. return 0;
  5396. }
  5397. int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
  5398. struct kvm_mp_state *mp_state)
  5399. {
  5400. if (!kvm_vcpu_has_lapic(vcpu) &&
  5401. mp_state->mp_state != KVM_MP_STATE_RUNNABLE)
  5402. return -EINVAL;
  5403. if (mp_state->mp_state == KVM_MP_STATE_SIPI_RECEIVED) {
  5404. vcpu->arch.mp_state = KVM_MP_STATE_INIT_RECEIVED;
  5405. set_bit(KVM_APIC_SIPI, &vcpu->arch.apic->pending_events);
  5406. } else
  5407. vcpu->arch.mp_state = mp_state->mp_state;
  5408. kvm_make_request(KVM_REQ_EVENT, vcpu);
  5409. return 0;
  5410. }
  5411. int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int idt_index,
  5412. int reason, bool has_error_code, u32 error_code)
  5413. {
  5414. struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt;
  5415. int ret;
  5416. init_emulate_ctxt(vcpu);
  5417. ret = emulator_task_switch(ctxt, tss_selector, idt_index, reason,
  5418. has_error_code, error_code);
  5419. if (ret)
  5420. return EMULATE_FAIL;
  5421. kvm_rip_write(vcpu, ctxt->eip);
  5422. kvm_set_rflags(vcpu, ctxt->eflags);
  5423. kvm_make_request(KVM_REQ_EVENT, vcpu);
  5424. return EMULATE_DONE;
  5425. }
  5426. EXPORT_SYMBOL_GPL(kvm_task_switch);
  5427. int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
  5428. struct kvm_sregs *sregs)
  5429. {
  5430. int mmu_reset_needed = 0;
  5431. int pending_vec, max_bits, idx;
  5432. struct desc_ptr dt;
  5433. if (!guest_cpuid_has_xsave(vcpu) && (sregs->cr4 & X86_CR4_OSXSAVE))
  5434. return -EINVAL;
  5435. dt.size = sregs->idt.limit;
  5436. dt.address = sregs->idt.base;
  5437. kvm_x86_ops->set_idt(vcpu, &dt);
  5438. dt.size = sregs->gdt.limit;
  5439. dt.address = sregs->gdt.base;
  5440. kvm_x86_ops->set_gdt(vcpu, &dt);
  5441. vcpu->arch.cr2 = sregs->cr2;
  5442. mmu_reset_needed |= kvm_read_cr3(vcpu) != sregs->cr3;
  5443. vcpu->arch.cr3 = sregs->cr3;
  5444. __set_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail);
  5445. kvm_set_cr8(vcpu, sregs->cr8);
  5446. mmu_reset_needed |= vcpu->arch.efer != sregs->efer;
  5447. kvm_x86_ops->set_efer(vcpu, sregs->efer);
  5448. kvm_set_apic_base(vcpu, sregs->apic_base);
  5449. mmu_reset_needed |= kvm_read_cr0(vcpu) != sregs->cr0;
  5450. kvm_x86_ops->set_cr0(vcpu, sregs->cr0);
  5451. vcpu->arch.cr0 = sregs->cr0;
  5452. mmu_reset_needed |= kvm_read_cr4(vcpu) != sregs->cr4;
  5453. kvm_x86_ops->set_cr4(vcpu, sregs->cr4);
  5454. if (sregs->cr4 & X86_CR4_OSXSAVE)
  5455. kvm_update_cpuid(vcpu);
  5456. idx = srcu_read_lock(&vcpu->kvm->srcu);
  5457. if (!is_long_mode(vcpu) && is_pae(vcpu)) {
  5458. load_pdptrs(vcpu, vcpu->arch.walk_mmu, kvm_read_cr3(vcpu));
  5459. mmu_reset_needed = 1;
  5460. }
  5461. srcu_read_unlock(&vcpu->kvm->srcu, idx);
  5462. if (mmu_reset_needed)
  5463. kvm_mmu_reset_context(vcpu);
  5464. max_bits = KVM_NR_INTERRUPTS;
  5465. pending_vec = find_first_bit(
  5466. (const unsigned long *)sregs->interrupt_bitmap, max_bits);
  5467. if (pending_vec < max_bits) {
  5468. kvm_queue_interrupt(vcpu, pending_vec, false);
  5469. pr_debug("Set back pending irq %d\n", pending_vec);
  5470. }
  5471. kvm_set_segment(vcpu, &sregs->cs, VCPU_SREG_CS);
  5472. kvm_set_segment(vcpu, &sregs->ds, VCPU_SREG_DS);
  5473. kvm_set_segment(vcpu, &sregs->es, VCPU_SREG_ES);
  5474. kvm_set_segment(vcpu, &sregs->fs, VCPU_SREG_FS);
  5475. kvm_set_segment(vcpu, &sregs->gs, VCPU_SREG_GS);
  5476. kvm_set_segment(vcpu, &sregs->ss, VCPU_SREG_SS);
  5477. kvm_set_segment(vcpu, &sregs->tr, VCPU_SREG_TR);
  5478. kvm_set_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR);
  5479. update_cr8_intercept(vcpu);
  5480. /* Older userspace won't unhalt the vcpu on reset. */
  5481. if (kvm_vcpu_is_bsp(vcpu) && kvm_rip_read(vcpu) == 0xfff0 &&
  5482. sregs->cs.selector == 0xf000 && sregs->cs.base == 0xffff0000 &&
  5483. !is_protmode(vcpu))
  5484. vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
  5485. kvm_make_request(KVM_REQ_EVENT, vcpu);
  5486. return 0;
  5487. }
  5488. int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
  5489. struct kvm_guest_debug *dbg)
  5490. {
  5491. unsigned long rflags;
  5492. int i, r;
  5493. if (dbg->control & (KVM_GUESTDBG_INJECT_DB | KVM_GUESTDBG_INJECT_BP)) {
  5494. r = -EBUSY;
  5495. if (vcpu->arch.exception.pending)
  5496. goto out;
  5497. if (dbg->control & KVM_GUESTDBG_INJECT_DB)
  5498. kvm_queue_exception(vcpu, DB_VECTOR);
  5499. else
  5500. kvm_queue_exception(vcpu, BP_VECTOR);
  5501. }
  5502. /*
  5503. * Read rflags as long as potentially injected trace flags are still
  5504. * filtered out.
  5505. */
  5506. rflags = kvm_get_rflags(vcpu);
  5507. vcpu->guest_debug = dbg->control;
  5508. if (!(vcpu->guest_debug & KVM_GUESTDBG_ENABLE))
  5509. vcpu->guest_debug = 0;
  5510. if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) {
  5511. for (i = 0; i < KVM_NR_DB_REGS; ++i)
  5512. vcpu->arch.eff_db[i] = dbg->arch.debugreg[i];
  5513. vcpu->arch.guest_debug_dr7 = dbg->arch.debugreg[7];
  5514. } else {
  5515. for (i = 0; i < KVM_NR_DB_REGS; i++)
  5516. vcpu->arch.eff_db[i] = vcpu->arch.db[i];
  5517. }
  5518. kvm_update_dr7(vcpu);
  5519. if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
  5520. vcpu->arch.singlestep_rip = kvm_rip_read(vcpu) +
  5521. get_segment_base(vcpu, VCPU_SREG_CS);
  5522. /*
  5523. * Trigger an rflags update that will inject or remove the trace
  5524. * flags.
  5525. */
  5526. kvm_set_rflags(vcpu, rflags);
  5527. kvm_x86_ops->update_db_bp_intercept(vcpu);
  5528. r = 0;
  5529. out:
  5530. return r;
  5531. }
  5532. /*
  5533. * Translate a guest virtual address to a guest physical address.
  5534. */
  5535. int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
  5536. struct kvm_translation *tr)
  5537. {
  5538. unsigned long vaddr = tr->linear_address;
  5539. gpa_t gpa;
  5540. int idx;
  5541. idx = srcu_read_lock(&vcpu->kvm->srcu);
  5542. gpa = kvm_mmu_gva_to_gpa_system(vcpu, vaddr, NULL);
  5543. srcu_read_unlock(&vcpu->kvm->srcu, idx);
  5544. tr->physical_address = gpa;
  5545. tr->valid = gpa != UNMAPPED_GVA;
  5546. tr->writeable = 1;
  5547. tr->usermode = 0;
  5548. return 0;
  5549. }
  5550. int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
  5551. {
  5552. struct i387_fxsave_struct *fxsave =
  5553. &vcpu->arch.guest_fpu.state->fxsave;
  5554. memcpy(fpu->fpr, fxsave->st_space, 128);
  5555. fpu->fcw = fxsave->cwd;
  5556. fpu->fsw = fxsave->swd;
  5557. fpu->ftwx = fxsave->twd;
  5558. fpu->last_opcode = fxsave->fop;
  5559. fpu->last_ip = fxsave->rip;
  5560. fpu->last_dp = fxsave->rdp;
  5561. memcpy(fpu->xmm, fxsave->xmm_space, sizeof fxsave->xmm_space);
  5562. return 0;
  5563. }
  5564. int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
  5565. {
  5566. struct i387_fxsave_struct *fxsave =
  5567. &vcpu->arch.guest_fpu.state->fxsave;
  5568. memcpy(fxsave->st_space, fpu->fpr, 128);
  5569. fxsave->cwd = fpu->fcw;
  5570. fxsave->swd = fpu->fsw;
  5571. fxsave->twd = fpu->ftwx;
  5572. fxsave->fop = fpu->last_opcode;
  5573. fxsave->rip = fpu->last_ip;
  5574. fxsave->rdp = fpu->last_dp;
  5575. memcpy(fxsave->xmm_space, fpu->xmm, sizeof fxsave->xmm_space);
  5576. return 0;
  5577. }
int fx_init(struct kvm_vcpu *vcpu)
{
	int err;

	err = fpu_alloc(&vcpu->arch.guest_fpu);
	if (err)
		return err;

	fpu_finit(&vcpu->arch.guest_fpu);

	/*
	 * Ensure guest xcr0 is valid for loading
	 */
	vcpu->arch.xcr0 = XSTATE_FP;

	vcpu->arch.cr0 |= X86_CR0_ET;

	return 0;
}
EXPORT_SYMBOL_GPL(fx_init);

static void fx_free(struct kvm_vcpu *vcpu)
{
	fpu_free(&vcpu->arch.guest_fpu);
}
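/*
 * Activate the guest FPU on this CPU: claim the FPU from the host and
 * restore the guest's extended state.  No-op if it is already loaded.
 */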
void kvm_load_guest_fpu(struct kvm_vcpu *vcpu)
{
	if (vcpu->guest_fpu_loaded)
		return;

	/*
	 * Restore all possible states in the guest,
	 * and assume host would use all available bits.
	 * Guest xcr0 would be loaded later.
	 */
	kvm_put_guest_xcr0(vcpu);
	vcpu->guest_fpu_loaded = 1;
	__kernel_fpu_begin();
	fpu_restore_checking(&vcpu->arch.guest_fpu);
	trace_kvm_fpu(1);
}

void kvm_put_guest_fpu(struct kvm_vcpu *vcpu)
{
	kvm_put_guest_xcr0(vcpu);

	if (!vcpu->guest_fpu_loaded)
		return;

	vcpu->guest_fpu_loaded = 0;
	fpu_save_init(&vcpu->arch.guest_fpu);
	__kernel_fpu_end();
	++vcpu->stat.fpu_reload;
	kvm_make_request(KVM_REQ_DEACTIVATE_FPU, vcpu);
	trace_kvm_fpu(0);
}
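/* Tear down a vcpu: stop kvmclock, free the FPU area and the backend state. */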
void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
{
	kvmclock_reset(vcpu);

	free_cpumask_var(vcpu->arch.wbinvd_dirty_mask);
	fx_free(vcpu);
	kvm_x86_ops->vcpu_free(vcpu);
}

struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
				      unsigned int id)
{
	if (check_tsc_unstable() && atomic_read(&kvm->online_vcpus) != 0)
		printk_once(KERN_WARNING
			    "kvm: SMP vm created on host with unstable TSC; "
			    "guest TSC will not be reliable\n");
	return kvm_x86_ops->vcpu_create(kvm, id);
}

int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
	int r;

	vcpu->arch.mtrr_state.have_fixed = 1;
	r = vcpu_load(vcpu);
	if (r)
		return r;
	kvm_vcpu_reset(vcpu);
	r = kvm_mmu_setup(vcpu);
	vcpu_put(vcpu);

	return r;
}

int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
	int r;
	struct msr_data msr;

	r = vcpu_load(vcpu);
	if (r)
		return r;
	msr.data = 0x0;
	msr.index = MSR_IA32_TSC;
	msr.host_initiated = true;
	kvm_write_tsc(vcpu, &msr);
	vcpu_put(vcpu);

	return r;
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	int r;

	vcpu->arch.apf.msr_val = 0;

	r = vcpu_load(vcpu);
	BUG_ON(r);
	kvm_mmu_unload(vcpu);
	vcpu_put(vcpu);

	fx_free(vcpu);
	kvm_x86_ops->vcpu_free(vcpu);
}
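/*
 * Reset architectural vcpu state to power-on defaults: pending NMIs,
 * debug registers, async page fault state, the PMU and the register cache.
 */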
void kvm_vcpu_reset(struct kvm_vcpu *vcpu)
{
	atomic_set(&vcpu->arch.nmi_queued, 0);
	vcpu->arch.nmi_pending = 0;
	vcpu->arch.nmi_injected = false;

	memset(vcpu->arch.db, 0, sizeof(vcpu->arch.db));
	vcpu->arch.dr6 = DR6_FIXED_1;
	vcpu->arch.dr7 = DR7_FIXED_1;
	kvm_update_dr7(vcpu);

	kvm_make_request(KVM_REQ_EVENT, vcpu);
	vcpu->arch.apf.msr_val = 0;
	vcpu->arch.st.msr_val = 0;

	kvmclock_reset(vcpu);

	kvm_clear_async_pf_completion_queue(vcpu);
	kvm_async_pf_hash_reset(vcpu);
	vcpu->arch.apf.halted = false;

	kvm_pmu_reset(vcpu);

	memset(vcpu->arch.regs, 0, sizeof(vcpu->arch.regs));
	vcpu->arch.regs_avail = ~0;
	vcpu->arch.regs_dirty = ~0;

	kvm_x86_ops->vcpu_reset(vcpu);
}
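/*
 * The SIPI vector selects the real-mode startup segment: for example,
 * vector 0x12 yields CS.selector 0x1200, CS.base 0x12000 and RIP 0, so
 * the AP begins execution at physical address 0x12000.
 */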
void kvm_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, unsigned int vector)
{
	struct kvm_segment cs;

	kvm_get_segment(vcpu, &cs, VCPU_SREG_CS);
	cs.selector = vector << 8;
	cs.base = vector << 12;
	kvm_set_segment(vcpu, &cs, VCPU_SREG_CS);
	kvm_rip_write(vcpu, 0);
}
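/*
 * Per-CPU virtualization enable, run on every online CPU at module load
 * and again when CPUs come back up after suspend/hibernate; it also
 * compensates vcpus whose last observed TSC is ahead of the local TSC.
 */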
int kvm_arch_hardware_enable(void *garbage)
{
	struct kvm *kvm;
	struct kvm_vcpu *vcpu;
	int i;
	int ret;
	u64 local_tsc;
	u64 max_tsc = 0;
	bool stable, backwards_tsc = false;

	kvm_shared_msr_cpu_online();
	ret = kvm_x86_ops->hardware_enable(garbage);
	if (ret != 0)
		return ret;

	local_tsc = native_read_tsc();
	stable = !check_tsc_unstable();
	list_for_each_entry(kvm, &vm_list, vm_list) {
		kvm_for_each_vcpu(i, vcpu, kvm) {
			if (!stable && vcpu->cpu == smp_processor_id())
				set_bit(KVM_REQ_CLOCK_UPDATE, &vcpu->requests);
			if (stable && vcpu->arch.last_host_tsc > local_tsc) {
				backwards_tsc = true;
				if (vcpu->arch.last_host_tsc > max_tsc)
					max_tsc = vcpu->arch.last_host_tsc;
			}
		}
	}

	/*
	 * Sometimes, even reliable TSCs go backwards.  This happens on
	 * platforms that reset TSC during suspend or hibernate actions, but
	 * maintain synchronization.  We must compensate.  Fortunately, we can
	 * detect that condition here, which happens early in CPU bringup,
	 * before any KVM threads can be running.  Unfortunately, we can't
	 * bring the TSCs fully up to date with real time, as we aren't yet far
	 * enough into CPU bringup that we know how much real time has actually
	 * elapsed; our helper function, get_kernel_ns(), will be using boot
	 * variables that haven't been updated yet.
	 *
	 * So we simply find the maximum observed TSC above, then record the
	 * adjustment to TSC in each VCPU.  When the VCPU later gets loaded,
	 * the adjustment will be applied.  Note that we accumulate
	 * adjustments, in case multiple suspend cycles happen before some VCPU
	 * gets a chance to run again.  In the event that no KVM threads get a
	 * chance to run, we will miss the entire elapsed period, as we'll have
	 * reset last_host_tsc, so VCPUs will not have the TSC adjusted and may
	 * lose cycle time.  This isn't too big a deal, since the loss will be
	 * uniform across all VCPUs (not to mention the scenario is extremely
	 * unlikely).  It is possible that a second hibernate recovery happens
	 * much faster than a first, causing the observed TSC here to be
	 * smaller; this would require additional padding adjustment, which is
	 * why we set last_host_tsc to the local tsc observed here.
	 *
	 * N.B. - this code below runs only on platforms with reliable TSC,
	 * as that is the only way backwards_tsc is set above.  Also note
	 * that this runs for ALL vcpus, which is not a bug; all VCPUs should
	 * have the same delta_cyc adjustment applied if backwards_tsc
	 * is detected.  Note further, this adjustment is only done once,
	 * as we reset last_host_tsc on all VCPUs to stop this from being
	 * called multiple times (one for each physical CPU bringup).
	 *
	 * Platforms with unreliable TSCs don't have to deal with this; they
	 * will be compensated by the logic in vcpu_load, which sets the TSC to
	 * catchup mode.  This will catch up all VCPUs to real time, but cannot
	 * guarantee that they stay in perfect synchronization.
	 */
	if (backwards_tsc) {
		u64 delta_cyc = max_tsc - local_tsc;
		list_for_each_entry(kvm, &vm_list, vm_list) {
			kvm_for_each_vcpu(i, vcpu, kvm) {
				vcpu->arch.tsc_offset_adjustment += delta_cyc;
				vcpu->arch.last_host_tsc = local_tsc;
				set_bit(KVM_REQ_MASTERCLOCK_UPDATE,
					&vcpu->requests);
			}

			/*
			 * We have to disable TSC offset matching: if you were
			 * booting a VM while issuing an S4 host suspend, you
			 * may have some problem.  Solving this issue is left
			 * as an exercise to the reader.
			 */
			kvm->arch.last_tsc_nsec = 0;
			kvm->arch.last_tsc_write = 0;
		}
	}
	return 0;
}

void kvm_arch_hardware_disable(void *garbage)
{
	kvm_x86_ops->hardware_disable(garbage);
	drop_user_return_notifiers(garbage);
}

int kvm_arch_hardware_setup(void)
{
	return kvm_x86_ops->hardware_setup();
}

void kvm_arch_hardware_unsetup(void)
{
	kvm_x86_ops->hardware_unsetup();
}

void kvm_arch_check_processor_compat(void *rtn)
{
	kvm_x86_ops->check_processor_compatibility(rtn);
}

bool kvm_vcpu_compatible(struct kvm_vcpu *vcpu)
{
	return irqchip_in_kernel(vcpu->kvm) == (vcpu->arch.apic != NULL);
}

struct static_key kvm_no_apic_vcpu __read_mostly;
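/*
 * Architecture-specific vcpu construction: pio bounce page, MMU, local
 * APIC (when the irqchip is in the kernel), MCE banks and FPU state.
 * Failures unwind through the fail_* labels in reverse allocation order.
 */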
int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
	struct page *page;
	struct kvm *kvm;
	int r;

	BUG_ON(vcpu->kvm == NULL);
	kvm = vcpu->kvm;

	vcpu->arch.emulate_ctxt.ops = &emulate_ops;
	if (!irqchip_in_kernel(kvm) || kvm_vcpu_is_bsp(vcpu))
		vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
	else
		vcpu->arch.mp_state = KVM_MP_STATE_UNINITIALIZED;

	page = alloc_page(GFP_KERNEL | __GFP_ZERO);
	if (!page) {
		r = -ENOMEM;
		goto fail;
	}
	vcpu->arch.pio_data = page_address(page);

	kvm_set_tsc_khz(vcpu, max_tsc_khz);

	r = kvm_mmu_create(vcpu);
	if (r < 0)
		goto fail_free_pio_data;

	if (irqchip_in_kernel(kvm)) {
		r = kvm_create_lapic(vcpu);
		if (r < 0)
			goto fail_mmu_destroy;
	} else
		static_key_slow_inc(&kvm_no_apic_vcpu);

	vcpu->arch.mce_banks = kzalloc(KVM_MAX_MCE_BANKS * sizeof(u64) * 4,
				       GFP_KERNEL);
	if (!vcpu->arch.mce_banks) {
		r = -ENOMEM;
		goto fail_free_lapic;
	}
	vcpu->arch.mcg_cap = KVM_MAX_MCE_BANKS;

	if (!zalloc_cpumask_var(&vcpu->arch.wbinvd_dirty_mask, GFP_KERNEL)) {
		r = -ENOMEM;
		goto fail_free_mce_banks;
	}

	r = fx_init(vcpu);
	if (r)
		goto fail_free_wbinvd_dirty_mask;

	vcpu->arch.ia32_tsc_adjust_msr = 0x0;
	vcpu->arch.pv_time_enabled = false;
	kvm_async_pf_hash_reset(vcpu);
	kvm_pmu_init(vcpu);

	return 0;

fail_free_wbinvd_dirty_mask:
	free_cpumask_var(vcpu->arch.wbinvd_dirty_mask);
fail_free_mce_banks:
	kfree(vcpu->arch.mce_banks);
fail_free_lapic:
	kvm_free_lapic(vcpu);
fail_mmu_destroy:
	kvm_mmu_destroy(vcpu);
fail_free_pio_data:
	free_page((unsigned long)vcpu->arch.pio_data);
fail:
	return r;
}

void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	int idx;

	kvm_pmu_destroy(vcpu);
	kfree(vcpu->arch.mce_banks);
	kvm_free_lapic(vcpu);
	idx = srcu_read_lock(&vcpu->kvm->srcu);
	kvm_mmu_destroy(vcpu);
	srcu_read_unlock(&vcpu->kvm->srcu, idx);
	free_page((unsigned long)vcpu->arch.pio_data);
	if (!irqchip_in_kernel(vcpu->kvm))
		static_key_slow_dec(&kvm_no_apic_vcpu);
}

int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
	if (type)
		return -EINVAL;

	INIT_LIST_HEAD(&kvm->arch.active_mmu_pages);
	INIT_LIST_HEAD(&kvm->arch.zapped_obsolete_pages);
	INIT_LIST_HEAD(&kvm->arch.assigned_dev_head);

	/* Reserve bit 0 of irq_sources_bitmap for userspace irq source */
	set_bit(KVM_USERSPACE_IRQ_SOURCE_ID, &kvm->arch.irq_sources_bitmap);
	/* Reserve bit 1 of irq_sources_bitmap for irqfd-resampler */
	set_bit(KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID,
		&kvm->arch.irq_sources_bitmap);

	raw_spin_lock_init(&kvm->arch.tsc_write_lock);
	mutex_init(&kvm->arch.apic_map_lock);
	spin_lock_init(&kvm->arch.pvclock_gtod_sync_lock);

	pvclock_update_vm_gtod_copy(kvm);

	return 0;
}

static void kvm_unload_vcpu_mmu(struct kvm_vcpu *vcpu)
{
	int r;

	r = vcpu_load(vcpu);
	BUG_ON(r);
	kvm_mmu_unload(vcpu);
	vcpu_put(vcpu);
}

static void kvm_free_vcpus(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

	/*
	 * Unpin any mmu pages first.
	 */
	kvm_for_each_vcpu(i, vcpu, kvm) {
		kvm_clear_async_pf_completion_queue(vcpu);
		kvm_unload_vcpu_mmu(vcpu);
	}
	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_arch_vcpu_free(vcpu);

	mutex_lock(&kvm->lock);
	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
		kvm->vcpus[i] = NULL;

	atomic_set(&kvm->online_vcpus, 0);
	mutex_unlock(&kvm->lock);
}

void kvm_arch_sync_events(struct kvm *kvm)
{
	kvm_free_all_assigned_devices(kvm);
	kvm_free_pit(kvm);
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
	if (current->mm == kvm->mm) {
		/*
		 * Free memory regions allocated on behalf of userspace,
		 * unless the memory map has changed due to process exit
		 * or fd copying.
		 */
		struct kvm_userspace_memory_region mem;

		memset(&mem, 0, sizeof(mem));
		mem.slot = APIC_ACCESS_PAGE_PRIVATE_MEMSLOT;
		kvm_set_memory_region(kvm, &mem);

		mem.slot = IDENTITY_PAGETABLE_PRIVATE_MEMSLOT;
		kvm_set_memory_region(kvm, &mem);

		mem.slot = TSS_PRIVATE_MEMSLOT;
		kvm_set_memory_region(kvm, &mem);
	}
	kvm_iommu_unmap_guest(kvm);
	kfree(kvm->arch.vpic);
	kfree(kvm->arch.vioapic);
	kvm_free_vcpus(kvm);
	if (kvm->arch.apic_access_page)
		put_page(kvm->arch.apic_access_page);
	if (kvm->arch.ept_identity_pagetable)
		put_page(kvm->arch.ept_identity_pagetable);
	kfree(rcu_dereference_check(kvm->arch.apic_map, 1));
}

void kvm_arch_free_memslot(struct kvm_memory_slot *free,
			   struct kvm_memory_slot *dont)
{
	int i;

	for (i = 0; i < KVM_NR_PAGE_SIZES; ++i) {
		if (!dont || free->arch.rmap[i] != dont->arch.rmap[i]) {
			kvm_kvfree(free->arch.rmap[i]);
			free->arch.rmap[i] = NULL;
		}
		if (i == 0)
			continue;

		if (!dont || free->arch.lpage_info[i - 1] !=
			     dont->arch.lpage_info[i - 1]) {
			kvm_kvfree(free->arch.lpage_info[i - 1]);
			free->arch.lpage_info[i - 1] = NULL;
		}
	}
}
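/*
 * Allocate the per-slot rmap and large-page tracking arrays for each
 * supported page size.  For example, a 1GB slot that is 2MB-aligned needs
 * 512 lpage_info entries at the 2MB level; unaligned head/tail regions
 * have write_count set so they are never mapped with a large page.
 */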
int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages)
{
	int i;

	for (i = 0; i < KVM_NR_PAGE_SIZES; ++i) {
		unsigned long ugfn;
		int lpages;
		int level = i + 1;

		lpages = gfn_to_index(slot->base_gfn + npages - 1,
				      slot->base_gfn, level) + 1;

		slot->arch.rmap[i] =
			kvm_kvzalloc(lpages * sizeof(*slot->arch.rmap[i]));
		if (!slot->arch.rmap[i])
			goto out_free;
		if (i == 0)
			continue;

		slot->arch.lpage_info[i - 1] = kvm_kvzalloc(lpages *
				sizeof(*slot->arch.lpage_info[i - 1]));
		if (!slot->arch.lpage_info[i - 1])
			goto out_free;

		if (slot->base_gfn & (KVM_PAGES_PER_HPAGE(level) - 1))
			slot->arch.lpage_info[i - 1][0].write_count = 1;
		if ((slot->base_gfn + npages) & (KVM_PAGES_PER_HPAGE(level) - 1))
			slot->arch.lpage_info[i - 1][lpages - 1].write_count = 1;
		ugfn = slot->userspace_addr >> PAGE_SHIFT;
		/*
		 * If the gfn and userspace address are not aligned wrt each
		 * other, or if explicitly asked to, disable large page
		 * support for this slot
		 */
		if ((slot->base_gfn ^ ugfn) & (KVM_PAGES_PER_HPAGE(level) - 1) ||
		    !kvm_largepages_enabled()) {
			unsigned long j;

			for (j = 0; j < lpages; ++j)
				slot->arch.lpage_info[i - 1][j].write_count = 1;
		}
	}

	return 0;

out_free:
	for (i = 0; i < KVM_NR_PAGE_SIZES; ++i) {
		kvm_kvfree(slot->arch.rmap[i]);
		slot->arch.rmap[i] = NULL;
		if (i == 0)
			continue;

		kvm_kvfree(slot->arch.lpage_info[i - 1]);
		slot->arch.lpage_info[i - 1] = NULL;
	}
	return -ENOMEM;
}

int kvm_arch_prepare_memory_region(struct kvm *kvm,
				   struct kvm_memory_slot *memslot,
				   struct kvm_userspace_memory_region *mem,
				   enum kvm_mr_change change)
{
	/*
	 * Only private memory slots need to be mapped here since
	 * KVM_SET_MEMORY_REGION ioctl is no longer supported.
	 */
	if ((memslot->id >= KVM_USER_MEM_SLOTS) && (change == KVM_MR_CREATE)) {
		unsigned long userspace_addr;

		/*
		 * MAP_SHARED to prevent internal slot pages from being moved
		 * by fork()/COW.
		 */
		userspace_addr = vm_mmap(NULL, 0, memslot->npages * PAGE_SIZE,
					 PROT_READ | PROT_WRITE,
					 MAP_SHARED | MAP_ANONYMOUS, 0);

		if (IS_ERR((void *)userspace_addr))
			return PTR_ERR((void *)userspace_addr);

		memslot->userspace_addr = userspace_addr;
	}

	return 0;
}

void kvm_arch_commit_memory_region(struct kvm *kvm,
				   struct kvm_userspace_memory_region *mem,
				   const struct kvm_memory_slot *old,
				   enum kvm_mr_change change)
{
	int nr_mmu_pages = 0;

	if ((mem->slot >= KVM_USER_MEM_SLOTS) && (change == KVM_MR_DELETE)) {
		int ret;

		ret = vm_munmap(old->userspace_addr,
				old->npages * PAGE_SIZE);
		if (ret < 0)
			printk(KERN_WARNING
			       "kvm_vm_ioctl_set_memory_region: "
			       "failed to munmap memory\n");
	}

	if (!kvm->arch.n_requested_mmu_pages)
		nr_mmu_pages = kvm_mmu_calculate_mmu_pages(kvm);

	if (nr_mmu_pages)
		kvm_mmu_change_mmu_pages(kvm, nr_mmu_pages);
	/*
	 * Write protect all pages for dirty logging.
	 * Existing largepage mappings are destroyed here and new ones will
	 * not be created until the end of the logging.
	 */
	if ((change != KVM_MR_DELETE) && (mem->flags & KVM_MEM_LOG_DIRTY_PAGES))
		kvm_mmu_slot_remove_write_access(kvm, mem->slot);
	/*
	 * If memory slot is created, or moved, we need to clear all
	 * mmio sptes.
	 */
	if ((change == KVM_MR_CREATE) || (change == KVM_MR_MOVE))
		kvm_mmu_zap_mmio_sptes(kvm);
}

void kvm_arch_flush_shadow_all(struct kvm *kvm)
{
	kvm_mmu_invalidate_zap_all_pages(kvm);
}

void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
				   struct kvm_memory_slot *slot)
{
	kvm_mmu_invalidate_zap_all_pages(kvm);
}
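/*
 * A vcpu is runnable if it is in the RUNNABLE state and not halted on an
 * async page fault, or if it has pending work: completed async page
 * faults, APIC events, queued NMIs, or an injectable interrupt.
 */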
int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
	return (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE &&
		!vcpu->arch.apf.halted)
		|| !list_empty_careful(&vcpu->async_pf.done)
		|| kvm_apic_has_events(vcpu)
		|| atomic_read(&vcpu->arch.nmi_queued) ||
		(kvm_arch_interrupt_allowed(vcpu) &&
		 kvm_cpu_has_interrupt(vcpu));
}

int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_exiting_guest_mode(vcpu) == IN_GUEST_MODE;
}

int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu)
{
	return kvm_x86_ops->interrupt_allowed(vcpu);
}

bool kvm_is_linear_rip(struct kvm_vcpu *vcpu, unsigned long linear_rip)
{
	unsigned long current_rip = kvm_rip_read(vcpu) +
		get_segment_base(vcpu, VCPU_SREG_CS);

	return current_rip == linear_rip;
}
EXPORT_SYMBOL_GPL(kvm_is_linear_rip);

unsigned long kvm_get_rflags(struct kvm_vcpu *vcpu)
{
	unsigned long rflags;

	rflags = kvm_x86_ops->get_rflags(vcpu);
	if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
		rflags &= ~X86_EFLAGS_TF;
	return rflags;
}
EXPORT_SYMBOL_GPL(kvm_get_rflags);

void kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
{
	if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP &&
	    kvm_is_linear_rip(vcpu, vcpu->arch.singlestep_rip))
		rflags |= X86_EFLAGS_TF;
	kvm_x86_ops->set_rflags(vcpu, rflags);
	kvm_make_request(KVM_REQ_EVENT, vcpu);
}
EXPORT_SYMBOL_GPL(kvm_set_rflags);

void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu, struct kvm_async_pf *work)
{
	int r;

	if ((vcpu->arch.mmu.direct_map != work->arch.direct_map) ||
	      is_error_page(work->page))
		return;

	r = kvm_mmu_reload(vcpu);
	if (unlikely(r))
		return;

	if (!vcpu->arch.mmu.direct_map &&
	      work->arch.cr3 != vcpu->arch.mmu.get_cr3(vcpu))
		return;

	vcpu->arch.mmu.page_fault(vcpu, work->gva, 0, true);
}

static inline u32 kvm_async_pf_hash_fn(gfn_t gfn)
{
	return hash_32(gfn & 0xffffffff, order_base_2(ASYNC_PF_PER_VCPU));
}

static inline u32 kvm_async_pf_next_probe(u32 key)
{
	return (key + 1) & (roundup_pow_of_two(ASYNC_PF_PER_VCPU) - 1);
}
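/*
 * vcpu->arch.apf.gfns is a small open-addressed hash table with linear
 * probing; ~0 marks an empty slot.  Insertion probes forward from the
 * hashed index until a free slot is found, lookup stops at the gfn or an
 * empty slot, and deletion below re-packs the probe chain.
 */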
static void kvm_add_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
{
	u32 key = kvm_async_pf_hash_fn(gfn);

	while (vcpu->arch.apf.gfns[key] != ~0)
		key = kvm_async_pf_next_probe(key);

	vcpu->arch.apf.gfns[key] = gfn;
}

static u32 kvm_async_pf_gfn_slot(struct kvm_vcpu *vcpu, gfn_t gfn)
{
	int i;
	u32 key = kvm_async_pf_hash_fn(gfn);

	for (i = 0; i < roundup_pow_of_two(ASYNC_PF_PER_VCPU) &&
		     (vcpu->arch.apf.gfns[key] != gfn &&
		      vcpu->arch.apf.gfns[key] != ~0); i++)
		key = kvm_async_pf_next_probe(key);

	return key;
}

bool kvm_find_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
{
	return vcpu->arch.apf.gfns[kvm_async_pf_gfn_slot(vcpu, gfn)] == gfn;
}

static void kvm_del_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
{
	u32 i, j, k;

	i = j = kvm_async_pf_gfn_slot(vcpu, gfn);
	while (true) {
		vcpu->arch.apf.gfns[i] = ~0;
		do {
			j = kvm_async_pf_next_probe(j);
			if (vcpu->arch.apf.gfns[j] == ~0)
				return;

			k = kvm_async_pf_hash_fn(vcpu->arch.apf.gfns[j]);
			/*
			 * k lies cyclically in ]i,j]
			 * |    i.k.j |
			 * |....j i.k.| or  |.k..j i...|
			 */
		} while ((i <= j) ? (i < k && k <= j) : (i < k || k <= j));
		vcpu->arch.apf.gfns[i] = vcpu->arch.apf.gfns[j];
		i = j;
	}
}
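/* Write the async-PF reason code into the guest's registered MSR area. */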
static int apf_put_user(struct kvm_vcpu *vcpu, u32 val)
{
	return kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.apf.data, &val,
				      sizeof(val));
}

void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
				     struct kvm_async_pf *work)
{
	struct x86_exception fault;

	trace_kvm_async_pf_not_present(work->arch.token, work->gva);
	kvm_add_async_pf_gfn(vcpu, work->arch.gfn);

	if (!(vcpu->arch.apf.msr_val & KVM_ASYNC_PF_ENABLED) ||
	    (vcpu->arch.apf.send_user_only &&
	     kvm_x86_ops->get_cpl(vcpu) == 0))
		kvm_make_request(KVM_REQ_APF_HALT, vcpu);
	else if (!apf_put_user(vcpu, KVM_PV_REASON_PAGE_NOT_PRESENT)) {
		fault.vector = PF_VECTOR;
		fault.error_code_valid = true;
		fault.error_code = 0;
		fault.nested_page_fault = false;
		fault.address = work->arch.token;
		kvm_inject_page_fault(vcpu, &fault);
	}
}

void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
				 struct kvm_async_pf *work)
{
	struct x86_exception fault;

	trace_kvm_async_pf_ready(work->arch.token, work->gva);
	if (is_error_page(work->page))
		work->arch.token = ~0; /* broadcast wakeup */
	else
		kvm_del_async_pf_gfn(vcpu, work->arch.gfn);

	if ((vcpu->arch.apf.msr_val & KVM_ASYNC_PF_ENABLED) &&
	    !apf_put_user(vcpu, KVM_PV_REASON_PAGE_READY)) {
		fault.vector = PF_VECTOR;
		fault.error_code_valid = true;
		fault.error_code = 0;
		fault.nested_page_fault = false;
		fault.address = work->arch.token;
		kvm_inject_page_fault(vcpu, &fault);
	}
	vcpu->arch.apf.halted = false;
	vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
}

bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu)
{
	if (!(vcpu->arch.apf.msr_val & KVM_ASYNC_PF_ENABLED))
		return true;
	else
		return !kvm_event_needs_reinjection(vcpu) &&
			kvm_x86_ops->interrupt_allowed(vcpu);
}

EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_exit);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_inj_virq);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_page_fault);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_msr);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_cr);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_vmrun);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_vmexit);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_vmexit_inject);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_intr_vmexit);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_invlpga);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_skinit);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_intercepts);