x86.c

/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * derived from drivers/kvm/kvm_main.c
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright (C) 2008 Qumranet, Inc.
 * Copyright IBM Corporation, 2008
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Avi Kivity   <avi@qumranet.com>
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Amit Shah    <amit.shah@qumranet.com>
 *   Ben-Ami Yassour <benami@il.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */
#include <linux/kvm_host.h>
#include "irq.h"
#include "mmu.h"
#include "i8254.h"
#include "tss.h"
#include "kvm_cache_regs.h"
#include "x86.h"

#include <linux/clocksource.h>
#include <linux/interrupt.h>
#include <linux/kvm.h>
#include <linux/fs.h>
#include <linux/vmalloc.h>
#include <linux/module.h>
#include <linux/mman.h>
#include <linux/highmem.h>
#include <linux/iommu.h>
#include <linux/intel-iommu.h>
#include <linux/cpufreq.h>
#include <linux/user-return-notifier.h>
#include <linux/srcu.h>
#include <linux/slab.h>
#include <linux/perf_event.h>
#include <linux/uaccess.h>
#include <trace/events/kvm.h>

#define CREATE_TRACE_POINTS
#include "trace.h"

#include <asm/debugreg.h>
#include <asm/msr.h>
#include <asm/desc.h>
#include <asm/mtrr.h>
#include <asm/mce.h>
#include <asm/i387.h>
#include <asm/xcr.h>
#define MAX_IO_MSRS 256
#define CR0_RESERVED_BITS \
	(~(unsigned long)(X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS \
			  | X86_CR0_ET | X86_CR0_NE | X86_CR0_WP | X86_CR0_AM \
			  | X86_CR0_NW | X86_CR0_CD | X86_CR0_PG))
#define CR4_RESERVED_BITS \
	(~(unsigned long)(X86_CR4_VME | X86_CR4_PVI | X86_CR4_TSD | X86_CR4_DE \
			  | X86_CR4_PSE | X86_CR4_PAE | X86_CR4_MCE \
			  | X86_CR4_PGE | X86_CR4_PCE | X86_CR4_OSFXSR \
			  | X86_CR4_OSXSAVE \
			  | X86_CR4_OSXMMEXCPT | X86_CR4_VMXE))

#define CR8_RESERVED_BITS (~(unsigned long)X86_CR8_TPR)

#define KVM_MAX_MCE_BANKS 32
#define KVM_MCE_CAP_SUPPORTED MCG_CTL_P
/* EFER defaults:
 * - enable syscall by default because it's emulated by KVM
 * - enable LME and LMA by default on 64-bit KVM
 */
#ifdef CONFIG_X86_64
static u64 __read_mostly efer_reserved_bits = 0xfffffffffffffafeULL;
#else
static u64 __read_mostly efer_reserved_bits = 0xfffffffffffffffeULL;
#endif
#define VM_STAT(x) offsetof(struct kvm, stat.x), KVM_STAT_VM
#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU

static void update_cr8_intercept(struct kvm_vcpu *vcpu);
static int kvm_dev_ioctl_get_supported_cpuid(struct kvm_cpuid2 *cpuid,
					     struct kvm_cpuid_entry2 __user *entries);

struct kvm_x86_ops *kvm_x86_ops;
EXPORT_SYMBOL_GPL(kvm_x86_ops);

int ignore_msrs = 0;
module_param_named(ignore_msrs, ignore_msrs, bool, S_IRUGO | S_IWUSR);

#define KVM_NR_SHARED_MSRS 16

struct kvm_shared_msrs_global {
	int nr;
	u32 msrs[KVM_NR_SHARED_MSRS];
};

struct kvm_shared_msrs {
	struct user_return_notifier urn;
	bool registered;
	struct kvm_shared_msr_values {
		u64 host;
		u64 curr;
	} values[KVM_NR_SHARED_MSRS];
};

static struct kvm_shared_msrs_global __read_mostly shared_msrs_global;
static DEFINE_PER_CPU(struct kvm_shared_msrs, shared_msrs);

struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "pf_fixed", VCPU_STAT(pf_fixed) },
	{ "pf_guest", VCPU_STAT(pf_guest) },
	{ "tlb_flush", VCPU_STAT(tlb_flush) },
	{ "invlpg", VCPU_STAT(invlpg) },
	{ "exits", VCPU_STAT(exits) },
	{ "io_exits", VCPU_STAT(io_exits) },
	{ "mmio_exits", VCPU_STAT(mmio_exits) },
	{ "signal_exits", VCPU_STAT(signal_exits) },
	{ "irq_window", VCPU_STAT(irq_window_exits) },
	{ "nmi_window", VCPU_STAT(nmi_window_exits) },
	{ "halt_exits", VCPU_STAT(halt_exits) },
	{ "halt_wakeup", VCPU_STAT(halt_wakeup) },
	{ "hypercalls", VCPU_STAT(hypercalls) },
	{ "request_irq", VCPU_STAT(request_irq_exits) },
	{ "irq_exits", VCPU_STAT(irq_exits) },
	{ "host_state_reload", VCPU_STAT(host_state_reload) },
	{ "efer_reload", VCPU_STAT(efer_reload) },
	{ "fpu_reload", VCPU_STAT(fpu_reload) },
	{ "insn_emulation", VCPU_STAT(insn_emulation) },
	{ "insn_emulation_fail", VCPU_STAT(insn_emulation_fail) },
	{ "irq_injections", VCPU_STAT(irq_injections) },
	{ "nmi_injections", VCPU_STAT(nmi_injections) },
	{ "mmu_shadow_zapped", VM_STAT(mmu_shadow_zapped) },
	{ "mmu_pte_write", VM_STAT(mmu_pte_write) },
	{ "mmu_pte_updated", VM_STAT(mmu_pte_updated) },
	{ "mmu_pde_zapped", VM_STAT(mmu_pde_zapped) },
	{ "mmu_flooded", VM_STAT(mmu_flooded) },
	{ "mmu_recycled", VM_STAT(mmu_recycled) },
	{ "mmu_cache_miss", VM_STAT(mmu_cache_miss) },
	{ "mmu_unsync", VM_STAT(mmu_unsync) },
	{ "remote_tlb_flush", VM_STAT(remote_tlb_flush) },
	{ "largepages", VM_STAT(lpages) },
	{ NULL }
};
u64 __read_mostly host_xcr0;

static inline u32 bit(int bitno)
{
	return 1 << (bitno & 31);
}
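/*
 * Shared ("user return") MSRs: MSRs such as the syscall MSRs that KVM
 * rewrites while a guest runs and restores to the host value only when the
 * CPU returns to userspace, via a user_return_notifier, instead of on every
 * vmexit.
 */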
static void kvm_on_user_return(struct user_return_notifier *urn)
{
	unsigned slot;
	struct kvm_shared_msrs *locals
		= container_of(urn, struct kvm_shared_msrs, urn);
	struct kvm_shared_msr_values *values;

	for (slot = 0; slot < shared_msrs_global.nr; ++slot) {
		values = &locals->values[slot];
		if (values->host != values->curr) {
			wrmsrl(shared_msrs_global.msrs[slot], values->host);
			values->curr = values->host;
		}
	}
	locals->registered = false;
	user_return_notifier_unregister(urn);
}
static void shared_msr_update(unsigned slot, u32 msr)
{
	struct kvm_shared_msrs *smsr;
	u64 value;

	smsr = &__get_cpu_var(shared_msrs);
	/* only read, and nobody should modify it at this time,
	 * so no lock is needed */
	if (slot >= shared_msrs_global.nr) {
		printk(KERN_ERR "kvm: invalid MSR slot!");
		return;
	}
	rdmsrl_safe(msr, &value);
	smsr->values[slot].host = value;
	smsr->values[slot].curr = value;
}

void kvm_define_shared_msr(unsigned slot, u32 msr)
{
	if (slot >= shared_msrs_global.nr)
		shared_msrs_global.nr = slot + 1;
	shared_msrs_global.msrs[slot] = msr;
	/* make sure the update to shared_msrs_global is visible before use */
	smp_wmb();
}
EXPORT_SYMBOL_GPL(kvm_define_shared_msr);
static void kvm_shared_msr_cpu_online(void)
{
	unsigned i;

	for (i = 0; i < shared_msrs_global.nr; ++i)
		shared_msr_update(i, shared_msrs_global.msrs[i]);
}

void kvm_set_shared_msr(unsigned slot, u64 value, u64 mask)
{
	struct kvm_shared_msrs *smsr = &__get_cpu_var(shared_msrs);

	if (((value ^ smsr->values[slot].curr) & mask) == 0)
		return;
	smsr->values[slot].curr = value;
	wrmsrl(shared_msrs_global.msrs[slot], value);
	if (!smsr->registered) {
		smsr->urn.on_user_return = kvm_on_user_return;
		user_return_notifier_register(&smsr->urn);
		smsr->registered = true;
	}
}
EXPORT_SYMBOL_GPL(kvm_set_shared_msr);

static void drop_user_return_notifiers(void *ignore)
{
	struct kvm_shared_msrs *smsr = &__get_cpu_var(shared_msrs);

	if (smsr->registered)
		kvm_on_user_return(&smsr->urn);
}

u64 kvm_get_apic_base(struct kvm_vcpu *vcpu)
{
	if (irqchip_in_kernel(vcpu->kvm))
		return vcpu->arch.apic_base;
	else
		return vcpu->arch.apic_base;
}
EXPORT_SYMBOL_GPL(kvm_get_apic_base);

void kvm_set_apic_base(struct kvm_vcpu *vcpu, u64 data)
{
	/* TODO: reserve bits check */
	if (irqchip_in_kernel(vcpu->kvm))
		kvm_lapic_set_base(vcpu, data);
	else
		vcpu->arch.apic_base = data;
}
EXPORT_SYMBOL_GPL(kvm_set_apic_base);
#define EXCPT_BENIGN		0
#define EXCPT_CONTRIBUTORY	1
#define EXCPT_PF		2

static int exception_class(int vector)
{
	switch (vector) {
	case PF_VECTOR:
		return EXCPT_PF;
	case DE_VECTOR:
	case TS_VECTOR:
	case NP_VECTOR:
	case SS_VECTOR:
	case GP_VECTOR:
		return EXCPT_CONTRIBUTORY;
	default:
		break;
	}
	return EXCPT_BENIGN;
}
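/*
 * Queue an exception for injection into the guest.  If an exception is
 * already pending, the two are merged following the SDM rules: two
 * contributory exceptions, or a page fault followed by a non-benign
 * exception, escalate to #DF; a fault while #DF is pending becomes a
 * triple fault (VM shutdown); otherwise the new exception simply replaces
 * the pending one.
 */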
static void kvm_multiple_exception(struct kvm_vcpu *vcpu,
				   unsigned nr, bool has_error, u32 error_code,
				   bool reinject)
{
	u32 prev_nr;
	int class1, class2;

	if (!vcpu->arch.exception.pending) {
	queue:
		vcpu->arch.exception.pending = true;
		vcpu->arch.exception.has_error_code = has_error;
		vcpu->arch.exception.nr = nr;
		vcpu->arch.exception.error_code = error_code;
		vcpu->arch.exception.reinject = reinject;
		return;
	}

	/* a second exception was raised while one is still pending */
	prev_nr = vcpu->arch.exception.nr;
	if (prev_nr == DF_VECTOR) {
		/* triple fault -> shutdown */
		set_bit(KVM_REQ_TRIPLE_FAULT, &vcpu->requests);
		return;
	}
	class1 = exception_class(prev_nr);
	class2 = exception_class(nr);
	if ((class1 == EXCPT_CONTRIBUTORY && class2 == EXCPT_CONTRIBUTORY)
	    || (class1 == EXCPT_PF && class2 != EXCPT_BENIGN)) {
		/* generate double fault per SDM Table 5-5 */
		vcpu->arch.exception.pending = true;
		vcpu->arch.exception.has_error_code = true;
		vcpu->arch.exception.nr = DF_VECTOR;
		vcpu->arch.exception.error_code = 0;
	} else
		/* replace previous exception with a new one in the hope
		   that instruction re-execution will regenerate the lost
		   exception */
		goto queue;
}
void kvm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr)
{
	kvm_multiple_exception(vcpu, nr, false, 0, false);
}
EXPORT_SYMBOL_GPL(kvm_queue_exception);

void kvm_requeue_exception(struct kvm_vcpu *vcpu, unsigned nr)
{
	kvm_multiple_exception(vcpu, nr, false, 0, true);
}
EXPORT_SYMBOL_GPL(kvm_requeue_exception);

void kvm_inject_page_fault(struct kvm_vcpu *vcpu, unsigned long addr,
			   u32 error_code)
{
	++vcpu->stat.pf_guest;
	vcpu->arch.cr2 = addr;
	kvm_queue_exception_e(vcpu, PF_VECTOR, error_code);
}

void kvm_inject_nmi(struct kvm_vcpu *vcpu)
{
	vcpu->arch.nmi_pending = 1;
}
EXPORT_SYMBOL_GPL(kvm_inject_nmi);

void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code)
{
	kvm_multiple_exception(vcpu, nr, true, error_code, false);
}
EXPORT_SYMBOL_GPL(kvm_queue_exception_e);

void kvm_requeue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code)
{
	kvm_multiple_exception(vcpu, nr, true, error_code, true);
}
EXPORT_SYMBOL_GPL(kvm_requeue_exception_e);

/*
 * Checks if cpl <= required_cpl; if true, return true.  Otherwise queue
 * a #GP and return false.
 */
bool kvm_require_cpl(struct kvm_vcpu *vcpu, int required_cpl)
{
	if (kvm_x86_ops->get_cpl(vcpu) <= required_cpl)
		return true;
	kvm_queue_exception_e(vcpu, GP_VECTOR, 0);
	return false;
}
EXPORT_SYMBOL_GPL(kvm_require_cpl);
/*
 * Load the PAE pdptrs.  Return true if they are all valid.
 */
int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3)
{
	gfn_t pdpt_gfn = cr3 >> PAGE_SHIFT;
	unsigned offset = ((cr3 & (PAGE_SIZE-1)) >> 5) << 2;
	int i;
	int ret;
	u64 pdpte[ARRAY_SIZE(vcpu->arch.pdptrs)];

	ret = kvm_read_guest_page(vcpu->kvm, pdpt_gfn, pdpte,
				  offset * sizeof(u64), sizeof(pdpte));
	if (ret < 0) {
		ret = 0;
		goto out;
	}
	for (i = 0; i < ARRAY_SIZE(pdpte); ++i) {
		if (is_present_gpte(pdpte[i]) &&
		    (pdpte[i] & vcpu->arch.mmu.rsvd_bits_mask[0][2])) {
			ret = 0;
			goto out;
		}
	}
	ret = 1;

	memcpy(vcpu->arch.pdptrs, pdpte, sizeof(vcpu->arch.pdptrs));
	__set_bit(VCPU_EXREG_PDPTR,
		  (unsigned long *)&vcpu->arch.regs_avail);
	__set_bit(VCPU_EXREG_PDPTR,
		  (unsigned long *)&vcpu->arch.regs_dirty);
out:
	return ret;
}
EXPORT_SYMBOL_GPL(load_pdptrs);
static bool pdptrs_changed(struct kvm_vcpu *vcpu)
{
	u64 pdpte[ARRAY_SIZE(vcpu->arch.pdptrs)];
	bool changed = true;
	int r;

	if (is_long_mode(vcpu) || !is_pae(vcpu))
		return false;

	if (!test_bit(VCPU_EXREG_PDPTR,
		      (unsigned long *)&vcpu->arch.regs_avail))
		return true;

	r = kvm_read_guest(vcpu->kvm, vcpu->arch.cr3 & ~31u, pdpte, sizeof(pdpte));
	if (r < 0)
		goto out;
	changed = memcmp(pdpte, vcpu->arch.pdptrs, sizeof(pdpte)) != 0;
out:
	return changed;
}
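/*
 * Emulate a guest write to CR0.  Returns 0 on success, 1 if the new value
 * is invalid and the caller should raise #GP: reserved bits set, NW without
 * CD, PG without PE, or an illegal transition into paging (enabling PG with
 * EFER.LME set while in a 64-bit code segment or without PAE, or with PAE
 * pdptrs that fail to load).
 */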
int kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
{
	unsigned long old_cr0 = kvm_read_cr0(vcpu);
	unsigned long update_bits = X86_CR0_PG | X86_CR0_WP |
				    X86_CR0_CD | X86_CR0_NW;

	cr0 |= X86_CR0_ET;

#ifdef CONFIG_X86_64
	if (cr0 & 0xffffffff00000000UL)
		return 1;
#endif

	cr0 &= ~CR0_RESERVED_BITS;

	if ((cr0 & X86_CR0_NW) && !(cr0 & X86_CR0_CD))
		return 1;

	if ((cr0 & X86_CR0_PG) && !(cr0 & X86_CR0_PE))
		return 1;

	if (!is_paging(vcpu) && (cr0 & X86_CR0_PG)) {
#ifdef CONFIG_X86_64
		if ((vcpu->arch.efer & EFER_LME)) {
			int cs_db, cs_l;

			if (!is_pae(vcpu))
				return 1;
			kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
			if (cs_l)
				return 1;
		} else
#endif
		if (is_pae(vcpu) && !load_pdptrs(vcpu, vcpu->arch.cr3))
			return 1;
	}

	kvm_x86_ops->set_cr0(vcpu, cr0);

	if ((cr0 ^ old_cr0) & update_bits)
		kvm_mmu_reset_context(vcpu);
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_set_cr0);

void kvm_lmsw(struct kvm_vcpu *vcpu, unsigned long msw)
{
	(void)kvm_set_cr0(vcpu, kvm_read_cr0_bits(vcpu, ~0x0eul) | (msw & 0x0f));
}
EXPORT_SYMBOL_GPL(kvm_lmsw);
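/*
 * Emulate XSETBV.  Only XCR0 is supported and only at CPL 0; the new value
 * must have the x87 bit set, must not enable YMM without SSE, and must not
 * enable any feature the host's XCR0 lacks.  The raw helper returns 1 on
 * failure; the kvm_set_xcr() wrapper turns that into an injected #GP.
 */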
int __kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
{
	u64 xcr0;

	/* Only support XCR_XFEATURE_ENABLED_MASK(xcr0) now */
	if (index != XCR_XFEATURE_ENABLED_MASK)
		return 1;
	xcr0 = xcr;
	if (kvm_x86_ops->get_cpl(vcpu) != 0)
		return 1;
	if (!(xcr0 & XSTATE_FP))
		return 1;
	if ((xcr0 & XSTATE_YMM) && !(xcr0 & XSTATE_SSE))
		return 1;
	if (xcr0 & ~host_xcr0)
		return 1;
	vcpu->arch.xcr0 = xcr0;
	vcpu->guest_xcr0_loaded = 0;
	return 0;
}

int kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
{
	if (__kvm_set_xcr(vcpu, index, xcr)) {
		kvm_inject_gp(vcpu, 0);
		return 1;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_set_xcr);

static bool guest_cpuid_has_xsave(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 1, 0);
	return best && (best->ecx & bit(X86_FEATURE_XSAVE));
}

static void update_cpuid(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 1, 0);
	if (!best)
		return;

	/* Update OSXSAVE bit */
	if (cpu_has_xsave && best->function == 0x1) {
		best->ecx &= ~(bit(X86_FEATURE_OSXSAVE));
		if (kvm_read_cr4_bits(vcpu, X86_CR4_OSXSAVE))
			best->ecx |= bit(X86_FEATURE_OSXSAVE);
	}
}

int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
{
	unsigned long old_cr4 = kvm_read_cr4(vcpu);
	unsigned long pdptr_bits = X86_CR4_PGE | X86_CR4_PSE | X86_CR4_PAE;

	if (cr4 & CR4_RESERVED_BITS)
		return 1;

	if (!guest_cpuid_has_xsave(vcpu) && (cr4 & X86_CR4_OSXSAVE))
		return 1;

	if (is_long_mode(vcpu)) {
		if (!(cr4 & X86_CR4_PAE))
			return 1;
	} else if (is_paging(vcpu) && (cr4 & X86_CR4_PAE)
		   && ((cr4 ^ old_cr4) & pdptr_bits)
		   && !load_pdptrs(vcpu, vcpu->arch.cr3))
		return 1;

	if (cr4 & X86_CR4_VMXE)
		return 1;

	kvm_x86_ops->set_cr4(vcpu, cr4);

	if ((cr4 ^ old_cr4) & pdptr_bits)
		kvm_mmu_reset_context(vcpu);

	if ((cr4 ^ old_cr4) & X86_CR4_OSXSAVE)
		update_cpuid(vcpu);

	return 0;
}
EXPORT_SYMBOL_GPL(kvm_set_cr4);
int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
{
	if (cr3 == vcpu->arch.cr3 && !pdptrs_changed(vcpu)) {
		kvm_mmu_sync_roots(vcpu);
		kvm_mmu_flush_tlb(vcpu);
		return 0;
	}

	if (is_long_mode(vcpu)) {
		if (cr3 & CR3_L_MODE_RESERVED_BITS)
			return 1;
	} else {
		if (is_pae(vcpu)) {
			if (cr3 & CR3_PAE_RESERVED_BITS)
				return 1;
			if (is_paging(vcpu) && !load_pdptrs(vcpu, cr3))
				return 1;
		}
		/*
		 * We don't check reserved bits in nonpae mode, because
		 * this isn't enforced, and VMware depends on this.
		 */
	}

	/*
	 * Does the new cr3 value map to physical memory? (Note, we
	 * catch an invalid cr3 even in real-mode, because it would
	 * cause trouble later on when we turn on paging anyway.)
	 *
	 * A real CPU would silently accept an invalid cr3 and would
	 * attempt to use it - with largely undefined (and often hard
	 * to debug) behavior on the guest side.
	 */
	if (unlikely(!gfn_to_memslot(vcpu->kvm, cr3 >> PAGE_SHIFT)))
		return 1;
	vcpu->arch.cr3 = cr3;
	vcpu->arch.mmu.new_cr3(vcpu);
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_set_cr3);

int __kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8)
{
	if (cr8 & CR8_RESERVED_BITS)
		return 1;
	if (irqchip_in_kernel(vcpu->kvm))
		kvm_lapic_set_tpr(vcpu, cr8);
	else
		vcpu->arch.cr8 = cr8;
	return 0;
}

void kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8)
{
	if (__kvm_set_cr8(vcpu, cr8))
		kvm_inject_gp(vcpu, 0);
}
EXPORT_SYMBOL_GPL(kvm_set_cr8);

unsigned long kvm_get_cr8(struct kvm_vcpu *vcpu)
{
	if (irqchip_in_kernel(vcpu->kvm))
		return kvm_lapic_get_cr8(vcpu);
	else
		return vcpu->arch.cr8;
}
EXPORT_SYMBOL_GPL(kvm_get_cr8);
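/*
 * Debug register emulation.  __kvm_set_dr() returns 0 on success, 1 when
 * the access should raise #UD (DR4/DR5 with CR4.DE set) and -1 when it
 * should raise #GP (setting the upper 32 bits of DR6/DR7); the kvm_set_dr()
 * and kvm_get_dr() wrappers queue the matching exception for the caller.
 */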
static int __kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val)
{
	switch (dr) {
	case 0 ... 3:
		vcpu->arch.db[dr] = val;
		if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP))
			vcpu->arch.eff_db[dr] = val;
		break;
	case 4:
		if (kvm_read_cr4_bits(vcpu, X86_CR4_DE))
			return 1; /* #UD */
		/* fall through */
	case 6:
		if (val & 0xffffffff00000000ULL)
			return -1; /* #GP */
		vcpu->arch.dr6 = (val & DR6_VOLATILE) | DR6_FIXED_1;
		break;
	case 5:
		if (kvm_read_cr4_bits(vcpu, X86_CR4_DE))
			return 1; /* #UD */
		/* fall through */
	default: /* 7 */
		if (val & 0xffffffff00000000ULL)
			return -1; /* #GP */
		vcpu->arch.dr7 = (val & DR7_VOLATILE) | DR7_FIXED_1;
		if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)) {
			kvm_x86_ops->set_dr7(vcpu, vcpu->arch.dr7);
			vcpu->arch.switch_db_regs = (val & DR7_BP_EN_MASK);
		}
		break;
	}

	return 0;
}

int kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val)
{
	int res;

	res = __kvm_set_dr(vcpu, dr, val);
	if (res > 0)
		kvm_queue_exception(vcpu, UD_VECTOR);
	else if (res < 0)
		kvm_inject_gp(vcpu, 0);

	return res;
}
EXPORT_SYMBOL_GPL(kvm_set_dr);

static int _kvm_get_dr(struct kvm_vcpu *vcpu, int dr, unsigned long *val)
{
	switch (dr) {
	case 0 ... 3:
		*val = vcpu->arch.db[dr];
		break;
	case 4:
		if (kvm_read_cr4_bits(vcpu, X86_CR4_DE))
			return 1;
		/* fall through */
	case 6:
		*val = vcpu->arch.dr6;
		break;
	case 5:
		if (kvm_read_cr4_bits(vcpu, X86_CR4_DE))
			return 1;
		/* fall through */
	default: /* 7 */
		*val = vcpu->arch.dr7;
		break;
	}

	return 0;
}

int kvm_get_dr(struct kvm_vcpu *vcpu, int dr, unsigned long *val)
{
	if (_kvm_get_dr(vcpu, dr, val)) {
		kvm_queue_exception(vcpu, UD_VECTOR);
		return 1;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_get_dr);
/*
 * List of msr numbers which we expose to userspace through KVM_GET_MSRS
 * and KVM_SET_MSRS, and KVM_GET_MSR_INDEX_LIST.
 *
 * This list is modified at module load time to reflect the
 * capabilities of the host cpu. This capabilities test skips MSRs that are
 * kvm-specific. Those are put in the beginning of the list.
 */

#define KVM_SAVE_MSRS_BEGIN	7
static u32 msrs_to_save[] = {
	MSR_KVM_SYSTEM_TIME, MSR_KVM_WALL_CLOCK,
	MSR_KVM_SYSTEM_TIME_NEW, MSR_KVM_WALL_CLOCK_NEW,
	HV_X64_MSR_GUEST_OS_ID, HV_X64_MSR_HYPERCALL,
	HV_X64_MSR_APIC_ASSIST_PAGE,
	MSR_IA32_SYSENTER_CS, MSR_IA32_SYSENTER_ESP, MSR_IA32_SYSENTER_EIP,
	MSR_K6_STAR,
#ifdef CONFIG_X86_64
	MSR_CSTAR, MSR_KERNEL_GS_BASE, MSR_SYSCALL_MASK, MSR_LSTAR,
#endif
	MSR_IA32_TSC, MSR_IA32_PERF_STATUS, MSR_IA32_CR_PAT, MSR_VM_HSAVE_PA
};

static unsigned num_msrs_to_save;

static u32 emulated_msrs[] = {
	MSR_IA32_MISC_ENABLE,
};
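/*
 * Validate and apply a guest write to EFER.  Reserved bits, toggling LME
 * while paging is enabled, and setting FFXSR or SVME without the matching
 * CPUID feature bits are all rejected.  LMA is read-only here: the value
 * written by the guest is ignored and the current LMA state is preserved.
 */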
static int set_efer(struct kvm_vcpu *vcpu, u64 efer)
{
	u64 old_efer = vcpu->arch.efer;

	if (efer & efer_reserved_bits)
		return 1;

	if (is_paging(vcpu)
	    && (vcpu->arch.efer & EFER_LME) != (efer & EFER_LME))
		return 1;

	if (efer & EFER_FFXSR) {
		struct kvm_cpuid_entry2 *feat;

		feat = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
		if (!feat || !(feat->edx & bit(X86_FEATURE_FXSR_OPT)))
			return 1;
	}

	if (efer & EFER_SVME) {
		struct kvm_cpuid_entry2 *feat;

		feat = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
		if (!feat || !(feat->ecx & bit(X86_FEATURE_SVM)))
			return 1;
	}

	efer &= ~EFER_LMA;
	efer |= vcpu->arch.efer & EFER_LMA;

	kvm_x86_ops->set_efer(vcpu, efer);

	vcpu->arch.mmu.base_role.nxe = (efer & EFER_NX) && !tdp_enabled;
	kvm_mmu_reset_context(vcpu);

	/* Update reserved bits */
	if ((efer ^ old_efer) & EFER_NX)
		kvm_mmu_reset_context(vcpu);

	return 0;
}

void kvm_enable_efer_bits(u64 mask)
{
	efer_reserved_bits &= ~mask;
}
EXPORT_SYMBOL_GPL(kvm_enable_efer_bits);
/*
 * Writes msr value into the appropriate "register".
 * Returns 0 on success, non-0 otherwise.
 * Assumes vcpu_load() was already called.
 */
int kvm_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
{
	return kvm_x86_ops->set_msr(vcpu, msr_index, data);
}

/*
 * Adapt set_msr() to msr_io()'s calling convention
 */
static int do_set_msr(struct kvm_vcpu *vcpu, unsigned index, u64 *data)
{
	return kvm_set_msr(vcpu, index, *data);
}
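/*
 * Publish the wall clock to the guest's pvclock wall_clock page.  The
 * version field acts like a seqlock: it is bumped to an odd value before
 * the structure is rewritten and to an even value afterwards, so a guest
 * that observes an odd or changing version knows to retry its read.
 */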
static void kvm_write_wall_clock(struct kvm *kvm, gpa_t wall_clock)
{
	int version;
	int r;
	struct pvclock_wall_clock wc;
	struct timespec boot;

	if (!wall_clock)
		return;

	r = kvm_read_guest(kvm, wall_clock, &version, sizeof(version));
	if (r)
		return;

	if (version & 1)
		++version;  /* first time write, random junk */

	++version;

	kvm_write_guest(kvm, wall_clock, &version, sizeof(version));

	/*
	 * The guest calculates current wall clock time by adding
	 * system time (updated by kvm_write_guest_time below) to the
	 * wall clock specified here.  guest system time equals host
	 * system time for us, thus we must fill in host boot time here.
	 */
	getboottime(&boot);

	wc.sec = boot.tv_sec;
	wc.nsec = boot.tv_nsec;
	wc.version = version;

	kvm_write_guest(kvm, wall_clock, &wc, sizeof(wc));

	version++;
	kvm_write_guest(kvm, wall_clock, &version, sizeof(version));
}

static uint32_t div_frac(uint32_t dividend, uint32_t divisor)
{
	uint32_t quotient, remainder;

	/* Don't try to replace with do_div(), this one calculates
	 * "(dividend << 32) / divisor" */
	__asm__ ( "divl %4"
		  : "=a" (quotient), "=d" (remainder)
		  : "0" (0), "1" (dividend), "r" (divisor) );
	return quotient;
}
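/*
 * Derive the pvclock scaling parameters for a given TSC frequency.  The
 * TSC rate is scaled by a power of two (recorded in tsc_shift) until it
 * lies between 1 and 2 billion ticks per second; tsc_to_system_mul is then
 * the 32-bit fixed-point fraction (10^9 << 32) / scaled_rate that the
 * guest uses to convert scaled TSC ticks into nanoseconds.
 */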
static void kvm_set_time_scale(uint32_t tsc_khz, struct pvclock_vcpu_time_info *hv_clock)
{
	uint64_t nsecs = 1000000000LL;
	int32_t  shift = 0;
	uint64_t tps64;
	uint32_t tps32;

	tps64 = tsc_khz * 1000LL;
	while (tps64 > nsecs * 2) {
		tps64 >>= 1;
		shift--;
	}

	tps32 = (uint32_t)tps64;
	while (tps32 <= (uint32_t)nsecs) {
		tps32 <<= 1;
		shift++;
	}

	hv_clock->tsc_shift = shift;
	hv_clock->tsc_to_system_mul = div_frac(nsecs, tps32);

	pr_debug("%s: tsc_khz %u, tsc_shift %d, tsc_mul %u\n",
		 __func__, tsc_khz, hv_clock->tsc_shift,
		 hv_clock->tsc_to_system_mul);
}

static DEFINE_PER_CPU(unsigned long, cpu_tsc_khz);
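/*
 * Refresh the per-vcpu pvclock area: recompute the scaling factors if this
 * CPU's TSC frequency changed, then copy the new tsc_timestamp/system_time
 * pair into the guest page.  system_time is host boot-based monotonic time
 * plus the per-VM kvmclock_offset, sampled with interrupts disabled
 * together with the TSC timestamp.
 */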
static void kvm_write_guest_time(struct kvm_vcpu *v)
{
	struct timespec ts;
	unsigned long flags;
	struct kvm_vcpu_arch *vcpu = &v->arch;
	void *shared_kaddr;
	unsigned long this_tsc_khz;

	if ((!vcpu->time_page))
		return;

	this_tsc_khz = get_cpu_var(cpu_tsc_khz);
	if (unlikely(vcpu->hv_clock_tsc_khz != this_tsc_khz)) {
		kvm_set_time_scale(this_tsc_khz, &vcpu->hv_clock);
		vcpu->hv_clock_tsc_khz = this_tsc_khz;
	}
	put_cpu_var(cpu_tsc_khz);

	/* Keep irq disabled to prevent changes to the clock */
	local_irq_save(flags);
	kvm_get_msr(v, MSR_IA32_TSC, &vcpu->hv_clock.tsc_timestamp);
	ktime_get_ts(&ts);
	monotonic_to_bootbased(&ts);
	local_irq_restore(flags);

	/* With all the info we got, fill in the values */

	vcpu->hv_clock.system_time = ts.tv_nsec +
			(NSEC_PER_SEC * (u64)ts.tv_sec) + v->kvm->arch.kvmclock_offset;

	vcpu->hv_clock.flags = 0;

	/*
	 * The interface expects us to write an even number signaling that the
	 * update is finished. Since the guest won't see the intermediate
	 * state, we just increase by 2 at the end.
	 */
	vcpu->hv_clock.version += 2;

	shared_kaddr = kmap_atomic(vcpu->time_page, KM_USER0);

	memcpy(shared_kaddr + vcpu->time_offset, &vcpu->hv_clock,
	       sizeof(vcpu->hv_clock));

	kunmap_atomic(shared_kaddr, KM_USER0);

	mark_page_dirty(v->kvm, vcpu->time >> PAGE_SHIFT);
}

static int kvm_request_guest_time_update(struct kvm_vcpu *v)
{
	struct kvm_vcpu_arch *vcpu = &v->arch;

	if (!vcpu->time_page)
		return 0;
	set_bit(KVM_REQ_KVMCLOCK_UPDATE, &v->requests);
	return 1;
}
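/*
 * MTRR emulation.  msr_mtrr_valid() accepts the variable-range pairs at
 * 0x200 + 2*n (PHYSBASEn/PHYSMASKn), the fixed ranges (64K/16K/4K),
 * MTRRdefType and the PAT MSR; mtrr_valid() additionally checks that the
 * memory types being written are architecturally defined.
 */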
static bool msr_mtrr_valid(unsigned msr)
{
	switch (msr) {
	case 0x200 ... 0x200 + 2 * KVM_NR_VAR_MTRR - 1:
	case MSR_MTRRfix64K_00000:
	case MSR_MTRRfix16K_80000:
	case MSR_MTRRfix16K_A0000:
	case MSR_MTRRfix4K_C0000:
	case MSR_MTRRfix4K_C8000:
	case MSR_MTRRfix4K_D0000:
	case MSR_MTRRfix4K_D8000:
	case MSR_MTRRfix4K_E0000:
	case MSR_MTRRfix4K_E8000:
	case MSR_MTRRfix4K_F0000:
	case MSR_MTRRfix4K_F8000:
	case MSR_MTRRdefType:
	case MSR_IA32_CR_PAT:
		return true;
	case 0x2f8:
		return true;
	}
	return false;
}

static bool valid_pat_type(unsigned t)
{
	return t < 8 && (1 << t) & 0xf3;	/* 0, 1, 4, 5, 6, 7 */
}

static bool valid_mtrr_type(unsigned t)
{
	return t < 8 && (1 << t) & 0x73;	/* 0, 1, 4, 5, 6 */
}

static bool mtrr_valid(struct kvm_vcpu *vcpu, u32 msr, u64 data)
{
	int i;

	if (!msr_mtrr_valid(msr))
		return false;

	if (msr == MSR_IA32_CR_PAT) {
		for (i = 0; i < 8; i++)
			if (!valid_pat_type((data >> (i * 8)) & 0xff))
				return false;
		return true;
	} else if (msr == MSR_MTRRdefType) {
		if (data & ~0xcff)
			return false;
		return valid_mtrr_type(data & 0xff);
	} else if (msr >= MSR_MTRRfix64K_00000 && msr <= MSR_MTRRfix4K_F8000) {
		for (i = 0; i < 8 ; i++)
			if (!valid_mtrr_type((data >> (i * 8)) & 0xff))
				return false;
		return true;
	}

	/* variable MTRRs */
	return valid_mtrr_type(data & 0xff);
}
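/*
 * The fixed-range MTRR contents are stored as eleven consecutive u64s:
 * index 0 is the 64K range, indexes 1-2 the two 16K ranges and indexes
 * 3-10 the eight 4K ranges; the p[] arithmetic below relies on this
 * layout.
 */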
static int set_msr_mtrr(struct kvm_vcpu *vcpu, u32 msr, u64 data)
{
	u64 *p = (u64 *)&vcpu->arch.mtrr_state.fixed_ranges;

	if (!mtrr_valid(vcpu, msr, data))
		return 1;

	if (msr == MSR_MTRRdefType) {
		vcpu->arch.mtrr_state.def_type = data;
		vcpu->arch.mtrr_state.enabled = (data & 0xc00) >> 10;
	} else if (msr == MSR_MTRRfix64K_00000)
		p[0] = data;
	else if (msr == MSR_MTRRfix16K_80000 || msr == MSR_MTRRfix16K_A0000)
		p[1 + msr - MSR_MTRRfix16K_80000] = data;
	else if (msr >= MSR_MTRRfix4K_C0000 && msr <= MSR_MTRRfix4K_F8000)
		p[3 + msr - MSR_MTRRfix4K_C0000] = data;
	else if (msr == MSR_IA32_CR_PAT)
		vcpu->arch.pat = data;
	else {	/* Variable MTRRs */
		int idx, is_mtrr_mask;
		u64 *pt;

		idx = (msr - 0x200) / 2;
		is_mtrr_mask = msr - 0x200 - 2 * idx;
		if (!is_mtrr_mask)
			pt =
			  (u64 *)&vcpu->arch.mtrr_state.var_ranges[idx].base_lo;
		else
			pt =
			  (u64 *)&vcpu->arch.mtrr_state.var_ranges[idx].mask_lo;
		*pt = data;
	}

	kvm_mmu_reset_context(vcpu);
	return 0;
}
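/*
 * Machine-check MSR writes.  The low byte of mcg_cap holds the number of
 * emulated banks; each bank occupies four consecutive MSRs starting at
 * MSR_IA32_MC0_CTL (CTL, STATUS, ADDR, MISC), and the IA32_MCi_CTL
 * registers only accept all-zeros or all-ones values.
 */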
static int set_msr_mce(struct kvm_vcpu *vcpu, u32 msr, u64 data)
{
	u64 mcg_cap = vcpu->arch.mcg_cap;
	unsigned bank_num = mcg_cap & 0xff;

	switch (msr) {
	case MSR_IA32_MCG_STATUS:
		vcpu->arch.mcg_status = data;
		break;
	case MSR_IA32_MCG_CTL:
		if (!(mcg_cap & MCG_CTL_P))
			return 1;
		if (data != 0 && data != ~(u64)0)
			return -1;
		vcpu->arch.mcg_ctl = data;
		break;
	default:
		if (msr >= MSR_IA32_MC0_CTL &&
		    msr < MSR_IA32_MC0_CTL + 4 * bank_num) {
			u32 offset = msr - MSR_IA32_MC0_CTL;
			/* only 0 or all 1s can be written to IA32_MCi_CTL;
			 * some Linux kernels, though, clear bit 10 in bank 4
			 * to work around a BIOS/GART TLB issue on AMD K8s,
			 * so ignore that case to avoid an uncaught #GP in
			 * the guest
			 */
			if ((offset & 0x3) == 0 &&
			    data != 0 && (data | (1 << 10)) != ~(u64)0)
				return -1;
			vcpu->arch.mce_banks[offset] = data;
			break;
		}
		return 1;
	}
	return 0;
}
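/*
 * Handle a write to the Xen HVM config MSR (the MSR number itself is set
 * by userspace in kvm->arch.xen_hvm_config).  The value encodes a page
 * index in its low bits and a guest-physical destination in its high bits;
 * the matching page of the userspace-provided hypercall blob is copied
 * into guest memory.
 */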
static int xen_hvm_config(struct kvm_vcpu *vcpu, u64 data)
{
	struct kvm *kvm = vcpu->kvm;
	int lm = is_long_mode(vcpu);
	u8 *blob_addr = lm ? (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_64
		: (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
	u8 blob_size = lm ? kvm->arch.xen_hvm_config.blob_size_64
		: kvm->arch.xen_hvm_config.blob_size_32;
	u32 page_num = data & ~PAGE_MASK;
	u64 page_addr = data & PAGE_MASK;
	u8 *page;
	int r;

	r = -E2BIG;
	if (page_num >= blob_size)
		goto out;
	r = -ENOMEM;
	page = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!page)
		goto out;
	r = -EFAULT;
	if (copy_from_user(page, blob_addr + (page_num * PAGE_SIZE), PAGE_SIZE))
		goto out_free;
	if (kvm_write_guest(kvm, page_addr, page, PAGE_SIZE))
		goto out_free;
	r = 0;
out_free:
	kfree(page);
out:
	return r;
}
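/*
 * Hyper-V enlightenment MSRs.  Partition-wide state (the guest OS ID and
 * the hypercall page) lives in struct kvm and is handled by
 * set_msr_hyperv_pw(); per-vcpu state such as the APIC assist page and the
 * EOI/ICR/TPR accelerators is handled by set_msr_hyperv().
 */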
  957. static bool kvm_hv_hypercall_enabled(struct kvm *kvm)
  958. {
  959. return kvm->arch.hv_hypercall & HV_X64_MSR_HYPERCALL_ENABLE;
  960. }
  961. static bool kvm_hv_msr_partition_wide(u32 msr)
  962. {
  963. bool r = false;
  964. switch (msr) {
  965. case HV_X64_MSR_GUEST_OS_ID:
  966. case HV_X64_MSR_HYPERCALL:
  967. r = true;
  968. break;
  969. }
  970. return r;
  971. }
  972. static int set_msr_hyperv_pw(struct kvm_vcpu *vcpu, u32 msr, u64 data)
  973. {
  974. struct kvm *kvm = vcpu->kvm;
  975. switch (msr) {
  976. case HV_X64_MSR_GUEST_OS_ID:
  977. kvm->arch.hv_guest_os_id = data;
  978. /* setting guest os id to zero disables hypercall page */
  979. if (!kvm->arch.hv_guest_os_id)
  980. kvm->arch.hv_hypercall &= ~HV_X64_MSR_HYPERCALL_ENABLE;
  981. break;
  982. case HV_X64_MSR_HYPERCALL: {
  983. u64 gfn;
  984. unsigned long addr;
  985. u8 instructions[4];
  986. /* if guest os id is not set hypercall should remain disabled */
  987. if (!kvm->arch.hv_guest_os_id)
  988. break;
  989. if (!(data & HV_X64_MSR_HYPERCALL_ENABLE)) {
  990. kvm->arch.hv_hypercall = data;
  991. break;
  992. }
  993. gfn = data >> HV_X64_MSR_HYPERCALL_PAGE_ADDRESS_SHIFT;
  994. addr = gfn_to_hva(kvm, gfn);
  995. if (kvm_is_error_hva(addr))
  996. return 1;
  997. kvm_x86_ops->patch_hypercall(vcpu, instructions);
  998. ((unsigned char *)instructions)[3] = 0xc3; /* ret */
  999. if (copy_to_user((void __user *)addr, instructions, 4))
  1000. return 1;
  1001. kvm->arch.hv_hypercall = data;
  1002. break;
  1003. }
  1004. default:
  1005. pr_unimpl(vcpu, "HYPER-V unimplemented wrmsr: 0x%x "
  1006. "data 0x%llx\n", msr, data);
  1007. return 1;
  1008. }
  1009. return 0;
  1010. }
  1011. static int set_msr_hyperv(struct kvm_vcpu *vcpu, u32 msr, u64 data)
  1012. {
  1013. switch (msr) {
  1014. case HV_X64_MSR_APIC_ASSIST_PAGE: {
  1015. unsigned long addr;
  1016. if (!(data & HV_X64_MSR_APIC_ASSIST_PAGE_ENABLE)) {
  1017. vcpu->arch.hv_vapic = data;
  1018. break;
  1019. }
  1020. addr = gfn_to_hva(vcpu->kvm, data >>
  1021. HV_X64_MSR_APIC_ASSIST_PAGE_ADDRESS_SHIFT);
  1022. if (kvm_is_error_hva(addr))
  1023. return 1;
  1024. if (clear_user((void __user *)addr, PAGE_SIZE))
  1025. return 1;
  1026. vcpu->arch.hv_vapic = data;
  1027. break;
  1028. }
  1029. case HV_X64_MSR_EOI:
  1030. return kvm_hv_vapic_msr_write(vcpu, APIC_EOI, data);
  1031. case HV_X64_MSR_ICR:
  1032. return kvm_hv_vapic_msr_write(vcpu, APIC_ICR, data);
  1033. case HV_X64_MSR_TPR:
  1034. return kvm_hv_vapic_msr_write(vcpu, APIC_TASKPRI, data);
  1035. default:
  1036. pr_unimpl(vcpu, "HYPER-V unimplemented wrmsr: 0x%x "
  1037. "data 0x%llx\n", msr, data);
  1038. return 1;
  1039. }
  1040. return 0;
  1041. }
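/*
 * Central WRMSR emulation.  A zero return means the write was handled
 * (or deliberately ignored); a non-zero return tells the caller to
 * fault the access with #GP.  Unknown MSRs are either faulted or, when
 * the ignore_msrs module parameter is set, logged and ignored.
 */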
  1042. int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)
  1043. {
  1044. switch (msr) {
  1045. case MSR_EFER:
  1046. return set_efer(vcpu, data);
  1047. case MSR_K7_HWCR:
  1048. data &= ~(u64)0x40; /* ignore flush filter disable */
  1049. data &= ~(u64)0x100; /* ignore ignne emulation enable */
  1050. if (data != 0) {
  1051. pr_unimpl(vcpu, "unimplemented HWCR wrmsr: 0x%llx\n",
  1052. data);
  1053. return 1;
  1054. }
  1055. break;
  1056. case MSR_FAM10H_MMIO_CONF_BASE:
  1057. if (data != 0) {
  1058. pr_unimpl(vcpu, "unimplemented MMIO_CONF_BASE wrmsr: "
  1059. "0x%llx\n", data);
  1060. return 1;
  1061. }
  1062. break;
  1063. case MSR_AMD64_NB_CFG:
  1064. break;
  1065. case MSR_IA32_DEBUGCTLMSR:
  1066. if (!data) {
  1067. /* We support the non-activated case already */
  1068. break;
  1069. } else if (data & ~(DEBUGCTLMSR_LBR | DEBUGCTLMSR_BTF)) {
  1070. /* Values other than LBR and BTF are vendor-specific,
  1071. thus reserved and should throw a #GP */
  1072. return 1;
  1073. }
  1074. pr_unimpl(vcpu, "%s: MSR_IA32_DEBUGCTLMSR 0x%llx, nop\n",
  1075. __func__, data);
  1076. break;
  1077. case MSR_IA32_UCODE_REV:
  1078. case MSR_IA32_UCODE_WRITE:
  1079. case MSR_VM_HSAVE_PA:
  1080. case MSR_AMD64_PATCH_LOADER:
  1081. break;
  1082. case 0x200 ... 0x2ff:
  1083. return set_msr_mtrr(vcpu, msr, data);
  1084. case MSR_IA32_APICBASE:
  1085. kvm_set_apic_base(vcpu, data);
  1086. break;
  1087. case APIC_BASE_MSR ... APIC_BASE_MSR + 0x3ff:
  1088. return kvm_x2apic_msr_write(vcpu, msr, data);
  1089. case MSR_IA32_MISC_ENABLE:
  1090. vcpu->arch.ia32_misc_enable_msr = data;
  1091. break;
  1092. case MSR_KVM_WALL_CLOCK_NEW:
  1093. case MSR_KVM_WALL_CLOCK:
  1094. vcpu->kvm->arch.wall_clock = data;
  1095. kvm_write_wall_clock(vcpu->kvm, data);
  1096. break;
  1097. case MSR_KVM_SYSTEM_TIME_NEW:
  1098. case MSR_KVM_SYSTEM_TIME: {
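/*
 * kvmclock registration: the guest writes the physical address of its
 * pvclock structure with bit 0 as the enable flag.  Any previously
 * registered page is released, the new one is pinned with
 * gfn_to_page(), and a clock update is requested for this vcpu.
 */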
  1099. if (vcpu->arch.time_page) {
  1100. kvm_release_page_dirty(vcpu->arch.time_page);
  1101. vcpu->arch.time_page = NULL;
  1102. }
  1103. vcpu->arch.time = data;
  1104. /* we verify if the enable bit is set... */
  1105. if (!(data & 1))
  1106. break;
  1107. /* ...but clean it before doing the actual write */
  1108. vcpu->arch.time_offset = data & ~(PAGE_MASK | 1);
  1109. vcpu->arch.time_page =
  1110. gfn_to_page(vcpu->kvm, data >> PAGE_SHIFT);
  1111. if (is_error_page(vcpu->arch.time_page)) {
  1112. kvm_release_page_clean(vcpu->arch.time_page);
  1113. vcpu->arch.time_page = NULL;
  1114. }
  1115. kvm_request_guest_time_update(vcpu);
  1116. break;
  1117. }
  1118. case MSR_IA32_MCG_CTL:
  1119. case MSR_IA32_MCG_STATUS:
  1120. case MSR_IA32_MC0_CTL ... MSR_IA32_MC0_CTL + 4 * KVM_MAX_MCE_BANKS - 1:
  1121. return set_msr_mce(vcpu, msr, data);
  1122. /* Performance counters are not protected by a CPUID bit,
  1123. * so we should check all of them in the generic path for the sake of
  1124. * cross vendor migration.
  1125. * Writing a zero into the event select MSRs disables them,
  1126. * which we perfectly emulate ;-). Any other value should be at least
  1127. * reported, some guests depend on them.
  1128. */
  1129. case MSR_P6_EVNTSEL0:
  1130. case MSR_P6_EVNTSEL1:
  1131. case MSR_K7_EVNTSEL0:
  1132. case MSR_K7_EVNTSEL1:
  1133. case MSR_K7_EVNTSEL2:
  1134. case MSR_K7_EVNTSEL3:
  1135. if (data != 0)
  1136. pr_unimpl(vcpu, "unimplemented perfctr wrmsr: "
  1137. "0x%x data 0x%llx\n", msr, data);
  1138. break;
  1139. /* at least RHEL 4 unconditionally writes to the perfctr registers,
  1140. * so we ignore writes to make it happy.
  1141. */
  1142. case MSR_P6_PERFCTR0:
  1143. case MSR_P6_PERFCTR1:
  1144. case MSR_K7_PERFCTR0:
  1145. case MSR_K7_PERFCTR1:
  1146. case MSR_K7_PERFCTR2:
  1147. case MSR_K7_PERFCTR3:
  1148. pr_unimpl(vcpu, "unimplemented perfctr wrmsr: "
  1149. "0x%x data 0x%llx\n", msr, data);
  1150. break;
  1151. case HV_X64_MSR_GUEST_OS_ID ... HV_X64_MSR_SINT15:
  1152. if (kvm_hv_msr_partition_wide(msr)) {
  1153. int r;
  1154. mutex_lock(&vcpu->kvm->lock);
  1155. r = set_msr_hyperv_pw(vcpu, msr, data);
  1156. mutex_unlock(&vcpu->kvm->lock);
  1157. return r;
  1158. } else
  1159. return set_msr_hyperv(vcpu, msr, data);
  1160. break;
  1161. default:
  1162. if (msr && (msr == vcpu->kvm->arch.xen_hvm_config.msr))
  1163. return xen_hvm_config(vcpu, data);
  1164. if (!ignore_msrs) {
  1165. pr_unimpl(vcpu, "unhandled wrmsr: 0x%x data %llx\n",
  1166. msr, data);
  1167. return 1;
  1168. } else {
  1169. pr_unimpl(vcpu, "ignored wrmsr: 0x%x data %llx\n",
  1170. msr, data);
  1171. break;
  1172. }
  1173. }
  1174. return 0;
  1175. }
  1176. EXPORT_SYMBOL_GPL(kvm_set_msr_common);
/*
 * Reads an msr value (of 'msr_index') into 'pdata'.
 * Returns 0 on success, non-0 otherwise.
 * Assumes vcpu_load() was already called.
 */
int kvm_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
{
	return kvm_x86_ops->get_msr(vcpu, msr_index, pdata);
}
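/*
 * MTRR MSR reads (the 0x200-0x2ff range, which also covers the
 * fixed-range MTRRs, MTRRdefType and PAT) are served from the cached
 * mtrr_state.  Variable-range MTRRs come in base/mask pairs, so the
 * range index is (msr - 0x200) / 2 and the odd MSR of each pair is the
 * mask register.
 */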
  1186. static int get_msr_mtrr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
  1187. {
  1188. u64 *p = (u64 *)&vcpu->arch.mtrr_state.fixed_ranges;
  1189. if (!msr_mtrr_valid(msr))
  1190. return 1;
  1191. if (msr == MSR_MTRRdefType)
  1192. *pdata = vcpu->arch.mtrr_state.def_type +
  1193. (vcpu->arch.mtrr_state.enabled << 10);
  1194. else if (msr == MSR_MTRRfix64K_00000)
  1195. *pdata = p[0];
  1196. else if (msr == MSR_MTRRfix16K_80000 || msr == MSR_MTRRfix16K_A0000)
  1197. *pdata = p[1 + msr - MSR_MTRRfix16K_80000];
  1198. else if (msr >= MSR_MTRRfix4K_C0000 && msr <= MSR_MTRRfix4K_F8000)
  1199. *pdata = p[3 + msr - MSR_MTRRfix4K_C0000];
  1200. else if (msr == MSR_IA32_CR_PAT)
  1201. *pdata = vcpu->arch.pat;
  1202. else { /* Variable MTRRs */
  1203. int idx, is_mtrr_mask;
  1204. u64 *pt;
  1205. idx = (msr - 0x200) / 2;
  1206. is_mtrr_mask = msr - 0x200 - 2 * idx;
  1207. if (!is_mtrr_mask)
  1208. pt =
  1209. (u64 *)&vcpu->arch.mtrr_state.var_ranges[idx].base_lo;
  1210. else
  1211. pt =
  1212. (u64 *)&vcpu->arch.mtrr_state.var_ranges[idx].mask_lo;
  1213. *pdata = *pt;
  1214. }
  1215. return 0;
  1216. }
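/*
 * Machine-check MSR reads.  Each MCE bank occupies four consecutive
 * MSRs (CTL, STATUS, ADDR, MISC) starting at MSR_IA32_MC0_CTL, and the
 * banks are stored linearly in vcpu->arch.mce_banks, so the MSR offset
 * indexes the array directly.
 */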
  1217. static int get_msr_mce(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
  1218. {
  1219. u64 data;
  1220. u64 mcg_cap = vcpu->arch.mcg_cap;
  1221. unsigned bank_num = mcg_cap & 0xff;
  1222. switch (msr) {
  1223. case MSR_IA32_P5_MC_ADDR:
  1224. case MSR_IA32_P5_MC_TYPE:
  1225. data = 0;
  1226. break;
  1227. case MSR_IA32_MCG_CAP:
  1228. data = vcpu->arch.mcg_cap;
  1229. break;
  1230. case MSR_IA32_MCG_CTL:
  1231. if (!(mcg_cap & MCG_CTL_P))
  1232. return 1;
  1233. data = vcpu->arch.mcg_ctl;
  1234. break;
  1235. case MSR_IA32_MCG_STATUS:
  1236. data = vcpu->arch.mcg_status;
  1237. break;
  1238. default:
  1239. if (msr >= MSR_IA32_MC0_CTL &&
  1240. msr < MSR_IA32_MC0_CTL + 4 * bank_num) {
  1241. u32 offset = msr - MSR_IA32_MC0_CTL;
  1242. data = vcpu->arch.mce_banks[offset];
  1243. break;
  1244. }
  1245. return 1;
  1246. }
  1247. *pdata = data;
  1248. return 0;
  1249. }
  1250. static int get_msr_hyperv_pw(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
  1251. {
  1252. u64 data = 0;
  1253. struct kvm *kvm = vcpu->kvm;
  1254. switch (msr) {
  1255. case HV_X64_MSR_GUEST_OS_ID:
  1256. data = kvm->arch.hv_guest_os_id;
  1257. break;
  1258. case HV_X64_MSR_HYPERCALL:
  1259. data = kvm->arch.hv_hypercall;
  1260. break;
  1261. default:
  1262. pr_unimpl(vcpu, "Hyper-V unhandled rdmsr: 0x%x\n", msr);
  1263. return 1;
  1264. }
  1265. *pdata = data;
  1266. return 0;
  1267. }
  1268. static int get_msr_hyperv(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
  1269. {
  1270. u64 data = 0;
  1271. switch (msr) {
  1272. case HV_X64_MSR_VP_INDEX: {
  1273. int r;
  1274. struct kvm_vcpu *v;
  1275. kvm_for_each_vcpu(r, v, vcpu->kvm)
  1276. if (v == vcpu)
  1277. data = r;
  1278. break;
  1279. }
  1280. case HV_X64_MSR_EOI:
  1281. return kvm_hv_vapic_msr_read(vcpu, APIC_EOI, pdata);
  1282. case HV_X64_MSR_ICR:
  1283. return kvm_hv_vapic_msr_read(vcpu, APIC_ICR, pdata);
  1284. case HV_X64_MSR_TPR:
  1285. return kvm_hv_vapic_msr_read(vcpu, APIC_TASKPRI, pdata);
  1286. default:
  1287. pr_unimpl(vcpu, "Hyper-V unhandled rdmsr: 0x%x\n", msr);
  1288. return 1;
  1289. }
  1290. *pdata = data;
  1291. return 0;
  1292. }
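/*
 * Central RDMSR emulation, the counterpart of kvm_set_msr_common().
 * Many legacy and perf-counter MSRs simply read back as zero; the MTRR,
 * machine-check, x2apic and Hyper-V ranges are dispatched to their
 * helpers; unknown MSRs fault unless ignore_msrs is set.
 */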
  1293. int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
  1294. {
  1295. u64 data;
  1296. switch (msr) {
  1297. case MSR_IA32_PLATFORM_ID:
  1298. case MSR_IA32_UCODE_REV:
  1299. case MSR_IA32_EBL_CR_POWERON:
  1300. case MSR_IA32_DEBUGCTLMSR:
  1301. case MSR_IA32_LASTBRANCHFROMIP:
  1302. case MSR_IA32_LASTBRANCHTOIP:
  1303. case MSR_IA32_LASTINTFROMIP:
  1304. case MSR_IA32_LASTINTTOIP:
  1305. case MSR_K8_SYSCFG:
  1306. case MSR_K7_HWCR:
  1307. case MSR_VM_HSAVE_PA:
  1308. case MSR_P6_PERFCTR0:
  1309. case MSR_P6_PERFCTR1:
  1310. case MSR_P6_EVNTSEL0:
  1311. case MSR_P6_EVNTSEL1:
  1312. case MSR_K7_EVNTSEL0:
  1313. case MSR_K7_PERFCTR0:
  1314. case MSR_K8_INT_PENDING_MSG:
  1315. case MSR_AMD64_NB_CFG:
  1316. case MSR_FAM10H_MMIO_CONF_BASE:
  1317. data = 0;
  1318. break;
  1319. case MSR_MTRRcap:
  1320. data = 0x500 | KVM_NR_VAR_MTRR;
  1321. break;
  1322. case 0x200 ... 0x2ff:
  1323. return get_msr_mtrr(vcpu, msr, pdata);
  1324. case 0xcd: /* fsb frequency */
  1325. data = 3;
  1326. break;
  1327. case MSR_IA32_APICBASE:
  1328. data = kvm_get_apic_base(vcpu);
  1329. break;
  1330. case APIC_BASE_MSR ... APIC_BASE_MSR + 0x3ff:
  1331. return kvm_x2apic_msr_read(vcpu, msr, pdata);
  1332. break;
  1333. case MSR_IA32_MISC_ENABLE:
  1334. data = vcpu->arch.ia32_misc_enable_msr;
  1335. break;
  1336. case MSR_IA32_PERF_STATUS:
  1337. /* TSC increment by tick */
  1338. data = 1000ULL;
  1339. /* CPU multiplier */
  1340. data |= (((uint64_t)4ULL) << 40);
  1341. break;
  1342. case MSR_EFER:
  1343. data = vcpu->arch.efer;
  1344. break;
  1345. case MSR_KVM_WALL_CLOCK:
  1346. case MSR_KVM_WALL_CLOCK_NEW:
  1347. data = vcpu->kvm->arch.wall_clock;
  1348. break;
  1349. case MSR_KVM_SYSTEM_TIME:
  1350. case MSR_KVM_SYSTEM_TIME_NEW:
  1351. data = vcpu->arch.time;
  1352. break;
  1353. case MSR_IA32_P5_MC_ADDR:
  1354. case MSR_IA32_P5_MC_TYPE:
  1355. case MSR_IA32_MCG_CAP:
  1356. case MSR_IA32_MCG_CTL:
  1357. case MSR_IA32_MCG_STATUS:
  1358. case MSR_IA32_MC0_CTL ... MSR_IA32_MC0_CTL + 4 * KVM_MAX_MCE_BANKS - 1:
  1359. return get_msr_mce(vcpu, msr, pdata);
  1360. case HV_X64_MSR_GUEST_OS_ID ... HV_X64_MSR_SINT15:
  1361. if (kvm_hv_msr_partition_wide(msr)) {
  1362. int r;
  1363. mutex_lock(&vcpu->kvm->lock);
  1364. r = get_msr_hyperv_pw(vcpu, msr, pdata);
  1365. mutex_unlock(&vcpu->kvm->lock);
  1366. return r;
  1367. } else
  1368. return get_msr_hyperv(vcpu, msr, pdata);
  1369. break;
  1370. default:
  1371. if (!ignore_msrs) {
  1372. pr_unimpl(vcpu, "unhandled rdmsr: 0x%x\n", msr);
  1373. return 1;
  1374. } else {
  1375. pr_unimpl(vcpu, "ignored rdmsr: 0x%x\n", msr);
  1376. data = 0;
  1377. }
  1378. break;
  1379. }
  1380. *pdata = data;
  1381. return 0;
  1382. }
  1383. EXPORT_SYMBOL_GPL(kvm_get_msr_common);
  1384. /*
  1385. * Read or write a bunch of msrs. All parameters are kernel addresses.
  1386. *
  1387. * @return number of msrs set successfully.
  1388. */
  1389. static int __msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs *msrs,
  1390. struct kvm_msr_entry *entries,
  1391. int (*do_msr)(struct kvm_vcpu *vcpu,
  1392. unsigned index, u64 *data))
  1393. {
  1394. int i, idx;
  1395. idx = srcu_read_lock(&vcpu->kvm->srcu);
  1396. for (i = 0; i < msrs->nmsrs; ++i)
  1397. if (do_msr(vcpu, entries[i].index, &entries[i].data))
  1398. break;
  1399. srcu_read_unlock(&vcpu->kvm->srcu, idx);
  1400. return i;
  1401. }
  1402. /*
  1403. * Read or write a bunch of msrs. Parameters are user addresses.
  1404. *
  1405. * @return number of msrs set successfully.
  1406. */
  1407. static int msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs __user *user_msrs,
  1408. int (*do_msr)(struct kvm_vcpu *vcpu,
  1409. unsigned index, u64 *data),
  1410. int writeback)
  1411. {
  1412. struct kvm_msrs msrs;
  1413. struct kvm_msr_entry *entries;
  1414. int r, n;
  1415. unsigned size;
  1416. r = -EFAULT;
  1417. if (copy_from_user(&msrs, user_msrs, sizeof msrs))
  1418. goto out;
  1419. r = -E2BIG;
  1420. if (msrs.nmsrs >= MAX_IO_MSRS)
  1421. goto out;
  1422. r = -ENOMEM;
  1423. size = sizeof(struct kvm_msr_entry) * msrs.nmsrs;
  1424. entries = kmalloc(size, GFP_KERNEL);
  1425. if (!entries)
  1426. goto out;
  1427. r = -EFAULT;
  1428. if (copy_from_user(entries, user_msrs->entries, size))
  1429. goto out_free;
  1430. r = n = __msr_io(vcpu, &msrs, entries, do_msr);
  1431. if (r < 0)
  1432. goto out_free;
  1433. r = -EFAULT;
  1434. if (writeback && copy_to_user(user_msrs->entries, entries, size))
  1435. goto out_free;
  1436. r = n;
  1437. out_free:
  1438. kfree(entries);
  1439. out:
  1440. return r;
  1441. }
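/*
 * KVM_CHECK_EXTENSION handler: boolean capabilities report 1, the rest
 * report a limit or offset (e.g. KVM_MAX_VCPUS, KVM_MAX_MCE_BANKS,
 * KVM_COALESCED_MMIO_PAGE_OFFSET), and unknown capabilities report 0.
 */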
  1442. int kvm_dev_ioctl_check_extension(long ext)
  1443. {
  1444. int r;
  1445. switch (ext) {
  1446. case KVM_CAP_IRQCHIP:
  1447. case KVM_CAP_HLT:
  1448. case KVM_CAP_MMU_SHADOW_CACHE_CONTROL:
  1449. case KVM_CAP_SET_TSS_ADDR:
  1450. case KVM_CAP_EXT_CPUID:
  1451. case KVM_CAP_CLOCKSOURCE:
  1452. case KVM_CAP_PIT:
  1453. case KVM_CAP_NOP_IO_DELAY:
  1454. case KVM_CAP_MP_STATE:
  1455. case KVM_CAP_SYNC_MMU:
  1456. case KVM_CAP_REINJECT_CONTROL:
  1457. case KVM_CAP_IRQ_INJECT_STATUS:
  1458. case KVM_CAP_ASSIGN_DEV_IRQ:
  1459. case KVM_CAP_IRQFD:
  1460. case KVM_CAP_IOEVENTFD:
  1461. case KVM_CAP_PIT2:
  1462. case KVM_CAP_PIT_STATE2:
  1463. case KVM_CAP_SET_IDENTITY_MAP_ADDR:
  1464. case KVM_CAP_XEN_HVM:
  1465. case KVM_CAP_ADJUST_CLOCK:
  1466. case KVM_CAP_VCPU_EVENTS:
  1467. case KVM_CAP_HYPERV:
  1468. case KVM_CAP_HYPERV_VAPIC:
  1469. case KVM_CAP_HYPERV_SPIN:
  1470. case KVM_CAP_PCI_SEGMENT:
  1471. case KVM_CAP_DEBUGREGS:
  1472. case KVM_CAP_X86_ROBUST_SINGLESTEP:
  1473. r = 1;
  1474. break;
  1475. case KVM_CAP_COALESCED_MMIO:
  1476. r = KVM_COALESCED_MMIO_PAGE_OFFSET;
  1477. break;
  1478. case KVM_CAP_VAPIC:
  1479. r = !kvm_x86_ops->cpu_has_accelerated_tpr();
  1480. break;
  1481. case KVM_CAP_NR_VCPUS:
  1482. r = KVM_MAX_VCPUS;
  1483. break;
  1484. case KVM_CAP_NR_MEMSLOTS:
  1485. r = KVM_MEMORY_SLOTS;
  1486. break;
  1487. case KVM_CAP_PV_MMU: /* obsolete */
  1488. r = 0;
  1489. break;
  1490. case KVM_CAP_IOMMU:
  1491. r = iommu_found();
  1492. break;
  1493. case KVM_CAP_MCE:
  1494. r = KVM_MAX_MCE_BANKS;
  1495. break;
  1496. default:
  1497. r = 0;
  1498. break;
  1499. }
  1500. return r;
  1501. }
  1502. long kvm_arch_dev_ioctl(struct file *filp,
  1503. unsigned int ioctl, unsigned long arg)
  1504. {
  1505. void __user *argp = (void __user *)arg;
  1506. long r;
  1507. switch (ioctl) {
  1508. case KVM_GET_MSR_INDEX_LIST: {
  1509. struct kvm_msr_list __user *user_msr_list = argp;
  1510. struct kvm_msr_list msr_list;
  1511. unsigned n;
  1512. r = -EFAULT;
  1513. if (copy_from_user(&msr_list, user_msr_list, sizeof msr_list))
  1514. goto out;
  1515. n = msr_list.nmsrs;
  1516. msr_list.nmsrs = num_msrs_to_save + ARRAY_SIZE(emulated_msrs);
  1517. if (copy_to_user(user_msr_list, &msr_list, sizeof msr_list))
  1518. goto out;
  1519. r = -E2BIG;
  1520. if (n < msr_list.nmsrs)
  1521. goto out;
  1522. r = -EFAULT;
  1523. if (copy_to_user(user_msr_list->indices, &msrs_to_save,
  1524. num_msrs_to_save * sizeof(u32)))
  1525. goto out;
  1526. if (copy_to_user(user_msr_list->indices + num_msrs_to_save,
  1527. &emulated_msrs,
  1528. ARRAY_SIZE(emulated_msrs) * sizeof(u32)))
  1529. goto out;
  1530. r = 0;
  1531. break;
  1532. }
  1533. case KVM_GET_SUPPORTED_CPUID: {
  1534. struct kvm_cpuid2 __user *cpuid_arg = argp;
  1535. struct kvm_cpuid2 cpuid;
  1536. r = -EFAULT;
  1537. if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
  1538. goto out;
  1539. r = kvm_dev_ioctl_get_supported_cpuid(&cpuid,
  1540. cpuid_arg->entries);
  1541. if (r)
  1542. goto out;
  1543. r = -EFAULT;
  1544. if (copy_to_user(cpuid_arg, &cpuid, sizeof cpuid))
  1545. goto out;
  1546. r = 0;
  1547. break;
  1548. }
  1549. case KVM_X86_GET_MCE_CAP_SUPPORTED: {
  1550. u64 mce_cap;
  1551. mce_cap = KVM_MCE_CAP_SUPPORTED;
  1552. r = -EFAULT;
  1553. if (copy_to_user(argp, &mce_cap, sizeof mce_cap))
  1554. goto out;
  1555. r = 0;
  1556. break;
  1557. }
  1558. default:
  1559. r = -EINVAL;
  1560. }
  1561. out:
  1562. return r;
  1563. }
  1564. void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
  1565. {
  1566. kvm_x86_ops->vcpu_load(vcpu, cpu);
  1567. if (unlikely(per_cpu(cpu_tsc_khz, cpu) == 0)) {
  1568. unsigned long khz = cpufreq_quick_get(cpu);
  1569. if (!khz)
  1570. khz = tsc_khz;
  1571. per_cpu(cpu_tsc_khz, cpu) = khz;
  1572. }
  1573. kvm_request_guest_time_update(vcpu);
  1574. }
  1575. void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
  1576. {
  1577. kvm_x86_ops->vcpu_put(vcpu);
  1578. kvm_put_guest_fpu(vcpu);
  1579. }
static int is_efer_nx(void)
{
	unsigned long long efer = 0;

	rdmsrl_safe(MSR_EFER, &efer);
	return efer & EFER_NX;
}
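/*
 * If the host runs with EFER.NX clear, the guest cannot use NX either,
 * so drop the NX bit (bit 20 of CPUID leaf 0x80000001 EDX) from the
 * guest's CPUID entries.
 */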
static void cpuid_fix_nx_cap(struct kvm_vcpu *vcpu)
{
	int i;
	struct kvm_cpuid_entry2 *e, *entry;

	entry = NULL;
	for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
		e = &vcpu->arch.cpuid_entries[i];
		if (e->function == 0x80000001) {
			entry = e;
			break;
		}
	}
	if (entry && (entry->edx & (1 << 20)) && !is_efer_nx()) {
		entry->edx &= ~(1 << 20);
		printk(KERN_INFO "kvm: guest NX capability removed\n");
	}
}
  1603. /* when an old userspace process fills a new kernel module */
  1604. static int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu,
  1605. struct kvm_cpuid *cpuid,
  1606. struct kvm_cpuid_entry __user *entries)
  1607. {
  1608. int r, i;
  1609. struct kvm_cpuid_entry *cpuid_entries;
  1610. r = -E2BIG;
  1611. if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
  1612. goto out;
  1613. r = -ENOMEM;
  1614. cpuid_entries = vmalloc(sizeof(struct kvm_cpuid_entry) * cpuid->nent);
  1615. if (!cpuid_entries)
  1616. goto out;
  1617. r = -EFAULT;
  1618. if (copy_from_user(cpuid_entries, entries,
  1619. cpuid->nent * sizeof(struct kvm_cpuid_entry)))
  1620. goto out_free;
  1621. for (i = 0; i < cpuid->nent; i++) {
  1622. vcpu->arch.cpuid_entries[i].function = cpuid_entries[i].function;
  1623. vcpu->arch.cpuid_entries[i].eax = cpuid_entries[i].eax;
  1624. vcpu->arch.cpuid_entries[i].ebx = cpuid_entries[i].ebx;
  1625. vcpu->arch.cpuid_entries[i].ecx = cpuid_entries[i].ecx;
  1626. vcpu->arch.cpuid_entries[i].edx = cpuid_entries[i].edx;
  1627. vcpu->arch.cpuid_entries[i].index = 0;
  1628. vcpu->arch.cpuid_entries[i].flags = 0;
  1629. vcpu->arch.cpuid_entries[i].padding[0] = 0;
  1630. vcpu->arch.cpuid_entries[i].padding[1] = 0;
  1631. vcpu->arch.cpuid_entries[i].padding[2] = 0;
  1632. }
  1633. vcpu->arch.cpuid_nent = cpuid->nent;
  1634. cpuid_fix_nx_cap(vcpu);
  1635. r = 0;
  1636. kvm_apic_set_version(vcpu);
  1637. kvm_x86_ops->cpuid_update(vcpu);
  1638. update_cpuid(vcpu);
  1639. out_free:
  1640. vfree(cpuid_entries);
  1641. out:
  1642. return r;
  1643. }
  1644. static int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
  1645. struct kvm_cpuid2 *cpuid,
  1646. struct kvm_cpuid_entry2 __user *entries)
  1647. {
  1648. int r;
  1649. r = -E2BIG;
  1650. if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
  1651. goto out;
  1652. r = -EFAULT;
  1653. if (copy_from_user(&vcpu->arch.cpuid_entries, entries,
  1654. cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
  1655. goto out;
  1656. vcpu->arch.cpuid_nent = cpuid->nent;
  1657. kvm_apic_set_version(vcpu);
  1658. kvm_x86_ops->cpuid_update(vcpu);
  1659. update_cpuid(vcpu);
  1660. return 0;
  1661. out:
  1662. return r;
  1663. }
  1664. static int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
  1665. struct kvm_cpuid2 *cpuid,
  1666. struct kvm_cpuid_entry2 __user *entries)
  1667. {
  1668. int r;
  1669. r = -E2BIG;
  1670. if (cpuid->nent < vcpu->arch.cpuid_nent)
  1671. goto out;
  1672. r = -EFAULT;
  1673. if (copy_to_user(entries, &vcpu->arch.cpuid_entries,
  1674. vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
  1675. goto out;
  1676. return 0;
  1677. out:
  1678. cpuid->nent = vcpu->arch.cpuid_nent;
  1679. return r;
  1680. }
  1681. static void do_cpuid_1_ent(struct kvm_cpuid_entry2 *entry, u32 function,
  1682. u32 index)
  1683. {
  1684. entry->function = function;
  1685. entry->index = index;
  1686. cpuid_count(entry->function, entry->index,
  1687. &entry->eax, &entry->ebx, &entry->ecx, &entry->edx);
  1688. entry->flags = 0;
  1689. }
  1690. #define F(x) bit(X86_FEATURE_##x)
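/*
 * do_cpuid_ent() fills the entry for one CPUID function and masks the
 * feature words against what KVM can expose.  Stateful leaf 2 and the
 * index-significant leaves 4, 0xb and 0xd emit additional sub-entries,
 * while the KVM_CPUID_* leaves advertise the paravirtual signature and
 * feature bits.
 */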
  1691. static void do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
  1692. u32 index, int *nent, int maxnent)
  1693. {
  1694. unsigned f_nx = is_efer_nx() ? F(NX) : 0;
  1695. #ifdef CONFIG_X86_64
  1696. unsigned f_gbpages = (kvm_x86_ops->get_lpage_level() == PT_PDPE_LEVEL)
  1697. ? F(GBPAGES) : 0;
  1698. unsigned f_lm = F(LM);
  1699. #else
  1700. unsigned f_gbpages = 0;
  1701. unsigned f_lm = 0;
  1702. #endif
  1703. unsigned f_rdtscp = kvm_x86_ops->rdtscp_supported() ? F(RDTSCP) : 0;
  1704. /* cpuid 1.edx */
  1705. const u32 kvm_supported_word0_x86_features =
  1706. F(FPU) | F(VME) | F(DE) | F(PSE) |
  1707. F(TSC) | F(MSR) | F(PAE) | F(MCE) |
  1708. F(CX8) | F(APIC) | 0 /* Reserved */ | F(SEP) |
  1709. F(MTRR) | F(PGE) | F(MCA) | F(CMOV) |
  1710. F(PAT) | F(PSE36) | 0 /* PSN */ | F(CLFLSH) |
  1711. 0 /* Reserved, DS, ACPI */ | F(MMX) |
  1712. F(FXSR) | F(XMM) | F(XMM2) | F(SELFSNOOP) |
  1713. 0 /* HTT, TM, Reserved, PBE */;
  1714. /* cpuid 0x80000001.edx */
  1715. const u32 kvm_supported_word1_x86_features =
  1716. F(FPU) | F(VME) | F(DE) | F(PSE) |
  1717. F(TSC) | F(MSR) | F(PAE) | F(MCE) |
  1718. F(CX8) | F(APIC) | 0 /* Reserved */ | F(SYSCALL) |
  1719. F(MTRR) | F(PGE) | F(MCA) | F(CMOV) |
  1720. F(PAT) | F(PSE36) | 0 /* Reserved */ |
  1721. f_nx | 0 /* Reserved */ | F(MMXEXT) | F(MMX) |
  1722. F(FXSR) | F(FXSR_OPT) | f_gbpages | f_rdtscp |
  1723. 0 /* Reserved */ | f_lm | F(3DNOWEXT) | F(3DNOW);
  1724. /* cpuid 1.ecx */
  1725. const u32 kvm_supported_word4_x86_features =
  1726. F(XMM3) | 0 /* Reserved, DTES64, MONITOR */ |
  1727. 0 /* DS-CPL, VMX, SMX, EST */ |
  1728. 0 /* TM2 */ | F(SSSE3) | 0 /* CNXT-ID */ | 0 /* Reserved */ |
  1729. 0 /* Reserved */ | F(CX16) | 0 /* xTPR Update, PDCM */ |
  1730. 0 /* Reserved, DCA */ | F(XMM4_1) |
  1731. F(XMM4_2) | F(X2APIC) | F(MOVBE) | F(POPCNT) |
  1732. 0 /* Reserved, AES */ | F(XSAVE) | 0 /* OSXSAVE */;
  1733. /* cpuid 0x80000001.ecx */
  1734. const u32 kvm_supported_word6_x86_features =
  1735. F(LAHF_LM) | F(CMP_LEGACY) | F(SVM) | 0 /* ExtApicSpace */ |
  1736. F(CR8_LEGACY) | F(ABM) | F(SSE4A) | F(MISALIGNSSE) |
  1737. F(3DNOWPREFETCH) | 0 /* OSVW */ | 0 /* IBS */ | F(SSE5) |
  1738. 0 /* SKINIT */ | 0 /* WDT */;
  1739. /* all calls to cpuid_count() should be made on the same cpu */
  1740. get_cpu();
  1741. do_cpuid_1_ent(entry, function, index);
  1742. ++*nent;
  1743. switch (function) {
  1744. case 0:
  1745. entry->eax = min(entry->eax, (u32)0xd);
  1746. break;
  1747. case 1:
  1748. entry->edx &= kvm_supported_word0_x86_features;
  1749. entry->ecx &= kvm_supported_word4_x86_features;
  1750. /* we support x2apic emulation even if host does not support
  1751. * it since we emulate x2apic in software */
  1752. entry->ecx |= F(X2APIC);
  1753. break;
  1754. /* function 2 entries are STATEFUL. That is, repeated cpuid commands
  1755. * may return different values. This forces us to get_cpu() before
  1756. * issuing the first command, and also to emulate this annoying behavior
  1757. * in kvm_emulate_cpuid() using KVM_CPUID_FLAG_STATE_READ_NEXT */
  1758. case 2: {
  1759. int t, times = entry->eax & 0xff;
  1760. entry->flags |= KVM_CPUID_FLAG_STATEFUL_FUNC;
  1761. entry->flags |= KVM_CPUID_FLAG_STATE_READ_NEXT;
  1762. for (t = 1; t < times && *nent < maxnent; ++t) {
  1763. do_cpuid_1_ent(&entry[t], function, 0);
  1764. entry[t].flags |= KVM_CPUID_FLAG_STATEFUL_FUNC;
  1765. ++*nent;
  1766. }
  1767. break;
  1768. }
  1769. /* function 4 and 0xb have additional index. */
  1770. case 4: {
  1771. int i, cache_type;
  1772. entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
  1773. /* read more entries until cache_type is zero */
  1774. for (i = 1; *nent < maxnent; ++i) {
  1775. cache_type = entry[i - 1].eax & 0x1f;
  1776. if (!cache_type)
  1777. break;
  1778. do_cpuid_1_ent(&entry[i], function, i);
  1779. entry[i].flags |=
  1780. KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
  1781. ++*nent;
  1782. }
  1783. break;
  1784. }
  1785. case 0xb: {
  1786. int i, level_type;
  1787. entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
  1788. /* read more entries until level_type is zero */
  1789. for (i = 1; *nent < maxnent; ++i) {
  1790. level_type = entry[i - 1].ecx & 0xff00;
  1791. if (!level_type)
  1792. break;
  1793. do_cpuid_1_ent(&entry[i], function, i);
  1794. entry[i].flags |=
  1795. KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
  1796. ++*nent;
  1797. }
  1798. break;
  1799. }
  1800. case 0xd: {
  1801. int i;
  1802. entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
  1803. for (i = 1; *nent < maxnent; ++i) {
  1804. if (entry[i - 1].eax == 0 && i != 2)
  1805. break;
  1806. do_cpuid_1_ent(&entry[i], function, i);
  1807. entry[i].flags |=
  1808. KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
  1809. ++*nent;
  1810. }
  1811. break;
  1812. }
  1813. case KVM_CPUID_SIGNATURE: {
  1814. char signature[12] = "KVMKVMKVM\0\0";
  1815. u32 *sigptr = (u32 *)signature;
  1816. entry->eax = 0;
  1817. entry->ebx = sigptr[0];
  1818. entry->ecx = sigptr[1];
  1819. entry->edx = sigptr[2];
  1820. break;
  1821. }
  1822. case KVM_CPUID_FEATURES:
  1823. entry->eax = (1 << KVM_FEATURE_CLOCKSOURCE) |
  1824. (1 << KVM_FEATURE_NOP_IO_DELAY) |
  1825. (1 << KVM_FEATURE_CLOCKSOURCE2) |
  1826. (1 << KVM_FEATURE_CLOCKSOURCE_STABLE_BIT);
  1827. entry->ebx = 0;
  1828. entry->ecx = 0;
  1829. entry->edx = 0;
  1830. break;
  1831. case 0x80000000:
  1832. entry->eax = min(entry->eax, 0x8000001a);
  1833. break;
  1834. case 0x80000001:
  1835. entry->edx &= kvm_supported_word1_x86_features;
  1836. entry->ecx &= kvm_supported_word6_x86_features;
  1837. break;
  1838. }
  1839. kvm_x86_ops->set_supported_cpuid(function, entry);
  1840. put_cpu();
  1841. }
  1842. #undef F
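/*
 * KVM_GET_SUPPORTED_CPUID: build the full table of supported CPUID
 * entries (basic leaves, extended 0x80000000 leaves, then the KVM
 * signature and feature leaves) in a temporary buffer and copy it to
 * userspace.  -E2BIG is returned if the caller's array is too small.
 */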
  1843. static int kvm_dev_ioctl_get_supported_cpuid(struct kvm_cpuid2 *cpuid,
  1844. struct kvm_cpuid_entry2 __user *entries)
  1845. {
  1846. struct kvm_cpuid_entry2 *cpuid_entries;
  1847. int limit, nent = 0, r = -E2BIG;
  1848. u32 func;
  1849. if (cpuid->nent < 1)
  1850. goto out;
  1851. if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
  1852. cpuid->nent = KVM_MAX_CPUID_ENTRIES;
  1853. r = -ENOMEM;
  1854. cpuid_entries = vmalloc(sizeof(struct kvm_cpuid_entry2) * cpuid->nent);
  1855. if (!cpuid_entries)
  1856. goto out;
  1857. do_cpuid_ent(&cpuid_entries[0], 0, 0, &nent, cpuid->nent);
  1858. limit = cpuid_entries[0].eax;
  1859. for (func = 1; func <= limit && nent < cpuid->nent; ++func)
  1860. do_cpuid_ent(&cpuid_entries[nent], func, 0,
  1861. &nent, cpuid->nent);
  1862. r = -E2BIG;
  1863. if (nent >= cpuid->nent)
  1864. goto out_free;
  1865. do_cpuid_ent(&cpuid_entries[nent], 0x80000000, 0, &nent, cpuid->nent);
  1866. limit = cpuid_entries[nent - 1].eax;
  1867. for (func = 0x80000001; func <= limit && nent < cpuid->nent; ++func)
  1868. do_cpuid_ent(&cpuid_entries[nent], func, 0,
  1869. &nent, cpuid->nent);
  1870. r = -E2BIG;
  1871. if (nent >= cpuid->nent)
  1872. goto out_free;
  1873. do_cpuid_ent(&cpuid_entries[nent], KVM_CPUID_SIGNATURE, 0, &nent,
  1874. cpuid->nent);
  1875. r = -E2BIG;
  1876. if (nent >= cpuid->nent)
  1877. goto out_free;
  1878. do_cpuid_ent(&cpuid_entries[nent], KVM_CPUID_FEATURES, 0, &nent,
  1879. cpuid->nent);
  1880. r = -E2BIG;
  1881. if (nent >= cpuid->nent)
  1882. goto out_free;
  1883. r = -EFAULT;
  1884. if (copy_to_user(entries, cpuid_entries,
  1885. nent * sizeof(struct kvm_cpuid_entry2)))
  1886. goto out_free;
  1887. cpuid->nent = nent;
  1888. r = 0;
  1889. out_free:
  1890. vfree(cpuid_entries);
  1891. out:
  1892. return r;
  1893. }
static int kvm_vcpu_ioctl_get_lapic(struct kvm_vcpu *vcpu,
				    struct kvm_lapic_state *s)
{
	memcpy(s->regs, vcpu->arch.apic->regs, sizeof *s);

	return 0;
}

static int kvm_vcpu_ioctl_set_lapic(struct kvm_vcpu *vcpu,
				    struct kvm_lapic_state *s)
{
	memcpy(vcpu->arch.apic->regs, s->regs, sizeof *s);
	kvm_apic_post_state_restore(vcpu);
	update_cr8_intercept(vcpu);

	return 0;
}
  1908. static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
  1909. struct kvm_interrupt *irq)
  1910. {
  1911. if (irq->irq < 0 || irq->irq >= 256)
  1912. return -EINVAL;
  1913. if (irqchip_in_kernel(vcpu->kvm))
  1914. return -ENXIO;
  1915. kvm_queue_interrupt(vcpu, irq->irq, false);
  1916. return 0;
  1917. }
  1918. static int kvm_vcpu_ioctl_nmi(struct kvm_vcpu *vcpu)
  1919. {
  1920. kvm_inject_nmi(vcpu);
  1921. return 0;
  1922. }
  1923. static int vcpu_ioctl_tpr_access_reporting(struct kvm_vcpu *vcpu,
  1924. struct kvm_tpr_access_ctl *tac)
  1925. {
  1926. if (tac->flags)
  1927. return -EINVAL;
  1928. vcpu->arch.tpr_access_reporting = !!tac->enabled;
  1929. return 0;
  1930. }
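/*
 * KVM_X86_SETUP_MCE: validate the requested MCE capability (the bank
 * count must be non-zero and below KVM_MAX_MCE_BANKS) and start with
 * MCG_CTL and every per-bank CTL register set to all ones.
 */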
  1931. static int kvm_vcpu_ioctl_x86_setup_mce(struct kvm_vcpu *vcpu,
  1932. u64 mcg_cap)
  1933. {
  1934. int r;
  1935. unsigned bank_num = mcg_cap & 0xff, bank;
  1936. r = -EINVAL;
  1937. if (!bank_num || bank_num >= KVM_MAX_MCE_BANKS)
  1938. goto out;
  1939. if (mcg_cap & ~(KVM_MCE_CAP_SUPPORTED | 0xff | 0xff0000))
  1940. goto out;
  1941. r = 0;
  1942. vcpu->arch.mcg_cap = mcg_cap;
  1943. /* Init IA32_MCG_CTL to all 1s */
  1944. if (mcg_cap & MCG_CTL_P)
  1945. vcpu->arch.mcg_ctl = ~(u64)0;
  1946. /* Init IA32_MCi_CTL to all 1s */
  1947. for (bank = 0; bank < bank_num; bank++)
  1948. vcpu->arch.mce_banks[bank*4] = ~(u64)0;
  1949. out:
  1950. return r;
  1951. }
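/*
 * KVM_X86_SET_MCE: inject a machine-check event.  Uncorrected errors
 * raise #MC in the guest, or a triple fault if an MCE is already in
 * progress (MCIP set) or CR4.MCE is clear; errors whose reporting is
 * disabled via MCG_CTL/MCi_CTL are silently dropped, and corrected
 * errors only update the bank registers.
 */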
  1952. static int kvm_vcpu_ioctl_x86_set_mce(struct kvm_vcpu *vcpu,
  1953. struct kvm_x86_mce *mce)
  1954. {
  1955. u64 mcg_cap = vcpu->arch.mcg_cap;
  1956. unsigned bank_num = mcg_cap & 0xff;
  1957. u64 *banks = vcpu->arch.mce_banks;
  1958. if (mce->bank >= bank_num || !(mce->status & MCI_STATUS_VAL))
  1959. return -EINVAL;
  1960. /*
  1961. * if IA32_MCG_CTL is not all 1s, the uncorrected error
  1962. * reporting is disabled
  1963. */
  1964. if ((mce->status & MCI_STATUS_UC) && (mcg_cap & MCG_CTL_P) &&
  1965. vcpu->arch.mcg_ctl != ~(u64)0)
  1966. return 0;
  1967. banks += 4 * mce->bank;
  1968. /*
  1969. * if IA32_MCi_CTL is not all 1s, the uncorrected error
  1970. * reporting is disabled for the bank
  1971. */
  1972. if ((mce->status & MCI_STATUS_UC) && banks[0] != ~(u64)0)
  1973. return 0;
  1974. if (mce->status & MCI_STATUS_UC) {
  1975. if ((vcpu->arch.mcg_status & MCG_STATUS_MCIP) ||
  1976. !kvm_read_cr4_bits(vcpu, X86_CR4_MCE)) {
  1977. printk(KERN_DEBUG "kvm: set_mce: "
  1978. "injects mce exception while "
  1979. "previous one is in progress!\n");
  1980. set_bit(KVM_REQ_TRIPLE_FAULT, &vcpu->requests);
  1981. return 0;
  1982. }
  1983. if (banks[1] & MCI_STATUS_VAL)
  1984. mce->status |= MCI_STATUS_OVER;
  1985. banks[2] = mce->addr;
  1986. banks[3] = mce->misc;
  1987. vcpu->arch.mcg_status = mce->mcg_status;
  1988. banks[1] = mce->status;
  1989. kvm_queue_exception(vcpu, MC_VECTOR);
  1990. } else if (!(banks[1] & MCI_STATUS_VAL)
  1991. || !(banks[1] & MCI_STATUS_UC)) {
  1992. if (banks[1] & MCI_STATUS_VAL)
  1993. mce->status |= MCI_STATUS_OVER;
  1994. banks[2] = mce->addr;
  1995. banks[3] = mce->misc;
  1996. banks[1] = mce->status;
  1997. } else
  1998. banks[1] |= MCI_STATUS_OVER;
  1999. return 0;
  2000. }
  2001. static void kvm_vcpu_ioctl_x86_get_vcpu_events(struct kvm_vcpu *vcpu,
  2002. struct kvm_vcpu_events *events)
  2003. {
  2004. events->exception.injected =
  2005. vcpu->arch.exception.pending &&
  2006. !kvm_exception_is_soft(vcpu->arch.exception.nr);
  2007. events->exception.nr = vcpu->arch.exception.nr;
  2008. events->exception.has_error_code = vcpu->arch.exception.has_error_code;
  2009. events->exception.error_code = vcpu->arch.exception.error_code;
  2010. events->interrupt.injected =
  2011. vcpu->arch.interrupt.pending && !vcpu->arch.interrupt.soft;
  2012. events->interrupt.nr = vcpu->arch.interrupt.nr;
  2013. events->interrupt.soft = 0;
  2014. events->interrupt.shadow =
  2015. kvm_x86_ops->get_interrupt_shadow(vcpu,
  2016. KVM_X86_SHADOW_INT_MOV_SS | KVM_X86_SHADOW_INT_STI);
  2017. events->nmi.injected = vcpu->arch.nmi_injected;
  2018. events->nmi.pending = vcpu->arch.nmi_pending;
  2019. events->nmi.masked = kvm_x86_ops->get_nmi_mask(vcpu);
  2020. events->sipi_vector = vcpu->arch.sipi_vector;
  2021. events->flags = (KVM_VCPUEVENT_VALID_NMI_PENDING
  2022. | KVM_VCPUEVENT_VALID_SIPI_VECTOR
  2023. | KVM_VCPUEVENT_VALID_SHADOW);
  2024. }
  2025. static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu,
  2026. struct kvm_vcpu_events *events)
  2027. {
  2028. if (events->flags & ~(KVM_VCPUEVENT_VALID_NMI_PENDING
  2029. | KVM_VCPUEVENT_VALID_SIPI_VECTOR
  2030. | KVM_VCPUEVENT_VALID_SHADOW))
  2031. return -EINVAL;
  2032. vcpu->arch.exception.pending = events->exception.injected;
  2033. vcpu->arch.exception.nr = events->exception.nr;
  2034. vcpu->arch.exception.has_error_code = events->exception.has_error_code;
  2035. vcpu->arch.exception.error_code = events->exception.error_code;
  2036. vcpu->arch.interrupt.pending = events->interrupt.injected;
  2037. vcpu->arch.interrupt.nr = events->interrupt.nr;
  2038. vcpu->arch.interrupt.soft = events->interrupt.soft;
  2039. if (vcpu->arch.interrupt.pending && irqchip_in_kernel(vcpu->kvm))
  2040. kvm_pic_clear_isr_ack(vcpu->kvm);
  2041. if (events->flags & KVM_VCPUEVENT_VALID_SHADOW)
  2042. kvm_x86_ops->set_interrupt_shadow(vcpu,
  2043. events->interrupt.shadow);
  2044. vcpu->arch.nmi_injected = events->nmi.injected;
  2045. if (events->flags & KVM_VCPUEVENT_VALID_NMI_PENDING)
  2046. vcpu->arch.nmi_pending = events->nmi.pending;
  2047. kvm_x86_ops->set_nmi_mask(vcpu, events->nmi.masked);
  2048. if (events->flags & KVM_VCPUEVENT_VALID_SIPI_VECTOR)
  2049. vcpu->arch.sipi_vector = events->sipi_vector;
  2050. return 0;
  2051. }
  2052. static void kvm_vcpu_ioctl_x86_get_debugregs(struct kvm_vcpu *vcpu,
  2053. struct kvm_debugregs *dbgregs)
  2054. {
  2055. memcpy(dbgregs->db, vcpu->arch.db, sizeof(vcpu->arch.db));
  2056. dbgregs->dr6 = vcpu->arch.dr6;
  2057. dbgregs->dr7 = vcpu->arch.dr7;
  2058. dbgregs->flags = 0;
  2059. }
  2060. static int kvm_vcpu_ioctl_x86_set_debugregs(struct kvm_vcpu *vcpu,
  2061. struct kvm_debugregs *dbgregs)
  2062. {
  2063. if (dbgregs->flags)
  2064. return -EINVAL;
  2065. memcpy(vcpu->arch.db, dbgregs->db, sizeof(vcpu->arch.db));
  2066. vcpu->arch.dr6 = dbgregs->dr6;
  2067. vcpu->arch.dr7 = dbgregs->dr7;
  2068. return 0;
  2069. }
  2070. long kvm_arch_vcpu_ioctl(struct file *filp,
  2071. unsigned int ioctl, unsigned long arg)
  2072. {
  2073. struct kvm_vcpu *vcpu = filp->private_data;
  2074. void __user *argp = (void __user *)arg;
  2075. int r;
  2076. struct kvm_lapic_state *lapic = NULL;
  2077. switch (ioctl) {
  2078. case KVM_GET_LAPIC: {
  2079. r = -EINVAL;
  2080. if (!vcpu->arch.apic)
  2081. goto out;
  2082. lapic = kzalloc(sizeof(struct kvm_lapic_state), GFP_KERNEL);
  2083. r = -ENOMEM;
  2084. if (!lapic)
  2085. goto out;
  2086. r = kvm_vcpu_ioctl_get_lapic(vcpu, lapic);
  2087. if (r)
  2088. goto out;
  2089. r = -EFAULT;
  2090. if (copy_to_user(argp, lapic, sizeof(struct kvm_lapic_state)))
  2091. goto out;
  2092. r = 0;
  2093. break;
  2094. }
  2095. case KVM_SET_LAPIC: {
  2096. r = -EINVAL;
  2097. if (!vcpu->arch.apic)
  2098. goto out;
  2099. lapic = kmalloc(sizeof(struct kvm_lapic_state), GFP_KERNEL);
  2100. r = -ENOMEM;
  2101. if (!lapic)
  2102. goto out;
  2103. r = -EFAULT;
  2104. if (copy_from_user(lapic, argp, sizeof(struct kvm_lapic_state)))
  2105. goto out;
  2106. r = kvm_vcpu_ioctl_set_lapic(vcpu, lapic);
  2107. if (r)
  2108. goto out;
  2109. r = 0;
  2110. break;
  2111. }
  2112. case KVM_INTERRUPT: {
  2113. struct kvm_interrupt irq;
  2114. r = -EFAULT;
  2115. if (copy_from_user(&irq, argp, sizeof irq))
  2116. goto out;
  2117. r = kvm_vcpu_ioctl_interrupt(vcpu, &irq);
  2118. if (r)
  2119. goto out;
  2120. r = 0;
  2121. break;
  2122. }
  2123. case KVM_NMI: {
  2124. r = kvm_vcpu_ioctl_nmi(vcpu);
  2125. if (r)
  2126. goto out;
  2127. r = 0;
  2128. break;
  2129. }
  2130. case KVM_SET_CPUID: {
  2131. struct kvm_cpuid __user *cpuid_arg = argp;
  2132. struct kvm_cpuid cpuid;
  2133. r = -EFAULT;
  2134. if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
  2135. goto out;
  2136. r = kvm_vcpu_ioctl_set_cpuid(vcpu, &cpuid, cpuid_arg->entries);
  2137. if (r)
  2138. goto out;
  2139. break;
  2140. }
  2141. case KVM_SET_CPUID2: {
  2142. struct kvm_cpuid2 __user *cpuid_arg = argp;
  2143. struct kvm_cpuid2 cpuid;
  2144. r = -EFAULT;
  2145. if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
  2146. goto out;
  2147. r = kvm_vcpu_ioctl_set_cpuid2(vcpu, &cpuid,
  2148. cpuid_arg->entries);
  2149. if (r)
  2150. goto out;
  2151. break;
  2152. }
  2153. case KVM_GET_CPUID2: {
  2154. struct kvm_cpuid2 __user *cpuid_arg = argp;
  2155. struct kvm_cpuid2 cpuid;
  2156. r = -EFAULT;
  2157. if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
  2158. goto out;
  2159. r = kvm_vcpu_ioctl_get_cpuid2(vcpu, &cpuid,
  2160. cpuid_arg->entries);
  2161. if (r)
  2162. goto out;
  2163. r = -EFAULT;
  2164. if (copy_to_user(cpuid_arg, &cpuid, sizeof cpuid))
  2165. goto out;
  2166. r = 0;
  2167. break;
  2168. }
  2169. case KVM_GET_MSRS:
  2170. r = msr_io(vcpu, argp, kvm_get_msr, 1);
  2171. break;
  2172. case KVM_SET_MSRS:
  2173. r = msr_io(vcpu, argp, do_set_msr, 0);
  2174. break;
  2175. case KVM_TPR_ACCESS_REPORTING: {
  2176. struct kvm_tpr_access_ctl tac;
  2177. r = -EFAULT;
  2178. if (copy_from_user(&tac, argp, sizeof tac))
  2179. goto out;
  2180. r = vcpu_ioctl_tpr_access_reporting(vcpu, &tac);
  2181. if (r)
  2182. goto out;
  2183. r = -EFAULT;
  2184. if (copy_to_user(argp, &tac, sizeof tac))
  2185. goto out;
  2186. r = 0;
  2187. break;
  2188. };
  2189. case KVM_SET_VAPIC_ADDR: {
  2190. struct kvm_vapic_addr va;
  2191. r = -EINVAL;
  2192. if (!irqchip_in_kernel(vcpu->kvm))
  2193. goto out;
  2194. r = -EFAULT;
  2195. if (copy_from_user(&va, argp, sizeof va))
  2196. goto out;
  2197. r = 0;
  2198. kvm_lapic_set_vapic_addr(vcpu, va.vapic_addr);
  2199. break;
  2200. }
  2201. case KVM_X86_SETUP_MCE: {
  2202. u64 mcg_cap;
  2203. r = -EFAULT;
  2204. if (copy_from_user(&mcg_cap, argp, sizeof mcg_cap))
  2205. goto out;
  2206. r = kvm_vcpu_ioctl_x86_setup_mce(vcpu, mcg_cap);
  2207. break;
  2208. }
  2209. case KVM_X86_SET_MCE: {
  2210. struct kvm_x86_mce mce;
  2211. r = -EFAULT;
  2212. if (copy_from_user(&mce, argp, sizeof mce))
  2213. goto out;
  2214. r = kvm_vcpu_ioctl_x86_set_mce(vcpu, &mce);
  2215. break;
  2216. }
  2217. case KVM_GET_VCPU_EVENTS: {
  2218. struct kvm_vcpu_events events;
  2219. kvm_vcpu_ioctl_x86_get_vcpu_events(vcpu, &events);
  2220. r = -EFAULT;
  2221. if (copy_to_user(argp, &events, sizeof(struct kvm_vcpu_events)))
  2222. break;
  2223. r = 0;
  2224. break;
  2225. }
  2226. case KVM_SET_VCPU_EVENTS: {
  2227. struct kvm_vcpu_events events;
  2228. r = -EFAULT;
  2229. if (copy_from_user(&events, argp, sizeof(struct kvm_vcpu_events)))
  2230. break;
  2231. r = kvm_vcpu_ioctl_x86_set_vcpu_events(vcpu, &events);
  2232. break;
  2233. }
  2234. case KVM_GET_DEBUGREGS: {
  2235. struct kvm_debugregs dbgregs;
  2236. kvm_vcpu_ioctl_x86_get_debugregs(vcpu, &dbgregs);
  2237. r = -EFAULT;
  2238. if (copy_to_user(argp, &dbgregs,
  2239. sizeof(struct kvm_debugregs)))
  2240. break;
  2241. r = 0;
  2242. break;
  2243. }
  2244. case KVM_SET_DEBUGREGS: {
  2245. struct kvm_debugregs dbgregs;
  2246. r = -EFAULT;
  2247. if (copy_from_user(&dbgregs, argp,
  2248. sizeof(struct kvm_debugregs)))
  2249. break;
  2250. r = kvm_vcpu_ioctl_x86_set_debugregs(vcpu, &dbgregs);
  2251. break;
  2252. }
  2253. default:
  2254. r = -EINVAL;
  2255. }
  2256. out:
  2257. kfree(lapic);
  2258. return r;
  2259. }
  2260. static int kvm_vm_ioctl_set_tss_addr(struct kvm *kvm, unsigned long addr)
  2261. {
  2262. int ret;
  2263. if (addr > (unsigned int)(-3 * PAGE_SIZE))
  2264. return -1;
  2265. ret = kvm_x86_ops->set_tss_addr(kvm, addr);
  2266. return ret;
  2267. }
  2268. static int kvm_vm_ioctl_set_identity_map_addr(struct kvm *kvm,
  2269. u64 ident_addr)
  2270. {
  2271. kvm->arch.ept_identity_map_addr = ident_addr;
  2272. return 0;
  2273. }
  2274. static int kvm_vm_ioctl_set_nr_mmu_pages(struct kvm *kvm,
  2275. u32 kvm_nr_mmu_pages)
  2276. {
  2277. if (kvm_nr_mmu_pages < KVM_MIN_ALLOC_MMU_PAGES)
  2278. return -EINVAL;
  2279. mutex_lock(&kvm->slots_lock);
  2280. spin_lock(&kvm->mmu_lock);
  2281. kvm_mmu_change_mmu_pages(kvm, kvm_nr_mmu_pages);
  2282. kvm->arch.n_requested_mmu_pages = kvm_nr_mmu_pages;
  2283. spin_unlock(&kvm->mmu_lock);
  2284. mutex_unlock(&kvm->slots_lock);
  2285. return 0;
  2286. }
static int kvm_vm_ioctl_get_nr_mmu_pages(struct kvm *kvm)
{
	return kvm->arch.n_alloc_mmu_pages;
}
  2291. gfn_t unalias_gfn_instantiation(struct kvm *kvm, gfn_t gfn)
  2292. {
  2293. int i;
  2294. struct kvm_mem_alias *alias;
  2295. struct kvm_mem_aliases *aliases;
  2296. aliases = kvm_aliases(kvm);
  2297. for (i = 0; i < aliases->naliases; ++i) {
  2298. alias = &aliases->aliases[i];
  2299. if (alias->flags & KVM_ALIAS_INVALID)
  2300. continue;
  2301. if (gfn >= alias->base_gfn
  2302. && gfn < alias->base_gfn + alias->npages)
  2303. return alias->target_gfn + gfn - alias->base_gfn;
  2304. }
  2305. return gfn;
  2306. }
  2307. gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn)
  2308. {
  2309. int i;
  2310. struct kvm_mem_alias *alias;
  2311. struct kvm_mem_aliases *aliases;
  2312. aliases = kvm_aliases(kvm);
  2313. for (i = 0; i < aliases->naliases; ++i) {
  2314. alias = &aliases->aliases[i];
  2315. if (gfn >= alias->base_gfn
  2316. && gfn < alias->base_gfn + alias->npages)
  2317. return alias->target_gfn + gfn - alias->base_gfn;
  2318. }
  2319. return gfn;
  2320. }
  2321. /*
  2322. * Set a new alias region. Aliases map a portion of physical memory into
  2323. * another portion. This is useful for memory windows, for example the PC
  2324. * VGA region.
  2325. */
  2326. static int kvm_vm_ioctl_set_memory_alias(struct kvm *kvm,
  2327. struct kvm_memory_alias *alias)
  2328. {
  2329. int r, n;
  2330. struct kvm_mem_alias *p;
  2331. struct kvm_mem_aliases *aliases, *old_aliases;
  2332. r = -EINVAL;
  2333. /* General sanity checks */
  2334. if (alias->memory_size & (PAGE_SIZE - 1))
  2335. goto out;
  2336. if (alias->guest_phys_addr & (PAGE_SIZE - 1))
  2337. goto out;
  2338. if (alias->slot >= KVM_ALIAS_SLOTS)
  2339. goto out;
  2340. if (alias->guest_phys_addr + alias->memory_size
  2341. < alias->guest_phys_addr)
  2342. goto out;
  2343. if (alias->target_phys_addr + alias->memory_size
  2344. < alias->target_phys_addr)
  2345. goto out;
  2346. r = -ENOMEM;
  2347. aliases = kzalloc(sizeof(struct kvm_mem_aliases), GFP_KERNEL);
  2348. if (!aliases)
  2349. goto out;
  2350. mutex_lock(&kvm->slots_lock);
  2351. /* invalidate any gfn reference in case of deletion/shrinking */
  2352. memcpy(aliases, kvm->arch.aliases, sizeof(struct kvm_mem_aliases));
  2353. aliases->aliases[alias->slot].flags |= KVM_ALIAS_INVALID;
  2354. old_aliases = kvm->arch.aliases;
  2355. rcu_assign_pointer(kvm->arch.aliases, aliases);
  2356. synchronize_srcu_expedited(&kvm->srcu);
  2357. kvm_mmu_zap_all(kvm);
  2358. kfree(old_aliases);
  2359. r = -ENOMEM;
  2360. aliases = kzalloc(sizeof(struct kvm_mem_aliases), GFP_KERNEL);
  2361. if (!aliases)
  2362. goto out_unlock;
  2363. memcpy(aliases, kvm->arch.aliases, sizeof(struct kvm_mem_aliases));
  2364. p = &aliases->aliases[alias->slot];
  2365. p->base_gfn = alias->guest_phys_addr >> PAGE_SHIFT;
  2366. p->npages = alias->memory_size >> PAGE_SHIFT;
  2367. p->target_gfn = alias->target_phys_addr >> PAGE_SHIFT;
  2368. p->flags &= ~(KVM_ALIAS_INVALID);
  2369. for (n = KVM_ALIAS_SLOTS; n > 0; --n)
  2370. if (aliases->aliases[n - 1].npages)
  2371. break;
  2372. aliases->naliases = n;
  2373. old_aliases = kvm->arch.aliases;
  2374. rcu_assign_pointer(kvm->arch.aliases, aliases);
  2375. synchronize_srcu_expedited(&kvm->srcu);
  2376. kfree(old_aliases);
  2377. r = 0;
  2378. out_unlock:
  2379. mutex_unlock(&kvm->slots_lock);
  2380. out:
  2381. return r;
  2382. }
  2383. static int kvm_vm_ioctl_get_irqchip(struct kvm *kvm, struct kvm_irqchip *chip)
  2384. {
  2385. int r;
  2386. r = 0;
  2387. switch (chip->chip_id) {
  2388. case KVM_IRQCHIP_PIC_MASTER:
  2389. memcpy(&chip->chip.pic,
  2390. &pic_irqchip(kvm)->pics[0],
  2391. sizeof(struct kvm_pic_state));
  2392. break;
  2393. case KVM_IRQCHIP_PIC_SLAVE:
  2394. memcpy(&chip->chip.pic,
  2395. &pic_irqchip(kvm)->pics[1],
  2396. sizeof(struct kvm_pic_state));
  2397. break;
  2398. case KVM_IRQCHIP_IOAPIC:
  2399. r = kvm_get_ioapic(kvm, &chip->chip.ioapic);
  2400. break;
  2401. default:
  2402. r = -EINVAL;
  2403. break;
  2404. }
  2405. return r;
  2406. }
  2407. static int kvm_vm_ioctl_set_irqchip(struct kvm *kvm, struct kvm_irqchip *chip)
  2408. {
  2409. int r;
  2410. r = 0;
  2411. switch (chip->chip_id) {
  2412. case KVM_IRQCHIP_PIC_MASTER:
  2413. raw_spin_lock(&pic_irqchip(kvm)->lock);
  2414. memcpy(&pic_irqchip(kvm)->pics[0],
  2415. &chip->chip.pic,
  2416. sizeof(struct kvm_pic_state));
  2417. raw_spin_unlock(&pic_irqchip(kvm)->lock);
  2418. break;
  2419. case KVM_IRQCHIP_PIC_SLAVE:
  2420. raw_spin_lock(&pic_irqchip(kvm)->lock);
  2421. memcpy(&pic_irqchip(kvm)->pics[1],
  2422. &chip->chip.pic,
  2423. sizeof(struct kvm_pic_state));
  2424. raw_spin_unlock(&pic_irqchip(kvm)->lock);
  2425. break;
  2426. case KVM_IRQCHIP_IOAPIC:
  2427. r = kvm_set_ioapic(kvm, &chip->chip.ioapic);
  2428. break;
  2429. default:
  2430. r = -EINVAL;
  2431. break;
  2432. }
  2433. kvm_pic_update_irq(pic_irqchip(kvm));
  2434. return r;
  2435. }
  2436. static int kvm_vm_ioctl_get_pit(struct kvm *kvm, struct kvm_pit_state *ps)
  2437. {
  2438. int r = 0;
  2439. mutex_lock(&kvm->arch.vpit->pit_state.lock);
  2440. memcpy(ps, &kvm->arch.vpit->pit_state, sizeof(struct kvm_pit_state));
  2441. mutex_unlock(&kvm->arch.vpit->pit_state.lock);
  2442. return r;
  2443. }
  2444. static int kvm_vm_ioctl_set_pit(struct kvm *kvm, struct kvm_pit_state *ps)
  2445. {
  2446. int r = 0;
  2447. mutex_lock(&kvm->arch.vpit->pit_state.lock);
  2448. memcpy(&kvm->arch.vpit->pit_state, ps, sizeof(struct kvm_pit_state));
  2449. kvm_pit_load_count(kvm, 0, ps->channels[0].count, 0);
  2450. mutex_unlock(&kvm->arch.vpit->pit_state.lock);
  2451. return r;
  2452. }
  2453. static int kvm_vm_ioctl_get_pit2(struct kvm *kvm, struct kvm_pit_state2 *ps)
  2454. {
  2455. int r = 0;
  2456. mutex_lock(&kvm->arch.vpit->pit_state.lock);
  2457. memcpy(ps->channels, &kvm->arch.vpit->pit_state.channels,
  2458. sizeof(ps->channels));
  2459. ps->flags = kvm->arch.vpit->pit_state.flags;
  2460. mutex_unlock(&kvm->arch.vpit->pit_state.lock);
  2461. return r;
  2462. }
  2463. static int kvm_vm_ioctl_set_pit2(struct kvm *kvm, struct kvm_pit_state2 *ps)
  2464. {
  2465. int r = 0, start = 0;
  2466. u32 prev_legacy, cur_legacy;
  2467. mutex_lock(&kvm->arch.vpit->pit_state.lock);
  2468. prev_legacy = kvm->arch.vpit->pit_state.flags & KVM_PIT_FLAGS_HPET_LEGACY;
  2469. cur_legacy = ps->flags & KVM_PIT_FLAGS_HPET_LEGACY;
  2470. if (!prev_legacy && cur_legacy)
  2471. start = 1;
  2472. memcpy(&kvm->arch.vpit->pit_state.channels, &ps->channels,
  2473. sizeof(kvm->arch.vpit->pit_state.channels));
  2474. kvm->arch.vpit->pit_state.flags = ps->flags;
  2475. kvm_pit_load_count(kvm, 0, kvm->arch.vpit->pit_state.channels[0].count, start);
  2476. mutex_unlock(&kvm->arch.vpit->pit_state.lock);
  2477. return r;
  2478. }
  2479. static int kvm_vm_ioctl_reinject(struct kvm *kvm,
  2480. struct kvm_reinject_control *control)
  2481. {
  2482. if (!kvm->arch.vpit)
  2483. return -ENXIO;
  2484. mutex_lock(&kvm->arch.vpit->pit_state.lock);
  2485. kvm->arch.vpit->pit_state.pit_timer.reinject = control->pit_reinject;
  2486. mutex_unlock(&kvm->arch.vpit->pit_state.lock);
  2487. return 0;
  2488. }
  2489. /*
  2490. * Get (and clear) the dirty memory log for a memory slot.
  2491. */
  2492. int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
  2493. struct kvm_dirty_log *log)
  2494. {
  2495. int r, i;
  2496. struct kvm_memory_slot *memslot;
  2497. unsigned long n;
  2498. unsigned long is_dirty = 0;
  2499. mutex_lock(&kvm->slots_lock);
  2500. r = -EINVAL;
  2501. if (log->slot >= KVM_MEMORY_SLOTS)
  2502. goto out;
  2503. memslot = &kvm->memslots->memslots[log->slot];
  2504. r = -ENOENT;
  2505. if (!memslot->dirty_bitmap)
  2506. goto out;
  2507. n = kvm_dirty_bitmap_bytes(memslot);
  2508. for (i = 0; !is_dirty && i < n/sizeof(long); i++)
  2509. is_dirty = memslot->dirty_bitmap[i];
  2510. /* If nothing is dirty, don't bother messing with page tables. */
  2511. if (is_dirty) {
  2512. struct kvm_memslots *slots, *old_slots;
  2513. unsigned long *dirty_bitmap;
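/*
 * Rather than clearing the live bitmap, install a fresh zeroed bitmap
 * in a copy of the memslots, publish it with rcu_assign_pointer() and
 * wait for readers with synchronize_srcu_expedited(); the old bitmap
 * can then be copied to userspace without racing against new dirty
 * bits.
 */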
  2514. spin_lock(&kvm->mmu_lock);
  2515. kvm_mmu_slot_remove_write_access(kvm, log->slot);
  2516. spin_unlock(&kvm->mmu_lock);
  2517. r = -ENOMEM;
  2518. dirty_bitmap = vmalloc(n);
  2519. if (!dirty_bitmap)
  2520. goto out;
  2521. memset(dirty_bitmap, 0, n);
  2522. r = -ENOMEM;
  2523. slots = kzalloc(sizeof(struct kvm_memslots), GFP_KERNEL);
  2524. if (!slots) {
  2525. vfree(dirty_bitmap);
  2526. goto out;
  2527. }
  2528. memcpy(slots, kvm->memslots, sizeof(struct kvm_memslots));
  2529. slots->memslots[log->slot].dirty_bitmap = dirty_bitmap;
  2530. old_slots = kvm->memslots;
  2531. rcu_assign_pointer(kvm->memslots, slots);
  2532. synchronize_srcu_expedited(&kvm->srcu);
  2533. dirty_bitmap = old_slots->memslots[log->slot].dirty_bitmap;
  2534. kfree(old_slots);
  2535. r = -EFAULT;
  2536. if (copy_to_user(log->dirty_bitmap, dirty_bitmap, n)) {
  2537. vfree(dirty_bitmap);
  2538. goto out;
  2539. }
  2540. vfree(dirty_bitmap);
  2541. } else {
  2542. r = -EFAULT;
  2543. if (clear_user(log->dirty_bitmap, n))
  2544. goto out;
  2545. }
  2546. r = 0;
  2547. out:
  2548. mutex_unlock(&kvm->slots_lock);
  2549. return r;
  2550. }
  2551. long kvm_arch_vm_ioctl(struct file *filp,
  2552. unsigned int ioctl, unsigned long arg)
  2553. {
  2554. struct kvm *kvm = filp->private_data;
  2555. void __user *argp = (void __user *)arg;
  2556. int r = -ENOTTY;
  2557. /*
  2558. * This union makes it completely explicit to gcc-3.x
  2559. * that these two variables' stack usage should be
  2560. * combined, not added together.
  2561. */
  2562. union {
  2563. struct kvm_pit_state ps;
  2564. struct kvm_pit_state2 ps2;
  2565. struct kvm_memory_alias alias;
  2566. struct kvm_pit_config pit_config;
  2567. } u;
  2568. switch (ioctl) {
  2569. case KVM_SET_TSS_ADDR:
  2570. r = kvm_vm_ioctl_set_tss_addr(kvm, arg);
  2571. if (r < 0)
  2572. goto out;
  2573. break;
  2574. case KVM_SET_IDENTITY_MAP_ADDR: {
  2575. u64 ident_addr;
  2576. r = -EFAULT;
  2577. if (copy_from_user(&ident_addr, argp, sizeof ident_addr))
  2578. goto out;
  2579. r = kvm_vm_ioctl_set_identity_map_addr(kvm, ident_addr);
  2580. if (r < 0)
  2581. goto out;
  2582. break;
  2583. }
  2584. case KVM_SET_MEMORY_REGION: {
  2585. struct kvm_memory_region kvm_mem;
  2586. struct kvm_userspace_memory_region kvm_userspace_mem;
  2587. r = -EFAULT;
  2588. if (copy_from_user(&kvm_mem, argp, sizeof kvm_mem))
  2589. goto out;
  2590. kvm_userspace_mem.slot = kvm_mem.slot;
  2591. kvm_userspace_mem.flags = kvm_mem.flags;
  2592. kvm_userspace_mem.guest_phys_addr = kvm_mem.guest_phys_addr;
  2593. kvm_userspace_mem.memory_size = kvm_mem.memory_size;
  2594. r = kvm_vm_ioctl_set_memory_region(kvm, &kvm_userspace_mem, 0);
  2595. if (r)
  2596. goto out;
  2597. break;
  2598. }
  2599. case KVM_SET_NR_MMU_PAGES:
  2600. r = kvm_vm_ioctl_set_nr_mmu_pages(kvm, arg);
  2601. if (r)
  2602. goto out;
  2603. break;
  2604. case KVM_GET_NR_MMU_PAGES:
  2605. r = kvm_vm_ioctl_get_nr_mmu_pages(kvm);
  2606. break;
  2607. case KVM_SET_MEMORY_ALIAS:
  2608. r = -EFAULT;
  2609. if (copy_from_user(&u.alias, argp, sizeof(struct kvm_memory_alias)))
  2610. goto out;
  2611. r = kvm_vm_ioctl_set_memory_alias(kvm, &u.alias);
  2612. if (r)
  2613. goto out;
  2614. break;
  2615. case KVM_CREATE_IRQCHIP: {
  2616. struct kvm_pic *vpic;
  2617. mutex_lock(&kvm->lock);
  2618. r = -EEXIST;
  2619. if (kvm->arch.vpic)
  2620. goto create_irqchip_unlock;
  2621. r = -ENOMEM;
  2622. vpic = kvm_create_pic(kvm);
  2623. if (vpic) {
  2624. r = kvm_ioapic_init(kvm);
  2625. if (r) {
  2626. kvm_io_bus_unregister_dev(kvm, KVM_PIO_BUS,
  2627. &vpic->dev);
  2628. kfree(vpic);
  2629. goto create_irqchip_unlock;
  2630. }
  2631. } else
  2632. goto create_irqchip_unlock;
  2633. smp_wmb();
  2634. kvm->arch.vpic = vpic;
  2635. smp_wmb();
  2636. r = kvm_setup_default_irq_routing(kvm);
  2637. if (r) {
  2638. mutex_lock(&kvm->irq_lock);
  2639. kvm_ioapic_destroy(kvm);
  2640. kvm_destroy_pic(kvm);
  2641. mutex_unlock(&kvm->irq_lock);
  2642. }
  2643. create_irqchip_unlock:
  2644. mutex_unlock(&kvm->lock);
  2645. break;
  2646. }
  2647. case KVM_CREATE_PIT:
  2648. u.pit_config.flags = KVM_PIT_SPEAKER_DUMMY;
  2649. goto create_pit;
  2650. case KVM_CREATE_PIT2:
  2651. r = -EFAULT;
  2652. if (copy_from_user(&u.pit_config, argp,
  2653. sizeof(struct kvm_pit_config)))
  2654. goto out;
  2655. create_pit:
  2656. mutex_lock(&kvm->slots_lock);
  2657. r = -EEXIST;
  2658. if (kvm->arch.vpit)
  2659. goto create_pit_unlock;
  2660. r = -ENOMEM;
  2661. kvm->arch.vpit = kvm_create_pit(kvm, u.pit_config.flags);
  2662. if (kvm->arch.vpit)
  2663. r = 0;
  2664. create_pit_unlock:
  2665. mutex_unlock(&kvm->slots_lock);
  2666. break;
  2667. case KVM_IRQ_LINE_STATUS:
  2668. case KVM_IRQ_LINE: {
  2669. struct kvm_irq_level irq_event;
  2670. r = -EFAULT;
  2671. if (copy_from_user(&irq_event, argp, sizeof irq_event))
  2672. goto out;
  2673. r = -ENXIO;
  2674. if (irqchip_in_kernel(kvm)) {
  2675. __s32 status;
  2676. status = kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID,
  2677. irq_event.irq, irq_event.level);
  2678. if (ioctl == KVM_IRQ_LINE_STATUS) {
  2679. r = -EFAULT;
  2680. irq_event.status = status;
  2681. if (copy_to_user(argp, &irq_event,
  2682. sizeof irq_event))
  2683. goto out;
  2684. }
  2685. r = 0;
  2686. }
  2687. break;
  2688. }
  2689. case KVM_GET_IRQCHIP: {
  2690. /* 0: PIC master, 1: PIC slave, 2: IOAPIC */
  2691. struct kvm_irqchip *chip = kmalloc(sizeof(*chip), GFP_KERNEL);
  2692. r = -ENOMEM;
  2693. if (!chip)
  2694. goto out;
  2695. r = -EFAULT;
  2696. if (copy_from_user(chip, argp, sizeof *chip))
  2697. goto get_irqchip_out;
  2698. r = -ENXIO;
  2699. if (!irqchip_in_kernel(kvm))
  2700. goto get_irqchip_out;
  2701. r = kvm_vm_ioctl_get_irqchip(kvm, chip);
  2702. if (r)
  2703. goto get_irqchip_out;
  2704. r = -EFAULT;
  2705. if (copy_to_user(argp, chip, sizeof *chip))
  2706. goto get_irqchip_out;
  2707. r = 0;
  2708. get_irqchip_out:
  2709. kfree(chip);
  2710. if (r)
  2711. goto out;
  2712. break;
  2713. }
  2714. case KVM_SET_IRQCHIP: {
  2715. /* 0: PIC master, 1: PIC slave, 2: IOAPIC */
  2716. struct kvm_irqchip *chip = kmalloc(sizeof(*chip), GFP_KERNEL);
  2717. r = -ENOMEM;
  2718. if (!chip)
  2719. goto out;
  2720. r = -EFAULT;
  2721. if (copy_from_user(chip, argp, sizeof *chip))
  2722. goto set_irqchip_out;
  2723. r = -ENXIO;
  2724. if (!irqchip_in_kernel(kvm))
  2725. goto set_irqchip_out;
  2726. r = kvm_vm_ioctl_set_irqchip(kvm, chip);
  2727. if (r)
  2728. goto set_irqchip_out;
  2729. r = 0;
  2730. set_irqchip_out:
  2731. kfree(chip);
  2732. if (r)
  2733. goto out;
  2734. break;
  2735. }
  2736. case KVM_GET_PIT: {
  2737. r = -EFAULT;
  2738. if (copy_from_user(&u.ps, argp, sizeof(struct kvm_pit_state)))
  2739. goto out;
  2740. r = -ENXIO;
  2741. if (!kvm->arch.vpit)
  2742. goto out;
  2743. r = kvm_vm_ioctl_get_pit(kvm, &u.ps);
  2744. if (r)
  2745. goto out;
  2746. r = -EFAULT;
  2747. if (copy_to_user(argp, &u.ps, sizeof(struct kvm_pit_state)))
  2748. goto out;
  2749. r = 0;
  2750. break;
  2751. }
  2752. case KVM_SET_PIT: {
  2753. r = -EFAULT;
  2754. if (copy_from_user(&u.ps, argp, sizeof u.ps))
  2755. goto out;
  2756. r = -ENXIO;
  2757. if (!kvm->arch.vpit)
  2758. goto out;
  2759. r = kvm_vm_ioctl_set_pit(kvm, &u.ps);
  2760. if (r)
  2761. goto out;
  2762. r = 0;
  2763. break;
  2764. }
  2765. case KVM_GET_PIT2: {
  2766. r = -ENXIO;
  2767. if (!kvm->arch.vpit)
  2768. goto out;
  2769. r = kvm_vm_ioctl_get_pit2(kvm, &u.ps2);
  2770. if (r)
  2771. goto out;
  2772. r = -EFAULT;
  2773. if (copy_to_user(argp, &u.ps2, sizeof(u.ps2)))
  2774. goto out;
  2775. r = 0;
  2776. break;
  2777. }
  2778. case KVM_SET_PIT2: {
  2779. r = -EFAULT;
  2780. if (copy_from_user(&u.ps2, argp, sizeof(u.ps2)))
  2781. goto out;
  2782. r = -ENXIO;
  2783. if (!kvm->arch.vpit)
  2784. goto out;
  2785. r = kvm_vm_ioctl_set_pit2(kvm, &u.ps2);
  2786. if (r)
  2787. goto out;
  2788. r = 0;
  2789. break;
  2790. }
  2791. case KVM_REINJECT_CONTROL: {
  2792. struct kvm_reinject_control control;
  2793. r = -EFAULT;
  2794. if (copy_from_user(&control, argp, sizeof(control)))
  2795. goto out;
  2796. r = kvm_vm_ioctl_reinject(kvm, &control);
  2797. if (r)
  2798. goto out;
  2799. r = 0;
  2800. break;
  2801. }
  2802. case KVM_XEN_HVM_CONFIG: {
  2803. r = -EFAULT;
  2804. if (copy_from_user(&kvm->arch.xen_hvm_config, argp,
  2805. sizeof(struct kvm_xen_hvm_config)))
  2806. goto out;
  2807. r = -EINVAL;
  2808. if (kvm->arch.xen_hvm_config.flags)
  2809. goto out;
  2810. r = 0;
  2811. break;
  2812. }
  2813. case KVM_SET_CLOCK: {
  2814. struct timespec now;
  2815. struct kvm_clock_data user_ns;
  2816. u64 now_ns;
  2817. s64 delta;
  2818. r = -EFAULT;
  2819. if (copy_from_user(&user_ns, argp, sizeof(user_ns)))
  2820. goto out;
  2821. r = -EINVAL;
  2822. if (user_ns.flags)
  2823. goto out;
  2824. r = 0;
  2825. ktime_get_ts(&now);
  2826. now_ns = timespec_to_ns(&now);
  2827. delta = user_ns.clock - now_ns;
  2828. kvm->arch.kvmclock_offset = delta;
  2829. break;
  2830. }
  2831. case KVM_GET_CLOCK: {
  2832. struct timespec now;
  2833. struct kvm_clock_data user_ns;
  2834. u64 now_ns;
  2835. ktime_get_ts(&now);
  2836. now_ns = timespec_to_ns(&now);
  2837. user_ns.clock = kvm->arch.kvmclock_offset + now_ns;
  2838. user_ns.flags = 0;
  2839. r = -EFAULT;
  2840. if (copy_to_user(argp, &user_ns, sizeof(user_ns)))
  2841. goto out;
  2842. r = 0;
  2843. break;
  2844. }
  2845. default:
  2846. ;
  2847. }
  2848. out:
  2849. return r;
  2850. }
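/*
 * Illustrative sketch (not part of x86.c): the KVM_SET_CLOCK handler above
 * stores only an offset (requested guest clock minus the host's monotonic
 * clock in ns), and KVM_GET_CLOCK adds that offset back.  The standalone
 * userspace program below mirrors the same arithmetic with clock_gettime()
 * so the save/restore round trip is easy to see.  Names such as
 * set_guest_clock() are made up for the example.
 */
#include <stdint.h>
#include <stdio.h>
#include <time.h>

static int64_t kvmclock_offset;    /* plays the role of kvm->arch.kvmclock_offset */

static uint64_t host_monotonic_ns(void)
{
	struct timespec now;

	clock_gettime(CLOCK_MONOTONIC, &now);
	return (uint64_t)now.tv_sec * 1000000000ull + now.tv_nsec;
}

static void set_guest_clock(uint64_t guest_ns)    /* ~ KVM_SET_CLOCK */
{
	kvmclock_offset = (int64_t)(guest_ns - host_monotonic_ns());
}

static uint64_t get_guest_clock(void)             /* ~ KVM_GET_CLOCK */
{
	return host_monotonic_ns() + kvmclock_offset;
}

int main(void)
{
	set_guest_clock(123456789ull);            /* restore a saved guest clock */
	printf("guest clock now: %llu ns\n",
	       (unsigned long long)get_guest_clock());
	return 0;
}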
  2851. static void kvm_init_msr_list(void)
  2852. {
  2853. u32 dummy[2];
  2854. unsigned i, j;
2855. /* skip the first MSRs in the list; they are KVM-specific */
  2856. for (i = j = KVM_SAVE_MSRS_BEGIN; i < ARRAY_SIZE(msrs_to_save); i++) {
  2857. if (rdmsr_safe(msrs_to_save[i], &dummy[0], &dummy[1]) < 0)
  2858. continue;
  2859. if (j < i)
  2860. msrs_to_save[j] = msrs_to_save[i];
  2861. j++;
  2862. }
  2863. num_msrs_to_save = j;
  2864. }
  2865. static int vcpu_mmio_write(struct kvm_vcpu *vcpu, gpa_t addr, int len,
  2866. const void *v)
  2867. {
  2868. if (vcpu->arch.apic &&
  2869. !kvm_iodevice_write(&vcpu->arch.apic->dev, addr, len, v))
  2870. return 0;
  2871. return kvm_io_bus_write(vcpu->kvm, KVM_MMIO_BUS, addr, len, v);
  2872. }
  2873. static int vcpu_mmio_read(struct kvm_vcpu *vcpu, gpa_t addr, int len, void *v)
  2874. {
  2875. if (vcpu->arch.apic &&
  2876. !kvm_iodevice_read(&vcpu->arch.apic->dev, addr, len, v))
  2877. return 0;
  2878. return kvm_io_bus_read(vcpu->kvm, KVM_MMIO_BUS, addr, len, v);
  2879. }
  2880. static void kvm_set_segment(struct kvm_vcpu *vcpu,
  2881. struct kvm_segment *var, int seg)
  2882. {
  2883. kvm_x86_ops->set_segment(vcpu, var, seg);
  2884. }
  2885. void kvm_get_segment(struct kvm_vcpu *vcpu,
  2886. struct kvm_segment *var, int seg)
  2887. {
  2888. kvm_x86_ops->get_segment(vcpu, var, seg);
  2889. }
  2890. gpa_t kvm_mmu_gva_to_gpa_read(struct kvm_vcpu *vcpu, gva_t gva, u32 *error)
  2891. {
  2892. u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
  2893. return vcpu->arch.mmu.gva_to_gpa(vcpu, gva, access, error);
  2894. }
  2895. gpa_t kvm_mmu_gva_to_gpa_fetch(struct kvm_vcpu *vcpu, gva_t gva, u32 *error)
  2896. {
  2897. u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
  2898. access |= PFERR_FETCH_MASK;
  2899. return vcpu->arch.mmu.gva_to_gpa(vcpu, gva, access, error);
  2900. }
  2901. gpa_t kvm_mmu_gva_to_gpa_write(struct kvm_vcpu *vcpu, gva_t gva, u32 *error)
  2902. {
  2903. u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
  2904. access |= PFERR_WRITE_MASK;
  2905. return vcpu->arch.mmu.gva_to_gpa(vcpu, gva, access, error);
  2906. }
2907. /* used to access any guest's mapped memory without checking CPL */
  2908. gpa_t kvm_mmu_gva_to_gpa_system(struct kvm_vcpu *vcpu, gva_t gva, u32 *error)
  2909. {
  2910. return vcpu->arch.mmu.gva_to_gpa(vcpu, gva, 0, error);
  2911. }
  2912. static int kvm_read_guest_virt_helper(gva_t addr, void *val, unsigned int bytes,
  2913. struct kvm_vcpu *vcpu, u32 access,
  2914. u32 *error)
  2915. {
  2916. void *data = val;
  2917. int r = X86EMUL_CONTINUE;
  2918. while (bytes) {
  2919. gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr, access, error);
  2920. unsigned offset = addr & (PAGE_SIZE-1);
  2921. unsigned toread = min(bytes, (unsigned)PAGE_SIZE - offset);
  2922. int ret;
  2923. if (gpa == UNMAPPED_GVA) {
  2924. r = X86EMUL_PROPAGATE_FAULT;
  2925. goto out;
  2926. }
  2927. ret = kvm_read_guest(vcpu->kvm, gpa, data, toread);
  2928. if (ret < 0) {
  2929. r = X86EMUL_IO_NEEDED;
  2930. goto out;
  2931. }
  2932. bytes -= toread;
  2933. data += toread;
  2934. addr += toread;
  2935. }
  2936. out:
  2937. return r;
  2938. }
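/*
 * Illustrative sketch (not part of x86.c): kvm_read_guest_virt_helper() above
 * copies at most "PAGE_SIZE - offset" bytes per iteration because every guest
 * virtual page may translate to a different physical page.  This standalone
 * program shows only that chunking arithmetic; the real code would translate
 * and copy each chunk in place of the printf().
 */
#include <stdio.h>

#define EXAMPLE_PAGE_SIZE 4096u

static void read_chunked(unsigned long addr, unsigned int bytes)
{
	while (bytes) {
		unsigned int offset = addr & (EXAMPLE_PAGE_SIZE - 1);
		unsigned int chunk  = EXAMPLE_PAGE_SIZE - offset;

		if (chunk > bytes)
			chunk = bytes;

		printf("copy %4u bytes at 0x%lx (page offset %u)\n",
		       chunk, addr, offset);

		bytes -= chunk;
		addr  += chunk;
	}
}

int main(void)
{
	/* a 10000-byte read starting 100 bytes before a page boundary */
	read_chunked(0x7000 - 100, 10000);
	return 0;
}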
  2939. /* used for instruction fetching */
  2940. static int kvm_fetch_guest_virt(gva_t addr, void *val, unsigned int bytes,
  2941. struct kvm_vcpu *vcpu, u32 *error)
  2942. {
  2943. u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
  2944. return kvm_read_guest_virt_helper(addr, val, bytes, vcpu,
  2945. access | PFERR_FETCH_MASK, error);
  2946. }
  2947. static int kvm_read_guest_virt(gva_t addr, void *val, unsigned int bytes,
  2948. struct kvm_vcpu *vcpu, u32 *error)
  2949. {
  2950. u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
  2951. return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, access,
  2952. error);
  2953. }
  2954. static int kvm_read_guest_virt_system(gva_t addr, void *val, unsigned int bytes,
  2955. struct kvm_vcpu *vcpu, u32 *error)
  2956. {
  2957. return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, 0, error);
  2958. }
  2959. static int kvm_write_guest_virt_system(gva_t addr, void *val,
  2960. unsigned int bytes,
  2961. struct kvm_vcpu *vcpu,
  2962. u32 *error)
  2963. {
  2964. void *data = val;
  2965. int r = X86EMUL_CONTINUE;
  2966. while (bytes) {
  2967. gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr,
  2968. PFERR_WRITE_MASK, error);
  2969. unsigned offset = addr & (PAGE_SIZE-1);
  2970. unsigned towrite = min(bytes, (unsigned)PAGE_SIZE - offset);
  2971. int ret;
  2972. if (gpa == UNMAPPED_GVA) {
  2973. r = X86EMUL_PROPAGATE_FAULT;
  2974. goto out;
  2975. }
  2976. ret = kvm_write_guest(vcpu->kvm, gpa, data, towrite);
  2977. if (ret < 0) {
  2978. r = X86EMUL_IO_NEEDED;
  2979. goto out;
  2980. }
  2981. bytes -= towrite;
  2982. data += towrite;
  2983. addr += towrite;
  2984. }
  2985. out:
  2986. return r;
  2987. }
  2988. static int emulator_read_emulated(unsigned long addr,
  2989. void *val,
  2990. unsigned int bytes,
  2991. unsigned int *error_code,
  2992. struct kvm_vcpu *vcpu)
  2993. {
  2994. gpa_t gpa;
  2995. if (vcpu->mmio_read_completed) {
  2996. memcpy(val, vcpu->mmio_data, bytes);
  2997. trace_kvm_mmio(KVM_TRACE_MMIO_READ, bytes,
  2998. vcpu->mmio_phys_addr, *(u64 *)val);
  2999. vcpu->mmio_read_completed = 0;
  3000. return X86EMUL_CONTINUE;
  3001. }
  3002. gpa = kvm_mmu_gva_to_gpa_read(vcpu, addr, error_code);
  3003. if (gpa == UNMAPPED_GVA)
  3004. return X86EMUL_PROPAGATE_FAULT;
  3005. /* For APIC access vmexit */
  3006. if ((gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE)
  3007. goto mmio;
  3008. if (kvm_read_guest_virt(addr, val, bytes, vcpu, NULL)
  3009. == X86EMUL_CONTINUE)
  3010. return X86EMUL_CONTINUE;
  3011. mmio:
  3012. /*
  3013. * Is this MMIO handled locally?
  3014. */
  3015. if (!vcpu_mmio_read(vcpu, gpa, bytes, val)) {
  3016. trace_kvm_mmio(KVM_TRACE_MMIO_READ, bytes, gpa, *(u64 *)val);
  3017. return X86EMUL_CONTINUE;
  3018. }
  3019. trace_kvm_mmio(KVM_TRACE_MMIO_READ_UNSATISFIED, bytes, gpa, 0);
  3020. vcpu->mmio_needed = 1;
  3021. vcpu->run->exit_reason = KVM_EXIT_MMIO;
  3022. vcpu->run->mmio.phys_addr = vcpu->mmio_phys_addr = gpa;
  3023. vcpu->run->mmio.len = vcpu->mmio_size = bytes;
  3024. vcpu->run->mmio.is_write = vcpu->mmio_is_write = 0;
  3025. return X86EMUL_IO_NEEDED;
  3026. }
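/*
 * Illustrative sketch (not part of x86.c): when the read above cannot be
 * satisfied in the kernel, vcpu->run is filled in and KVM_RUN returns to
 * userspace with KVM_EXIT_MMIO.  A VMM would then do roughly the fragment
 * below before re-entering; struct kvm_run and KVM_EXIT_MMIO are the real
 * KVM userspace ABI, while handle_device_read()/write() are hypothetical
 * device-model stubs invented for the example.
 */
#include <linux/kvm.h>
#include <stdio.h>
#include <string.h>

/* hypothetical device-model callbacks */
static void handle_device_write(__u64 addr, const void *data, __u32 len)
{
	printf("MMIO write %u bytes @ 0x%llx\n", len, (unsigned long long)addr);
}

static void handle_device_read(__u64 addr, void *data, __u32 len)
{
	memset(data, 0xff, len);	/* pretend the device returned all-ones */
}

static void handle_mmio_exit(struct kvm_run *run)
{
	if (run->exit_reason != KVM_EXIT_MMIO)
		return;

	if (run->mmio.is_write) {
		/* guest wrote: forward the bytes to the device model */
		handle_device_write(run->mmio.phys_addr,
				    run->mmio.data, run->mmio.len);
	} else {
		/* guest read: the answer is left in run->mmio.data and is
		 * picked up by the mmio_read_completed path on re-entry */
		handle_device_read(run->mmio.phys_addr,
				   run->mmio.data, run->mmio.len);
	}
}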
  3027. int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
  3028. const void *val, int bytes)
  3029. {
  3030. int ret;
  3031. ret = kvm_write_guest(vcpu->kvm, gpa, val, bytes);
  3032. if (ret < 0)
  3033. return 0;
  3034. kvm_mmu_pte_write(vcpu, gpa, val, bytes, 1);
  3035. return 1;
  3036. }
  3037. static int emulator_write_emulated_onepage(unsigned long addr,
  3038. const void *val,
  3039. unsigned int bytes,
  3040. unsigned int *error_code,
  3041. struct kvm_vcpu *vcpu)
  3042. {
  3043. gpa_t gpa;
  3044. gpa = kvm_mmu_gva_to_gpa_write(vcpu, addr, error_code);
  3045. if (gpa == UNMAPPED_GVA)
  3046. return X86EMUL_PROPAGATE_FAULT;
  3047. /* For APIC access vmexit */
  3048. if ((gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE)
  3049. goto mmio;
  3050. if (emulator_write_phys(vcpu, gpa, val, bytes))
  3051. return X86EMUL_CONTINUE;
  3052. mmio:
  3053. trace_kvm_mmio(KVM_TRACE_MMIO_WRITE, bytes, gpa, *(u64 *)val);
  3054. /*
  3055. * Is this MMIO handled locally?
  3056. */
  3057. if (!vcpu_mmio_write(vcpu, gpa, bytes, val))
  3058. return X86EMUL_CONTINUE;
  3059. vcpu->mmio_needed = 1;
  3060. vcpu->run->exit_reason = KVM_EXIT_MMIO;
  3061. vcpu->run->mmio.phys_addr = vcpu->mmio_phys_addr = gpa;
  3062. vcpu->run->mmio.len = vcpu->mmio_size = bytes;
  3063. vcpu->run->mmio.is_write = vcpu->mmio_is_write = 1;
  3064. memcpy(vcpu->run->mmio.data, val, bytes);
  3065. return X86EMUL_CONTINUE;
  3066. }
  3067. int emulator_write_emulated(unsigned long addr,
  3068. const void *val,
  3069. unsigned int bytes,
  3070. unsigned int *error_code,
  3071. struct kvm_vcpu *vcpu)
  3072. {
  3073. /* Crossing a page boundary? */
  3074. if (((addr + bytes - 1) ^ addr) & PAGE_MASK) {
  3075. int rc, now;
  3076. now = -addr & ~PAGE_MASK;
  3077. rc = emulator_write_emulated_onepage(addr, val, now, error_code,
  3078. vcpu);
  3079. if (rc != X86EMUL_CONTINUE)
  3080. return rc;
  3081. addr += now;
  3082. val += now;
  3083. bytes -= now;
  3084. }
  3085. return emulator_write_emulated_onepage(addr, val, bytes, error_code,
  3086. vcpu);
  3087. }
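/*
 * Illustrative sketch (not part of x86.c): emulator_write_emulated() above
 * detects a page-crossing access with ((addr + bytes - 1) ^ addr) & PAGE_MASK
 * and uses "-addr & ~PAGE_MASK" as the size of the first piece.  This
 * standalone program prints how a few writes would be split, assuming a
 * 4 KiB example page size.
 */
#include <stdio.h>

#define EX_PAGE_SIZE 4096ul
#define EX_PAGE_MASK (~(EX_PAGE_SIZE - 1))

static void show_split(unsigned long addr, unsigned int bytes)
{
	if (((addr + bytes - 1) ^ addr) & EX_PAGE_MASK) {
		unsigned int first = -addr & ~EX_PAGE_MASK;

		printf("0x%lx+%u crosses: %u bytes, then %u bytes\n",
		       addr, bytes, first, bytes - first);
	} else {
		printf("0x%lx+%u stays in one page\n", addr, bytes);
	}
}

int main(void)
{
	show_split(0x1ffe, 4);	/* crosses: 2 bytes + 2 bytes */
	show_split(0x2000, 8);	/* fits entirely in one page  */
	return 0;
}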
  3088. #define CMPXCHG_TYPE(t, ptr, old, new) \
  3089. (cmpxchg((t *)(ptr), *(t *)(old), *(t *)(new)) == *(t *)(old))
  3090. #ifdef CONFIG_X86_64
  3091. # define CMPXCHG64(ptr, old, new) CMPXCHG_TYPE(u64, ptr, old, new)
  3092. #else
  3093. # define CMPXCHG64(ptr, old, new) \
  3094. (cmpxchg64((u64 *)(ptr), *(u64 *)(old), *(u64 *)(new)) == *(u64 *)(old))
  3095. #endif
  3096. static int emulator_cmpxchg_emulated(unsigned long addr,
  3097. const void *old,
  3098. const void *new,
  3099. unsigned int bytes,
  3100. unsigned int *error_code,
  3101. struct kvm_vcpu *vcpu)
  3102. {
  3103. gpa_t gpa;
  3104. struct page *page;
  3105. char *kaddr;
  3106. bool exchanged;
3107. /* a guest's cmpxchg8b has to be emulated atomically */
  3108. if (bytes > 8 || (bytes & (bytes - 1)))
  3109. goto emul_write;
  3110. gpa = kvm_mmu_gva_to_gpa_write(vcpu, addr, NULL);
  3111. if (gpa == UNMAPPED_GVA ||
  3112. (gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE)
  3113. goto emul_write;
  3114. if (((gpa + bytes - 1) & PAGE_MASK) != (gpa & PAGE_MASK))
  3115. goto emul_write;
  3116. page = gfn_to_page(vcpu->kvm, gpa >> PAGE_SHIFT);
  3117. kaddr = kmap_atomic(page, KM_USER0);
  3118. kaddr += offset_in_page(gpa);
  3119. switch (bytes) {
  3120. case 1:
  3121. exchanged = CMPXCHG_TYPE(u8, kaddr, old, new);
  3122. break;
  3123. case 2:
  3124. exchanged = CMPXCHG_TYPE(u16, kaddr, old, new);
  3125. break;
  3126. case 4:
  3127. exchanged = CMPXCHG_TYPE(u32, kaddr, old, new);
  3128. break;
  3129. case 8:
  3130. exchanged = CMPXCHG64(kaddr, old, new);
  3131. break;
  3132. default:
  3133. BUG();
  3134. }
  3135. kunmap_atomic(kaddr, KM_USER0);
  3136. kvm_release_page_dirty(page);
  3137. if (!exchanged)
  3138. return X86EMUL_CMPXCHG_FAILED;
  3139. kvm_mmu_pte_write(vcpu, gpa, new, bytes, 1);
  3140. return X86EMUL_CONTINUE;
  3141. emul_write:
  3142. printk_once(KERN_WARNING "kvm: emulating exchange as write\n");
  3143. return emulator_write_emulated(addr, new, bytes, error_code, vcpu);
  3144. }
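/*
 * Illustrative sketch (not part of x86.c): the CMPXCHG_TYPE()/CMPXCHG64()
 * macros above perform a host atomic compare-and-swap of the same width as
 * the guest's cmpxchg so the emulation stays atomic against other vCPUs.
 * The standalone program below shows the equivalent operation with GCC's
 * __sync_bool_compare_and_swap builtin (assuming a GCC-compatible compiler;
 * this is not the kernel's cmpxchg()).
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t mem = 0x1122334455667788ull;	/* "guest" memory word     */
	uint64_t old = 0x1122334455667788ull;	/* expected old value      */
	uint64_t new = 0xdeadbeefcafef00dull;	/* value the guest stores  */

	int exchanged = __sync_bool_compare_and_swap(&mem, old, new);

	printf("exchanged=%d mem=0x%016llx\n",
	       exchanged, (unsigned long long)mem);
	return 0;
}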
  3145. static int kernel_pio(struct kvm_vcpu *vcpu, void *pd)
  3146. {
3147. /* TODO: string I/O for in-kernel devices */
  3148. int r;
  3149. if (vcpu->arch.pio.in)
  3150. r = kvm_io_bus_read(vcpu->kvm, KVM_PIO_BUS, vcpu->arch.pio.port,
  3151. vcpu->arch.pio.size, pd);
  3152. else
  3153. r = kvm_io_bus_write(vcpu->kvm, KVM_PIO_BUS,
  3154. vcpu->arch.pio.port, vcpu->arch.pio.size,
  3155. pd);
  3156. return r;
  3157. }
  3158. static int emulator_pio_in_emulated(int size, unsigned short port, void *val,
  3159. unsigned int count, struct kvm_vcpu *vcpu)
  3160. {
  3161. if (vcpu->arch.pio.count)
  3162. goto data_avail;
  3163. trace_kvm_pio(1, port, size, 1);
  3164. vcpu->arch.pio.port = port;
  3165. vcpu->arch.pio.in = 1;
  3166. vcpu->arch.pio.count = count;
  3167. vcpu->arch.pio.size = size;
  3168. if (!kernel_pio(vcpu, vcpu->arch.pio_data)) {
  3169. data_avail:
  3170. memcpy(val, vcpu->arch.pio_data, size * count);
  3171. vcpu->arch.pio.count = 0;
  3172. return 1;
  3173. }
  3174. vcpu->run->exit_reason = KVM_EXIT_IO;
  3175. vcpu->run->io.direction = KVM_EXIT_IO_IN;
  3176. vcpu->run->io.size = size;
  3177. vcpu->run->io.data_offset = KVM_PIO_PAGE_OFFSET * PAGE_SIZE;
  3178. vcpu->run->io.count = count;
  3179. vcpu->run->io.port = port;
  3180. return 0;
  3181. }
  3182. static int emulator_pio_out_emulated(int size, unsigned short port,
  3183. const void *val, unsigned int count,
  3184. struct kvm_vcpu *vcpu)
  3185. {
  3186. trace_kvm_pio(0, port, size, 1);
  3187. vcpu->arch.pio.port = port;
  3188. vcpu->arch.pio.in = 0;
  3189. vcpu->arch.pio.count = count;
  3190. vcpu->arch.pio.size = size;
  3191. memcpy(vcpu->arch.pio_data, val, size * count);
  3192. if (!kernel_pio(vcpu, vcpu->arch.pio_data)) {
  3193. vcpu->arch.pio.count = 0;
  3194. return 1;
  3195. }
  3196. vcpu->run->exit_reason = KVM_EXIT_IO;
  3197. vcpu->run->io.direction = KVM_EXIT_IO_OUT;
  3198. vcpu->run->io.size = size;
  3199. vcpu->run->io.data_offset = KVM_PIO_PAGE_OFFSET * PAGE_SIZE;
  3200. vcpu->run->io.count = count;
  3201. vcpu->run->io.port = port;
  3202. return 0;
  3203. }
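/*
 * Illustrative sketch (not part of x86.c): for port I/O the kernel stages
 * the bytes in the pio_data page and reports its location to userspace via
 * run->io.data_offset (a byte offset into the mmap()ed kvm_run region).
 * The fragment below shows how a VMM typically walks that buffer; struct
 * kvm_run and KVM_EXIT_IO_IN/OUT are the real ABI, while
 * emulate_port_in()/out() are hypothetical device-model stubs.
 */
#include <linux/kvm.h>
#include <stdint.h>

/* hypothetical device-model callbacks */
static void emulate_port_out(uint16_t port, const void *data, uint8_t size) { }
static void emulate_port_in(uint16_t port, void *data, uint8_t size) { }

static void handle_io_exit(struct kvm_run *run)
{
	uint8_t *data = (uint8_t *)run + run->io.data_offset;
	uint32_t i;

	for (i = 0; i < run->io.count; i++, data += run->io.size) {
		if (run->io.direction == KVM_EXIT_IO_OUT)
			emulate_port_out(run->io.port, data, run->io.size);
		else
			emulate_port_in(run->io.port, data, run->io.size);
	}
}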
  3204. static unsigned long get_segment_base(struct kvm_vcpu *vcpu, int seg)
  3205. {
  3206. return kvm_x86_ops->get_segment_base(vcpu, seg);
  3207. }
  3208. int emulate_invlpg(struct kvm_vcpu *vcpu, gva_t address)
  3209. {
  3210. kvm_mmu_invlpg(vcpu, address);
  3211. return X86EMUL_CONTINUE;
  3212. }
  3213. int emulate_clts(struct kvm_vcpu *vcpu)
  3214. {
  3215. kvm_x86_ops->set_cr0(vcpu, kvm_read_cr0_bits(vcpu, ~X86_CR0_TS));
  3216. kvm_x86_ops->fpu_activate(vcpu);
  3217. return X86EMUL_CONTINUE;
  3218. }
  3219. int emulator_get_dr(int dr, unsigned long *dest, struct kvm_vcpu *vcpu)
  3220. {
  3221. return _kvm_get_dr(vcpu, dr, dest);
  3222. }
  3223. int emulator_set_dr(int dr, unsigned long value, struct kvm_vcpu *vcpu)
  3224. {
  3225. return __kvm_set_dr(vcpu, dr, value);
  3226. }
  3227. static u64 mk_cr_64(u64 curr_cr, u32 new_val)
  3228. {
  3229. return (curr_cr & ~((1ULL << 32) - 1)) | new_val;
  3230. }
  3231. static unsigned long emulator_get_cr(int cr, struct kvm_vcpu *vcpu)
  3232. {
  3233. unsigned long value;
  3234. switch (cr) {
  3235. case 0:
  3236. value = kvm_read_cr0(vcpu);
  3237. break;
  3238. case 2:
  3239. value = vcpu->arch.cr2;
  3240. break;
  3241. case 3:
  3242. value = vcpu->arch.cr3;
  3243. break;
  3244. case 4:
  3245. value = kvm_read_cr4(vcpu);
  3246. break;
  3247. case 8:
  3248. value = kvm_get_cr8(vcpu);
  3249. break;
  3250. default:
  3251. vcpu_printf(vcpu, "%s: unexpected cr %u\n", __func__, cr);
  3252. return 0;
  3253. }
  3254. return value;
  3255. }
  3256. static int emulator_set_cr(int cr, unsigned long val, struct kvm_vcpu *vcpu)
  3257. {
  3258. int res = 0;
  3259. switch (cr) {
  3260. case 0:
  3261. res = kvm_set_cr0(vcpu, mk_cr_64(kvm_read_cr0(vcpu), val));
  3262. break;
  3263. case 2:
  3264. vcpu->arch.cr2 = val;
  3265. break;
  3266. case 3:
  3267. res = kvm_set_cr3(vcpu, val);
  3268. break;
  3269. case 4:
  3270. res = kvm_set_cr4(vcpu, mk_cr_64(kvm_read_cr4(vcpu), val));
  3271. break;
  3272. case 8:
  3273. res = __kvm_set_cr8(vcpu, val & 0xfUL);
  3274. break;
  3275. default:
  3276. vcpu_printf(vcpu, "%s: unexpected cr %u\n", __func__, cr);
  3277. res = -1;
  3278. }
  3279. return res;
  3280. }
  3281. static int emulator_get_cpl(struct kvm_vcpu *vcpu)
  3282. {
  3283. return kvm_x86_ops->get_cpl(vcpu);
  3284. }
  3285. static void emulator_get_gdt(struct desc_ptr *dt, struct kvm_vcpu *vcpu)
  3286. {
  3287. kvm_x86_ops->get_gdt(vcpu, dt);
  3288. }
  3289. static unsigned long emulator_get_cached_segment_base(int seg,
  3290. struct kvm_vcpu *vcpu)
  3291. {
  3292. return get_segment_base(vcpu, seg);
  3293. }
  3294. static bool emulator_get_cached_descriptor(struct desc_struct *desc, int seg,
  3295. struct kvm_vcpu *vcpu)
  3296. {
  3297. struct kvm_segment var;
  3298. kvm_get_segment(vcpu, &var, seg);
  3299. if (var.unusable)
  3300. return false;
  3301. if (var.g)
  3302. var.limit >>= 12;
  3303. set_desc_limit(desc, var.limit);
  3304. set_desc_base(desc, (unsigned long)var.base);
  3305. desc->type = var.type;
  3306. desc->s = var.s;
  3307. desc->dpl = var.dpl;
  3308. desc->p = var.present;
  3309. desc->avl = var.avl;
  3310. desc->l = var.l;
  3311. desc->d = var.db;
  3312. desc->g = var.g;
  3313. return true;
  3314. }
  3315. static void emulator_set_cached_descriptor(struct desc_struct *desc, int seg,
  3316. struct kvm_vcpu *vcpu)
  3317. {
  3318. struct kvm_segment var;
  3319. /* needed to preserve selector */
  3320. kvm_get_segment(vcpu, &var, seg);
  3321. var.base = get_desc_base(desc);
  3322. var.limit = get_desc_limit(desc);
  3323. if (desc->g)
  3324. var.limit = (var.limit << 12) | 0xfff;
  3325. var.type = desc->type;
  3326. var.present = desc->p;
  3327. var.dpl = desc->dpl;
  3328. var.db = desc->d;
  3329. var.s = desc->s;
  3330. var.l = desc->l;
  3331. var.g = desc->g;
  3332. var.avl = desc->avl;
  3333. var.present = desc->p;
  3334. var.unusable = !var.present;
  3335. var.padding = 0;
  3336. kvm_set_segment(vcpu, &var, seg);
  3337. return;
  3338. }
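/*
 * Illustrative sketch (not part of x86.c): the two helpers above convert
 * between the byte-granular limit kept in struct kvm_segment and the 20-bit,
 * possibly page-granular, limit stored in a descriptor.  With the G bit set
 * the descriptor holds limit >> 12, and expanding it back gives
 * (limit << 12) | 0xfff.  This standalone program shows the round trip.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t byte_limit = 0xffffffff;		  /* flat 4 GiB segment */
	uint32_t desc_limit = byte_limit >> 12;		  /* stored in descriptor (G=1) */
	uint32_t expanded   = (desc_limit << 12) | 0xfff; /* what comes back out */

	printf("descriptor limit 0x%05x -> byte limit 0x%08x\n",
	       desc_limit, expanded);
	return 0;
}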
  3339. static u16 emulator_get_segment_selector(int seg, struct kvm_vcpu *vcpu)
  3340. {
  3341. struct kvm_segment kvm_seg;
  3342. kvm_get_segment(vcpu, &kvm_seg, seg);
  3343. return kvm_seg.selector;
  3344. }
  3345. static void emulator_set_segment_selector(u16 sel, int seg,
  3346. struct kvm_vcpu *vcpu)
  3347. {
  3348. struct kvm_segment kvm_seg;
  3349. kvm_get_segment(vcpu, &kvm_seg, seg);
  3350. kvm_seg.selector = sel;
  3351. kvm_set_segment(vcpu, &kvm_seg, seg);
  3352. }
  3353. static struct x86_emulate_ops emulate_ops = {
  3354. .read_std = kvm_read_guest_virt_system,
  3355. .write_std = kvm_write_guest_virt_system,
  3356. .fetch = kvm_fetch_guest_virt,
  3357. .read_emulated = emulator_read_emulated,
  3358. .write_emulated = emulator_write_emulated,
  3359. .cmpxchg_emulated = emulator_cmpxchg_emulated,
  3360. .pio_in_emulated = emulator_pio_in_emulated,
  3361. .pio_out_emulated = emulator_pio_out_emulated,
  3362. .get_cached_descriptor = emulator_get_cached_descriptor,
  3363. .set_cached_descriptor = emulator_set_cached_descriptor,
  3364. .get_segment_selector = emulator_get_segment_selector,
  3365. .set_segment_selector = emulator_set_segment_selector,
  3366. .get_cached_segment_base = emulator_get_cached_segment_base,
  3367. .get_gdt = emulator_get_gdt,
  3368. .get_cr = emulator_get_cr,
  3369. .set_cr = emulator_set_cr,
  3370. .cpl = emulator_get_cpl,
  3371. .get_dr = emulator_get_dr,
  3372. .set_dr = emulator_set_dr,
  3373. .set_msr = kvm_set_msr,
  3374. .get_msr = kvm_get_msr,
  3375. };
  3376. static void cache_all_regs(struct kvm_vcpu *vcpu)
  3377. {
  3378. kvm_register_read(vcpu, VCPU_REGS_RAX);
  3379. kvm_register_read(vcpu, VCPU_REGS_RSP);
  3380. kvm_register_read(vcpu, VCPU_REGS_RIP);
  3381. vcpu->arch.regs_dirty = ~0;
  3382. }
  3383. static void toggle_interruptibility(struct kvm_vcpu *vcpu, u32 mask)
  3384. {
  3385. u32 int_shadow = kvm_x86_ops->get_interrupt_shadow(vcpu, mask);
  3386. /*
3387. * an sti; sti; sequence only disables interrupts for the first
3388. * instruction. So, if the last instruction, be it emulated or
3389. * not, left the system with the INT_STI flag enabled, it
3390. * means that the last instruction was an sti. We should not
3391. * leave the flag on in this case. The same goes for mov ss.
  3392. */
  3393. if (!(int_shadow & mask))
  3394. kvm_x86_ops->set_interrupt_shadow(vcpu, mask);
  3395. }
  3396. static void inject_emulated_exception(struct kvm_vcpu *vcpu)
  3397. {
  3398. struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt;
  3399. if (ctxt->exception == PF_VECTOR)
  3400. kvm_inject_page_fault(vcpu, ctxt->cr2, ctxt->error_code);
  3401. else if (ctxt->error_code_valid)
  3402. kvm_queue_exception_e(vcpu, ctxt->exception, ctxt->error_code);
  3403. else
  3404. kvm_queue_exception(vcpu, ctxt->exception);
  3405. }
  3406. static int handle_emulation_failure(struct kvm_vcpu *vcpu)
  3407. {
  3408. ++vcpu->stat.insn_emulation_fail;
  3409. trace_kvm_emulate_insn_failed(vcpu);
  3410. vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
  3411. vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
  3412. vcpu->run->internal.ndata = 0;
  3413. kvm_queue_exception(vcpu, UD_VECTOR);
  3414. return EMULATE_FAIL;
  3415. }
  3416. int emulate_instruction(struct kvm_vcpu *vcpu,
  3417. unsigned long cr2,
  3418. u16 error_code,
  3419. int emulation_type)
  3420. {
  3421. int r;
  3422. struct decode_cache *c = &vcpu->arch.emulate_ctxt.decode;
  3423. kvm_clear_exception_queue(vcpu);
  3424. vcpu->arch.mmio_fault_cr2 = cr2;
  3425. /*
  3426. * TODO: fix emulate.c to use guest_read/write_register
3427. * instead of direct ->regs accesses; this can save a hundred cycles
3428. * on Intel for instructions that don't read/change RSP, for
3429. * example.
  3430. */
  3431. cache_all_regs(vcpu);
  3432. if (!(emulation_type & EMULTYPE_NO_DECODE)) {
  3433. int cs_db, cs_l;
  3434. kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
  3435. vcpu->arch.emulate_ctxt.vcpu = vcpu;
  3436. vcpu->arch.emulate_ctxt.eflags = kvm_x86_ops->get_rflags(vcpu);
  3437. vcpu->arch.emulate_ctxt.eip = kvm_rip_read(vcpu);
  3438. vcpu->arch.emulate_ctxt.mode =
  3439. (!is_protmode(vcpu)) ? X86EMUL_MODE_REAL :
  3440. (vcpu->arch.emulate_ctxt.eflags & X86_EFLAGS_VM)
  3441. ? X86EMUL_MODE_VM86 : cs_l
  3442. ? X86EMUL_MODE_PROT64 : cs_db
  3443. ? X86EMUL_MODE_PROT32 : X86EMUL_MODE_PROT16;
  3444. memset(c, 0, sizeof(struct decode_cache));
  3445. memcpy(c->regs, vcpu->arch.regs, sizeof c->regs);
  3446. vcpu->arch.emulate_ctxt.interruptibility = 0;
  3447. vcpu->arch.emulate_ctxt.exception = -1;
  3448. r = x86_decode_insn(&vcpu->arch.emulate_ctxt, &emulate_ops);
  3449. trace_kvm_emulate_insn_start(vcpu);
  3450. /* Only allow emulation of specific instructions on #UD
3451. * (namely VMMCALL, sysenter, sysexit, syscall) */
  3452. if (emulation_type & EMULTYPE_TRAP_UD) {
  3453. if (!c->twobyte)
  3454. return EMULATE_FAIL;
  3455. switch (c->b) {
  3456. case 0x01: /* VMMCALL */
  3457. if (c->modrm_mod != 3 || c->modrm_rm != 1)
  3458. return EMULATE_FAIL;
  3459. break;
  3460. case 0x34: /* sysenter */
  3461. case 0x35: /* sysexit */
  3462. if (c->modrm_mod != 0 || c->modrm_rm != 0)
  3463. return EMULATE_FAIL;
  3464. break;
  3465. case 0x05: /* syscall */
  3466. if (c->modrm_mod != 0 || c->modrm_rm != 0)
  3467. return EMULATE_FAIL;
  3468. break;
  3469. default:
  3470. return EMULATE_FAIL;
  3471. }
  3472. if (!(c->modrm_reg == 0 || c->modrm_reg == 3))
  3473. return EMULATE_FAIL;
  3474. }
  3475. ++vcpu->stat.insn_emulation;
  3476. if (r) {
  3477. if (kvm_mmu_unprotect_page_virt(vcpu, cr2))
  3478. return EMULATE_DONE;
  3479. if (emulation_type & EMULTYPE_SKIP)
  3480. return EMULATE_FAIL;
  3481. return handle_emulation_failure(vcpu);
  3482. }
  3483. }
  3484. if (emulation_type & EMULTYPE_SKIP) {
  3485. kvm_rip_write(vcpu, vcpu->arch.emulate_ctxt.decode.eip);
  3486. return EMULATE_DONE;
  3487. }
3488. /* this is needed for the vmware backdoor interface to work since it
3489. changes register values during IO operations */
  3490. memcpy(c->regs, vcpu->arch.regs, sizeof c->regs);
  3491. restart:
  3492. r = x86_emulate_insn(&vcpu->arch.emulate_ctxt, &emulate_ops);
  3493. if (r) { /* emulation failed */
  3494. /*
3495. * if emulation was due to an access to a shadowed page table
3496. * and it failed, try to unshadow the page and re-enter the
3497. * guest to let the CPU execute the instruction.
  3498. */
  3499. if (kvm_mmu_unprotect_page_virt(vcpu, cr2))
  3500. return EMULATE_DONE;
  3501. return handle_emulation_failure(vcpu);
  3502. }
  3503. toggle_interruptibility(vcpu, vcpu->arch.emulate_ctxt.interruptibility);
  3504. kvm_x86_ops->set_rflags(vcpu, vcpu->arch.emulate_ctxt.eflags);
  3505. memcpy(vcpu->arch.regs, c->regs, sizeof c->regs);
  3506. kvm_rip_write(vcpu, vcpu->arch.emulate_ctxt.eip);
  3507. if (vcpu->arch.emulate_ctxt.exception >= 0) {
  3508. inject_emulated_exception(vcpu);
  3509. return EMULATE_DONE;
  3510. }
  3511. if (vcpu->arch.pio.count) {
  3512. if (!vcpu->arch.pio.in)
  3513. vcpu->arch.pio.count = 0;
  3514. return EMULATE_DO_MMIO;
  3515. }
  3516. if (vcpu->mmio_needed) {
  3517. if (vcpu->mmio_is_write)
  3518. vcpu->mmio_needed = 0;
  3519. return EMULATE_DO_MMIO;
  3520. }
  3521. if (vcpu->arch.emulate_ctxt.restart)
  3522. goto restart;
  3523. return EMULATE_DONE;
  3524. }
  3525. EXPORT_SYMBOL_GPL(emulate_instruction);
  3526. int kvm_fast_pio_out(struct kvm_vcpu *vcpu, int size, unsigned short port)
  3527. {
  3528. unsigned long val = kvm_register_read(vcpu, VCPU_REGS_RAX);
  3529. int ret = emulator_pio_out_emulated(size, port, &val, 1, vcpu);
  3530. /* do not return to emulator after return from userspace */
  3531. vcpu->arch.pio.count = 0;
  3532. return ret;
  3533. }
  3534. EXPORT_SYMBOL_GPL(kvm_fast_pio_out);
  3535. static void bounce_off(void *info)
  3536. {
  3537. /* nothing */
  3538. }
  3539. static int kvmclock_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
  3540. void *data)
  3541. {
  3542. struct cpufreq_freqs *freq = data;
  3543. struct kvm *kvm;
  3544. struct kvm_vcpu *vcpu;
  3545. int i, send_ipi = 0;
  3546. if (val == CPUFREQ_PRECHANGE && freq->old > freq->new)
  3547. return 0;
  3548. if (val == CPUFREQ_POSTCHANGE && freq->old < freq->new)
  3549. return 0;
  3550. per_cpu(cpu_tsc_khz, freq->cpu) = freq->new;
  3551. spin_lock(&kvm_lock);
  3552. list_for_each_entry(kvm, &vm_list, vm_list) {
  3553. kvm_for_each_vcpu(i, vcpu, kvm) {
  3554. if (vcpu->cpu != freq->cpu)
  3555. continue;
  3556. if (!kvm_request_guest_time_update(vcpu))
  3557. continue;
  3558. if (vcpu->cpu != smp_processor_id())
  3559. send_ipi++;
  3560. }
  3561. }
  3562. spin_unlock(&kvm_lock);
  3563. if (freq->old < freq->new && send_ipi) {
  3564. /*
3565. * We upscale the frequency. We must make sure the guest
3566. * doesn't see old kvmclock values while running with
3567. * the new frequency; otherwise we risk the guest seeing
3568. * time go backwards.
  3569. *
  3570. * In case we update the frequency for another cpu
  3571. * (which might be in guest context) send an interrupt
  3572. * to kick the cpu out of guest context. Next time
  3573. * guest context is entered kvmclock will be updated,
  3574. * so the guest will not see stale values.
  3575. */
  3576. smp_call_function_single(freq->cpu, bounce_off, NULL, 1);
  3577. }
  3578. return 0;
  3579. }
  3580. static struct notifier_block kvmclock_cpufreq_notifier_block = {
  3581. .notifier_call = kvmclock_cpufreq_notifier
  3582. };
  3583. static void kvm_timer_init(void)
  3584. {
  3585. int cpu;
  3586. if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) {
  3587. cpufreq_register_notifier(&kvmclock_cpufreq_notifier_block,
  3588. CPUFREQ_TRANSITION_NOTIFIER);
  3589. for_each_online_cpu(cpu) {
  3590. unsigned long khz = cpufreq_get(cpu);
  3591. if (!khz)
  3592. khz = tsc_khz;
  3593. per_cpu(cpu_tsc_khz, cpu) = khz;
  3594. }
  3595. } else {
  3596. for_each_possible_cpu(cpu)
  3597. per_cpu(cpu_tsc_khz, cpu) = tsc_khz;
  3598. }
  3599. }
  3600. static DEFINE_PER_CPU(struct kvm_vcpu *, current_vcpu);
  3601. static int kvm_is_in_guest(void)
  3602. {
  3603. return percpu_read(current_vcpu) != NULL;
  3604. }
  3605. static int kvm_is_user_mode(void)
  3606. {
  3607. int user_mode = 3;
  3608. if (percpu_read(current_vcpu))
  3609. user_mode = kvm_x86_ops->get_cpl(percpu_read(current_vcpu));
  3610. return user_mode != 0;
  3611. }
  3612. static unsigned long kvm_get_guest_ip(void)
  3613. {
  3614. unsigned long ip = 0;
  3615. if (percpu_read(current_vcpu))
  3616. ip = kvm_rip_read(percpu_read(current_vcpu));
  3617. return ip;
  3618. }
  3619. static struct perf_guest_info_callbacks kvm_guest_cbs = {
  3620. .is_in_guest = kvm_is_in_guest,
  3621. .is_user_mode = kvm_is_user_mode,
  3622. .get_guest_ip = kvm_get_guest_ip,
  3623. };
  3624. void kvm_before_handle_nmi(struct kvm_vcpu *vcpu)
  3625. {
  3626. percpu_write(current_vcpu, vcpu);
  3627. }
  3628. EXPORT_SYMBOL_GPL(kvm_before_handle_nmi);
  3629. void kvm_after_handle_nmi(struct kvm_vcpu *vcpu)
  3630. {
  3631. percpu_write(current_vcpu, NULL);
  3632. }
  3633. EXPORT_SYMBOL_GPL(kvm_after_handle_nmi);
  3634. int kvm_arch_init(void *opaque)
  3635. {
  3636. int r;
  3637. struct kvm_x86_ops *ops = (struct kvm_x86_ops *)opaque;
  3638. if (kvm_x86_ops) {
  3639. printk(KERN_ERR "kvm: already loaded the other module\n");
  3640. r = -EEXIST;
  3641. goto out;
  3642. }
  3643. if (!ops->cpu_has_kvm_support()) {
  3644. printk(KERN_ERR "kvm: no hardware support\n");
  3645. r = -EOPNOTSUPP;
  3646. goto out;
  3647. }
  3648. if (ops->disabled_by_bios()) {
  3649. printk(KERN_ERR "kvm: disabled by bios\n");
  3650. r = -EOPNOTSUPP;
  3651. goto out;
  3652. }
  3653. r = kvm_mmu_module_init();
  3654. if (r)
  3655. goto out;
  3656. kvm_init_msr_list();
  3657. kvm_x86_ops = ops;
  3658. kvm_mmu_set_nonpresent_ptes(0ull, 0ull);
  3659. kvm_mmu_set_base_ptes(PT_PRESENT_MASK);
  3660. kvm_mmu_set_mask_ptes(PT_USER_MASK, PT_ACCESSED_MASK,
  3661. PT_DIRTY_MASK, PT64_NX_MASK, 0);
  3662. kvm_timer_init();
  3663. perf_register_guest_info_callbacks(&kvm_guest_cbs);
  3664. if (cpu_has_xsave)
  3665. host_xcr0 = xgetbv(XCR_XFEATURE_ENABLED_MASK);
  3666. return 0;
  3667. out:
  3668. return r;
  3669. }
  3670. void kvm_arch_exit(void)
  3671. {
  3672. perf_unregister_guest_info_callbacks(&kvm_guest_cbs);
  3673. if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
  3674. cpufreq_unregister_notifier(&kvmclock_cpufreq_notifier_block,
  3675. CPUFREQ_TRANSITION_NOTIFIER);
  3676. kvm_x86_ops = NULL;
  3677. kvm_mmu_module_exit();
  3678. }
  3679. int kvm_emulate_halt(struct kvm_vcpu *vcpu)
  3680. {
  3681. ++vcpu->stat.halt_exits;
  3682. if (irqchip_in_kernel(vcpu->kvm)) {
  3683. vcpu->arch.mp_state = KVM_MP_STATE_HALTED;
  3684. return 1;
  3685. } else {
  3686. vcpu->run->exit_reason = KVM_EXIT_HLT;
  3687. return 0;
  3688. }
  3689. }
  3690. EXPORT_SYMBOL_GPL(kvm_emulate_halt);
  3691. static inline gpa_t hc_gpa(struct kvm_vcpu *vcpu, unsigned long a0,
  3692. unsigned long a1)
  3693. {
  3694. if (is_long_mode(vcpu))
  3695. return a0;
  3696. else
  3697. return a0 | ((gpa_t)a1 << 32);
  3698. }
  3699. int kvm_hv_hypercall(struct kvm_vcpu *vcpu)
  3700. {
  3701. u64 param, ingpa, outgpa, ret;
  3702. uint16_t code, rep_idx, rep_cnt, res = HV_STATUS_SUCCESS, rep_done = 0;
  3703. bool fast, longmode;
  3704. int cs_db, cs_l;
  3705. /*
3706. * a hypercall generates #UD from non-zero CPL or from real mode,
3707. * per the Hyper-V spec
  3708. */
  3709. if (kvm_x86_ops->get_cpl(vcpu) != 0 || !is_protmode(vcpu)) {
  3710. kvm_queue_exception(vcpu, UD_VECTOR);
  3711. return 0;
  3712. }
  3713. kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
  3714. longmode = is_long_mode(vcpu) && cs_l == 1;
  3715. if (!longmode) {
  3716. param = ((u64)kvm_register_read(vcpu, VCPU_REGS_RDX) << 32) |
  3717. (kvm_register_read(vcpu, VCPU_REGS_RAX) & 0xffffffff);
  3718. ingpa = ((u64)kvm_register_read(vcpu, VCPU_REGS_RBX) << 32) |
  3719. (kvm_register_read(vcpu, VCPU_REGS_RCX) & 0xffffffff);
  3720. outgpa = ((u64)kvm_register_read(vcpu, VCPU_REGS_RDI) << 32) |
  3721. (kvm_register_read(vcpu, VCPU_REGS_RSI) & 0xffffffff);
  3722. }
  3723. #ifdef CONFIG_X86_64
  3724. else {
  3725. param = kvm_register_read(vcpu, VCPU_REGS_RCX);
  3726. ingpa = kvm_register_read(vcpu, VCPU_REGS_RDX);
  3727. outgpa = kvm_register_read(vcpu, VCPU_REGS_R8);
  3728. }
  3729. #endif
  3730. code = param & 0xffff;
  3731. fast = (param >> 16) & 0x1;
  3732. rep_cnt = (param >> 32) & 0xfff;
  3733. rep_idx = (param >> 48) & 0xfff;
  3734. trace_kvm_hv_hypercall(code, fast, rep_cnt, rep_idx, ingpa, outgpa);
  3735. switch (code) {
  3736. case HV_X64_HV_NOTIFY_LONG_SPIN_WAIT:
  3737. kvm_vcpu_on_spin(vcpu);
  3738. break;
  3739. default:
  3740. res = HV_STATUS_INVALID_HYPERCALL_CODE;
  3741. break;
  3742. }
  3743. ret = res | (((u64)rep_done & 0xfff) << 32);
  3744. if (longmode) {
  3745. kvm_register_write(vcpu, VCPU_REGS_RAX, ret);
  3746. } else {
  3747. kvm_register_write(vcpu, VCPU_REGS_RDX, ret >> 32);
  3748. kvm_register_write(vcpu, VCPU_REGS_RAX, ret & 0xffffffff);
  3749. }
  3750. return 1;
  3751. }
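/*
 * Illustrative sketch (not part of x86.c): kvm_hv_hypercall() above unpacks
 * the 64-bit hypercall input value exactly as below: bits 0-15 are the call
 * code, bit 16 the "fast" flag, bits 32-43 the rep count and bits 48-59 the
 * rep start index.  Standalone demo of that bit slicing with an arbitrary
 * example value.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t param   = 0x0003000a00010008ull;	/* arbitrary example value */
	uint16_t code    = param & 0xffff;
	int      fast    = (param >> 16) & 0x1;
	uint16_t rep_cnt = (param >> 32) & 0xfff;
	uint16_t rep_idx = (param >> 48) & 0xfff;

	printf("code=%u fast=%d rep_cnt=%u rep_idx=%u\n",
	       code, fast, rep_cnt, rep_idx);
	return 0;
}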
  3752. int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
  3753. {
  3754. unsigned long nr, a0, a1, a2, a3, ret;
  3755. int r = 1;
  3756. if (kvm_hv_hypercall_enabled(vcpu->kvm))
  3757. return kvm_hv_hypercall(vcpu);
  3758. nr = kvm_register_read(vcpu, VCPU_REGS_RAX);
  3759. a0 = kvm_register_read(vcpu, VCPU_REGS_RBX);
  3760. a1 = kvm_register_read(vcpu, VCPU_REGS_RCX);
  3761. a2 = kvm_register_read(vcpu, VCPU_REGS_RDX);
  3762. a3 = kvm_register_read(vcpu, VCPU_REGS_RSI);
  3763. trace_kvm_hypercall(nr, a0, a1, a2, a3);
  3764. if (!is_long_mode(vcpu)) {
  3765. nr &= 0xFFFFFFFF;
  3766. a0 &= 0xFFFFFFFF;
  3767. a1 &= 0xFFFFFFFF;
  3768. a2 &= 0xFFFFFFFF;
  3769. a3 &= 0xFFFFFFFF;
  3770. }
  3771. if (kvm_x86_ops->get_cpl(vcpu) != 0) {
  3772. ret = -KVM_EPERM;
  3773. goto out;
  3774. }
  3775. switch (nr) {
  3776. case KVM_HC_VAPIC_POLL_IRQ:
  3777. ret = 0;
  3778. break;
  3779. case KVM_HC_MMU_OP:
  3780. r = kvm_pv_mmu_op(vcpu, a0, hc_gpa(vcpu, a1, a2), &ret);
  3781. break;
  3782. default:
  3783. ret = -KVM_ENOSYS;
  3784. break;
  3785. }
  3786. out:
  3787. kvm_register_write(vcpu, VCPU_REGS_RAX, ret);
  3788. ++vcpu->stat.hypercalls;
  3789. return r;
  3790. }
  3791. EXPORT_SYMBOL_GPL(kvm_emulate_hypercall);
  3792. int kvm_fix_hypercall(struct kvm_vcpu *vcpu)
  3793. {
  3794. char instruction[3];
  3795. unsigned long rip = kvm_rip_read(vcpu);
  3796. /*
3797. * Blow out the MMU to ensure that no other VCPU has an active mapping,
3798. * so that the updated hypercall appears atomically across all
3799. * VCPUs.
  3800. */
  3801. kvm_mmu_zap_all(vcpu->kvm);
  3802. kvm_x86_ops->patch_hypercall(vcpu, instruction);
  3803. return emulator_write_emulated(rip, instruction, 3, NULL, vcpu);
  3804. }
  3805. void realmode_lgdt(struct kvm_vcpu *vcpu, u16 limit, unsigned long base)
  3806. {
  3807. struct desc_ptr dt = { limit, base };
  3808. kvm_x86_ops->set_gdt(vcpu, &dt);
  3809. }
  3810. void realmode_lidt(struct kvm_vcpu *vcpu, u16 limit, unsigned long base)
  3811. {
  3812. struct desc_ptr dt = { limit, base };
  3813. kvm_x86_ops->set_idt(vcpu, &dt);
  3814. }
  3815. static int move_to_next_stateful_cpuid_entry(struct kvm_vcpu *vcpu, int i)
  3816. {
  3817. struct kvm_cpuid_entry2 *e = &vcpu->arch.cpuid_entries[i];
  3818. int j, nent = vcpu->arch.cpuid_nent;
  3819. e->flags &= ~KVM_CPUID_FLAG_STATE_READ_NEXT;
  3820. /* when no next entry is found, the current entry[i] is reselected */
  3821. for (j = i + 1; ; j = (j + 1) % nent) {
  3822. struct kvm_cpuid_entry2 *ej = &vcpu->arch.cpuid_entries[j];
  3823. if (ej->function == e->function) {
  3824. ej->flags |= KVM_CPUID_FLAG_STATE_READ_NEXT;
  3825. return j;
  3826. }
  3827. }
  3828. return 0; /* silence gcc, even though control never reaches here */
  3829. }
  3830. /* find an entry with matching function, matching index (if needed), and that
  3831. * should be read next (if it's stateful) */
  3832. static int is_matching_cpuid_entry(struct kvm_cpuid_entry2 *e,
  3833. u32 function, u32 index)
  3834. {
  3835. if (e->function != function)
  3836. return 0;
  3837. if ((e->flags & KVM_CPUID_FLAG_SIGNIFCANT_INDEX) && e->index != index)
  3838. return 0;
  3839. if ((e->flags & KVM_CPUID_FLAG_STATEFUL_FUNC) &&
  3840. !(e->flags & KVM_CPUID_FLAG_STATE_READ_NEXT))
  3841. return 0;
  3842. return 1;
  3843. }
  3844. struct kvm_cpuid_entry2 *kvm_find_cpuid_entry(struct kvm_vcpu *vcpu,
  3845. u32 function, u32 index)
  3846. {
  3847. int i;
  3848. struct kvm_cpuid_entry2 *best = NULL;
  3849. for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
  3850. struct kvm_cpuid_entry2 *e;
  3851. e = &vcpu->arch.cpuid_entries[i];
  3852. if (is_matching_cpuid_entry(e, function, index)) {
  3853. if (e->flags & KVM_CPUID_FLAG_STATEFUL_FUNC)
  3854. move_to_next_stateful_cpuid_entry(vcpu, i);
  3855. best = e;
  3856. break;
  3857. }
  3858. /*
  3859. * Both basic or both extended?
  3860. */
  3861. if (((e->function ^ function) & 0x80000000) == 0)
  3862. if (!best || e->function > best->function)
  3863. best = e;
  3864. }
  3865. return best;
  3866. }
  3867. EXPORT_SYMBOL_GPL(kvm_find_cpuid_entry);
  3868. int cpuid_maxphyaddr(struct kvm_vcpu *vcpu)
  3869. {
  3870. struct kvm_cpuid_entry2 *best;
  3871. best = kvm_find_cpuid_entry(vcpu, 0x80000000, 0);
  3872. if (!best || best->eax < 0x80000008)
  3873. goto not_found;
  3874. best = kvm_find_cpuid_entry(vcpu, 0x80000008, 0);
  3875. if (best)
  3876. return best->eax & 0xff;
  3877. not_found:
  3878. return 36;
  3879. }
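/*
 * Illustrative sketch (not part of x86.c): cpuid_maxphyaddr() above reads
 * the guest's physical-address width from bits 7:0 of EAX in CPUID leaf
 * 0x80000008, falling back to 36 when the leaf is absent.  The standalone
 * program below queries the host CPU the same way using GCC's <cpuid.h>.
 */
#include <cpuid.h>
#include <stdio.h>

int main(void)
{
	unsigned int eax, ebx, ecx, edx;
	unsigned int maxphyaddr = 36;		/* same default as the code above */

	if (__get_cpuid(0x80000008, &eax, &ebx, &ecx, &edx))
		maxphyaddr = eax & 0xff;

	printf("MAXPHYADDR = %u bits\n", maxphyaddr);
	return 0;
}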
  3880. void kvm_emulate_cpuid(struct kvm_vcpu *vcpu)
  3881. {
  3882. u32 function, index;
  3883. struct kvm_cpuid_entry2 *best;
  3884. function = kvm_register_read(vcpu, VCPU_REGS_RAX);
  3885. index = kvm_register_read(vcpu, VCPU_REGS_RCX);
  3886. kvm_register_write(vcpu, VCPU_REGS_RAX, 0);
  3887. kvm_register_write(vcpu, VCPU_REGS_RBX, 0);
  3888. kvm_register_write(vcpu, VCPU_REGS_RCX, 0);
  3889. kvm_register_write(vcpu, VCPU_REGS_RDX, 0);
  3890. best = kvm_find_cpuid_entry(vcpu, function, index);
  3891. if (best) {
  3892. kvm_register_write(vcpu, VCPU_REGS_RAX, best->eax);
  3893. kvm_register_write(vcpu, VCPU_REGS_RBX, best->ebx);
  3894. kvm_register_write(vcpu, VCPU_REGS_RCX, best->ecx);
  3895. kvm_register_write(vcpu, VCPU_REGS_RDX, best->edx);
  3896. }
  3897. kvm_x86_ops->skip_emulated_instruction(vcpu);
  3898. trace_kvm_cpuid(function,
  3899. kvm_register_read(vcpu, VCPU_REGS_RAX),
  3900. kvm_register_read(vcpu, VCPU_REGS_RBX),
  3901. kvm_register_read(vcpu, VCPU_REGS_RCX),
  3902. kvm_register_read(vcpu, VCPU_REGS_RDX));
  3903. }
  3904. EXPORT_SYMBOL_GPL(kvm_emulate_cpuid);
  3905. /*
  3906. * Check if userspace requested an interrupt window, and that the
  3907. * interrupt window is open.
  3908. *
  3909. * No need to exit to userspace if we already have an interrupt queued.
  3910. */
  3911. static int dm_request_for_irq_injection(struct kvm_vcpu *vcpu)
  3912. {
  3913. return (!irqchip_in_kernel(vcpu->kvm) && !kvm_cpu_has_interrupt(vcpu) &&
  3914. vcpu->run->request_interrupt_window &&
  3915. kvm_arch_interrupt_allowed(vcpu));
  3916. }
  3917. static void post_kvm_run_save(struct kvm_vcpu *vcpu)
  3918. {
  3919. struct kvm_run *kvm_run = vcpu->run;
  3920. kvm_run->if_flag = (kvm_get_rflags(vcpu) & X86_EFLAGS_IF) != 0;
  3921. kvm_run->cr8 = kvm_get_cr8(vcpu);
  3922. kvm_run->apic_base = kvm_get_apic_base(vcpu);
  3923. if (irqchip_in_kernel(vcpu->kvm))
  3924. kvm_run->ready_for_interrupt_injection = 1;
  3925. else
  3926. kvm_run->ready_for_interrupt_injection =
  3927. kvm_arch_interrupt_allowed(vcpu) &&
  3928. !kvm_cpu_has_interrupt(vcpu) &&
  3929. !kvm_event_needs_reinjection(vcpu);
  3930. }
  3931. static void vapic_enter(struct kvm_vcpu *vcpu)
  3932. {
  3933. struct kvm_lapic *apic = vcpu->arch.apic;
  3934. struct page *page;
  3935. if (!apic || !apic->vapic_addr)
  3936. return;
  3937. page = gfn_to_page(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT);
  3938. vcpu->arch.apic->vapic_page = page;
  3939. }
  3940. static void vapic_exit(struct kvm_vcpu *vcpu)
  3941. {
  3942. struct kvm_lapic *apic = vcpu->arch.apic;
  3943. int idx;
  3944. if (!apic || !apic->vapic_addr)
  3945. return;
  3946. idx = srcu_read_lock(&vcpu->kvm->srcu);
  3947. kvm_release_page_dirty(apic->vapic_page);
  3948. mark_page_dirty(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT);
  3949. srcu_read_unlock(&vcpu->kvm->srcu, idx);
  3950. }
  3951. static void update_cr8_intercept(struct kvm_vcpu *vcpu)
  3952. {
  3953. int max_irr, tpr;
  3954. if (!kvm_x86_ops->update_cr8_intercept)
  3955. return;
  3956. if (!vcpu->arch.apic)
  3957. return;
  3958. if (!vcpu->arch.apic->vapic_addr)
  3959. max_irr = kvm_lapic_find_highest_irr(vcpu);
  3960. else
  3961. max_irr = -1;
  3962. if (max_irr != -1)
  3963. max_irr >>= 4;
  3964. tpr = kvm_lapic_get_cr8(vcpu);
  3965. kvm_x86_ops->update_cr8_intercept(vcpu, tpr, max_irr);
  3966. }
  3967. static void inject_pending_event(struct kvm_vcpu *vcpu)
  3968. {
  3969. /* try to reinject previous events if any */
  3970. if (vcpu->arch.exception.pending) {
  3971. trace_kvm_inj_exception(vcpu->arch.exception.nr,
  3972. vcpu->arch.exception.has_error_code,
  3973. vcpu->arch.exception.error_code);
  3974. kvm_x86_ops->queue_exception(vcpu, vcpu->arch.exception.nr,
  3975. vcpu->arch.exception.has_error_code,
  3976. vcpu->arch.exception.error_code,
  3977. vcpu->arch.exception.reinject);
  3978. return;
  3979. }
  3980. if (vcpu->arch.nmi_injected) {
  3981. kvm_x86_ops->set_nmi(vcpu);
  3982. return;
  3983. }
  3984. if (vcpu->arch.interrupt.pending) {
  3985. kvm_x86_ops->set_irq(vcpu);
  3986. return;
  3987. }
  3988. /* try to inject new event if pending */
  3989. if (vcpu->arch.nmi_pending) {
  3990. if (kvm_x86_ops->nmi_allowed(vcpu)) {
  3991. vcpu->arch.nmi_pending = false;
  3992. vcpu->arch.nmi_injected = true;
  3993. kvm_x86_ops->set_nmi(vcpu);
  3994. }
  3995. } else if (kvm_cpu_has_interrupt(vcpu)) {
  3996. if (kvm_x86_ops->interrupt_allowed(vcpu)) {
  3997. kvm_queue_interrupt(vcpu, kvm_cpu_get_interrupt(vcpu),
  3998. false);
  3999. kvm_x86_ops->set_irq(vcpu);
  4000. }
  4001. }
  4002. }
  4003. static void kvm_load_guest_xcr0(struct kvm_vcpu *vcpu)
  4004. {
  4005. if (kvm_read_cr4_bits(vcpu, X86_CR4_OSXSAVE) &&
  4006. !vcpu->guest_xcr0_loaded) {
  4007. /* kvm_set_xcr() also depends on this */
  4008. xsetbv(XCR_XFEATURE_ENABLED_MASK, vcpu->arch.xcr0);
  4009. vcpu->guest_xcr0_loaded = 1;
  4010. }
  4011. }
  4012. static void kvm_put_guest_xcr0(struct kvm_vcpu *vcpu)
  4013. {
  4014. if (vcpu->guest_xcr0_loaded) {
  4015. if (vcpu->arch.xcr0 != host_xcr0)
  4016. xsetbv(XCR_XFEATURE_ENABLED_MASK, host_xcr0);
  4017. vcpu->guest_xcr0_loaded = 0;
  4018. }
  4019. }
  4020. static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
  4021. {
  4022. int r;
  4023. bool req_int_win = !irqchip_in_kernel(vcpu->kvm) &&
  4024. vcpu->run->request_interrupt_window;
  4025. if (vcpu->requests)
  4026. if (test_and_clear_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests))
  4027. kvm_mmu_unload(vcpu);
  4028. r = kvm_mmu_reload(vcpu);
  4029. if (unlikely(r))
  4030. goto out;
  4031. if (vcpu->requests) {
  4032. if (test_and_clear_bit(KVM_REQ_MIGRATE_TIMER, &vcpu->requests))
  4033. __kvm_migrate_timers(vcpu);
  4034. if (test_and_clear_bit(KVM_REQ_KVMCLOCK_UPDATE, &vcpu->requests))
  4035. kvm_write_guest_time(vcpu);
  4036. if (test_and_clear_bit(KVM_REQ_MMU_SYNC, &vcpu->requests))
  4037. kvm_mmu_sync_roots(vcpu);
  4038. if (test_and_clear_bit(KVM_REQ_TLB_FLUSH, &vcpu->requests))
  4039. kvm_x86_ops->tlb_flush(vcpu);
  4040. if (test_and_clear_bit(KVM_REQ_REPORT_TPR_ACCESS,
  4041. &vcpu->requests)) {
  4042. vcpu->run->exit_reason = KVM_EXIT_TPR_ACCESS;
  4043. r = 0;
  4044. goto out;
  4045. }
  4046. if (test_and_clear_bit(KVM_REQ_TRIPLE_FAULT, &vcpu->requests)) {
  4047. vcpu->run->exit_reason = KVM_EXIT_SHUTDOWN;
  4048. r = 0;
  4049. goto out;
  4050. }
  4051. if (test_and_clear_bit(KVM_REQ_DEACTIVATE_FPU, &vcpu->requests)) {
  4052. vcpu->fpu_active = 0;
  4053. kvm_x86_ops->fpu_deactivate(vcpu);
  4054. }
  4055. }
  4056. preempt_disable();
  4057. kvm_x86_ops->prepare_guest_switch(vcpu);
  4058. if (vcpu->fpu_active)
  4059. kvm_load_guest_fpu(vcpu);
  4060. kvm_load_guest_xcr0(vcpu);
  4061. atomic_set(&vcpu->guest_mode, 1);
  4062. smp_wmb();
  4063. local_irq_disable();
  4064. if (!atomic_read(&vcpu->guest_mode) || vcpu->requests
  4065. || need_resched() || signal_pending(current)) {
  4066. atomic_set(&vcpu->guest_mode, 0);
  4067. smp_wmb();
  4068. local_irq_enable();
  4069. preempt_enable();
  4070. r = 1;
  4071. goto out;
  4072. }
  4073. inject_pending_event(vcpu);
  4074. /* enable NMI/IRQ window open exits if needed */
  4075. if (vcpu->arch.nmi_pending)
  4076. kvm_x86_ops->enable_nmi_window(vcpu);
  4077. else if (kvm_cpu_has_interrupt(vcpu) || req_int_win)
  4078. kvm_x86_ops->enable_irq_window(vcpu);
  4079. if (kvm_lapic_enabled(vcpu)) {
  4080. update_cr8_intercept(vcpu);
  4081. kvm_lapic_sync_to_vapic(vcpu);
  4082. }
  4083. srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
  4084. kvm_guest_enter();
  4085. if (unlikely(vcpu->arch.switch_db_regs)) {
  4086. set_debugreg(0, 7);
  4087. set_debugreg(vcpu->arch.eff_db[0], 0);
  4088. set_debugreg(vcpu->arch.eff_db[1], 1);
  4089. set_debugreg(vcpu->arch.eff_db[2], 2);
  4090. set_debugreg(vcpu->arch.eff_db[3], 3);
  4091. }
  4092. trace_kvm_entry(vcpu->vcpu_id);
  4093. kvm_x86_ops->run(vcpu);
  4094. /*
  4095. * If the guest has used debug registers, at least dr7
  4096. * will be disabled while returning to the host.
  4097. * If we don't have active breakpoints in the host, we don't
  4098. * care about the messed up debug address registers. But if
  4099. * we have some of them active, restore the old state.
  4100. */
  4101. if (hw_breakpoint_active())
  4102. hw_breakpoint_restore();
  4103. atomic_set(&vcpu->guest_mode, 0);
  4104. smp_wmb();
  4105. local_irq_enable();
  4106. ++vcpu->stat.exits;
  4107. /*
  4108. * We must have an instruction between local_irq_enable() and
  4109. * kvm_guest_exit(), so the timer interrupt isn't delayed by
  4110. * the interrupt shadow. The stat.exits increment will do nicely.
  4111. * But we need to prevent reordering, hence this barrier():
  4112. */
  4113. barrier();
  4114. kvm_guest_exit();
  4115. preempt_enable();
  4116. vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
  4117. /*
  4118. * Profile KVM exit RIPs:
  4119. */
  4120. if (unlikely(prof_on == KVM_PROFILING)) {
  4121. unsigned long rip = kvm_rip_read(vcpu);
  4122. profile_hit(KVM_PROFILING, (void *)rip);
  4123. }
  4124. kvm_lapic_sync_from_vapic(vcpu);
  4125. r = kvm_x86_ops->handle_exit(vcpu);
  4126. out:
  4127. return r;
  4128. }
  4129. static int __vcpu_run(struct kvm_vcpu *vcpu)
  4130. {
  4131. int r;
  4132. struct kvm *kvm = vcpu->kvm;
  4133. if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_SIPI_RECEIVED)) {
  4134. pr_debug("vcpu %d received sipi with vector # %x\n",
  4135. vcpu->vcpu_id, vcpu->arch.sipi_vector);
  4136. kvm_lapic_reset(vcpu);
  4137. r = kvm_arch_vcpu_reset(vcpu);
  4138. if (r)
  4139. return r;
  4140. vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
  4141. }
  4142. vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
  4143. vapic_enter(vcpu);
  4144. r = 1;
  4145. while (r > 0) {
  4146. if (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE)
  4147. r = vcpu_enter_guest(vcpu);
  4148. else {
  4149. srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
  4150. kvm_vcpu_block(vcpu);
  4151. vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
  4152. if (test_and_clear_bit(KVM_REQ_UNHALT, &vcpu->requests))
  4153. {
4154. switch (vcpu->arch.mp_state) {
  4155. case KVM_MP_STATE_HALTED:
  4156. vcpu->arch.mp_state =
  4157. KVM_MP_STATE_RUNNABLE;
  4158. case KVM_MP_STATE_RUNNABLE:
  4159. break;
  4160. case KVM_MP_STATE_SIPI_RECEIVED:
  4161. default:
  4162. r = -EINTR;
  4163. break;
  4164. }
  4165. }
  4166. }
  4167. if (r <= 0)
  4168. break;
  4169. clear_bit(KVM_REQ_PENDING_TIMER, &vcpu->requests);
  4170. if (kvm_cpu_has_pending_timer(vcpu))
  4171. kvm_inject_pending_timer_irqs(vcpu);
  4172. if (dm_request_for_irq_injection(vcpu)) {
  4173. r = -EINTR;
  4174. vcpu->run->exit_reason = KVM_EXIT_INTR;
  4175. ++vcpu->stat.request_irq_exits;
  4176. }
  4177. if (signal_pending(current)) {
  4178. r = -EINTR;
  4179. vcpu->run->exit_reason = KVM_EXIT_INTR;
  4180. ++vcpu->stat.signal_exits;
  4181. }
  4182. if (need_resched()) {
  4183. srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
  4184. kvm_resched(vcpu);
  4185. vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
  4186. }
  4187. }
  4188. srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
  4189. vapic_exit(vcpu);
  4190. return r;
  4191. }
  4192. int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
  4193. {
  4194. int r;
  4195. sigset_t sigsaved;
  4196. if (vcpu->sigset_active)
  4197. sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
  4198. if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_UNINITIALIZED)) {
  4199. kvm_vcpu_block(vcpu);
  4200. clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
  4201. r = -EAGAIN;
  4202. goto out;
  4203. }
  4204. /* re-sync apic's tpr */
  4205. if (!irqchip_in_kernel(vcpu->kvm))
  4206. kvm_set_cr8(vcpu, kvm_run->cr8);
  4207. if (vcpu->arch.pio.count || vcpu->mmio_needed ||
  4208. vcpu->arch.emulate_ctxt.restart) {
  4209. if (vcpu->mmio_needed) {
  4210. memcpy(vcpu->mmio_data, kvm_run->mmio.data, 8);
  4211. vcpu->mmio_read_completed = 1;
  4212. vcpu->mmio_needed = 0;
  4213. }
  4214. vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
  4215. r = emulate_instruction(vcpu, 0, 0, EMULTYPE_NO_DECODE);
  4216. srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
  4217. if (r != EMULATE_DONE) {
  4218. r = 0;
  4219. goto out;
  4220. }
  4221. }
  4222. if (kvm_run->exit_reason == KVM_EXIT_HYPERCALL)
  4223. kvm_register_write(vcpu, VCPU_REGS_RAX,
  4224. kvm_run->hypercall.ret);
  4225. r = __vcpu_run(vcpu);
  4226. out:
  4227. post_kvm_run_save(vcpu);
  4228. if (vcpu->sigset_active)
  4229. sigprocmask(SIG_SETMASK, &sigsaved, NULL);
  4230. return r;
  4231. }
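/*
 * Illustrative sketch (not part of x86.c): the function above is the kernel
 * half of the KVM_RUN ioctl.  A VMM drives it in a loop roughly like the
 * fragment below, completing whatever exit was reported before re-entering;
 * vcpu_fd and run (the mmap()ed struct kvm_run) are assumed to have been set
 * up already, and the exit handling is only hinted at (see the PIO and MMIO
 * sketches earlier in this file).
 */
#include <linux/kvm.h>
#include <sys/ioctl.h>
#include <stdio.h>

static int run_vcpu(int vcpu_fd, struct kvm_run *run)
{
	for (;;) {
		if (ioctl(vcpu_fd, KVM_RUN, 0) < 0) {
			perror("KVM_RUN");
			return -1;
		}

		switch (run->exit_reason) {
		case KVM_EXIT_IO:
			/* complete the port access, e.g. as in the PIO sketch above */
			break;
		case KVM_EXIT_MMIO:
			/* complete the memory access, e.g. as in the MMIO sketch above */
			break;
		case KVM_EXIT_HLT:
			return 0;		/* guest executed hlt */
		default:
			fprintf(stderr, "unhandled exit %u\n", run->exit_reason);
			return -1;
		}
	}
}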
  4232. int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
  4233. {
  4234. regs->rax = kvm_register_read(vcpu, VCPU_REGS_RAX);
  4235. regs->rbx = kvm_register_read(vcpu, VCPU_REGS_RBX);
  4236. regs->rcx = kvm_register_read(vcpu, VCPU_REGS_RCX);
  4237. regs->rdx = kvm_register_read(vcpu, VCPU_REGS_RDX);
  4238. regs->rsi = kvm_register_read(vcpu, VCPU_REGS_RSI);
  4239. regs->rdi = kvm_register_read(vcpu, VCPU_REGS_RDI);
  4240. regs->rsp = kvm_register_read(vcpu, VCPU_REGS_RSP);
  4241. regs->rbp = kvm_register_read(vcpu, VCPU_REGS_RBP);
  4242. #ifdef CONFIG_X86_64
  4243. regs->r8 = kvm_register_read(vcpu, VCPU_REGS_R8);
  4244. regs->r9 = kvm_register_read(vcpu, VCPU_REGS_R9);
  4245. regs->r10 = kvm_register_read(vcpu, VCPU_REGS_R10);
  4246. regs->r11 = kvm_register_read(vcpu, VCPU_REGS_R11);
  4247. regs->r12 = kvm_register_read(vcpu, VCPU_REGS_R12);
  4248. regs->r13 = kvm_register_read(vcpu, VCPU_REGS_R13);
  4249. regs->r14 = kvm_register_read(vcpu, VCPU_REGS_R14);
  4250. regs->r15 = kvm_register_read(vcpu, VCPU_REGS_R15);
  4251. #endif
  4252. regs->rip = kvm_rip_read(vcpu);
  4253. regs->rflags = kvm_get_rflags(vcpu);
  4254. return 0;
  4255. }
  4256. int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
  4257. {
  4258. kvm_register_write(vcpu, VCPU_REGS_RAX, regs->rax);
  4259. kvm_register_write(vcpu, VCPU_REGS_RBX, regs->rbx);
  4260. kvm_register_write(vcpu, VCPU_REGS_RCX, regs->rcx);
  4261. kvm_register_write(vcpu, VCPU_REGS_RDX, regs->rdx);
  4262. kvm_register_write(vcpu, VCPU_REGS_RSI, regs->rsi);
  4263. kvm_register_write(vcpu, VCPU_REGS_RDI, regs->rdi);
  4264. kvm_register_write(vcpu, VCPU_REGS_RSP, regs->rsp);
  4265. kvm_register_write(vcpu, VCPU_REGS_RBP, regs->rbp);
  4266. #ifdef CONFIG_X86_64
  4267. kvm_register_write(vcpu, VCPU_REGS_R8, regs->r8);
  4268. kvm_register_write(vcpu, VCPU_REGS_R9, regs->r9);
  4269. kvm_register_write(vcpu, VCPU_REGS_R10, regs->r10);
  4270. kvm_register_write(vcpu, VCPU_REGS_R11, regs->r11);
  4271. kvm_register_write(vcpu, VCPU_REGS_R12, regs->r12);
  4272. kvm_register_write(vcpu, VCPU_REGS_R13, regs->r13);
  4273. kvm_register_write(vcpu, VCPU_REGS_R14, regs->r14);
  4274. kvm_register_write(vcpu, VCPU_REGS_R15, regs->r15);
  4275. #endif
  4276. kvm_rip_write(vcpu, regs->rip);
  4277. kvm_set_rflags(vcpu, regs->rflags);
  4278. vcpu->arch.exception.pending = false;
  4279. return 0;
  4280. }
  4281. void kvm_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l)
  4282. {
  4283. struct kvm_segment cs;
  4284. kvm_get_segment(vcpu, &cs, VCPU_SREG_CS);
  4285. *db = cs.db;
  4286. *l = cs.l;
  4287. }
  4288. EXPORT_SYMBOL_GPL(kvm_get_cs_db_l_bits);
  4289. int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
  4290. struct kvm_sregs *sregs)
  4291. {
  4292. struct desc_ptr dt;
  4293. kvm_get_segment(vcpu, &sregs->cs, VCPU_SREG_CS);
  4294. kvm_get_segment(vcpu, &sregs->ds, VCPU_SREG_DS);
  4295. kvm_get_segment(vcpu, &sregs->es, VCPU_SREG_ES);
  4296. kvm_get_segment(vcpu, &sregs->fs, VCPU_SREG_FS);
  4297. kvm_get_segment(vcpu, &sregs->gs, VCPU_SREG_GS);
  4298. kvm_get_segment(vcpu, &sregs->ss, VCPU_SREG_SS);
  4299. kvm_get_segment(vcpu, &sregs->tr, VCPU_SREG_TR);
  4300. kvm_get_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR);
  4301. kvm_x86_ops->get_idt(vcpu, &dt);
  4302. sregs->idt.limit = dt.size;
  4303. sregs->idt.base = dt.address;
  4304. kvm_x86_ops->get_gdt(vcpu, &dt);
  4305. sregs->gdt.limit = dt.size;
  4306. sregs->gdt.base = dt.address;
  4307. sregs->cr0 = kvm_read_cr0(vcpu);
  4308. sregs->cr2 = vcpu->arch.cr2;
  4309. sregs->cr3 = vcpu->arch.cr3;
  4310. sregs->cr4 = kvm_read_cr4(vcpu);
  4311. sregs->cr8 = kvm_get_cr8(vcpu);
  4312. sregs->efer = vcpu->arch.efer;
  4313. sregs->apic_base = kvm_get_apic_base(vcpu);
  4314. memset(sregs->interrupt_bitmap, 0, sizeof sregs->interrupt_bitmap);
  4315. if (vcpu->arch.interrupt.pending && !vcpu->arch.interrupt.soft)
  4316. set_bit(vcpu->arch.interrupt.nr,
  4317. (unsigned long *)sregs->interrupt_bitmap);
  4318. return 0;
  4319. }

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	mp_state->mp_state = vcpu->arch.mp_state;
	return 0;
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	vcpu->arch.mp_state = mp_state->mp_state;
	return 0;
}
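
/*
 * Emulate a hardware task switch on behalf of the vendor module: set up the
 * x86 emulator context from the current vcpu state, hand the TSS selector
 * and switch reason to emulator_task_switch(), and write the resulting
 * register state back into the vcpu.
 */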
int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason,
		    bool has_error_code, u32 error_code)
{
	struct decode_cache *c = &vcpu->arch.emulate_ctxt.decode;
	int cs_db, cs_l, ret;

	cache_all_regs(vcpu);

	kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);

	vcpu->arch.emulate_ctxt.vcpu = vcpu;
	vcpu->arch.emulate_ctxt.eflags = kvm_x86_ops->get_rflags(vcpu);
	vcpu->arch.emulate_ctxt.eip = kvm_rip_read(vcpu);
	vcpu->arch.emulate_ctxt.mode =
		(!is_protmode(vcpu)) ? X86EMUL_MODE_REAL :
		(vcpu->arch.emulate_ctxt.eflags & X86_EFLAGS_VM)
		? X86EMUL_MODE_VM86 : cs_l
		? X86EMUL_MODE_PROT64 : cs_db
		? X86EMUL_MODE_PROT32 : X86EMUL_MODE_PROT16;

	memset(c, 0, sizeof(struct decode_cache));
	memcpy(c->regs, vcpu->arch.regs, sizeof c->regs);

	ret = emulator_task_switch(&vcpu->arch.emulate_ctxt, &emulate_ops,
				   tss_selector, reason, has_error_code,
				   error_code);

	if (ret)
		return EMULATE_FAIL;

	memcpy(vcpu->arch.regs, c->regs, sizeof c->regs);
	kvm_rip_write(vcpu, vcpu->arch.emulate_ctxt.eip);
	kvm_x86_ops->set_rflags(vcpu, vcpu->arch.emulate_ctxt.eflags);
	return EMULATE_DONE;
}
EXPORT_SYMBOL_GPL(kvm_task_switch);
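
/*
 * KVM_SET_SREGS: install the descriptor tables, control registers, EFER and
 * segment registers supplied by userspace.  A change to CR0, CR3, CR4 or
 * EFER forces an MMU context reset, and an interrupt marked pending in
 * interrupt_bitmap is re-queued for injection.
 */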
int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	int mmu_reset_needed = 0;
	int pending_vec, max_bits;
	struct desc_ptr dt;

	dt.size = sregs->idt.limit;
	dt.address = sregs->idt.base;
	kvm_x86_ops->set_idt(vcpu, &dt);
	dt.size = sregs->gdt.limit;
	dt.address = sregs->gdt.base;
	kvm_x86_ops->set_gdt(vcpu, &dt);

	vcpu->arch.cr2 = sregs->cr2;
	mmu_reset_needed |= vcpu->arch.cr3 != sregs->cr3;
	vcpu->arch.cr3 = sregs->cr3;

	kvm_set_cr8(vcpu, sregs->cr8);

	mmu_reset_needed |= vcpu->arch.efer != sregs->efer;
	kvm_x86_ops->set_efer(vcpu, sregs->efer);
	kvm_set_apic_base(vcpu, sregs->apic_base);

	mmu_reset_needed |= kvm_read_cr0(vcpu) != sregs->cr0;
	kvm_x86_ops->set_cr0(vcpu, sregs->cr0);
	vcpu->arch.cr0 = sregs->cr0;

	mmu_reset_needed |= kvm_read_cr4(vcpu) != sregs->cr4;
	kvm_x86_ops->set_cr4(vcpu, sregs->cr4);
	if (!is_long_mode(vcpu) && is_pae(vcpu)) {
		load_pdptrs(vcpu, vcpu->arch.cr3);
		mmu_reset_needed = 1;
	}

	if (mmu_reset_needed)
		kvm_mmu_reset_context(vcpu);

	max_bits = (sizeof sregs->interrupt_bitmap) << 3;
	pending_vec = find_first_bit(
		(const unsigned long *)sregs->interrupt_bitmap, max_bits);
	if (pending_vec < max_bits) {
		kvm_queue_interrupt(vcpu, pending_vec, false);
		pr_debug("Set back pending irq %d\n", pending_vec);
		if (irqchip_in_kernel(vcpu->kvm))
			kvm_pic_clear_isr_ack(vcpu->kvm);
	}

	kvm_set_segment(vcpu, &sregs->cs, VCPU_SREG_CS);
	kvm_set_segment(vcpu, &sregs->ds, VCPU_SREG_DS);
	kvm_set_segment(vcpu, &sregs->es, VCPU_SREG_ES);
	kvm_set_segment(vcpu, &sregs->fs, VCPU_SREG_FS);
	kvm_set_segment(vcpu, &sregs->gs, VCPU_SREG_GS);
	kvm_set_segment(vcpu, &sregs->ss, VCPU_SREG_SS);

	kvm_set_segment(vcpu, &sregs->tr, VCPU_SREG_TR);
	kvm_set_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR);

	update_cr8_intercept(vcpu);

	/* Older userspace won't unhalt the vcpu on reset. */
	if (kvm_vcpu_is_bsp(vcpu) && kvm_rip_read(vcpu) == 0xfff0 &&
	    sregs->cs.selector == 0xf000 && sregs->cs.base == 0xffff0000 &&
	    !is_protmode(vcpu))
		vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;

	return 0;
}
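
/*
 * KVM_SET_GUEST_DEBUG: optionally inject a #DB or #BP, then switch the vcpu
 * between guest-owned and debugger-owned debug registers and single-step
 * state according to dbg->control.
 */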
int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	unsigned long rflags;
	int i, r;

	if (dbg->control & (KVM_GUESTDBG_INJECT_DB | KVM_GUESTDBG_INJECT_BP)) {
		r = -EBUSY;
		if (vcpu->arch.exception.pending)
			goto out;
		if (dbg->control & KVM_GUESTDBG_INJECT_DB)
			kvm_queue_exception(vcpu, DB_VECTOR);
		else
			kvm_queue_exception(vcpu, BP_VECTOR);
	}

	/*
	 * Read rflags as long as potentially injected trace flags are still
	 * filtered out.
	 */
	rflags = kvm_get_rflags(vcpu);

	vcpu->guest_debug = dbg->control;
	if (!(vcpu->guest_debug & KVM_GUESTDBG_ENABLE))
		vcpu->guest_debug = 0;

	if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) {
		for (i = 0; i < KVM_NR_DB_REGS; ++i)
			vcpu->arch.eff_db[i] = dbg->arch.debugreg[i];
		vcpu->arch.switch_db_regs =
			(dbg->arch.debugreg[7] & DR7_BP_EN_MASK);
	} else {
		for (i = 0; i < KVM_NR_DB_REGS; i++)
			vcpu->arch.eff_db[i] = vcpu->arch.db[i];
		vcpu->arch.switch_db_regs = (vcpu->arch.dr7 & DR7_BP_EN_MASK);
	}

	if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
		vcpu->arch.singlestep_rip = kvm_rip_read(vcpu) +
			get_segment_base(vcpu, VCPU_SREG_CS);

	/*
	 * Trigger an rflags update that will inject or remove the trace
	 * flags.
	 */
	kvm_set_rflags(vcpu, rflags);

	kvm_x86_ops->set_guest_debug(vcpu, dbg);

	r = 0;

out:
	return r;
}

/*
 * Translate a guest virtual address to a guest physical address.
 */
int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	unsigned long vaddr = tr->linear_address;
	gpa_t gpa;
	int idx;

	idx = srcu_read_lock(&vcpu->kvm->srcu);
	gpa = kvm_mmu_gva_to_gpa_system(vcpu, vaddr, NULL);
	srcu_read_unlock(&vcpu->kvm->srcu, idx);
	tr->physical_address = gpa;
	tr->valid = gpa != UNMAPPED_GVA;
	tr->writeable = 1;
	tr->usermode = 0;

	return 0;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	struct i387_fxsave_struct *fxsave =
			&vcpu->arch.guest_fpu.state->fxsave;

	memcpy(fpu->fpr, fxsave->st_space, 128);
	fpu->fcw = fxsave->cwd;
	fpu->fsw = fxsave->swd;
	fpu->ftwx = fxsave->twd;
	fpu->last_opcode = fxsave->fop;
	fpu->last_ip = fxsave->rip;
	fpu->last_dp = fxsave->rdp;
	memcpy(fpu->xmm, fxsave->xmm_space, sizeof fxsave->xmm_space);

	return 0;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	struct i387_fxsave_struct *fxsave =
			&vcpu->arch.guest_fpu.state->fxsave;

	memcpy(fxsave->st_space, fpu->fpr, 128);
	fxsave->cwd = fpu->fcw;
	fxsave->swd = fpu->fsw;
	fxsave->twd = fpu->ftwx;
	fxsave->fop = fpu->last_opcode;
	fxsave->rip = fpu->last_ip;
	fxsave->rdp = fpu->last_dp;
	memcpy(fxsave->xmm_space, fpu->xmm, sizeof fxsave->xmm_space);

	return 0;
}
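
/*
 * Allocate and initialize the guest FPU state.  The guest starts with a
 * minimal, always-valid xcr0 (x87 state only) and with CR0.ET forced on,
 * as it is on real hardware.
 */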
int fx_init(struct kvm_vcpu *vcpu)
{
	int err;

	err = fpu_alloc(&vcpu->arch.guest_fpu);
	if (err)
		return err;

	fpu_finit(&vcpu->arch.guest_fpu);

	/*
	 * Ensure guest xcr0 is valid for loading
	 */
	vcpu->arch.xcr0 = XSTATE_FP;

	vcpu->arch.cr0 |= X86_CR0_ET;

	return 0;
}
EXPORT_SYMBOL_GPL(fx_init);

static void fx_free(struct kvm_vcpu *vcpu)
{
	fpu_free(&vcpu->arch.guest_fpu);
}
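
/*
 * Swap FPU state between host and guest around guest entry/exit: save the
 * host FPU and restore the guest FPU before entry, do the reverse on exit,
 * and account the reload in the vcpu statistics.
 */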
void kvm_load_guest_fpu(struct kvm_vcpu *vcpu)
{
	if (vcpu->guest_fpu_loaded)
		return;

	/*
	 * Restore all possible states in the guest,
	 * and assume host would use all available bits.
	 * Guest xcr0 would be loaded later.
	 */
	kvm_put_guest_xcr0(vcpu);
	vcpu->guest_fpu_loaded = 1;
	unlazy_fpu(current);
	fpu_restore_checking(&vcpu->arch.guest_fpu);
	trace_kvm_fpu(1);
}

void kvm_put_guest_fpu(struct kvm_vcpu *vcpu)
{
	kvm_put_guest_xcr0(vcpu);

	if (!vcpu->guest_fpu_loaded)
		return;

	vcpu->guest_fpu_loaded = 0;
	fpu_save_init(&vcpu->arch.guest_fpu);
	++vcpu->stat.fpu_reload;
	set_bit(KVM_REQ_DEACTIVATE_FPU, &vcpu->requests);
	trace_kvm_fpu(0);
}

void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
{
	if (vcpu->arch.time_page) {
		kvm_release_page_dirty(vcpu->arch.time_page);
		vcpu->arch.time_page = NULL;
	}

	fx_free(vcpu);
	kvm_x86_ops->vcpu_free(vcpu);
}

struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
				      unsigned int id)
{
	return kvm_x86_ops->vcpu_create(kvm, id);
}

int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
	int r;

	vcpu->arch.mtrr_state.have_fixed = 1;
	vcpu_load(vcpu);
	r = kvm_arch_vcpu_reset(vcpu);
	if (r == 0)
		r = kvm_mmu_setup(vcpu);
	vcpu_put(vcpu);
	if (r < 0)
		goto free_vcpu;

	return 0;
free_vcpu:
	kvm_x86_ops->vcpu_free(vcpu);
	return r;
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	vcpu_load(vcpu);
	kvm_mmu_unload(vcpu);
	vcpu_put(vcpu);

	fx_free(vcpu);
	kvm_x86_ops->vcpu_free(vcpu);
}

int kvm_arch_vcpu_reset(struct kvm_vcpu *vcpu)
{
	vcpu->arch.nmi_pending = false;
	vcpu->arch.nmi_injected = false;

	vcpu->arch.switch_db_regs = 0;
	memset(vcpu->arch.db, 0, sizeof(vcpu->arch.db));
	vcpu->arch.dr6 = DR6_FIXED_1;
	vcpu->arch.dr7 = DR7_FIXED_1;

	return kvm_x86_ops->vcpu_reset(vcpu);
}
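
/*
 * Per-CPU enable path, run when KVM starts using a CPU (including CPU
 * hotplug): invalidate the cached TSC frequency where it is not constant,
 * set up the shared MSR state, and let the vendor module enable
 * virtualization on this CPU.
 */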
int kvm_arch_hardware_enable(void *garbage)
{
	/*
	 * Since this may be called from a hotplug notification,
	 * we can't get the CPU frequency directly.
	 */
	if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) {
		int cpu = raw_smp_processor_id();
		per_cpu(cpu_tsc_khz, cpu) = 0;
	}

	kvm_shared_msr_cpu_online();

	return kvm_x86_ops->hardware_enable(garbage);
}

void kvm_arch_hardware_disable(void *garbage)
{
	kvm_x86_ops->hardware_disable(garbage);
	drop_user_return_notifiers(garbage);
}

int kvm_arch_hardware_setup(void)
{
	return kvm_x86_ops->hardware_setup();
}

void kvm_arch_hardware_unsetup(void)
{
	kvm_x86_ops->hardware_unsetup();
}

void kvm_arch_check_processor_compat(void *rtn)
{
	kvm_x86_ops->check_processor_compatibility(rtn);
}
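
/*
 * First-stage vcpu construction: allocate the pio scratch page, the MMU
 * state, the in-kernel local APIC (when the irqchip lives in the kernel)
 * and the MCE bank array, unwinding in reverse order on failure.
 */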
int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
	struct page *page;
	struct kvm *kvm;
	int r;

	BUG_ON(vcpu->kvm == NULL);
	kvm = vcpu->kvm;

	vcpu->arch.mmu.root_hpa = INVALID_PAGE;
	if (!irqchip_in_kernel(kvm) || kvm_vcpu_is_bsp(vcpu))
		vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
	else
		vcpu->arch.mp_state = KVM_MP_STATE_UNINITIALIZED;

	page = alloc_page(GFP_KERNEL | __GFP_ZERO);
	if (!page) {
		r = -ENOMEM;
		goto fail;
	}
	vcpu->arch.pio_data = page_address(page);

	r = kvm_mmu_create(vcpu);
	if (r < 0)
		goto fail_free_pio_data;

	if (irqchip_in_kernel(kvm)) {
		r = kvm_create_lapic(vcpu);
		if (r < 0)
			goto fail_mmu_destroy;
	}

	vcpu->arch.mce_banks = kzalloc(KVM_MAX_MCE_BANKS * sizeof(u64) * 4,
				       GFP_KERNEL);
	if (!vcpu->arch.mce_banks) {
		r = -ENOMEM;
		goto fail_free_lapic;
	}
	vcpu->arch.mcg_cap = KVM_MAX_MCE_BANKS;

	return 0;
fail_free_lapic:
	kvm_free_lapic(vcpu);
fail_mmu_destroy:
	kvm_mmu_destroy(vcpu);
fail_free_pio_data:
	free_page((unsigned long)vcpu->arch.pio_data);
fail:
	return r;
}

void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	int idx;

	kfree(vcpu->arch.mce_banks);
	kvm_free_lapic(vcpu);
	idx = srcu_read_lock(&vcpu->kvm->srcu);
	kvm_mmu_destroy(vcpu);
	srcu_read_unlock(&vcpu->kvm->srcu, idx);
	free_page((unsigned long)vcpu->arch.pio_data);
}
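
/*
 * Allocate and initialize the per-VM architecture state: the memory alias
 * table, the MMU page lists, the reserved userspace irq source id, and the
 * TSC value sampled at VM creation.
 */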
struct kvm *kvm_arch_create_vm(void)
{
	struct kvm *kvm = kzalloc(sizeof(struct kvm), GFP_KERNEL);

	if (!kvm)
		return ERR_PTR(-ENOMEM);

	kvm->arch.aliases = kzalloc(sizeof(struct kvm_mem_aliases), GFP_KERNEL);
	if (!kvm->arch.aliases) {
		kfree(kvm);
		return ERR_PTR(-ENOMEM);
	}

	INIT_LIST_HEAD(&kvm->arch.active_mmu_pages);
	INIT_LIST_HEAD(&kvm->arch.assigned_dev_head);

	/* Reserve bit 0 of irq_sources_bitmap for userspace irq source */
	set_bit(KVM_USERSPACE_IRQ_SOURCE_ID, &kvm->arch.irq_sources_bitmap);

	rdtscll(kvm->arch.vm_init_tsc);

	return kvm;
}

static void kvm_unload_vcpu_mmu(struct kvm_vcpu *vcpu)
{
	vcpu_load(vcpu);
	kvm_mmu_unload(vcpu);
	vcpu_put(vcpu);
}

static void kvm_free_vcpus(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

	/*
	 * Unpin any mmu pages first.
	 */
	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_unload_vcpu_mmu(vcpu);
	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_arch_vcpu_free(vcpu);

	mutex_lock(&kvm->lock);
	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
		kvm->vcpus[i] = NULL;

	atomic_set(&kvm->online_vcpus, 0);
	mutex_unlock(&kvm->lock);
}

void kvm_arch_sync_events(struct kvm *kvm)
{
	kvm_free_all_assigned_devices(kvm);
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
	kvm_iommu_unmap_guest(kvm);
	kvm_free_pit(kvm);
	kfree(kvm->arch.vpic);
	kfree(kvm->arch.vioapic);
	kvm_free_vcpus(kvm);
	kvm_free_physmem(kvm);
	if (kvm->arch.apic_access_page)
		put_page(kvm->arch.apic_access_page);
	if (kvm->arch.ept_identity_pagetable)
		put_page(kvm->arch.ept_identity_pagetable);
	cleanup_srcu_struct(&kvm->srcu);
	kfree(kvm->arch.aliases);
	kfree(kvm);
}
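
/*
 * Memslot handling for KVM_SET_USER_MEMORY_REGION.  The prepare hook mmaps
 * anonymous memory for legacy (!user_alloc) slots before they go live; the
 * commit hook below unmaps such memory when a slot is deleted, rebalances
 * the shadow page limit and write-protects the slot's shadow mappings.
 */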
int kvm_arch_prepare_memory_region(struct kvm *kvm,
				   struct kvm_memory_slot *memslot,
				   struct kvm_memory_slot old,
				   struct kvm_userspace_memory_region *mem,
				   int user_alloc)
{
	int npages = memslot->npages;

	/*
	 * To keep backward compatibility with older userspace, x86 needs
	 * to handle the !user_alloc case.
	 */
	if (!user_alloc) {
		if (npages && !old.rmap) {
			unsigned long userspace_addr;

			down_write(&current->mm->mmap_sem);
			userspace_addr = do_mmap(NULL, 0,
						 npages * PAGE_SIZE,
						 PROT_READ | PROT_WRITE,
						 MAP_PRIVATE | MAP_ANONYMOUS,
						 0);
			up_write(&current->mm->mmap_sem);

			if (IS_ERR((void *)userspace_addr))
				return PTR_ERR((void *)userspace_addr);

			memslot->userspace_addr = userspace_addr;
		}
	}

	return 0;
}

void kvm_arch_commit_memory_region(struct kvm *kvm,
				   struct kvm_userspace_memory_region *mem,
				   struct kvm_memory_slot old,
				   int user_alloc)
{
	int npages = mem->memory_size >> PAGE_SHIFT;

	if (!user_alloc && !old.user_alloc && old.rmap && !npages) {
		int ret;

		down_write(&current->mm->mmap_sem);
		ret = do_munmap(current->mm, old.userspace_addr,
				old.npages * PAGE_SIZE);
		up_write(&current->mm->mmap_sem);
		if (ret < 0)
			printk(KERN_WARNING
			       "kvm_vm_ioctl_set_memory_region: "
			       "failed to munmap memory\n");
	}

	spin_lock(&kvm->mmu_lock);
	if (!kvm->arch.n_requested_mmu_pages) {
		unsigned int nr_mmu_pages = kvm_mmu_calculate_mmu_pages(kvm);
		kvm_mmu_change_mmu_pages(kvm, nr_mmu_pages);
	}

	kvm_mmu_slot_remove_write_access(kvm, mem->slot);
	spin_unlock(&kvm->mmu_lock);
}

void kvm_arch_flush_shadow(struct kvm *kvm)
{
	kvm_mmu_zap_all(kvm);
	kvm_reload_remote_mmus(kvm);
}

int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE
		|| vcpu->arch.mp_state == KVM_MP_STATE_SIPI_RECEIVED
		|| vcpu->arch.nmi_pending ||
		(kvm_arch_interrupt_allowed(vcpu) &&
		 kvm_cpu_has_interrupt(vcpu));
}
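
/*
 * Kick a vcpu out of its current state: wake it if it is blocked in halt,
 * and send a reschedule IPI if it is currently executing guest code on
 * another CPU so that it re-evaluates pending requests.
 */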
void kvm_vcpu_kick(struct kvm_vcpu *vcpu)
{
	int me;
	int cpu = vcpu->cpu;

	if (waitqueue_active(&vcpu->wq)) {
		wake_up_interruptible(&vcpu->wq);
		++vcpu->stat.halt_wakeup;
	}

	me = get_cpu();
	if (cpu != me && (unsigned)cpu < nr_cpu_ids && cpu_online(cpu))
		if (atomic_xchg(&vcpu->guest_mode, 0))
			smp_send_reschedule(cpu);
	put_cpu();
}

int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu)
{
	return kvm_x86_ops->interrupt_allowed(vcpu);
}

bool kvm_is_linear_rip(struct kvm_vcpu *vcpu, unsigned long linear_rip)
{
	unsigned long current_rip = kvm_rip_read(vcpu) +
		get_segment_base(vcpu, VCPU_SREG_CS);

	return current_rip == linear_rip;
}
EXPORT_SYMBOL_GPL(kvm_is_linear_rip);
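
/*
 * RFLAGS accessors that hide guest single-stepping: the trap flag injected
 * for the debugger is filtered out on reads and re-applied on writes while
 * the vcpu is still at the instruction being stepped.
 */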
unsigned long kvm_get_rflags(struct kvm_vcpu *vcpu)
{
	unsigned long rflags;

	rflags = kvm_x86_ops->get_rflags(vcpu);
	if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
		rflags &= ~X86_EFLAGS_TF;
	return rflags;
}
EXPORT_SYMBOL_GPL(kvm_get_rflags);

void kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
{
	if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP &&
	    kvm_is_linear_rip(vcpu, vcpu->arch.singlestep_rip))
		rflags |= X86_EFLAGS_TF;
	kvm_x86_ops->set_rflags(vcpu, rflags);
}
EXPORT_SYMBOL_GPL(kvm_set_rflags);

EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_exit);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_inj_virq);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_page_fault);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_msr);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_cr);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_vmrun);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_vmexit);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_vmexit_inject);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_intr_vmexit);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_invlpga);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_skinit);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_intercepts);