/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * derived from drivers/kvm/kvm_main.c
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright (C) 2008 Qumranet, Inc.
 * Copyright IBM Corporation, 2008
 *
 * Authors:
 *   Avi Kivity   <avi@qumranet.com>
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Amit Shah    <amit.shah@qumranet.com>
 *   Ben-Ami Yassour <benami@il.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#include <linux/kvm_host.h>
#include "irq.h"
#include "mmu.h"
#include "i8254.h"
#include "tss.h"
#include "kvm_cache_regs.h"
#include "x86.h"

#include <linux/clocksource.h>
#include <linux/interrupt.h>
#include <linux/kvm.h>
#include <linux/fs.h>
#include <linux/vmalloc.h>
#include <linux/module.h>
#include <linux/mman.h>
#include <linux/highmem.h>
#include <linux/iommu.h>
#include <linux/intel-iommu.h>
#include <linux/cpufreq.h>
#include <linux/user-return-notifier.h>
#include <linux/srcu.h>
#include <trace/events/kvm.h>
#undef TRACE_INCLUDE_FILE
#define CREATE_TRACE_POINTS
#include "trace.h"

#include <asm/debugreg.h>
#include <asm/uaccess.h>
#include <asm/msr.h>
#include <asm/desc.h>
#include <asm/mtrr.h>
#include <asm/mce.h>

#define MAX_IO_MSRS 256
#define CR0_RESERVED_BITS						\
	(~(unsigned long)(X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS \
			  | X86_CR0_ET | X86_CR0_NE | X86_CR0_WP | X86_CR0_AM \
			  | X86_CR0_NW | X86_CR0_CD | X86_CR0_PG))
#define CR4_RESERVED_BITS						\
	(~(unsigned long)(X86_CR4_VME | X86_CR4_PVI | X86_CR4_TSD | X86_CR4_DE\
			  | X86_CR4_PSE | X86_CR4_PAE | X86_CR4_MCE	\
			  | X86_CR4_PGE | X86_CR4_PCE | X86_CR4_OSFXSR	\
			  | X86_CR4_OSXMMEXCPT | X86_CR4_VMXE))

#define CR8_RESERVED_BITS (~(unsigned long)X86_CR8_TPR)

#define KVM_MAX_MCE_BANKS 32
#define KVM_MCE_CAP_SUPPORTED MCG_CTL_P
/* EFER defaults:
 * - enable syscall per default because it's emulated by KVM
 * - enable LME and LMA per default on 64-bit KVM
 */
#ifdef CONFIG_X86_64
static u64 __read_mostly efer_reserved_bits = 0xfffffffffffffafeULL;
#else
static u64 __read_mostly efer_reserved_bits = 0xfffffffffffffffeULL;
#endif

#define VM_STAT(x) offsetof(struct kvm, stat.x), KVM_STAT_VM
#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU

static void update_cr8_intercept(struct kvm_vcpu *vcpu);
static int kvm_dev_ioctl_get_supported_cpuid(struct kvm_cpuid2 *cpuid,
				    struct kvm_cpuid_entry2 __user *entries);

struct kvm_x86_ops *kvm_x86_ops;
EXPORT_SYMBOL_GPL(kvm_x86_ops);

int ignore_msrs = 0;
module_param_named(ignore_msrs, ignore_msrs, bool, S_IRUGO | S_IWUSR);

#define KVM_NR_SHARED_MSRS 16

struct kvm_shared_msrs_global {
	int nr;
	u32 msrs[KVM_NR_SHARED_MSRS];
};

struct kvm_shared_msrs {
	struct user_return_notifier urn;
	bool registered;
	struct kvm_shared_msr_values {
		u64 host;
		u64 curr;
	} values[KVM_NR_SHARED_MSRS];
};

static struct kvm_shared_msrs_global __read_mostly shared_msrs_global;
static DEFINE_PER_CPU(struct kvm_shared_msrs, shared_msrs);

struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "pf_fixed", VCPU_STAT(pf_fixed) },
	{ "pf_guest", VCPU_STAT(pf_guest) },
	{ "tlb_flush", VCPU_STAT(tlb_flush) },
	{ "invlpg", VCPU_STAT(invlpg) },
	{ "exits", VCPU_STAT(exits) },
	{ "io_exits", VCPU_STAT(io_exits) },
	{ "mmio_exits", VCPU_STAT(mmio_exits) },
	{ "signal_exits", VCPU_STAT(signal_exits) },
	{ "irq_window", VCPU_STAT(irq_window_exits) },
	{ "nmi_window", VCPU_STAT(nmi_window_exits) },
	{ "halt_exits", VCPU_STAT(halt_exits) },
	{ "halt_wakeup", VCPU_STAT(halt_wakeup) },
	{ "hypercalls", VCPU_STAT(hypercalls) },
	{ "request_irq", VCPU_STAT(request_irq_exits) },
	{ "irq_exits", VCPU_STAT(irq_exits) },
	{ "host_state_reload", VCPU_STAT(host_state_reload) },
	{ "efer_reload", VCPU_STAT(efer_reload) },
	{ "fpu_reload", VCPU_STAT(fpu_reload) },
	{ "insn_emulation", VCPU_STAT(insn_emulation) },
	{ "insn_emulation_fail", VCPU_STAT(insn_emulation_fail) },
	{ "irq_injections", VCPU_STAT(irq_injections) },
	{ "nmi_injections", VCPU_STAT(nmi_injections) },
	{ "mmu_shadow_zapped", VM_STAT(mmu_shadow_zapped) },
	{ "mmu_pte_write", VM_STAT(mmu_pte_write) },
	{ "mmu_pte_updated", VM_STAT(mmu_pte_updated) },
	{ "mmu_pde_zapped", VM_STAT(mmu_pde_zapped) },
	{ "mmu_flooded", VM_STAT(mmu_flooded) },
	{ "mmu_recycled", VM_STAT(mmu_recycled) },
	{ "mmu_cache_miss", VM_STAT(mmu_cache_miss) },
	{ "mmu_unsync", VM_STAT(mmu_unsync) },
	{ "remote_tlb_flush", VM_STAT(remote_tlb_flush) },
	{ "largepages", VM_STAT(lpages) },
	{ NULL }
};

static void kvm_on_user_return(struct user_return_notifier *urn)
{
	unsigned slot;
	struct kvm_shared_msrs *locals
		= container_of(urn, struct kvm_shared_msrs, urn);
	struct kvm_shared_msr_values *values;

	for (slot = 0; slot < shared_msrs_global.nr; ++slot) {
		values = &locals->values[slot];
		if (values->host != values->curr) {
			wrmsrl(shared_msrs_global.msrs[slot], values->host);
			values->curr = values->host;
		}
	}
	locals->registered = false;
	user_return_notifier_unregister(urn);
}

static void shared_msr_update(unsigned slot, u32 msr)
{
	struct kvm_shared_msrs *smsr;
	u64 value;

	smsr = &__get_cpu_var(shared_msrs);
	/* only read here; nobody should be modifying it at this point,
	 * so no lock is needed */
	if (slot >= shared_msrs_global.nr) {
		printk(KERN_ERR "kvm: invalid MSR slot!");
		return;
	}
	rdmsrl_safe(msr, &value);
	smsr->values[slot].host = value;
	smsr->values[slot].curr = value;
}

void kvm_define_shared_msr(unsigned slot, u32 msr)
{
	if (slot >= shared_msrs_global.nr)
		shared_msrs_global.nr = slot + 1;
	shared_msrs_global.msrs[slot] = msr;
	/* make sure the update to shared_msrs_global is visible before use */
	smp_wmb();
}
EXPORT_SYMBOL_GPL(kvm_define_shared_msr);

static void kvm_shared_msr_cpu_online(void)
{
	unsigned i;

	for (i = 0; i < shared_msrs_global.nr; ++i)
		shared_msr_update(i, shared_msrs_global.msrs[i]);
}

void kvm_set_shared_msr(unsigned slot, u64 value, u64 mask)
{
	struct kvm_shared_msrs *smsr = &__get_cpu_var(shared_msrs);

	if (((value ^ smsr->values[slot].curr) & mask) == 0)
		return;
	smsr->values[slot].curr = value;
	wrmsrl(shared_msrs_global.msrs[slot], value);
	if (!smsr->registered) {
		smsr->urn.on_user_return = kvm_on_user_return;
		user_return_notifier_register(&smsr->urn);
		smsr->registered = true;
	}
}
EXPORT_SYMBOL_GPL(kvm_set_shared_msr);
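
/*
 * Illustrative sketch (annotation, not part of the original source): a
 * vendor module such as vmx.c would use this facility roughly as follows,
 * assuming slot 0 had been reserved for MSR_K6_STAR:
 *
 *	kvm_define_shared_msr(0, MSR_K6_STAR);    (at module init)
 *	kvm_set_shared_msr(0, guest_val, -1ull);  (before entering the guest)
 *
 * The host value is then restored lazily by kvm_on_user_return() above,
 * only when the CPU actually returns to userspace; this avoids a wrmsr
 * on every VM exit.
 */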
static void drop_user_return_notifiers(void *ignore)
{
	struct kvm_shared_msrs *smsr = &__get_cpu_var(shared_msrs);

	if (smsr->registered)
		kvm_on_user_return(&smsr->urn);
}

unsigned long segment_base(u16 selector)
{
	struct descriptor_table gdt;
	struct desc_struct *d;
	unsigned long table_base;
	unsigned long v;

	if (selector == 0)
		return 0;

	kvm_get_gdt(&gdt);
	table_base = gdt.base;

	if (selector & 4) {		/* from ldt */
		u16 ldt_selector = kvm_read_ldt();

		table_base = segment_base(ldt_selector);
	}
	d = (struct desc_struct *)(table_base + (selector & ~7));
	v = get_desc_base(d);
#ifdef CONFIG_X86_64
	if (d->s == 0 && (d->type == 2 || d->type == 9 || d->type == 11))
		v |= ((unsigned long)((struct ldttss_desc64 *)d)->base3) << 32;
#endif
	return v;
}
EXPORT_SYMBOL_GPL(segment_base);

u64 kvm_get_apic_base(struct kvm_vcpu *vcpu)
{
	if (irqchip_in_kernel(vcpu->kvm))
		return vcpu->arch.apic_base;
	else
		return vcpu->arch.apic_base;
}
EXPORT_SYMBOL_GPL(kvm_get_apic_base);

void kvm_set_apic_base(struct kvm_vcpu *vcpu, u64 data)
{
	/* TODO: reserve bits check */
	if (irqchip_in_kernel(vcpu->kvm))
		kvm_lapic_set_base(vcpu, data);
	else
		vcpu->arch.apic_base = data;
}
EXPORT_SYMBOL_GPL(kvm_set_apic_base);

#define EXCPT_BENIGN		0
#define EXCPT_CONTRIBUTORY	1
#define EXCPT_PF		2

static int exception_class(int vector)
{
	switch (vector) {
	case PF_VECTOR:
		return EXCPT_PF;
	case DE_VECTOR:
	case TS_VECTOR:
	case NP_VECTOR:
	case SS_VECTOR:
	case GP_VECTOR:
		return EXCPT_CONTRIBUTORY;
	default:
		break;
	}
	return EXCPT_BENIGN;
}

static void kvm_multiple_exception(struct kvm_vcpu *vcpu,
		unsigned nr, bool has_error, u32 error_code)
{
	u32 prev_nr;
	int class1, class2;

	if (!vcpu->arch.exception.pending) {
	queue:
		vcpu->arch.exception.pending = true;
		vcpu->arch.exception.has_error_code = has_error;
		vcpu->arch.exception.nr = nr;
		vcpu->arch.exception.error_code = error_code;
		return;
	}
	/* a previous exception is still pending; decide how the two combine */
	prev_nr = vcpu->arch.exception.nr;
	if (prev_nr == DF_VECTOR) {
		/* triple fault -> shutdown */
		set_bit(KVM_REQ_TRIPLE_FAULT, &vcpu->requests);
		return;
	}
	class1 = exception_class(prev_nr);
	class2 = exception_class(nr);
	if ((class1 == EXCPT_CONTRIBUTORY && class2 == EXCPT_CONTRIBUTORY)
		|| (class1 == EXCPT_PF && class2 != EXCPT_BENIGN)) {
		/* generate double fault per SDM Table 5-5 */
		vcpu->arch.exception.pending = true;
		vcpu->arch.exception.has_error_code = true;
		vcpu->arch.exception.nr = DF_VECTOR;
		vcpu->arch.exception.error_code = 0;
	} else
		/* replace the previous exception with the new one, in the
		   hope that instruction re-execution will regenerate the
		   lost exception */
		goto queue;
}
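
/*
 * Worked example (annotation, not original code): if a #PF is pending and
 * a #GP is raised while delivering it, class1 == EXCPT_PF and class2 is
 * contributory, so the pair is promoted to #DF per SDM Table 5-5.  A
 * benign exception (e.g. #DB) arriving on top of anything pending simply
 * replaces it via the queue: label, and a new fault on top of a pending
 * #DF takes the triple-fault path above and shuts the guest down.
 */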
void kvm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr)
{
	kvm_multiple_exception(vcpu, nr, false, 0);
}
EXPORT_SYMBOL_GPL(kvm_queue_exception);

void kvm_inject_page_fault(struct kvm_vcpu *vcpu, unsigned long addr,
			   u32 error_code)
{
	++vcpu->stat.pf_guest;
	vcpu->arch.cr2 = addr;
	kvm_queue_exception_e(vcpu, PF_VECTOR, error_code);
}

void kvm_inject_nmi(struct kvm_vcpu *vcpu)
{
	vcpu->arch.nmi_pending = 1;
}
EXPORT_SYMBOL_GPL(kvm_inject_nmi);

void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code)
{
	kvm_multiple_exception(vcpu, nr, true, error_code);
}
EXPORT_SYMBOL_GPL(kvm_queue_exception_e);

/*
 * Checks if cpl <= required_cpl; if true, return true.  Otherwise queue
 * a #GP and return false.
 */
bool kvm_require_cpl(struct kvm_vcpu *vcpu, int required_cpl)
{
	if (kvm_x86_ops->get_cpl(vcpu) <= required_cpl)
		return true;
	kvm_queue_exception_e(vcpu, GP_VECTOR, 0);
	return false;
}
EXPORT_SYMBOL_GPL(kvm_require_cpl);
/*
 * Load the PAE PDPTRs.  Return true if they are all valid.
 */
int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3)
{
	gfn_t pdpt_gfn = cr3 >> PAGE_SHIFT;
	unsigned offset = ((cr3 & (PAGE_SIZE-1)) >> 5) << 2;
	int i;
	int ret;
	u64 pdpte[ARRAY_SIZE(vcpu->arch.pdptrs)];

	ret = kvm_read_guest_page(vcpu->kvm, pdpt_gfn, pdpte,
				  offset * sizeof(u64), sizeof(pdpte));
	if (ret < 0) {
		ret = 0;
		goto out;
	}
	for (i = 0; i < ARRAY_SIZE(pdpte); ++i) {
		if (is_present_gpte(pdpte[i]) &&
		    (pdpte[i] & vcpu->arch.mmu.rsvd_bits_mask[0][2])) {
			ret = 0;
			goto out;
		}
	}
	ret = 1;

	memcpy(vcpu->arch.pdptrs, pdpte, sizeof(vcpu->arch.pdptrs));
	__set_bit(VCPU_EXREG_PDPTR,
		  (unsigned long *)&vcpu->arch.regs_avail);
	__set_bit(VCPU_EXREG_PDPTR,
		  (unsigned long *)&vcpu->arch.regs_dirty);
out:
	return ret;
}
EXPORT_SYMBOL_GPL(load_pdptrs);
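
/*
 * Annotation (not original code): in PAE mode, CR3 bits 31:5 point at a
 * 32-byte-aligned table of four 8-byte PDPTEs, so
 *
 *	offset = ((cr3 & (PAGE_SIZE-1)) >> 5) << 2
 *
 * turns the low CR3 bits into an index counted in whole u64 entries
 * within the page; the guest-page read above then fetches all four
 * entries at once and rejects any present entry that sets reserved bits.
 */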
static bool pdptrs_changed(struct kvm_vcpu *vcpu)
{
	u64 pdpte[ARRAY_SIZE(vcpu->arch.pdptrs)];
	bool changed = true;
	int r;

	if (is_long_mode(vcpu) || !is_pae(vcpu))
		return false;

	if (!test_bit(VCPU_EXREG_PDPTR,
		      (unsigned long *)&vcpu->arch.regs_avail))
		return true;

	r = kvm_read_guest(vcpu->kvm, vcpu->arch.cr3 & ~31u, pdpte, sizeof(pdpte));
	if (r < 0)
		goto out;
	changed = memcmp(pdpte, vcpu->arch.pdptrs, sizeof(pdpte)) != 0;
out:
	return changed;
}

void kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
{
	cr0 |= X86_CR0_ET;

#ifdef CONFIG_X86_64
	if (cr0 & 0xffffffff00000000UL) {
		printk(KERN_DEBUG "set_cr0: 0x%lx #GP, reserved bits 0x%lx\n",
		       cr0, kvm_read_cr0(vcpu));
		kvm_inject_gp(vcpu, 0);
		return;
	}
#endif

	cr0 &= ~CR0_RESERVED_BITS;

	if ((cr0 & X86_CR0_NW) && !(cr0 & X86_CR0_CD)) {
		printk(KERN_DEBUG "set_cr0: #GP, CD == 0 && NW == 1\n");
		kvm_inject_gp(vcpu, 0);
		return;
	}

	if ((cr0 & X86_CR0_PG) && !(cr0 & X86_CR0_PE)) {
		printk(KERN_DEBUG "set_cr0: #GP, set PG flag "
		       "and a clear PE flag\n");
		kvm_inject_gp(vcpu, 0);
		return;
	}

	if (!is_paging(vcpu) && (cr0 & X86_CR0_PG)) {
#ifdef CONFIG_X86_64
		if ((vcpu->arch.shadow_efer & EFER_LME)) {
			int cs_db, cs_l;

			if (!is_pae(vcpu)) {
				printk(KERN_DEBUG "set_cr0: #GP, start paging "
				       "in long mode while PAE is disabled\n");
				kvm_inject_gp(vcpu, 0);
				return;
			}
			kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
			if (cs_l) {
				printk(KERN_DEBUG "set_cr0: #GP, start paging "
				       "in long mode while CS.L == 1\n");
				kvm_inject_gp(vcpu, 0);
				return;
			}
		} else
#endif
		if (is_pae(vcpu) && !load_pdptrs(vcpu, vcpu->arch.cr3)) {
			printk(KERN_DEBUG "set_cr0: #GP, pdptrs "
			       "reserved bits\n");
			kvm_inject_gp(vcpu, 0);
			return;
		}
	}

	kvm_x86_ops->set_cr0(vcpu, cr0);
	vcpu->arch.cr0 = cr0;

	kvm_mmu_reset_context(vcpu);
	return;
}
EXPORT_SYMBOL_GPL(kvm_set_cr0);

void kvm_lmsw(struct kvm_vcpu *vcpu, unsigned long msw)
{
	kvm_set_cr0(vcpu, kvm_read_cr0_bits(vcpu, ~0x0ful) | (msw & 0x0f));
}
EXPORT_SYMBOL_GPL(kvm_lmsw);
void kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
{
	unsigned long old_cr4 = kvm_read_cr4(vcpu);
	unsigned long pdptr_bits = X86_CR4_PGE | X86_CR4_PSE | X86_CR4_PAE;

	if (cr4 & CR4_RESERVED_BITS) {
		printk(KERN_DEBUG "set_cr4: #GP, reserved bits\n");
		kvm_inject_gp(vcpu, 0);
		return;
	}

	if (is_long_mode(vcpu)) {
		if (!(cr4 & X86_CR4_PAE)) {
			printk(KERN_DEBUG "set_cr4: #GP, clearing PAE while "
			       "in long mode\n");
			kvm_inject_gp(vcpu, 0);
			return;
		}
	} else if (is_paging(vcpu) && (cr4 & X86_CR4_PAE)
		   && ((cr4 ^ old_cr4) & pdptr_bits)
		   && !load_pdptrs(vcpu, vcpu->arch.cr3)) {
		printk(KERN_DEBUG "set_cr4: #GP, pdptrs reserved bits\n");
		kvm_inject_gp(vcpu, 0);
		return;
	}

	if (cr4 & X86_CR4_VMXE) {
		printk(KERN_DEBUG "set_cr4: #GP, setting VMXE\n");
		kvm_inject_gp(vcpu, 0);
		return;
	}
	kvm_x86_ops->set_cr4(vcpu, cr4);
	vcpu->arch.cr4 = cr4;
	vcpu->arch.mmu.base_role.cr4_pge = (cr4 & X86_CR4_PGE) && !tdp_enabled;
	kvm_mmu_reset_context(vcpu);
}
EXPORT_SYMBOL_GPL(kvm_set_cr4);

void kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
{
	if (cr3 == vcpu->arch.cr3 && !pdptrs_changed(vcpu)) {
		kvm_mmu_sync_roots(vcpu);
		kvm_mmu_flush_tlb(vcpu);
		return;
	}

	if (is_long_mode(vcpu)) {
		if (cr3 & CR3_L_MODE_RESERVED_BITS) {
			printk(KERN_DEBUG "set_cr3: #GP, reserved bits\n");
			kvm_inject_gp(vcpu, 0);
			return;
		}
	} else {
		if (is_pae(vcpu)) {
			if (cr3 & CR3_PAE_RESERVED_BITS) {
				printk(KERN_DEBUG
				       "set_cr3: #GP, reserved bits\n");
				kvm_inject_gp(vcpu, 0);
				return;
			}
			if (is_paging(vcpu) && !load_pdptrs(vcpu, cr3)) {
				printk(KERN_DEBUG "set_cr3: #GP, pdptrs "
				       "reserved bits\n");
				kvm_inject_gp(vcpu, 0);
				return;
			}
		}
		/*
		 * We don't check reserved bits in nonpae mode, because
		 * this isn't enforced, and VMware depends on this.
		 */
	}

	/*
	 * Does the new cr3 value map to physical memory? (Note, we
	 * catch an invalid cr3 even in real-mode, because it would
	 * cause trouble later on when we turn on paging anyway.)
	 *
	 * A real CPU would silently accept an invalid cr3 and would
	 * attempt to use it - with largely undefined (and often hard
	 * to debug) behavior on the guest side.
	 */
	if (unlikely(!gfn_to_memslot(vcpu->kvm, cr3 >> PAGE_SHIFT)))
		kvm_inject_gp(vcpu, 0);
	else {
		vcpu->arch.cr3 = cr3;
		vcpu->arch.mmu.new_cr3(vcpu);
	}
}
EXPORT_SYMBOL_GPL(kvm_set_cr3);

void kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8)
{
	if (cr8 & CR8_RESERVED_BITS) {
		printk(KERN_DEBUG "set_cr8: #GP, reserved bits 0x%lx\n", cr8);
		kvm_inject_gp(vcpu, 0);
		return;
	}
	if (irqchip_in_kernel(vcpu->kvm))
		kvm_lapic_set_tpr(vcpu, cr8);
	else
		vcpu->arch.cr8 = cr8;
}
EXPORT_SYMBOL_GPL(kvm_set_cr8);

unsigned long kvm_get_cr8(struct kvm_vcpu *vcpu)
{
	if (irqchip_in_kernel(vcpu->kvm))
		return kvm_lapic_get_cr8(vcpu);
	else
		return vcpu->arch.cr8;
}
EXPORT_SYMBOL_GPL(kvm_get_cr8);

static inline u32 bit(int bitno)
{
	return 1 << (bitno & 31);
}
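
/*
 * Annotation (not original code): the "& 31" matters because callers pass
 * X86_FEATURE_* constants, which encode a feature as word * 32 + bit.
 * Masking to the low five bits recovers the bit position within the
 * 32-bit CPUID word being tested (see e.g. the EFER_SVME check in
 * set_efer(), which tests feat->ecx against bit(X86_FEATURE_SVM)) and
 * keeps the shift well-defined.
 */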
/*
 * List of msr numbers which we expose to userspace through KVM_GET_MSRS
 * and KVM_SET_MSRS, and KVM_GET_MSR_INDEX_LIST.
 *
 * This list is modified at module load time to reflect the
 * capabilities of the host cpu.  The capability test skips MSRs that are
 * kvm-specific; those are placed at the beginning of the list.
 */
#define KVM_SAVE_MSRS_BEGIN	5
static u32 msrs_to_save[] = {
	MSR_KVM_SYSTEM_TIME, MSR_KVM_WALL_CLOCK,
	HV_X64_MSR_GUEST_OS_ID, HV_X64_MSR_HYPERCALL,
	HV_X64_MSR_APIC_ASSIST_PAGE,
	MSR_IA32_SYSENTER_CS, MSR_IA32_SYSENTER_ESP, MSR_IA32_SYSENTER_EIP,
	MSR_K6_STAR,
#ifdef CONFIG_X86_64
	MSR_CSTAR, MSR_KERNEL_GS_BASE, MSR_SYSCALL_MASK, MSR_LSTAR,
#endif
	MSR_IA32_TSC, MSR_IA32_PERF_STATUS, MSR_IA32_CR_PAT, MSR_VM_HSAVE_PA
};

static unsigned num_msrs_to_save;

static u32 emulated_msrs[] = {
	MSR_IA32_MISC_ENABLE,
};

static void set_efer(struct kvm_vcpu *vcpu, u64 efer)
{
	if (efer & efer_reserved_bits) {
		printk(KERN_DEBUG "set_efer: 0x%llx #GP, reserved bits\n",
		       efer);
		kvm_inject_gp(vcpu, 0);
		return;
	}

	if (is_paging(vcpu)
	    && (vcpu->arch.shadow_efer & EFER_LME) != (efer & EFER_LME)) {
		printk(KERN_DEBUG "set_efer: #GP, change LME while paging\n");
		kvm_inject_gp(vcpu, 0);
		return;
	}

	if (efer & EFER_FFXSR) {
		struct kvm_cpuid_entry2 *feat;

		feat = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
		if (!feat || !(feat->edx & bit(X86_FEATURE_FXSR_OPT))) {
			printk(KERN_DEBUG "set_efer: #GP, enable FFXSR w/o CPUID capability\n");
			kvm_inject_gp(vcpu, 0);
			return;
		}
	}

	if (efer & EFER_SVME) {
		struct kvm_cpuid_entry2 *feat;

		feat = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
		if (!feat || !(feat->ecx & bit(X86_FEATURE_SVM))) {
			printk(KERN_DEBUG "set_efer: #GP, enable SVM w/o SVM\n");
			kvm_inject_gp(vcpu, 0);
			return;
		}
	}

	kvm_x86_ops->set_efer(vcpu, efer);

	efer &= ~EFER_LMA;
	efer |= vcpu->arch.shadow_efer & EFER_LMA;

	vcpu->arch.shadow_efer = efer;

	vcpu->arch.mmu.base_role.nxe = (efer & EFER_NX) && !tdp_enabled;
	kvm_mmu_reset_context(vcpu);
}

void kvm_enable_efer_bits(u64 mask)
{
	efer_reserved_bits &= ~mask;
}
EXPORT_SYMBOL_GPL(kvm_enable_efer_bits);
/*
 * Writes the msr value into the appropriate "register".
 * Returns 0 on success, non-0 otherwise.
 * Assumes vcpu_load() was already called.
 */
int kvm_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
{
	return kvm_x86_ops->set_msr(vcpu, msr_index, data);
}

/*
 * Adapt set_msr() to msr_io()'s calling convention
 */
static int do_set_msr(struct kvm_vcpu *vcpu, unsigned index, u64 *data)
{
	return kvm_set_msr(vcpu, index, *data);
}

static void kvm_write_wall_clock(struct kvm *kvm, gpa_t wall_clock)
{
	static int version;
	struct pvclock_wall_clock wc;
	struct timespec boot;

	if (!wall_clock)
		return;

	version++;

	kvm_write_guest(kvm, wall_clock, &version, sizeof(version));

	/*
	 * The guest calculates current wall clock time by adding
	 * system time (updated by kvm_write_guest_time below) to the
	 * wall clock specified here.  guest system time equals host
	 * system time for us, thus we must fill in host boot time here.
	 */
	getboottime(&boot);

	wc.sec = boot.tv_sec;
	wc.nsec = boot.tv_nsec;
	wc.version = version;

	kvm_write_guest(kvm, wall_clock, &wc, sizeof(wc));

	version++;
	kvm_write_guest(kvm, wall_clock, &version, sizeof(version));
}
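
/*
 * Annotation (not original code): the two version writes bracket the
 * update like a seqlock.  A guest-side reader would retry whenever it
 * sees an odd version, or sees the version change across the read,
 * along these lines:
 *
 *	do {
 *		v = wc->version;
 *		rmb();
 *		sec = wc->sec; nsec = wc->nsec;
 *		rmb();
 *	} while ((v & 1) || v != wc->version);
 *
 * so it never consumes a half-written wall clock value.
 */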
static uint32_t div_frac(uint32_t dividend, uint32_t divisor)
{
	uint32_t quotient, remainder;

	/* Don't try to replace with do_div(), this one calculates
	 * "(dividend << 32) / divisor" */
	__asm__ ( "divl %4"
		  : "=a" (quotient), "=d" (remainder)
		  : "0" (0), "1" (dividend), "r" (divisor) );
	return quotient;
}
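
/*
 * Annotation (not original code): divl divides the 64-bit value edx:eax
 * by its operand, so loading 0 into eax and the dividend into edx yields
 * (dividend << 32) / divisor.  The result is a 32.32 fixed-point
 * fraction; for example, div_frac(1, 3) returns 0x55555555, i.e. 1/3
 * scaled by 2^32.
 */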
static void kvm_set_time_scale(uint32_t tsc_khz, struct pvclock_vcpu_time_info *hv_clock)
{
	uint64_t nsecs = 1000000000LL;
	int32_t  shift = 0;
	uint64_t tps64;
	uint32_t tps32;

	tps64 = tsc_khz * 1000LL;
	while (tps64 > nsecs * 2) {
		tps64 >>= 1;
		shift--;
	}

	tps32 = (uint32_t)tps64;
	while (tps32 <= (uint32_t)nsecs) {
		tps32 <<= 1;
		shift++;
	}

	hv_clock->tsc_shift = shift;
	hv_clock->tsc_to_system_mul = div_frac(nsecs, tps32);

	pr_debug("%s: tsc_khz %u, tsc_shift %d, tsc_mul %u\n",
		 __func__, tsc_khz, hv_clock->tsc_shift,
		 hv_clock->tsc_to_system_mul);
}
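
/*
 * Annotation (not original code): the scaling chosen here lets the guest
 * convert a TSC delta to nanoseconds, roughly:
 *
 *	delta = tsc - tsc_timestamp;
 *	if (tsc_shift >= 0)
 *		delta <<= tsc_shift;
 *	else
 *		delta >>= -tsc_shift;
 *	ns = (delta * tsc_to_system_mul) >> 32;
 *
 * The loops above pre-shift the ticks-per-second value to within a
 * factor of two of nsecs, so the 32.32 multiplier computed by div_frac()
 * stays near 1 and preserves precision.
 */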
static DEFINE_PER_CPU(unsigned long, cpu_tsc_khz);

static void kvm_write_guest_time(struct kvm_vcpu *v)
{
	struct timespec ts;
	unsigned long flags;
	struct kvm_vcpu_arch *vcpu = &v->arch;
	void *shared_kaddr;
	unsigned long this_tsc_khz;

	if (!vcpu->time_page)
		return;

	this_tsc_khz = get_cpu_var(cpu_tsc_khz);
	if (unlikely(vcpu->hv_clock_tsc_khz != this_tsc_khz)) {
		kvm_set_time_scale(this_tsc_khz, &vcpu->hv_clock);
		vcpu->hv_clock_tsc_khz = this_tsc_khz;
	}
	put_cpu_var(cpu_tsc_khz);

	/* Keep irq disabled to prevent changes to the clock */
	local_irq_save(flags);
	kvm_get_msr(v, MSR_IA32_TSC, &vcpu->hv_clock.tsc_timestamp);
	ktime_get_ts(&ts);
	monotonic_to_bootbased(&ts);
	local_irq_restore(flags);

	/* With all the info we got, fill in the values */
	vcpu->hv_clock.system_time = ts.tv_nsec +
		(NSEC_PER_SEC * (u64)ts.tv_sec) + v->kvm->arch.kvmclock_offset;

	/*
	 * The interface expects us to write an even number signaling that the
	 * update is finished.  Since the guest won't see the intermediate
	 * state, we just increase by 2 at the end.
	 */
	vcpu->hv_clock.version += 2;

	shared_kaddr = kmap_atomic(vcpu->time_page, KM_USER0);

	memcpy(shared_kaddr + vcpu->time_offset, &vcpu->hv_clock,
	       sizeof(vcpu->hv_clock));

	kunmap_atomic(shared_kaddr, KM_USER0);

	mark_page_dirty(v->kvm, vcpu->time >> PAGE_SHIFT);
}
static int kvm_request_guest_time_update(struct kvm_vcpu *v)
{
	struct kvm_vcpu_arch *vcpu = &v->arch;

	if (!vcpu->time_page)
		return 0;
	set_bit(KVM_REQ_KVMCLOCK_UPDATE, &v->requests);
	return 1;
}

static bool msr_mtrr_valid(unsigned msr)
{
	switch (msr) {
	case 0x200 ... 0x200 + 2 * KVM_NR_VAR_MTRR - 1:
	case MSR_MTRRfix64K_00000:
	case MSR_MTRRfix16K_80000:
	case MSR_MTRRfix16K_A0000:
	case MSR_MTRRfix4K_C0000:
	case MSR_MTRRfix4K_C8000:
	case MSR_MTRRfix4K_D0000:
	case MSR_MTRRfix4K_D8000:
	case MSR_MTRRfix4K_E0000:
	case MSR_MTRRfix4K_E8000:
	case MSR_MTRRfix4K_F0000:
	case MSR_MTRRfix4K_F8000:
	case MSR_MTRRdefType:
	case MSR_IA32_CR_PAT:
		return true;
	case 0x2f8:
		return true;
	}
	return false;
}

static bool valid_pat_type(unsigned t)
{
	return t < 8 && (1 << t) & 0xf3;	/* 0, 1, 4, 5, 6, 7 */
}

static bool valid_mtrr_type(unsigned t)
{
	return t < 8 && (1 << t) & 0x73;	/* 0, 1, 4, 5, 6 */
}
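
/*
 * Annotation (not original code): both helpers test set membership with a
 * bitmask: (1 << t) & 0x73 is non-zero exactly for t in {0, 1, 4, 5, 6},
 * the valid MTRR memory types (UC, WC, WT, WP, WB), while the PAT mask
 * 0xf3 additionally allows type 7 (UC-).
 */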
static bool mtrr_valid(struct kvm_vcpu *vcpu, u32 msr, u64 data)
{
	int i;

	if (!msr_mtrr_valid(msr))
		return false;

	if (msr == MSR_IA32_CR_PAT) {
		for (i = 0; i < 8; i++)
			if (!valid_pat_type((data >> (i * 8)) & 0xff))
				return false;
		return true;
	} else if (msr == MSR_MTRRdefType) {
		if (data & ~0xcff)
			return false;
		return valid_mtrr_type(data & 0xff);
	} else if (msr >= MSR_MTRRfix64K_00000 && msr <= MSR_MTRRfix4K_F8000) {
		for (i = 0; i < 8 ; i++)
			if (!valid_mtrr_type((data >> (i * 8)) & 0xff))
				return false;
		return true;
	}

	/* variable MTRRs */
	return valid_mtrr_type(data & 0xff);
}

static int set_msr_mtrr(struct kvm_vcpu *vcpu, u32 msr, u64 data)
{
	u64 *p = (u64 *)&vcpu->arch.mtrr_state.fixed_ranges;

	if (!mtrr_valid(vcpu, msr, data))
		return 1;

	if (msr == MSR_MTRRdefType) {
		vcpu->arch.mtrr_state.def_type = data;
		vcpu->arch.mtrr_state.enabled = (data & 0xc00) >> 10;
	} else if (msr == MSR_MTRRfix64K_00000)
		p[0] = data;
	else if (msr == MSR_MTRRfix16K_80000 || msr == MSR_MTRRfix16K_A0000)
		p[1 + msr - MSR_MTRRfix16K_80000] = data;
	else if (msr >= MSR_MTRRfix4K_C0000 && msr <= MSR_MTRRfix4K_F8000)
		p[3 + msr - MSR_MTRRfix4K_C0000] = data;
	else if (msr == MSR_IA32_CR_PAT)
		vcpu->arch.pat = data;
	else {	/* Variable MTRRs */
		int idx, is_mtrr_mask;
		u64 *pt;

		idx = (msr - 0x200) / 2;
		is_mtrr_mask = msr - 0x200 - 2 * idx;
		if (!is_mtrr_mask)
			pt =
			  (u64 *)&vcpu->arch.mtrr_state.var_ranges[idx].base_lo;
		else
			pt =
			  (u64 *)&vcpu->arch.mtrr_state.var_ranges[idx].mask_lo;
		*pt = data;
	}

	kvm_mmu_reset_context(vcpu);
	return 0;
}
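
/*
 * Annotation (not original code): variable-range MTRRs come in base/mask
 * pairs starting at MSR 0x200, i.e. MTRRphysBase(n) = 0x200 + 2n and
 * MTRRphysMask(n) = 0x201 + 2n.  For example, a write to MSR 0x205 gives
 * idx = 2 and is_mtrr_mask = 1, selecting the mask register of variable
 * range 2.
 */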
static int set_msr_mce(struct kvm_vcpu *vcpu, u32 msr, u64 data)
{
	u64 mcg_cap = vcpu->arch.mcg_cap;
	unsigned bank_num = mcg_cap & 0xff;

	switch (msr) {
	case MSR_IA32_MCG_STATUS:
		vcpu->arch.mcg_status = data;
		break;
	case MSR_IA32_MCG_CTL:
		if (!(mcg_cap & MCG_CTL_P))
			return 1;
		if (data != 0 && data != ~(u64)0)
			return -1;
		vcpu->arch.mcg_ctl = data;
		break;
	default:
		if (msr >= MSR_IA32_MC0_CTL &&
		    msr < MSR_IA32_MC0_CTL + 4 * bank_num) {
			u32 offset = msr - MSR_IA32_MC0_CTL;
			/* only 0 or all 1s can be written to IA32_MCi_CTL */
			if ((offset & 0x3) == 0 &&
			    data != 0 && data != ~(u64)0)
				return -1;
			vcpu->arch.mce_banks[offset] = data;
			break;
		}
		return 1;
	}
	return 0;
}

static int xen_hvm_config(struct kvm_vcpu *vcpu, u64 data)
{
	struct kvm *kvm = vcpu->kvm;
	int lm = is_long_mode(vcpu);
	u8 *blob_addr = lm ? (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_64
		: (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
	u8 blob_size = lm ? kvm->arch.xen_hvm_config.blob_size_64
		: kvm->arch.xen_hvm_config.blob_size_32;
	u32 page_num = data & ~PAGE_MASK;
	u64 page_addr = data & PAGE_MASK;
	u8 *page;
	int r;

	r = -E2BIG;
	if (page_num >= blob_size)
		goto out;
	r = -ENOMEM;
	page = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!page)
		goto out;
	r = -EFAULT;
	if (copy_from_user(page, blob_addr + (page_num * PAGE_SIZE), PAGE_SIZE))
		goto out_free;
	if (kvm_write_guest(kvm, page_addr, page, PAGE_SIZE))
		goto out_free;
	r = 0;
out_free:
	kfree(page);
out:
	return r;
}

static bool kvm_hv_hypercall_enabled(struct kvm *kvm)
{
	return kvm->arch.hv_hypercall & HV_X64_MSR_HYPERCALL_ENABLE;
}

static bool kvm_hv_msr_partition_wide(u32 msr)
{
	bool r = false;

	switch (msr) {
	case HV_X64_MSR_GUEST_OS_ID:
	case HV_X64_MSR_HYPERCALL:
		r = true;
		break;
	}

	return r;
}

static int set_msr_hyperv_pw(struct kvm_vcpu *vcpu, u32 msr, u64 data)
{
	struct kvm *kvm = vcpu->kvm;

	switch (msr) {
	case HV_X64_MSR_GUEST_OS_ID:
		kvm->arch.hv_guest_os_id = data;
		/* setting guest os id to zero disables hypercall page */
		if (!kvm->arch.hv_guest_os_id)
			kvm->arch.hv_hypercall &= ~HV_X64_MSR_HYPERCALL_ENABLE;
		break;
	case HV_X64_MSR_HYPERCALL: {
		u64 gfn;
		unsigned long addr;
		u8 instructions[4];

		/* if guest os id is not set hypercall should remain disabled */
		if (!kvm->arch.hv_guest_os_id)
			break;
		if (!(data & HV_X64_MSR_HYPERCALL_ENABLE)) {
			kvm->arch.hv_hypercall = data;
			break;
		}
		gfn = data >> HV_X64_MSR_HYPERCALL_PAGE_ADDRESS_SHIFT;
		addr = gfn_to_hva(kvm, gfn);
		if (kvm_is_error_hva(addr))
			return 1;
		kvm_x86_ops->patch_hypercall(vcpu, instructions);
		((unsigned char *)instructions)[3] = 0xc3; /* ret */
		if (copy_to_user((void __user *)addr, instructions, 4))
			return 1;
		kvm->arch.hv_hypercall = data;
		break;
	}
	default:
		pr_unimpl(vcpu, "HYPER-V unimplemented wrmsr: 0x%x "
			  "data 0x%llx\n", msr, data);
		return 1;
	}
	return 0;
}
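
/*
 * Annotation (not original code): the hypercall page set up above holds a
 * vendor-appropriate hypercall instruction (written by ->patch_hypercall)
 * followed by a ret (0xc3), so a Hyper-V aware guest can simply call into
 * the page without caring whether it runs on Intel or AMD hardware.
 */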
static int set_msr_hyperv(struct kvm_vcpu *vcpu, u32 msr, u64 data)
{
	switch (msr) {
	case HV_X64_MSR_APIC_ASSIST_PAGE: {
		unsigned long addr;

		if (!(data & HV_X64_MSR_APIC_ASSIST_PAGE_ENABLE)) {
			vcpu->arch.hv_vapic = data;
			break;
		}
		addr = gfn_to_hva(vcpu->kvm, data >>
				  HV_X64_MSR_APIC_ASSIST_PAGE_ADDRESS_SHIFT);
		if (kvm_is_error_hva(addr))
			return 1;
		if (clear_user((void __user *)addr, PAGE_SIZE))
			return 1;
		vcpu->arch.hv_vapic = data;
		break;
	}
	case HV_X64_MSR_EOI:
		return kvm_hv_vapic_msr_write(vcpu, APIC_EOI, data);
	case HV_X64_MSR_ICR:
		return kvm_hv_vapic_msr_write(vcpu, APIC_ICR, data);
	case HV_X64_MSR_TPR:
		return kvm_hv_vapic_msr_write(vcpu, APIC_TASKPRI, data);
	default:
		pr_unimpl(vcpu, "HYPER-V unimplemented wrmsr: 0x%x "
			  "data 0x%llx\n", msr, data);
		return 1;
	}

	return 0;
}

int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)
{
	switch (msr) {
	case MSR_EFER:
		set_efer(vcpu, data);
		break;
	case MSR_K7_HWCR:
		data &= ~(u64)0x40;	/* ignore flush filter disable */
		if (data != 0) {
			pr_unimpl(vcpu, "unimplemented HWCR wrmsr: 0x%llx\n",
				data);
			return 1;
		}
		break;
	case MSR_FAM10H_MMIO_CONF_BASE:
		if (data != 0) {
			pr_unimpl(vcpu, "unimplemented MMIO_CONF_BASE wrmsr: "
				"0x%llx\n", data);
			return 1;
		}
		break;
	case MSR_AMD64_NB_CFG:
		break;
	case MSR_IA32_DEBUGCTLMSR:
		if (!data) {
			/* We support the non-activated case already */
			break;
		} else if (data & ~(DEBUGCTLMSR_LBR | DEBUGCTLMSR_BTF)) {
			/* Values other than LBR and BTF are vendor-specific,
			   thus reserved and should throw a #GP */
			return 1;
		}
		pr_unimpl(vcpu, "%s: MSR_IA32_DEBUGCTLMSR 0x%llx, nop\n",
			__func__, data);
		break;
	case MSR_IA32_UCODE_REV:
	case MSR_IA32_UCODE_WRITE:
	case MSR_VM_HSAVE_PA:
	case MSR_AMD64_PATCH_LOADER:
		break;
	case 0x200 ... 0x2ff:
		return set_msr_mtrr(vcpu, msr, data);
	case MSR_IA32_APICBASE:
		kvm_set_apic_base(vcpu, data);
		break;
	case APIC_BASE_MSR ... APIC_BASE_MSR + 0x3ff:
		return kvm_x2apic_msr_write(vcpu, msr, data);
	case MSR_IA32_MISC_ENABLE:
		vcpu->arch.ia32_misc_enable_msr = data;
		break;
	case MSR_KVM_WALL_CLOCK:
		vcpu->kvm->arch.wall_clock = data;
		kvm_write_wall_clock(vcpu->kvm, data);
		break;
	case MSR_KVM_SYSTEM_TIME: {
		if (vcpu->arch.time_page) {
			kvm_release_page_dirty(vcpu->arch.time_page);
			vcpu->arch.time_page = NULL;
		}

		vcpu->arch.time = data;

		/* we verify if the enable bit is set... */
		if (!(data & 1))
			break;

		/* ...but clean it before doing the actual write */
		vcpu->arch.time_offset = data & ~(PAGE_MASK | 1);

		vcpu->arch.time_page =
				gfn_to_page(vcpu->kvm, data >> PAGE_SHIFT);

		if (is_error_page(vcpu->arch.time_page)) {
			kvm_release_page_clean(vcpu->arch.time_page);
			vcpu->arch.time_page = NULL;
		}

		kvm_request_guest_time_update(vcpu);
		break;
	}
	case MSR_IA32_MCG_CTL:
	case MSR_IA32_MCG_STATUS:
	case MSR_IA32_MC0_CTL ... MSR_IA32_MC0_CTL + 4 * KVM_MAX_MCE_BANKS - 1:
		return set_msr_mce(vcpu, msr, data);
  1037. /* Performance counters are not protected by a CPUID bit,
  1038. * so we should check all of them in the generic path for the sake of
  1039. * cross vendor migration.
  1040. * Writing a zero into the event select MSRs disables them,
  1041. * which we perfectly emulate ;-). Any other value should be at least
  1042. * reported, some guests depend on them.
  1043. */
  1044. case MSR_P6_EVNTSEL0:
  1045. case MSR_P6_EVNTSEL1:
  1046. case MSR_K7_EVNTSEL0:
  1047. case MSR_K7_EVNTSEL1:
  1048. case MSR_K7_EVNTSEL2:
  1049. case MSR_K7_EVNTSEL3:
  1050. if (data != 0)
  1051. pr_unimpl(vcpu, "unimplemented perfctr wrmsr: "
  1052. "0x%x data 0x%llx\n", msr, data);
  1053. break;
  1054. /* at least RHEL 4 unconditionally writes to the perfctr registers,
  1055. * so we ignore writes to make it happy.
  1056. */
  1057. case MSR_P6_PERFCTR0:
  1058. case MSR_P6_PERFCTR1:
  1059. case MSR_K7_PERFCTR0:
  1060. case MSR_K7_PERFCTR1:
  1061. case MSR_K7_PERFCTR2:
  1062. case MSR_K7_PERFCTR3:
  1063. pr_unimpl(vcpu, "unimplemented perfctr wrmsr: "
  1064. "0x%x data 0x%llx\n", msr, data);
  1065. break;
  1066. case HV_X64_MSR_GUEST_OS_ID ... HV_X64_MSR_SINT15:
  1067. if (kvm_hv_msr_partition_wide(msr)) {
  1068. int r;
  1069. mutex_lock(&vcpu->kvm->lock);
  1070. r = set_msr_hyperv_pw(vcpu, msr, data);
  1071. mutex_unlock(&vcpu->kvm->lock);
  1072. return r;
  1073. } else
  1074. return set_msr_hyperv(vcpu, msr, data);
  1075. break;
  1076. default:
  1077. if (msr && (msr == vcpu->kvm->arch.xen_hvm_config.msr))
  1078. return xen_hvm_config(vcpu, data);
  1079. if (!ignore_msrs) {
  1080. pr_unimpl(vcpu, "unhandled wrmsr: 0x%x data %llx\n",
  1081. msr, data);
  1082. return 1;
  1083. } else {
  1084. pr_unimpl(vcpu, "ignored wrmsr: 0x%x data %llx\n",
  1085. msr, data);
  1086. break;
  1087. }
  1088. }
  1089. return 0;
  1090. }
  1091. EXPORT_SYMBOL_GPL(kvm_set_msr_common);
/*
 * Reads an msr value (of 'msr_index') into 'pdata'.
 * Returns 0 on success, non-0 otherwise.
 * Assumes vcpu_load() was already called.
 */
int kvm_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
{
	return kvm_x86_ops->get_msr(vcpu, msr_index, pdata);
}

static int get_msr_mtrr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
{
	u64 *p = (u64 *)&vcpu->arch.mtrr_state.fixed_ranges;

	if (!msr_mtrr_valid(msr))
		return 1;

	if (msr == MSR_MTRRdefType)
		*pdata = vcpu->arch.mtrr_state.def_type +
			 (vcpu->arch.mtrr_state.enabled << 10);
	else if (msr == MSR_MTRRfix64K_00000)
		*pdata = p[0];
	else if (msr == MSR_MTRRfix16K_80000 || msr == MSR_MTRRfix16K_A0000)
		*pdata = p[1 + msr - MSR_MTRRfix16K_80000];
	else if (msr >= MSR_MTRRfix4K_C0000 && msr <= MSR_MTRRfix4K_F8000)
		*pdata = p[3 + msr - MSR_MTRRfix4K_C0000];
	else if (msr == MSR_IA32_CR_PAT)
		*pdata = vcpu->arch.pat;
	else {	/* Variable MTRRs */
		int idx, is_mtrr_mask;
		u64 *pt;

		idx = (msr - 0x200) / 2;
		is_mtrr_mask = msr - 0x200 - 2 * idx;
		if (!is_mtrr_mask)
			pt =
			  (u64 *)&vcpu->arch.mtrr_state.var_ranges[idx].base_lo;
		else
			pt =
			  (u64 *)&vcpu->arch.mtrr_state.var_ranges[idx].mask_lo;
		*pdata = *pt;
	}

	return 0;
}

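/*
 * Variable-range MTRRs live at MSRs 0x200..0x2ff as (PHYSBASEn,
 * PHYSMASKn) pairs, so an even MSR selects a base register and an odd
 * one the matching mask.  For example, msr == 0x203 gives
 * idx = (0x203 - 0x200) / 2 = 1 and is_mtrr_mask = 1, i.e. the mask
 * half of variable range 1.
 */
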
static int get_msr_mce(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
{
	u64 data;
	u64 mcg_cap = vcpu->arch.mcg_cap;
	unsigned bank_num = mcg_cap & 0xff;

	switch (msr) {
	case MSR_IA32_P5_MC_ADDR:
	case MSR_IA32_P5_MC_TYPE:
		data = 0;
		break;
	case MSR_IA32_MCG_CAP:
		data = vcpu->arch.mcg_cap;
		break;
	case MSR_IA32_MCG_CTL:
		if (!(mcg_cap & MCG_CTL_P))
			return 1;
		data = vcpu->arch.mcg_ctl;
		break;
	case MSR_IA32_MCG_STATUS:
		data = vcpu->arch.mcg_status;
		break;
	default:
		if (msr >= MSR_IA32_MC0_CTL &&
		    msr < MSR_IA32_MC0_CTL + 4 * bank_num) {
			u32 offset = msr - MSR_IA32_MC0_CTL;
			data = vcpu->arch.mce_banks[offset];
			break;
		}
		return 1;
	}
	*pdata = data;
	return 0;
}

static int get_msr_hyperv_pw(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
{
	u64 data = 0;
	struct kvm *kvm = vcpu->kvm;

	switch (msr) {
	case HV_X64_MSR_GUEST_OS_ID:
		data = kvm->arch.hv_guest_os_id;
		break;
	case HV_X64_MSR_HYPERCALL:
		data = kvm->arch.hv_hypercall;
		break;
	default:
		pr_unimpl(vcpu, "Hyper-V unhandled rdmsr: 0x%x\n", msr);
		return 1;
	}

	*pdata = data;
	return 0;
}

static int get_msr_hyperv(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
{
	u64 data = 0;

	switch (msr) {
	case HV_X64_MSR_VP_INDEX: {
		int r;
		struct kvm_vcpu *v;
		kvm_for_each_vcpu(r, v, vcpu->kvm)
			if (v == vcpu)
				data = r;
		break;
	}
	case HV_X64_MSR_EOI:
		return kvm_hv_vapic_msr_read(vcpu, APIC_EOI, pdata);
	case HV_X64_MSR_ICR:
		return kvm_hv_vapic_msr_read(vcpu, APIC_ICR, pdata);
	case HV_X64_MSR_TPR:
		return kvm_hv_vapic_msr_read(vcpu, APIC_TASKPRI, pdata);
	default:
		pr_unimpl(vcpu, "Hyper-V unhandled rdmsr: 0x%x\n", msr);
		return 1;
	}
	*pdata = data;
	return 0;
}

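/*
 * Note that HV_X64_MSR_VP_INDEX is synthesized rather than stored: the
 * value reported is simply the vcpu's position in the kvm->vcpus
 * array, which is what a Hyper-V-enlightened guest uses to identify
 * the current virtual processor.
 */
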
int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
{
	u64 data;

	switch (msr) {
	case MSR_IA32_PLATFORM_ID:
	case MSR_IA32_UCODE_REV:
	case MSR_IA32_EBL_CR_POWERON:
	case MSR_IA32_DEBUGCTLMSR:
	case MSR_IA32_LASTBRANCHFROMIP:
	case MSR_IA32_LASTBRANCHTOIP:
	case MSR_IA32_LASTINTFROMIP:
	case MSR_IA32_LASTINTTOIP:
	case MSR_K8_SYSCFG:
	case MSR_K7_HWCR:
	case MSR_VM_HSAVE_PA:
	case MSR_P6_PERFCTR0:
	case MSR_P6_PERFCTR1:
	case MSR_P6_EVNTSEL0:
	case MSR_P6_EVNTSEL1:
	case MSR_K7_EVNTSEL0:
	case MSR_K7_PERFCTR0:
	case MSR_K8_INT_PENDING_MSG:
	case MSR_AMD64_NB_CFG:
	case MSR_FAM10H_MMIO_CONF_BASE:
		data = 0;
		break;
	case MSR_MTRRcap:
		data = 0x500 | KVM_NR_VAR_MTRR;
		break;
	case 0x200 ... 0x2ff:
		return get_msr_mtrr(vcpu, msr, pdata);
	case 0xcd: /* fsb frequency */
		data = 3;
		break;
	case MSR_IA32_APICBASE:
		data = kvm_get_apic_base(vcpu);
		break;
	case APIC_BASE_MSR ... APIC_BASE_MSR + 0x3ff:
		return kvm_x2apic_msr_read(vcpu, msr, pdata);
	case MSR_IA32_MISC_ENABLE:
		data = vcpu->arch.ia32_misc_enable_msr;
		break;
	case MSR_IA32_PERF_STATUS:
		/* TSC increment by tick */
		data = 1000ULL;
		/* CPU multiplier */
		data |= (((uint64_t)4ULL) << 40);
		break;
	case MSR_EFER:
		data = vcpu->arch.shadow_efer;
		break;
	case MSR_KVM_WALL_CLOCK:
		data = vcpu->kvm->arch.wall_clock;
		break;
	case MSR_KVM_SYSTEM_TIME:
		data = vcpu->arch.time;
		break;
	case MSR_IA32_P5_MC_ADDR:
	case MSR_IA32_P5_MC_TYPE:
	case MSR_IA32_MCG_CAP:
	case MSR_IA32_MCG_CTL:
	case MSR_IA32_MCG_STATUS:
	case MSR_IA32_MC0_CTL ... MSR_IA32_MC0_CTL + 4 * KVM_MAX_MCE_BANKS - 1:
		return get_msr_mce(vcpu, msr, pdata);
	case HV_X64_MSR_GUEST_OS_ID ... HV_X64_MSR_SINT15:
		if (kvm_hv_msr_partition_wide(msr)) {
			int r;
			mutex_lock(&vcpu->kvm->lock);
			r = get_msr_hyperv_pw(vcpu, msr, pdata);
			mutex_unlock(&vcpu->kvm->lock);
			return r;
		} else
			return get_msr_hyperv(vcpu, msr, pdata);
		break;
	default:
		if (!ignore_msrs) {
			pr_unimpl(vcpu, "unhandled rdmsr: 0x%x\n", msr);
			return 1;
		} else {
			pr_unimpl(vcpu, "ignored rdmsr: 0x%x\n", msr);
			data = 0;
		}
		break;
	}
	*pdata = data;
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_get_msr_common);

/*
 * Read or write a bunch of msrs. All parameters are kernel addresses.
 *
 * @return number of msrs set successfully.
 */
static int __msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs *msrs,
		    struct kvm_msr_entry *entries,
		    int (*do_msr)(struct kvm_vcpu *vcpu,
				  unsigned index, u64 *data))
{
	int i, idx;

	vcpu_load(vcpu);
	idx = srcu_read_lock(&vcpu->kvm->srcu);
	for (i = 0; i < msrs->nmsrs; ++i)
		if (do_msr(vcpu, entries[i].index, &entries[i].data))
			break;
	srcu_read_unlock(&vcpu->kvm->srcu, idx);
	vcpu_put(vcpu);

	return i;
}

/*
 * Read or write a bunch of msrs. Parameters are user addresses.
 *
 * @return number of msrs set successfully.
 */
static int msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs __user *user_msrs,
		  int (*do_msr)(struct kvm_vcpu *vcpu,
				unsigned index, u64 *data),
		  int writeback)
{
	struct kvm_msrs msrs;
	struct kvm_msr_entry *entries;
	int r, n;
	unsigned size;

	r = -EFAULT;
	if (copy_from_user(&msrs, user_msrs, sizeof msrs))
		goto out;

	r = -E2BIG;
	if (msrs.nmsrs >= MAX_IO_MSRS)
		goto out;

	r = -ENOMEM;
	size = sizeof(struct kvm_msr_entry) * msrs.nmsrs;
	entries = vmalloc(size);
	if (!entries)
		goto out;

	r = -EFAULT;
	if (copy_from_user(entries, user_msrs->entries, size))
		goto out_free;

	r = n = __msr_io(vcpu, &msrs, entries, do_msr);
	if (r < 0)
		goto out_free;

	r = -EFAULT;
	if (writeback && copy_to_user(user_msrs->entries, entries, size))
		goto out_free;

	r = n;

out_free:
	vfree(entries);
out:
	return r;
}

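/*
 * Illustrative userspace usage (a sketch, not part of this file): the
 * KVM_GET_MSRS/KVM_SET_MSRS ioctls take a struct kvm_msrs header
 * immediately followed by nmsrs kvm_msr_entry slots, e.g.:
 *
 *	struct {
 *		struct kvm_msrs hdr;
 *		struct kvm_msr_entry entries[1];
 *	} buf = { .hdr.nmsrs = 1, .entries[0].index = MSR_EFER };
 *
 *	ioctl(vcpu_fd, KVM_GET_MSRS, &buf);
 *
 * Since msr_io() returns the number of MSRs processed, a short return
 * value tells userspace exactly which entry failed.
 */
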
int kvm_dev_ioctl_check_extension(long ext)
{
	int r;

	switch (ext) {
	case KVM_CAP_IRQCHIP:
	case KVM_CAP_HLT:
	case KVM_CAP_MMU_SHADOW_CACHE_CONTROL:
	case KVM_CAP_SET_TSS_ADDR:
	case KVM_CAP_EXT_CPUID:
	case KVM_CAP_CLOCKSOURCE:
	case KVM_CAP_PIT:
	case KVM_CAP_NOP_IO_DELAY:
	case KVM_CAP_MP_STATE:
	case KVM_CAP_SYNC_MMU:
	case KVM_CAP_REINJECT_CONTROL:
	case KVM_CAP_IRQ_INJECT_STATUS:
	case KVM_CAP_ASSIGN_DEV_IRQ:
	case KVM_CAP_IRQFD:
	case KVM_CAP_IOEVENTFD:
	case KVM_CAP_PIT2:
	case KVM_CAP_PIT_STATE2:
	case KVM_CAP_SET_IDENTITY_MAP_ADDR:
	case KVM_CAP_XEN_HVM:
	case KVM_CAP_ADJUST_CLOCK:
	case KVM_CAP_VCPU_EVENTS:
	case KVM_CAP_HYPERV:
	case KVM_CAP_HYPERV_VAPIC:
	case KVM_CAP_HYPERV_SPIN:
		r = 1;
		break;
	case KVM_CAP_COALESCED_MMIO:
		r = KVM_COALESCED_MMIO_PAGE_OFFSET;
		break;
	case KVM_CAP_VAPIC:
		r = !kvm_x86_ops->cpu_has_accelerated_tpr();
		break;
	case KVM_CAP_NR_VCPUS:
		r = KVM_MAX_VCPUS;
		break;
	case KVM_CAP_NR_MEMSLOTS:
		r = KVM_MEMORY_SLOTS;
		break;
	case KVM_CAP_PV_MMU:	/* obsolete */
		r = 0;
		break;
	case KVM_CAP_IOMMU:
		r = iommu_found();
		break;
	case KVM_CAP_MCE:
		r = KVM_MAX_MCE_BANKS;
		break;
	default:
		r = 0;
		break;
	}
	return r;
}

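/*
 * Userspace probes these capabilities with KVM_CHECK_EXTENSION on the
 * /dev/kvm fd, e.g. (illustrative):
 *
 *	if (ioctl(kvm_fd, KVM_CHECK_EXTENSION, KVM_CAP_IRQCHIP) > 0)
 *		... create the in-kernel irqchip ...
 *
 * A zero return means the extension is unsupported; positive values
 * may also carry a payload (page offset, vcpu count, MCE bank count).
 */
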
long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg)
{
	void __user *argp = (void __user *)arg;
	long r;

	switch (ioctl) {
	case KVM_GET_MSR_INDEX_LIST: {
		struct kvm_msr_list __user *user_msr_list = argp;
		struct kvm_msr_list msr_list;
		unsigned n;

		r = -EFAULT;
		if (copy_from_user(&msr_list, user_msr_list, sizeof msr_list))
			goto out;
		n = msr_list.nmsrs;
		msr_list.nmsrs = num_msrs_to_save + ARRAY_SIZE(emulated_msrs);
		if (copy_to_user(user_msr_list, &msr_list, sizeof msr_list))
			goto out;
		r = -E2BIG;
		if (n < msr_list.nmsrs)
			goto out;
		r = -EFAULT;
		if (copy_to_user(user_msr_list->indices, &msrs_to_save,
				 num_msrs_to_save * sizeof(u32)))
			goto out;
		if (copy_to_user(user_msr_list->indices + num_msrs_to_save,
				 &emulated_msrs,
				 ARRAY_SIZE(emulated_msrs) * sizeof(u32)))
			goto out;
		r = 0;
		break;
	}
	case KVM_GET_SUPPORTED_CPUID: {
		struct kvm_cpuid2 __user *cpuid_arg = argp;
		struct kvm_cpuid2 cpuid;

		r = -EFAULT;
		if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
			goto out;
		r = kvm_dev_ioctl_get_supported_cpuid(&cpuid,
						      cpuid_arg->entries);
		if (r)
			goto out;

		r = -EFAULT;
		if (copy_to_user(cpuid_arg, &cpuid, sizeof cpuid))
			goto out;
		r = 0;
		break;
	}
	case KVM_X86_GET_MCE_CAP_SUPPORTED: {
		u64 mce_cap;

		mce_cap = KVM_MCE_CAP_SUPPORTED;
		r = -EFAULT;
		if (copy_to_user(argp, &mce_cap, sizeof mce_cap))
			goto out;
		r = 0;
		break;
	}
	default:
		r = -EINVAL;
	}
out:
	return r;
}

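/*
 * KVM_GET_MSR_INDEX_LIST is a two-call protocol: if the caller's nmsrs
 * is too small, the updated count is still copied back before the
 * ioctl fails with -E2BIG, so userspace can retry with a buffer of
 * exactly msr_list.nmsrs indices.
 */
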
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	kvm_x86_ops->vcpu_load(vcpu, cpu);
	if (unlikely(per_cpu(cpu_tsc_khz, cpu) == 0)) {
		unsigned long khz = cpufreq_quick_get(cpu);
		if (!khz)
			khz = tsc_khz;
		per_cpu(cpu_tsc_khz, cpu) = khz;
	}
	kvm_request_guest_time_update(vcpu);
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	kvm_put_guest_fpu(vcpu);
	kvm_x86_ops->vcpu_put(vcpu);
}

static int is_efer_nx(void)
{
	unsigned long long efer = 0;

	rdmsrl_safe(MSR_EFER, &efer);
	return efer & EFER_NX;
}

static void cpuid_fix_nx_cap(struct kvm_vcpu *vcpu)
{
	int i;
	struct kvm_cpuid_entry2 *e, *entry;

	entry = NULL;
	for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
		e = &vcpu->arch.cpuid_entries[i];
		if (e->function == 0x80000001) {
			entry = e;
			break;
		}
	}
	if (entry && (entry->edx & (1 << 20)) && !is_efer_nx()) {
		entry->edx &= ~(1 << 20);
		printk(KERN_INFO "kvm: guest NX capability removed\n");
	}
}

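/*
 * Bit 20 of CPUID 0x80000001.EDX is the NX (no-execute) feature flag.
 * If the host runs with EFER.NX clear, advertising NX would let the
 * guest set page-table bits the hardware treats as reserved, so the
 * bit is masked out of the guest's CPUID here.
 */
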
/* when an old userspace process fills a new kernel module */
static int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu,
				    struct kvm_cpuid *cpuid,
				    struct kvm_cpuid_entry __user *entries)
{
	int r, i;
	struct kvm_cpuid_entry *cpuid_entries;

	r = -E2BIG;
	if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
		goto out;
	r = -ENOMEM;
	cpuid_entries = vmalloc(sizeof(struct kvm_cpuid_entry) * cpuid->nent);
	if (!cpuid_entries)
		goto out;
	r = -EFAULT;
	if (copy_from_user(cpuid_entries, entries,
			   cpuid->nent * sizeof(struct kvm_cpuid_entry)))
		goto out_free;
	for (i = 0; i < cpuid->nent; i++) {
		vcpu->arch.cpuid_entries[i].function = cpuid_entries[i].function;
		vcpu->arch.cpuid_entries[i].eax = cpuid_entries[i].eax;
		vcpu->arch.cpuid_entries[i].ebx = cpuid_entries[i].ebx;
		vcpu->arch.cpuid_entries[i].ecx = cpuid_entries[i].ecx;
		vcpu->arch.cpuid_entries[i].edx = cpuid_entries[i].edx;
		vcpu->arch.cpuid_entries[i].index = 0;
		vcpu->arch.cpuid_entries[i].flags = 0;
		vcpu->arch.cpuid_entries[i].padding[0] = 0;
		vcpu->arch.cpuid_entries[i].padding[1] = 0;
		vcpu->arch.cpuid_entries[i].padding[2] = 0;
	}
	vcpu->arch.cpuid_nent = cpuid->nent;
	cpuid_fix_nx_cap(vcpu);
	r = 0;
	kvm_apic_set_version(vcpu);
	kvm_x86_ops->cpuid_update(vcpu);

out_free:
	vfree(cpuid_entries);
out:
	return r;
}

static int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
				     struct kvm_cpuid2 *cpuid,
				     struct kvm_cpuid_entry2 __user *entries)
{
	int r;

	r = -E2BIG;
	if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
		goto out;
	r = -EFAULT;
	if (copy_from_user(&vcpu->arch.cpuid_entries, entries,
			   cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
		goto out;
	vcpu->arch.cpuid_nent = cpuid->nent;
	kvm_apic_set_version(vcpu);
	kvm_x86_ops->cpuid_update(vcpu);
	return 0;

out:
	return r;
}

static int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
				     struct kvm_cpuid2 *cpuid,
				     struct kvm_cpuid_entry2 __user *entries)
{
	int r;

	r = -E2BIG;
	if (cpuid->nent < vcpu->arch.cpuid_nent)
		goto out;
	r = -EFAULT;
	if (copy_to_user(entries, &vcpu->arch.cpuid_entries,
			 vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
		goto out;
	return 0;

out:
	cpuid->nent = vcpu->arch.cpuid_nent;
	return r;
}

static void do_cpuid_1_ent(struct kvm_cpuid_entry2 *entry, u32 function,
			   u32 index)
{
	entry->function = function;
	entry->index = index;
	cpuid_count(entry->function, entry->index,
		    &entry->eax, &entry->ebx, &entry->ecx, &entry->edx);
	entry->flags = 0;
}

#define F(x) bit(X86_FEATURE_##x)

static void do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
			 u32 index, int *nent, int maxnent)
{
	unsigned f_nx = is_efer_nx() ? F(NX) : 0;
#ifdef CONFIG_X86_64
	unsigned f_gbpages = (kvm_x86_ops->get_lpage_level() == PT_PDPE_LEVEL)
				? F(GBPAGES) : 0;
	unsigned f_lm = F(LM);
#else
	unsigned f_gbpages = 0;
	unsigned f_lm = 0;
#endif
	unsigned f_rdtscp = kvm_x86_ops->rdtscp_supported() ? F(RDTSCP) : 0;

	/* cpuid 1.edx */
	const u32 kvm_supported_word0_x86_features =
		F(FPU) | F(VME) | F(DE) | F(PSE) |
		F(TSC) | F(MSR) | F(PAE) | F(MCE) |
		F(CX8) | F(APIC) | 0 /* Reserved */ | F(SEP) |
		F(MTRR) | F(PGE) | F(MCA) | F(CMOV) |
		F(PAT) | F(PSE36) | 0 /* PSN */ | F(CLFLSH) |
		0 /* Reserved, DS, ACPI */ | F(MMX) |
		F(FXSR) | F(XMM) | F(XMM2) | F(SELFSNOOP) |
		0 /* HTT, TM, Reserved, PBE */;
	/* cpuid 0x80000001.edx */
	const u32 kvm_supported_word1_x86_features =
		F(FPU) | F(VME) | F(DE) | F(PSE) |
		F(TSC) | F(MSR) | F(PAE) | F(MCE) |
		F(CX8) | F(APIC) | 0 /* Reserved */ | F(SYSCALL) |
		F(MTRR) | F(PGE) | F(MCA) | F(CMOV) |
		F(PAT) | F(PSE36) | 0 /* Reserved */ |
		f_nx | 0 /* Reserved */ | F(MMXEXT) | F(MMX) |
		F(FXSR) | F(FXSR_OPT) | f_gbpages | f_rdtscp |
		0 /* Reserved */ | f_lm | F(3DNOWEXT) | F(3DNOW);
	/* cpuid 1.ecx */
	const u32 kvm_supported_word4_x86_features =
		F(XMM3) | 0 /* Reserved, DTES64, MONITOR */ |
		0 /* DS-CPL, VMX, SMX, EST */ |
		0 /* TM2 */ | F(SSSE3) | 0 /* CNXT-ID */ | 0 /* Reserved */ |
		0 /* Reserved */ | F(CX16) | 0 /* xTPR Update, PDCM */ |
		0 /* Reserved, DCA */ | F(XMM4_1) |
		F(XMM4_2) | F(X2APIC) | F(MOVBE) | F(POPCNT) |
		0 /* Reserved, XSAVE, OSXSAVE */;
	/* cpuid 0x80000001.ecx */
	const u32 kvm_supported_word6_x86_features =
		F(LAHF_LM) | F(CMP_LEGACY) | F(SVM) | 0 /* ExtApicSpace */ |
		F(CR8_LEGACY) | F(ABM) | F(SSE4A) | F(MISALIGNSSE) |
		F(3DNOWPREFETCH) | 0 /* OSVW */ | 0 /* IBS */ | F(SSE5) |
		0 /* SKINIT */ | 0 /* WDT */;

	/* all calls to cpuid_count() should be made on the same cpu */
	get_cpu();
	do_cpuid_1_ent(entry, function, index);
	++*nent;

	switch (function) {
	case 0:
		entry->eax = min(entry->eax, (u32)0xb);
		break;
	case 1:
		entry->edx &= kvm_supported_word0_x86_features;
		entry->ecx &= kvm_supported_word4_x86_features;
		/* we support x2apic emulation even if host does not support
		 * it since we emulate x2apic in software */
		entry->ecx |= F(X2APIC);
		break;
	/* function 2 entries are STATEFUL. That is, repeated cpuid commands
	 * may return different values. This forces us to get_cpu() before
	 * issuing the first command, and also to emulate this annoying behavior
	 * in kvm_emulate_cpuid() using KVM_CPUID_FLAG_STATE_READ_NEXT */
	case 2: {
		int t, times = entry->eax & 0xff;

		entry->flags |= KVM_CPUID_FLAG_STATEFUL_FUNC;
		entry->flags |= KVM_CPUID_FLAG_STATE_READ_NEXT;
		for (t = 1; t < times && *nent < maxnent; ++t) {
			do_cpuid_1_ent(&entry[t], function, 0);
			entry[t].flags |= KVM_CPUID_FLAG_STATEFUL_FUNC;
			++*nent;
		}
		break;
	}
	/* function 4 and 0xb have additional index. */
	case 4: {
		int i, cache_type;

		entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
		/* read more entries until cache_type is zero */
		for (i = 1; *nent < maxnent; ++i) {
			cache_type = entry[i - 1].eax & 0x1f;
			if (!cache_type)
				break;
			do_cpuid_1_ent(&entry[i], function, i);
			entry[i].flags |=
			       KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
			++*nent;
		}
		break;
	}
	case 0xb: {
		int i, level_type;

		entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
		/* read more entries until level_type is zero */
		for (i = 1; *nent < maxnent; ++i) {
			level_type = entry[i - 1].ecx & 0xff00;
			if (!level_type)
				break;
			do_cpuid_1_ent(&entry[i], function, i);
			entry[i].flags |=
			       KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
			++*nent;
		}
		break;
	}
	case 0x80000000:
		entry->eax = min(entry->eax, 0x8000001a);
		break;
	case 0x80000001:
		entry->edx &= kvm_supported_word1_x86_features;
		entry->ecx &= kvm_supported_word6_x86_features;
		break;
	}
	put_cpu();
}

#undef F

static int kvm_dev_ioctl_get_supported_cpuid(struct kvm_cpuid2 *cpuid,
					     struct kvm_cpuid_entry2 __user *entries)
{
	struct kvm_cpuid_entry2 *cpuid_entries;
	int limit, nent = 0, r = -E2BIG;
	u32 func;

	if (cpuid->nent < 1)
		goto out;
	if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
		cpuid->nent = KVM_MAX_CPUID_ENTRIES;
	r = -ENOMEM;
	cpuid_entries = vmalloc(sizeof(struct kvm_cpuid_entry2) * cpuid->nent);
	if (!cpuid_entries)
		goto out;

	do_cpuid_ent(&cpuid_entries[0], 0, 0, &nent, cpuid->nent);
	limit = cpuid_entries[0].eax;
	for (func = 1; func <= limit && nent < cpuid->nent; ++func)
		do_cpuid_ent(&cpuid_entries[nent], func, 0,
			     &nent, cpuid->nent);
	r = -E2BIG;
	if (nent >= cpuid->nent)
		goto out_free;

	do_cpuid_ent(&cpuid_entries[nent], 0x80000000, 0, &nent, cpuid->nent);
	limit = cpuid_entries[nent - 1].eax;
	for (func = 0x80000001; func <= limit && nent < cpuid->nent; ++func)
		do_cpuid_ent(&cpuid_entries[nent], func, 0,
			     &nent, cpuid->nent);
	r = -E2BIG;
	if (nent >= cpuid->nent)
		goto out_free;

	r = -EFAULT;
	if (copy_to_user(entries, cpuid_entries,
			 nent * sizeof(struct kvm_cpuid_entry2)))
		goto out_free;
	cpuid->nent = nent;
	r = 0;

out_free:
	vfree(cpuid_entries);
out:
	return r;
}

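/*
 * Illustrative userspace flow (a sketch): allocate a struct kvm_cpuid2
 * with room for nent entries, fill in nent, and call
 * KVM_GET_SUPPORTED_CPUID on the /dev/kvm fd; the trimmed result can
 * then be fed back to each vcpu via KVM_SET_CPUID2, typically after
 * masking out features the VMM does not want to expose.
 */
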
static int kvm_vcpu_ioctl_get_lapic(struct kvm_vcpu *vcpu,
				    struct kvm_lapic_state *s)
{
	vcpu_load(vcpu);
	memcpy(s->regs, vcpu->arch.apic->regs, sizeof *s);
	vcpu_put(vcpu);

	return 0;
}

static int kvm_vcpu_ioctl_set_lapic(struct kvm_vcpu *vcpu,
				    struct kvm_lapic_state *s)
{
	vcpu_load(vcpu);
	memcpy(vcpu->arch.apic->regs, s->regs, sizeof *s);
	kvm_apic_post_state_restore(vcpu);
	update_cr8_intercept(vcpu);
	vcpu_put(vcpu);

	return 0;
}

static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
				    struct kvm_interrupt *irq)
{
	if (irq->irq < 0 || irq->irq >= 256)
		return -EINVAL;
	if (irqchip_in_kernel(vcpu->kvm))
		return -ENXIO;
	vcpu_load(vcpu);

	kvm_queue_interrupt(vcpu, irq->irq, false);

	vcpu_put(vcpu);

	return 0;
}

static int kvm_vcpu_ioctl_nmi(struct kvm_vcpu *vcpu)
{
	vcpu_load(vcpu);
	kvm_inject_nmi(vcpu);
	vcpu_put(vcpu);

	return 0;
}

static int vcpu_ioctl_tpr_access_reporting(struct kvm_vcpu *vcpu,
					   struct kvm_tpr_access_ctl *tac)
{
	if (tac->flags)
		return -EINVAL;
	vcpu->arch.tpr_access_reporting = !!tac->enabled;
	return 0;
}

static int kvm_vcpu_ioctl_x86_setup_mce(struct kvm_vcpu *vcpu,
					u64 mcg_cap)
{
	int r;
	unsigned bank_num = mcg_cap & 0xff, bank;

	r = -EINVAL;
	if (!bank_num || bank_num >= KVM_MAX_MCE_BANKS)
		goto out;
	if (mcg_cap & ~(KVM_MCE_CAP_SUPPORTED | 0xff | 0xff0000))
		goto out;
	r = 0;
	vcpu->arch.mcg_cap = mcg_cap;
	/* Init IA32_MCG_CTL to all 1s */
	if (mcg_cap & MCG_CTL_P)
		vcpu->arch.mcg_ctl = ~(u64)0;
	/* Init IA32_MCi_CTL to all 1s */
	for (bank = 0; bank < bank_num; bank++)
		vcpu->arch.mce_banks[bank*4] = ~(u64)0;
out:
	return r;
}

static int kvm_vcpu_ioctl_x86_set_mce(struct kvm_vcpu *vcpu,
				      struct kvm_x86_mce *mce)
{
	u64 mcg_cap = vcpu->arch.mcg_cap;
	unsigned bank_num = mcg_cap & 0xff;
	u64 *banks = vcpu->arch.mce_banks;

	if (mce->bank >= bank_num || !(mce->status & MCI_STATUS_VAL))
		return -EINVAL;
	/*
	 * if IA32_MCG_CTL is not all 1s, the uncorrected error
	 * reporting is disabled
	 */
	if ((mce->status & MCI_STATUS_UC) && (mcg_cap & MCG_CTL_P) &&
	    vcpu->arch.mcg_ctl != ~(u64)0)
		return 0;
	banks += 4 * mce->bank;
	/*
	 * if IA32_MCi_CTL is not all 1s, the uncorrected error
	 * reporting is disabled for the bank
	 */
	if ((mce->status & MCI_STATUS_UC) && banks[0] != ~(u64)0)
		return 0;
	if (mce->status & MCI_STATUS_UC) {
		if ((vcpu->arch.mcg_status & MCG_STATUS_MCIP) ||
		    !kvm_read_cr4_bits(vcpu, X86_CR4_MCE)) {
			printk(KERN_DEBUG "kvm: set_mce: "
			       "injects mce exception while "
			       "previous one is in progress!\n");
			set_bit(KVM_REQ_TRIPLE_FAULT, &vcpu->requests);
			return 0;
		}
		if (banks[1] & MCI_STATUS_VAL)
			mce->status |= MCI_STATUS_OVER;
		banks[2] = mce->addr;
		banks[3] = mce->misc;
		vcpu->arch.mcg_status = mce->mcg_status;
		banks[1] = mce->status;
		kvm_queue_exception(vcpu, MC_VECTOR);
	} else if (!(banks[1] & MCI_STATUS_VAL)
		   || !(banks[1] & MCI_STATUS_UC)) {
		if (banks[1] & MCI_STATUS_VAL)
			mce->status |= MCI_STATUS_OVER;
		banks[2] = mce->addr;
		banks[3] = mce->misc;
		banks[1] = mce->status;
	} else
		banks[1] |= MCI_STATUS_OVER;
	return 0;
}

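/*
 * In short: an uncorrected error (MCI_STATUS_UC) is only delivered as
 * a #MC if both the global MCG_CTL and the bank's MCi_CTL are all 1s,
 * and if a previous #MC is still in progress (MCG_STATUS_MCIP) or
 * CR4.MCE is clear, the error escalates to a shutdown, modeled here as
 * a triple-fault request.
 */
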
static void kvm_vcpu_ioctl_x86_get_vcpu_events(struct kvm_vcpu *vcpu,
					       struct kvm_vcpu_events *events)
{
	vcpu_load(vcpu);

	events->exception.injected = vcpu->arch.exception.pending;
	events->exception.nr = vcpu->arch.exception.nr;
	events->exception.has_error_code = vcpu->arch.exception.has_error_code;
	events->exception.error_code = vcpu->arch.exception.error_code;

	events->interrupt.injected = vcpu->arch.interrupt.pending;
	events->interrupt.nr = vcpu->arch.interrupt.nr;
	events->interrupt.soft = vcpu->arch.interrupt.soft;

	events->nmi.injected = vcpu->arch.nmi_injected;
	events->nmi.pending = vcpu->arch.nmi_pending;
	events->nmi.masked = kvm_x86_ops->get_nmi_mask(vcpu);

	events->sipi_vector = vcpu->arch.sipi_vector;

	events->flags = (KVM_VCPUEVENT_VALID_NMI_PENDING
			 | KVM_VCPUEVENT_VALID_SIPI_VECTOR);

	vcpu_put(vcpu);
}

static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu,
					      struct kvm_vcpu_events *events)
{
	if (events->flags & ~(KVM_VCPUEVENT_VALID_NMI_PENDING
			      | KVM_VCPUEVENT_VALID_SIPI_VECTOR))
		return -EINVAL;

	vcpu_load(vcpu);

	vcpu->arch.exception.pending = events->exception.injected;
	vcpu->arch.exception.nr = events->exception.nr;
	vcpu->arch.exception.has_error_code = events->exception.has_error_code;
	vcpu->arch.exception.error_code = events->exception.error_code;

	vcpu->arch.interrupt.pending = events->interrupt.injected;
	vcpu->arch.interrupt.nr = events->interrupt.nr;
	vcpu->arch.interrupt.soft = events->interrupt.soft;
	if (vcpu->arch.interrupt.pending && irqchip_in_kernel(vcpu->kvm))
		kvm_pic_clear_isr_ack(vcpu->kvm);

	vcpu->arch.nmi_injected = events->nmi.injected;
	if (events->flags & KVM_VCPUEVENT_VALID_NMI_PENDING)
		vcpu->arch.nmi_pending = events->nmi.pending;
	kvm_x86_ops->set_nmi_mask(vcpu, events->nmi.masked);

	if (events->flags & KVM_VCPUEVENT_VALID_SIPI_VECTOR)
		vcpu->arch.sipi_vector = events->sipi_vector;

	vcpu_put(vcpu);

	return 0;
}

long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	int r;
	struct kvm_lapic_state *lapic = NULL;

	switch (ioctl) {
	case KVM_GET_LAPIC: {
		r = -EINVAL;
		if (!vcpu->arch.apic)
			goto out;
		lapic = kzalloc(sizeof(struct kvm_lapic_state), GFP_KERNEL);

		r = -ENOMEM;
		if (!lapic)
			goto out;
		r = kvm_vcpu_ioctl_get_lapic(vcpu, lapic);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, lapic, sizeof(struct kvm_lapic_state)))
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_LAPIC: {
		r = -EINVAL;
		if (!vcpu->arch.apic)
			goto out;
		lapic = kmalloc(sizeof(struct kvm_lapic_state), GFP_KERNEL);
		r = -ENOMEM;
		if (!lapic)
			goto out;
		r = -EFAULT;
		if (copy_from_user(lapic, argp, sizeof(struct kvm_lapic_state)))
			goto out;
		r = kvm_vcpu_ioctl_set_lapic(vcpu, lapic);
		if (r)
			goto out;
		r = 0;
		break;
	}
	case KVM_INTERRUPT: {
		struct kvm_interrupt irq;

		r = -EFAULT;
		if (copy_from_user(&irq, argp, sizeof irq))
			goto out;
		r = kvm_vcpu_ioctl_interrupt(vcpu, &irq);
		if (r)
			goto out;
		r = 0;
		break;
	}
	case KVM_NMI: {
		r = kvm_vcpu_ioctl_nmi(vcpu);
		if (r)
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_CPUID: {
		struct kvm_cpuid __user *cpuid_arg = argp;
		struct kvm_cpuid cpuid;

		r = -EFAULT;
		if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
			goto out;
		r = kvm_vcpu_ioctl_set_cpuid(vcpu, &cpuid, cpuid_arg->entries);
		if (r)
			goto out;
		break;
	}
	case KVM_SET_CPUID2: {
		struct kvm_cpuid2 __user *cpuid_arg = argp;
		struct kvm_cpuid2 cpuid;

		r = -EFAULT;
		if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
			goto out;
		r = kvm_vcpu_ioctl_set_cpuid2(vcpu, &cpuid,
					      cpuid_arg->entries);
		if (r)
			goto out;
		break;
	}
	case KVM_GET_CPUID2: {
		struct kvm_cpuid2 __user *cpuid_arg = argp;
		struct kvm_cpuid2 cpuid;

		r = -EFAULT;
		if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
			goto out;
		r = kvm_vcpu_ioctl_get_cpuid2(vcpu, &cpuid,
					      cpuid_arg->entries);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(cpuid_arg, &cpuid, sizeof cpuid))
			goto out;
		r = 0;
		break;
	}
	case KVM_GET_MSRS:
		r = msr_io(vcpu, argp, kvm_get_msr, 1);
		break;
	case KVM_SET_MSRS:
		r = msr_io(vcpu, argp, do_set_msr, 0);
		break;
	case KVM_TPR_ACCESS_REPORTING: {
		struct kvm_tpr_access_ctl tac;

		r = -EFAULT;
		if (copy_from_user(&tac, argp, sizeof tac))
			goto out;
		r = vcpu_ioctl_tpr_access_reporting(vcpu, &tac);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, &tac, sizeof tac))
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_VAPIC_ADDR: {
		struct kvm_vapic_addr va;

		r = -EINVAL;
		if (!irqchip_in_kernel(vcpu->kvm))
			goto out;
		r = -EFAULT;
		if (copy_from_user(&va, argp, sizeof va))
			goto out;
		r = 0;
		kvm_lapic_set_vapic_addr(vcpu, va.vapic_addr);
		break;
	}
	case KVM_X86_SETUP_MCE: {
		u64 mcg_cap;

		r = -EFAULT;
		if (copy_from_user(&mcg_cap, argp, sizeof mcg_cap))
			goto out;
		r = kvm_vcpu_ioctl_x86_setup_mce(vcpu, mcg_cap);
		break;
	}
	case KVM_X86_SET_MCE: {
		struct kvm_x86_mce mce;

		r = -EFAULT;
		if (copy_from_user(&mce, argp, sizeof mce))
			goto out;
		r = kvm_vcpu_ioctl_x86_set_mce(vcpu, &mce);
		break;
	}
	case KVM_GET_VCPU_EVENTS: {
		struct kvm_vcpu_events events;

		kvm_vcpu_ioctl_x86_get_vcpu_events(vcpu, &events);

		r = -EFAULT;
		if (copy_to_user(argp, &events, sizeof(struct kvm_vcpu_events)))
			break;
		r = 0;
		break;
	}
	case KVM_SET_VCPU_EVENTS: {
		struct kvm_vcpu_events events;

		r = -EFAULT;
		if (copy_from_user(&events, argp, sizeof(struct kvm_vcpu_events)))
			break;

		r = kvm_vcpu_ioctl_x86_set_vcpu_events(vcpu, &events);
		break;
	}
	default:
		r = -EINVAL;
	}
out:
	kfree(lapic);
	return r;
}

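/*
 * Note the single exit path: "lapic" is freed unconditionally at "out",
 * so the KVM_GET_LAPIC/KVM_SET_LAPIC cases can bail with goto at any
 * point without leaking the temporary buffer (kfree(NULL) is a no-op).
 */
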
static int kvm_vm_ioctl_set_tss_addr(struct kvm *kvm, unsigned long addr)
{
	int ret;

	if (addr > (unsigned int)(-3 * PAGE_SIZE))
		return -1;
	ret = kvm_x86_ops->set_tss_addr(kvm, addr);
	return ret;
}

static int kvm_vm_ioctl_set_identity_map_addr(struct kvm *kvm,
					      u64 ident_addr)
{
	kvm->arch.ept_identity_map_addr = ident_addr;
	return 0;
}

static int kvm_vm_ioctl_set_nr_mmu_pages(struct kvm *kvm,
					 u32 kvm_nr_mmu_pages)
{
	if (kvm_nr_mmu_pages < KVM_MIN_ALLOC_MMU_PAGES)
		return -EINVAL;

	mutex_lock(&kvm->slots_lock);
	spin_lock(&kvm->mmu_lock);

	kvm_mmu_change_mmu_pages(kvm, kvm_nr_mmu_pages);
	kvm->arch.n_requested_mmu_pages = kvm_nr_mmu_pages;

	spin_unlock(&kvm->mmu_lock);
	mutex_unlock(&kvm->slots_lock);
	return 0;
}

static int kvm_vm_ioctl_get_nr_mmu_pages(struct kvm *kvm)
{
	return kvm->arch.n_alloc_mmu_pages;
}

gfn_t unalias_gfn_instantiation(struct kvm *kvm, gfn_t gfn)
{
	int i;
	struct kvm_mem_alias *alias;
	struct kvm_mem_aliases *aliases;

	aliases = rcu_dereference(kvm->arch.aliases);

	for (i = 0; i < aliases->naliases; ++i) {
		alias = &aliases->aliases[i];
		if (alias->flags & KVM_ALIAS_INVALID)
			continue;
		if (gfn >= alias->base_gfn
		    && gfn < alias->base_gfn + alias->npages)
			return alias->target_gfn + gfn - alias->base_gfn;
	}
	return gfn;
}

gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn)
{
	int i;
	struct kvm_mem_alias *alias;
	struct kvm_mem_aliases *aliases;

	aliases = rcu_dereference(kvm->arch.aliases);

	for (i = 0; i < aliases->naliases; ++i) {
		alias = &aliases->aliases[i];
		if (gfn >= alias->base_gfn
		    && gfn < alias->base_gfn + alias->npages)
			return alias->target_gfn + gfn - alias->base_gfn;
	}
	return gfn;
}

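/*
 * Both lookup paths walk the alias array under rcu_dereference(); the
 * only difference is that unalias_gfn_instantiation() skips slots the
 * updater has marked KVM_ALIAS_INVALID mid-update.
 */
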
/*
 * Set a new alias region.  Aliases map a portion of physical memory into
 * another portion.  This is useful for memory windows, for example the PC
 * VGA region.
 */
static int kvm_vm_ioctl_set_memory_alias(struct kvm *kvm,
					 struct kvm_memory_alias *alias)
{
	int r, n;
	struct kvm_mem_alias *p;
	struct kvm_mem_aliases *aliases, *old_aliases;

	r = -EINVAL;
	/* General sanity checks */
	if (alias->memory_size & (PAGE_SIZE - 1))
		goto out;
	if (alias->guest_phys_addr & (PAGE_SIZE - 1))
		goto out;
	if (alias->slot >= KVM_ALIAS_SLOTS)
		goto out;
	if (alias->guest_phys_addr + alias->memory_size
	    < alias->guest_phys_addr)
		goto out;
	if (alias->target_phys_addr + alias->memory_size
	    < alias->target_phys_addr)
		goto out;

	r = -ENOMEM;
	aliases = kzalloc(sizeof(struct kvm_mem_aliases), GFP_KERNEL);
	if (!aliases)
		goto out;

	mutex_lock(&kvm->slots_lock);

	/* invalidate any gfn reference in case of deletion/shrinking */
	memcpy(aliases, kvm->arch.aliases, sizeof(struct kvm_mem_aliases));
	aliases->aliases[alias->slot].flags |= KVM_ALIAS_INVALID;
	old_aliases = kvm->arch.aliases;
	rcu_assign_pointer(kvm->arch.aliases, aliases);
	synchronize_srcu_expedited(&kvm->srcu);
	kvm_mmu_zap_all(kvm);
	kfree(old_aliases);

	r = -ENOMEM;
	aliases = kzalloc(sizeof(struct kvm_mem_aliases), GFP_KERNEL);
	if (!aliases)
		goto out_unlock;

	memcpy(aliases, kvm->arch.aliases, sizeof(struct kvm_mem_aliases));

	p = &aliases->aliases[alias->slot];
	p->base_gfn = alias->guest_phys_addr >> PAGE_SHIFT;
	p->npages = alias->memory_size >> PAGE_SHIFT;
	p->target_gfn = alias->target_phys_addr >> PAGE_SHIFT;
	p->flags &= ~(KVM_ALIAS_INVALID);

	for (n = KVM_ALIAS_SLOTS; n > 0; --n)
		if (aliases->aliases[n - 1].npages)
			break;
	aliases->naliases = n;

	old_aliases = kvm->arch.aliases;
	rcu_assign_pointer(kvm->arch.aliases, aliases);
	synchronize_srcu_expedited(&kvm->srcu);
	kfree(old_aliases);
	r = 0;

out_unlock:
	mutex_unlock(&kvm->slots_lock);
out:
	return r;
}

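/*
 * The update above happens in two RCU publish steps: first a copy with
 * the target slot marked KVM_ALIAS_INVALID is installed and all SRCU
 * readers are waited out, so no translation can still be using the old
 * range while the MMU is zapped; only then is the final table with the
 * new alias published.
 */
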
static int kvm_vm_ioctl_get_irqchip(struct kvm *kvm, struct kvm_irqchip *chip)
{
	int r;

	r = 0;
	switch (chip->chip_id) {
	case KVM_IRQCHIP_PIC_MASTER:
		memcpy(&chip->chip.pic,
		       &pic_irqchip(kvm)->pics[0],
		       sizeof(struct kvm_pic_state));
		break;
	case KVM_IRQCHIP_PIC_SLAVE:
		memcpy(&chip->chip.pic,
		       &pic_irqchip(kvm)->pics[1],
		       sizeof(struct kvm_pic_state));
		break;
	case KVM_IRQCHIP_IOAPIC:
		r = kvm_get_ioapic(kvm, &chip->chip.ioapic);
		break;
	default:
		r = -EINVAL;
		break;
	}
	return r;
}

static int kvm_vm_ioctl_set_irqchip(struct kvm *kvm, struct kvm_irqchip *chip)
{
	int r;

	r = 0;
	switch (chip->chip_id) {
	case KVM_IRQCHIP_PIC_MASTER:
		spin_lock(&pic_irqchip(kvm)->lock);
		memcpy(&pic_irqchip(kvm)->pics[0],
		       &chip->chip.pic,
		       sizeof(struct kvm_pic_state));
		spin_unlock(&pic_irqchip(kvm)->lock);
		break;
	case KVM_IRQCHIP_PIC_SLAVE:
		spin_lock(&pic_irqchip(kvm)->lock);
		memcpy(&pic_irqchip(kvm)->pics[1],
		       &chip->chip.pic,
		       sizeof(struct kvm_pic_state));
		spin_unlock(&pic_irqchip(kvm)->lock);
		break;
	case KVM_IRQCHIP_IOAPIC:
		r = kvm_set_ioapic(kvm, &chip->chip.ioapic);
		break;
	default:
		r = -EINVAL;
		break;
	}
	kvm_pic_update_irq(pic_irqchip(kvm));
	return r;
}

static int kvm_vm_ioctl_get_pit(struct kvm *kvm, struct kvm_pit_state *ps)
{
	int r = 0;

	mutex_lock(&kvm->arch.vpit->pit_state.lock);
	memcpy(ps, &kvm->arch.vpit->pit_state, sizeof(struct kvm_pit_state));
	mutex_unlock(&kvm->arch.vpit->pit_state.lock);
	return r;
}

static int kvm_vm_ioctl_set_pit(struct kvm *kvm, struct kvm_pit_state *ps)
{
	int r = 0;

	mutex_lock(&kvm->arch.vpit->pit_state.lock);
	memcpy(&kvm->arch.vpit->pit_state, ps, sizeof(struct kvm_pit_state));
	kvm_pit_load_count(kvm, 0, ps->channels[0].count, 0);
	mutex_unlock(&kvm->arch.vpit->pit_state.lock);
	return r;
}

static int kvm_vm_ioctl_get_pit2(struct kvm *kvm, struct kvm_pit_state2 *ps)
{
	int r = 0;

	mutex_lock(&kvm->arch.vpit->pit_state.lock);
	memcpy(ps->channels, &kvm->arch.vpit->pit_state.channels,
	       sizeof(ps->channels));
	ps->flags = kvm->arch.vpit->pit_state.flags;
	mutex_unlock(&kvm->arch.vpit->pit_state.lock);
	return r;
}

static int kvm_vm_ioctl_set_pit2(struct kvm *kvm, struct kvm_pit_state2 *ps)
{
	int r = 0, start = 0;
	u32 prev_legacy, cur_legacy;

	mutex_lock(&kvm->arch.vpit->pit_state.lock);
	prev_legacy = kvm->arch.vpit->pit_state.flags & KVM_PIT_FLAGS_HPET_LEGACY;
	cur_legacy = ps->flags & KVM_PIT_FLAGS_HPET_LEGACY;
	if (!prev_legacy && cur_legacy)
		start = 1;
	memcpy(&kvm->arch.vpit->pit_state.channels, &ps->channels,
	       sizeof(kvm->arch.vpit->pit_state.channels));
	kvm->arch.vpit->pit_state.flags = ps->flags;
	kvm_pit_load_count(kvm, 0, kvm->arch.vpit->pit_state.channels[0].count, start);
	mutex_unlock(&kvm->arch.vpit->pit_state.lock);
	return r;
}

static int kvm_vm_ioctl_reinject(struct kvm *kvm,
				 struct kvm_reinject_control *control)
{
	if (!kvm->arch.vpit)
		return -ENXIO;
	mutex_lock(&kvm->arch.vpit->pit_state.lock);
	kvm->arch.vpit->pit_state.pit_timer.reinject = control->pit_reinject;
	mutex_unlock(&kvm->arch.vpit->pit_state.lock);
	return 0;
}

/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
			       struct kvm_dirty_log *log)
{
	int r, n, i;
	struct kvm_memory_slot *memslot;
	unsigned long is_dirty = 0;
	unsigned long *dirty_bitmap = NULL;

	mutex_lock(&kvm->slots_lock);

	r = -EINVAL;
	if (log->slot >= KVM_MEMORY_SLOTS)
		goto out;

	memslot = &kvm->memslots->memslots[log->slot];
	r = -ENOENT;
	if (!memslot->dirty_bitmap)
		goto out;

	n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;

	r = -ENOMEM;
	dirty_bitmap = vmalloc(n);
	if (!dirty_bitmap)
		goto out;
	memset(dirty_bitmap, 0, n);

	for (i = 0; !is_dirty && i < n/sizeof(long); i++)
		is_dirty = memslot->dirty_bitmap[i];

	/* If nothing is dirty, don't bother messing with page tables. */
	if (is_dirty) {
		struct kvm_memslots *slots, *old_slots;

		spin_lock(&kvm->mmu_lock);
		kvm_mmu_slot_remove_write_access(kvm, log->slot);
		spin_unlock(&kvm->mmu_lock);

		slots = kzalloc(sizeof(struct kvm_memslots), GFP_KERNEL);
		if (!slots)
			goto out_free;

		memcpy(slots, kvm->memslots, sizeof(struct kvm_memslots));
		slots->memslots[log->slot].dirty_bitmap = dirty_bitmap;

		old_slots = kvm->memslots;
		rcu_assign_pointer(kvm->memslots, slots);
		synchronize_srcu_expedited(&kvm->srcu);
		dirty_bitmap = old_slots->memslots[log->slot].dirty_bitmap;
		kfree(old_slots);
	}

	r = 0;
	if (copy_to_user(log->dirty_bitmap, dirty_bitmap, n))
		r = -EFAULT;
out_free:
	vfree(dirty_bitmap);
out:
	mutex_unlock(&kvm->slots_lock);
	return r;
}

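/*
 * Rather than clearing the live bitmap in place, a zeroed replacement
 * is published via rcu_assign_pointer() and the old bitmap is copied
 * to userspace after synchronize_srcu_expedited(), so concurrent dirty
 * logging never races with the read-and-clear.
 */
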
long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm = filp->private_data;
	void __user *argp = (void __user *)arg;
	int r = -ENOTTY;
	/*
	 * This union makes it completely explicit to gcc-3.x
	 * that these two variables' stack usage should be
	 * combined, not added together.
	 */
	union {
		struct kvm_pit_state ps;
		struct kvm_pit_state2 ps2;
		struct kvm_memory_alias alias;
		struct kvm_pit_config pit_config;
	} u;

	switch (ioctl) {
	case KVM_SET_TSS_ADDR:
		r = kvm_vm_ioctl_set_tss_addr(kvm, arg);
		if (r < 0)
			goto out;
		break;
	case KVM_SET_IDENTITY_MAP_ADDR: {
		u64 ident_addr;

		r = -EFAULT;
		if (copy_from_user(&ident_addr, argp, sizeof ident_addr))
			goto out;
		r = kvm_vm_ioctl_set_identity_map_addr(kvm, ident_addr);
		if (r < 0)
			goto out;
		break;
	}
	case KVM_SET_MEMORY_REGION: {
		struct kvm_memory_region kvm_mem;
		struct kvm_userspace_memory_region kvm_userspace_mem;

		r = -EFAULT;
		if (copy_from_user(&kvm_mem, argp, sizeof kvm_mem))
			goto out;
		kvm_userspace_mem.slot = kvm_mem.slot;
		kvm_userspace_mem.flags = kvm_mem.flags;
		kvm_userspace_mem.guest_phys_addr = kvm_mem.guest_phys_addr;
		kvm_userspace_mem.memory_size = kvm_mem.memory_size;
		r = kvm_vm_ioctl_set_memory_region(kvm, &kvm_userspace_mem, 0);
		if (r)
			goto out;
		break;
	}
	case KVM_SET_NR_MMU_PAGES:
		r = kvm_vm_ioctl_set_nr_mmu_pages(kvm, arg);
		if (r)
			goto out;
		break;
	case KVM_GET_NR_MMU_PAGES:
		r = kvm_vm_ioctl_get_nr_mmu_pages(kvm);
		break;
	case KVM_SET_MEMORY_ALIAS:
		r = -EFAULT;
		if (copy_from_user(&u.alias, argp, sizeof(struct kvm_memory_alias)))
			goto out;
		r = kvm_vm_ioctl_set_memory_alias(kvm, &u.alias);
		if (r)
			goto out;
		break;
	case KVM_CREATE_IRQCHIP: {
		struct kvm_pic *vpic;

		mutex_lock(&kvm->lock);
		r = -EEXIST;
		if (kvm->arch.vpic)
			goto create_irqchip_unlock;
		r = -ENOMEM;
		vpic = kvm_create_pic(kvm);
		if (vpic) {
			r = kvm_ioapic_init(kvm);
			if (r) {
				kfree(vpic);
				goto create_irqchip_unlock;
			}
		} else
			goto create_irqchip_unlock;
		smp_wmb();
		kvm->arch.vpic = vpic;
		smp_wmb();
		r = kvm_setup_default_irq_routing(kvm);
		if (r) {
			mutex_lock(&kvm->irq_lock);
			kfree(kvm->arch.vpic);
			kfree(kvm->arch.vioapic);
			kvm->arch.vpic = NULL;
			kvm->arch.vioapic = NULL;
			mutex_unlock(&kvm->irq_lock);
		}
	create_irqchip_unlock:
		mutex_unlock(&kvm->lock);
		break;
	}
	case KVM_CREATE_PIT:
		u.pit_config.flags = KVM_PIT_SPEAKER_DUMMY;
		goto create_pit;
	case KVM_CREATE_PIT2:
		r = -EFAULT;
		if (copy_from_user(&u.pit_config, argp,
				   sizeof(struct kvm_pit_config)))
			goto out;
	create_pit:
		mutex_lock(&kvm->slots_lock);
		r = -EEXIST;
		if (kvm->arch.vpit)
			goto create_pit_unlock;
		r = -ENOMEM;
		kvm->arch.vpit = kvm_create_pit(kvm, u.pit_config.flags);
		if (kvm->arch.vpit)
			r = 0;
	create_pit_unlock:
		mutex_unlock(&kvm->slots_lock);
		break;
	case KVM_IRQ_LINE_STATUS:
	case KVM_IRQ_LINE: {
		struct kvm_irq_level irq_event;

		r = -EFAULT;
		if (copy_from_user(&irq_event, argp, sizeof irq_event))
			goto out;
		if (irqchip_in_kernel(kvm)) {
			__s32 status;
			status = kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID,
					     irq_event.irq, irq_event.level);
			if (ioctl == KVM_IRQ_LINE_STATUS) {
				irq_event.status = status;
				if (copy_to_user(argp, &irq_event,
						 sizeof irq_event))
					goto out;
			}
			r = 0;
		}
		break;
	}
	case KVM_GET_IRQCHIP: {
		/* 0: PIC master, 1: PIC slave, 2: IOAPIC */
		struct kvm_irqchip *chip = kmalloc(sizeof(*chip), GFP_KERNEL);

		r = -ENOMEM;
		if (!chip)
			goto out;
		r = -EFAULT;
		if (copy_from_user(chip, argp, sizeof *chip))
			goto get_irqchip_out;
		r = -ENXIO;
		if (!irqchip_in_kernel(kvm))
			goto get_irqchip_out;
		r = kvm_vm_ioctl_get_irqchip(kvm, chip);
		if (r)
			goto get_irqchip_out;
		r = -EFAULT;
		if (copy_to_user(argp, chip, sizeof *chip))
			goto get_irqchip_out;
		r = 0;
	get_irqchip_out:
		kfree(chip);
		if (r)
			goto out;
		break;
	}
	case KVM_SET_IRQCHIP: {
		/* 0: PIC master, 1: PIC slave, 2: IOAPIC */
		struct kvm_irqchip *chip = kmalloc(sizeof(*chip), GFP_KERNEL);

		r = -ENOMEM;
		if (!chip)
			goto out;
		r = -EFAULT;
		if (copy_from_user(chip, argp, sizeof *chip))
			goto set_irqchip_out;
		r = -ENXIO;
		if (!irqchip_in_kernel(kvm))
			goto set_irqchip_out;
		r = kvm_vm_ioctl_set_irqchip(kvm, chip);
		if (r)
			goto set_irqchip_out;
		r = 0;
	set_irqchip_out:
		kfree(chip);
		if (r)
			goto out;
		break;
	}
	case KVM_GET_PIT: {
		r = -EFAULT;
		if (copy_from_user(&u.ps, argp, sizeof(struct kvm_pit_state)))
			goto out;
		r = -ENXIO;
		if (!kvm->arch.vpit)
			goto out;
		r = kvm_vm_ioctl_get_pit(kvm, &u.ps);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, &u.ps, sizeof(struct kvm_pit_state)))
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_PIT: {
		r = -EFAULT;
		if (copy_from_user(&u.ps, argp, sizeof u.ps))
			goto out;
		r = -ENXIO;
		if (!kvm->arch.vpit)
			goto out;
		r = kvm_vm_ioctl_set_pit(kvm, &u.ps);
		if (r)
			goto out;
		r = 0;
		break;
	}
	case KVM_GET_PIT2: {
		r = -ENXIO;
		if (!kvm->arch.vpit)
			goto out;
		r = kvm_vm_ioctl_get_pit2(kvm, &u.ps2);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, &u.ps2, sizeof(u.ps2)))
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_PIT2: {
		r = -EFAULT;
		if (copy_from_user(&u.ps2, argp, sizeof(u.ps2)))
			goto out;
		r = -ENXIO;
		if (!kvm->arch.vpit)
			goto out;
		r = kvm_vm_ioctl_set_pit2(kvm, &u.ps2);
		if (r)
			goto out;
		r = 0;
		break;
	}
	case KVM_REINJECT_CONTROL: {
		struct kvm_reinject_control control;

		r = -EFAULT;
		if (copy_from_user(&control, argp, sizeof(control)))
			goto out;
		r = kvm_vm_ioctl_reinject(kvm, &control);
		if (r)
			goto out;
		r = 0;
		break;
	}
	case KVM_XEN_HVM_CONFIG: {
		r = -EFAULT;
		if (copy_from_user(&kvm->arch.xen_hvm_config, argp,
				   sizeof(struct kvm_xen_hvm_config)))
			goto out;
		r = -EINVAL;
		if (kvm->arch.xen_hvm_config.flags)
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_CLOCK: {
		struct timespec now;
		struct kvm_clock_data user_ns;
		u64 now_ns;
		s64 delta;

		r = -EFAULT;
		if (copy_from_user(&user_ns, argp, sizeof(user_ns)))
			goto out;
		r = -EINVAL;
		if (user_ns.flags)
			goto out;
		r = 0;
		ktime_get_ts(&now);
		now_ns = timespec_to_ns(&now);
		delta = user_ns.clock - now_ns;
		kvm->arch.kvmclock_offset = delta;
		break;
	}
	case KVM_GET_CLOCK: {
		struct timespec now;
		struct kvm_clock_data user_ns;
		u64 now_ns;

		ktime_get_ts(&now);
		now_ns = timespec_to_ns(&now);
		user_ns.clock = kvm->arch.kvmclock_offset + now_ns;
		user_ns.flags = 0;

		r = -EFAULT;
		if (copy_to_user(argp, &user_ns, sizeof(user_ns)))
			goto out;
		r = 0;
		break;
	}
	default:
		;
	}
out:
	return r;
}

static void kvm_init_msr_list(void)
{
	u32 dummy[2];
	unsigned i, j;

	/* skip the first msrs in the list. KVM-specific */
	for (i = j = KVM_SAVE_MSRS_BEGIN; i < ARRAY_SIZE(msrs_to_save); i++) {
		if (rdmsr_safe(msrs_to_save[i], &dummy[0], &dummy[1]) < 0)
			continue;
		if (j < i)
			msrs_to_save[j] = msrs_to_save[i];
		j++;
	}
	num_msrs_to_save = j;
}

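/*
 * The probe loop compacts msrs_to_save in place: every MSR the host
 * cannot rdmsr_safe() is dropped, so only MSRs that actually exist on
 * this host are advertised to userspace via KVM_GET_MSR_INDEX_LIST.
 */
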
static int vcpu_mmio_write(struct kvm_vcpu *vcpu, gpa_t addr, int len,
			   const void *v)
{
	if (vcpu->arch.apic &&
	    !kvm_iodevice_write(&vcpu->arch.apic->dev, addr, len, v))
		return 0;

	return kvm_io_bus_write(vcpu->kvm, KVM_MMIO_BUS, addr, len, v);
}

static int vcpu_mmio_read(struct kvm_vcpu *vcpu, gpa_t addr, int len, void *v)
{
	if (vcpu->arch.apic &&
	    !kvm_iodevice_read(&vcpu->arch.apic->dev, addr, len, v))
		return 0;

	return kvm_io_bus_read(vcpu->kvm, KVM_MMIO_BUS, addr, len, v);
}

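/*
 * The local APIC gets first claim on an MMIO access; only if its
 * iodevice declines does the access go out on the general KVM_MMIO_BUS,
 * where other in-kernel devices (e.g. coalesced MMIO) may pick it up.
 */
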
static int kvm_read_guest_virt(gva_t addr, void *val, unsigned int bytes,
			       struct kvm_vcpu *vcpu)
{
	void *data = val;
	int r = X86EMUL_CONTINUE;

	while (bytes) {
		gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr);
		unsigned offset = addr & (PAGE_SIZE-1);
		unsigned toread = min(bytes, (unsigned)PAGE_SIZE - offset);
		int ret;

		if (gpa == UNMAPPED_GVA) {
			r = X86EMUL_PROPAGATE_FAULT;
			goto out;
		}
		ret = kvm_read_guest(vcpu->kvm, gpa, data, toread);
		if (ret < 0) {
			r = X86EMUL_UNHANDLEABLE;
			goto out;
		}

		bytes -= toread;
		data += toread;
		addr += toread;
	}
out:
	return r;
}

static int kvm_write_guest_virt(gva_t addr, void *val, unsigned int bytes,
				struct kvm_vcpu *vcpu)
{
	void *data = val;
	int r = X86EMUL_CONTINUE;

	while (bytes) {
		gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr);
		unsigned offset = addr & (PAGE_SIZE-1);
		unsigned towrite = min(bytes, (unsigned)PAGE_SIZE - offset);
		int ret;

		if (gpa == UNMAPPED_GVA) {
			r = X86EMUL_PROPAGATE_FAULT;
			goto out;
		}
		ret = kvm_write_guest(vcpu->kvm, gpa, data, towrite);
		if (ret < 0) {
			r = X86EMUL_UNHANDLEABLE;
			goto out;
		}

		bytes -= towrite;
		data += towrite;
		addr += towrite;
	}
out:
	return r;
}

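/*
 * Both helpers translate and copy at most one page per iteration.  For
 * example, an 8-byte read at a gva with page offset 0xffc is split
 * into toread = 4 bytes up to the page end, then the remaining 4 bytes
 * after a fresh gva_to_gpa() translation of the next page.
 */
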
static int emulator_read_emulated(unsigned long addr,
				  void *val,
				  unsigned int bytes,
				  struct kvm_vcpu *vcpu)
{
	gpa_t gpa;

	if (vcpu->mmio_read_completed) {
		memcpy(val, vcpu->mmio_data, bytes);
		trace_kvm_mmio(KVM_TRACE_MMIO_READ, bytes,
			       vcpu->mmio_phys_addr, *(u64 *)val);
		vcpu->mmio_read_completed = 0;
		return X86EMUL_CONTINUE;
	}

	gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr);

	/* For APIC access vmexit */
	if ((gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE)
		goto mmio;

	if (kvm_read_guest_virt(addr, val, bytes, vcpu)
			== X86EMUL_CONTINUE)
		return X86EMUL_CONTINUE;
	if (gpa == UNMAPPED_GVA)
		return X86EMUL_PROPAGATE_FAULT;

mmio:
	/*
	 * Is this MMIO handled locally?
	 */
	if (!vcpu_mmio_read(vcpu, gpa, bytes, val)) {
		trace_kvm_mmio(KVM_TRACE_MMIO_READ, bytes, gpa, *(u64 *)val);
		return X86EMUL_CONTINUE;
	}

	trace_kvm_mmio(KVM_TRACE_MMIO_READ_UNSATISFIED, bytes, gpa, 0);

	vcpu->mmio_needed = 1;
	vcpu->mmio_phys_addr = gpa;
	vcpu->mmio_size = bytes;
	vcpu->mmio_is_write = 0;

	return X86EMUL_UNHANDLEABLE;
}
int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
			const void *val, int bytes)
{
	int ret;

	ret = kvm_write_guest(vcpu->kvm, gpa, val, bytes);
	if (ret < 0)
		return 0;
	kvm_mmu_pte_write(vcpu, gpa, val, bytes, 1);
	return 1;
}

static int emulator_write_emulated_onepage(unsigned long addr,
					   const void *val,
					   unsigned int bytes,
					   struct kvm_vcpu *vcpu)
{
	gpa_t gpa;

	gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr);

	if (gpa == UNMAPPED_GVA) {
		kvm_inject_page_fault(vcpu, addr, 2);
		return X86EMUL_PROPAGATE_FAULT;
	}

	/* For APIC access vmexit */
	if ((gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE)
		goto mmio;

	if (emulator_write_phys(vcpu, gpa, val, bytes))
		return X86EMUL_CONTINUE;

mmio:
	trace_kvm_mmio(KVM_TRACE_MMIO_WRITE, bytes, gpa, *(u64 *)val);
	/*
	 * Is this MMIO handled locally?
	 */
	if (!vcpu_mmio_write(vcpu, gpa, bytes, val))
		return X86EMUL_CONTINUE;

	vcpu->mmio_needed = 1;
	vcpu->mmio_phys_addr = gpa;
	vcpu->mmio_size = bytes;
	vcpu->mmio_is_write = 1;
	memcpy(vcpu->mmio_data, val, bytes);

	return X86EMUL_CONTINUE;
}

int emulator_write_emulated(unsigned long addr,
			    const void *val,
			    unsigned int bytes,
			    struct kvm_vcpu *vcpu)
{
	/* Crossing a page boundary? */
	if (((addr + bytes - 1) ^ addr) & PAGE_MASK) {
		int rc, now;

		now = -addr & ~PAGE_MASK;
		rc = emulator_write_emulated_onepage(addr, val, now, vcpu);
		if (rc != X86EMUL_CONTINUE)
			return rc;
		addr += now;
		val += now;
		bytes -= now;
	}
	return emulator_write_emulated_onepage(addr, val, bytes, vcpu);
}
EXPORT_SYMBOL_GPL(emulator_write_emulated);
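/*
 * Page-split arithmetic in emulator_write_emulated() above, assuming
 * PAGE_SIZE == 4096: "now = -addr & ~PAGE_MASK" is the distance from addr
 * to the end of its page. E.g. for addr == 0x2ffe and bytes == 4,
 * ((addr + 3) ^ addr) & PAGE_MASK is non-zero (the write straddles
 * 0x3000), now == 0x1000 - 0xffe == 2, so 2 bytes go to the first page
 * and the remaining 2 are handled by a second one-page write.
 */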
static int emulator_cmpxchg_emulated(unsigned long addr,
				     const void *old,
				     const void *new,
				     unsigned int bytes,
				     struct kvm_vcpu *vcpu)
{
	printk_once(KERN_WARNING "kvm: emulating exchange as write\n");
#ifndef CONFIG_X86_64
	/* a guest cmpxchg8b has to be emulated atomically */
	if (bytes == 8) {
		gpa_t gpa;
		struct page *page;
		char *kaddr;
		u64 val;

		gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr);

		if (gpa == UNMAPPED_GVA ||
		    (gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE)
			goto emul_write;

		if (((gpa + bytes - 1) & PAGE_MASK) != (gpa & PAGE_MASK))
			goto emul_write;

		val = *(u64 *)new;

		page = gfn_to_page(vcpu->kvm, gpa >> PAGE_SHIFT);

		kaddr = kmap_atomic(page, KM_USER0);
		set_64bit((u64 *)(kaddr + offset_in_page(gpa)), val);
		kunmap_atomic(kaddr, KM_USER0);
		kvm_release_page_dirty(page);
	}
emul_write:
#endif

	return emulator_write_emulated(addr, new, bytes, vcpu);
}
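/*
 * Note on the function above: the common path ignores "old" entirely and
 * emulates cmpxchg as an unconditional write, which is not atomic with
 * respect to other vcpus; printk_once() flags this the first time it
 * happens. Only the 32-bit cmpxchg8b case gets a truly atomic 64-bit
 * store via set_64bit(), and only when the operand does not cross a page.
 */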
static unsigned long get_segment_base(struct kvm_vcpu *vcpu, int seg)
{
	return kvm_x86_ops->get_segment_base(vcpu, seg);
}

int emulate_invlpg(struct kvm_vcpu *vcpu, gva_t address)
{
	kvm_mmu_invlpg(vcpu, address);
	return X86EMUL_CONTINUE;
}

int emulate_clts(struct kvm_vcpu *vcpu)
{
	kvm_x86_ops->set_cr0(vcpu, kvm_read_cr0_bits(vcpu, ~X86_CR0_TS));
	return X86EMUL_CONTINUE;
}

int emulator_get_dr(struct x86_emulate_ctxt *ctxt, int dr, unsigned long *dest)
{
	return kvm_x86_ops->get_dr(ctxt->vcpu, dr, dest);
}

int emulator_set_dr(struct x86_emulate_ctxt *ctxt, int dr, unsigned long value)
{
	unsigned long mask = (ctxt->mode == X86EMUL_MODE_PROT64) ? ~0ULL : ~0U;

	return kvm_x86_ops->set_dr(ctxt->vcpu, dr, value & mask);
}

void kvm_report_emulation_failure(struct kvm_vcpu *vcpu, const char *context)
{
	u8 opcodes[4];
	unsigned long rip = kvm_rip_read(vcpu);
	unsigned long rip_linear;

	if (!printk_ratelimit())
		return;

	rip_linear = rip + get_segment_base(vcpu, VCPU_SREG_CS);

	kvm_read_guest_virt(rip_linear, (void *)opcodes, 4, vcpu);

	printk(KERN_ERR "emulation failed (%s) rip %lx %02x %02x %02x %02x\n",
	       context, rip, opcodes[0], opcodes[1], opcodes[2], opcodes[3]);
}
EXPORT_SYMBOL_GPL(kvm_report_emulation_failure);

static struct x86_emulate_ops emulate_ops = {
	.read_std		= kvm_read_guest_virt,
	.read_emulated		= emulator_read_emulated,
	.write_emulated		= emulator_write_emulated,
	.cmpxchg_emulated	= emulator_cmpxchg_emulated,
};
static void cache_all_regs(struct kvm_vcpu *vcpu)
{
	kvm_register_read(vcpu, VCPU_REGS_RAX);
	kvm_register_read(vcpu, VCPU_REGS_RSP);
	kvm_register_read(vcpu, VCPU_REGS_RIP);
	vcpu->arch.regs_dirty = ~0;
}
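/*
 * Why cache_all_regs() helps (a reading of the mechanism, not a statement
 * from this file): on hosts where some registers are fetched lazily, the
 * reads above populate vcpu->arch.regs so the emulator can use ->regs
 * directly; marking every register dirty then guarantees the cached
 * values are written back before the next guest entry, so direct ->regs
 * modifications are not lost. See also the TODO in emulate_instruction().
 */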
int emulate_instruction(struct kvm_vcpu *vcpu,
			unsigned long cr2,
			u16 error_code,
			int emulation_type)
{
	int r, shadow_mask;
	struct decode_cache *c;
	struct kvm_run *run = vcpu->run;

	kvm_clear_exception_queue(vcpu);
	vcpu->arch.mmio_fault_cr2 = cr2;
	/*
	 * TODO: fix emulate.c to use guest_read/write_register
	 * instead of direct ->regs accesses; that can save hundreds of
	 * cycles on Intel for instructions that don't read/change RSP,
	 * for example.
	 */
	cache_all_regs(vcpu);

	vcpu->mmio_is_write = 0;
	vcpu->arch.pio.string = 0;

	if (!(emulation_type & EMULTYPE_NO_DECODE)) {
		int cs_db, cs_l;
		kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);

		vcpu->arch.emulate_ctxt.vcpu = vcpu;
		vcpu->arch.emulate_ctxt.eflags = kvm_get_rflags(vcpu);
		vcpu->arch.emulate_ctxt.mode =
			(vcpu->arch.emulate_ctxt.eflags & X86_EFLAGS_VM)
			? X86EMUL_MODE_REAL : cs_l
			? X86EMUL_MODE_PROT64 : cs_db
			? X86EMUL_MODE_PROT32 : X86EMUL_MODE_PROT16;

		r = x86_decode_insn(&vcpu->arch.emulate_ctxt, &emulate_ops);

		/* Only allow emulation of specific instructions on #UD
		 * (namely VMMCALL, sysenter, sysexit, syscall) */
		c = &vcpu->arch.emulate_ctxt.decode;
		if (emulation_type & EMULTYPE_TRAP_UD) {
			if (!c->twobyte)
				return EMULATE_FAIL;
			switch (c->b) {
			case 0x01: /* VMMCALL */
				if (c->modrm_mod != 3 || c->modrm_rm != 1)
					return EMULATE_FAIL;
				break;
			case 0x34: /* sysenter */
			case 0x35: /* sysexit */
				if (c->modrm_mod != 0 || c->modrm_rm != 0)
					return EMULATE_FAIL;
				break;
			case 0x05: /* syscall */
				if (c->modrm_mod != 0 || c->modrm_rm != 0)
					return EMULATE_FAIL;
				break;
			default:
				return EMULATE_FAIL;
			}

			if (!(c->modrm_reg == 0 || c->modrm_reg == 3))
				return EMULATE_FAIL;
		}

		++vcpu->stat.insn_emulation;
		if (r) {
			++vcpu->stat.insn_emulation_fail;
			if (kvm_mmu_unprotect_page_virt(vcpu, cr2))
				return EMULATE_DONE;
			return EMULATE_FAIL;
		}
	}

	if (emulation_type & EMULTYPE_SKIP) {
		kvm_rip_write(vcpu, vcpu->arch.emulate_ctxt.decode.eip);
		return EMULATE_DONE;
	}

	r = x86_emulate_insn(&vcpu->arch.emulate_ctxt, &emulate_ops);
	shadow_mask = vcpu->arch.emulate_ctxt.interruptibility;

	if (r == 0)
		kvm_x86_ops->set_interrupt_shadow(vcpu, shadow_mask);

	if (vcpu->arch.pio.string)
		return EMULATE_DO_MMIO;

	if ((r || vcpu->mmio_is_write) && run) {
		run->exit_reason = KVM_EXIT_MMIO;
		run->mmio.phys_addr = vcpu->mmio_phys_addr;
		memcpy(run->mmio.data, vcpu->mmio_data, 8);
		run->mmio.len = vcpu->mmio_size;
		run->mmio.is_write = vcpu->mmio_is_write;
	}

	if (r) {
		if (kvm_mmu_unprotect_page_virt(vcpu, cr2))
			return EMULATE_DONE;
		if (!vcpu->mmio_needed) {
			kvm_report_emulation_failure(vcpu, "mmio");
			return EMULATE_FAIL;
		}
		return EMULATE_DO_MMIO;
	}

	kvm_set_rflags(vcpu, vcpu->arch.emulate_ctxt.eflags);

	if (vcpu->mmio_is_write) {
		vcpu->mmio_needed = 0;
		return EMULATE_DO_MMIO;
	}

	return EMULATE_DONE;
}
EXPORT_SYMBOL_GPL(emulate_instruction);
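/*
 * Return-value summary for emulate_instruction() above: EMULATE_DONE means
 * the instruction was handled entirely in the kernel; EMULATE_DO_MMIO
 * means state was staged in vcpu->run and userspace must complete an MMIO
 * or string-PIO access before re-entering; EMULATE_FAIL means the
 * instruction could be neither decoded nor executed. Callers typically
 * resume the guest on EMULATE_DONE and exit to userspace on
 * EMULATE_DO_MMIO.
 */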
static int pio_copy_data(struct kvm_vcpu *vcpu)
{
	void *p = vcpu->arch.pio_data;
	gva_t q = vcpu->arch.pio.guest_gva;
	unsigned bytes;
	int ret;

	bytes = vcpu->arch.pio.size * vcpu->arch.pio.cur_count;
	if (vcpu->arch.pio.in)
		ret = kvm_write_guest_virt(q, p, bytes, vcpu);
	else
		ret = kvm_read_guest_virt(q, p, bytes, vcpu);
	return ret;
}
int complete_pio(struct kvm_vcpu *vcpu)
{
	struct kvm_pio_request *io = &vcpu->arch.pio;
	long delta;
	int r;
	unsigned long val;

	if (!io->string) {
		if (io->in) {
			val = kvm_register_read(vcpu, VCPU_REGS_RAX);
			memcpy(&val, vcpu->arch.pio_data, io->size);
			kvm_register_write(vcpu, VCPU_REGS_RAX, val);
		}
	} else {
		if (io->in) {
			r = pio_copy_data(vcpu);
			if (r)
				return r;
		}

		delta = 1;
		if (io->rep) {
			delta *= io->cur_count;
			/*
			 * The size of the register should really depend on
			 * current address size.
			 */
			val = kvm_register_read(vcpu, VCPU_REGS_RCX);
			val -= delta;
			kvm_register_write(vcpu, VCPU_REGS_RCX, val);
		}
		if (io->down)
			delta = -delta;
		delta *= io->size;
		if (io->in) {
			val = kvm_register_read(vcpu, VCPU_REGS_RDI);
			val += delta;
			kvm_register_write(vcpu, VCPU_REGS_RDI, val);
		} else {
			val = kvm_register_read(vcpu, VCPU_REGS_RSI);
			val += delta;
			kvm_register_write(vcpu, VCPU_REGS_RSI, val);
		}
	}

	io->count -= io->cur_count;
	io->cur_count = 0;

	return 0;
}
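/*
 * Worked example for the string-PIO register fixups above: for a forward
 * (io->down == 0) "rep outsw" (io->size == 2) that just completed
 * cur_count == 5 iterations, RCX is decremented by 5 and RSI advances by
 * delta = 5 * 2 = 10 bytes. An "ins" variant would advance RDI instead,
 * and a backward copy (io->down) would move RSI/RDI down by the same
 * amount.
 */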
static int kernel_pio(struct kvm_vcpu *vcpu, void *pd)
{
	/* TODO: String I/O for in kernel device */
	int r;

	if (vcpu->arch.pio.in)
		r = kvm_io_bus_read(vcpu->kvm, KVM_PIO_BUS, vcpu->arch.pio.port,
				    vcpu->arch.pio.size, pd);
	else
		r = kvm_io_bus_write(vcpu->kvm, KVM_PIO_BUS,
				     vcpu->arch.pio.port, vcpu->arch.pio.size,
				     pd);
	return r;
}

static int pio_string_write(struct kvm_vcpu *vcpu)
{
	struct kvm_pio_request *io = &vcpu->arch.pio;
	void *pd = vcpu->arch.pio_data;
	int i, r = 0;

	for (i = 0; i < io->cur_count; i++) {
		if (kvm_io_bus_write(vcpu->kvm, KVM_PIO_BUS,
				     io->port, io->size, pd)) {
			r = -EOPNOTSUPP;
			break;
		}
		pd += io->size;
	}
	return r;
}
int kvm_emulate_pio(struct kvm_vcpu *vcpu, int in, int size, unsigned port)
{
	unsigned long val;

	vcpu->run->exit_reason = KVM_EXIT_IO;
	vcpu->run->io.direction = in ? KVM_EXIT_IO_IN : KVM_EXIT_IO_OUT;
	vcpu->run->io.size = vcpu->arch.pio.size = size;
	vcpu->run->io.data_offset = KVM_PIO_PAGE_OFFSET * PAGE_SIZE;
	vcpu->run->io.count = vcpu->arch.pio.count = vcpu->arch.pio.cur_count = 1;
	vcpu->run->io.port = vcpu->arch.pio.port = port;
	vcpu->arch.pio.in = in;
	vcpu->arch.pio.string = 0;
	vcpu->arch.pio.down = 0;
	vcpu->arch.pio.rep = 0;

	trace_kvm_pio(vcpu->run->io.direction == KVM_EXIT_IO_OUT, port,
		      size, 1);

	val = kvm_register_read(vcpu, VCPU_REGS_RAX);
	memcpy(vcpu->arch.pio_data, &val, 4);

	if (!kernel_pio(vcpu, vcpu->arch.pio_data)) {
		complete_pio(vcpu);
		return 1;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_emulate_pio);
int kvm_emulate_pio_string(struct kvm_vcpu *vcpu, int in,
			   int size, unsigned long count, int down,
			   gva_t address, int rep, unsigned port)
{
	unsigned now, in_page;
	int ret = 0;

	vcpu->run->exit_reason = KVM_EXIT_IO;
	vcpu->run->io.direction = in ? KVM_EXIT_IO_IN : KVM_EXIT_IO_OUT;
	vcpu->run->io.size = vcpu->arch.pio.size = size;
	vcpu->run->io.data_offset = KVM_PIO_PAGE_OFFSET * PAGE_SIZE;
	vcpu->run->io.count = vcpu->arch.pio.count = vcpu->arch.pio.cur_count = count;
	vcpu->run->io.port = vcpu->arch.pio.port = port;
	vcpu->arch.pio.in = in;
	vcpu->arch.pio.string = 1;
	vcpu->arch.pio.down = down;
	vcpu->arch.pio.rep = rep;

	trace_kvm_pio(vcpu->run->io.direction == KVM_EXIT_IO_OUT, port,
		      size, count);

	if (!count) {
		kvm_x86_ops->skip_emulated_instruction(vcpu);
		return 1;
	}

	if (!down)
		in_page = PAGE_SIZE - offset_in_page(address);
	else
		in_page = offset_in_page(address) + size;
	now = min(count, (unsigned long)in_page / size);
	if (!now)
		now = 1;
	if (down) {
		/*
		 * String I/O in reverse.  Yuck.  Kill the guest, fix later.
		 */
		pr_unimpl(vcpu, "guest string pio down\n");
		kvm_inject_gp(vcpu, 0);
		return 1;
	}
	vcpu->run->io.count = now;
	vcpu->arch.pio.cur_count = now;

	if (vcpu->arch.pio.cur_count == vcpu->arch.pio.count)
		kvm_x86_ops->skip_emulated_instruction(vcpu);

	vcpu->arch.pio.guest_gva = address;

	if (!vcpu->arch.pio.in) {
		/* string PIO write */
		ret = pio_copy_data(vcpu);
		if (ret == X86EMUL_PROPAGATE_FAULT) {
			kvm_inject_gp(vcpu, 0);
			return 1;
		}
		if (ret == 0 && !pio_string_write(vcpu)) {
			complete_pio(vcpu);
			if (vcpu->arch.pio.count == 0)
				ret = 1;
		}
	}
	/* no string PIO read support yet */

	return ret;
}
EXPORT_SYMBOL_GPL(kvm_emulate_pio_string);
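/*
 * Batch sizing in kvm_emulate_pio_string() above, assuming
 * PAGE_SIZE == 4096: only the iterations whose buffer fits in the current
 * page are processed per exit. E.g. a forward "rep outsw" (size == 2)
 * with address == 0xff8 and count == 100 gives
 * in_page == 0x1000 - 0xff8 == 8, so now == min(100, 8 / 2) == 4
 * iterations this round. Because cur_count != count, the instruction is
 * not skipped; complete_pio() advances the registers and the guest
 * re-executes the instruction for the remaining count.
 */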
static void bounce_off(void *info)
{
	/* nothing */
}

static int kvmclock_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
				     void *data)
{
	struct cpufreq_freqs *freq = data;
	struct kvm *kvm;
	struct kvm_vcpu *vcpu;
	int i, send_ipi = 0;

	if (val == CPUFREQ_PRECHANGE && freq->old > freq->new)
		return 0;
	if (val == CPUFREQ_POSTCHANGE && freq->old < freq->new)
		return 0;
	per_cpu(cpu_tsc_khz, freq->cpu) = freq->new;

	spin_lock(&kvm_lock);
	list_for_each_entry(kvm, &vm_list, vm_list) {
		kvm_for_each_vcpu(i, vcpu, kvm) {
			if (vcpu->cpu != freq->cpu)
				continue;
			if (!kvm_request_guest_time_update(vcpu))
				continue;
			if (vcpu->cpu != smp_processor_id())
				send_ipi++;
		}
	}
	spin_unlock(&kvm_lock);

	if (freq->old < freq->new && send_ipi) {
		/*
		 * We upscale the frequency.  We must make sure the guest
		 * doesn't see old kvmclock values while running with the
		 * new frequency; otherwise we risk the guest seeing time
		 * go backwards.
		 *
		 * In case we update the frequency for another cpu
		 * (which might be in guest context) send an interrupt
		 * to kick the cpu out of guest context.  Next time
		 * guest context is entered kvmclock will be updated,
		 * so the guest will not see stale values.
		 */
		smp_call_function_single(freq->cpu, bounce_off, NULL, 1);
	}
	return 0;
}

static struct notifier_block kvmclock_cpufreq_notifier_block = {
	.notifier_call = kvmclock_cpufreq_notifier
};
static void kvm_timer_init(void)
{
	int cpu;

	if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) {
		cpufreq_register_notifier(&kvmclock_cpufreq_notifier_block,
					  CPUFREQ_TRANSITION_NOTIFIER);
		for_each_online_cpu(cpu) {
			unsigned long khz = cpufreq_get(cpu);
			if (!khz)
				khz = tsc_khz;
			per_cpu(cpu_tsc_khz, cpu) = khz;
		}
	} else {
		for_each_possible_cpu(cpu)
			per_cpu(cpu_tsc_khz, cpu) = tsc_khz;
	}
}
int kvm_arch_init(void *opaque)
{
	int r;
	struct kvm_x86_ops *ops = (struct kvm_x86_ops *)opaque;

	if (kvm_x86_ops) {
		printk(KERN_ERR "kvm: already loaded the other module\n");
		r = -EEXIST;
		goto out;
	}

	if (!ops->cpu_has_kvm_support()) {
		printk(KERN_ERR "kvm: no hardware support\n");
		r = -EOPNOTSUPP;
		goto out;
	}
	if (ops->disabled_by_bios()) {
		printk(KERN_ERR "kvm: disabled by bios\n");
		r = -EOPNOTSUPP;
		goto out;
	}

	r = kvm_mmu_module_init();
	if (r)
		goto out;

	kvm_init_msr_list();

	kvm_x86_ops = ops;
	kvm_mmu_set_nonpresent_ptes(0ull, 0ull);
	kvm_mmu_set_base_ptes(PT_PRESENT_MASK);
	kvm_mmu_set_mask_ptes(PT_USER_MASK, PT_ACCESSED_MASK,
			PT_DIRTY_MASK, PT64_NX_MASK, 0);

	kvm_timer_init();

	return 0;

out:
	return r;
}

void kvm_arch_exit(void)
{
	if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
		cpufreq_unregister_notifier(&kvmclock_cpufreq_notifier_block,
					    CPUFREQ_TRANSITION_NOTIFIER);
	kvm_x86_ops = NULL;
	kvm_mmu_module_exit();
}
int kvm_emulate_halt(struct kvm_vcpu *vcpu)
{
	++vcpu->stat.halt_exits;
	if (irqchip_in_kernel(vcpu->kvm)) {
		vcpu->arch.mp_state = KVM_MP_STATE_HALTED;
		return 1;
	} else {
		vcpu->run->exit_reason = KVM_EXIT_HLT;
		return 0;
	}
}
EXPORT_SYMBOL_GPL(kvm_emulate_halt);

static inline gpa_t hc_gpa(struct kvm_vcpu *vcpu, unsigned long a0,
			   unsigned long a1)
{
	if (is_long_mode(vcpu))
		return a0;
	else
		return a0 | ((gpa_t)a1 << 32);
}
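/*
 * Example for hc_gpa() above: a 32-bit guest passes a 64-bit guest
 * physical address split across two registers, so a0 == 0x1000 and
 * a1 == 0x2 combine to the gpa 0x200001000; a long-mode guest passes the
 * full address in a0 and a1 is ignored.
 */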
int kvm_hv_hypercall(struct kvm_vcpu *vcpu)
{
	u64 param, ingpa, outgpa, ret;
	uint16_t code, rep_idx, rep_cnt, res = HV_STATUS_SUCCESS, rep_done = 0;
	bool fast, longmode;
	int cs_db, cs_l;

	/*
	 * A hypercall generates #UD from non-zero CPL and from real mode,
	 * per the Hyper-V spec.
	 */
	if (kvm_x86_ops->get_cpl(vcpu) != 0 ||
	    !kvm_read_cr0_bits(vcpu, X86_CR0_PE)) {
		kvm_queue_exception(vcpu, UD_VECTOR);
		return 0;
	}

	kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
	longmode = is_long_mode(vcpu) && cs_l == 1;

	if (!longmode) {
		param = ((u64)kvm_register_read(vcpu, VCPU_REGS_RDX) << 32) |
			(kvm_register_read(vcpu, VCPU_REGS_RAX) & 0xffffffff);
		ingpa = ((u64)kvm_register_read(vcpu, VCPU_REGS_RBX) << 32) |
			(kvm_register_read(vcpu, VCPU_REGS_RCX) & 0xffffffff);
		outgpa = ((u64)kvm_register_read(vcpu, VCPU_REGS_RDI) << 32) |
			(kvm_register_read(vcpu, VCPU_REGS_RSI) & 0xffffffff);
	}
#ifdef CONFIG_X86_64
	else {
		param = kvm_register_read(vcpu, VCPU_REGS_RCX);
		ingpa = kvm_register_read(vcpu, VCPU_REGS_RDX);
		outgpa = kvm_register_read(vcpu, VCPU_REGS_R8);
	}
#endif

	code = param & 0xffff;
	fast = (param >> 16) & 0x1;
	rep_cnt = (param >> 32) & 0xfff;
	rep_idx = (param >> 48) & 0xfff;

	trace_kvm_hv_hypercall(code, fast, rep_cnt, rep_idx, ingpa, outgpa);

	switch (code) {
	case HV_X64_HV_NOTIFY_LONG_SPIN_WAIT:
		kvm_vcpu_on_spin(vcpu);
		break;
	default:
		res = HV_STATUS_INVALID_HYPERCALL_CODE;
		break;
	}

	ret = res | (((u64)rep_done & 0xfff) << 32);
	if (longmode) {
		kvm_register_write(vcpu, VCPU_REGS_RAX, ret);
	} else {
		kvm_register_write(vcpu, VCPU_REGS_RDX, ret >> 32);
		kvm_register_write(vcpu, VCPU_REGS_RAX, ret & 0xffffffff);
	}

	return 1;
}
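/*
 * Layout of the Hyper-V hypercall input value as decoded above: the call
 * code lives in bits 15:0, the "fast" flag (register-based arguments) in
 * bit 16, the rep count in bits 43:32 and the rep start index in bits
 * 59:48. The result written back mirrors this: status in the low 16 bits
 * and the number of completed reps in bits 43:32.
 */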
int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
{
	unsigned long nr, a0, a1, a2, a3, ret;
	int r = 1;

	if (kvm_hv_hypercall_enabled(vcpu->kvm))
		return kvm_hv_hypercall(vcpu);

	nr = kvm_register_read(vcpu, VCPU_REGS_RAX);
	a0 = kvm_register_read(vcpu, VCPU_REGS_RBX);
	a1 = kvm_register_read(vcpu, VCPU_REGS_RCX);
	a2 = kvm_register_read(vcpu, VCPU_REGS_RDX);
	a3 = kvm_register_read(vcpu, VCPU_REGS_RSI);

	trace_kvm_hypercall(nr, a0, a1, a2, a3);

	if (!is_long_mode(vcpu)) {
		nr &= 0xFFFFFFFF;
		a0 &= 0xFFFFFFFF;
		a1 &= 0xFFFFFFFF;
		a2 &= 0xFFFFFFFF;
		a3 &= 0xFFFFFFFF;
	}

	if (kvm_x86_ops->get_cpl(vcpu) != 0) {
		ret = -KVM_EPERM;
		goto out;
	}

	switch (nr) {
	case KVM_HC_VAPIC_POLL_IRQ:
		ret = 0;
		break;
	case KVM_HC_MMU_OP:
		r = kvm_pv_mmu_op(vcpu, a0, hc_gpa(vcpu, a1, a2), &ret);
		break;
	default:
		ret = -KVM_ENOSYS;
		break;
	}
out:
	kvm_register_write(vcpu, VCPU_REGS_RAX, ret);
	++vcpu->stat.hypercalls;
	return r;
}
EXPORT_SYMBOL_GPL(kvm_emulate_hypercall);
int kvm_fix_hypercall(struct kvm_vcpu *vcpu)
{
	char instruction[3];
	int ret = 0;
	unsigned long rip = kvm_rip_read(vcpu);

	/*
	 * Blow out the MMU to ensure that no other VCPU has an active mapping
	 * to ensure that the updated hypercall appears atomically across all
	 * VCPUs.
	 */
	kvm_mmu_zap_all(vcpu->kvm);

	kvm_x86_ops->patch_hypercall(vcpu, instruction);
	if (emulator_write_emulated(rip, instruction, 3, vcpu)
	    != X86EMUL_CONTINUE)
		ret = -EFAULT;

	return ret;
}

static u64 mk_cr_64(u64 curr_cr, u32 new_val)
{
	return (curr_cr & ~((1ULL << 32) - 1)) | new_val;
}
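/*
 * mk_cr_64() above splices a 32-bit value into the low half of a control
 * register while preserving the upper half; e.g.
 * mk_cr_64(0x1234567800000000ULL, 0x80000011) == 0x1234567880000011ULL.
 * realmode_set_cr() below uses it so a 32-bit write cannot clobber the
 * high CR bits.
 */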
void realmode_lgdt(struct kvm_vcpu *vcpu, u16 limit, unsigned long base)
{
	struct descriptor_table dt = { limit, base };

	kvm_x86_ops->set_gdt(vcpu, &dt);
}

void realmode_lidt(struct kvm_vcpu *vcpu, u16 limit, unsigned long base)
{
	struct descriptor_table dt = { limit, base };

	kvm_x86_ops->set_idt(vcpu, &dt);
}

void realmode_lmsw(struct kvm_vcpu *vcpu, unsigned long msw,
		   unsigned long *rflags)
{
	kvm_lmsw(vcpu, msw);
	*rflags = kvm_get_rflags(vcpu);
}

unsigned long realmode_get_cr(struct kvm_vcpu *vcpu, int cr)
{
	unsigned long value;

	switch (cr) {
	case 0:
		value = kvm_read_cr0(vcpu);
		break;
	case 2:
		value = vcpu->arch.cr2;
		break;
	case 3:
		value = vcpu->arch.cr3;
		break;
	case 4:
		value = kvm_read_cr4(vcpu);
		break;
	case 8:
		value = kvm_get_cr8(vcpu);
		break;
	default:
		vcpu_printf(vcpu, "%s: unexpected cr %u\n", __func__, cr);
		return 0;
	}

	return value;
}

void realmode_set_cr(struct kvm_vcpu *vcpu, int cr, unsigned long val,
		     unsigned long *rflags)
{
	switch (cr) {
	case 0:
		kvm_set_cr0(vcpu, mk_cr_64(kvm_read_cr0(vcpu), val));
		*rflags = kvm_get_rflags(vcpu);
		break;
	case 2:
		vcpu->arch.cr2 = val;
		break;
	case 3:
		kvm_set_cr3(vcpu, val);
		break;
	case 4:
		kvm_set_cr4(vcpu, mk_cr_64(kvm_read_cr4(vcpu), val));
		break;
	case 8:
		kvm_set_cr8(vcpu, val & 0xfUL);
		break;
	default:
		vcpu_printf(vcpu, "%s: unexpected cr %u\n", __func__, cr);
	}
}
static int move_to_next_stateful_cpuid_entry(struct kvm_vcpu *vcpu, int i)
{
	struct kvm_cpuid_entry2 *e = &vcpu->arch.cpuid_entries[i];
	int j, nent = vcpu->arch.cpuid_nent;

	e->flags &= ~KVM_CPUID_FLAG_STATE_READ_NEXT;
	/* when no next entry is found, the current entry[i] is reselected */
	for (j = i + 1; ; j = (j + 1) % nent) {
		struct kvm_cpuid_entry2 *ej = &vcpu->arch.cpuid_entries[j];
		if (ej->function == e->function) {
			ej->flags |= KVM_CPUID_FLAG_STATE_READ_NEXT;
			return j;
		}
	}
	return 0; /* silence gcc, even though control never reaches here */
}

/* find an entry with matching function, matching index (if needed), and that
 * should be read next (if it's stateful) */
static int is_matching_cpuid_entry(struct kvm_cpuid_entry2 *e,
	u32 function, u32 index)
{
	if (e->function != function)
		return 0;
	if ((e->flags & KVM_CPUID_FLAG_SIGNIFCANT_INDEX) && e->index != index)
		return 0;
	if ((e->flags & KVM_CPUID_FLAG_STATEFUL_FUNC) &&
	    !(e->flags & KVM_CPUID_FLAG_STATE_READ_NEXT))
		return 0;
	return 1;
}

struct kvm_cpuid_entry2 *kvm_find_cpuid_entry(struct kvm_vcpu *vcpu,
					      u32 function, u32 index)
{
	int i;
	struct kvm_cpuid_entry2 *best = NULL;

	for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
		struct kvm_cpuid_entry2 *e;

		e = &vcpu->arch.cpuid_entries[i];
		if (is_matching_cpuid_entry(e, function, index)) {
			if (e->flags & KVM_CPUID_FLAG_STATEFUL_FUNC)
				move_to_next_stateful_cpuid_entry(vcpu, i);
			best = e;
			break;
		}
		/*
		 * Both basic or both extended?
		 */
		if (((e->function ^ function) & 0x80000000) == 0)
			if (!best || e->function > best->function)
				best = e;
	}
	return best;
}
EXPORT_SYMBOL_GPL(kvm_find_cpuid_entry);
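/*
 * Stateful CPUID handling above: some leaves (historically CPUID leaf 2
 * on Intel) return a different value on each consecutive invocation. Such
 * leaves are stored as several entries sharing a function number; exactly
 * one carries KVM_CPUID_FLAG_STATE_READ_NEXT, and after it is returned
 * move_to_next_stateful_cpuid_entry() rotates the flag to the next entry
 * with the same function, wrapping around modulo cpuid_nent.
 */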
int cpuid_maxphyaddr(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 0x80000008, 0);
	if (best)
		return best->eax & 0xff;
	return 36;
}
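/*
 * cpuid_maxphyaddr() reads the guest's physical-address width from CPUID
 * leaf 0x80000008, EAX bits 7:0. If the guest's CPUID table lacks that
 * leaf, 36 bits is used as a conservative default (the classic PAE 64 GB
 * limit).
 */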
void kvm_emulate_cpuid(struct kvm_vcpu *vcpu)
{
	u32 function, index;
	struct kvm_cpuid_entry2 *best;

	function = kvm_register_read(vcpu, VCPU_REGS_RAX);
	index = kvm_register_read(vcpu, VCPU_REGS_RCX);
	kvm_register_write(vcpu, VCPU_REGS_RAX, 0);
	kvm_register_write(vcpu, VCPU_REGS_RBX, 0);
	kvm_register_write(vcpu, VCPU_REGS_RCX, 0);
	kvm_register_write(vcpu, VCPU_REGS_RDX, 0);
	best = kvm_find_cpuid_entry(vcpu, function, index);
	if (best) {
		kvm_register_write(vcpu, VCPU_REGS_RAX, best->eax);
		kvm_register_write(vcpu, VCPU_REGS_RBX, best->ebx);
		kvm_register_write(vcpu, VCPU_REGS_RCX, best->ecx);
		kvm_register_write(vcpu, VCPU_REGS_RDX, best->edx);
	}
	kvm_x86_ops->skip_emulated_instruction(vcpu);
	trace_kvm_cpuid(function,
			kvm_register_read(vcpu, VCPU_REGS_RAX),
			kvm_register_read(vcpu, VCPU_REGS_RBX),
			kvm_register_read(vcpu, VCPU_REGS_RCX),
			kvm_register_read(vcpu, VCPU_REGS_RDX));
}
EXPORT_SYMBOL_GPL(kvm_emulate_cpuid);
/*
 * Check if userspace requested an interrupt window, and that the
 * interrupt window is open.
 *
 * No need to exit to userspace if we already have an interrupt queued.
 */
static int dm_request_for_irq_injection(struct kvm_vcpu *vcpu)
{
	return (!irqchip_in_kernel(vcpu->kvm) && !kvm_cpu_has_interrupt(vcpu) &&
		vcpu->run->request_interrupt_window &&
		kvm_arch_interrupt_allowed(vcpu));
}

static void post_kvm_run_save(struct kvm_vcpu *vcpu)
{
	struct kvm_run *kvm_run = vcpu->run;

	kvm_run->if_flag = (kvm_get_rflags(vcpu) & X86_EFLAGS_IF) != 0;
	kvm_run->cr8 = kvm_get_cr8(vcpu);
	kvm_run->apic_base = kvm_get_apic_base(vcpu);
	if (irqchip_in_kernel(vcpu->kvm))
		kvm_run->ready_for_interrupt_injection = 1;
	else
		kvm_run->ready_for_interrupt_injection =
			kvm_arch_interrupt_allowed(vcpu) &&
			!kvm_cpu_has_interrupt(vcpu) &&
			!kvm_event_needs_reinjection(vcpu);
}
static void vapic_enter(struct kvm_vcpu *vcpu)
{
	struct kvm_lapic *apic = vcpu->arch.apic;
	struct page *page;

	if (!apic || !apic->vapic_addr)
		return;

	page = gfn_to_page(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT);

	vcpu->arch.apic->vapic_page = page;
}

static void vapic_exit(struct kvm_vcpu *vcpu)
{
	struct kvm_lapic *apic = vcpu->arch.apic;
	int idx;

	if (!apic || !apic->vapic_addr)
		return;

	idx = srcu_read_lock(&vcpu->kvm->srcu);
	kvm_release_page_dirty(apic->vapic_page);
	mark_page_dirty(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT);
	srcu_read_unlock(&vcpu->kvm->srcu, idx);
}

static void update_cr8_intercept(struct kvm_vcpu *vcpu)
{
	int max_irr, tpr;

	if (!kvm_x86_ops->update_cr8_intercept)
		return;

	if (!vcpu->arch.apic)
		return;

	if (!vcpu->arch.apic->vapic_addr)
		max_irr = kvm_lapic_find_highest_irr(vcpu);
	else
		max_irr = -1;

	if (max_irr != -1)
		max_irr >>= 4;

	tpr = kvm_lapic_get_cr8(vcpu);

	kvm_x86_ops->update_cr8_intercept(vcpu, tpr, max_irr);
}
static void inject_pending_event(struct kvm_vcpu *vcpu)
{
	/* try to reinject previous events if any */
	if (vcpu->arch.exception.pending) {
		kvm_x86_ops->queue_exception(vcpu, vcpu->arch.exception.nr,
					     vcpu->arch.exception.has_error_code,
					     vcpu->arch.exception.error_code);
		return;
	}

	if (vcpu->arch.nmi_injected) {
		kvm_x86_ops->set_nmi(vcpu);
		return;
	}

	if (vcpu->arch.interrupt.pending) {
		kvm_x86_ops->set_irq(vcpu);
		return;
	}

	/* try to inject new event if pending */
	if (vcpu->arch.nmi_pending) {
		if (kvm_x86_ops->nmi_allowed(vcpu)) {
			vcpu->arch.nmi_pending = false;
			vcpu->arch.nmi_injected = true;
			kvm_x86_ops->set_nmi(vcpu);
		}
	} else if (kvm_cpu_has_interrupt(vcpu)) {
		if (kvm_x86_ops->interrupt_allowed(vcpu)) {
			kvm_queue_interrupt(vcpu, kvm_cpu_get_interrupt(vcpu),
					    false);
			kvm_x86_ops->set_irq(vcpu);
		}
	}
}
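/*
 * Priority order implemented above: a pending exception is always
 * delivered first; then an NMI or interrupt whose earlier injection was
 * interrupted is re-injected; only if none of those exist is a new event
 * considered, with NMIs taking precedence over maskable interrupts and
 * each gated on the respective *_allowed() check.
 */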
static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
{
	int r;
	bool req_int_win = !irqchip_in_kernel(vcpu->kvm) &&
		vcpu->run->request_interrupt_window;

	if (vcpu->requests)
		if (test_and_clear_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests))
			kvm_mmu_unload(vcpu);

	r = kvm_mmu_reload(vcpu);
	if (unlikely(r))
		goto out;

	if (vcpu->requests) {
		if (test_and_clear_bit(KVM_REQ_MIGRATE_TIMER, &vcpu->requests))
			__kvm_migrate_timers(vcpu);
		if (test_and_clear_bit(KVM_REQ_KVMCLOCK_UPDATE, &vcpu->requests))
			kvm_write_guest_time(vcpu);
		if (test_and_clear_bit(KVM_REQ_MMU_SYNC, &vcpu->requests))
			kvm_mmu_sync_roots(vcpu);
		if (test_and_clear_bit(KVM_REQ_TLB_FLUSH, &vcpu->requests))
			kvm_x86_ops->tlb_flush(vcpu);
		if (test_and_clear_bit(KVM_REQ_REPORT_TPR_ACCESS,
				       &vcpu->requests)) {
			vcpu->run->exit_reason = KVM_EXIT_TPR_ACCESS;
			r = 0;
			goto out;
		}
		if (test_and_clear_bit(KVM_REQ_TRIPLE_FAULT, &vcpu->requests)) {
			vcpu->run->exit_reason = KVM_EXIT_SHUTDOWN;
			r = 0;
			goto out;
		}
		if (test_and_clear_bit(KVM_REQ_DEACTIVATE_FPU, &vcpu->requests)) {
			vcpu->fpu_active = 0;
			kvm_x86_ops->fpu_deactivate(vcpu);
		}
	}

	preempt_disable();

	kvm_x86_ops->prepare_guest_switch(vcpu);
	kvm_load_guest_fpu(vcpu);

	local_irq_disable();

	clear_bit(KVM_REQ_KICK, &vcpu->requests);
	smp_mb__after_clear_bit();

	if (vcpu->requests || need_resched() || signal_pending(current)) {
		set_bit(KVM_REQ_KICK, &vcpu->requests);
		local_irq_enable();
		preempt_enable();
		r = 1;
		goto out;
	}

	inject_pending_event(vcpu);

	/* enable NMI/IRQ window open exits if needed */
	if (vcpu->arch.nmi_pending)
		kvm_x86_ops->enable_nmi_window(vcpu);
	else if (kvm_cpu_has_interrupt(vcpu) || req_int_win)
		kvm_x86_ops->enable_irq_window(vcpu);

	if (kvm_lapic_enabled(vcpu)) {
		update_cr8_intercept(vcpu);
		kvm_lapic_sync_to_vapic(vcpu);
	}

	srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);

	kvm_guest_enter();

	if (unlikely(vcpu->arch.switch_db_regs)) {
		set_debugreg(0, 7);
		set_debugreg(vcpu->arch.eff_db[0], 0);
		set_debugreg(vcpu->arch.eff_db[1], 1);
		set_debugreg(vcpu->arch.eff_db[2], 2);
		set_debugreg(vcpu->arch.eff_db[3], 3);
	}

	trace_kvm_entry(vcpu->vcpu_id);
	kvm_x86_ops->run(vcpu);

	/*
	 * If the guest has used debug registers, at least dr7
	 * will be disabled while returning to the host.
	 * If we don't have active breakpoints in the host, we don't
	 * care about the messed up debug address registers. But if
	 * we have some of them active, restore the old state.
	 */
	if (hw_breakpoint_active())
		hw_breakpoint_restore();

	set_bit(KVM_REQ_KICK, &vcpu->requests);
	local_irq_enable();

	++vcpu->stat.exits;

	/*
	 * We must have an instruction between local_irq_enable() and
	 * kvm_guest_exit(), so the timer interrupt isn't delayed by
	 * the interrupt shadow. The stat.exits increment will do nicely.
	 * But we need to prevent reordering, hence this barrier():
	 */
	barrier();

	kvm_guest_exit();

	preempt_enable();

	vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

	/*
	 * Profile KVM exit RIPs:
	 */
	if (unlikely(prof_on == KVM_PROFILING)) {
		unsigned long rip = kvm_rip_read(vcpu);
		profile_hit(KVM_PROFILING, (void *)rip);
	}

	kvm_lapic_sync_from_vapic(vcpu);

	r = kvm_x86_ops->handle_exit(vcpu);
out:
	return r;
}
static int __vcpu_run(struct kvm_vcpu *vcpu)
{
	int r;
	struct kvm *kvm = vcpu->kvm;

	if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_SIPI_RECEIVED)) {
		pr_debug("vcpu %d received sipi with vector # %x\n",
			 vcpu->vcpu_id, vcpu->arch.sipi_vector);
		kvm_lapic_reset(vcpu);
		r = kvm_arch_vcpu_reset(vcpu);
		if (r)
			return r;
		vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
	}

	vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
	vapic_enter(vcpu);

	r = 1;
	while (r > 0) {
		if (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE)
			r = vcpu_enter_guest(vcpu);
		else {
			srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
			kvm_vcpu_block(vcpu);
			vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
			if (test_and_clear_bit(KVM_REQ_UNHALT, &vcpu->requests)) {
				switch (vcpu->arch.mp_state) {
				case KVM_MP_STATE_HALTED:
					vcpu->arch.mp_state =
						KVM_MP_STATE_RUNNABLE;
				case KVM_MP_STATE_RUNNABLE:
					break;
				case KVM_MP_STATE_SIPI_RECEIVED:
				default:
					r = -EINTR;
					break;
				}
			}
		}

		if (r <= 0)
			break;

		clear_bit(KVM_REQ_PENDING_TIMER, &vcpu->requests);
		if (kvm_cpu_has_pending_timer(vcpu))
			kvm_inject_pending_timer_irqs(vcpu);

		if (dm_request_for_irq_injection(vcpu)) {
			r = -EINTR;
			vcpu->run->exit_reason = KVM_EXIT_INTR;
			++vcpu->stat.request_irq_exits;
		}
		if (signal_pending(current)) {
			r = -EINTR;
			vcpu->run->exit_reason = KVM_EXIT_INTR;
			++vcpu->stat.signal_exits;
		}
		if (need_resched()) {
			srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
			kvm_resched(vcpu);
			vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
		}
	}

	srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
	post_kvm_run_save(vcpu);
	vapic_exit(vcpu);

	return r;
}
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	int r;
	sigset_t sigsaved;

	vcpu_load(vcpu);

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

	if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_UNINITIALIZED)) {
		kvm_vcpu_block(vcpu);
		clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
		r = -EAGAIN;
		goto out;
	}

	/* re-sync apic's tpr */
	if (!irqchip_in_kernel(vcpu->kvm))
		kvm_set_cr8(vcpu, kvm_run->cr8);

	if (vcpu->arch.pio.cur_count) {
		r = complete_pio(vcpu);
		if (r)
			goto out;
	}
	if (vcpu->mmio_needed) {
		memcpy(vcpu->mmio_data, kvm_run->mmio.data, 8);
		vcpu->mmio_read_completed = 1;
		vcpu->mmio_needed = 0;

		vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
		r = emulate_instruction(vcpu, vcpu->arch.mmio_fault_cr2, 0,
					EMULTYPE_NO_DECODE);
		srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
		if (r == EMULATE_DO_MMIO) {
			/*
			 * Read-modify-write.  Back to userspace.
			 */
			r = 0;
			goto out;
		}
	}
	if (kvm_run->exit_reason == KVM_EXIT_HYPERCALL)
		kvm_register_write(vcpu, VCPU_REGS_RAX,
				   kvm_run->hypercall.ret);

	r = __vcpu_run(vcpu);

out:
	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

	vcpu_put(vcpu);
	return r;
}
int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	vcpu_load(vcpu);

	regs->rax = kvm_register_read(vcpu, VCPU_REGS_RAX);
	regs->rbx = kvm_register_read(vcpu, VCPU_REGS_RBX);
	regs->rcx = kvm_register_read(vcpu, VCPU_REGS_RCX);
	regs->rdx = kvm_register_read(vcpu, VCPU_REGS_RDX);
	regs->rsi = kvm_register_read(vcpu, VCPU_REGS_RSI);
	regs->rdi = kvm_register_read(vcpu, VCPU_REGS_RDI);
	regs->rsp = kvm_register_read(vcpu, VCPU_REGS_RSP);
	regs->rbp = kvm_register_read(vcpu, VCPU_REGS_RBP);
#ifdef CONFIG_X86_64
	regs->r8 = kvm_register_read(vcpu, VCPU_REGS_R8);
	regs->r9 = kvm_register_read(vcpu, VCPU_REGS_R9);
	regs->r10 = kvm_register_read(vcpu, VCPU_REGS_R10);
	regs->r11 = kvm_register_read(vcpu, VCPU_REGS_R11);
	regs->r12 = kvm_register_read(vcpu, VCPU_REGS_R12);
	regs->r13 = kvm_register_read(vcpu, VCPU_REGS_R13);
	regs->r14 = kvm_register_read(vcpu, VCPU_REGS_R14);
	regs->r15 = kvm_register_read(vcpu, VCPU_REGS_R15);
#endif

	regs->rip = kvm_rip_read(vcpu);
	regs->rflags = kvm_get_rflags(vcpu);

	vcpu_put(vcpu);

	return 0;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	vcpu_load(vcpu);

	kvm_register_write(vcpu, VCPU_REGS_RAX, regs->rax);
	kvm_register_write(vcpu, VCPU_REGS_RBX, regs->rbx);
	kvm_register_write(vcpu, VCPU_REGS_RCX, regs->rcx);
	kvm_register_write(vcpu, VCPU_REGS_RDX, regs->rdx);
	kvm_register_write(vcpu, VCPU_REGS_RSI, regs->rsi);
	kvm_register_write(vcpu, VCPU_REGS_RDI, regs->rdi);
	kvm_register_write(vcpu, VCPU_REGS_RSP, regs->rsp);
	kvm_register_write(vcpu, VCPU_REGS_RBP, regs->rbp);
#ifdef CONFIG_X86_64
	kvm_register_write(vcpu, VCPU_REGS_R8, regs->r8);
	kvm_register_write(vcpu, VCPU_REGS_R9, regs->r9);
	kvm_register_write(vcpu, VCPU_REGS_R10, regs->r10);
	kvm_register_write(vcpu, VCPU_REGS_R11, regs->r11);
	kvm_register_write(vcpu, VCPU_REGS_R12, regs->r12);
	kvm_register_write(vcpu, VCPU_REGS_R13, regs->r13);
	kvm_register_write(vcpu, VCPU_REGS_R14, regs->r14);
	kvm_register_write(vcpu, VCPU_REGS_R15, regs->r15);
#endif

	kvm_rip_write(vcpu, regs->rip);
	kvm_set_rflags(vcpu, regs->rflags);

	vcpu->arch.exception.pending = false;

	vcpu_put(vcpu);

	return 0;
}
void kvm_get_segment(struct kvm_vcpu *vcpu,
		     struct kvm_segment *var, int seg)
{
	kvm_x86_ops->get_segment(vcpu, var, seg);
}

void kvm_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l)
{
	struct kvm_segment cs;

	kvm_get_segment(vcpu, &cs, VCPU_SREG_CS);
	*db = cs.db;
	*l = cs.l;
}
EXPORT_SYMBOL_GPL(kvm_get_cs_db_l_bits);

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	struct descriptor_table dt;

	vcpu_load(vcpu);

	kvm_get_segment(vcpu, &sregs->cs, VCPU_SREG_CS);
	kvm_get_segment(vcpu, &sregs->ds, VCPU_SREG_DS);
	kvm_get_segment(vcpu, &sregs->es, VCPU_SREG_ES);
	kvm_get_segment(vcpu, &sregs->fs, VCPU_SREG_FS);
	kvm_get_segment(vcpu, &sregs->gs, VCPU_SREG_GS);
	kvm_get_segment(vcpu, &sregs->ss, VCPU_SREG_SS);

	kvm_get_segment(vcpu, &sregs->tr, VCPU_SREG_TR);
	kvm_get_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR);

	kvm_x86_ops->get_idt(vcpu, &dt);
	sregs->idt.limit = dt.limit;
	sregs->idt.base = dt.base;
	kvm_x86_ops->get_gdt(vcpu, &dt);
	sregs->gdt.limit = dt.limit;
	sregs->gdt.base = dt.base;

	sregs->cr0 = kvm_read_cr0(vcpu);
	sregs->cr2 = vcpu->arch.cr2;
	sregs->cr3 = vcpu->arch.cr3;
	sregs->cr4 = kvm_read_cr4(vcpu);
	sregs->cr8 = kvm_get_cr8(vcpu);
	sregs->efer = vcpu->arch.shadow_efer;
	sregs->apic_base = kvm_get_apic_base(vcpu);

	memset(sregs->interrupt_bitmap, 0, sizeof sregs->interrupt_bitmap);

	if (vcpu->arch.interrupt.pending && !vcpu->arch.interrupt.soft)
		set_bit(vcpu->arch.interrupt.nr,
			(unsigned long *)sregs->interrupt_bitmap);

	vcpu_put(vcpu);

	return 0;
}
int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	vcpu_load(vcpu);
	mp_state->mp_state = vcpu->arch.mp_state;
	vcpu_put(vcpu);
	return 0;
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	vcpu_load(vcpu);
	vcpu->arch.mp_state = mp_state->mp_state;
	vcpu_put(vcpu);
	return 0;
}

static void kvm_set_segment(struct kvm_vcpu *vcpu,
			    struct kvm_segment *var, int seg)
{
	kvm_x86_ops->set_segment(vcpu, var, seg);
}
static void seg_desct_to_kvm_desct(struct desc_struct *seg_desc, u16 selector,
				   struct kvm_segment *kvm_desct)
{
	kvm_desct->base = get_desc_base(seg_desc);
	kvm_desct->limit = get_desc_limit(seg_desc);
	if (seg_desc->g) {
		kvm_desct->limit <<= 12;
		kvm_desct->limit |= 0xfff;
	}
	kvm_desct->selector = selector;
	kvm_desct->type = seg_desc->type;
	kvm_desct->present = seg_desc->p;
	kvm_desct->dpl = seg_desc->dpl;
	kvm_desct->db = seg_desc->d;
	kvm_desct->s = seg_desc->s;
	kvm_desct->l = seg_desc->l;
	kvm_desct->g = seg_desc->g;
	kvm_desct->avl = seg_desc->avl;
	if (!selector)
		kvm_desct->unusable = 1;
	else
		kvm_desct->unusable = 0;
	kvm_desct->padding = 0;
}
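/*
 * Granularity example for seg_desct_to_kvm_desct() above: a descriptor
 * with g == 1 counts its 20-bit limit in 4 KiB pages, so a raw limit of
 * 0xfffff becomes (0xfffff << 12) | 0xfff == 0xffffffff, i.e. a 4 GiB
 * byte-granular limit; with g == 0 the raw limit is already in bytes.
 */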
static void get_segment_descriptor_dtable(struct kvm_vcpu *vcpu,
					  u16 selector,
					  struct descriptor_table *dtable)
{
	if (selector & 1 << 2) {
		struct kvm_segment kvm_seg;

		kvm_get_segment(vcpu, &kvm_seg, VCPU_SREG_LDTR);

		if (kvm_seg.unusable)
			dtable->limit = 0;
		else
			dtable->limit = kvm_seg.limit;
		dtable->base = kvm_seg.base;
	} else
		kvm_x86_ops->get_gdt(vcpu, dtable);
}
/* allowed just for 8-byte segment descriptors */
static int load_guest_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector,
					 struct desc_struct *seg_desc)
{
	struct descriptor_table dtable;
	u16 index = selector >> 3;

	get_segment_descriptor_dtable(vcpu, selector, &dtable);

	if (dtable.limit < index * 8 + 7) {
		kvm_queue_exception_e(vcpu, GP_VECTOR, selector & 0xfffc);
		return 1;
	}
	return kvm_read_guest_virt(dtable.base + index*8, seg_desc, sizeof(*seg_desc), vcpu);
}

/* allowed just for 8-byte segment descriptors */
static int save_guest_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector,
					 struct desc_struct *seg_desc)
{
	struct descriptor_table dtable;
	u16 index = selector >> 3;

	get_segment_descriptor_dtable(vcpu, selector, &dtable);

	if (dtable.limit < index * 8 + 7)
		return 1;
	return kvm_write_guest_virt(dtable.base + index*8, seg_desc, sizeof(*seg_desc), vcpu);
}
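/*
 * Selector decomposition used above: bits 1:0 are the RPL, bit 2 is the
 * table indicator (0 = GDT, 1 = LDT, tested as "selector & 1 << 2" in
 * get_segment_descriptor_dtable()), and bits 15:3 index the table. E.g.
 * selector 0x0010 has index 2, so its descriptor starts at byte offset 16
 * of the GDT; the limit check requires offset + 7 to stay in bounds.
 */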
static gpa_t get_tss_base_addr(struct kvm_vcpu *vcpu,
			       struct desc_struct *seg_desc)
{
	u32 base_addr = get_desc_base(seg_desc);

	return vcpu->arch.mmu.gva_to_gpa(vcpu, base_addr);
}

static u16 get_segment_selector(struct kvm_vcpu *vcpu, int seg)
{
	struct kvm_segment kvm_seg;

	kvm_get_segment(vcpu, &kvm_seg, seg);
	return kvm_seg.selector;
}

static int load_segment_descriptor_to_kvm_desct(struct kvm_vcpu *vcpu,
						u16 selector,
						struct kvm_segment *kvm_seg)
{
	struct desc_struct seg_desc;

	if (load_guest_segment_descriptor(vcpu, selector, &seg_desc))
		return 1;
	seg_desct_to_kvm_desct(&seg_desc, selector, kvm_seg);
	return 0;
}
static int kvm_load_realmode_segment(struct kvm_vcpu *vcpu, u16 selector, int seg)
{
	struct kvm_segment segvar = {
		.base = selector << 4,
		.limit = 0xffff,
		.selector = selector,
		.type = 3,
		.present = 1,
		.dpl = 3,
		.db = 0,
		.s = 1,
		.l = 0,
		.g = 0,
		.avl = 0,
		.unusable = 0,
	};
	kvm_x86_ops->set_segment(vcpu, &segvar, seg);
	return 0;
}
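/*
 * Real-mode segmentation as encoded above: the base is simply the
 * selector shifted left by 4 (e.g. selector 0xb800 yields base 0xb8000),
 * with a fixed 64 KiB limit and a writable data-segment type, matching
 * how a real-mode CPU forms linear addresses.
 */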
static int is_vm86_segment(struct kvm_vcpu *vcpu, int seg)
{
	return (seg != VCPU_SREG_LDTR) &&
	       (seg != VCPU_SREG_TR) &&
	       (kvm_get_rflags(vcpu) & X86_EFLAGS_VM);
}
static void kvm_check_segment_descriptor(struct kvm_vcpu *vcpu, int seg,
					 u16 selector)
{
	/* NULL selector is not valid for CS and SS */
	if (seg == VCPU_SREG_CS || seg == VCPU_SREG_SS)
		if (!selector)
			kvm_queue_exception_e(vcpu, TS_VECTOR, selector >> 3);
}

int kvm_load_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector,
				int type_bits, int seg)
{
	struct kvm_segment kvm_seg;

	if (is_vm86_segment(vcpu, seg) || !(kvm_read_cr0_bits(vcpu, X86_CR0_PE)))
		return kvm_load_realmode_segment(vcpu, selector, seg);

	if (load_segment_descriptor_to_kvm_desct(vcpu, selector, &kvm_seg))
		return 1;

	kvm_check_segment_descriptor(vcpu, seg, selector);
	kvm_seg.type |= type_bits;

	if (seg != VCPU_SREG_SS && seg != VCPU_SREG_CS &&
	    seg != VCPU_SREG_LDTR)
		if (!kvm_seg.s)
			kvm_seg.unusable = 1;

	kvm_set_segment(vcpu, &kvm_seg, seg);
	return 0;
}
static void save_state_to_tss32(struct kvm_vcpu *vcpu,
				struct tss_segment_32 *tss)
{
	tss->cr3 = vcpu->arch.cr3;
	tss->eip = kvm_rip_read(vcpu);
	tss->eflags = kvm_get_rflags(vcpu);
	tss->eax = kvm_register_read(vcpu, VCPU_REGS_RAX);
	tss->ecx = kvm_register_read(vcpu, VCPU_REGS_RCX);
	tss->edx = kvm_register_read(vcpu, VCPU_REGS_RDX);
	tss->ebx = kvm_register_read(vcpu, VCPU_REGS_RBX);
	tss->esp = kvm_register_read(vcpu, VCPU_REGS_RSP);
	tss->ebp = kvm_register_read(vcpu, VCPU_REGS_RBP);
	tss->esi = kvm_register_read(vcpu, VCPU_REGS_RSI);
	tss->edi = kvm_register_read(vcpu, VCPU_REGS_RDI);
	tss->es = get_segment_selector(vcpu, VCPU_SREG_ES);
	tss->cs = get_segment_selector(vcpu, VCPU_SREG_CS);
	tss->ss = get_segment_selector(vcpu, VCPU_SREG_SS);
	tss->ds = get_segment_selector(vcpu, VCPU_SREG_DS);
	tss->fs = get_segment_selector(vcpu, VCPU_SREG_FS);
	tss->gs = get_segment_selector(vcpu, VCPU_SREG_GS);
	tss->ldt_selector = get_segment_selector(vcpu, VCPU_SREG_LDTR);
}

static int load_state_from_tss32(struct kvm_vcpu *vcpu,
				 struct tss_segment_32 *tss)
{
	kvm_set_cr3(vcpu, tss->cr3);

	kvm_rip_write(vcpu, tss->eip);
	kvm_set_rflags(vcpu, tss->eflags | 2);

	kvm_register_write(vcpu, VCPU_REGS_RAX, tss->eax);
	kvm_register_write(vcpu, VCPU_REGS_RCX, tss->ecx);
	kvm_register_write(vcpu, VCPU_REGS_RDX, tss->edx);
	kvm_register_write(vcpu, VCPU_REGS_RBX, tss->ebx);
	kvm_register_write(vcpu, VCPU_REGS_RSP, tss->esp);
	kvm_register_write(vcpu, VCPU_REGS_RBP, tss->ebp);
	kvm_register_write(vcpu, VCPU_REGS_RSI, tss->esi);
	kvm_register_write(vcpu, VCPU_REGS_RDI, tss->edi);

	if (kvm_load_segment_descriptor(vcpu, tss->ldt_selector, 0, VCPU_SREG_LDTR))
		return 1;

	if (kvm_load_segment_descriptor(vcpu, tss->es, 1, VCPU_SREG_ES))
		return 1;

	if (kvm_load_segment_descriptor(vcpu, tss->cs, 9, VCPU_SREG_CS))
		return 1;

	if (kvm_load_segment_descriptor(vcpu, tss->ss, 1, VCPU_SREG_SS))
		return 1;

	if (kvm_load_segment_descriptor(vcpu, tss->ds, 1, VCPU_SREG_DS))
		return 1;

	if (kvm_load_segment_descriptor(vcpu, tss->fs, 1, VCPU_SREG_FS))
		return 1;

	if (kvm_load_segment_descriptor(vcpu, tss->gs, 1, VCPU_SREG_GS))
		return 1;
	return 0;
}

static void save_state_to_tss16(struct kvm_vcpu *vcpu,
				struct tss_segment_16 *tss)
{
	tss->ip = kvm_rip_read(vcpu);
	tss->flag = kvm_get_rflags(vcpu);
	tss->ax = kvm_register_read(vcpu, VCPU_REGS_RAX);
	tss->cx = kvm_register_read(vcpu, VCPU_REGS_RCX);
	tss->dx = kvm_register_read(vcpu, VCPU_REGS_RDX);
	tss->bx = kvm_register_read(vcpu, VCPU_REGS_RBX);
	tss->sp = kvm_register_read(vcpu, VCPU_REGS_RSP);
	tss->bp = kvm_register_read(vcpu, VCPU_REGS_RBP);
	tss->si = kvm_register_read(vcpu, VCPU_REGS_RSI);
	tss->di = kvm_register_read(vcpu, VCPU_REGS_RDI);

	tss->es = get_segment_selector(vcpu, VCPU_SREG_ES);
	tss->cs = get_segment_selector(vcpu, VCPU_SREG_CS);
	tss->ss = get_segment_selector(vcpu, VCPU_SREG_SS);
	tss->ds = get_segment_selector(vcpu, VCPU_SREG_DS);
	tss->ldt = get_segment_selector(vcpu, VCPU_SREG_LDTR);
}

static int load_state_from_tss16(struct kvm_vcpu *vcpu,
				 struct tss_segment_16 *tss)
{
	kvm_rip_write(vcpu, tss->ip);
	kvm_set_rflags(vcpu, tss->flag | 2);
	kvm_register_write(vcpu, VCPU_REGS_RAX, tss->ax);
	kvm_register_write(vcpu, VCPU_REGS_RCX, tss->cx);
	kvm_register_write(vcpu, VCPU_REGS_RDX, tss->dx);
	kvm_register_write(vcpu, VCPU_REGS_RBX, tss->bx);
	kvm_register_write(vcpu, VCPU_REGS_RSP, tss->sp);
	kvm_register_write(vcpu, VCPU_REGS_RBP, tss->bp);
	kvm_register_write(vcpu, VCPU_REGS_RSI, tss->si);
	kvm_register_write(vcpu, VCPU_REGS_RDI, tss->di);

	if (kvm_load_segment_descriptor(vcpu, tss->ldt, 0, VCPU_SREG_LDTR))
		return 1;

	if (kvm_load_segment_descriptor(vcpu, tss->es, 1, VCPU_SREG_ES))
		return 1;

	if (kvm_load_segment_descriptor(vcpu, tss->cs, 9, VCPU_SREG_CS))
		return 1;

	if (kvm_load_segment_descriptor(vcpu, tss->ss, 1, VCPU_SREG_SS))
		return 1;

	if (kvm_load_segment_descriptor(vcpu, tss->ds, 1, VCPU_SREG_DS))
		return 1;
	return 0;
}
static int kvm_task_switch_16(struct kvm_vcpu *vcpu, u16 tss_selector,
			      u16 old_tss_sel, u32 old_tss_base,
			      struct desc_struct *nseg_desc)
{
	struct tss_segment_16 tss_segment_16;
	int ret = 0;

	if (kvm_read_guest(vcpu->kvm, old_tss_base, &tss_segment_16,
			   sizeof tss_segment_16))
		goto out;

	save_state_to_tss16(vcpu, &tss_segment_16);

	if (kvm_write_guest(vcpu->kvm, old_tss_base, &tss_segment_16,
			    sizeof tss_segment_16))
		goto out;

	if (kvm_read_guest(vcpu->kvm, get_tss_base_addr(vcpu, nseg_desc),
			   &tss_segment_16, sizeof tss_segment_16))
		goto out;

	if (old_tss_sel != 0xffff) {
		tss_segment_16.prev_task_link = old_tss_sel;

		if (kvm_write_guest(vcpu->kvm,
				    get_tss_base_addr(vcpu, nseg_desc),
				    &tss_segment_16.prev_task_link,
				    sizeof tss_segment_16.prev_task_link))
			goto out;
	}

	if (load_state_from_tss16(vcpu, &tss_segment_16))
		goto out;

	ret = 1;
out:
	return ret;
}

static int kvm_task_switch_32(struct kvm_vcpu *vcpu, u16 tss_selector,
			      u16 old_tss_sel, u32 old_tss_base,
			      struct desc_struct *nseg_desc)
{
	struct tss_segment_32 tss_segment_32;
	int ret = 0;

	if (kvm_read_guest(vcpu->kvm, old_tss_base, &tss_segment_32,
			   sizeof tss_segment_32))
		goto out;

	save_state_to_tss32(vcpu, &tss_segment_32);

	if (kvm_write_guest(vcpu->kvm, old_tss_base, &tss_segment_32,
			    sizeof tss_segment_32))
		goto out;

	if (kvm_read_guest(vcpu->kvm, get_tss_base_addr(vcpu, nseg_desc),
			   &tss_segment_32, sizeof tss_segment_32))
		goto out;

	if (old_tss_sel != 0xffff) {
		tss_segment_32.prev_task_link = old_tss_sel;

		if (kvm_write_guest(vcpu->kvm,
				    get_tss_base_addr(vcpu, nseg_desc),
				    &tss_segment_32.prev_task_link,
				    sizeof tss_segment_32.prev_task_link))
			goto out;
	}

	if (load_state_from_tss32(vcpu, &tss_segment_32))
		goto out;

	ret = 1;
out:
	return ret;
}
int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason)
{
	struct kvm_segment tr_seg;
	struct desc_struct cseg_desc;
	struct desc_struct nseg_desc;
	int ret = 0;
	u32 old_tss_base = get_segment_base(vcpu, VCPU_SREG_TR);
	u16 old_tss_sel = get_segment_selector(vcpu, VCPU_SREG_TR);

	old_tss_base = vcpu->arch.mmu.gva_to_gpa(vcpu, old_tss_base);

	/* FIXME: Handle errors. Failure to read either TSS or their
	 * descriptors should generate a page fault.
	 */
	if (load_guest_segment_descriptor(vcpu, tss_selector, &nseg_desc))
		goto out;

	if (load_guest_segment_descriptor(vcpu, old_tss_sel, &cseg_desc))
		goto out;

	if (reason != TASK_SWITCH_IRET) {
		int cpl;

		cpl = kvm_x86_ops->get_cpl(vcpu);
		if ((tss_selector & 3) > nseg_desc.dpl || cpl > nseg_desc.dpl) {
			kvm_queue_exception_e(vcpu, GP_VECTOR, 0);
			return 1;
		}
	}

	if (!nseg_desc.p || get_desc_limit(&nseg_desc) < 0x67) {
		kvm_queue_exception_e(vcpu, TS_VECTOR, tss_selector & 0xfffc);
		return 1;
	}

	if (reason == TASK_SWITCH_IRET || reason == TASK_SWITCH_JMP) {
		cseg_desc.type &= ~(1 << 1); /* clear the busy (B) flag */
		save_guest_segment_descriptor(vcpu, old_tss_sel, &cseg_desc);
	}

	if (reason == TASK_SWITCH_IRET) {
		u32 eflags = kvm_get_rflags(vcpu);
		kvm_set_rflags(vcpu, eflags & ~X86_EFLAGS_NT);
	}

	/* set back link to prev task only if NT bit is set in eflags;
	   note that old_tss_sel is not used after this point */
	if (reason != TASK_SWITCH_CALL && reason != TASK_SWITCH_GATE)
		old_tss_sel = 0xffff;

	if (nseg_desc.type & 8)
		ret = kvm_task_switch_32(vcpu, tss_selector, old_tss_sel,
					 old_tss_base, &nseg_desc);
	else
		ret = kvm_task_switch_16(vcpu, tss_selector, old_tss_sel,
					 old_tss_base, &nseg_desc);

	if (reason == TASK_SWITCH_CALL || reason == TASK_SWITCH_GATE) {
		u32 eflags = kvm_get_rflags(vcpu);
		kvm_set_rflags(vcpu, eflags | X86_EFLAGS_NT);
	}

	if (reason != TASK_SWITCH_IRET) {
		nseg_desc.type |= (1 << 1); /* mark the new TSS busy */
		save_guest_segment_descriptor(vcpu, tss_selector,
					      &nseg_desc);
	}

	kvm_x86_ops->set_cr0(vcpu, kvm_read_cr0(vcpu) | X86_CR0_TS);
	seg_desct_to_kvm_desct(&nseg_desc, tss_selector, &tr_seg);
	tr_seg.type = 11;
	kvm_set_segment(vcpu, &tr_seg, VCPU_SREG_TR);
out:
	return ret;
}
EXPORT_SYMBOL_GPL(kvm_task_switch);
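
/*
 * Install a complete set of special registers supplied by userspace:
 * descriptor tables, control registers, segment registers and the APIC
 * base.  A changed CR0/CR3/CR4/EFER forces an MMU reset, and a pending
 * interrupt recorded in the bitmap is re-queued.
 */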
int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	int mmu_reset_needed = 0;
	int pending_vec, max_bits;
	struct descriptor_table dt;

	vcpu_load(vcpu);

	dt.limit = sregs->idt.limit;
	dt.base = sregs->idt.base;
	kvm_x86_ops->set_idt(vcpu, &dt);
	dt.limit = sregs->gdt.limit;
	dt.base = sregs->gdt.base;
	kvm_x86_ops->set_gdt(vcpu, &dt);

	vcpu->arch.cr2 = sregs->cr2;
	mmu_reset_needed |= vcpu->arch.cr3 != sregs->cr3;
	vcpu->arch.cr3 = sregs->cr3;

	kvm_set_cr8(vcpu, sregs->cr8);

	mmu_reset_needed |= vcpu->arch.shadow_efer != sregs->efer;
	kvm_x86_ops->set_efer(vcpu, sregs->efer);
	kvm_set_apic_base(vcpu, sregs->apic_base);

	mmu_reset_needed |= kvm_read_cr0(vcpu) != sregs->cr0;
	kvm_x86_ops->set_cr0(vcpu, sregs->cr0);
	vcpu->arch.cr0 = sregs->cr0;

	mmu_reset_needed |= kvm_read_cr4(vcpu) != sregs->cr4;
	kvm_x86_ops->set_cr4(vcpu, sregs->cr4);
	if (!is_long_mode(vcpu) && is_pae(vcpu)) {
		load_pdptrs(vcpu, vcpu->arch.cr3);
		mmu_reset_needed = 1;
	}

	if (mmu_reset_needed)
		kvm_mmu_reset_context(vcpu);

	max_bits = (sizeof sregs->interrupt_bitmap) << 3;
	pending_vec = find_first_bit(
		(const unsigned long *)sregs->interrupt_bitmap, max_bits);
	if (pending_vec < max_bits) {
		kvm_queue_interrupt(vcpu, pending_vec, false);
		pr_debug("Set back pending irq %d\n", pending_vec);
		if (irqchip_in_kernel(vcpu->kvm))
			kvm_pic_clear_isr_ack(vcpu->kvm);
	}

	kvm_set_segment(vcpu, &sregs->cs, VCPU_SREG_CS);
	kvm_set_segment(vcpu, &sregs->ds, VCPU_SREG_DS);
	kvm_set_segment(vcpu, &sregs->es, VCPU_SREG_ES);
	kvm_set_segment(vcpu, &sregs->fs, VCPU_SREG_FS);
	kvm_set_segment(vcpu, &sregs->gs, VCPU_SREG_GS);
	kvm_set_segment(vcpu, &sregs->ss, VCPU_SREG_SS);

	kvm_set_segment(vcpu, &sregs->tr, VCPU_SREG_TR);
	kvm_set_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR);

	update_cr8_intercept(vcpu);

	/* Older userspace won't unhalt the vcpu on reset. */
	if (kvm_vcpu_is_bsp(vcpu) && kvm_rip_read(vcpu) == 0xfff0 &&
	    sregs->cs.selector == 0xf000 && sregs->cs.base == 0xffff0000 &&
	    !(kvm_read_cr0_bits(vcpu, X86_CR0_PE)))
		vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;

	vcpu_put(vcpu);

	return 0;
}
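
/*
 * Configure guest debugging from userspace: optionally inject a #DB or
 * #BP exception, select between guest and host debug registers, record
 * the single-step context, and push the resulting rflags and debug
 * state into the vcpu.
 */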
int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	unsigned long rflags;
	int i, r;

	vcpu_load(vcpu);

	if (dbg->control & (KVM_GUESTDBG_INJECT_DB | KVM_GUESTDBG_INJECT_BP)) {
		r = -EBUSY;
		if (vcpu->arch.exception.pending)
			goto unlock_out;
		if (dbg->control & KVM_GUESTDBG_INJECT_DB)
			kvm_queue_exception(vcpu, DB_VECTOR);
		else
			kvm_queue_exception(vcpu, BP_VECTOR);
	}

	/*
	 * Read rflags as long as potentially injected trace flags are still
	 * filtered out.
	 */
	rflags = kvm_get_rflags(vcpu);

	vcpu->guest_debug = dbg->control;
	if (!(vcpu->guest_debug & KVM_GUESTDBG_ENABLE))
		vcpu->guest_debug = 0;

	if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) {
		for (i = 0; i < KVM_NR_DB_REGS; ++i)
			vcpu->arch.eff_db[i] = dbg->arch.debugreg[i];
		vcpu->arch.switch_db_regs =
			(dbg->arch.debugreg[7] & DR7_BP_EN_MASK);
	} else {
		for (i = 0; i < KVM_NR_DB_REGS; i++)
			vcpu->arch.eff_db[i] = vcpu->arch.db[i];
		vcpu->arch.switch_db_regs = (vcpu->arch.dr7 & DR7_BP_EN_MASK);
	}

	if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) {
		vcpu->arch.singlestep_cs =
			get_segment_selector(vcpu, VCPU_SREG_CS);
		vcpu->arch.singlestep_rip = kvm_rip_read(vcpu);
	}

	/*
	 * Trigger an rflags update that will inject or remove the trace
	 * flags.
	 */
	kvm_set_rflags(vcpu, rflags);

	kvm_x86_ops->set_guest_debug(vcpu, dbg);

	r = 0;

unlock_out:
	vcpu_put(vcpu);

	return r;
}
/*
 * fxsave fpu state.  Taken from x86_64/processor.h.  To be killed when
 * we have asm/x86/processor.h.
 */
struct fxsave {
	u16	cwd;
	u16	swd;
	u16	twd;
	u16	fop;
	u64	rip;
	u64	rdp;
	u32	mxcsr;
	u32	mxcsr_mask;
	u32	st_space[32];	/* 8*16 bytes for each FP-reg = 128 bytes */
#ifdef CONFIG_X86_64
	u32	xmm_space[64];	/* 16*16 bytes for each XMM-reg = 256 bytes */
#else
	u32	xmm_space[32];	/* 8*16 bytes for each XMM-reg = 128 bytes */
#endif
};
/*
 * Translate a guest virtual address to a guest physical address.
 */
int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	unsigned long vaddr = tr->linear_address;
	gpa_t gpa;
	int idx;

	vcpu_load(vcpu);
	idx = srcu_read_lock(&vcpu->kvm->srcu);
	gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, vaddr);
	srcu_read_unlock(&vcpu->kvm->srcu, idx);
	tr->physical_address = gpa;
	tr->valid = gpa != UNMAPPED_GVA;
	tr->writeable = 1;
	tr->usermode = 0;
	vcpu_put(vcpu);

	return 0;
}
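
/* Export the guest's fxsave image to the kvm_fpu userspace format. */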
int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	struct fxsave *fxsave = (struct fxsave *)&vcpu->arch.guest_fx_image;

	vcpu_load(vcpu);

	memcpy(fpu->fpr, fxsave->st_space, 128);
	fpu->fcw = fxsave->cwd;
	fpu->fsw = fxsave->swd;
	fpu->ftwx = fxsave->twd;
	fpu->last_opcode = fxsave->fop;
	fpu->last_ip = fxsave->rip;
	fpu->last_dp = fxsave->rdp;
	memcpy(fpu->xmm, fxsave->xmm_space, sizeof fxsave->xmm_space);

	vcpu_put(vcpu);

	return 0;
}
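
/* Inverse of the above: load the kvm_fpu image supplied by userspace. */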
int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	struct fxsave *fxsave = (struct fxsave *)&vcpu->arch.guest_fx_image;

	vcpu_load(vcpu);

	memcpy(fxsave->st_space, fpu->fpr, 128);
	fxsave->cwd = fpu->fcw;
	fxsave->swd = fpu->fsw;
	fxsave->twd = fpu->ftwx;
	fxsave->fop = fpu->last_opcode;
	fxsave->rip = fpu->last_ip;
	fxsave->rdp = fpu->last_dp;
	memcpy(fxsave->xmm_space, fpu->xmm, sizeof fxsave->xmm_space);

	vcpu_put(vcpu);

	return 0;
}
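
/*
 * Build the initial guest FPU image: save the host state, reset the
 * FPU via finit, capture that reset state as the guest image, then
 * restore the host state.
 */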
void fx_init(struct kvm_vcpu *vcpu)
{
	unsigned after_mxcsr_mask;

	/*
	 * Touch the fpu the first time in non-atomic context: if this is
	 * the first fpu instruction, the exception handler will fire
	 * before the instruction returns and will have to allocate RAM
	 * with GFP_KERNEL.
	 */
	if (!used_math())
		kvm_fx_save(&vcpu->arch.host_fx_image);

	/* Initialize guest FPU by resetting ours and saving into guest's */
	preempt_disable();
	kvm_fx_save(&vcpu->arch.host_fx_image);
	kvm_fx_finit();
	kvm_fx_save(&vcpu->arch.guest_fx_image);
	kvm_fx_restore(&vcpu->arch.host_fx_image);
	preempt_enable();

	vcpu->arch.cr0 |= X86_CR0_ET;
	after_mxcsr_mask = offsetof(struct i387_fxsave_struct, st_space);
	vcpu->arch.guest_fx_image.mxcsr = 0x1f80;
	memset((void *)&vcpu->arch.guest_fx_image + after_mxcsr_mask,
	       0, sizeof(struct i387_fxsave_struct) - after_mxcsr_mask);
}
EXPORT_SYMBOL_GPL(fx_init);
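
/*
 * Swap the host FPU image out and the guest image in before entering
 * the guest; kvm_put_guest_fpu() below does the reverse on exit and
 * requests FPU deactivation.
 */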
void kvm_load_guest_fpu(struct kvm_vcpu *vcpu)
{
	if (!vcpu->fpu_active || vcpu->guest_fpu_loaded)
		return;

	vcpu->guest_fpu_loaded = 1;
	kvm_fx_save(&vcpu->arch.host_fx_image);
	kvm_fx_restore(&vcpu->arch.guest_fx_image);
}
EXPORT_SYMBOL_GPL(kvm_load_guest_fpu);

void kvm_put_guest_fpu(struct kvm_vcpu *vcpu)
{
	if (!vcpu->guest_fpu_loaded)
		return;

	vcpu->guest_fpu_loaded = 0;
	kvm_fx_save(&vcpu->arch.guest_fx_image);
	kvm_fx_restore(&vcpu->arch.host_fx_image);
	++vcpu->stat.fpu_reload;
	set_bit(KVM_REQ_DEACTIVATE_FPU, &vcpu->requests);
}
EXPORT_SYMBOL_GPL(kvm_put_guest_fpu);
void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
{
	if (vcpu->arch.time_page) {
		kvm_release_page_dirty(vcpu->arch.time_page);
		vcpu->arch.time_page = NULL;
	}

	kvm_x86_ops->vcpu_free(vcpu);
}

struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
				      unsigned int id)
{
	return kvm_x86_ops->vcpu_create(kvm, id);
}

int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
	int r;

	/* We do fxsave: this must be aligned. */
	BUG_ON((unsigned long)&vcpu->arch.host_fx_image & 0xF);

	vcpu->arch.mtrr_state.have_fixed = 1;
	vcpu_load(vcpu);
	r = kvm_arch_vcpu_reset(vcpu);
	if (r == 0)
		r = kvm_mmu_setup(vcpu);
	vcpu_put(vcpu);
	if (r < 0)
		goto free_vcpu;

	return 0;
free_vcpu:
	kvm_x86_ops->vcpu_free(vcpu);
	return r;
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	vcpu_load(vcpu);
	kvm_mmu_unload(vcpu);
	vcpu_put(vcpu);

	kvm_x86_ops->vcpu_free(vcpu);
}

int kvm_arch_vcpu_reset(struct kvm_vcpu *vcpu)
{
	vcpu->arch.nmi_pending = false;
	vcpu->arch.nmi_injected = false;

	vcpu->arch.switch_db_regs = 0;
	memset(vcpu->arch.db, 0, sizeof(vcpu->arch.db));
	vcpu->arch.dr6 = DR6_FIXED_1;
	vcpu->arch.dr7 = DR7_FIXED_1;

	return kvm_x86_ops->vcpu_reset(vcpu);
}
int kvm_arch_hardware_enable(void *garbage)
{
	/*
	 * Since this may be called from a hotplug notification,
	 * we can't get the CPU frequency directly.
	 */
	if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) {
		int cpu = raw_smp_processor_id();
		per_cpu(cpu_tsc_khz, cpu) = 0;
	}

	kvm_shared_msr_cpu_online();

	return kvm_x86_ops->hardware_enable(garbage);
}

void kvm_arch_hardware_disable(void *garbage)
{
	kvm_x86_ops->hardware_disable(garbage);
	drop_user_return_notifiers(garbage);
}

int kvm_arch_hardware_setup(void)
{
	return kvm_x86_ops->hardware_setup();
}

void kvm_arch_hardware_unsetup(void)
{
	kvm_x86_ops->hardware_unsetup();
}

void kvm_arch_check_processor_compat(void *rtn)
{
	kvm_x86_ops->check_processor_compatibility(rtn);
}
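
/*
 * Architecture-specific vcpu construction: pick the initial mp_state,
 * allocate the pio scratch page, and create the MMU, the in-kernel
 * local APIC (when the irqchip lives in the kernel) and the MCE bank
 * array, unwinding in reverse order on failure.
 */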
int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
	struct page *page;
	struct kvm *kvm;
	int r;

	BUG_ON(vcpu->kvm == NULL);
	kvm = vcpu->kvm;

	vcpu->arch.mmu.root_hpa = INVALID_PAGE;
	if (!irqchip_in_kernel(kvm) || kvm_vcpu_is_bsp(vcpu))
		vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
	else
		vcpu->arch.mp_state = KVM_MP_STATE_UNINITIALIZED;

	page = alloc_page(GFP_KERNEL | __GFP_ZERO);
	if (!page) {
		r = -ENOMEM;
		goto fail;
	}
	vcpu->arch.pio_data = page_address(page);

	r = kvm_mmu_create(vcpu);
	if (r < 0)
		goto fail_free_pio_data;

	if (irqchip_in_kernel(kvm)) {
		r = kvm_create_lapic(vcpu);
		if (r < 0)
			goto fail_mmu_destroy;
	}

	vcpu->arch.mce_banks = kzalloc(KVM_MAX_MCE_BANKS * sizeof(u64) * 4,
				       GFP_KERNEL);
	if (!vcpu->arch.mce_banks) {
		r = -ENOMEM;
		goto fail_free_lapic;
	}
	vcpu->arch.mcg_cap = KVM_MAX_MCE_BANKS;

	return 0;
fail_free_lapic:
	kvm_free_lapic(vcpu);
fail_mmu_destroy:
	kvm_mmu_destroy(vcpu);
fail_free_pio_data:
	free_page((unsigned long)vcpu->arch.pio_data);
fail:
	return r;
}

void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	int idx;

	kfree(vcpu->arch.mce_banks);
	kvm_free_lapic(vcpu);
	idx = srcu_read_lock(&vcpu->kvm->srcu);
	kvm_mmu_destroy(vcpu);
	srcu_read_unlock(&vcpu->kvm->srcu, idx);
	free_page((unsigned long)vcpu->arch.pio_data);
}
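
/*
 * Allocate and initialize the architecture-specific parts of a VM:
 * the memory alias table, the MMU page and assigned-device lists, the
 * reserved userspace irq source bit, and the TSC value at VM creation.
 */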
struct kvm *kvm_arch_create_vm(void)
{
	struct kvm *kvm = kzalloc(sizeof(struct kvm), GFP_KERNEL);

	if (!kvm)
		return ERR_PTR(-ENOMEM);
	kvm->arch.aliases = kzalloc(sizeof(struct kvm_mem_aliases), GFP_KERNEL);
	if (!kvm->arch.aliases) {
		kfree(kvm);
		return ERR_PTR(-ENOMEM);
	}

	INIT_LIST_HEAD(&kvm->arch.active_mmu_pages);
	INIT_LIST_HEAD(&kvm->arch.assigned_dev_head);

	/* Reserve bit 0 of irq_sources_bitmap for userspace irq source */
	set_bit(KVM_USERSPACE_IRQ_SOURCE_ID, &kvm->arch.irq_sources_bitmap);

	rdtscll(kvm->arch.vm_init_tsc);

	return kvm;
}

static void kvm_unload_vcpu_mmu(struct kvm_vcpu *vcpu)
{
	vcpu_load(vcpu);
	kvm_mmu_unload(vcpu);
	vcpu_put(vcpu);
}
static void kvm_free_vcpus(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

	/*
	 * Unpin any mmu pages first.
	 */
	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_unload_vcpu_mmu(vcpu);
	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_arch_vcpu_free(vcpu);

	mutex_lock(&kvm->lock);
	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
		kvm->vcpus[i] = NULL;

	atomic_set(&kvm->online_vcpus, 0);
	mutex_unlock(&kvm->lock);
}

void kvm_arch_sync_events(struct kvm *kvm)
{
	kvm_free_all_assigned_devices(kvm);
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
	kvm_iommu_unmap_guest(kvm);
	kvm_free_pit(kvm);
	kfree(kvm->arch.vpic);
	kfree(kvm->arch.vioapic);
	kvm_free_vcpus(kvm);
	kvm_free_physmem(kvm);
	if (kvm->arch.apic_access_page)
		put_page(kvm->arch.apic_access_page);
	if (kvm->arch.ept_identity_pagetable)
		put_page(kvm->arch.ept_identity_pagetable);
	cleanup_srcu_struct(&kvm->srcu);
	kfree(kvm->arch.aliases);
	kfree(kvm);
}
int kvm_arch_prepare_memory_region(struct kvm *kvm,
				   struct kvm_memory_slot *memslot,
				   struct kvm_memory_slot old,
				   struct kvm_userspace_memory_region *mem,
				   int user_alloc)
{
	int npages = memslot->npages;

	/* To keep backward compatibility with older userspace,
	 * x86 needs to handle the !user_alloc case.
	 */
	if (!user_alloc) {
		if (npages && !old.rmap) {
			unsigned long userspace_addr;

			down_write(&current->mm->mmap_sem);
			userspace_addr = do_mmap(NULL, 0,
						 npages * PAGE_SIZE,
						 PROT_READ | PROT_WRITE,
						 MAP_PRIVATE | MAP_ANONYMOUS,
						 0);
			up_write(&current->mm->mmap_sem);

			if (IS_ERR((void *)userspace_addr))
				return PTR_ERR((void *)userspace_addr);

			memslot->userspace_addr = userspace_addr;
		}
	}

	return 0;
}
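
/*
 * Post-commit hook for memory slot changes: munmap kernel-allocated
 * slots being deleted, rebalance the shadow page allotment, and
 * write-protect the slot's pages in the shadow MMU.
 */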
void kvm_arch_commit_memory_region(struct kvm *kvm,
				   struct kvm_userspace_memory_region *mem,
				   struct kvm_memory_slot old,
				   int user_alloc)
{
	int npages = mem->memory_size >> PAGE_SHIFT;

	if (!user_alloc && !old.user_alloc && old.rmap && !npages) {
		int ret;

		down_write(&current->mm->mmap_sem);
		ret = do_munmap(current->mm, old.userspace_addr,
				old.npages * PAGE_SIZE);
		up_write(&current->mm->mmap_sem);

		if (ret < 0)
			printk(KERN_WARNING
			       "kvm_vm_ioctl_set_memory_region: "
			       "failed to munmap memory\n");
	}

	spin_lock(&kvm->mmu_lock);
	if (!kvm->arch.n_requested_mmu_pages) {
		unsigned int nr_mmu_pages = kvm_mmu_calculate_mmu_pages(kvm);
		kvm_mmu_change_mmu_pages(kvm, nr_mmu_pages);
	}

	kvm_mmu_slot_remove_write_access(kvm, mem->slot);
	spin_unlock(&kvm->mmu_lock);
}
void kvm_arch_flush_shadow(struct kvm *kvm)
{
	kvm_mmu_zap_all(kvm);
	kvm_reload_remote_mmus(kvm);
}

int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE
		|| vcpu->arch.mp_state == KVM_MP_STATE_SIPI_RECEIVED
		|| vcpu->arch.nmi_pending
		|| (kvm_arch_interrupt_allowed(vcpu) &&
		    kvm_cpu_has_interrupt(vcpu));
}
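
/*
 * Wake a vcpu: wake it up if it is blocked on its waitqueue, then, if
 * it is currently running in guest mode on another CPU, send an IPI to
 * force a vmexit.  test_and_set_bit(KVM_REQ_KICK) skips the IPI when a
 * kick is already outstanding.
 */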
void kvm_vcpu_kick(struct kvm_vcpu *vcpu)
{
	int me;
	int cpu = vcpu->cpu;

	if (waitqueue_active(&vcpu->wq)) {
		wake_up_interruptible(&vcpu->wq);
		++vcpu->stat.halt_wakeup;
	}

	me = get_cpu();
	if (cpu != me && (unsigned)cpu < nr_cpu_ids && cpu_online(cpu))
		if (!test_and_set_bit(KVM_REQ_KICK, &vcpu->requests))
			smp_send_reschedule(cpu);
	put_cpu();
}

int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu)
{
	return kvm_x86_ops->interrupt_allowed(vcpu);
}
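
/*
 * rflags accessors that hide the single-step trace flags from the
 * guest: reads filter out TF/RF while KVM_GUESTDBG_SINGLESTEP is
 * active, and writes re-inject them as long as execution is still at
 * the CS:RIP recorded when single-stepping was enabled.
 */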
unsigned long kvm_get_rflags(struct kvm_vcpu *vcpu)
{
	unsigned long rflags;

	rflags = kvm_x86_ops->get_rflags(vcpu);
	if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
		rflags &= ~(unsigned long)(X86_EFLAGS_TF | X86_EFLAGS_RF);
	return rflags;
}
EXPORT_SYMBOL_GPL(kvm_get_rflags);

void kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
{
	if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP &&
	    vcpu->arch.singlestep_cs ==
			get_segment_selector(vcpu, VCPU_SREG_CS) &&
	    vcpu->arch.singlestep_rip == kvm_rip_read(vcpu))
		rflags |= X86_EFLAGS_TF | X86_EFLAGS_RF;
	kvm_x86_ops->set_rflags(vcpu, rflags);
}
EXPORT_SYMBOL_GPL(kvm_set_rflags);

EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_exit);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_inj_virq);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_page_fault);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_msr);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_cr);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_vmrun);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_vmexit);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_vmexit_inject);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_intr_vmexit);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_invlpga);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_skinit);