x86.c

/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * derived from drivers/kvm/kvm_main.c
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright (C) 2008 Qumranet, Inc.
 * Copyright IBM Corporation, 2008
 *
 * Authors:
 *   Avi Kivity   <avi@qumranet.com>
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Amit Shah    <amit.shah@qumranet.com>
 *   Ben-Ami Yassour <benami@il.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#include <linux/kvm_host.h>
#include "irq.h"
#include "mmu.h"
#include "i8254.h"
#include "tss.h"
#include "kvm_cache_regs.h"
#include "x86.h"

#include <linux/clocksource.h>
#include <linux/interrupt.h>
#include <linux/kvm.h>
#include <linux/fs.h>
#include <linux/vmalloc.h>
#include <linux/module.h>
#include <linux/mman.h>
#include <linux/highmem.h>
#include <linux/iommu.h>
#include <linux/intel-iommu.h>
#include <linux/cpufreq.h>
#include <linux/user-return-notifier.h>
#include <linux/srcu.h>
#include <linux/slab.h>
#include <linux/perf_event.h>
#include <trace/events/kvm.h>

#define CREATE_TRACE_POINTS
#include "trace.h"

#include <asm/debugreg.h>
#include <asm/uaccess.h>
#include <asm/msr.h>
#include <asm/desc.h>
#include <asm/mtrr.h>
#include <asm/mce.h>

#define MAX_IO_MSRS 256
#define CR0_RESERVED_BITS						\
	(~(unsigned long)(X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS \
			  | X86_CR0_ET | X86_CR0_NE | X86_CR0_WP | X86_CR0_AM \
			  | X86_CR0_NW | X86_CR0_CD | X86_CR0_PG))
#define CR4_RESERVED_BITS						\
	(~(unsigned long)(X86_CR4_VME | X86_CR4_PVI | X86_CR4_TSD | X86_CR4_DE\
			  | X86_CR4_PSE | X86_CR4_PAE | X86_CR4_MCE	\
			  | X86_CR4_PGE | X86_CR4_PCE | X86_CR4_OSFXSR	\
			  | X86_CR4_OSXMMEXCPT | X86_CR4_VMXE))

#define CR8_RESERVED_BITS (~(unsigned long)X86_CR8_TPR)

#define KVM_MAX_MCE_BANKS 32
#define KVM_MCE_CAP_SUPPORTED MCG_CTL_P

/* EFER defaults:
 * - enable syscall per default because it is emulated by KVM
 * - enable LME and LMA per default on 64 bit KVM
 */
#ifdef CONFIG_X86_64
static u64 __read_mostly efer_reserved_bits = 0xfffffffffffffafeULL;
#else
static u64 __read_mostly efer_reserved_bits = 0xfffffffffffffffeULL;
#endif

#define VM_STAT(x) offsetof(struct kvm, stat.x), KVM_STAT_VM
#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU

static void update_cr8_intercept(struct kvm_vcpu *vcpu);
static int kvm_dev_ioctl_get_supported_cpuid(struct kvm_cpuid2 *cpuid,
				    struct kvm_cpuid_entry2 __user *entries);

struct kvm_x86_ops *kvm_x86_ops;
EXPORT_SYMBOL_GPL(kvm_x86_ops);

/* declared bool so the type matches the module_param() below */
bool ignore_msrs = false;
module_param_named(ignore_msrs, ignore_msrs, bool, S_IRUGO | S_IWUSR);

#define KVM_NR_SHARED_MSRS 16

struct kvm_shared_msrs_global {
	int nr;
	u32 msrs[KVM_NR_SHARED_MSRS];
};

struct kvm_shared_msrs {
	struct user_return_notifier urn;
	bool registered;
	struct kvm_shared_msr_values {
		u64 host;
		u64 curr;
	} values[KVM_NR_SHARED_MSRS];
};

static struct kvm_shared_msrs_global __read_mostly shared_msrs_global;
static DEFINE_PER_CPU(struct kvm_shared_msrs, shared_msrs);

struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "pf_fixed", VCPU_STAT(pf_fixed) },
	{ "pf_guest", VCPU_STAT(pf_guest) },
	{ "tlb_flush", VCPU_STAT(tlb_flush) },
	{ "invlpg", VCPU_STAT(invlpg) },
	{ "exits", VCPU_STAT(exits) },
	{ "io_exits", VCPU_STAT(io_exits) },
	{ "mmio_exits", VCPU_STAT(mmio_exits) },
	{ "signal_exits", VCPU_STAT(signal_exits) },
	{ "irq_window", VCPU_STAT(irq_window_exits) },
	{ "nmi_window", VCPU_STAT(nmi_window_exits) },
	{ "halt_exits", VCPU_STAT(halt_exits) },
	{ "halt_wakeup", VCPU_STAT(halt_wakeup) },
	{ "hypercalls", VCPU_STAT(hypercalls) },
	{ "request_irq", VCPU_STAT(request_irq_exits) },
	{ "irq_exits", VCPU_STAT(irq_exits) },
	{ "host_state_reload", VCPU_STAT(host_state_reload) },
	{ "efer_reload", VCPU_STAT(efer_reload) },
	{ "fpu_reload", VCPU_STAT(fpu_reload) },
	{ "insn_emulation", VCPU_STAT(insn_emulation) },
	{ "insn_emulation_fail", VCPU_STAT(insn_emulation_fail) },
	{ "irq_injections", VCPU_STAT(irq_injections) },
	{ "nmi_injections", VCPU_STAT(nmi_injections) },
	{ "mmu_shadow_zapped", VM_STAT(mmu_shadow_zapped) },
	{ "mmu_pte_write", VM_STAT(mmu_pte_write) },
	{ "mmu_pte_updated", VM_STAT(mmu_pte_updated) },
	{ "mmu_pde_zapped", VM_STAT(mmu_pde_zapped) },
	{ "mmu_flooded", VM_STAT(mmu_flooded) },
	{ "mmu_recycled", VM_STAT(mmu_recycled) },
	{ "mmu_cache_miss", VM_STAT(mmu_cache_miss) },
	{ "mmu_unsync", VM_STAT(mmu_unsync) },
	{ "remote_tlb_flush", VM_STAT(remote_tlb_flush) },
	{ "largepages", VM_STAT(lpages) },
	{ NULL }
};

static void kvm_on_user_return(struct user_return_notifier *urn)
{
	unsigned slot;
	struct kvm_shared_msrs *locals
		= container_of(urn, struct kvm_shared_msrs, urn);
	struct kvm_shared_msr_values *values;

	for (slot = 0; slot < shared_msrs_global.nr; ++slot) {
		values = &locals->values[slot];
		if (values->host != values->curr) {
			wrmsrl(shared_msrs_global.msrs[slot], values->host);
			values->curr = values->host;
		}
	}
	locals->registered = false;
	user_return_notifier_unregister(urn);
}

static void shared_msr_update(unsigned slot, u32 msr)
{
	struct kvm_shared_msrs *smsr;
	u64 value;

	smsr = &__get_cpu_var(shared_msrs);
	/* shared_msrs_global.nr is only read here, and nobody should be
	 * modifying it at this point, so no lock is needed */
	if (slot >= shared_msrs_global.nr) {
		printk(KERN_ERR "kvm: invalid MSR slot!");
		return;
	}
	rdmsrl_safe(msr, &value);
	smsr->values[slot].host = value;
	smsr->values[slot].curr = value;
}

void kvm_define_shared_msr(unsigned slot, u32 msr)
{
	if (slot >= shared_msrs_global.nr)
		shared_msrs_global.nr = slot + 1;
	shared_msrs_global.msrs[slot] = msr;
	/* make sure the update to shared_msrs_global is visible before use */
	smp_wmb();
}
EXPORT_SYMBOL_GPL(kvm_define_shared_msr);

static void kvm_shared_msr_cpu_online(void)
{
	unsigned i;

	for (i = 0; i < shared_msrs_global.nr; ++i)
		shared_msr_update(i, shared_msrs_global.msrs[i]);
}

void kvm_set_shared_msr(unsigned slot, u64 value, u64 mask)
{
	struct kvm_shared_msrs *smsr = &__get_cpu_var(shared_msrs);

	if (((value ^ smsr->values[slot].curr) & mask) == 0)
		return;
	smsr->values[slot].curr = value;
	wrmsrl(shared_msrs_global.msrs[slot], value);
	if (!smsr->registered) {
		smsr->urn.on_user_return = kvm_on_user_return;
		user_return_notifier_register(&smsr->urn);
		smsr->registered = true;
	}
}
EXPORT_SYMBOL_GPL(kvm_set_shared_msr);
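
/*
 * Usage sketch (illustrative, assuming a vendor module such as vmx.c):
 * at module init each lazily-switched MSR is registered once, e.g.
 *
 *	kvm_define_shared_msr(0, MSR_K6_STAR);
 *
 * and on the VM-entry path the guest value is loaded with
 *
 *	kvm_set_shared_msr(0, guest_val, -1ull);
 *
 * which also registers the user-return notifier the first time.  When the
 * CPU next returns to userspace, kvm_on_user_return() rewrites every MSR
 * whose current value differs from the saved host value, so host MSR
 * restore is deferred from every vmexit to the much rarer userspace return.
 */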

static void drop_user_return_notifiers(void *ignore)
{
	struct kvm_shared_msrs *smsr = &__get_cpu_var(shared_msrs);

	if (smsr->registered)
		kvm_on_user_return(&smsr->urn);
}

u64 kvm_get_apic_base(struct kvm_vcpu *vcpu)
{
	/* vcpu->arch.apic_base backs both the in-kernel and the userspace
	 * irqchip case, so no irqchip_in_kernel() check is needed here */
	return vcpu->arch.apic_base;
}
EXPORT_SYMBOL_GPL(kvm_get_apic_base);

void kvm_set_apic_base(struct kvm_vcpu *vcpu, u64 data)
{
	/* TODO: reserve bits check */
	if (irqchip_in_kernel(vcpu->kvm))
		kvm_lapic_set_base(vcpu, data);
	else
		vcpu->arch.apic_base = data;
}
EXPORT_SYMBOL_GPL(kvm_set_apic_base);

#define EXCPT_BENIGN		0
#define EXCPT_CONTRIBUTORY	1
#define EXCPT_PF		2

static int exception_class(int vector)
{
	switch (vector) {
	case PF_VECTOR:
		return EXCPT_PF;
	case DE_VECTOR:
	case TS_VECTOR:
	case NP_VECTOR:
	case SS_VECTOR:
	case GP_VECTOR:
		return EXCPT_CONTRIBUTORY;
	default:
		break;
	}
	return EXCPT_BENIGN;
}

static void kvm_multiple_exception(struct kvm_vcpu *vcpu,
		unsigned nr, bool has_error, u32 error_code,
		bool reinject)
{
	u32 prev_nr;
	int class1, class2;

	if (!vcpu->arch.exception.pending) {
	queue:
		vcpu->arch.exception.pending = true;
		vcpu->arch.exception.has_error_code = has_error;
		vcpu->arch.exception.nr = nr;
		vcpu->arch.exception.error_code = error_code;
		vcpu->arch.exception.reinject = reinject;
		return;
	}

	/* an exception is already pending; see how the two combine */
	prev_nr = vcpu->arch.exception.nr;
	if (prev_nr == DF_VECTOR) {
		/* triple fault -> shutdown */
		set_bit(KVM_REQ_TRIPLE_FAULT, &vcpu->requests);
		return;
	}
	class1 = exception_class(prev_nr);
	class2 = exception_class(nr);
	if ((class1 == EXCPT_CONTRIBUTORY && class2 == EXCPT_CONTRIBUTORY)
		|| (class1 == EXCPT_PF && class2 != EXCPT_BENIGN)) {
		/* generate double fault per SDM Table 5-5 */
		vcpu->arch.exception.pending = true;
		vcpu->arch.exception.has_error_code = true;
		vcpu->arch.exception.nr = DF_VECTOR;
		vcpu->arch.exception.error_code = 0;
	} else
		/* replace the previous exception with the new one, in the
		   hope that instruction re-execution will regenerate the
		   lost exception */
		goto queue;
}
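
/*
 * How the classes above combine (a sketch of SDM Table 5-5, which the
 * comment in kvm_multiple_exception() refers to):
 *
 *	pending #PF  + new #PF or contributory	-> #DF is queued
 *	pending #GP  + new contributory		-> #DF is queued
 *	pending #DF  + anything			-> triple fault, VM shutdown
 *	pending benign (e.g. #DB) + anything	-> new exception replaces it
 */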

void kvm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr)
{
	kvm_multiple_exception(vcpu, nr, false, 0, false);
}
EXPORT_SYMBOL_GPL(kvm_queue_exception);

void kvm_requeue_exception(struct kvm_vcpu *vcpu, unsigned nr)
{
	kvm_multiple_exception(vcpu, nr, false, 0, true);
}
EXPORT_SYMBOL_GPL(kvm_requeue_exception);

void kvm_inject_page_fault(struct kvm_vcpu *vcpu, unsigned long addr,
			   u32 error_code)
{
	++vcpu->stat.pf_guest;
	vcpu->arch.cr2 = addr;
	kvm_queue_exception_e(vcpu, PF_VECTOR, error_code);
}

void kvm_inject_nmi(struct kvm_vcpu *vcpu)
{
	vcpu->arch.nmi_pending = 1;
}
EXPORT_SYMBOL_GPL(kvm_inject_nmi);

void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code)
{
	kvm_multiple_exception(vcpu, nr, true, error_code, false);
}
EXPORT_SYMBOL_GPL(kvm_queue_exception_e);

void kvm_requeue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code)
{
	kvm_multiple_exception(vcpu, nr, true, error_code, true);
}
EXPORT_SYMBOL_GPL(kvm_requeue_exception_e);

/*
 * Checks if cpl <= required_cpl; if true, return true.  Otherwise queue
 * a #GP and return false.
 */
bool kvm_require_cpl(struct kvm_vcpu *vcpu, int required_cpl)
{
	if (kvm_x86_ops->get_cpl(vcpu) <= required_cpl)
		return true;
	kvm_queue_exception_e(vcpu, GP_VECTOR, 0);
	return false;
}
EXPORT_SYMBOL_GPL(kvm_require_cpl);

/*
 * Load the pae pdptrs.  Return true if they are all valid.
 */
int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3)
{
	gfn_t pdpt_gfn = cr3 >> PAGE_SHIFT;
	unsigned offset = ((cr3 & (PAGE_SIZE-1)) >> 5) << 2;
	int i;
	int ret;
	u64 pdpte[ARRAY_SIZE(vcpu->arch.pdptrs)];

	ret = kvm_read_guest_page(vcpu->kvm, pdpt_gfn, pdpte,
				  offset * sizeof(u64), sizeof(pdpte));
	if (ret < 0) {
		ret = 0;
		goto out;
	}
	for (i = 0; i < ARRAY_SIZE(pdpte); ++i) {
		if (is_present_gpte(pdpte[i]) &&
		    (pdpte[i] & vcpu->arch.mmu.rsvd_bits_mask[0][2])) {
			ret = 0;
			goto out;
		}
	}
	ret = 1;

	memcpy(vcpu->arch.pdptrs, pdpte, sizeof(vcpu->arch.pdptrs));
	__set_bit(VCPU_EXREG_PDPTR,
		  (unsigned long *)&vcpu->arch.regs_avail);
	__set_bit(VCPU_EXREG_PDPTR,
		  (unsigned long *)&vcpu->arch.regs_dirty);
out:
	return ret;
}
EXPORT_SYMBOL_GPL(load_pdptrs);
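
/*
 * Worked example for the offset arithmetic above (illustrative values):
 * in PAE mode CR3 bits 5..11 locate the 32-byte-aligned PDPT inside its
 * page.  For cr3 = 0x12345060:
 *	pdpt_gfn = 0x12345060 >> PAGE_SHIFT	= 0x12345
 *	offset	 = ((0x060) >> 5) << 2		= 12	(in u64 entries)
 *	offset * sizeof(u64)			= 96	= 0x60 bytes
 * so the four pdptes are read from bytes 0x60..0x7f of guest frame 0x12345.
 */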

static bool pdptrs_changed(struct kvm_vcpu *vcpu)
{
	u64 pdpte[ARRAY_SIZE(vcpu->arch.pdptrs)];
	bool changed = true;
	int r;

	if (is_long_mode(vcpu) || !is_pae(vcpu))
		return false;

	if (!test_bit(VCPU_EXREG_PDPTR,
		      (unsigned long *)&vcpu->arch.regs_avail))
		return true;

	r = kvm_read_guest(vcpu->kvm, vcpu->arch.cr3 & ~31u, pdpte, sizeof(pdpte));
	if (r < 0)
		goto out;
	changed = memcmp(pdpte, vcpu->arch.pdptrs, sizeof(pdpte)) != 0;
out:
	return changed;
}

void kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
{
	cr0 |= X86_CR0_ET;

#ifdef CONFIG_X86_64
	if (cr0 & 0xffffffff00000000UL) {
		kvm_inject_gp(vcpu, 0);
		return;
	}
#endif

	cr0 &= ~CR0_RESERVED_BITS;

	if ((cr0 & X86_CR0_NW) && !(cr0 & X86_CR0_CD)) {
		kvm_inject_gp(vcpu, 0);
		return;
	}

	if ((cr0 & X86_CR0_PG) && !(cr0 & X86_CR0_PE)) {
		kvm_inject_gp(vcpu, 0);
		return;
	}

	if (!is_paging(vcpu) && (cr0 & X86_CR0_PG)) {
#ifdef CONFIG_X86_64
		if ((vcpu->arch.efer & EFER_LME)) {
			int cs_db, cs_l;

			if (!is_pae(vcpu)) {
				kvm_inject_gp(vcpu, 0);
				return;
			}
			kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
			if (cs_l) {
				kvm_inject_gp(vcpu, 0);
				return;
			}
		} else
#endif
		if (is_pae(vcpu) && !load_pdptrs(vcpu, vcpu->arch.cr3)) {
			kvm_inject_gp(vcpu, 0);
			return;
		}
	}

	kvm_x86_ops->set_cr0(vcpu, cr0);

	kvm_mmu_reset_context(vcpu);
}
EXPORT_SYMBOL_GPL(kvm_set_cr0);

void kvm_lmsw(struct kvm_vcpu *vcpu, unsigned long msw)
{
	kvm_set_cr0(vcpu, kvm_read_cr0_bits(vcpu, ~0x0eul) | (msw & 0x0f));
}
EXPORT_SYMBOL_GPL(kvm_lmsw);

void kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
{
	unsigned long old_cr4 = kvm_read_cr4(vcpu);
	unsigned long pdptr_bits = X86_CR4_PGE | X86_CR4_PSE | X86_CR4_PAE;

	if (cr4 & CR4_RESERVED_BITS) {
		kvm_inject_gp(vcpu, 0);
		return;
	}

	if (is_long_mode(vcpu)) {
		if (!(cr4 & X86_CR4_PAE)) {
			kvm_inject_gp(vcpu, 0);
			return;
		}
	} else if (is_paging(vcpu) && (cr4 & X86_CR4_PAE)
		   && ((cr4 ^ old_cr4) & pdptr_bits)
		   && !load_pdptrs(vcpu, vcpu->arch.cr3)) {
		kvm_inject_gp(vcpu, 0);
		return;
	}

	if (cr4 & X86_CR4_VMXE) {
		kvm_inject_gp(vcpu, 0);
		return;
	}
	kvm_x86_ops->set_cr4(vcpu, cr4);
	vcpu->arch.cr4 = cr4;
	kvm_mmu_reset_context(vcpu);
}
EXPORT_SYMBOL_GPL(kvm_set_cr4);

void kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
{
	if (cr3 == vcpu->arch.cr3 && !pdptrs_changed(vcpu)) {
		kvm_mmu_sync_roots(vcpu);
		kvm_mmu_flush_tlb(vcpu);
		return;
	}

	if (is_long_mode(vcpu)) {
		if (cr3 & CR3_L_MODE_RESERVED_BITS) {
			kvm_inject_gp(vcpu, 0);
			return;
		}
	} else {
		if (is_pae(vcpu)) {
			if (cr3 & CR3_PAE_RESERVED_BITS) {
				kvm_inject_gp(vcpu, 0);
				return;
			}
			if (is_paging(vcpu) && !load_pdptrs(vcpu, cr3)) {
				kvm_inject_gp(vcpu, 0);
				return;
			}
		}
		/*
		 * We don't check reserved bits in nonpae mode, because
		 * this isn't enforced, and VMware depends on this.
		 */
	}

	/*
	 * Does the new cr3 value map to physical memory? (Note, we
	 * catch an invalid cr3 even in real-mode, because it would
	 * cause trouble later on when we turn on paging anyway.)
	 *
	 * A real CPU would silently accept an invalid cr3 and would
	 * attempt to use it - with largely undefined (and often hard
	 * to debug) behavior on the guest side.
	 */
	if (unlikely(!gfn_to_memslot(vcpu->kvm, cr3 >> PAGE_SHIFT)))
		kvm_inject_gp(vcpu, 0);
	else {
		vcpu->arch.cr3 = cr3;
		vcpu->arch.mmu.new_cr3(vcpu);
	}
}
EXPORT_SYMBOL_GPL(kvm_set_cr3);

void kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8)
{
	if (cr8 & CR8_RESERVED_BITS) {
		kvm_inject_gp(vcpu, 0);
		return;
	}
	if (irqchip_in_kernel(vcpu->kvm))
		kvm_lapic_set_tpr(vcpu, cr8);
	else
		vcpu->arch.cr8 = cr8;
}
EXPORT_SYMBOL_GPL(kvm_set_cr8);

unsigned long kvm_get_cr8(struct kvm_vcpu *vcpu)
{
	if (irqchip_in_kernel(vcpu->kvm))
		return kvm_lapic_get_cr8(vcpu);
	else
		return vcpu->arch.cr8;
}
EXPORT_SYMBOL_GPL(kvm_get_cr8);

int kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val)
{
	switch (dr) {
	case 0 ... 3:
		vcpu->arch.db[dr] = val;
		if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP))
			vcpu->arch.eff_db[dr] = val;
		break;
	case 4:
		if (kvm_read_cr4_bits(vcpu, X86_CR4_DE)) {
			kvm_queue_exception(vcpu, UD_VECTOR);
			return 1;
		}
		/* fall through */
	case 6:
		if (val & 0xffffffff00000000ULL) {
			kvm_inject_gp(vcpu, 0);
			return 1;
		}
		vcpu->arch.dr6 = (val & DR6_VOLATILE) | DR6_FIXED_1;
		break;
	case 5:
		if (kvm_read_cr4_bits(vcpu, X86_CR4_DE)) {
			kvm_queue_exception(vcpu, UD_VECTOR);
			return 1;
		}
		/* fall through */
	default: /* 7 */
		if (val & 0xffffffff00000000ULL) {
			kvm_inject_gp(vcpu, 0);
			return 1;
		}
		vcpu->arch.dr7 = (val & DR7_VOLATILE) | DR7_FIXED_1;
		if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)) {
			kvm_x86_ops->set_dr7(vcpu, vcpu->arch.dr7);
			vcpu->arch.switch_db_regs = (val & DR7_BP_EN_MASK);
		}
		break;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(kvm_set_dr);

int kvm_get_dr(struct kvm_vcpu *vcpu, int dr, unsigned long *val)
{
	switch (dr) {
	case 0 ... 3:
		*val = vcpu->arch.db[dr];
		break;
	case 4:
		if (kvm_read_cr4_bits(vcpu, X86_CR4_DE)) {
			kvm_queue_exception(vcpu, UD_VECTOR);
			return 1;
		}
		/* fall through */
	case 6:
		*val = vcpu->arch.dr6;
		break;
	case 5:
		if (kvm_read_cr4_bits(vcpu, X86_CR4_DE)) {
			kvm_queue_exception(vcpu, UD_VECTOR);
			return 1;
		}
		/* fall through */
	default: /* 7 */
		*val = vcpu->arch.dr7;
		break;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(kvm_get_dr);

static inline u32 bit(int bitno)
{
	return 1 << (bitno & 31);
}

/*
 * List of msr numbers which we expose to userspace through KVM_GET_MSRS
 * and KVM_SET_MSRS, and KVM_GET_MSR_INDEX_LIST.
 *
 * This list is modified at module load time to reflect the
 * capabilities of the host cpu.  This capabilities test skips MSRs that are
 * kvm-specific.  Those are put at the beginning of the list.
 */
#define KVM_SAVE_MSRS_BEGIN	7
static u32 msrs_to_save[] = {
	MSR_KVM_SYSTEM_TIME, MSR_KVM_WALL_CLOCK,
	MSR_KVM_SYSTEM_TIME_NEW, MSR_KVM_WALL_CLOCK_NEW,
	HV_X64_MSR_GUEST_OS_ID, HV_X64_MSR_HYPERCALL,
	HV_X64_MSR_APIC_ASSIST_PAGE,
	MSR_IA32_SYSENTER_CS, MSR_IA32_SYSENTER_ESP, MSR_IA32_SYSENTER_EIP,
	MSR_K6_STAR,
#ifdef CONFIG_X86_64
	MSR_CSTAR, MSR_KERNEL_GS_BASE, MSR_SYSCALL_MASK, MSR_LSTAR,
#endif
	MSR_IA32_TSC, MSR_IA32_PERF_STATUS, MSR_IA32_CR_PAT, MSR_VM_HSAVE_PA
};

static unsigned num_msrs_to_save;

static u32 emulated_msrs[] = {
	MSR_IA32_MISC_ENABLE,
};

static int set_efer(struct kvm_vcpu *vcpu, u64 efer)
{
	if (efer & efer_reserved_bits)
		return 1;

	if (is_paging(vcpu)
	    && (vcpu->arch.efer & EFER_LME) != (efer & EFER_LME))
		return 1;

	if (efer & EFER_FFXSR) {
		struct kvm_cpuid_entry2 *feat;

		feat = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
		if (!feat || !(feat->edx & bit(X86_FEATURE_FXSR_OPT)))
			return 1;
	}

	if (efer & EFER_SVME) {
		struct kvm_cpuid_entry2 *feat;

		feat = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
		if (!feat || !(feat->ecx & bit(X86_FEATURE_SVM)))
			return 1;
	}

	efer &= ~EFER_LMA;
	efer |= vcpu->arch.efer & EFER_LMA;

	kvm_x86_ops->set_efer(vcpu, efer);

	vcpu->arch.efer = efer;

	vcpu->arch.mmu.base_role.nxe = (efer & EFER_NX) && !tdp_enabled;
	kvm_mmu_reset_context(vcpu);

	return 0;
}

void kvm_enable_efer_bits(u64 mask)
{
	efer_reserved_bits &= ~mask;
}
EXPORT_SYMBOL_GPL(kvm_enable_efer_bits);

/*
 * Writes the msr value into the appropriate "register".
 * Returns 0 on success, non-0 otherwise.
 * Assumes vcpu_load() was already called.
 */
int kvm_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
{
	return kvm_x86_ops->set_msr(vcpu, msr_index, data);
}

/*
 * Adapt set_msr() to msr_io()'s calling convention
 */
static int do_set_msr(struct kvm_vcpu *vcpu, unsigned index, u64 *data)
{
	return kvm_set_msr(vcpu, index, *data);
}

static void kvm_write_wall_clock(struct kvm *kvm, gpa_t wall_clock)
{
	int version;
	int r;
	struct pvclock_wall_clock wc;
	struct timespec boot;

	if (!wall_clock)
		return;

	r = kvm_read_guest(kvm, wall_clock, &version, sizeof(version));
	if (r)
		return;

	if (version & 1)
		++version;  /* first time write, random junk */

	++version;

	kvm_write_guest(kvm, wall_clock, &version, sizeof(version));

	/*
	 * The guest calculates current wall clock time by adding
	 * system time (updated by kvm_write_guest_time below) to the
	 * wall clock specified here.  guest system time equals host
	 * system time for us, thus we must fill in host boot time here.
	 */
	getboottime(&boot);

	wc.sec = boot.tv_sec;
	wc.nsec = boot.tv_nsec;
	wc.version = version;

	kvm_write_guest(kvm, wall_clock, &wc, sizeof(wc));

	version++;
	kvm_write_guest(kvm, wall_clock, &version, sizeof(version));
}
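
/*
 * The version field is a seqcount shared with the guest: odd means an
 * update is in progress, even means wc is consistent.  A guest-side
 * reader under that protocol would look roughly like this (sketch, not
 * code from this file):
 *
 *	do {
 *		version = wc->version;
 *		rmb();
 *		sec = wc->sec;
 *		nsec = wc->nsec;
 *		rmb();
 *	} while ((version & 1) || (version != wc->version));
 */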

static uint32_t div_frac(uint32_t dividend, uint32_t divisor)
{
	uint32_t quotient, remainder;

	/* Don't try to replace with do_div(), this one calculates
	 * "(dividend << 32) / divisor" */
	__asm__ ( "divl %4"
		  : "=a" (quotient), "=d" (remainder)
		  : "0" (0), "1" (dividend), "r" (divisor) );
	return quotient;
}
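
/*
 * div_frac() thus returns the fraction dividend/divisor in 0.32 fixed
 * point.  For example (illustrative): div_frac(1000000000, 2000000000)
 * = 0x80000000, which encodes 0.5.
 */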

static void kvm_set_time_scale(uint32_t tsc_khz, struct pvclock_vcpu_time_info *hv_clock)
{
	uint64_t nsecs = 1000000000LL;
	int32_t  shift = 0;
	uint64_t tps64;
	uint32_t tps32;

	tps64 = tsc_khz * 1000LL;
	while (tps64 > nsecs * 2) {
		tps64 >>= 1;
		shift--;
	}

	tps32 = (uint32_t)tps64;
	while (tps32 <= (uint32_t)nsecs) {
		tps32 <<= 1;
		shift++;
	}

	hv_clock->tsc_shift = shift;
	hv_clock->tsc_to_system_mul = div_frac(nsecs, tps32);

	pr_debug("%s: tsc_khz %u, tsc_shift %d, tsc_mul %u\n",
		 __func__, tsc_khz, hv_clock->tsc_shift,
		 hv_clock->tsc_to_system_mul);
}
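
/*
 * Worked example (illustrative): for tsc_khz = 2000000 (a 2 GHz TSC),
 * tps64 = 2e9 already satisfies 1e9 < tps64 <= 2e9, so tsc_shift stays 0
 * and tsc_to_system_mul = div_frac(1e9, 2e9) = 0x80000000, i.e. 0.5 ns
 * per cycle.  The guest side of pvclock then recovers nanoseconds from a
 * TSC delta roughly as:
 *
 *	delta = tsc - tsc_timestamp;
 *	delta = tsc_shift >= 0 ? delta << tsc_shift : delta >> -tsc_shift;
 *	ns = (delta * tsc_to_system_mul) >> 32;
 */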

static DEFINE_PER_CPU(unsigned long, cpu_tsc_khz);

static void kvm_write_guest_time(struct kvm_vcpu *v)
{
	struct timespec ts;
	unsigned long flags;
	struct kvm_vcpu_arch *vcpu = &v->arch;
	void *shared_kaddr;
	unsigned long this_tsc_khz;

	if (!vcpu->time_page)
		return;

	this_tsc_khz = get_cpu_var(cpu_tsc_khz);
	if (unlikely(vcpu->hv_clock_tsc_khz != this_tsc_khz)) {
		kvm_set_time_scale(this_tsc_khz, &vcpu->hv_clock);
		vcpu->hv_clock_tsc_khz = this_tsc_khz;
	}
	put_cpu_var(cpu_tsc_khz);

	/* Keep irq disabled to prevent changes to the clock */
	local_irq_save(flags);
	kvm_get_msr(v, MSR_IA32_TSC, &vcpu->hv_clock.tsc_timestamp);
	ktime_get_ts(&ts);
	monotonic_to_bootbased(&ts);
	local_irq_restore(flags);

	/* With all the info we got, fill in the values */

	vcpu->hv_clock.system_time = ts.tv_nsec +
		(NSEC_PER_SEC * (u64)ts.tv_sec) + v->kvm->arch.kvmclock_offset;

	vcpu->hv_clock.flags = 0;

	/*
	 * The interface expects us to write an even number signaling that the
	 * update is finished.  Since the guest won't see the intermediate
	 * state, we just increase by 2 at the end.
	 */
	vcpu->hv_clock.version += 2;

	shared_kaddr = kmap_atomic(vcpu->time_page, KM_USER0);

	memcpy(shared_kaddr + vcpu->time_offset, &vcpu->hv_clock,
	       sizeof(vcpu->hv_clock));

	kunmap_atomic(shared_kaddr, KM_USER0);

	mark_page_dirty(v->kvm, vcpu->time >> PAGE_SHIFT);
}

static int kvm_request_guest_time_update(struct kvm_vcpu *v)
{
	struct kvm_vcpu_arch *vcpu = &v->arch;

	if (!vcpu->time_page)
		return 0;
	set_bit(KVM_REQ_KVMCLOCK_UPDATE, &v->requests);
	return 1;
}

static bool msr_mtrr_valid(unsigned msr)
{
	switch (msr) {
	case 0x200 ... 0x200 + 2 * KVM_NR_VAR_MTRR - 1:
	case MSR_MTRRfix64K_00000:
	case MSR_MTRRfix16K_80000:
	case MSR_MTRRfix16K_A0000:
	case MSR_MTRRfix4K_C0000:
	case MSR_MTRRfix4K_C8000:
	case MSR_MTRRfix4K_D0000:
	case MSR_MTRRfix4K_D8000:
	case MSR_MTRRfix4K_E0000:
	case MSR_MTRRfix4K_E8000:
	case MSR_MTRRfix4K_F0000:
	case MSR_MTRRfix4K_F8000:
	case MSR_MTRRdefType:
	case MSR_IA32_CR_PAT:
		return true;
	case 0x2f8:
		return true;
	}
	return false;
}

static bool valid_pat_type(unsigned t)
{
	return t < 8 && (1 << t) & 0xf3;	/* 0, 1, 4, 5, 6, 7 */
}

static bool valid_mtrr_type(unsigned t)
{
	return t < 8 && (1 << t) & 0x73;	/* 0, 1, 4, 5, 6 */
}

static bool mtrr_valid(struct kvm_vcpu *vcpu, u32 msr, u64 data)
{
	int i;

	if (!msr_mtrr_valid(msr))
		return false;

	if (msr == MSR_IA32_CR_PAT) {
		for (i = 0; i < 8; i++)
			if (!valid_pat_type((data >> (i * 8)) & 0xff))
				return false;
		return true;
	} else if (msr == MSR_MTRRdefType) {
		if (data & ~0xcff)
			return false;
		return valid_mtrr_type(data & 0xff);
	} else if (msr >= MSR_MTRRfix64K_00000 && msr <= MSR_MTRRfix4K_F8000) {
		for (i = 0; i < 8 ; i++)
			if (!valid_mtrr_type((data >> (i * 8)) & 0xff))
				return false;
		return true;
	}

	/* variable MTRRs */
	return valid_mtrr_type(data & 0xff);
}

static int set_msr_mtrr(struct kvm_vcpu *vcpu, u32 msr, u64 data)
{
	u64 *p = (u64 *)&vcpu->arch.mtrr_state.fixed_ranges;

	if (!mtrr_valid(vcpu, msr, data))
		return 1;

	if (msr == MSR_MTRRdefType) {
		vcpu->arch.mtrr_state.def_type = data;
		vcpu->arch.mtrr_state.enabled = (data & 0xc00) >> 10;
	} else if (msr == MSR_MTRRfix64K_00000)
		p[0] = data;
	else if (msr == MSR_MTRRfix16K_80000 || msr == MSR_MTRRfix16K_A0000)
		p[1 + msr - MSR_MTRRfix16K_80000] = data;
	else if (msr >= MSR_MTRRfix4K_C0000 && msr <= MSR_MTRRfix4K_F8000)
		p[3 + msr - MSR_MTRRfix4K_C0000] = data;
	else if (msr == MSR_IA32_CR_PAT)
		vcpu->arch.pat = data;
	else {	/* Variable MTRRs */
		int idx, is_mtrr_mask;
		u64 *pt;

		idx = (msr - 0x200) / 2;
		is_mtrr_mask = msr - 0x200 - 2 * idx;
		if (!is_mtrr_mask)
			pt =
			  (u64 *)&vcpu->arch.mtrr_state.var_ranges[idx].base_lo;
		else
			pt =
			  (u64 *)&vcpu->arch.mtrr_state.var_ranges[idx].mask_lo;
		*pt = data;
	}

	kvm_mmu_reset_context(vcpu);
	return 0;
}
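
/*
 * Index arithmetic for the variable-range branch above (illustrative):
 * variable MTRRs are base/mask pairs starting at MSR 0x200, so a write
 * to MSR 0x205 gives idx = (0x205 - 0x200) / 2 = 2 and
 * is_mtrr_mask = 0x205 - 0x200 - 2*2 = 1, i.e. the mask register of
 * variable range 2 (MTRRphysMask2).
 */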

static int set_msr_mce(struct kvm_vcpu *vcpu, u32 msr, u64 data)
{
	u64 mcg_cap = vcpu->arch.mcg_cap;
	unsigned bank_num = mcg_cap & 0xff;

	switch (msr) {
	case MSR_IA32_MCG_STATUS:
		vcpu->arch.mcg_status = data;
		break;
	case MSR_IA32_MCG_CTL:
		if (!(mcg_cap & MCG_CTL_P))
			return 1;
		if (data != 0 && data != ~(u64)0)
			return -1;
		vcpu->arch.mcg_ctl = data;
		break;
	default:
		if (msr >= MSR_IA32_MC0_CTL &&
		    msr < MSR_IA32_MC0_CTL + 4 * bank_num) {
			u32 offset = msr - MSR_IA32_MC0_CTL;
			/* only 0 or all 1s can be written to IA32_MCi_CTL;
			 * some Linux kernels, though, clear bit 10 in bank 4
			 * to work around a BIOS/GART TLB issue on AMD K8s,
			 * so ignore that bit to avoid an uncaught #GP in the
			 * guest.
			 */
			if ((offset & 0x3) == 0 &&
			    data != 0 && (data | (1 << 10)) != ~(u64)0)
				return -1;
			vcpu->arch.mce_banks[offset] = data;
			break;
		}
		return 1;
	}
	return 0;
}
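
/*
 * Each MCE bank is a quadruple of MSRs (CTL, STATUS, ADDR, MISC), so in
 * the code above (offset & 0x3) == 0 selects the IA32_MCi_CTL register
 * of bank offset/4; e.g. offset 8 is bank 2's CTL register, and only CTL
 * registers get the all-0/all-1 check (modulo bit 10).
 */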

static int xen_hvm_config(struct kvm_vcpu *vcpu, u64 data)
{
	struct kvm *kvm = vcpu->kvm;
	int lm = is_long_mode(vcpu);
	u8 *blob_addr = lm ? (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_64
		: (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
	u8 blob_size = lm ? kvm->arch.xen_hvm_config.blob_size_64
		: kvm->arch.xen_hvm_config.blob_size_32;
	u32 page_num = data & ~PAGE_MASK;
	u64 page_addr = data & PAGE_MASK;
	u8 *page;
	int r;

	r = -E2BIG;
	if (page_num >= blob_size)
		goto out;
	r = -ENOMEM;
	page = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!page)
		goto out;
	r = -EFAULT;
	if (copy_from_user(page, blob_addr + (page_num * PAGE_SIZE), PAGE_SIZE))
		goto out_free;
	if (kvm_write_guest(kvm, page_addr, page, PAGE_SIZE))
		goto out_free;
	r = 0;
out_free:
	kfree(page);
out:
	return r;
}

static bool kvm_hv_hypercall_enabled(struct kvm *kvm)
{
	return kvm->arch.hv_hypercall & HV_X64_MSR_HYPERCALL_ENABLE;
}

static bool kvm_hv_msr_partition_wide(u32 msr)
{
	bool r = false;

	switch (msr) {
	case HV_X64_MSR_GUEST_OS_ID:
	case HV_X64_MSR_HYPERCALL:
		r = true;
		break;
	}

	return r;
}

static int set_msr_hyperv_pw(struct kvm_vcpu *vcpu, u32 msr, u64 data)
{
	struct kvm *kvm = vcpu->kvm;

	switch (msr) {
	case HV_X64_MSR_GUEST_OS_ID:
		kvm->arch.hv_guest_os_id = data;
		/* setting the guest os id to zero disables the hypercall page */
		if (!kvm->arch.hv_guest_os_id)
			kvm->arch.hv_hypercall &= ~HV_X64_MSR_HYPERCALL_ENABLE;
		break;
	case HV_X64_MSR_HYPERCALL: {
		u64 gfn;
		unsigned long addr;
		u8 instructions[4];

		/* if the guest os id is not set, the hypercall stays disabled */
		if (!kvm->arch.hv_guest_os_id)
			break;
		if (!(data & HV_X64_MSR_HYPERCALL_ENABLE)) {
			kvm->arch.hv_hypercall = data;
			break;
		}
		gfn = data >> HV_X64_MSR_HYPERCALL_PAGE_ADDRESS_SHIFT;
		addr = gfn_to_hva(kvm, gfn);
		if (kvm_is_error_hva(addr))
			return 1;
		kvm_x86_ops->patch_hypercall(vcpu, instructions);
		((unsigned char *)instructions)[3] = 0xc3; /* ret */
		if (copy_to_user((void __user *)addr, instructions, 4))
			return 1;
		kvm->arch.hv_hypercall = data;
		break;
	}
	default:
		pr_unimpl(vcpu, "HYPER-V unimplemented wrmsr: 0x%x "
			  "data 0x%llx\n", msr, data);
		return 1;
	}

	return 0;
}

static int set_msr_hyperv(struct kvm_vcpu *vcpu, u32 msr, u64 data)
{
	switch (msr) {
	case HV_X64_MSR_APIC_ASSIST_PAGE: {
		unsigned long addr;

		if (!(data & HV_X64_MSR_APIC_ASSIST_PAGE_ENABLE)) {
			vcpu->arch.hv_vapic = data;
			break;
		}
		addr = gfn_to_hva(vcpu->kvm, data >>
				  HV_X64_MSR_APIC_ASSIST_PAGE_ADDRESS_SHIFT);
		if (kvm_is_error_hva(addr))
			return 1;
		if (clear_user((void __user *)addr, PAGE_SIZE))
			return 1;
		vcpu->arch.hv_vapic = data;
		break;
	}
	case HV_X64_MSR_EOI:
		return kvm_hv_vapic_msr_write(vcpu, APIC_EOI, data);
	case HV_X64_MSR_ICR:
		return kvm_hv_vapic_msr_write(vcpu, APIC_ICR, data);
	case HV_X64_MSR_TPR:
		return kvm_hv_vapic_msr_write(vcpu, APIC_TASKPRI, data);
	default:
		pr_unimpl(vcpu, "HYPER-V unimplemented wrmsr: 0x%x "
			  "data 0x%llx\n", msr, data);
		return 1;
	}

	return 0;
}

int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)
{
	switch (msr) {
	case MSR_EFER:
		return set_efer(vcpu, data);
	case MSR_K7_HWCR:
		data &= ~(u64)0x40;	/* ignore flush filter disable */
		data &= ~(u64)0x100;	/* ignore ignne emulation enable */
		if (data != 0) {
			pr_unimpl(vcpu, "unimplemented HWCR wrmsr: 0x%llx\n",
				data);
			return 1;
		}
		break;
	case MSR_FAM10H_MMIO_CONF_BASE:
		if (data != 0) {
			pr_unimpl(vcpu, "unimplemented MMIO_CONF_BASE wrmsr: "
				"0x%llx\n", data);
			return 1;
		}
		break;
	case MSR_AMD64_NB_CFG:
		break;
	case MSR_IA32_DEBUGCTLMSR:
		if (!data) {
			/* We support the non-activated case already */
			break;
		} else if (data & ~(DEBUGCTLMSR_LBR | DEBUGCTLMSR_BTF)) {
			/* Values other than LBR and BTF are vendor-specific,
			   thus reserved and should throw a #GP */
			return 1;
		}
		pr_unimpl(vcpu, "%s: MSR_IA32_DEBUGCTLMSR 0x%llx, nop\n",
			__func__, data);
		break;
	case MSR_IA32_UCODE_REV:
	case MSR_IA32_UCODE_WRITE:
	case MSR_VM_HSAVE_PA:
	case MSR_AMD64_PATCH_LOADER:
		break;
	case 0x200 ... 0x2ff:
		return set_msr_mtrr(vcpu, msr, data);
	case MSR_IA32_APICBASE:
		kvm_set_apic_base(vcpu, data);
		break;
	case APIC_BASE_MSR ... APIC_BASE_MSR + 0x3ff:
		return kvm_x2apic_msr_write(vcpu, msr, data);
	case MSR_IA32_MISC_ENABLE:
		vcpu->arch.ia32_misc_enable_msr = data;
		break;
	case MSR_KVM_WALL_CLOCK_NEW:
	case MSR_KVM_WALL_CLOCK:
		vcpu->kvm->arch.wall_clock = data;
		kvm_write_wall_clock(vcpu->kvm, data);
		break;
	case MSR_KVM_SYSTEM_TIME_NEW:
	case MSR_KVM_SYSTEM_TIME: {
		if (vcpu->arch.time_page) {
			kvm_release_page_dirty(vcpu->arch.time_page);
			vcpu->arch.time_page = NULL;
		}

		vcpu->arch.time = data;

		/* we verify if the enable bit is set... */
		if (!(data & 1))
			break;

		/* ...but clean it before doing the actual write */
		vcpu->arch.time_offset = data & ~(PAGE_MASK | 1);

		vcpu->arch.time_page =
				gfn_to_page(vcpu->kvm, data >> PAGE_SHIFT);

		if (is_error_page(vcpu->arch.time_page)) {
			kvm_release_page_clean(vcpu->arch.time_page);
			vcpu->arch.time_page = NULL;
		}

		kvm_request_guest_time_update(vcpu);
		break;
	}
	case MSR_IA32_MCG_CTL:
	case MSR_IA32_MCG_STATUS:
	case MSR_IA32_MC0_CTL ... MSR_IA32_MC0_CTL + 4 * KVM_MAX_MCE_BANKS - 1:
		return set_msr_mce(vcpu, msr, data);

	/* Performance counters are not protected by a CPUID bit,
	 * so we should check all of them in the generic path for the sake of
	 * cross vendor migration.
	 * Writing a zero into the event select MSRs disables them,
	 * which we perfectly emulate ;-).  Any other value should be at least
	 * reported, some guests depend on them.
	 */
	case MSR_P6_EVNTSEL0:
	case MSR_P6_EVNTSEL1:
	case MSR_K7_EVNTSEL0:
	case MSR_K7_EVNTSEL1:
	case MSR_K7_EVNTSEL2:
	case MSR_K7_EVNTSEL3:
		if (data != 0)
			pr_unimpl(vcpu, "unimplemented perfctr wrmsr: "
				"0x%x data 0x%llx\n", msr, data);
		break;
	/* at least RHEL 4 unconditionally writes to the perfctr registers,
	 * so we ignore writes to make it happy.
	 */
	case MSR_P6_PERFCTR0:
	case MSR_P6_PERFCTR1:
	case MSR_K7_PERFCTR0:
	case MSR_K7_PERFCTR1:
	case MSR_K7_PERFCTR2:
	case MSR_K7_PERFCTR3:
		pr_unimpl(vcpu, "unimplemented perfctr wrmsr: "
			"0x%x data 0x%llx\n", msr, data);
		break;
	case HV_X64_MSR_GUEST_OS_ID ... HV_X64_MSR_SINT15:
		if (kvm_hv_msr_partition_wide(msr)) {
			int r;
			mutex_lock(&vcpu->kvm->lock);
			r = set_msr_hyperv_pw(vcpu, msr, data);
			mutex_unlock(&vcpu->kvm->lock);
			return r;
		} else
			return set_msr_hyperv(vcpu, msr, data);
		break;
	default:
		if (msr && (msr == vcpu->kvm->arch.xen_hvm_config.msr))
			return xen_hvm_config(vcpu, data);
		if (!ignore_msrs) {
			pr_unimpl(vcpu, "unhandled wrmsr: 0x%x data %llx\n",
				msr, data);
			return 1;
		} else {
			pr_unimpl(vcpu, "ignored wrmsr: 0x%x data %llx\n",
				msr, data);
			break;
		}
	}
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_set_msr_common);

/*
 * Reads an msr value (of 'msr_index') into 'pdata'.
 * Returns 0 on success, non-0 otherwise.
 * Assumes vcpu_load() was already called.
 */
int kvm_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
{
	return kvm_x86_ops->get_msr(vcpu, msr_index, pdata);
}
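
/*
 * Read back the deftype, fixed-range and variable-range MTRR state that
 * set_msr_mtrr() recorded in vcpu->arch.mtrr_state.
 */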
static int get_msr_mtrr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
{
	u64 *p = (u64 *)&vcpu->arch.mtrr_state.fixed_ranges;

	if (!msr_mtrr_valid(msr))
		return 1;

	if (msr == MSR_MTRRdefType)
		*pdata = vcpu->arch.mtrr_state.def_type +
			 (vcpu->arch.mtrr_state.enabled << 10);
	else if (msr == MSR_MTRRfix64K_00000)
		*pdata = p[0];
	else if (msr == MSR_MTRRfix16K_80000 || msr == MSR_MTRRfix16K_A0000)
		*pdata = p[1 + msr - MSR_MTRRfix16K_80000];
	else if (msr >= MSR_MTRRfix4K_C0000 && msr <= MSR_MTRRfix4K_F8000)
		*pdata = p[3 + msr - MSR_MTRRfix4K_C0000];
	else if (msr == MSR_IA32_CR_PAT)
		*pdata = vcpu->arch.pat;
	else {	/* Variable MTRRs */
		int idx, is_mtrr_mask;
		u64 *pt;

		idx = (msr - 0x200) / 2;
		is_mtrr_mask = msr - 0x200 - 2 * idx;
		if (!is_mtrr_mask)
			pt =
			  (u64 *)&vcpu->arch.mtrr_state.var_ranges[idx].base_lo;
		else
			pt =
			  (u64 *)&vcpu->arch.mtrr_state.var_ranges[idx].mask_lo;
		*pdata = *pt;
	}

	return 0;
}
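
/*
 * Emulate rdmsr for the machine-check MSRs.  The per-bank registers live
 * in the flat vcpu->arch.mce_banks array, four u64s per bank.
 */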
static int get_msr_mce(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
{
	u64 data;
	u64 mcg_cap = vcpu->arch.mcg_cap;
	unsigned bank_num = mcg_cap & 0xff;

	switch (msr) {
	case MSR_IA32_P5_MC_ADDR:
	case MSR_IA32_P5_MC_TYPE:
		data = 0;
		break;
	case MSR_IA32_MCG_CAP:
		data = vcpu->arch.mcg_cap;
		break;
	case MSR_IA32_MCG_CTL:
		if (!(mcg_cap & MCG_CTL_P))
			return 1;
		data = vcpu->arch.mcg_ctl;
		break;
	case MSR_IA32_MCG_STATUS:
		data = vcpu->arch.mcg_status;
		break;
	default:
		if (msr >= MSR_IA32_MC0_CTL &&
		    msr < MSR_IA32_MC0_CTL + 4 * bank_num) {
			u32 offset = msr - MSR_IA32_MC0_CTL;
			data = vcpu->arch.mce_banks[offset];
			break;
		}
		return 1;
	}
	*pdata = data;
	return 0;
}
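
/*
 * rdmsr side of the Hyper-V emulation: partition-wide MSRs first (guest
 * OS id and hypercall page), then the per-vcpu ones.
 */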
static int get_msr_hyperv_pw(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
{
	u64 data = 0;
	struct kvm *kvm = vcpu->kvm;

	switch (msr) {
	case HV_X64_MSR_GUEST_OS_ID:
		data = kvm->arch.hv_guest_os_id;
		break;
	case HV_X64_MSR_HYPERCALL:
		data = kvm->arch.hv_hypercall;
		break;
	default:
		pr_unimpl(vcpu, "Hyper-V unhandled rdmsr: 0x%x\n", msr);
		return 1;
	}

	*pdata = data;
	return 0;
}

static int get_msr_hyperv(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
{
	u64 data = 0;

	switch (msr) {
	case HV_X64_MSR_VP_INDEX: {
		int r;
		struct kvm_vcpu *v;
		kvm_for_each_vcpu(r, v, vcpu->kvm)
			if (v == vcpu)
				data = r;
		break;
	}
	case HV_X64_MSR_EOI:
		return kvm_hv_vapic_msr_read(vcpu, APIC_EOI, pdata);
	case HV_X64_MSR_ICR:
		return kvm_hv_vapic_msr_read(vcpu, APIC_ICR, pdata);
	case HV_X64_MSR_TPR:
		return kvm_hv_vapic_msr_read(vcpu, APIC_TASKPRI, pdata);
	default:
		pr_unimpl(vcpu, "Hyper-V unhandled rdmsr: 0x%x\n", msr);
		return 1;
	}
	*pdata = data;
	return 0;
}
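
/*
 * Common rdmsr emulation.  MSRs that KVM does not implement but that
 * guests probe unconditionally simply read as zero.
 */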
int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
{
	u64 data;

	switch (msr) {
	case MSR_IA32_PLATFORM_ID:
	case MSR_IA32_UCODE_REV:
	case MSR_IA32_EBL_CR_POWERON:
	case MSR_IA32_DEBUGCTLMSR:
	case MSR_IA32_LASTBRANCHFROMIP:
	case MSR_IA32_LASTBRANCHTOIP:
	case MSR_IA32_LASTINTFROMIP:
	case MSR_IA32_LASTINTTOIP:
	case MSR_K8_SYSCFG:
	case MSR_K7_HWCR:
	case MSR_VM_HSAVE_PA:
	case MSR_P6_PERFCTR0:
	case MSR_P6_PERFCTR1:
	case MSR_P6_EVNTSEL0:
	case MSR_P6_EVNTSEL1:
	case MSR_K7_EVNTSEL0:
	case MSR_K7_PERFCTR0:
	case MSR_K8_INT_PENDING_MSG:
	case MSR_AMD64_NB_CFG:
	case MSR_FAM10H_MMIO_CONF_BASE:
		data = 0;
		break;
	case MSR_MTRRcap:
		data = 0x500 | KVM_NR_VAR_MTRR;
		break;
	case 0x200 ... 0x2ff:
		return get_msr_mtrr(vcpu, msr, pdata);
	case 0xcd: /* fsb frequency */
		data = 3;
		break;
	case MSR_IA32_APICBASE:
		data = kvm_get_apic_base(vcpu);
		break;
	case APIC_BASE_MSR ... APIC_BASE_MSR + 0x3ff:
		return kvm_x2apic_msr_read(vcpu, msr, pdata);
	case MSR_IA32_MISC_ENABLE:
		data = vcpu->arch.ia32_misc_enable_msr;
		break;
	case MSR_IA32_PERF_STATUS:
		/* TSC increment by tick */
		data = 1000ULL;
		/* CPU multiplier */
		data |= (((uint64_t)4ULL) << 40);
		break;
	case MSR_EFER:
		data = vcpu->arch.efer;
		break;
	case MSR_KVM_WALL_CLOCK:
	case MSR_KVM_WALL_CLOCK_NEW:
		data = vcpu->kvm->arch.wall_clock;
		break;
	case MSR_KVM_SYSTEM_TIME:
	case MSR_KVM_SYSTEM_TIME_NEW:
		data = vcpu->arch.time;
		break;
	case MSR_IA32_P5_MC_ADDR:
	case MSR_IA32_P5_MC_TYPE:
	case MSR_IA32_MCG_CAP:
	case MSR_IA32_MCG_CTL:
	case MSR_IA32_MCG_STATUS:
	case MSR_IA32_MC0_CTL ... MSR_IA32_MC0_CTL + 4 * KVM_MAX_MCE_BANKS - 1:
		return get_msr_mce(vcpu, msr, pdata);
	case HV_X64_MSR_GUEST_OS_ID ... HV_X64_MSR_SINT15:
		if (kvm_hv_msr_partition_wide(msr)) {
			int r;
			mutex_lock(&vcpu->kvm->lock);
			r = get_msr_hyperv_pw(vcpu, msr, pdata);
			mutex_unlock(&vcpu->kvm->lock);
			return r;
		} else
			return get_msr_hyperv(vcpu, msr, pdata);
		break;
	default:
		if (!ignore_msrs) {
			pr_unimpl(vcpu, "unhandled rdmsr: 0x%x\n", msr);
			return 1;
		} else {
			pr_unimpl(vcpu, "ignored rdmsr: 0x%x\n", msr);
			data = 0;
		}
		break;
	}
	*pdata = data;
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_get_msr_common);

/*
 * Read or write a bunch of msrs. All parameters are kernel addresses.
 *
 * @return number of msrs set successfully.
 */
static int __msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs *msrs,
		    struct kvm_msr_entry *entries,
		    int (*do_msr)(struct kvm_vcpu *vcpu,
				  unsigned index, u64 *data))
{
	int i, idx;

	vcpu_load(vcpu);
	idx = srcu_read_lock(&vcpu->kvm->srcu);
	for (i = 0; i < msrs->nmsrs; ++i)
		if (do_msr(vcpu, entries[i].index, &entries[i].data))
			break;
	srcu_read_unlock(&vcpu->kvm->srcu, idx);
	vcpu_put(vcpu);

	return i;
}

/*
 * Read or write a bunch of msrs. Parameters are user addresses.
 *
 * @return number of msrs set successfully.
 */
static int msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs __user *user_msrs,
		  int (*do_msr)(struct kvm_vcpu *vcpu,
				unsigned index, u64 *data),
		  int writeback)
{
	struct kvm_msrs msrs;
	struct kvm_msr_entry *entries;
	int r, n;
	unsigned size;

	r = -EFAULT;
	if (copy_from_user(&msrs, user_msrs, sizeof msrs))
		goto out;

	r = -E2BIG;
	if (msrs.nmsrs >= MAX_IO_MSRS)
		goto out;

	r = -ENOMEM;
	size = sizeof(struct kvm_msr_entry) * msrs.nmsrs;
	entries = kmalloc(size, GFP_KERNEL);
	if (!entries)
		goto out;

	r = -EFAULT;
	if (copy_from_user(entries, user_msrs->entries, size))
		goto out_free;

	r = n = __msr_io(vcpu, &msrs, entries, do_msr);
	if (r < 0)
		goto out_free;

	r = -EFAULT;
	if (writeback && copy_to_user(user_msrs->entries, entries, size))
		goto out_free;

	r = n;

out_free:
	kfree(entries);
out:
	return r;
}
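
/*
 * Report which optional KVM capabilities the x86 arch code supports.
 * Non-boolean capabilities return a count or offset instead of 0/1.
 */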
int kvm_dev_ioctl_check_extension(long ext)
{
	int r;

	switch (ext) {
	case KVM_CAP_IRQCHIP:
	case KVM_CAP_HLT:
	case KVM_CAP_MMU_SHADOW_CACHE_CONTROL:
	case KVM_CAP_SET_TSS_ADDR:
	case KVM_CAP_EXT_CPUID:
	case KVM_CAP_CLOCKSOURCE:
	case KVM_CAP_PIT:
	case KVM_CAP_NOP_IO_DELAY:
	case KVM_CAP_MP_STATE:
	case KVM_CAP_SYNC_MMU:
	case KVM_CAP_REINJECT_CONTROL:
	case KVM_CAP_IRQ_INJECT_STATUS:
	case KVM_CAP_ASSIGN_DEV_IRQ:
	case KVM_CAP_IRQFD:
	case KVM_CAP_IOEVENTFD:
	case KVM_CAP_PIT2:
	case KVM_CAP_PIT_STATE2:
	case KVM_CAP_SET_IDENTITY_MAP_ADDR:
	case KVM_CAP_XEN_HVM:
	case KVM_CAP_ADJUST_CLOCK:
	case KVM_CAP_VCPU_EVENTS:
	case KVM_CAP_HYPERV:
	case KVM_CAP_HYPERV_VAPIC:
	case KVM_CAP_HYPERV_SPIN:
	case KVM_CAP_PCI_SEGMENT:
	case KVM_CAP_DEBUGREGS:
	case KVM_CAP_X86_ROBUST_SINGLESTEP:
		r = 1;
		break;
	case KVM_CAP_COALESCED_MMIO:
		r = KVM_COALESCED_MMIO_PAGE_OFFSET;
		break;
	case KVM_CAP_VAPIC:
		r = !kvm_x86_ops->cpu_has_accelerated_tpr();
		break;
	case KVM_CAP_NR_VCPUS:
		r = KVM_MAX_VCPUS;
		break;
	case KVM_CAP_NR_MEMSLOTS:
		r = KVM_MEMORY_SLOTS;
		break;
	case KVM_CAP_PV_MMU:	/* obsolete */
		r = 0;
		break;
	case KVM_CAP_IOMMU:
		r = iommu_found();
		break;
	case KVM_CAP_MCE:
		r = KVM_MAX_MCE_BANKS;
		break;
	default:
		r = 0;
		break;
	}
	return r;
}
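
/*
 * x86-specific ioctls on the /dev/kvm fd: enumerating the saveable MSRs,
 * the supported CPUID set and the supported MCE capabilities.
 */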
long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg)
{
	void __user *argp = (void __user *)arg;
	long r;

	switch (ioctl) {
	case KVM_GET_MSR_INDEX_LIST: {
		struct kvm_msr_list __user *user_msr_list = argp;
		struct kvm_msr_list msr_list;
		unsigned n;

		r = -EFAULT;
		if (copy_from_user(&msr_list, user_msr_list, sizeof msr_list))
			goto out;
		n = msr_list.nmsrs;
		msr_list.nmsrs = num_msrs_to_save + ARRAY_SIZE(emulated_msrs);
		if (copy_to_user(user_msr_list, &msr_list, sizeof msr_list))
			goto out;
		r = -E2BIG;
		if (n < msr_list.nmsrs)
			goto out;
		r = -EFAULT;
		if (copy_to_user(user_msr_list->indices, &msrs_to_save,
				 num_msrs_to_save * sizeof(u32)))
			goto out;
		if (copy_to_user(user_msr_list->indices + num_msrs_to_save,
				 &emulated_msrs,
				 ARRAY_SIZE(emulated_msrs) * sizeof(u32)))
			goto out;
		r = 0;
		break;
	}
	case KVM_GET_SUPPORTED_CPUID: {
		struct kvm_cpuid2 __user *cpuid_arg = argp;
		struct kvm_cpuid2 cpuid;

		r = -EFAULT;
		if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
			goto out;
		r = kvm_dev_ioctl_get_supported_cpuid(&cpuid,
						      cpuid_arg->entries);
		if (r)
			goto out;

		r = -EFAULT;
		if (copy_to_user(cpuid_arg, &cpuid, sizeof cpuid))
			goto out;
		r = 0;
		break;
	}
	case KVM_X86_GET_MCE_CAP_SUPPORTED: {
		u64 mce_cap;

		mce_cap = KVM_MCE_CAP_SUPPORTED;
		r = -EFAULT;
		if (copy_to_user(argp, &mce_cap, sizeof mce_cap))
			goto out;
		r = 0;
		break;
	}
	default:
		r = -EINVAL;
	}
out:
	return r;
}

void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	kvm_x86_ops->vcpu_load(vcpu, cpu);
	if (unlikely(per_cpu(cpu_tsc_khz, cpu) == 0)) {
		unsigned long khz = cpufreq_quick_get(cpu);
		if (!khz)
			khz = tsc_khz;
		per_cpu(cpu_tsc_khz, cpu) = khz;
	}
	kvm_request_guest_time_update(vcpu);
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	kvm_x86_ops->vcpu_put(vcpu);
	kvm_put_guest_fpu(vcpu);
}

static int is_efer_nx(void)
{
	unsigned long long efer = 0;

	rdmsrl_safe(MSR_EFER, &efer);
	return efer & EFER_NX;
}
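
/*
 * If the host does not run with EFER.NX set, mask the NX bit (bit 20)
 * out of the guest's 0x80000001 CPUID leaf so the guest cannot rely on
 * a feature the host will not honour.
 */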
static void cpuid_fix_nx_cap(struct kvm_vcpu *vcpu)
{
	int i;
	struct kvm_cpuid_entry2 *e, *entry;

	entry = NULL;
	for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
		e = &vcpu->arch.cpuid_entries[i];
		if (e->function == 0x80000001) {
			entry = e;
			break;
		}
	}
	if (entry && (entry->edx & (1 << 20)) && !is_efer_nx()) {
		entry->edx &= ~(1 << 20);
		printk(KERN_INFO "kvm: guest NX capability removed\n");
	}
}

/* when an old userspace process fills a new kernel module */
static int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu,
				    struct kvm_cpuid *cpuid,
				    struct kvm_cpuid_entry __user *entries)
{
	int r, i;
	struct kvm_cpuid_entry *cpuid_entries;

	r = -E2BIG;
	if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
		goto out;
	r = -ENOMEM;
	cpuid_entries = vmalloc(sizeof(struct kvm_cpuid_entry) * cpuid->nent);
	if (!cpuid_entries)
		goto out;
	r = -EFAULT;
	if (copy_from_user(cpuid_entries, entries,
			   cpuid->nent * sizeof(struct kvm_cpuid_entry)))
		goto out_free;
	vcpu_load(vcpu);
	for (i = 0; i < cpuid->nent; i++) {
		vcpu->arch.cpuid_entries[i].function = cpuid_entries[i].function;
		vcpu->arch.cpuid_entries[i].eax = cpuid_entries[i].eax;
		vcpu->arch.cpuid_entries[i].ebx = cpuid_entries[i].ebx;
		vcpu->arch.cpuid_entries[i].ecx = cpuid_entries[i].ecx;
		vcpu->arch.cpuid_entries[i].edx = cpuid_entries[i].edx;
		vcpu->arch.cpuid_entries[i].index = 0;
		vcpu->arch.cpuid_entries[i].flags = 0;
		vcpu->arch.cpuid_entries[i].padding[0] = 0;
		vcpu->arch.cpuid_entries[i].padding[1] = 0;
		vcpu->arch.cpuid_entries[i].padding[2] = 0;
	}
	vcpu->arch.cpuid_nent = cpuid->nent;
	cpuid_fix_nx_cap(vcpu);
	r = 0;
	kvm_apic_set_version(vcpu);
	kvm_x86_ops->cpuid_update(vcpu);
	vcpu_put(vcpu);

out_free:
	vfree(cpuid_entries);
out:
	return r;
}

static int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
				     struct kvm_cpuid2 *cpuid,
				     struct kvm_cpuid_entry2 __user *entries)
{
	int r;

	r = -E2BIG;
	if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
		goto out;
	r = -EFAULT;
	if (copy_from_user(&vcpu->arch.cpuid_entries, entries,
			   cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
		goto out;
	vcpu_load(vcpu);
	vcpu->arch.cpuid_nent = cpuid->nent;
	kvm_apic_set_version(vcpu);
	kvm_x86_ops->cpuid_update(vcpu);
	vcpu_put(vcpu);
	return 0;

out:
	return r;
}

static int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
				     struct kvm_cpuid2 *cpuid,
				     struct kvm_cpuid_entry2 __user *entries)
{
	int r;

	vcpu_load(vcpu);
	r = -E2BIG;
	if (cpuid->nent < vcpu->arch.cpuid_nent)
		goto out;
	r = -EFAULT;
	if (copy_to_user(entries, &vcpu->arch.cpuid_entries,
			 vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
		goto out;
	/* the success path must also drop the reference taken by vcpu_load() */
	vcpu_put(vcpu);
	return 0;

out:
	cpuid->nent = vcpu->arch.cpuid_nent;
	vcpu_put(vcpu);
	return r;
}
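
/*
 * Fill one kvm_cpuid_entry2 by executing CPUID on the host for the given
 * function/index pair.  do_cpuid_ent() below then masks the result down
 * to the feature set KVM can actually virtualize.
 */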
static void do_cpuid_1_ent(struct kvm_cpuid_entry2 *entry, u32 function,
			   u32 index)
{
	entry->function = function;
	entry->index = index;
	cpuid_count(entry->function, entry->index,
		    &entry->eax, &entry->ebx, &entry->ecx, &entry->edx);
	entry->flags = 0;
}

#define F(x) bit(X86_FEATURE_##x)

static void do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
			 u32 index, int *nent, int maxnent)
{
	unsigned f_nx = is_efer_nx() ? F(NX) : 0;
#ifdef CONFIG_X86_64
	unsigned f_gbpages = (kvm_x86_ops->get_lpage_level() == PT_PDPE_LEVEL)
				? F(GBPAGES) : 0;
	unsigned f_lm = F(LM);
#else
	unsigned f_gbpages = 0;
	unsigned f_lm = 0;
#endif
	unsigned f_rdtscp = kvm_x86_ops->rdtscp_supported() ? F(RDTSCP) : 0;

	/* cpuid 1.edx */
	const u32 kvm_supported_word0_x86_features =
		F(FPU) | F(VME) | F(DE) | F(PSE) |
		F(TSC) | F(MSR) | F(PAE) | F(MCE) |
		F(CX8) | F(APIC) | 0 /* Reserved */ | F(SEP) |
		F(MTRR) | F(PGE) | F(MCA) | F(CMOV) |
		F(PAT) | F(PSE36) | 0 /* PSN */ | F(CLFLSH) |
		0 /* Reserved, DS, ACPI */ | F(MMX) |
		F(FXSR) | F(XMM) | F(XMM2) | F(SELFSNOOP) |
		0 /* HTT, TM, Reserved, PBE */;
	/* cpuid 0x80000001.edx */
	const u32 kvm_supported_word1_x86_features =
		F(FPU) | F(VME) | F(DE) | F(PSE) |
		F(TSC) | F(MSR) | F(PAE) | F(MCE) |
		F(CX8) | F(APIC) | 0 /* Reserved */ | F(SYSCALL) |
		F(MTRR) | F(PGE) | F(MCA) | F(CMOV) |
		F(PAT) | F(PSE36) | 0 /* Reserved */ |
		f_nx | 0 /* Reserved */ | F(MMXEXT) | F(MMX) |
		F(FXSR) | F(FXSR_OPT) | f_gbpages | f_rdtscp |
		0 /* Reserved */ | f_lm | F(3DNOWEXT) | F(3DNOW);
	/* cpuid 1.ecx */
	const u32 kvm_supported_word4_x86_features =
		F(XMM3) | 0 /* Reserved, DTES64, MONITOR */ |
		0 /* DS-CPL, VMX, SMX, EST */ |
		0 /* TM2 */ | F(SSSE3) | 0 /* CNXT-ID */ | 0 /* Reserved */ |
		0 /* Reserved */ | F(CX16) | 0 /* xTPR Update, PDCM */ |
		0 /* Reserved, DCA */ | F(XMM4_1) |
		F(XMM4_2) | F(X2APIC) | F(MOVBE) | F(POPCNT) |
		0 /* Reserved, XSAVE, OSXSAVE */;
	/* cpuid 0x80000001.ecx */
	const u32 kvm_supported_word6_x86_features =
		F(LAHF_LM) | F(CMP_LEGACY) | F(SVM) | 0 /* ExtApicSpace */ |
		F(CR8_LEGACY) | F(ABM) | F(SSE4A) | F(MISALIGNSSE) |
		F(3DNOWPREFETCH) | 0 /* OSVW */ | 0 /* IBS */ | F(SSE5) |
		0 /* SKINIT */ | 0 /* WDT */;

	/* all calls to cpuid_count() should be made on the same cpu */
	get_cpu();
	do_cpuid_1_ent(entry, function, index);
	++*nent;

	switch (function) {
	case 0:
		entry->eax = min(entry->eax, (u32)0xb);
		break;
	case 1:
		entry->edx &= kvm_supported_word0_x86_features;
		entry->ecx &= kvm_supported_word4_x86_features;
		/* we support x2apic emulation even if host does not support
		 * it since we emulate x2apic in software */
		entry->ecx |= F(X2APIC);
		break;
	/* function 2 entries are STATEFUL. That is, repeated cpuid commands
	 * may return different values. This forces us to get_cpu() before
	 * issuing the first command, and also to emulate this annoying behavior
	 * in kvm_emulate_cpuid() using KVM_CPUID_FLAG_STATE_READ_NEXT */
	case 2: {
		int t, times = entry->eax & 0xff;

		entry->flags |= KVM_CPUID_FLAG_STATEFUL_FUNC;
		entry->flags |= KVM_CPUID_FLAG_STATE_READ_NEXT;
		for (t = 1; t < times && *nent < maxnent; ++t) {
			do_cpuid_1_ent(&entry[t], function, 0);
			entry[t].flags |= KVM_CPUID_FLAG_STATEFUL_FUNC;
			++*nent;
		}
		break;
	}
	/* function 4 and 0xb have additional index. */
	case 4: {
		int i, cache_type;

		entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
		/* read more entries until cache_type is zero */
		for (i = 1; *nent < maxnent; ++i) {
			cache_type = entry[i - 1].eax & 0x1f;
			if (!cache_type)
				break;
			do_cpuid_1_ent(&entry[i], function, i);
			entry[i].flags |=
			       KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
			++*nent;
		}
		break;
	}
	case 0xb: {
		int i, level_type;

		entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
		/* read more entries until level_type is zero */
		for (i = 1; *nent < maxnent; ++i) {
			level_type = entry[i - 1].ecx & 0xff00;
			if (!level_type)
				break;
			do_cpuid_1_ent(&entry[i], function, i);
			entry[i].flags |=
			       KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
			++*nent;
		}
		break;
	}
	case KVM_CPUID_SIGNATURE: {
		char signature[12] = "KVMKVMKVM\0\0";
		u32 *sigptr = (u32 *)signature;
		entry->eax = 0;
		entry->ebx = sigptr[0];
		entry->ecx = sigptr[1];
		entry->edx = sigptr[2];
		break;
	}
	case KVM_CPUID_FEATURES:
		entry->eax = (1 << KVM_FEATURE_CLOCKSOURCE) |
			     (1 << KVM_FEATURE_NOP_IO_DELAY) |
			     (1 << KVM_FEATURE_CLOCKSOURCE2) |
			     (1 << KVM_FEATURE_CLOCKSOURCE_STABLE_BIT);
		entry->ebx = 0;
		entry->ecx = 0;
		entry->edx = 0;
		break;
	case 0x80000000:
		entry->eax = min(entry->eax, 0x8000001a);
		break;
	case 0x80000001:
		entry->edx &= kvm_supported_word1_x86_features;
		entry->ecx &= kvm_supported_word6_x86_features;
		break;
	}

	kvm_x86_ops->set_supported_cpuid(function, entry);

	put_cpu();
}

#undef F
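
/*
 * Build the full KVM_GET_SUPPORTED_CPUID table: standard leaves up to
 * the host's limit, extended 0x8000xxxx leaves, and finally the KVM
 * paravirt signature and feature leaves.
 */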
static int kvm_dev_ioctl_get_supported_cpuid(struct kvm_cpuid2 *cpuid,
					     struct kvm_cpuid_entry2 __user *entries)
{
	struct kvm_cpuid_entry2 *cpuid_entries;
	int limit, nent = 0, r = -E2BIG;
	u32 func;

	if (cpuid->nent < 1)
		goto out;
	if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
		cpuid->nent = KVM_MAX_CPUID_ENTRIES;
	r = -ENOMEM;
	cpuid_entries = vmalloc(sizeof(struct kvm_cpuid_entry2) * cpuid->nent);
	if (!cpuid_entries)
		goto out;

	do_cpuid_ent(&cpuid_entries[0], 0, 0, &nent, cpuid->nent);
	limit = cpuid_entries[0].eax;
	for (func = 1; func <= limit && nent < cpuid->nent; ++func)
		do_cpuid_ent(&cpuid_entries[nent], func, 0,
			     &nent, cpuid->nent);
	r = -E2BIG;
	if (nent >= cpuid->nent)
		goto out_free;

	do_cpuid_ent(&cpuid_entries[nent], 0x80000000, 0, &nent, cpuid->nent);
	limit = cpuid_entries[nent - 1].eax;
	for (func = 0x80000001; func <= limit && nent < cpuid->nent; ++func)
		do_cpuid_ent(&cpuid_entries[nent], func, 0,
			     &nent, cpuid->nent);

	r = -E2BIG;
	if (nent >= cpuid->nent)
		goto out_free;

	do_cpuid_ent(&cpuid_entries[nent], KVM_CPUID_SIGNATURE, 0, &nent,
		     cpuid->nent);

	r = -E2BIG;
	if (nent >= cpuid->nent)
		goto out_free;

	do_cpuid_ent(&cpuid_entries[nent], KVM_CPUID_FEATURES, 0, &nent,
		     cpuid->nent);

	r = -E2BIG;
	if (nent >= cpuid->nent)
		goto out_free;

	r = -EFAULT;
	if (copy_to_user(entries, cpuid_entries,
			 nent * sizeof(struct kvm_cpuid_entry2)))
		goto out_free;
	cpuid->nent = nent;
	r = 0;

out_free:
	vfree(cpuid_entries);
out:
	return r;
}

static int kvm_vcpu_ioctl_get_lapic(struct kvm_vcpu *vcpu,
				    struct kvm_lapic_state *s)
{
	vcpu_load(vcpu);
	memcpy(s->regs, vcpu->arch.apic->regs, sizeof *s);
	vcpu_put(vcpu);

	return 0;
}

static int kvm_vcpu_ioctl_set_lapic(struct kvm_vcpu *vcpu,
				    struct kvm_lapic_state *s)
{
	vcpu_load(vcpu);
	memcpy(vcpu->arch.apic->regs, s->regs, sizeof *s);
	kvm_apic_post_state_restore(vcpu);
	update_cr8_intercept(vcpu);
	vcpu_put(vcpu);

	return 0;
}

static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
				    struct kvm_interrupt *irq)
{
	if (irq->irq < 0 || irq->irq >= 256)
		return -EINVAL;
	if (irqchip_in_kernel(vcpu->kvm))
		return -ENXIO;
	vcpu_load(vcpu);

	kvm_queue_interrupt(vcpu, irq->irq, false);

	vcpu_put(vcpu);

	return 0;
}

static int kvm_vcpu_ioctl_nmi(struct kvm_vcpu *vcpu)
{
	vcpu_load(vcpu);
	kvm_inject_nmi(vcpu);
	vcpu_put(vcpu);

	return 0;
}

static int vcpu_ioctl_tpr_access_reporting(struct kvm_vcpu *vcpu,
					   struct kvm_tpr_access_ctl *tac)
{
	if (tac->flags)
		return -EINVAL;
	vcpu->arch.tpr_access_reporting = !!tac->enabled;
	return 0;
}
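
/*
 * KVM_X86_SETUP_MCE: validate the requested MCG capabilities and
 * initialize all MCE control registers to all ones, i.e. fully enabled.
 */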
static int kvm_vcpu_ioctl_x86_setup_mce(struct kvm_vcpu *vcpu,
					u64 mcg_cap)
{
	int r;
	unsigned bank_num = mcg_cap & 0xff, bank;

	vcpu_load(vcpu);
	r = -EINVAL;
	if (!bank_num || bank_num >= KVM_MAX_MCE_BANKS)
		goto out;
	if (mcg_cap & ~(KVM_MCE_CAP_SUPPORTED | 0xff | 0xff0000))
		goto out;
	r = 0;
	vcpu->arch.mcg_cap = mcg_cap;
	/* Init IA32_MCG_CTL to all 1s */
	if (mcg_cap & MCG_CTL_P)
		vcpu->arch.mcg_ctl = ~(u64)0;
	/* Init IA32_MCi_CTL to all 1s */
	for (bank = 0; bank < bank_num; bank++)
		vcpu->arch.mce_banks[bank*4] = ~(u64)0;
out:
	vcpu_put(vcpu);
	return r;
}
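
/*
 * KVM_X86_SET_MCE: inject a machine check into the guest, honouring the
 * MCG_CTL/MCi_CTL gates and setting MCI_STATUS_OVER when an earlier
 * valid event would be overwritten.
 */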
static int kvm_vcpu_ioctl_x86_set_mce(struct kvm_vcpu *vcpu,
				      struct kvm_x86_mce *mce)
{
	u64 mcg_cap = vcpu->arch.mcg_cap;
	unsigned bank_num = mcg_cap & 0xff;
	u64 *banks = vcpu->arch.mce_banks;

	if (mce->bank >= bank_num || !(mce->status & MCI_STATUS_VAL))
		return -EINVAL;
	/*
	 * if IA32_MCG_CTL is not all 1s, the uncorrected error
	 * reporting is disabled
	 */
	if ((mce->status & MCI_STATUS_UC) && (mcg_cap & MCG_CTL_P) &&
	    vcpu->arch.mcg_ctl != ~(u64)0)
		return 0;
	banks += 4 * mce->bank;
	/*
	 * if IA32_MCi_CTL is not all 1s, the uncorrected error
	 * reporting is disabled for the bank
	 */
	if ((mce->status & MCI_STATUS_UC) && banks[0] != ~(u64)0)
		return 0;
	if (mce->status & MCI_STATUS_UC) {
		if ((vcpu->arch.mcg_status & MCG_STATUS_MCIP) ||
		    !kvm_read_cr4_bits(vcpu, X86_CR4_MCE)) {
			printk(KERN_DEBUG "kvm: set_mce: "
			       "injects mce exception while "
			       "previous one is in progress!\n");
			set_bit(KVM_REQ_TRIPLE_FAULT, &vcpu->requests);
			return 0;
		}
		if (banks[1] & MCI_STATUS_VAL)
			mce->status |= MCI_STATUS_OVER;
		banks[2] = mce->addr;
		banks[3] = mce->misc;
		vcpu->arch.mcg_status = mce->mcg_status;
		banks[1] = mce->status;
		kvm_queue_exception(vcpu, MC_VECTOR);
	} else if (!(banks[1] & MCI_STATUS_VAL)
		   || !(banks[1] & MCI_STATUS_UC)) {
		if (banks[1] & MCI_STATUS_VAL)
			mce->status |= MCI_STATUS_OVER;
		banks[2] = mce->addr;
		banks[3] = mce->misc;
		banks[1] = mce->status;
	} else
		banks[1] |= MCI_STATUS_OVER;
	return 0;
}
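
/*
 * Save/restore the pending exception, interrupt and NMI state for the
 * KVM_GET/SET_VCPU_EVENTS ioctls; events->flags says which optional
 * fields are valid on the set side.
 */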
static void kvm_vcpu_ioctl_x86_get_vcpu_events(struct kvm_vcpu *vcpu,
					       struct kvm_vcpu_events *events)
{
	vcpu_load(vcpu);

	events->exception.injected =
		vcpu->arch.exception.pending &&
		!kvm_exception_is_soft(vcpu->arch.exception.nr);
	events->exception.nr = vcpu->arch.exception.nr;
	events->exception.has_error_code = vcpu->arch.exception.has_error_code;
	events->exception.error_code = vcpu->arch.exception.error_code;

	events->interrupt.injected =
		vcpu->arch.interrupt.pending && !vcpu->arch.interrupt.soft;
	events->interrupt.nr = vcpu->arch.interrupt.nr;
	events->interrupt.soft = 0;
	events->interrupt.shadow =
		kvm_x86_ops->get_interrupt_shadow(vcpu,
			KVM_X86_SHADOW_INT_MOV_SS | KVM_X86_SHADOW_INT_STI);

	events->nmi.injected = vcpu->arch.nmi_injected;
	events->nmi.pending = vcpu->arch.nmi_pending;
	events->nmi.masked = kvm_x86_ops->get_nmi_mask(vcpu);

	events->sipi_vector = vcpu->arch.sipi_vector;

	events->flags = (KVM_VCPUEVENT_VALID_NMI_PENDING
			 | KVM_VCPUEVENT_VALID_SIPI_VECTOR
			 | KVM_VCPUEVENT_VALID_SHADOW);

	vcpu_put(vcpu);
}

static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu,
					      struct kvm_vcpu_events *events)
{
	if (events->flags & ~(KVM_VCPUEVENT_VALID_NMI_PENDING
			      | KVM_VCPUEVENT_VALID_SIPI_VECTOR
			      | KVM_VCPUEVENT_VALID_SHADOW))
		return -EINVAL;

	vcpu_load(vcpu);

	vcpu->arch.exception.pending = events->exception.injected;
	vcpu->arch.exception.nr = events->exception.nr;
	vcpu->arch.exception.has_error_code = events->exception.has_error_code;
	vcpu->arch.exception.error_code = events->exception.error_code;

	vcpu->arch.interrupt.pending = events->interrupt.injected;
	vcpu->arch.interrupt.nr = events->interrupt.nr;
	vcpu->arch.interrupt.soft = events->interrupt.soft;
	if (vcpu->arch.interrupt.pending && irqchip_in_kernel(vcpu->kvm))
		kvm_pic_clear_isr_ack(vcpu->kvm);
	if (events->flags & KVM_VCPUEVENT_VALID_SHADOW)
		kvm_x86_ops->set_interrupt_shadow(vcpu,
						  events->interrupt.shadow);

	vcpu->arch.nmi_injected = events->nmi.injected;
	if (events->flags & KVM_VCPUEVENT_VALID_NMI_PENDING)
		vcpu->arch.nmi_pending = events->nmi.pending;
	kvm_x86_ops->set_nmi_mask(vcpu, events->nmi.masked);

	if (events->flags & KVM_VCPUEVENT_VALID_SIPI_VECTOR)
		vcpu->arch.sipi_vector = events->sipi_vector;

	vcpu_put(vcpu);

	return 0;
}

static void kvm_vcpu_ioctl_x86_get_debugregs(struct kvm_vcpu *vcpu,
					     struct kvm_debugregs *dbgregs)
{
	vcpu_load(vcpu);

	memcpy(dbgregs->db, vcpu->arch.db, sizeof(vcpu->arch.db));
	dbgregs->dr6 = vcpu->arch.dr6;
	dbgregs->dr7 = vcpu->arch.dr7;
	dbgregs->flags = 0;

	vcpu_put(vcpu);
}

static int kvm_vcpu_ioctl_x86_set_debugregs(struct kvm_vcpu *vcpu,
					    struct kvm_debugregs *dbgregs)
{
	if (dbgregs->flags)
		return -EINVAL;

	vcpu_load(vcpu);

	memcpy(vcpu->arch.db, dbgregs->db, sizeof(vcpu->arch.db));
	vcpu->arch.dr6 = dbgregs->dr6;
	vcpu->arch.dr7 = dbgregs->dr7;

	vcpu_put(vcpu);

	return 0;
}
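
/*
 * Dispatcher for the x86-specific vcpu ioctls.  Each case copies its
 * argument in from user space, calls the helper above and copies any
 * result back out.
 */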
long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	int r;
	struct kvm_lapic_state *lapic = NULL;

	switch (ioctl) {
	case KVM_GET_LAPIC: {
		r = -EINVAL;
		if (!vcpu->arch.apic)
			goto out;
		lapic = kzalloc(sizeof(struct kvm_lapic_state), GFP_KERNEL);
		r = -ENOMEM;
		if (!lapic)
			goto out;
		r = kvm_vcpu_ioctl_get_lapic(vcpu, lapic);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, lapic, sizeof(struct kvm_lapic_state)))
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_LAPIC: {
		r = -EINVAL;
		if (!vcpu->arch.apic)
			goto out;
		lapic = kmalloc(sizeof(struct kvm_lapic_state), GFP_KERNEL);
		r = -ENOMEM;
		if (!lapic)
			goto out;
		r = -EFAULT;
		if (copy_from_user(lapic, argp, sizeof(struct kvm_lapic_state)))
			goto out;
		r = kvm_vcpu_ioctl_set_lapic(vcpu, lapic);
		if (r)
			goto out;
		r = 0;
		break;
	}
	case KVM_INTERRUPT: {
		struct kvm_interrupt irq;

		r = -EFAULT;
		if (copy_from_user(&irq, argp, sizeof irq))
			goto out;
		r = kvm_vcpu_ioctl_interrupt(vcpu, &irq);
		if (r)
			goto out;
		r = 0;
		break;
	}
	case KVM_NMI: {
		r = kvm_vcpu_ioctl_nmi(vcpu);
		if (r)
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_CPUID: {
		struct kvm_cpuid __user *cpuid_arg = argp;
		struct kvm_cpuid cpuid;

		r = -EFAULT;
		if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
			goto out;
		r = kvm_vcpu_ioctl_set_cpuid(vcpu, &cpuid, cpuid_arg->entries);
		if (r)
			goto out;
		break;
	}
	case KVM_SET_CPUID2: {
		struct kvm_cpuid2 __user *cpuid_arg = argp;
		struct kvm_cpuid2 cpuid;

		r = -EFAULT;
		if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
			goto out;
		r = kvm_vcpu_ioctl_set_cpuid2(vcpu, &cpuid,
					      cpuid_arg->entries);
		if (r)
			goto out;
		break;
	}
	case KVM_GET_CPUID2: {
		struct kvm_cpuid2 __user *cpuid_arg = argp;
		struct kvm_cpuid2 cpuid;

		r = -EFAULT;
		if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
			goto out;
		r = kvm_vcpu_ioctl_get_cpuid2(vcpu, &cpuid,
					      cpuid_arg->entries);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(cpuid_arg, &cpuid, sizeof cpuid))
			goto out;
		r = 0;
		break;
	}
	case KVM_GET_MSRS:
		r = msr_io(vcpu, argp, kvm_get_msr, 1);
		break;
	case KVM_SET_MSRS:
		r = msr_io(vcpu, argp, do_set_msr, 0);
		break;
	case KVM_TPR_ACCESS_REPORTING: {
		struct kvm_tpr_access_ctl tac;

		r = -EFAULT;
		if (copy_from_user(&tac, argp, sizeof tac))
			goto out;
		r = vcpu_ioctl_tpr_access_reporting(vcpu, &tac);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, &tac, sizeof tac))
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_VAPIC_ADDR: {
		struct kvm_vapic_addr va;

		r = -EINVAL;
		if (!irqchip_in_kernel(vcpu->kvm))
			goto out;
		r = -EFAULT;
		if (copy_from_user(&va, argp, sizeof va))
			goto out;
		r = 0;
		kvm_lapic_set_vapic_addr(vcpu, va.vapic_addr);
		break;
	}
	case KVM_X86_SETUP_MCE: {
		u64 mcg_cap;

		r = -EFAULT;
		if (copy_from_user(&mcg_cap, argp, sizeof mcg_cap))
			goto out;
		r = kvm_vcpu_ioctl_x86_setup_mce(vcpu, mcg_cap);
		break;
	}
	case KVM_X86_SET_MCE: {
		struct kvm_x86_mce mce;

		r = -EFAULT;
		if (copy_from_user(&mce, argp, sizeof mce))
			goto out;
		vcpu_load(vcpu);
		r = kvm_vcpu_ioctl_x86_set_mce(vcpu, &mce);
		vcpu_put(vcpu);
		break;
	}
	case KVM_GET_VCPU_EVENTS: {
		struct kvm_vcpu_events events;

		kvm_vcpu_ioctl_x86_get_vcpu_events(vcpu, &events);

		r = -EFAULT;
		if (copy_to_user(argp, &events, sizeof(struct kvm_vcpu_events)))
			break;
		r = 0;
		break;
	}
	case KVM_SET_VCPU_EVENTS: {
		struct kvm_vcpu_events events;

		r = -EFAULT;
		if (copy_from_user(&events, argp, sizeof(struct kvm_vcpu_events)))
			break;

		r = kvm_vcpu_ioctl_x86_set_vcpu_events(vcpu, &events);
		break;
	}
	case KVM_GET_DEBUGREGS: {
		struct kvm_debugregs dbgregs;

		kvm_vcpu_ioctl_x86_get_debugregs(vcpu, &dbgregs);

		r = -EFAULT;
		if (copy_to_user(argp, &dbgregs,
				 sizeof(struct kvm_debugregs)))
			break;
		r = 0;
		break;
	}
	case KVM_SET_DEBUGREGS: {
		struct kvm_debugregs dbgregs;

		r = -EFAULT;
		if (copy_from_user(&dbgregs, argp,
				   sizeof(struct kvm_debugregs)))
			break;

		r = kvm_vcpu_ioctl_x86_set_debugregs(vcpu, &dbgregs);
		break;
	}
	default:
		r = -EINVAL;
	}
out:
	kfree(lapic);
	return r;
}

static int kvm_vm_ioctl_set_tss_addr(struct kvm *kvm, unsigned long addr)
{
	int ret;

	if (addr > (unsigned int)(-3 * PAGE_SIZE))
		return -1;
	ret = kvm_x86_ops->set_tss_addr(kvm, addr);
	return ret;
}

static int kvm_vm_ioctl_set_identity_map_addr(struct kvm *kvm,
					      u64 ident_addr)
{
	kvm->arch.ept_identity_map_addr = ident_addr;
	return 0;
}

static int kvm_vm_ioctl_set_nr_mmu_pages(struct kvm *kvm,
					 u32 kvm_nr_mmu_pages)
{
	if (kvm_nr_mmu_pages < KVM_MIN_ALLOC_MMU_PAGES)
		return -EINVAL;

	mutex_lock(&kvm->slots_lock);
	spin_lock(&kvm->mmu_lock);

	kvm_mmu_change_mmu_pages(kvm, kvm_nr_mmu_pages);
	kvm->arch.n_requested_mmu_pages = kvm_nr_mmu_pages;

	spin_unlock(&kvm->mmu_lock);
	mutex_unlock(&kvm->slots_lock);
	return 0;
}

static int kvm_vm_ioctl_get_nr_mmu_pages(struct kvm *kvm)
{
	return kvm->arch.n_alloc_mmu_pages;
}
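
/*
 * Translate an aliased gfn to the gfn it is mapped onto.  The
 * _instantiation variant additionally skips aliases that are marked
 * invalid while a deletion or shrink is in flight.
 */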
gfn_t unalias_gfn_instantiation(struct kvm *kvm, gfn_t gfn)
{
	int i;
	struct kvm_mem_alias *alias;
	struct kvm_mem_aliases *aliases;

	aliases = kvm_aliases(kvm);

	for (i = 0; i < aliases->naliases; ++i) {
		alias = &aliases->aliases[i];
		if (alias->flags & KVM_ALIAS_INVALID)
			continue;
		if (gfn >= alias->base_gfn
		    && gfn < alias->base_gfn + alias->npages)
			return alias->target_gfn + gfn - alias->base_gfn;
	}
	return gfn;
}

gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn)
{
	int i;
	struct kvm_mem_alias *alias;
	struct kvm_mem_aliases *aliases;

	aliases = kvm_aliases(kvm);

	for (i = 0; i < aliases->naliases; ++i) {
		alias = &aliases->aliases[i];
		if (gfn >= alias->base_gfn
		    && gfn < alias->base_gfn + alias->npages)
			return alias->target_gfn + gfn - alias->base_gfn;
	}
	return gfn;
}

/*
 * Set a new alias region.  Aliases map a portion of physical memory into
 * another portion.  This is useful for memory windows, for example the PC
 * VGA region.
 */
static int kvm_vm_ioctl_set_memory_alias(struct kvm *kvm,
					 struct kvm_memory_alias *alias)
{
	int r, n;
	struct kvm_mem_alias *p;
	struct kvm_mem_aliases *aliases, *old_aliases;

	r = -EINVAL;
	/* General sanity checks */
	if (alias->memory_size & (PAGE_SIZE - 1))
		goto out;
	if (alias->guest_phys_addr & (PAGE_SIZE - 1))
		goto out;
	if (alias->slot >= KVM_ALIAS_SLOTS)
		goto out;
	if (alias->guest_phys_addr + alias->memory_size
	    < alias->guest_phys_addr)
		goto out;
	if (alias->target_phys_addr + alias->memory_size
	    < alias->target_phys_addr)
		goto out;

	r = -ENOMEM;
	aliases = kzalloc(sizeof(struct kvm_mem_aliases), GFP_KERNEL);
	if (!aliases)
		goto out;

	mutex_lock(&kvm->slots_lock);

	/* invalidate any gfn reference in case of deletion/shrinking */
	memcpy(aliases, kvm->arch.aliases, sizeof(struct kvm_mem_aliases));
	aliases->aliases[alias->slot].flags |= KVM_ALIAS_INVALID;
	old_aliases = kvm->arch.aliases;
	rcu_assign_pointer(kvm->arch.aliases, aliases);
	synchronize_srcu_expedited(&kvm->srcu);
	kvm_mmu_zap_all(kvm);
	kfree(old_aliases);

	r = -ENOMEM;
	aliases = kzalloc(sizeof(struct kvm_mem_aliases), GFP_KERNEL);
	if (!aliases)
		goto out_unlock;

	memcpy(aliases, kvm->arch.aliases, sizeof(struct kvm_mem_aliases));

	p = &aliases->aliases[alias->slot];
	p->base_gfn = alias->guest_phys_addr >> PAGE_SHIFT;
	p->npages = alias->memory_size >> PAGE_SHIFT;
	p->target_gfn = alias->target_phys_addr >> PAGE_SHIFT;
	p->flags &= ~(KVM_ALIAS_INVALID);

	for (n = KVM_ALIAS_SLOTS; n > 0; --n)
		if (aliases->aliases[n - 1].npages)
			break;
	aliases->naliases = n;

	old_aliases = kvm->arch.aliases;
	rcu_assign_pointer(kvm->arch.aliases, aliases);
	synchronize_srcu_expedited(&kvm->srcu);
	kfree(old_aliases);
	r = 0;

out_unlock:
	mutex_unlock(&kvm->slots_lock);
out:
	return r;
}

static int kvm_vm_ioctl_get_irqchip(struct kvm *kvm, struct kvm_irqchip *chip)
{
	int r;

	r = 0;
	switch (chip->chip_id) {
	case KVM_IRQCHIP_PIC_MASTER:
		memcpy(&chip->chip.pic,
			&pic_irqchip(kvm)->pics[0],
			sizeof(struct kvm_pic_state));
		break;
	case KVM_IRQCHIP_PIC_SLAVE:
		memcpy(&chip->chip.pic,
			&pic_irqchip(kvm)->pics[1],
			sizeof(struct kvm_pic_state));
		break;
	case KVM_IRQCHIP_IOAPIC:
		r = kvm_get_ioapic(kvm, &chip->chip.ioapic);
		break;
	default:
		r = -EINVAL;
		break;
	}
	return r;
}

static int kvm_vm_ioctl_set_irqchip(struct kvm *kvm, struct kvm_irqchip *chip)
{
	int r;

	r = 0;
	switch (chip->chip_id) {
	case KVM_IRQCHIP_PIC_MASTER:
		raw_spin_lock(&pic_irqchip(kvm)->lock);
		memcpy(&pic_irqchip(kvm)->pics[0],
			&chip->chip.pic,
			sizeof(struct kvm_pic_state));
		raw_spin_unlock(&pic_irqchip(kvm)->lock);
		break;
	case KVM_IRQCHIP_PIC_SLAVE:
		raw_spin_lock(&pic_irqchip(kvm)->lock);
		memcpy(&pic_irqchip(kvm)->pics[1],
			&chip->chip.pic,
			sizeof(struct kvm_pic_state));
		raw_spin_unlock(&pic_irqchip(kvm)->lock);
		break;
	case KVM_IRQCHIP_IOAPIC:
		r = kvm_set_ioapic(kvm, &chip->chip.ioapic);
		break;
	default:
		r = -EINVAL;
		break;
	}
	kvm_pic_update_irq(pic_irqchip(kvm));
	return r;
}

static int kvm_vm_ioctl_get_pit(struct kvm *kvm, struct kvm_pit_state *ps)
{
	int r = 0;

	mutex_lock(&kvm->arch.vpit->pit_state.lock);
	memcpy(ps, &kvm->arch.vpit->pit_state, sizeof(struct kvm_pit_state));
	mutex_unlock(&kvm->arch.vpit->pit_state.lock);
	return r;
}

static int kvm_vm_ioctl_set_pit(struct kvm *kvm, struct kvm_pit_state *ps)
{
	int r = 0;

	mutex_lock(&kvm->arch.vpit->pit_state.lock);
	memcpy(&kvm->arch.vpit->pit_state, ps, sizeof(struct kvm_pit_state));
	kvm_pit_load_count(kvm, 0, ps->channels[0].count, 0);
	mutex_unlock(&kvm->arch.vpit->pit_state.lock);
	return r;
}

static int kvm_vm_ioctl_get_pit2(struct kvm *kvm, struct kvm_pit_state2 *ps)
{
	int r = 0;

	mutex_lock(&kvm->arch.vpit->pit_state.lock);
	memcpy(ps->channels, &kvm->arch.vpit->pit_state.channels,
		sizeof(ps->channels));
	ps->flags = kvm->arch.vpit->pit_state.flags;
	mutex_unlock(&kvm->arch.vpit->pit_state.lock);
	return r;
}

static int kvm_vm_ioctl_set_pit2(struct kvm *kvm, struct kvm_pit_state2 *ps)
{
	int r = 0, start = 0;
	u32 prev_legacy, cur_legacy;

	mutex_lock(&kvm->arch.vpit->pit_state.lock);
	prev_legacy = kvm->arch.vpit->pit_state.flags & KVM_PIT_FLAGS_HPET_LEGACY;
	cur_legacy = ps->flags & KVM_PIT_FLAGS_HPET_LEGACY;
	if (!prev_legacy && cur_legacy)
		start = 1;
	memcpy(&kvm->arch.vpit->pit_state.channels, &ps->channels,
	       sizeof(kvm->arch.vpit->pit_state.channels));
	kvm->arch.vpit->pit_state.flags = ps->flags;
	kvm_pit_load_count(kvm, 0, kvm->arch.vpit->pit_state.channels[0].count, start);
	mutex_unlock(&kvm->arch.vpit->pit_state.lock);
	return r;
}

static int kvm_vm_ioctl_reinject(struct kvm *kvm,
				 struct kvm_reinject_control *control)
{
	if (!kvm->arch.vpit)
		return -ENXIO;
	mutex_lock(&kvm->arch.vpit->pit_state.lock);
	kvm->arch.vpit->pit_state.pit_timer.reinject = control->pit_reinject;
	mutex_unlock(&kvm->arch.vpit->pit_state.lock);
	return 0;
}

/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
			       struct kvm_dirty_log *log)
{
	int r, i;
	struct kvm_memory_slot *memslot;
	unsigned long n;
	unsigned long is_dirty = 0;

	mutex_lock(&kvm->slots_lock);

	r = -EINVAL;
	if (log->slot >= KVM_MEMORY_SLOTS)
		goto out;

	memslot = &kvm->memslots->memslots[log->slot];
	r = -ENOENT;
	if (!memslot->dirty_bitmap)
		goto out;

	n = kvm_dirty_bitmap_bytes(memslot);

	for (i = 0; !is_dirty && i < n/sizeof(long); i++)
		is_dirty = memslot->dirty_bitmap[i];

	/* If nothing is dirty, don't bother messing with page tables. */
	if (is_dirty) {
		struct kvm_memslots *slots, *old_slots;
		unsigned long *dirty_bitmap;

		spin_lock(&kvm->mmu_lock);
		kvm_mmu_slot_remove_write_access(kvm, log->slot);
		spin_unlock(&kvm->mmu_lock);

		r = -ENOMEM;
		dirty_bitmap = vmalloc(n);
		if (!dirty_bitmap)
			goto out;
		memset(dirty_bitmap, 0, n);

		r = -ENOMEM;
		slots = kzalloc(sizeof(struct kvm_memslots), GFP_KERNEL);
		if (!slots) {
			vfree(dirty_bitmap);
			goto out;
		}
		memcpy(slots, kvm->memslots, sizeof(struct kvm_memslots));
		slots->memslots[log->slot].dirty_bitmap = dirty_bitmap;

		old_slots = kvm->memslots;
		rcu_assign_pointer(kvm->memslots, slots);
		synchronize_srcu_expedited(&kvm->srcu);
		dirty_bitmap = old_slots->memslots[log->slot].dirty_bitmap;
		kfree(old_slots);

		r = -EFAULT;
		if (copy_to_user(log->dirty_bitmap, dirty_bitmap, n)) {
			vfree(dirty_bitmap);
			goto out;
		}
		vfree(dirty_bitmap);
	} else {
		r = -EFAULT;
		if (clear_user(log->dirty_bitmap, n))
			goto out;
	}

	r = 0;
out:
	mutex_unlock(&kvm->slots_lock);
	return r;
}
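
/*
 * Dispatcher for the x86-specific VM ioctls: irqchip and PIT setup,
 * memory aliases, Xen HVM configuration and the kvmclock offset.
 */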
  2517. long kvm_arch_vm_ioctl(struct file *filp,
  2518. unsigned int ioctl, unsigned long arg)
  2519. {
  2520. struct kvm *kvm = filp->private_data;
  2521. void __user *argp = (void __user *)arg;
  2522. int r = -ENOTTY;
  2523. /*
  2524. * This union makes it completely explicit to gcc-3.x
  2525. * that these two variables' stack usage should be
  2526. * combined, not added together.
  2527. */
  2528. union {
  2529. struct kvm_pit_state ps;
  2530. struct kvm_pit_state2 ps2;
  2531. struct kvm_memory_alias alias;
  2532. struct kvm_pit_config pit_config;
  2533. } u;
  2534. switch (ioctl) {
  2535. case KVM_SET_TSS_ADDR:
  2536. r = kvm_vm_ioctl_set_tss_addr(kvm, arg);
  2537. if (r < 0)
  2538. goto out;
  2539. break;
  2540. case KVM_SET_IDENTITY_MAP_ADDR: {
  2541. u64 ident_addr;
  2542. r = -EFAULT;
  2543. if (copy_from_user(&ident_addr, argp, sizeof ident_addr))
  2544. goto out;
  2545. r = kvm_vm_ioctl_set_identity_map_addr(kvm, ident_addr);
  2546. if (r < 0)
  2547. goto out;
  2548. break;
  2549. }
  2550. case KVM_SET_MEMORY_REGION: {
  2551. struct kvm_memory_region kvm_mem;
  2552. struct kvm_userspace_memory_region kvm_userspace_mem;
  2553. r = -EFAULT;
  2554. if (copy_from_user(&kvm_mem, argp, sizeof kvm_mem))
  2555. goto out;
  2556. kvm_userspace_mem.slot = kvm_mem.slot;
  2557. kvm_userspace_mem.flags = kvm_mem.flags;
  2558. kvm_userspace_mem.guest_phys_addr = kvm_mem.guest_phys_addr;
  2559. kvm_userspace_mem.memory_size = kvm_mem.memory_size;
  2560. r = kvm_vm_ioctl_set_memory_region(kvm, &kvm_userspace_mem, 0);
  2561. if (r)
  2562. goto out;
  2563. break;
  2564. }
  2565. case KVM_SET_NR_MMU_PAGES:
  2566. r = kvm_vm_ioctl_set_nr_mmu_pages(kvm, arg);
  2567. if (r)
  2568. goto out;
  2569. break;
  2570. case KVM_GET_NR_MMU_PAGES:
  2571. r = kvm_vm_ioctl_get_nr_mmu_pages(kvm);
  2572. break;
  2573. case KVM_SET_MEMORY_ALIAS:
  2574. r = -EFAULT;
  2575. if (copy_from_user(&u.alias, argp, sizeof(struct kvm_memory_alias)))
  2576. goto out;
  2577. r = kvm_vm_ioctl_set_memory_alias(kvm, &u.alias);
  2578. if (r)
  2579. goto out;
  2580. break;
  2581. case KVM_CREATE_IRQCHIP: {
  2582. struct kvm_pic *vpic;
  2583. mutex_lock(&kvm->lock);
  2584. r = -EEXIST;
  2585. if (kvm->arch.vpic)
  2586. goto create_irqchip_unlock;
  2587. r = -ENOMEM;
  2588. vpic = kvm_create_pic(kvm);
  2589. if (vpic) {
  2590. r = kvm_ioapic_init(kvm);
  2591. if (r) {
  2592. kvm_io_bus_unregister_dev(kvm, KVM_PIO_BUS,
  2593. &vpic->dev);
  2594. kfree(vpic);
  2595. goto create_irqchip_unlock;
  2596. }
  2597. } else
  2598. goto create_irqchip_unlock;
  2599. smp_wmb();
  2600. kvm->arch.vpic = vpic;
  2601. smp_wmb();
  2602. r = kvm_setup_default_irq_routing(kvm);
  2603. if (r) {
  2604. mutex_lock(&kvm->irq_lock);
  2605. kvm_ioapic_destroy(kvm);
  2606. kvm_destroy_pic(kvm);
  2607. mutex_unlock(&kvm->irq_lock);
  2608. }
  2609. create_irqchip_unlock:
  2610. mutex_unlock(&kvm->lock);
  2611. break;
  2612. }
  2613. case KVM_CREATE_PIT:
  2614. u.pit_config.flags = KVM_PIT_SPEAKER_DUMMY;
  2615. goto create_pit;
  2616. case KVM_CREATE_PIT2:
  2617. r = -EFAULT;
  2618. if (copy_from_user(&u.pit_config, argp,
  2619. sizeof(struct kvm_pit_config)))
  2620. goto out;
  2621. create_pit:
  2622. mutex_lock(&kvm->slots_lock);
  2623. r = -EEXIST;
  2624. if (kvm->arch.vpit)
  2625. goto create_pit_unlock;
  2626. r = -ENOMEM;
  2627. kvm->arch.vpit = kvm_create_pit(kvm, u.pit_config.flags);
  2628. if (kvm->arch.vpit)
  2629. r = 0;
  2630. create_pit_unlock:
  2631. mutex_unlock(&kvm->slots_lock);
  2632. break;
  2633. case KVM_IRQ_LINE_STATUS:
  2634. case KVM_IRQ_LINE: {
  2635. struct kvm_irq_level irq_event;
  2636. r = -EFAULT;
  2637. if (copy_from_user(&irq_event, argp, sizeof irq_event))
  2638. goto out;
  2639. r = -ENXIO;
  2640. if (irqchip_in_kernel(kvm)) {
  2641. __s32 status;
			status = kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID,
					irq_event.irq, irq_event.level);
			if (ioctl == KVM_IRQ_LINE_STATUS) {
				r = -EFAULT;
				irq_event.status = status;
				if (copy_to_user(argp, &irq_event,
							sizeof irq_event))
					goto out;
			}
			r = 0;
		}
		break;
	}
	case KVM_GET_IRQCHIP: {
		/* 0: PIC master, 1: PIC slave, 2: IOAPIC */
		struct kvm_irqchip *chip = kmalloc(sizeof(*chip), GFP_KERNEL);

		r = -ENOMEM;
		if (!chip)
			goto out;
		r = -EFAULT;
		if (copy_from_user(chip, argp, sizeof *chip))
			goto get_irqchip_out;
		r = -ENXIO;
		if (!irqchip_in_kernel(kvm))
			goto get_irqchip_out;
		r = kvm_vm_ioctl_get_irqchip(kvm, chip);
		if (r)
			goto get_irqchip_out;
		r = -EFAULT;
		if (copy_to_user(argp, chip, sizeof *chip))
			goto get_irqchip_out;
		r = 0;
	get_irqchip_out:
		kfree(chip);
		if (r)
			goto out;
		break;
	}
	case KVM_SET_IRQCHIP: {
		/* 0: PIC master, 1: PIC slave, 2: IOAPIC */
		struct kvm_irqchip *chip = kmalloc(sizeof(*chip), GFP_KERNEL);

		r = -ENOMEM;
		if (!chip)
			goto out;
		r = -EFAULT;
		if (copy_from_user(chip, argp, sizeof *chip))
			goto set_irqchip_out;
		r = -ENXIO;
		if (!irqchip_in_kernel(kvm))
			goto set_irqchip_out;
		r = kvm_vm_ioctl_set_irqchip(kvm, chip);
		if (r)
			goto set_irqchip_out;
		r = 0;
	set_irqchip_out:
		kfree(chip);
		if (r)
			goto out;
		break;
	}
	case KVM_GET_PIT: {
		r = -EFAULT;
		if (copy_from_user(&u.ps, argp, sizeof(struct kvm_pit_state)))
			goto out;
		r = -ENXIO;
		if (!kvm->arch.vpit)
			goto out;
		r = kvm_vm_ioctl_get_pit(kvm, &u.ps);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, &u.ps, sizeof(struct kvm_pit_state)))
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_PIT: {
		r = -EFAULT;
		if (copy_from_user(&u.ps, argp, sizeof u.ps))
			goto out;
		r = -ENXIO;
		if (!kvm->arch.vpit)
			goto out;
		r = kvm_vm_ioctl_set_pit(kvm, &u.ps);
		if (r)
			goto out;
		r = 0;
		break;
	}
	case KVM_GET_PIT2: {
		r = -ENXIO;
		if (!kvm->arch.vpit)
			goto out;
		r = kvm_vm_ioctl_get_pit2(kvm, &u.ps2);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, &u.ps2, sizeof(u.ps2)))
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_PIT2: {
		r = -EFAULT;
		if (copy_from_user(&u.ps2, argp, sizeof(u.ps2)))
			goto out;
		r = -ENXIO;
		if (!kvm->arch.vpit)
			goto out;
		r = kvm_vm_ioctl_set_pit2(kvm, &u.ps2);
		if (r)
			goto out;
		r = 0;
		break;
	}
	case KVM_REINJECT_CONTROL: {
		struct kvm_reinject_control control;

		r = -EFAULT;
		if (copy_from_user(&control, argp, sizeof(control)))
			goto out;
		r = kvm_vm_ioctl_reinject(kvm, &control);
		if (r)
			goto out;
		r = 0;
		break;
	}
	case KVM_XEN_HVM_CONFIG: {
		r = -EFAULT;
		if (copy_from_user(&kvm->arch.xen_hvm_config, argp,
				   sizeof(struct kvm_xen_hvm_config)))
			goto out;
		r = -EINVAL;
		if (kvm->arch.xen_hvm_config.flags)
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_CLOCK: {
		struct timespec now;
		struct kvm_clock_data user_ns;
		u64 now_ns;
		s64 delta;

		r = -EFAULT;
		if (copy_from_user(&user_ns, argp, sizeof(user_ns)))
			goto out;
		r = -EINVAL;
		if (user_ns.flags)
			goto out;
		r = 0;
		ktime_get_ts(&now);
		now_ns = timespec_to_ns(&now);
		delta = user_ns.clock - now_ns;
		kvm->arch.kvmclock_offset = delta;
		break;
	}
	case KVM_GET_CLOCK: {
		struct timespec now;
		struct kvm_clock_data user_ns;
		u64 now_ns;

		ktime_get_ts(&now);
		now_ns = timespec_to_ns(&now);
		user_ns.clock = kvm->arch.kvmclock_offset + now_ns;
		user_ns.flags = 0;

		r = -EFAULT;
		if (copy_to_user(argp, &user_ns, sizeof(user_ns)))
			goto out;
		r = 0;
		break;
	}
	default:
		;
	}
out:
	return r;
}

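/*
 * Prune msrs_to_save[] down to the MSRs that actually exist on this host:
 * each candidate after the KVM-specific prefix of the list is probed with
 * rdmsr_safe() and compacted out of the array if the read faults.
 */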
static void kvm_init_msr_list(void)
{
	u32 dummy[2];
	unsigned i, j;

	/* skip the first msrs in the list. KVM-specific */
	for (i = j = KVM_SAVE_MSRS_BEGIN; i < ARRAY_SIZE(msrs_to_save); i++) {
		if (rdmsr_safe(msrs_to_save[i], &dummy[0], &dummy[1]) < 0)
			continue;
		if (j < i)
			msrs_to_save[j] = msrs_to_save[i];
		j++;
	}
	num_msrs_to_save = j;
}

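/*
 * MMIO dispatch: accesses are tried against the in-kernel local APIC
 * device first; everything else is routed over KVM_MMIO_BUS to the
 * registered in-kernel devices, and only unclaimed accesses end up
 * exiting to userspace.
 */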
static int vcpu_mmio_write(struct kvm_vcpu *vcpu, gpa_t addr, int len,
			   const void *v)
{
	if (vcpu->arch.apic &&
	    !kvm_iodevice_write(&vcpu->arch.apic->dev, addr, len, v))
		return 0;

	return kvm_io_bus_write(vcpu->kvm, KVM_MMIO_BUS, addr, len, v);
}

static int vcpu_mmio_read(struct kvm_vcpu *vcpu, gpa_t addr, int len, void *v)
{
	if (vcpu->arch.apic &&
	    !kvm_iodevice_read(&vcpu->arch.apic->dev, addr, len, v))
		return 0;

	return kvm_io_bus_read(vcpu->kvm, KVM_MMIO_BUS, addr, len, v);
}

static void kvm_set_segment(struct kvm_vcpu *vcpu,
			    struct kvm_segment *var, int seg)
{
	kvm_x86_ops->set_segment(vcpu, var, seg);
}

void kvm_get_segment(struct kvm_vcpu *vcpu,
		     struct kvm_segment *var, int seg)
{
	kvm_x86_ops->get_segment(vcpu, var, seg);
}

gpa_t kvm_mmu_gva_to_gpa_read(struct kvm_vcpu *vcpu, gva_t gva, u32 *error)
{
	u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
	return vcpu->arch.mmu.gva_to_gpa(vcpu, gva, access, error);
}

gpa_t kvm_mmu_gva_to_gpa_fetch(struct kvm_vcpu *vcpu, gva_t gva, u32 *error)
{
	u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
	access |= PFERR_FETCH_MASK;
	return vcpu->arch.mmu.gva_to_gpa(vcpu, gva, access, error);
}

gpa_t kvm_mmu_gva_to_gpa_write(struct kvm_vcpu *vcpu, gva_t gva, u32 *error)
{
	u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
	access |= PFERR_WRITE_MASK;
	return vcpu->arch.mmu.gva_to_gpa(vcpu, gva, access, error);
}

/* used to access any of the guest's mapped memory without checking CPL */
gpa_t kvm_mmu_gva_to_gpa_system(struct kvm_vcpu *vcpu, gva_t gva, u32 *error)
{
	return vcpu->arch.mmu.gva_to_gpa(vcpu, gva, 0, error);
}

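/*
 * Copy from guest-virtual memory at most one page at a time: each
 * iteration translates the current gva with the requested access rights,
 * reads up to the end of that page, and advances.  A failed translation
 * is reported as X86EMUL_PROPAGATE_FAULT, a failed read of the backing
 * memory as X86EMUL_UNHANDLEABLE.
 */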
static int kvm_read_guest_virt_helper(gva_t addr, void *val, unsigned int bytes,
				      struct kvm_vcpu *vcpu, u32 access,
				      u32 *error)
{
	void *data = val;
	int r = X86EMUL_CONTINUE;

	while (bytes) {
		gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr, access, error);
		unsigned offset = addr & (PAGE_SIZE-1);
		unsigned toread = min(bytes, (unsigned)PAGE_SIZE - offset);
		int ret;

		if (gpa == UNMAPPED_GVA) {
			r = X86EMUL_PROPAGATE_FAULT;
			goto out;
		}
		ret = kvm_read_guest(vcpu->kvm, gpa, data, toread);
		if (ret < 0) {
			r = X86EMUL_UNHANDLEABLE;
			goto out;
		}

		bytes -= toread;
		data += toread;
		addr += toread;
	}
out:
	return r;
}

/* used for instruction fetching */
static int kvm_fetch_guest_virt(gva_t addr, void *val, unsigned int bytes,
				struct kvm_vcpu *vcpu, u32 *error)
{
	u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
	return kvm_read_guest_virt_helper(addr, val, bytes, vcpu,
					  access | PFERR_FETCH_MASK, error);
}

static int kvm_read_guest_virt(gva_t addr, void *val, unsigned int bytes,
			       struct kvm_vcpu *vcpu, u32 *error)
{
	u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
	return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, access,
					  error);
}

static int kvm_read_guest_virt_system(gva_t addr, void *val, unsigned int bytes,
				      struct kvm_vcpu *vcpu, u32 *error)
{
	return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, 0, error);
}

static int kvm_write_guest_virt_system(gva_t addr, void *val,
				       unsigned int bytes,
				       struct kvm_vcpu *vcpu,
				       u32 *error)
{
	void *data = val;
	int r = X86EMUL_CONTINUE;

	while (bytes) {
		gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr,
						      PFERR_WRITE_MASK, error);
		unsigned offset = addr & (PAGE_SIZE-1);
		unsigned towrite = min(bytes, (unsigned)PAGE_SIZE - offset);
		int ret;

		if (gpa == UNMAPPED_GVA) {
			r = X86EMUL_PROPAGATE_FAULT;
			goto out;
		}
		ret = kvm_write_guest(vcpu->kvm, gpa, data, towrite);
		if (ret < 0) {
			r = X86EMUL_UNHANDLEABLE;
			goto out;
		}

		bytes -= towrite;
		data += towrite;
		addr += towrite;
	}
out:
	return r;
}

static int emulator_read_emulated(unsigned long addr,
				  void *val,
				  unsigned int bytes,
				  struct kvm_vcpu *vcpu)
{
	gpa_t gpa;
	u32 error_code;

	if (vcpu->mmio_read_completed) {
		memcpy(val, vcpu->mmio_data, bytes);
		trace_kvm_mmio(KVM_TRACE_MMIO_READ, bytes,
			       vcpu->mmio_phys_addr, *(u64 *)val);
		vcpu->mmio_read_completed = 0;
		return X86EMUL_CONTINUE;
	}

	gpa = kvm_mmu_gva_to_gpa_read(vcpu, addr, &error_code);

	if (gpa == UNMAPPED_GVA) {
		kvm_inject_page_fault(vcpu, addr, error_code);
		return X86EMUL_PROPAGATE_FAULT;
	}

	/* For APIC access vmexit */
	if ((gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE)
		goto mmio;

	if (kvm_read_guest_virt(addr, val, bytes, vcpu, NULL)
	    == X86EMUL_CONTINUE)
		return X86EMUL_CONTINUE;

mmio:
	/*
	 * Is this MMIO handled locally?
	 */
	if (!vcpu_mmio_read(vcpu, gpa, bytes, val)) {
		trace_kvm_mmio(KVM_TRACE_MMIO_READ, bytes, gpa, *(u64 *)val);
		return X86EMUL_CONTINUE;
	}

	trace_kvm_mmio(KVM_TRACE_MMIO_READ_UNSATISFIED, bytes, gpa, 0);

	vcpu->mmio_needed = 1;
	vcpu->mmio_phys_addr = gpa;
	vcpu->mmio_size = bytes;
	vcpu->mmio_is_write = 0;

	return X86EMUL_UNHANDLEABLE;
}

int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
			const void *val, int bytes)
{
	int ret;

	ret = kvm_write_guest(vcpu->kvm, gpa, val, bytes);
	if (ret < 0)
		return 0;
	kvm_mmu_pte_write(vcpu, gpa, val, bytes, 1);
	return 1;
}

static int emulator_write_emulated_onepage(unsigned long addr,
					   const void *val,
					   unsigned int bytes,
					   struct kvm_vcpu *vcpu)
{
	gpa_t gpa;
	u32 error_code;

	gpa = kvm_mmu_gva_to_gpa_write(vcpu, addr, &error_code);

	if (gpa == UNMAPPED_GVA) {
		kvm_inject_page_fault(vcpu, addr, error_code);
		return X86EMUL_PROPAGATE_FAULT;
	}

	/* For APIC access vmexit */
	if ((gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE)
		goto mmio;

	if (emulator_write_phys(vcpu, gpa, val, bytes))
		return X86EMUL_CONTINUE;

mmio:
	trace_kvm_mmio(KVM_TRACE_MMIO_WRITE, bytes, gpa, *(u64 *)val);
	/*
	 * Is this MMIO handled locally?
	 */
	if (!vcpu_mmio_write(vcpu, gpa, bytes, val))
		return X86EMUL_CONTINUE;

	vcpu->mmio_needed = 1;
	vcpu->mmio_phys_addr = gpa;
	vcpu->mmio_size = bytes;
	vcpu->mmio_is_write = 1;
	memcpy(vcpu->mmio_data, val, bytes);

	return X86EMUL_CONTINUE;
}

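/*
 * A write that straddles a page boundary cannot be handed to
 * emulator_write_emulated_onepage() in one piece, since each page may
 * translate to a different gpa (or to MMIO).  "now = -addr & ~PAGE_MASK"
 * is the number of bytes remaining in the first page; the rest is issued
 * as a second call.
 */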
int emulator_write_emulated(unsigned long addr,
			    const void *val,
			    unsigned int bytes,
			    struct kvm_vcpu *vcpu)
{
	/* Crossing a page boundary? */
	if (((addr + bytes - 1) ^ addr) & PAGE_MASK) {
		int rc, now;

		now = -addr & ~PAGE_MASK;
		rc = emulator_write_emulated_onepage(addr, val, now, vcpu);
		if (rc != X86EMUL_CONTINUE)
			return rc;
		addr += now;
		val += now;
		bytes -= now;
	}
	return emulator_write_emulated_onepage(addr, val, bytes, vcpu);
}
EXPORT_SYMBOL_GPL(emulator_write_emulated);

#define CMPXCHG_TYPE(t, ptr, old, new) \
	(cmpxchg((t *)(ptr), *(t *)(old), *(t *)(new)) == *(t *)(old))

#ifdef CONFIG_X86_64
#  define CMPXCHG64(ptr, old, new) CMPXCHG_TYPE(u64, ptr, old, new)
#else
#  define CMPXCHG64(ptr, old, new) \
	(cmpxchg64((u64 *)(ptr), *(u64 *)(old), *(u64 *)(new)) == *(u64 *)(old))
#endif

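/*
 * Emulates a locked cmpxchg against guest memory.  Only power-of-two
 * sizes up to 8 bytes that do not cross a page boundary can be done
 * atomically, by mapping the guest page and issuing a host-side cmpxchg;
 * anything else (or an MMIO / unmapped target) falls back to a plain
 * emulated write, which is not atomic with respect to other vcpus.
 */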
static int emulator_cmpxchg_emulated(unsigned long addr,
				     const void *old,
				     const void *new,
				     unsigned int bytes,
				     struct kvm_vcpu *vcpu)
{
	gpa_t gpa;
	struct page *page;
	char *kaddr;
	bool exchanged;

	/* a guest's cmpxchg8b has to be emulated atomically */
	if (bytes > 8 || (bytes & (bytes - 1)))
		goto emul_write;

	gpa = kvm_mmu_gva_to_gpa_write(vcpu, addr, NULL);

	if (gpa == UNMAPPED_GVA ||
	    (gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE)
		goto emul_write;

	if (((gpa + bytes - 1) & PAGE_MASK) != (gpa & PAGE_MASK))
		goto emul_write;

	page = gfn_to_page(vcpu->kvm, gpa >> PAGE_SHIFT);

	kaddr = kmap_atomic(page, KM_USER0);
	kaddr += offset_in_page(gpa);
	switch (bytes) {
	case 1:
		exchanged = CMPXCHG_TYPE(u8, kaddr, old, new);
		break;
	case 2:
		exchanged = CMPXCHG_TYPE(u16, kaddr, old, new);
		break;
	case 4:
		exchanged = CMPXCHG_TYPE(u32, kaddr, old, new);
		break;
	case 8:
		exchanged = CMPXCHG64(kaddr, old, new);
		break;
	default:
		BUG();
	}
	kunmap_atomic(kaddr, KM_USER0);
	kvm_release_page_dirty(page);

	if (!exchanged)
		return X86EMUL_CMPXCHG_FAILED;

	kvm_mmu_pte_write(vcpu, gpa, new, bytes, 1);

	return X86EMUL_CONTINUE;

emul_write:
	printk_once(KERN_WARNING "kvm: emulating exchange as write\n");

	return emulator_write_emulated(addr, new, bytes, vcpu);
}

static int kernel_pio(struct kvm_vcpu *vcpu, void *pd)
{
	/* TODO: String I/O for in kernel device */
	int r;

	if (vcpu->arch.pio.in)
		r = kvm_io_bus_read(vcpu->kvm, KVM_PIO_BUS, vcpu->arch.pio.port,
				    vcpu->arch.pio.size, pd);
	else
		r = kvm_io_bus_write(vcpu->kvm, KVM_PIO_BUS,
				     vcpu->arch.pio.port, vcpu->arch.pio.size,
				     pd);
	return r;
}

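/*
 * Port I/O is tried against in-kernel devices first via kernel_pio();
 * only if no device on KVM_PIO_BUS claims the port does the vcpu exit to
 * userspace with KVM_EXIT_IO, with the data staged in the shared pio_data
 * page at KVM_PIO_PAGE_OFFSET.
 */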
static int emulator_pio_in_emulated(int size, unsigned short port, void *val,
				    unsigned int count, struct kvm_vcpu *vcpu)
{
	if (vcpu->arch.pio.count)
		goto data_avail;

	trace_kvm_pio(1, port, size, 1);

	vcpu->arch.pio.port = port;
	vcpu->arch.pio.in = 1;
	vcpu->arch.pio.count = count;
	vcpu->arch.pio.size = size;

	if (!kernel_pio(vcpu, vcpu->arch.pio_data)) {
	data_avail:
		memcpy(val, vcpu->arch.pio_data, size * count);
		vcpu->arch.pio.count = 0;
		return 1;
	}

	vcpu->run->exit_reason = KVM_EXIT_IO;
	vcpu->run->io.direction = KVM_EXIT_IO_IN;
	vcpu->run->io.size = size;
	vcpu->run->io.data_offset = KVM_PIO_PAGE_OFFSET * PAGE_SIZE;
	vcpu->run->io.count = count;
	vcpu->run->io.port = port;

	return 0;
}

static int emulator_pio_out_emulated(int size, unsigned short port,
				     const void *val, unsigned int count,
				     struct kvm_vcpu *vcpu)
{
	trace_kvm_pio(0, port, size, 1);

	vcpu->arch.pio.port = port;
	vcpu->arch.pio.in = 0;
	vcpu->arch.pio.count = count;
	vcpu->arch.pio.size = size;

	memcpy(vcpu->arch.pio_data, val, size * count);

	if (!kernel_pio(vcpu, vcpu->arch.pio_data)) {
		vcpu->arch.pio.count = 0;
		return 1;
	}

	vcpu->run->exit_reason = KVM_EXIT_IO;
	vcpu->run->io.direction = KVM_EXIT_IO_OUT;
	vcpu->run->io.size = size;
	vcpu->run->io.data_offset = KVM_PIO_PAGE_OFFSET * PAGE_SIZE;
	vcpu->run->io.count = count;
	vcpu->run->io.port = port;

	return 0;
}

static unsigned long get_segment_base(struct kvm_vcpu *vcpu, int seg)
{
	return kvm_x86_ops->get_segment_base(vcpu, seg);
}

int emulate_invlpg(struct kvm_vcpu *vcpu, gva_t address)
{
	kvm_mmu_invlpg(vcpu, address);
	return X86EMUL_CONTINUE;
}

int emulate_clts(struct kvm_vcpu *vcpu)
{
	kvm_x86_ops->set_cr0(vcpu, kvm_read_cr0_bits(vcpu, ~X86_CR0_TS));
	kvm_x86_ops->fpu_activate(vcpu);
	return X86EMUL_CONTINUE;
}

int emulator_get_dr(int dr, unsigned long *dest, struct kvm_vcpu *vcpu)
{
	return kvm_get_dr(vcpu, dr, dest);
}

int emulator_set_dr(int dr, unsigned long value, struct kvm_vcpu *vcpu)
{
	return kvm_set_dr(vcpu, dr, value);
}

void kvm_report_emulation_failure(struct kvm_vcpu *vcpu, const char *context)
{
	u8 opcodes[4];
	unsigned long rip = kvm_rip_read(vcpu);
	unsigned long rip_linear;

	if (!printk_ratelimit())
		return;

	rip_linear = rip + get_segment_base(vcpu, VCPU_SREG_CS);

	kvm_read_guest_virt(rip_linear, (void *)opcodes, 4, vcpu, NULL);

	printk(KERN_ERR "emulation failed (%s) rip %lx %02x %02x %02x %02x\n",
	       context, rip, opcodes[0], opcodes[1], opcodes[2], opcodes[3]);
}
EXPORT_SYMBOL_GPL(kvm_report_emulation_failure);

static u64 mk_cr_64(u64 curr_cr, u32 new_val)
{
	return (curr_cr & ~((1ULL << 32) - 1)) | new_val;
}

static unsigned long emulator_get_cr(int cr, struct kvm_vcpu *vcpu)
{
	unsigned long value;

	switch (cr) {
	case 0:
		value = kvm_read_cr0(vcpu);
		break;
	case 2:
		value = vcpu->arch.cr2;
		break;
	case 3:
		value = vcpu->arch.cr3;
		break;
	case 4:
		value = kvm_read_cr4(vcpu);
		break;
	case 8:
		value = kvm_get_cr8(vcpu);
		break;
	default:
		vcpu_printf(vcpu, "%s: unexpected cr %u\n", __func__, cr);
		return 0;
	}

	return value;
}

static void emulator_set_cr(int cr, unsigned long val, struct kvm_vcpu *vcpu)
{
	switch (cr) {
	case 0:
		kvm_set_cr0(vcpu, mk_cr_64(kvm_read_cr0(vcpu), val));
		break;
	case 2:
		vcpu->arch.cr2 = val;
		break;
	case 3:
		kvm_set_cr3(vcpu, val);
		break;
	case 4:
		kvm_set_cr4(vcpu, mk_cr_64(kvm_read_cr4(vcpu), val));
		break;
	case 8:
		kvm_set_cr8(vcpu, val & 0xfUL);
		break;
	default:
		vcpu_printf(vcpu, "%s: unexpected cr %u\n", __func__, cr);
	}
}

static int emulator_get_cpl(struct kvm_vcpu *vcpu)
{
	return kvm_x86_ops->get_cpl(vcpu);
}

static void emulator_get_gdt(struct desc_ptr *dt, struct kvm_vcpu *vcpu)
{
	kvm_x86_ops->get_gdt(vcpu, dt);
}

static bool emulator_get_cached_descriptor(struct desc_struct *desc, int seg,
					   struct kvm_vcpu *vcpu)
{
	struct kvm_segment var;

	kvm_get_segment(vcpu, &var, seg);

	if (var.unusable)
		return false;

	if (var.g)
		var.limit >>= 12;
	set_desc_limit(desc, var.limit);
	set_desc_base(desc, (unsigned long)var.base);
	desc->type = var.type;
	desc->s = var.s;
	desc->dpl = var.dpl;
	desc->p = var.present;
	desc->avl = var.avl;
	desc->l = var.l;
	desc->d = var.db;
	desc->g = var.g;

	return true;
}

static void emulator_set_cached_descriptor(struct desc_struct *desc, int seg,
					   struct kvm_vcpu *vcpu)
{
	struct kvm_segment var;

	/* needed to preserve selector */
	kvm_get_segment(vcpu, &var, seg);

	var.base = get_desc_base(desc);
	var.limit = get_desc_limit(desc);
	if (desc->g)
		var.limit = (var.limit << 12) | 0xfff;
	var.type = desc->type;
	var.present = desc->p;
	var.dpl = desc->dpl;
	var.db = desc->d;
	var.s = desc->s;
	var.l = desc->l;
	var.g = desc->g;
	var.avl = desc->avl;
	var.unusable = !var.present;
	var.padding = 0;

	kvm_set_segment(vcpu, &var, seg);
}

static u16 emulator_get_segment_selector(int seg, struct kvm_vcpu *vcpu)
{
	struct kvm_segment kvm_seg;

	kvm_get_segment(vcpu, &kvm_seg, seg);
	return kvm_seg.selector;
}

static void emulator_set_segment_selector(u16 sel, int seg,
					  struct kvm_vcpu *vcpu)
{
	struct kvm_segment kvm_seg;

	kvm_get_segment(vcpu, &kvm_seg, seg);
	kvm_seg.selector = sel;
	kvm_set_segment(vcpu, &kvm_seg, seg);
}

static void emulator_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
{
	kvm_x86_ops->set_rflags(vcpu, rflags);
}

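/*
 * Callback table handed to the x86 instruction emulator: memory reads and
 * writes, port I/O, segment/descriptor access, and control/debug register
 * access are all routed back through the functions above, so the emulator
 * itself stays independent of KVM internals.
 */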
static struct x86_emulate_ops emulate_ops = {
	.read_std = kvm_read_guest_virt_system,
	.write_std = kvm_write_guest_virt_system,
	.fetch = kvm_fetch_guest_virt,
	.read_emulated = emulator_read_emulated,
	.write_emulated = emulator_write_emulated,
	.cmpxchg_emulated = emulator_cmpxchg_emulated,
	.pio_in_emulated = emulator_pio_in_emulated,
	.pio_out_emulated = emulator_pio_out_emulated,
	.get_cached_descriptor = emulator_get_cached_descriptor,
	.set_cached_descriptor = emulator_set_cached_descriptor,
	.get_segment_selector = emulator_get_segment_selector,
	.set_segment_selector = emulator_set_segment_selector,
	.get_gdt = emulator_get_gdt,
	.get_cr = emulator_get_cr,
	.set_cr = emulator_set_cr,
	.cpl = emulator_get_cpl,
	.set_rflags = emulator_set_rflags,
	.get_dr = emulator_get_dr,
	.set_dr = emulator_set_dr,
	.set_msr = kvm_set_msr,
	.get_msr = kvm_get_msr,
};

static void cache_all_regs(struct kvm_vcpu *vcpu)
{
	kvm_register_read(vcpu, VCPU_REGS_RAX);
	kvm_register_read(vcpu, VCPU_REGS_RSP);
	kvm_register_read(vcpu, VCPU_REGS_RIP);
	vcpu->arch.regs_dirty = ~0;
}

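/*
 * Main entry point for instruction emulation.  Decodes the instruction at
 * the current rip (unless EMULTYPE_NO_DECODE says the cached decode is
 * still valid), runs it against emulate_ops, and converts the result into
 * EMULATE_DONE, EMULATE_DO_MMIO or EMULATE_FAIL.  cache_all_regs() above
 * forces rax/rsp/rip into the register cache first and marks everything
 * dirty, since emulate.c pokes at vcpu->arch.regs directly.
 */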
int emulate_instruction(struct kvm_vcpu *vcpu,
			unsigned long cr2,
			u16 error_code,
			int emulation_type)
{
	int r, shadow_mask;
	struct decode_cache *c;
	struct kvm_run *run = vcpu->run;

	kvm_clear_exception_queue(vcpu);
	vcpu->arch.mmio_fault_cr2 = cr2;
	/*
	 * TODO: fix emulate.c to use guest_read/write_register
	 * instead of direct ->regs accesses, can save hundred cycles
	 * on Intel for instructions that don't read/change RSP,
	 * for example.
	 */
	cache_all_regs(vcpu);

	vcpu->mmio_is_write = 0;

	if (!(emulation_type & EMULTYPE_NO_DECODE)) {
		int cs_db, cs_l;
		kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);

		vcpu->arch.emulate_ctxt.vcpu = vcpu;
		vcpu->arch.emulate_ctxt.eflags = kvm_x86_ops->get_rflags(vcpu);
		vcpu->arch.emulate_ctxt.eip = kvm_rip_read(vcpu);
		vcpu->arch.emulate_ctxt.mode =
			(!is_protmode(vcpu)) ? X86EMUL_MODE_REAL :
			(vcpu->arch.emulate_ctxt.eflags & X86_EFLAGS_VM)
			? X86EMUL_MODE_VM86 : cs_l
			? X86EMUL_MODE_PROT64 : cs_db
			? X86EMUL_MODE_PROT32 : X86EMUL_MODE_PROT16;

		r = x86_decode_insn(&vcpu->arch.emulate_ctxt, &emulate_ops);
		trace_kvm_emulate_insn_start(vcpu);

		/* Only allow emulation of specific instructions on #UD
		 * (namely VMMCALL, sysenter, sysexit, syscall) */
		c = &vcpu->arch.emulate_ctxt.decode;
		if (emulation_type & EMULTYPE_TRAP_UD) {
			if (!c->twobyte)
				return EMULATE_FAIL;
			switch (c->b) {
			case 0x01: /* VMMCALL */
				if (c->modrm_mod != 3 || c->modrm_rm != 1)
					return EMULATE_FAIL;
				break;
			case 0x34: /* sysenter */
			case 0x35: /* sysexit */
				if (c->modrm_mod != 0 || c->modrm_rm != 0)
					return EMULATE_FAIL;
				break;
			case 0x05: /* syscall */
				if (c->modrm_mod != 0 || c->modrm_rm != 0)
					return EMULATE_FAIL;
				break;
			default:
				return EMULATE_FAIL;
			}

			if (!(c->modrm_reg == 0 || c->modrm_reg == 3))
				return EMULATE_FAIL;
		}

		++vcpu->stat.insn_emulation;
		if (r) {
			++vcpu->stat.insn_emulation_fail;
			trace_kvm_emulate_insn_failed(vcpu);
			if (kvm_mmu_unprotect_page_virt(vcpu, cr2))
				return EMULATE_DONE;
			return EMULATE_FAIL;
		}
	}

	if (emulation_type & EMULTYPE_SKIP) {
		kvm_rip_write(vcpu, vcpu->arch.emulate_ctxt.decode.eip);
		return EMULATE_DONE;
	}

restart:
	r = x86_emulate_insn(&vcpu->arch.emulate_ctxt, &emulate_ops);
	shadow_mask = vcpu->arch.emulate_ctxt.interruptibility;

	if (r == 0)
		kvm_x86_ops->set_interrupt_shadow(vcpu, shadow_mask);

	if (vcpu->arch.pio.count) {
		if (!vcpu->arch.pio.in)
			vcpu->arch.pio.count = 0;
		return EMULATE_DO_MMIO;
	}

	if (r || vcpu->mmio_is_write) {
		run->exit_reason = KVM_EXIT_MMIO;
		run->mmio.phys_addr = vcpu->mmio_phys_addr;
		memcpy(run->mmio.data, vcpu->mmio_data, 8);
		run->mmio.len = vcpu->mmio_size;
		run->mmio.is_write = vcpu->mmio_is_write;
	}

	if (r) {
		if (kvm_mmu_unprotect_page_virt(vcpu, cr2))
			goto done;
		if (!vcpu->mmio_needed) {
			++vcpu->stat.insn_emulation_fail;
			trace_kvm_emulate_insn_failed(vcpu);
			kvm_report_emulation_failure(vcpu, "mmio");
			return EMULATE_FAIL;
		}
		return EMULATE_DO_MMIO;
	}

	if (vcpu->mmio_is_write) {
		vcpu->mmio_needed = 0;
		return EMULATE_DO_MMIO;
	}

done:
	if (vcpu->arch.exception.pending)
		vcpu->arch.emulate_ctxt.restart = false;

	if (vcpu->arch.emulate_ctxt.restart)
		goto restart;

	return EMULATE_DONE;
}
EXPORT_SYMBOL_GPL(emulate_instruction);

int kvm_fast_pio_out(struct kvm_vcpu *vcpu, int size, unsigned short port)
{
	unsigned long val = kvm_register_read(vcpu, VCPU_REGS_RAX);
	int ret = emulator_pio_out_emulated(size, port, &val, 1, vcpu);
	/* do not return to emulator after return from userspace */
	vcpu->arch.pio.count = 0;
	return ret;
}
EXPORT_SYMBOL_GPL(kvm_fast_pio_out);

static void bounce_off(void *info)
{
	/* nothing */
}

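/*
 * kvmclock depends on the per-cpu TSC frequency.  On hosts without a
 * constant TSC, cpufreq transitions change that frequency, so the
 * notifier below updates cpu_tsc_khz and asks every vcpu running on the
 * affected cpu to refresh its guest time parameters.  bounce_off() is an
 * empty IPI target used only to kick a remote cpu out of guest mode.
 */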
static int kvmclock_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
				     void *data)
{
	struct cpufreq_freqs *freq = data;
	struct kvm *kvm;
	struct kvm_vcpu *vcpu;
	int i, send_ipi = 0;

	if (val == CPUFREQ_PRECHANGE && freq->old > freq->new)
		return 0;
	if (val == CPUFREQ_POSTCHANGE && freq->old < freq->new)
		return 0;
	per_cpu(cpu_tsc_khz, freq->cpu) = freq->new;

	spin_lock(&kvm_lock);
	list_for_each_entry(kvm, &vm_list, vm_list) {
		kvm_for_each_vcpu(i, vcpu, kvm) {
			if (vcpu->cpu != freq->cpu)
				continue;
			if (!kvm_request_guest_time_update(vcpu))
				continue;
			if (vcpu->cpu != smp_processor_id())
				send_ipi++;
		}
	}
	spin_unlock(&kvm_lock);

	if (freq->old < freq->new && send_ipi) {
		/*
		 * We upscale the frequency.  Must make sure the guest
		 * doesn't see old kvmclock values while running with
		 * the new frequency, otherwise we risk the guest seeing
		 * time go backwards.
		 *
		 * In case we update the frequency for another cpu
		 * (which might be in guest context) send an interrupt
		 * to kick the cpu out of guest context.  Next time
		 * guest context is entered kvmclock will be updated,
		 * so the guest will not see stale values.
		 */
		smp_call_function_single(freq->cpu, bounce_off, NULL, 1);
	}
	return 0;
}

static struct notifier_block kvmclock_cpufreq_notifier_block = {
	.notifier_call = kvmclock_cpufreq_notifier
};

static void kvm_timer_init(void)
{
	int cpu;

	if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) {
		cpufreq_register_notifier(&kvmclock_cpufreq_notifier_block,
					  CPUFREQ_TRANSITION_NOTIFIER);
		for_each_online_cpu(cpu) {
			unsigned long khz = cpufreq_get(cpu);
			if (!khz)
				khz = tsc_khz;
			per_cpu(cpu_tsc_khz, cpu) = khz;
		}
	} else {
		for_each_possible_cpu(cpu)
			per_cpu(cpu_tsc_khz, cpu) = tsc_khz;
	}
}

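/*
 * perf needs to attribute samples taken in NMI context to either host or
 * guest.  current_vcpu is set around the NMI-handling window (see
 * kvm_before_handle_nmi()/kvm_after_handle_nmi() below) so these
 * callbacks can tell perf whether a sample hit in the guest, at which
 * privilege level, and at which guest rip.
 */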
static DEFINE_PER_CPU(struct kvm_vcpu *, current_vcpu);

static int kvm_is_in_guest(void)
{
	return percpu_read(current_vcpu) != NULL;
}

static int kvm_is_user_mode(void)
{
	int user_mode = 3;

	if (percpu_read(current_vcpu))
		user_mode = kvm_x86_ops->get_cpl(percpu_read(current_vcpu));

	return user_mode != 0;
}

static unsigned long kvm_get_guest_ip(void)
{
	unsigned long ip = 0;

	if (percpu_read(current_vcpu))
		ip = kvm_rip_read(percpu_read(current_vcpu));

	return ip;
}

static struct perf_guest_info_callbacks kvm_guest_cbs = {
	.is_in_guest = kvm_is_in_guest,
	.is_user_mode = kvm_is_user_mode,
	.get_guest_ip = kvm_get_guest_ip,
};

void kvm_before_handle_nmi(struct kvm_vcpu *vcpu)
{
	percpu_write(current_vcpu, vcpu);
}
EXPORT_SYMBOL_GPL(kvm_before_handle_nmi);

void kvm_after_handle_nmi(struct kvm_vcpu *vcpu)
{
	percpu_write(current_vcpu, NULL);
}
EXPORT_SYMBOL_GPL(kvm_after_handle_nmi);

int kvm_arch_init(void *opaque)
{
	int r;
	struct kvm_x86_ops *ops = (struct kvm_x86_ops *)opaque;

	if (kvm_x86_ops) {
		printk(KERN_ERR "kvm: already loaded the other module\n");
		r = -EEXIST;
		goto out;
	}

	if (!ops->cpu_has_kvm_support()) {
		printk(KERN_ERR "kvm: no hardware support\n");
		r = -EOPNOTSUPP;
		goto out;
	}
	if (ops->disabled_by_bios()) {
		printk(KERN_ERR "kvm: disabled by bios\n");
		r = -EOPNOTSUPP;
		goto out;
	}

	r = kvm_mmu_module_init();
	if (r)
		goto out;

	kvm_init_msr_list();

	kvm_x86_ops = ops;
	kvm_mmu_set_nonpresent_ptes(0ull, 0ull);
	kvm_mmu_set_base_ptes(PT_PRESENT_MASK);
	kvm_mmu_set_mask_ptes(PT_USER_MASK, PT_ACCESSED_MASK,
			PT_DIRTY_MASK, PT64_NX_MASK, 0);

	kvm_timer_init();

	perf_register_guest_info_callbacks(&kvm_guest_cbs);

	return 0;

out:
	return r;
}

void kvm_arch_exit(void)
{
	perf_unregister_guest_info_callbacks(&kvm_guest_cbs);

	if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
		cpufreq_unregister_notifier(&kvmclock_cpufreq_notifier_block,
					    CPUFREQ_TRANSITION_NOTIFIER);

	kvm_x86_ops = NULL;
	kvm_mmu_module_exit();
}

int kvm_emulate_halt(struct kvm_vcpu *vcpu)
{
	++vcpu->stat.halt_exits;
	if (irqchip_in_kernel(vcpu->kvm)) {
		vcpu->arch.mp_state = KVM_MP_STATE_HALTED;
		return 1;
	} else {
		vcpu->run->exit_reason = KVM_EXIT_HLT;
		return 0;
	}
}
EXPORT_SYMBOL_GPL(kvm_emulate_halt);

static inline gpa_t hc_gpa(struct kvm_vcpu *vcpu, unsigned long a0,
			   unsigned long a1)
{
	if (is_long_mode(vcpu))
		return a0;
	else
		return a0 | ((gpa_t)a1 << 32);
}

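/*
 * Hyper-V hypercall ABI, as handled below: in long mode the call input
 * lives in rcx and the input/output GPAs in rdx/r8; a 32-bit caller
 * instead splits each 64-bit value across edx:eax, ebx:ecx and edi:esi.
 * The result goes back the same way (rax, or edx:eax).
 */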
int kvm_hv_hypercall(struct kvm_vcpu *vcpu)
{
	u64 param, ingpa, outgpa, ret;
	uint16_t code, rep_idx, rep_cnt, res = HV_STATUS_SUCCESS, rep_done = 0;
	bool fast, longmode;
	int cs_db, cs_l;

	/*
	 * The hypercall generates #UD from a non-zero CPL and from real
	 * mode, per the Hyper-V spec.
	 */
	if (kvm_x86_ops->get_cpl(vcpu) != 0 || !is_protmode(vcpu)) {
		kvm_queue_exception(vcpu, UD_VECTOR);
		return 0;
	}

	kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
	longmode = is_long_mode(vcpu) && cs_l == 1;

	if (!longmode) {
		param = ((u64)kvm_register_read(vcpu, VCPU_REGS_RDX) << 32) |
			(kvm_register_read(vcpu, VCPU_REGS_RAX) & 0xffffffff);
		ingpa = ((u64)kvm_register_read(vcpu, VCPU_REGS_RBX) << 32) |
			(kvm_register_read(vcpu, VCPU_REGS_RCX) & 0xffffffff);
		outgpa = ((u64)kvm_register_read(vcpu, VCPU_REGS_RDI) << 32) |
			(kvm_register_read(vcpu, VCPU_REGS_RSI) & 0xffffffff);
	}
#ifdef CONFIG_X86_64
	else {
		param = kvm_register_read(vcpu, VCPU_REGS_RCX);
		ingpa = kvm_register_read(vcpu, VCPU_REGS_RDX);
		outgpa = kvm_register_read(vcpu, VCPU_REGS_R8);
	}
#endif

	code = param & 0xffff;
	fast = (param >> 16) & 0x1;
	rep_cnt = (param >> 32) & 0xfff;
	rep_idx = (param >> 48) & 0xfff;

	trace_kvm_hv_hypercall(code, fast, rep_cnt, rep_idx, ingpa, outgpa);

	switch (code) {
	case HV_X64_HV_NOTIFY_LONG_SPIN_WAIT:
		kvm_vcpu_on_spin(vcpu);
		break;
	default:
		res = HV_STATUS_INVALID_HYPERCALL_CODE;
		break;
	}

	ret = res | (((u64)rep_done & 0xfff) << 32);
	if (longmode) {
		kvm_register_write(vcpu, VCPU_REGS_RAX, ret);
	} else {
		kvm_register_write(vcpu, VCPU_REGS_RDX, ret >> 32);
		kvm_register_write(vcpu, VCPU_REGS_RAX, ret & 0xffffffff);
	}

	return 1;
}

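/*
 * Guest-side usage sketch (illustrative only, not part of this file): a
 * KVM paravirtualized guest issues a hypercall with the number in rax and
 * up to four arguments in rbx/rcx/rdx/rsi, roughly
 *
 *	mov	$KVM_HC_VAPIC_POLL_IRQ, %rax
 *	vmcall				# vmmcall on AMD
 *
 * and reads the return value back from rax.  kvm_emulate_hypercall()
 * below is the host-side dispatcher for these calls.
 */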
int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
{
	unsigned long nr, a0, a1, a2, a3, ret;
	int r = 1;

	if (kvm_hv_hypercall_enabled(vcpu->kvm))
		return kvm_hv_hypercall(vcpu);

	nr = kvm_register_read(vcpu, VCPU_REGS_RAX);
	a0 = kvm_register_read(vcpu, VCPU_REGS_RBX);
	a1 = kvm_register_read(vcpu, VCPU_REGS_RCX);
	a2 = kvm_register_read(vcpu, VCPU_REGS_RDX);
	a3 = kvm_register_read(vcpu, VCPU_REGS_RSI);

	trace_kvm_hypercall(nr, a0, a1, a2, a3);

	if (!is_long_mode(vcpu)) {
		nr &= 0xFFFFFFFF;
		a0 &= 0xFFFFFFFF;
		a1 &= 0xFFFFFFFF;
		a2 &= 0xFFFFFFFF;
		a3 &= 0xFFFFFFFF;
	}

	if (kvm_x86_ops->get_cpl(vcpu) != 0) {
		ret = -KVM_EPERM;
		goto out;
	}

	switch (nr) {
	case KVM_HC_VAPIC_POLL_IRQ:
		ret = 0;
		break;
	case KVM_HC_MMU_OP:
		r = kvm_pv_mmu_op(vcpu, a0, hc_gpa(vcpu, a1, a2), &ret);
		break;
	default:
		ret = -KVM_ENOSYS;
		break;
	}
out:
	kvm_register_write(vcpu, VCPU_REGS_RAX, ret);
	++vcpu->stat.hypercalls;
	return r;
}
EXPORT_SYMBOL_GPL(kvm_emulate_hypercall);

int kvm_fix_hypercall(struct kvm_vcpu *vcpu)
{
	char instruction[3];
	unsigned long rip = kvm_rip_read(vcpu);

	/*
	 * Blow out the MMU so that no other VCPU has an active mapping,
	 * ensuring that the updated hypercall appears atomically across
	 * all VCPUs.
	 */
	kvm_mmu_zap_all(vcpu->kvm);

	kvm_x86_ops->patch_hypercall(vcpu, instruction);

	return emulator_write_emulated(rip, instruction, 3, vcpu);
}

void realmode_lgdt(struct kvm_vcpu *vcpu, u16 limit, unsigned long base)
{
	struct desc_ptr dt = { limit, base };

	kvm_x86_ops->set_gdt(vcpu, &dt);
}

void realmode_lidt(struct kvm_vcpu *vcpu, u16 limit, unsigned long base)
{
	struct desc_ptr dt = { limit, base };

	kvm_x86_ops->set_idt(vcpu, &dt);
}

static int move_to_next_stateful_cpuid_entry(struct kvm_vcpu *vcpu, int i)
{
	struct kvm_cpuid_entry2 *e = &vcpu->arch.cpuid_entries[i];
	int j, nent = vcpu->arch.cpuid_nent;

	e->flags &= ~KVM_CPUID_FLAG_STATE_READ_NEXT;
	/* when no next entry is found, the current entry[i] is reselected */
	for (j = i + 1; ; j = (j + 1) % nent) {
		struct kvm_cpuid_entry2 *ej = &vcpu->arch.cpuid_entries[j];
		if (ej->function == e->function) {
			ej->flags |= KVM_CPUID_FLAG_STATE_READ_NEXT;
			return j;
		}
	}
	return 0; /* silence gcc, even though control never reaches here */
}

/* find an entry with matching function, matching index (if needed), and that
 * should be read next (if it's stateful) */
static int is_matching_cpuid_entry(struct kvm_cpuid_entry2 *e,
				   u32 function, u32 index)
{
	if (e->function != function)
		return 0;
	if ((e->flags & KVM_CPUID_FLAG_SIGNIFCANT_INDEX) && e->index != index)
		return 0;
	if ((e->flags & KVM_CPUID_FLAG_STATEFUL_FUNC) &&
	    !(e->flags & KVM_CPUID_FLAG_STATE_READ_NEXT))
		return 0;
	return 1;
}

struct kvm_cpuid_entry2 *kvm_find_cpuid_entry(struct kvm_vcpu *vcpu,
					      u32 function, u32 index)
{
	int i;
	struct kvm_cpuid_entry2 *best = NULL;

	for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
		struct kvm_cpuid_entry2 *e;

		e = &vcpu->arch.cpuid_entries[i];
		if (is_matching_cpuid_entry(e, function, index)) {
			if (e->flags & KVM_CPUID_FLAG_STATEFUL_FUNC)
				move_to_next_stateful_cpuid_entry(vcpu, i);
			best = e;
			break;
		}
		/*
		 * Both basic or both extended?
		 */
		if (((e->function ^ function) & 0x80000000) == 0)
			if (!best || e->function > best->function)
				best = e;
	}
	return best;
}
EXPORT_SYMBOL_GPL(kvm_find_cpuid_entry);

int cpuid_maxphyaddr(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 0x80000000, 0);
	if (!best || best->eax < 0x80000008)
		goto not_found;
	best = kvm_find_cpuid_entry(vcpu, 0x80000008, 0);
	if (best)
		return best->eax & 0xff;
not_found:
	return 36;
}

void kvm_emulate_cpuid(struct kvm_vcpu *vcpu)
{
	u32 function, index;
	struct kvm_cpuid_entry2 *best;

	function = kvm_register_read(vcpu, VCPU_REGS_RAX);
	index = kvm_register_read(vcpu, VCPU_REGS_RCX);
	kvm_register_write(vcpu, VCPU_REGS_RAX, 0);
	kvm_register_write(vcpu, VCPU_REGS_RBX, 0);
	kvm_register_write(vcpu, VCPU_REGS_RCX, 0);
	kvm_register_write(vcpu, VCPU_REGS_RDX, 0);
	best = kvm_find_cpuid_entry(vcpu, function, index);
	if (best) {
		kvm_register_write(vcpu, VCPU_REGS_RAX, best->eax);
		kvm_register_write(vcpu, VCPU_REGS_RBX, best->ebx);
		kvm_register_write(vcpu, VCPU_REGS_RCX, best->ecx);
		kvm_register_write(vcpu, VCPU_REGS_RDX, best->edx);
	}
	kvm_x86_ops->skip_emulated_instruction(vcpu);
	trace_kvm_cpuid(function,
			kvm_register_read(vcpu, VCPU_REGS_RAX),
			kvm_register_read(vcpu, VCPU_REGS_RBX),
			kvm_register_read(vcpu, VCPU_REGS_RCX),
			kvm_register_read(vcpu, VCPU_REGS_RDX));
}
EXPORT_SYMBOL_GPL(kvm_emulate_cpuid);

/*
 * Check if userspace requested an interrupt window, and that the
 * interrupt window is open.
 *
 * No need to exit to userspace if we already have an interrupt queued.
 */
static int dm_request_for_irq_injection(struct kvm_vcpu *vcpu)
{
	return (!irqchip_in_kernel(vcpu->kvm) && !kvm_cpu_has_interrupt(vcpu) &&
		vcpu->run->request_interrupt_window &&
		kvm_arch_interrupt_allowed(vcpu));
}

static void post_kvm_run_save(struct kvm_vcpu *vcpu)
{
	struct kvm_run *kvm_run = vcpu->run;

	kvm_run->if_flag = (kvm_get_rflags(vcpu) & X86_EFLAGS_IF) != 0;
	kvm_run->cr8 = kvm_get_cr8(vcpu);
	kvm_run->apic_base = kvm_get_apic_base(vcpu);
	if (irqchip_in_kernel(vcpu->kvm))
		kvm_run->ready_for_interrupt_injection = 1;
	else
		kvm_run->ready_for_interrupt_injection =
			kvm_arch_interrupt_allowed(vcpu) &&
			!kvm_cpu_has_interrupt(vcpu) &&
			!kvm_event_needs_reinjection(vcpu);
}

static void vapic_enter(struct kvm_vcpu *vcpu)
{
	struct kvm_lapic *apic = vcpu->arch.apic;
	struct page *page;

	if (!apic || !apic->vapic_addr)
		return;

	page = gfn_to_page(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT);

	vcpu->arch.apic->vapic_page = page;
}

static void vapic_exit(struct kvm_vcpu *vcpu)
{
	struct kvm_lapic *apic = vcpu->arch.apic;
	int idx;

	if (!apic || !apic->vapic_addr)
		return;

	idx = srcu_read_lock(&vcpu->kvm->srcu);
	kvm_release_page_dirty(apic->vapic_page);
	mark_page_dirty(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT);
	srcu_read_unlock(&vcpu->kvm->srcu, idx);
}

static void update_cr8_intercept(struct kvm_vcpu *vcpu)
{
	int max_irr, tpr;

	if (!kvm_x86_ops->update_cr8_intercept)
		return;

	if (!vcpu->arch.apic)
		return;

	if (!vcpu->arch.apic->vapic_addr)
		max_irr = kvm_lapic_find_highest_irr(vcpu);
	else
		max_irr = -1;

	if (max_irr != -1)
		max_irr >>= 4;

	tpr = kvm_lapic_get_cr8(vcpu);

	kvm_x86_ops->update_cr8_intercept(vcpu, tpr, max_irr);
}

static void inject_pending_event(struct kvm_vcpu *vcpu)
{
	/* try to reinject previous events if any */
	if (vcpu->arch.exception.pending) {
		trace_kvm_inj_exception(vcpu->arch.exception.nr,
					vcpu->arch.exception.has_error_code,
					vcpu->arch.exception.error_code);
		kvm_x86_ops->queue_exception(vcpu, vcpu->arch.exception.nr,
					     vcpu->arch.exception.has_error_code,
					     vcpu->arch.exception.error_code,
					     vcpu->arch.exception.reinject);
		return;
	}

	if (vcpu->arch.nmi_injected) {
		kvm_x86_ops->set_nmi(vcpu);
		return;
	}

	if (vcpu->arch.interrupt.pending) {
		kvm_x86_ops->set_irq(vcpu);
		return;
	}

	/* try to inject new event if pending */
	if (vcpu->arch.nmi_pending) {
		if (kvm_x86_ops->nmi_allowed(vcpu)) {
			vcpu->arch.nmi_pending = false;
			vcpu->arch.nmi_injected = true;
			kvm_x86_ops->set_nmi(vcpu);
		}
	} else if (kvm_cpu_has_interrupt(vcpu)) {
		if (kvm_x86_ops->interrupt_allowed(vcpu)) {
			kvm_queue_interrupt(vcpu, kvm_cpu_get_interrupt(vcpu),
					    false);
			kvm_x86_ops->set_irq(vcpu);
		}
	}
}

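/*
 * One guest-entry round trip: service pending requests, inject events,
 * disable interrupts, run the guest via kvm_x86_ops->run(), then handle
 * the exit.  Returns a positive value to keep the __vcpu_run() loop
 * going, 0 when the vcpu must drop back to userspace, or a negative
 * errno.
 */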
static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
{
	int r;
	bool req_int_win = !irqchip_in_kernel(vcpu->kvm) &&
		vcpu->run->request_interrupt_window;

	if (vcpu->requests)
		if (test_and_clear_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests))
			kvm_mmu_unload(vcpu);

	r = kvm_mmu_reload(vcpu);
	if (unlikely(r))
		goto out;

	if (vcpu->requests) {
		if (test_and_clear_bit(KVM_REQ_MIGRATE_TIMER, &vcpu->requests))
			__kvm_migrate_timers(vcpu);
		if (test_and_clear_bit(KVM_REQ_KVMCLOCK_UPDATE, &vcpu->requests))
			kvm_write_guest_time(vcpu);
		if (test_and_clear_bit(KVM_REQ_MMU_SYNC, &vcpu->requests))
			kvm_mmu_sync_roots(vcpu);
		if (test_and_clear_bit(KVM_REQ_TLB_FLUSH, &vcpu->requests))
			kvm_x86_ops->tlb_flush(vcpu);
		if (test_and_clear_bit(KVM_REQ_REPORT_TPR_ACCESS,
				       &vcpu->requests)) {
			vcpu->run->exit_reason = KVM_EXIT_TPR_ACCESS;
			r = 0;
			goto out;
		}
		if (test_and_clear_bit(KVM_REQ_TRIPLE_FAULT, &vcpu->requests)) {
			vcpu->run->exit_reason = KVM_EXIT_SHUTDOWN;
			r = 0;
			goto out;
		}
		if (test_and_clear_bit(KVM_REQ_DEACTIVATE_FPU, &vcpu->requests)) {
			vcpu->fpu_active = 0;
			kvm_x86_ops->fpu_deactivate(vcpu);
		}
	}

	preempt_disable();

	kvm_x86_ops->prepare_guest_switch(vcpu);
	if (vcpu->fpu_active)
		kvm_load_guest_fpu(vcpu);

	local_irq_disable();

	clear_bit(KVM_REQ_KICK, &vcpu->requests);
	smp_mb__after_clear_bit();

	if (vcpu->requests || need_resched() || signal_pending(current)) {
		set_bit(KVM_REQ_KICK, &vcpu->requests);
		local_irq_enable();
		preempt_enable();
		r = 1;
		goto out;
	}

	inject_pending_event(vcpu);

	/* enable NMI/IRQ window open exits if needed */
	if (vcpu->arch.nmi_pending)
		kvm_x86_ops->enable_nmi_window(vcpu);
	else if (kvm_cpu_has_interrupt(vcpu) || req_int_win)
		kvm_x86_ops->enable_irq_window(vcpu);

	if (kvm_lapic_enabled(vcpu)) {
		update_cr8_intercept(vcpu);
		kvm_lapic_sync_to_vapic(vcpu);
	}

	srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);

	kvm_guest_enter();

	if (unlikely(vcpu->arch.switch_db_regs)) {
		set_debugreg(0, 7);
		set_debugreg(vcpu->arch.eff_db[0], 0);
		set_debugreg(vcpu->arch.eff_db[1], 1);
		set_debugreg(vcpu->arch.eff_db[2], 2);
		set_debugreg(vcpu->arch.eff_db[3], 3);
	}

	trace_kvm_entry(vcpu->vcpu_id);
	kvm_x86_ops->run(vcpu);

	/*
	 * If the guest has used debug registers, at least dr7
	 * will be disabled while returning to the host.
	 * If we don't have active breakpoints in the host, we don't
	 * care about the messed up debug address registers. But if
	 * we have some of them active, restore the old state.
	 */
	if (hw_breakpoint_active())
		hw_breakpoint_restore();

	set_bit(KVM_REQ_KICK, &vcpu->requests);
	local_irq_enable();

	++vcpu->stat.exits;

	/*
	 * We must have an instruction between local_irq_enable() and
	 * kvm_guest_exit(), so the timer interrupt isn't delayed by
	 * the interrupt shadow.  The stat.exits increment will do nicely.
	 * But we need to prevent reordering, hence this barrier():
	 */
	barrier();

	kvm_guest_exit();

	preempt_enable();

	vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

	/*
	 * Profile KVM exit RIPs:
	 */
	if (unlikely(prof_on == KVM_PROFILING)) {
		unsigned long rip = kvm_rip_read(vcpu);
		profile_hit(KVM_PROFILING, (void *)rip);
	}

	kvm_lapic_sync_from_vapic(vcpu);

	r = kvm_x86_ops->handle_exit(vcpu);
out:
	return r;
}

static int __vcpu_run(struct kvm_vcpu *vcpu)
{
	int r;
	struct kvm *kvm = vcpu->kvm;

	if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_SIPI_RECEIVED)) {
		pr_debug("vcpu %d received sipi with vector # %x\n",
			 vcpu->vcpu_id, vcpu->arch.sipi_vector);
		kvm_lapic_reset(vcpu);
		r = kvm_arch_vcpu_reset(vcpu);
		if (r)
			return r;
		vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
	}

	vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
	vapic_enter(vcpu);

	r = 1;
	while (r > 0) {
		if (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE)
			r = vcpu_enter_guest(vcpu);
		else {
			srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
			kvm_vcpu_block(vcpu);
			vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
			if (test_and_clear_bit(KVM_REQ_UNHALT, &vcpu->requests)) {
				switch (vcpu->arch.mp_state) {
				case KVM_MP_STATE_HALTED:
					vcpu->arch.mp_state =
						KVM_MP_STATE_RUNNABLE;
					/* fall through */
				case KVM_MP_STATE_RUNNABLE:
					break;
				case KVM_MP_STATE_SIPI_RECEIVED:
				default:
					r = -EINTR;
					break;
				}
			}
		}

		if (r <= 0)
			break;

		clear_bit(KVM_REQ_PENDING_TIMER, &vcpu->requests);
		if (kvm_cpu_has_pending_timer(vcpu))
			kvm_inject_pending_timer_irqs(vcpu);

		if (dm_request_for_irq_injection(vcpu)) {
			r = -EINTR;
			vcpu->run->exit_reason = KVM_EXIT_INTR;
			++vcpu->stat.request_irq_exits;
		}
		if (signal_pending(current)) {
			r = -EINTR;
			vcpu->run->exit_reason = KVM_EXIT_INTR;
			++vcpu->stat.signal_exits;
		}
		if (need_resched()) {
			srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
			kvm_resched(vcpu);
			vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
		}
	}

	srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
	vapic_exit(vcpu);

	return r;
}

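/*
 * Top-level handler for the KVM_RUN ioctl.  If the previous exit left an
 * MMIO or PIO transaction half-finished, the instruction that caused it
 * is re-entered through the emulator (EMULTYPE_NO_DECODE reuses the
 * cached decode) before the vcpu loop in __vcpu_run() is resumed.
 */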
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	int r;
	sigset_t sigsaved;

	vcpu_load(vcpu);

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

	if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_UNINITIALIZED)) {
		kvm_vcpu_block(vcpu);
		clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
		r = -EAGAIN;
		goto out;
	}

	/* re-sync apic's tpr */
	if (!irqchip_in_kernel(vcpu->kvm))
		kvm_set_cr8(vcpu, kvm_run->cr8);

	if (vcpu->arch.pio.count || vcpu->mmio_needed ||
	    vcpu->arch.emulate_ctxt.restart) {
		if (vcpu->mmio_needed) {
			memcpy(vcpu->mmio_data, kvm_run->mmio.data, 8);
			vcpu->mmio_read_completed = 1;
			vcpu->mmio_needed = 0;
		}
		vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
		r = emulate_instruction(vcpu, 0, 0, EMULTYPE_NO_DECODE);
		srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
		if (r == EMULATE_DO_MMIO) {
			r = 0;
			goto out;
		}
	}
	if (kvm_run->exit_reason == KVM_EXIT_HYPERCALL)
		kvm_register_write(vcpu, VCPU_REGS_RAX,
				   kvm_run->hypercall.ret);

	r = __vcpu_run(vcpu);

out:
	post_kvm_run_save(vcpu);
	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

	vcpu_put(vcpu);
	return r;
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	vcpu_load(vcpu);

	regs->rax = kvm_register_read(vcpu, VCPU_REGS_RAX);
	regs->rbx = kvm_register_read(vcpu, VCPU_REGS_RBX);
	regs->rcx = kvm_register_read(vcpu, VCPU_REGS_RCX);
	regs->rdx = kvm_register_read(vcpu, VCPU_REGS_RDX);
	regs->rsi = kvm_register_read(vcpu, VCPU_REGS_RSI);
	regs->rdi = kvm_register_read(vcpu, VCPU_REGS_RDI);
	regs->rsp = kvm_register_read(vcpu, VCPU_REGS_RSP);
	regs->rbp = kvm_register_read(vcpu, VCPU_REGS_RBP);
#ifdef CONFIG_X86_64
	regs->r8 = kvm_register_read(vcpu, VCPU_REGS_R8);
	regs->r9 = kvm_register_read(vcpu, VCPU_REGS_R9);
	regs->r10 = kvm_register_read(vcpu, VCPU_REGS_R10);
	regs->r11 = kvm_register_read(vcpu, VCPU_REGS_R11);
	regs->r12 = kvm_register_read(vcpu, VCPU_REGS_R12);
	regs->r13 = kvm_register_read(vcpu, VCPU_REGS_R13);
	regs->r14 = kvm_register_read(vcpu, VCPU_REGS_R14);
	regs->r15 = kvm_register_read(vcpu, VCPU_REGS_R15);
#endif

	regs->rip = kvm_rip_read(vcpu);
	regs->rflags = kvm_get_rflags(vcpu);

	vcpu_put(vcpu);

	return 0;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	vcpu_load(vcpu);

	kvm_register_write(vcpu, VCPU_REGS_RAX, regs->rax);
	kvm_register_write(vcpu, VCPU_REGS_RBX, regs->rbx);
	kvm_register_write(vcpu, VCPU_REGS_RCX, regs->rcx);
	kvm_register_write(vcpu, VCPU_REGS_RDX, regs->rdx);
	kvm_register_write(vcpu, VCPU_REGS_RSI, regs->rsi);
	kvm_register_write(vcpu, VCPU_REGS_RDI, regs->rdi);
	kvm_register_write(vcpu, VCPU_REGS_RSP, regs->rsp);
	kvm_register_write(vcpu, VCPU_REGS_RBP, regs->rbp);
#ifdef CONFIG_X86_64
	kvm_register_write(vcpu, VCPU_REGS_R8, regs->r8);
	kvm_register_write(vcpu, VCPU_REGS_R9, regs->r9);
	kvm_register_write(vcpu, VCPU_REGS_R10, regs->r10);
	kvm_register_write(vcpu, VCPU_REGS_R11, regs->r11);
	kvm_register_write(vcpu, VCPU_REGS_R12, regs->r12);
	kvm_register_write(vcpu, VCPU_REGS_R13, regs->r13);
	kvm_register_write(vcpu, VCPU_REGS_R14, regs->r14);
	kvm_register_write(vcpu, VCPU_REGS_R15, regs->r15);
#endif

	kvm_rip_write(vcpu, regs->rip);
	kvm_set_rflags(vcpu, regs->rflags);

	vcpu->arch.exception.pending = false;

	vcpu_put(vcpu);

	return 0;
}

void kvm_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l)
{
	struct kvm_segment cs;

	kvm_get_segment(vcpu, &cs, VCPU_SREG_CS);
	*db = cs.db;
	*l = cs.l;
}
EXPORT_SYMBOL_GPL(kvm_get_cs_db_l_bits);

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	struct desc_ptr dt;

	vcpu_load(vcpu);

	kvm_get_segment(vcpu, &sregs->cs, VCPU_SREG_CS);
	kvm_get_segment(vcpu, &sregs->ds, VCPU_SREG_DS);
	kvm_get_segment(vcpu, &sregs->es, VCPU_SREG_ES);
	kvm_get_segment(vcpu, &sregs->fs, VCPU_SREG_FS);
	kvm_get_segment(vcpu, &sregs->gs, VCPU_SREG_GS);
	kvm_get_segment(vcpu, &sregs->ss, VCPU_SREG_SS);

	kvm_get_segment(vcpu, &sregs->tr, VCPU_SREG_TR);
	kvm_get_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR);

	kvm_x86_ops->get_idt(vcpu, &dt);
	sregs->idt.limit = dt.size;
	sregs->idt.base = dt.address;
	kvm_x86_ops->get_gdt(vcpu, &dt);
	sregs->gdt.limit = dt.size;
	sregs->gdt.base = dt.address;

	sregs->cr0 = kvm_read_cr0(vcpu);
	sregs->cr2 = vcpu->arch.cr2;
	sregs->cr3 = vcpu->arch.cr3;
	sregs->cr4 = kvm_read_cr4(vcpu);
	sregs->cr8 = kvm_get_cr8(vcpu);
	sregs->efer = vcpu->arch.efer;
	sregs->apic_base = kvm_get_apic_base(vcpu);

	memset(sregs->interrupt_bitmap, 0, sizeof sregs->interrupt_bitmap);

	if (vcpu->arch.interrupt.pending && !vcpu->arch.interrupt.soft)
		set_bit(vcpu->arch.interrupt.nr,
			(unsigned long *)sregs->interrupt_bitmap);

	vcpu_put(vcpu);

	return 0;
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	vcpu_load(vcpu);
	mp_state->mp_state = vcpu->arch.mp_state;
	vcpu_put(vcpu);
	return 0;
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	vcpu_load(vcpu);
	vcpu->arch.mp_state = mp_state->mp_state;
	vcpu_put(vcpu);
	return 0;
}

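/*
 * Hardware task switches are not virtualized directly; instead the whole
 * task switch is replayed in the instruction emulator, which reads and
 * writes the TSS through the emulate_ops callbacks and hands back the
 * resulting eflags.
 */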
int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason,
		    bool has_error_code, u32 error_code)
{
	int cs_db, cs_l, ret;

	cache_all_regs(vcpu);

	kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);

	vcpu->arch.emulate_ctxt.vcpu = vcpu;
	vcpu->arch.emulate_ctxt.eflags = kvm_x86_ops->get_rflags(vcpu);
	vcpu->arch.emulate_ctxt.eip = kvm_rip_read(vcpu);
	vcpu->arch.emulate_ctxt.mode =
		(!is_protmode(vcpu)) ? X86EMUL_MODE_REAL :
		(vcpu->arch.emulate_ctxt.eflags & X86_EFLAGS_VM)
		? X86EMUL_MODE_VM86 : cs_l
		? X86EMUL_MODE_PROT64 : cs_db
		? X86EMUL_MODE_PROT32 : X86EMUL_MODE_PROT16;

	ret = emulator_task_switch(&vcpu->arch.emulate_ctxt, &emulate_ops,
				   tss_selector, reason, has_error_code,
				   error_code);

	if (ret)
		return EMULATE_FAIL;

	kvm_x86_ops->set_rflags(vcpu, vcpu->arch.emulate_ctxt.eflags);
	return EMULATE_DONE;
}
EXPORT_SYMBOL_GPL(kvm_task_switch);

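/*
 * Setting sregs may change paging-related state behind the MMU's back;
 * mmu_reset_needed tracks whether cr0/cr3/cr4/efer actually changed so
 * that kvm_mmu_reset_context() is only invoked when required.
 */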
int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	int mmu_reset_needed = 0;
	int pending_vec, max_bits;
	struct desc_ptr dt;

	vcpu_load(vcpu);

	dt.size = sregs->idt.limit;
	dt.address = sregs->idt.base;
	kvm_x86_ops->set_idt(vcpu, &dt);
	dt.size = sregs->gdt.limit;
	dt.address = sregs->gdt.base;
	kvm_x86_ops->set_gdt(vcpu, &dt);

	vcpu->arch.cr2 = sregs->cr2;
	mmu_reset_needed |= vcpu->arch.cr3 != sregs->cr3;
	vcpu->arch.cr3 = sregs->cr3;

	kvm_set_cr8(vcpu, sregs->cr8);

	mmu_reset_needed |= vcpu->arch.efer != sregs->efer;
	kvm_x86_ops->set_efer(vcpu, sregs->efer);
	kvm_set_apic_base(vcpu, sregs->apic_base);

	mmu_reset_needed |= kvm_read_cr0(vcpu) != sregs->cr0;
	kvm_x86_ops->set_cr0(vcpu, sregs->cr0);
	vcpu->arch.cr0 = sregs->cr0;

	mmu_reset_needed |= kvm_read_cr4(vcpu) != sregs->cr4;
	kvm_x86_ops->set_cr4(vcpu, sregs->cr4);
	if (!is_long_mode(vcpu) && is_pae(vcpu)) {
		load_pdptrs(vcpu, vcpu->arch.cr3);
		mmu_reset_needed = 1;
	}

	if (mmu_reset_needed)
		kvm_mmu_reset_context(vcpu);

	max_bits = (sizeof sregs->interrupt_bitmap) << 3;
	pending_vec = find_first_bit(
		(const unsigned long *)sregs->interrupt_bitmap, max_bits);
	if (pending_vec < max_bits) {
		kvm_queue_interrupt(vcpu, pending_vec, false);
		pr_debug("Set back pending irq %d\n", pending_vec);
		if (irqchip_in_kernel(vcpu->kvm))
			kvm_pic_clear_isr_ack(vcpu->kvm);
	}

	kvm_set_segment(vcpu, &sregs->cs, VCPU_SREG_CS);
	kvm_set_segment(vcpu, &sregs->ds, VCPU_SREG_DS);
	kvm_set_segment(vcpu, &sregs->es, VCPU_SREG_ES);
	kvm_set_segment(vcpu, &sregs->fs, VCPU_SREG_FS);
	kvm_set_segment(vcpu, &sregs->gs, VCPU_SREG_GS);
	kvm_set_segment(vcpu, &sregs->ss, VCPU_SREG_SS);

	kvm_set_segment(vcpu, &sregs->tr, VCPU_SREG_TR);
	kvm_set_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR);

	update_cr8_intercept(vcpu);

	/* Older userspace won't unhalt the vcpu on reset. */
	if (kvm_vcpu_is_bsp(vcpu) && kvm_rip_read(vcpu) == 0xfff0 &&
	    sregs->cs.selector == 0xf000 && sregs->cs.base == 0xffff0000 &&
	    !is_protmode(vcpu))
		vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;

	vcpu_put(vcpu);

	return 0;
}

int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	unsigned long rflags;
	int i, r;

	vcpu_load(vcpu);

	if (dbg->control & (KVM_GUESTDBG_INJECT_DB | KVM_GUESTDBG_INJECT_BP)) {
		r = -EBUSY;
		if (vcpu->arch.exception.pending)
			goto unlock_out;
		if (dbg->control & KVM_GUESTDBG_INJECT_DB)
			kvm_queue_exception(vcpu, DB_VECTOR);
		else
			kvm_queue_exception(vcpu, BP_VECTOR);
	}

	/*
	 * Read rflags as long as potentially injected trace flags are still
	 * filtered out.
	 */
	rflags = kvm_get_rflags(vcpu);

	vcpu->guest_debug = dbg->control;
	if (!(vcpu->guest_debug & KVM_GUESTDBG_ENABLE))
		vcpu->guest_debug = 0;

	if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) {
		for (i = 0; i < KVM_NR_DB_REGS; ++i)
			vcpu->arch.eff_db[i] = dbg->arch.debugreg[i];
		vcpu->arch.switch_db_regs =
			(dbg->arch.debugreg[7] & DR7_BP_EN_MASK);
	} else {
		for (i = 0; i < KVM_NR_DB_REGS; i++)
			vcpu->arch.eff_db[i] = vcpu->arch.db[i];
		vcpu->arch.switch_db_regs = (vcpu->arch.dr7 & DR7_BP_EN_MASK);
	}

	if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
		vcpu->arch.singlestep_rip = kvm_rip_read(vcpu) +
			get_segment_base(vcpu, VCPU_SREG_CS);

	/*
	 * Trigger an rflags update that will inject or remove the trace
	 * flags.
	 */
	kvm_set_rflags(vcpu, rflags);

	kvm_x86_ops->set_guest_debug(vcpu, dbg);

	r = 0;

unlock_out:
	vcpu_put(vcpu);

	return r;
}
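/*
 * Illustrative userspace sketch, not part of the original file:
 * enabling single-step debugging on a vcpu ("vcpu_fd" is
 * hypothetical):
 *
 *	struct kvm_guest_debug dbg = { 0 };
 *
 *	dbg.control = KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_SINGLESTEP;
 *	ioctl(vcpu_fd, KVM_SET_GUEST_DEBUG, &dbg);
 */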
/*
 * fxsave fpu state.  Taken from x86_64/processor.h.  To be killed when
 * we have asm/x86/processor.h
 */
struct fxsave {
	u16	cwd;
	u16	swd;
	u16	twd;
	u16	fop;
	u64	rip;
	u64	rdp;
	u32	mxcsr;
	u32	mxcsr_mask;
	u32	st_space[32];	/* 8*16 bytes for each FP-reg = 128 bytes */
#ifdef CONFIG_X86_64
	u32	xmm_space[64];	/* 16*16 bytes for each XMM-reg = 256 bytes */
#else
	u32	xmm_space[32];	/* 8*16 bytes for each XMM-reg = 128 bytes */
#endif
};
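/*
 * A minimal compile-time sanity sketch, not part of the original file:
 * the struct above mirrors the leading part of the hardware FXSAVE
 * area, so the FP register file must start at byte offset 32 (such a
 * check would have to live inside a function body):
 *
 *	BUILD_BUG_ON(offsetof(struct fxsave, st_space) != 32);
 */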
/*
 * Translate a guest virtual address to a guest physical address.
 */
int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	unsigned long vaddr = tr->linear_address;
	gpa_t gpa;
	int idx;

	vcpu_load(vcpu);
	idx = srcu_read_lock(&vcpu->kvm->srcu);
	gpa = kvm_mmu_gva_to_gpa_system(vcpu, vaddr, NULL);
	srcu_read_unlock(&vcpu->kvm->srcu, idx);
	tr->physical_address = gpa;
	tr->valid = gpa != UNMAPPED_GVA;
	tr->writeable = 1;
	tr->usermode = 0;
	vcpu_put(vcpu);

	return 0;
}
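/*
 * Illustrative userspace sketch, not part of the original file: asking
 * KVM to walk the guest page tables for a guest-virtual address
 * ("vcpu_fd" and "guest_va" are hypothetical):
 *
 *	struct kvm_translation tr = { .linear_address = guest_va };
 *
 *	ioctl(vcpu_fd, KVM_TRANSLATE, &tr);
 *	if (tr.valid)
 *		printf("gpa = 0x%llx\n",
 *		       (unsigned long long)tr.physical_address);
 */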
int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	struct fxsave *fxsave = (struct fxsave *)&vcpu->arch.guest_fx_image;

	vcpu_load(vcpu);

	memcpy(fpu->fpr, fxsave->st_space, 128);
	fpu->fcw = fxsave->cwd;
	fpu->fsw = fxsave->swd;
	fpu->ftwx = fxsave->twd;
	fpu->last_opcode = fxsave->fop;
	fpu->last_ip = fxsave->rip;
	fpu->last_dp = fxsave->rdp;
	memcpy(fpu->xmm, fxsave->xmm_space, sizeof fxsave->xmm_space);

	vcpu_put(vcpu);

	return 0;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	struct fxsave *fxsave = (struct fxsave *)&vcpu->arch.guest_fx_image;

	vcpu_load(vcpu);

	memcpy(fxsave->st_space, fpu->fpr, 128);
	fxsave->cwd = fpu->fcw;
	fxsave->swd = fpu->fsw;
	fxsave->twd = fpu->ftwx;
	fxsave->fop = fpu->last_opcode;
	fxsave->rip = fpu->last_ip;
	fxsave->rdp = fpu->last_dp;
	memcpy(fxsave->xmm_space, fpu->xmm, sizeof fxsave->xmm_space);

	vcpu_put(vcpu);

	return 0;
}
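/*
 * Illustrative userspace sketch, not part of the original file: the
 * ioctl pair above is what lets userspace save and restore FPU state,
 * e.g. when migrating a vcpu ("src_vcpu_fd" and "dst_vcpu_fd" are
 * hypothetical):
 *
 *	struct kvm_fpu fpu;
 *
 *	ioctl(src_vcpu_fd, KVM_GET_FPU, &fpu);	// save on the source
 *	ioctl(dst_vcpu_fd, KVM_SET_FPU, &fpu);	// restore on the target
 */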
void fx_init(struct kvm_vcpu *vcpu)
{
	unsigned after_mxcsr_mask;

	/*
	 * Touch the fpu the first time in non-atomic context: if this is
	 * the first fpu instruction, the exception handler fires before
	 * the instruction returns, and it may have to allocate memory
	 * with GFP_KERNEL.
	 */
	if (!used_math())
		kvm_fx_save(&vcpu->arch.host_fx_image);

	/* Initialize guest FPU by resetting ours and saving into guest's */
	preempt_disable();
	kvm_fx_save(&vcpu->arch.host_fx_image);
	kvm_fx_finit();
	kvm_fx_save(&vcpu->arch.guest_fx_image);
	kvm_fx_restore(&vcpu->arch.host_fx_image);
	preempt_enable();

	vcpu->arch.cr0 |= X86_CR0_ET;
	after_mxcsr_mask = offsetof(struct i387_fxsave_struct, st_space);
	vcpu->arch.guest_fx_image.mxcsr = 0x1f80;
	memset((void *)&vcpu->arch.guest_fx_image + after_mxcsr_mask,
	       0, sizeof(struct i387_fxsave_struct) - after_mxcsr_mask);
}
EXPORT_SYMBOL_GPL(fx_init);
void kvm_load_guest_fpu(struct kvm_vcpu *vcpu)
{
	if (vcpu->guest_fpu_loaded)
		return;

	vcpu->guest_fpu_loaded = 1;
	kvm_fx_save(&vcpu->arch.host_fx_image);
	kvm_fx_restore(&vcpu->arch.guest_fx_image);
	trace_kvm_fpu(1);
}

void kvm_put_guest_fpu(struct kvm_vcpu *vcpu)
{
	if (!vcpu->guest_fpu_loaded)
		return;

	vcpu->guest_fpu_loaded = 0;
	kvm_fx_save(&vcpu->arch.guest_fx_image);
	kvm_fx_restore(&vcpu->arch.host_fx_image);
	++vcpu->stat.fpu_reload;
	set_bit(KVM_REQ_DEACTIVATE_FPU, &vcpu->requests);
	trace_kvm_fpu(0);
}
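/*
 * Illustrative sketch, not part of the original file: the two helpers
 * above bracket guest execution, roughly:
 *
 *	kvm_load_guest_fpu(vcpu);	// before entering the guest
 *	...run the guest...
 *	kvm_put_guest_fpu(vcpu);	// when the guest FPU is given up
 *
 * Both are no-ops when the guest FPU is already in the requested
 * state, so callers may invoke them unconditionally.
 */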
void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
{
	if (vcpu->arch.time_page) {
		kvm_release_page_dirty(vcpu->arch.time_page);
		vcpu->arch.time_page = NULL;
	}

	kvm_x86_ops->vcpu_free(vcpu);
}

struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
				      unsigned int id)
{
	return kvm_x86_ops->vcpu_create(kvm, id);
}

int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
	int r;

	/* We do fxsave: this must be aligned. */
	BUG_ON((unsigned long)&vcpu->arch.host_fx_image & 0xF);

	vcpu->arch.mtrr_state.have_fixed = 1;
	vcpu_load(vcpu);
	r = kvm_arch_vcpu_reset(vcpu);
	if (r == 0)
		r = kvm_mmu_setup(vcpu);
	vcpu_put(vcpu);
	if (r < 0)
		goto free_vcpu;

	return 0;
free_vcpu:
	kvm_x86_ops->vcpu_free(vcpu);
	return r;
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	vcpu_load(vcpu);
	kvm_mmu_unload(vcpu);
	vcpu_put(vcpu);

	kvm_x86_ops->vcpu_free(vcpu);
}

int kvm_arch_vcpu_reset(struct kvm_vcpu *vcpu)
{
	vcpu->arch.nmi_pending = false;
	vcpu->arch.nmi_injected = false;

	vcpu->arch.switch_db_regs = 0;
	memset(vcpu->arch.db, 0, sizeof(vcpu->arch.db));
	vcpu->arch.dr6 = DR6_FIXED_1;
	vcpu->arch.dr7 = DR7_FIXED_1;

	return kvm_x86_ops->vcpu_reset(vcpu);
}
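/*
 * Illustrative note, not part of the original file: DR6_FIXED_1 and
 * DR7_FIXED_1 hold the debug-register bits that architecturally read
 * as 1 (assumed here to be 0xffff0ff0 and 0x00000400 respectively), so
 * a freshly reset vcpu presents valid, breakpoint-free debug state:
 *
 *	BUILD_BUG_ON(DR6_FIXED_1 != 0xffff0ff0);
 *	BUILD_BUG_ON(DR7_FIXED_1 != 0x00000400);
 */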
int kvm_arch_hardware_enable(void *garbage)
{
	/*
	 * Since this may be called from a hotplug notification,
	 * we can't get the CPU frequency directly.
	 */
	if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) {
		int cpu = raw_smp_processor_id();
		per_cpu(cpu_tsc_khz, cpu) = 0;
	}

	kvm_shared_msr_cpu_online();

	return kvm_x86_ops->hardware_enable(garbage);
}

void kvm_arch_hardware_disable(void *garbage)
{
	kvm_x86_ops->hardware_disable(garbage);
	drop_user_return_notifiers(garbage);
}

int kvm_arch_hardware_setup(void)
{
	return kvm_x86_ops->hardware_setup();
}

void kvm_arch_hardware_unsetup(void)
{
	kvm_x86_ops->hardware_unsetup();
}

void kvm_arch_check_processor_compat(void *rtn)
{
	kvm_x86_ops->check_processor_compatibility(rtn);
}
int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
	struct page *page;
	struct kvm *kvm;
	int r;

	BUG_ON(vcpu->kvm == NULL);
	kvm = vcpu->kvm;

	vcpu->arch.mmu.root_hpa = INVALID_PAGE;
	if (!irqchip_in_kernel(kvm) || kvm_vcpu_is_bsp(vcpu))
		vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
	else
		vcpu->arch.mp_state = KVM_MP_STATE_UNINITIALIZED;

	page = alloc_page(GFP_KERNEL | __GFP_ZERO);
	if (!page) {
		r = -ENOMEM;
		goto fail;
	}
	vcpu->arch.pio_data = page_address(page);

	r = kvm_mmu_create(vcpu);
	if (r < 0)
		goto fail_free_pio_data;

	if (irqchip_in_kernel(kvm)) {
		r = kvm_create_lapic(vcpu);
		if (r < 0)
			goto fail_mmu_destroy;
	}

	vcpu->arch.mce_banks = kzalloc(KVM_MAX_MCE_BANKS * sizeof(u64) * 4,
				       GFP_KERNEL);
	if (!vcpu->arch.mce_banks) {
		r = -ENOMEM;
		goto fail_free_lapic;
	}
	vcpu->arch.mcg_cap = KVM_MAX_MCE_BANKS;

	return 0;
fail_free_lapic:
	kvm_free_lapic(vcpu);
fail_mmu_destroy:
	kvm_mmu_destroy(vcpu);
fail_free_pio_data:
	free_page((unsigned long)vcpu->arch.pio_data);
fail:
	return r;
}
void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	int idx;

	kfree(vcpu->arch.mce_banks);
	kvm_free_lapic(vcpu);
	idx = srcu_read_lock(&vcpu->kvm->srcu);
	kvm_mmu_destroy(vcpu);
	srcu_read_unlock(&vcpu->kvm->srcu, idx);
	free_page((unsigned long)vcpu->arch.pio_data);
}

struct kvm *kvm_arch_create_vm(void)
{
	struct kvm *kvm = kzalloc(sizeof(struct kvm), GFP_KERNEL);

	if (!kvm)
		return ERR_PTR(-ENOMEM);

	kvm->arch.aliases = kzalloc(sizeof(struct kvm_mem_aliases), GFP_KERNEL);
	if (!kvm->arch.aliases) {
		kfree(kvm);
		return ERR_PTR(-ENOMEM);
	}

	INIT_LIST_HEAD(&kvm->arch.active_mmu_pages);
	INIT_LIST_HEAD(&kvm->arch.assigned_dev_head);

	/* Reserve bit 0 of irq_sources_bitmap for userspace irq source */
	set_bit(KVM_USERSPACE_IRQ_SOURCE_ID, &kvm->arch.irq_sources_bitmap);

	rdtscll(kvm->arch.vm_init_tsc);

	return kvm;
}
static void kvm_unload_vcpu_mmu(struct kvm_vcpu *vcpu)
{
	vcpu_load(vcpu);
	kvm_mmu_unload(vcpu);
	vcpu_put(vcpu);
}

static void kvm_free_vcpus(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

	/*
	 * Unpin any mmu pages first.
	 */
	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_unload_vcpu_mmu(vcpu);
	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_arch_vcpu_free(vcpu);

	mutex_lock(&kvm->lock);
	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
		kvm->vcpus[i] = NULL;

	atomic_set(&kvm->online_vcpus, 0);
	mutex_unlock(&kvm->lock);
}

void kvm_arch_sync_events(struct kvm *kvm)
{
	kvm_free_all_assigned_devices(kvm);
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
	kvm_iommu_unmap_guest(kvm);
	kvm_free_pit(kvm);
	kfree(kvm->arch.vpic);
	kfree(kvm->arch.vioapic);
	kvm_free_vcpus(kvm);
	kvm_free_physmem(kvm);
	if (kvm->arch.apic_access_page)
		put_page(kvm->arch.apic_access_page);
	if (kvm->arch.ept_identity_pagetable)
		put_page(kvm->arch.ept_identity_pagetable);
	cleanup_srcu_struct(&kvm->srcu);
	kfree(kvm->arch.aliases);
	kfree(kvm);
}
int kvm_arch_prepare_memory_region(struct kvm *kvm,
				   struct kvm_memory_slot *memslot,
				   struct kvm_memory_slot old,
				   struct kvm_userspace_memory_region *mem,
				   int user_alloc)
{
	int npages = memslot->npages;

	/*
	 * To keep backward compatibility with older userspace, x86 needs
	 * to handle the !user_alloc case.
	 */
	if (!user_alloc) {
		if (npages && !old.rmap) {
			unsigned long userspace_addr;

			down_write(&current->mm->mmap_sem);
			userspace_addr = do_mmap(NULL, 0,
						 npages * PAGE_SIZE,
						 PROT_READ | PROT_WRITE,
						 MAP_PRIVATE | MAP_ANONYMOUS,
						 0);
			up_write(&current->mm->mmap_sem);

			if (IS_ERR((void *)userspace_addr))
				return PTR_ERR((void *)userspace_addr);

			memslot->userspace_addr = userspace_addr;
		}
	}

	return 0;
}
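/*
 * Illustrative userspace sketch, not part of the original file: modern
 * userspace allocates guest memory itself and registers it, which is
 * the user_alloc path above ("vm_fd" and "mem_size" are hypothetical):
 *
 *	struct kvm_userspace_memory_region region = {
 *		.slot = 0,
 *		.guest_phys_addr = 0,
 *		.memory_size = mem_size,
 *		.userspace_addr = (__u64)mmap(NULL, mem_size,
 *					      PROT_READ | PROT_WRITE,
 *					      MAP_PRIVATE | MAP_ANONYMOUS,
 *					      -1, 0),
 *	};
 *
 *	ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &region);
 */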
void kvm_arch_commit_memory_region(struct kvm *kvm,
				   struct kvm_userspace_memory_region *mem,
				   struct kvm_memory_slot old,
				   int user_alloc)
{
	int npages = mem->memory_size >> PAGE_SHIFT;

	if (!user_alloc && !old.user_alloc && old.rmap && !npages) {
		int ret;

		down_write(&current->mm->mmap_sem);
		ret = do_munmap(current->mm, old.userspace_addr,
				old.npages * PAGE_SIZE);
		up_write(&current->mm->mmap_sem);
		if (ret < 0)
			printk(KERN_WARNING
			       "kvm_vm_ioctl_set_memory_region: "
			       "failed to munmap memory\n");
	}

	spin_lock(&kvm->mmu_lock);
	if (!kvm->arch.n_requested_mmu_pages) {
		unsigned int nr_mmu_pages = kvm_mmu_calculate_mmu_pages(kvm);
		kvm_mmu_change_mmu_pages(kvm, nr_mmu_pages);
	}

	kvm_mmu_slot_remove_write_access(kvm, mem->slot);
	spin_unlock(&kvm->mmu_lock);
}
void kvm_arch_flush_shadow(struct kvm *kvm)
{
	kvm_mmu_zap_all(kvm);
	kvm_reload_remote_mmus(kvm);
}

int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE
		|| vcpu->arch.mp_state == KVM_MP_STATE_SIPI_RECEIVED
		|| vcpu->arch.nmi_pending
		|| (kvm_arch_interrupt_allowed(vcpu) &&
		    kvm_cpu_has_interrupt(vcpu));
}

void kvm_vcpu_kick(struct kvm_vcpu *vcpu)
{
	int me;
	int cpu = vcpu->cpu;

	if (waitqueue_active(&vcpu->wq)) {
		wake_up_interruptible(&vcpu->wq);
		++vcpu->stat.halt_wakeup;
	}

	me = get_cpu();
	if (cpu != me && (unsigned)cpu < nr_cpu_ids && cpu_online(cpu))
		if (!test_and_set_bit(KVM_REQ_KICK, &vcpu->requests))
			smp_send_reschedule(cpu);
	put_cpu();
}
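/*
 * Illustrative sketch, not part of the original file: a kick typically
 * follows a posted request bit so a running vcpu exits guest mode and
 * notices it promptly (the request used here is only an example):
 *
 *	set_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests);
 *	kvm_vcpu_kick(vcpu);
 */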
int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu)
{
	return kvm_x86_ops->interrupt_allowed(vcpu);
}

bool kvm_is_linear_rip(struct kvm_vcpu *vcpu, unsigned long linear_rip)
{
	unsigned long current_rip = kvm_rip_read(vcpu) +
		get_segment_base(vcpu, VCPU_SREG_CS);

	return current_rip == linear_rip;
}
EXPORT_SYMBOL_GPL(kvm_is_linear_rip);

unsigned long kvm_get_rflags(struct kvm_vcpu *vcpu)
{
	unsigned long rflags;

	rflags = kvm_x86_ops->get_rflags(vcpu);
	if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
		rflags &= ~X86_EFLAGS_TF;
	return rflags;
}
EXPORT_SYMBOL_GPL(kvm_get_rflags);

void kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
{
	if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP &&
	    kvm_is_linear_rip(vcpu, vcpu->arch.singlestep_rip))
		rflags |= X86_EFLAGS_TF;
	kvm_x86_ops->set_rflags(vcpu, rflags);
}
EXPORT_SYMBOL_GPL(kvm_set_rflags);
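/*
 * Illustrative note, not part of the original file: these accessors
 * hide a host-injected TF from callers, so a read-modify-write round
 * trip stays safe while single-stepping:
 *
 *	unsigned long rflags = kvm_get_rflags(vcpu);	// TF filtered out
 *	rflags |= X86_EFLAGS_IF;			// example change
 *	kvm_set_rflags(vcpu, rflags);	// TF re-added if still stepping
 */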
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_exit);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_inj_virq);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_page_fault);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_msr);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_cr);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_vmrun);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_vmexit);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_vmexit_inject);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_intr_vmexit);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_invlpga);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_skinit);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_intercepts);