x86.c

/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * derived from drivers/kvm/kvm_main.c
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright (C) 2008 Qumranet, Inc.
 * Copyright IBM Corporation, 2008
 *
 * Authors:
 *   Avi Kivity   <avi@qumranet.com>
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Amit Shah    <amit.shah@qumranet.com>
 *   Ben-Ami Yassour <benami@il.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 *
 */

#include <linux/kvm_host.h>
#include "irq.h"
#include "mmu.h"
#include "i8254.h"
#include "tss.h"
#include "kvm_cache_regs.h"
#include "x86.h"
#include <linux/clocksource.h>
#include <linux/interrupt.h>
#include <linux/kvm.h>
#include <linux/fs.h>
#include <linux/vmalloc.h>
#include <linux/module.h>
#include <linux/mman.h>
#include <linux/highmem.h>
#include <linux/iommu.h>
#include <linux/intel-iommu.h>
#include <linux/cpufreq.h>

#include <asm/uaccess.h>
#include <asm/msr.h>
#include <asm/desc.h>
#include <asm/mtrr.h>

#define MAX_IO_MSRS 256
#define CR0_RESERVED_BITS						\
	(~(unsigned long)(X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS \
			  | X86_CR0_ET | X86_CR0_NE | X86_CR0_WP | X86_CR0_AM \
			  | X86_CR0_NW | X86_CR0_CD | X86_CR0_PG))
#define CR4_RESERVED_BITS						\
	(~(unsigned long)(X86_CR4_VME | X86_CR4_PVI | X86_CR4_TSD | X86_CR4_DE\
			  | X86_CR4_PSE | X86_CR4_PAE | X86_CR4_MCE	\
			  | X86_CR4_PGE | X86_CR4_PCE | X86_CR4_OSFXSR	\
			  | X86_CR4_OSXMMEXCPT | X86_CR4_VMXE))

#define CR8_RESERVED_BITS (~(unsigned long)X86_CR8_TPR)

/* EFER defaults:
 * - enable syscall per default because it's emulated by KVM
 * - enable LME and LMA per default on 64 bit KVM
 */
#ifdef CONFIG_X86_64
static u64 __read_mostly efer_reserved_bits = 0xfffffffffffffafeULL;
#else
static u64 __read_mostly efer_reserved_bits = 0xfffffffffffffffeULL;
#endif

#define VM_STAT(x) offsetof(struct kvm, stat.x), KVM_STAT_VM
#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU

static int kvm_dev_ioctl_get_supported_cpuid(struct kvm_cpuid2 *cpuid,
				    struct kvm_cpuid_entry2 __user *entries);
struct kvm_cpuid_entry2 *kvm_find_cpuid_entry(struct kvm_vcpu *vcpu,
					      u32 function, u32 index);

struct kvm_x86_ops *kvm_x86_ops;
EXPORT_SYMBOL_GPL(kvm_x86_ops);

struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "pf_fixed", VCPU_STAT(pf_fixed) },
	{ "pf_guest", VCPU_STAT(pf_guest) },
	{ "tlb_flush", VCPU_STAT(tlb_flush) },
	{ "invlpg", VCPU_STAT(invlpg) },
	{ "exits", VCPU_STAT(exits) },
	{ "io_exits", VCPU_STAT(io_exits) },
	{ "mmio_exits", VCPU_STAT(mmio_exits) },
	{ "signal_exits", VCPU_STAT(signal_exits) },
	{ "irq_window", VCPU_STAT(irq_window_exits) },
	{ "nmi_window", VCPU_STAT(nmi_window_exits) },
	{ "halt_exits", VCPU_STAT(halt_exits) },
	{ "halt_wakeup", VCPU_STAT(halt_wakeup) },
	{ "hypercalls", VCPU_STAT(hypercalls) },
	{ "request_irq", VCPU_STAT(request_irq_exits) },
	{ "request_nmi", VCPU_STAT(request_nmi_exits) },
	{ "irq_exits", VCPU_STAT(irq_exits) },
	{ "host_state_reload", VCPU_STAT(host_state_reload) },
	{ "efer_reload", VCPU_STAT(efer_reload) },
	{ "fpu_reload", VCPU_STAT(fpu_reload) },
	{ "insn_emulation", VCPU_STAT(insn_emulation) },
	{ "insn_emulation_fail", VCPU_STAT(insn_emulation_fail) },
	{ "irq_injections", VCPU_STAT(irq_injections) },
	{ "nmi_injections", VCPU_STAT(nmi_injections) },
	{ "mmu_shadow_zapped", VM_STAT(mmu_shadow_zapped) },
	{ "mmu_pte_write", VM_STAT(mmu_pte_write) },
	{ "mmu_pte_updated", VM_STAT(mmu_pte_updated) },
	{ "mmu_pde_zapped", VM_STAT(mmu_pde_zapped) },
	{ "mmu_flooded", VM_STAT(mmu_flooded) },
	{ "mmu_recycled", VM_STAT(mmu_recycled) },
	{ "mmu_cache_miss", VM_STAT(mmu_cache_miss) },
	{ "mmu_unsync", VM_STAT(mmu_unsync) },
	{ "mmu_unsync_global", VM_STAT(mmu_unsync_global) },
	{ "remote_tlb_flush", VM_STAT(remote_tlb_flush) },
	{ "largepages", VM_STAT(lpages) },
	{ NULL }
};
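
/*
 * segment_base() returns the base address of the segment referenced by
 * 'selector': it looks the descriptor up in the GDT (or, for selectors with
 * the TI bit set, in the current LDT) and assembles the base from the
 * descriptor's base0/base1/base2 fields, plus base3 on 64-bit hosts for
 * system descriptors such as LDT/TSS.
 */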
unsigned long segment_base(u16 selector)
{
	struct descriptor_table gdt;
	struct desc_struct *d;
	unsigned long table_base;
	unsigned long v;

	if (selector == 0)
		return 0;

	asm("sgdt %0" : "=m"(gdt));
	table_base = gdt.base;

	if (selector & 4) {		/* from ldt */
		u16 ldt_selector;

		asm("sldt %0" : "=g"(ldt_selector));
		table_base = segment_base(ldt_selector);
	}
	d = (struct desc_struct *)(table_base + (selector & ~7));
	v = d->base0 | ((unsigned long)d->base1 << 16) |
		((unsigned long)d->base2 << 24);
#ifdef CONFIG_X86_64
	if (d->s == 0 && (d->type == 2 || d->type == 9 || d->type == 11))
		v |= ((unsigned long)((struct ldttss_desc64 *)d)->base3) << 32;
#endif
	return v;
}
EXPORT_SYMBOL_GPL(segment_base);
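
/*
 * vcpu->arch.apic_base is the cached APIC base whether the local APIC is
 * emulated in the kernel or in userspace, so both branches below currently
 * return the same field.
 */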
u64 kvm_get_apic_base(struct kvm_vcpu *vcpu)
{
	if (irqchip_in_kernel(vcpu->kvm))
		return vcpu->arch.apic_base;
	else
		return vcpu->arch.apic_base;
}
EXPORT_SYMBOL_GPL(kvm_get_apic_base);

void kvm_set_apic_base(struct kvm_vcpu *vcpu, u64 data)
{
	/* TODO: reserve bits check */
	if (irqchip_in_kernel(vcpu->kvm))
		kvm_lapic_set_base(vcpu, data);
	else
		vcpu->arch.apic_base = data;
}
EXPORT_SYMBOL_GPL(kvm_set_apic_base);

void kvm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr)
{
	WARN_ON(vcpu->arch.exception.pending);
	vcpu->arch.exception.pending = true;
	vcpu->arch.exception.has_error_code = false;
	vcpu->arch.exception.nr = nr;
}
EXPORT_SYMBOL_GPL(kvm_queue_exception);
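
/*
 * Queue a page fault for the guest.  If an exception is already pending,
 * escalate as real hardware would for contributory faults: a #PF while a
 * #PF is pending becomes #DF, and a fault while #DF is pending requests a
 * triple fault (guest shutdown).
 */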
void kvm_inject_page_fault(struct kvm_vcpu *vcpu, unsigned long addr,
			   u32 error_code)
{
	++vcpu->stat.pf_guest;

	if (vcpu->arch.exception.pending) {
		if (vcpu->arch.exception.nr == PF_VECTOR) {
			printk(KERN_DEBUG "kvm: inject_page_fault:"
			       " double fault 0x%lx\n", addr);
			vcpu->arch.exception.nr = DF_VECTOR;
			vcpu->arch.exception.error_code = 0;
		} else if (vcpu->arch.exception.nr == DF_VECTOR) {
			/* triple fault -> shutdown */
			set_bit(KVM_REQ_TRIPLE_FAULT, &vcpu->requests);
		}
		return;
	}
	vcpu->arch.cr2 = addr;
	kvm_queue_exception_e(vcpu, PF_VECTOR, error_code);
}

void kvm_inject_nmi(struct kvm_vcpu *vcpu)
{
	vcpu->arch.nmi_pending = 1;
}
EXPORT_SYMBOL_GPL(kvm_inject_nmi);

void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code)
{
	WARN_ON(vcpu->arch.exception.pending);
	vcpu->arch.exception.pending = true;
	vcpu->arch.exception.has_error_code = true;
	vcpu->arch.exception.nr = nr;
	vcpu->arch.exception.error_code = error_code;
}
EXPORT_SYMBOL_GPL(kvm_queue_exception_e);

static void __queue_exception(struct kvm_vcpu *vcpu)
{
	kvm_x86_ops->queue_exception(vcpu, vcpu->arch.exception.nr,
				     vcpu->arch.exception.has_error_code,
				     vcpu->arch.exception.error_code);
}

/*
 * Load the pae pdptrs.  Return true if they are all valid.
 */
int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3)
{
	gfn_t pdpt_gfn = cr3 >> PAGE_SHIFT;
	unsigned offset = ((cr3 & (PAGE_SIZE-1)) >> 5) << 2;
	int i;
	int ret;
	u64 pdpte[ARRAY_SIZE(vcpu->arch.pdptrs)];

	ret = kvm_read_guest_page(vcpu->kvm, pdpt_gfn, pdpte,
				  offset * sizeof(u64), sizeof(pdpte));
	if (ret < 0) {
		ret = 0;
		goto out;
	}
	for (i = 0; i < ARRAY_SIZE(pdpte); ++i) {
		if ((pdpte[i] & 1) && (pdpte[i] & 0xfffffff0000001e6ull)) {
			ret = 0;
			goto out;
		}
	}
	ret = 1;

	memcpy(vcpu->arch.pdptrs, pdpte, sizeof(vcpu->arch.pdptrs));
out:
	return ret;
}
EXPORT_SYMBOL_GPL(load_pdptrs);
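
/*
 * pdptrs_changed() re-reads the four PDPTEs that CR3 points at from guest
 * memory and compares them with the copy cached in vcpu->arch.pdptrs.  It is
 * only meaningful for 32-bit PAE paging; in long mode or non-PAE mode it
 * reports "not changed".
 */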
static bool pdptrs_changed(struct kvm_vcpu *vcpu)
{
	u64 pdpte[ARRAY_SIZE(vcpu->arch.pdptrs)];
	bool changed = true;
	int r;

	if (is_long_mode(vcpu) || !is_pae(vcpu))
		return false;

	r = kvm_read_guest(vcpu->kvm, vcpu->arch.cr3 & ~31u, pdpte, sizeof(pdpte));
	if (r < 0)
		goto out;
	changed = memcmp(pdpte, vcpu->arch.pdptrs, sizeof(pdpte)) != 0;
out:
	return changed;
}
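
/*
 * kvm_set_cr0() emulates a guest write to CR0.  Combinations a real CPU
 * would fault on inject #GP instead: setting reserved bits, NW without CD,
 * PG without PE, and enabling paging while EFER.LME is set without PAE or
 * with CS.L == 1.  When entering PAE paging the PDPTRs are validated too.
 * On success the value is handed to the vendor module and the MMU context
 * is rebuilt.
 */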
void kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
{
	if (cr0 & CR0_RESERVED_BITS) {
		printk(KERN_DEBUG "set_cr0: 0x%lx #GP, reserved bits 0x%lx\n",
		       cr0, vcpu->arch.cr0);
		kvm_inject_gp(vcpu, 0);
		return;
	}

	if ((cr0 & X86_CR0_NW) && !(cr0 & X86_CR0_CD)) {
		printk(KERN_DEBUG "set_cr0: #GP, CD == 0 && NW == 1\n");
		kvm_inject_gp(vcpu, 0);
		return;
	}

	if ((cr0 & X86_CR0_PG) && !(cr0 & X86_CR0_PE)) {
		printk(KERN_DEBUG "set_cr0: #GP, set PG flag "
		       "and a clear PE flag\n");
		kvm_inject_gp(vcpu, 0);
		return;
	}

	if (!is_paging(vcpu) && (cr0 & X86_CR0_PG)) {
#ifdef CONFIG_X86_64
		if ((vcpu->arch.shadow_efer & EFER_LME)) {
			int cs_db, cs_l;

			if (!is_pae(vcpu)) {
				printk(KERN_DEBUG "set_cr0: #GP, start paging "
				       "in long mode while PAE is disabled\n");
				kvm_inject_gp(vcpu, 0);
				return;
			}
			kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
			if (cs_l) {
				printk(KERN_DEBUG "set_cr0: #GP, start paging "
				       "in long mode while CS.L == 1\n");
				kvm_inject_gp(vcpu, 0);
				return;
			}
		} else
#endif
		if (is_pae(vcpu) && !load_pdptrs(vcpu, vcpu->arch.cr3)) {
			printk(KERN_DEBUG "set_cr0: #GP, pdptrs "
			       "reserved bits\n");
			kvm_inject_gp(vcpu, 0);
			return;
		}
	}

	kvm_x86_ops->set_cr0(vcpu, cr0);
	vcpu->arch.cr0 = cr0;

	kvm_mmu_sync_global(vcpu);
	kvm_mmu_reset_context(vcpu);
	return;
}
EXPORT_SYMBOL_GPL(kvm_set_cr0);

void kvm_lmsw(struct kvm_vcpu *vcpu, unsigned long msw)
{
	kvm_set_cr0(vcpu, (vcpu->arch.cr0 & ~0x0ful) | (msw & 0x0f));
	KVMTRACE_1D(LMSW, vcpu,
		    (u32)((vcpu->arch.cr0 & ~0x0ful) | (msw & 0x0f)),
		    handler);
}
EXPORT_SYMBOL_GPL(kvm_lmsw);

void kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
{
	unsigned long old_cr4 = vcpu->arch.cr4;
	unsigned long pdptr_bits = X86_CR4_PGE | X86_CR4_PSE | X86_CR4_PAE;

	if (cr4 & CR4_RESERVED_BITS) {
		printk(KERN_DEBUG "set_cr4: #GP, reserved bits\n");
		kvm_inject_gp(vcpu, 0);
		return;
	}

	if (is_long_mode(vcpu)) {
		if (!(cr4 & X86_CR4_PAE)) {
			printk(KERN_DEBUG "set_cr4: #GP, clearing PAE while "
			       "in long mode\n");
			kvm_inject_gp(vcpu, 0);
			return;
		}
	} else if (is_paging(vcpu) && (cr4 & X86_CR4_PAE)
		   && ((cr4 ^ old_cr4) & pdptr_bits)
		   && !load_pdptrs(vcpu, vcpu->arch.cr3)) {
		printk(KERN_DEBUG "set_cr4: #GP, pdptrs reserved bits\n");
		kvm_inject_gp(vcpu, 0);
		return;
	}

	if (cr4 & X86_CR4_VMXE) {
		printk(KERN_DEBUG "set_cr4: #GP, setting VMXE\n");
		kvm_inject_gp(vcpu, 0);
		return;
	}
	kvm_x86_ops->set_cr4(vcpu, cr4);
	vcpu->arch.cr4 = cr4;
	vcpu->arch.mmu.base_role.cr4_pge = (cr4 & X86_CR4_PGE) && !tdp_enabled;
	kvm_mmu_sync_global(vcpu);
	kvm_mmu_reset_context(vcpu);
}
EXPORT_SYMBOL_GPL(kvm_set_cr4);
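
/*
 * kvm_set_cr3() emulates a guest CR3 write.  Reloading the current value
 * with unchanged PDPTEs only requires a root sync and TLB flush; otherwise
 * the value is checked for reserved bits (long mode and PAE) and, finally,
 * for whether it maps to guest memory at all before the MMU is told about
 * the new root.
 */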
void kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
{
	if (cr3 == vcpu->arch.cr3 && !pdptrs_changed(vcpu)) {
		kvm_mmu_sync_roots(vcpu);
		kvm_mmu_flush_tlb(vcpu);
		return;
	}

	if (is_long_mode(vcpu)) {
		if (cr3 & CR3_L_MODE_RESERVED_BITS) {
			printk(KERN_DEBUG "set_cr3: #GP, reserved bits\n");
			kvm_inject_gp(vcpu, 0);
			return;
		}
	} else {
		if (is_pae(vcpu)) {
			if (cr3 & CR3_PAE_RESERVED_BITS) {
				printk(KERN_DEBUG
				       "set_cr3: #GP, reserved bits\n");
				kvm_inject_gp(vcpu, 0);
				return;
			}
			if (is_paging(vcpu) && !load_pdptrs(vcpu, cr3)) {
				printk(KERN_DEBUG "set_cr3: #GP, pdptrs "
				       "reserved bits\n");
				kvm_inject_gp(vcpu, 0);
				return;
			}
		}
		/*
		 * We don't check reserved bits in nonpae mode, because
		 * this isn't enforced, and VMware depends on this.
		 */
	}

	/*
	 * Does the new cr3 value map to physical memory? (Note, we
	 * catch an invalid cr3 even in real-mode, because it would
	 * cause trouble later on when we turn on paging anyway.)
	 *
	 * A real CPU would silently accept an invalid cr3 and would
	 * attempt to use it - with largely undefined (and often hard
	 * to debug) behavior on the guest side.
	 */
	if (unlikely(!gfn_to_memslot(vcpu->kvm, cr3 >> PAGE_SHIFT)))
		kvm_inject_gp(vcpu, 0);
	else {
		vcpu->arch.cr3 = cr3;
		vcpu->arch.mmu.new_cr3(vcpu);
	}
}
EXPORT_SYMBOL_GPL(kvm_set_cr3);

void kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8)
{
	if (cr8 & CR8_RESERVED_BITS) {
		printk(KERN_DEBUG "set_cr8: #GP, reserved bits 0x%lx\n", cr8);
		kvm_inject_gp(vcpu, 0);
		return;
	}
	if (irqchip_in_kernel(vcpu->kvm))
		kvm_lapic_set_tpr(vcpu, cr8);
	else
		vcpu->arch.cr8 = cr8;
}
EXPORT_SYMBOL_GPL(kvm_set_cr8);

unsigned long kvm_get_cr8(struct kvm_vcpu *vcpu)
{
	if (irqchip_in_kernel(vcpu->kvm))
		return kvm_lapic_get_cr8(vcpu);
	else
		return vcpu->arch.cr8;
}
EXPORT_SYMBOL_GPL(kvm_get_cr8);

static inline u32 bit(int bitno)
{
	return 1 << (bitno & 31);
}

/*
 * List of msr numbers which we expose to userspace through KVM_GET_MSRS
 * and KVM_SET_MSRS, and KVM_GET_MSR_INDEX_LIST.
 *
 * This list is modified at module load time to reflect the
 * capabilities of the host cpu.
 */
static u32 msrs_to_save[] = {
	MSR_IA32_SYSENTER_CS, MSR_IA32_SYSENTER_ESP, MSR_IA32_SYSENTER_EIP,
	MSR_K6_STAR,
#ifdef CONFIG_X86_64
	MSR_CSTAR, MSR_KERNEL_GS_BASE, MSR_SYSCALL_MASK, MSR_LSTAR,
#endif
	MSR_IA32_TIME_STAMP_COUNTER, MSR_KVM_SYSTEM_TIME, MSR_KVM_WALL_CLOCK,
	MSR_IA32_PERF_STATUS, MSR_IA32_CR_PAT, MSR_VM_HSAVE_PA
};

static unsigned num_msrs_to_save;

static u32 emulated_msrs[] = {
	MSR_IA32_MISC_ENABLE,
};
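
/*
 * set_efer() validates a guest EFER write before handing it to the vendor
 * module: reserved bits, toggling LME while paging is enabled, and enabling
 * FFXSR or SVME without the matching CPUID capability all inject #GP.  The
 * LMA bit is not taken from the guest value; the previously shadowed LMA is
 * preserved.
 */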
static void set_efer(struct kvm_vcpu *vcpu, u64 efer)
{
	if (efer & efer_reserved_bits) {
		printk(KERN_DEBUG "set_efer: 0x%llx #GP, reserved bits\n",
		       efer);
		kvm_inject_gp(vcpu, 0);
		return;
	}

	if (is_paging(vcpu)
	    && (vcpu->arch.shadow_efer & EFER_LME) != (efer & EFER_LME)) {
		printk(KERN_DEBUG "set_efer: #GP, change LME while paging\n");
		kvm_inject_gp(vcpu, 0);
		return;
	}

	if (efer & EFER_FFXSR) {
		struct kvm_cpuid_entry2 *feat;

		feat = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
		if (!feat || !(feat->edx & bit(X86_FEATURE_FXSR_OPT))) {
			printk(KERN_DEBUG "set_efer: #GP, enable FFXSR w/o CPUID capability\n");
			kvm_inject_gp(vcpu, 0);
			return;
		}
	}

	if (efer & EFER_SVME) {
		struct kvm_cpuid_entry2 *feat;

		feat = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
		if (!feat || !(feat->ecx & bit(X86_FEATURE_SVM))) {
			printk(KERN_DEBUG "set_efer: #GP, enable SVM w/o SVM\n");
			kvm_inject_gp(vcpu, 0);
			return;
		}
	}

	kvm_x86_ops->set_efer(vcpu, efer);

	efer &= ~EFER_LMA;
	efer |= vcpu->arch.shadow_efer & EFER_LMA;

	vcpu->arch.shadow_efer = efer;
}

void kvm_enable_efer_bits(u64 mask)
{
	efer_reserved_bits &= ~mask;
}
EXPORT_SYMBOL_GPL(kvm_enable_efer_bits);

/*
 * Writes msr value into the appropriate "register".
 * Returns 0 on success, non-0 otherwise.
 * Assumes vcpu_load() was already called.
 */
int kvm_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
{
	return kvm_x86_ops->set_msr(vcpu, msr_index, data);
}

/*
 * Adapt set_msr() to msr_io()'s calling convention
 */
static int do_set_msr(struct kvm_vcpu *vcpu, unsigned index, u64 *data)
{
	return kvm_set_msr(vcpu, index, *data);
}

static void kvm_write_wall_clock(struct kvm *kvm, gpa_t wall_clock)
{
	static int version;
	struct pvclock_wall_clock wc;
	struct timespec now, sys, boot;

	if (!wall_clock)
		return;

	version++;

	kvm_write_guest(kvm, wall_clock, &version, sizeof(version));

	/*
	 * The guest calculates current wall clock time by adding
	 * system time (updated by kvm_write_guest_time below) to the
	 * wall clock specified here.  guest system time equals host
	 * system time for us, thus we must fill in host boot time here.
	 */
	now = current_kernel_time();
	ktime_get_ts(&sys);
	boot = ns_to_timespec(timespec_to_ns(&now) - timespec_to_ns(&sys));

	wc.sec = boot.tv_sec;
	wc.nsec = boot.tv_nsec;
	wc.version = version;

	kvm_write_guest(kvm, wall_clock, &wc, sizeof(wc));

	version++;
	kvm_write_guest(kvm, wall_clock, &version, sizeof(version));
}

static uint32_t div_frac(uint32_t dividend, uint32_t divisor)
{
	uint32_t quotient, remainder;

	/* Don't try to replace with do_div(), this one calculates
	 * "(dividend << 32) / divisor" */
	__asm__ ( "divl %4"
		  : "=a" (quotient), "=d" (remainder)
		  : "0" (0), "1" (dividend), "r" (divisor) );
	return quotient;
}
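
/*
 * kvm_set_time_scale() derives the pvclock scaling parameters from the host
 * TSC frequency: tsc_khz * 1000 is normalized into the range (nsecs, 2*nsecs]
 * while accumulating a binary shift, and tsc_to_system_mul is the 0.32
 * fixed-point ratio nsecs / tps32 computed by div_frac().  The guest side of
 * the pvclock ABI then converts a TSC delta to nanoseconds roughly as
 *
 *	ns = ((delta << tsc_shift) * tsc_to_system_mul) >> 32
 *
 * (shifting right instead when tsc_shift is negative).
 */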
static void kvm_set_time_scale(uint32_t tsc_khz, struct pvclock_vcpu_time_info *hv_clock)
{
	uint64_t nsecs = 1000000000LL;
	int32_t  shift = 0;
	uint64_t tps64;
	uint32_t tps32;

	tps64 = tsc_khz * 1000LL;
	while (tps64 > nsecs*2) {
		tps64 >>= 1;
		shift--;
	}

	tps32 = (uint32_t)tps64;
	while (tps32 <= (uint32_t)nsecs) {
		tps32 <<= 1;
		shift++;
	}

	hv_clock->tsc_shift = shift;
	hv_clock->tsc_to_system_mul = div_frac(nsecs, tps32);

	pr_debug("%s: tsc_khz %u, tsc_shift %d, tsc_mul %u\n",
		 __func__, tsc_khz, hv_clock->tsc_shift,
		 hv_clock->tsc_to_system_mul);
}

static DEFINE_PER_CPU(unsigned long, cpu_tsc_khz);
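
/*
 * kvm_write_guest_time() refreshes the per-vcpu pvclock area: it rescales the
 * clock if this CPU's TSC frequency changed, samples the TSC and the
 * monotonic clock with interrupts disabled so the pair stays consistent,
 * bumps the version counter by 2 (an odd version tells the guest an update
 * is in progress), and copies the structure into the guest page registered
 * via MSR_KVM_SYSTEM_TIME.
 */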
static void kvm_write_guest_time(struct kvm_vcpu *v)
{
	struct timespec ts;
	unsigned long flags;
	struct kvm_vcpu_arch *vcpu = &v->arch;
	void *shared_kaddr;

	if ((!vcpu->time_page))
		return;

	if (unlikely(vcpu->hv_clock_tsc_khz != __get_cpu_var(cpu_tsc_khz))) {
		kvm_set_time_scale(__get_cpu_var(cpu_tsc_khz), &vcpu->hv_clock);
		vcpu->hv_clock_tsc_khz = __get_cpu_var(cpu_tsc_khz);
	}

	/* Keep irq disabled to prevent changes to the clock */
	local_irq_save(flags);
	kvm_get_msr(v, MSR_IA32_TIME_STAMP_COUNTER,
		    &vcpu->hv_clock.tsc_timestamp);
	ktime_get_ts(&ts);
	local_irq_restore(flags);

	/* With all the info we got, fill in the values */
	vcpu->hv_clock.system_time = ts.tv_nsec +
				     (NSEC_PER_SEC * (u64)ts.tv_sec);
	/*
	 * The interface expects us to write an even number signaling that the
	 * update is finished. Since the guest won't see the intermediate
	 * state, we just increase by 2 at the end.
	 */
	vcpu->hv_clock.version += 2;

	shared_kaddr = kmap_atomic(vcpu->time_page, KM_USER0);

	memcpy(shared_kaddr + vcpu->time_offset, &vcpu->hv_clock,
	       sizeof(vcpu->hv_clock));

	kunmap_atomic(shared_kaddr, KM_USER0);

	mark_page_dirty(v->kvm, vcpu->time >> PAGE_SHIFT);
}

static int kvm_request_guest_time_update(struct kvm_vcpu *v)
{
	struct kvm_vcpu_arch *vcpu = &v->arch;

	if (!vcpu->time_page)
		return 0;
	set_bit(KVM_REQ_KVMCLOCK_UPDATE, &v->requests);
	return 1;
}

static bool msr_mtrr_valid(unsigned msr)
{
	switch (msr) {
	case 0x200 ... 0x200 + 2 * KVM_NR_VAR_MTRR - 1:
	case MSR_MTRRfix64K_00000:
	case MSR_MTRRfix16K_80000:
	case MSR_MTRRfix16K_A0000:
	case MSR_MTRRfix4K_C0000:
	case MSR_MTRRfix4K_C8000:
	case MSR_MTRRfix4K_D0000:
	case MSR_MTRRfix4K_D8000:
	case MSR_MTRRfix4K_E0000:
	case MSR_MTRRfix4K_E8000:
	case MSR_MTRRfix4K_F0000:
	case MSR_MTRRfix4K_F8000:
	case MSR_MTRRdefType:
	case MSR_IA32_CR_PAT:
		return true;
	case 0x2f8:
		return true;
	}
	return false;
}
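
/*
 * set_msr_mtrr()/get_msr_mtrr() back the guest MTRR MSRs with
 * vcpu->arch.mtrr_state.  The fixed-range array is laid out as p[0] for the
 * single 64K range, p[1..2] for the two 16K ranges and p[3..10] for the
 * eight 4K ranges; variable-range MSRs come in base/mask pairs starting at
 * MSR 0x200.
 */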
static int set_msr_mtrr(struct kvm_vcpu *vcpu, u32 msr, u64 data)
{
	u64 *p = (u64 *)&vcpu->arch.mtrr_state.fixed_ranges;

	if (!msr_mtrr_valid(msr))
		return 1;

	if (msr == MSR_MTRRdefType) {
		vcpu->arch.mtrr_state.def_type = data;
		vcpu->arch.mtrr_state.enabled = (data & 0xc00) >> 10;
	} else if (msr == MSR_MTRRfix64K_00000)
		p[0] = data;
	else if (msr == MSR_MTRRfix16K_80000 || msr == MSR_MTRRfix16K_A0000)
		p[1 + msr - MSR_MTRRfix16K_80000] = data;
	else if (msr >= MSR_MTRRfix4K_C0000 && msr <= MSR_MTRRfix4K_F8000)
		p[3 + msr - MSR_MTRRfix4K_C0000] = data;
	else if (msr == MSR_IA32_CR_PAT)
		vcpu->arch.pat = data;
	else {	/* Variable MTRRs */
		int idx, is_mtrr_mask;
		u64 *pt;

		idx = (msr - 0x200) / 2;
		is_mtrr_mask = msr - 0x200 - 2 * idx;
		if (!is_mtrr_mask)
			pt =
			  (u64 *)&vcpu->arch.mtrr_state.var_ranges[idx].base_lo;
		else
			pt =
			  (u64 *)&vcpu->arch.mtrr_state.var_ranges[idx].mask_lo;
		*pt = data;
	}

	kvm_mmu_reset_context(vcpu);
	return 0;
}

int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)
{
	switch (msr) {
	case MSR_EFER:
		set_efer(vcpu, data);
		break;
	case MSR_IA32_MC0_STATUS:
		pr_unimpl(vcpu, "%s: MSR_IA32_MC0_STATUS 0x%llx, nop\n",
			  __func__, data);
		break;
	case MSR_IA32_MCG_STATUS:
		pr_unimpl(vcpu, "%s: MSR_IA32_MCG_STATUS 0x%llx, nop\n",
			  __func__, data);
		break;
	case MSR_IA32_MCG_CTL:
		pr_unimpl(vcpu, "%s: MSR_IA32_MCG_CTL 0x%llx, nop\n",
			  __func__, data);
		break;
	case MSR_IA32_DEBUGCTLMSR:
		if (!data) {
			/* We support the non-activated case already */
			break;
		} else if (data & ~(DEBUGCTLMSR_LBR | DEBUGCTLMSR_BTF)) {
			/* Values other than LBR and BTF are vendor-specific,
			   thus reserved and should throw a #GP */
			return 1;
		}
		pr_unimpl(vcpu, "%s: MSR_IA32_DEBUGCTLMSR 0x%llx, nop\n",
			  __func__, data);
		break;
	case MSR_IA32_UCODE_REV:
	case MSR_IA32_UCODE_WRITE:
	case MSR_VM_HSAVE_PA:
		break;
	case 0x200 ... 0x2ff:
		return set_msr_mtrr(vcpu, msr, data);
	case MSR_IA32_APICBASE:
		kvm_set_apic_base(vcpu, data);
		break;
	case MSR_IA32_MISC_ENABLE:
		vcpu->arch.ia32_misc_enable_msr = data;
		break;
	case MSR_KVM_WALL_CLOCK:
		vcpu->kvm->arch.wall_clock = data;
		kvm_write_wall_clock(vcpu->kvm, data);
		break;
	case MSR_KVM_SYSTEM_TIME: {
		if (vcpu->arch.time_page) {
			kvm_release_page_dirty(vcpu->arch.time_page);
			vcpu->arch.time_page = NULL;
		}

		vcpu->arch.time = data;

		/* we verify if the enable bit is set... */
		if (!(data & 1))
			break;

		/* ...but clean it before doing the actual write */
		vcpu->arch.time_offset = data & ~(PAGE_MASK | 1);

		vcpu->arch.time_page =
				gfn_to_page(vcpu->kvm, data >> PAGE_SHIFT);

		if (is_error_page(vcpu->arch.time_page)) {
			kvm_release_page_clean(vcpu->arch.time_page);
			vcpu->arch.time_page = NULL;
		}

		kvm_request_guest_time_update(vcpu);
		break;
	}
	default:
		pr_unimpl(vcpu, "unhandled wrmsr: 0x%x data %llx\n", msr, data);
		return 1;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_set_msr_common);

/*
 * Reads an msr value (of 'msr_index') into 'pdata'.
 * Returns 0 on success, non-0 otherwise.
 * Assumes vcpu_load() was already called.
 */
int kvm_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
{
	return kvm_x86_ops->get_msr(vcpu, msr_index, pdata);
}

static int get_msr_mtrr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
{
	u64 *p = (u64 *)&vcpu->arch.mtrr_state.fixed_ranges;

	if (!msr_mtrr_valid(msr))
		return 1;

	if (msr == MSR_MTRRdefType)
		*pdata = vcpu->arch.mtrr_state.def_type +
			 (vcpu->arch.mtrr_state.enabled << 10);
	else if (msr == MSR_MTRRfix64K_00000)
		*pdata = p[0];
	else if (msr == MSR_MTRRfix16K_80000 || msr == MSR_MTRRfix16K_A0000)
		*pdata = p[1 + msr - MSR_MTRRfix16K_80000];
	else if (msr >= MSR_MTRRfix4K_C0000 && msr <= MSR_MTRRfix4K_F8000)
		*pdata = p[3 + msr - MSR_MTRRfix4K_C0000];
	else if (msr == MSR_IA32_CR_PAT)
		*pdata = vcpu->arch.pat;
	else {	/* Variable MTRRs */
		int idx, is_mtrr_mask;
		u64 *pt;

		idx = (msr - 0x200) / 2;
		is_mtrr_mask = msr - 0x200 - 2 * idx;
		if (!is_mtrr_mask)
			pt =
			  (u64 *)&vcpu->arch.mtrr_state.var_ranges[idx].base_lo;
		else
			pt =
			  (u64 *)&vcpu->arch.mtrr_state.var_ranges[idx].mask_lo;
		*pdata = *pt;
	}

	return 0;
}
int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
{
	u64 data;

	switch (msr) {
	case 0xc0010010: /* SYSCFG */
	case 0xc0010015: /* HWCR */
	case MSR_IA32_PLATFORM_ID:
	case MSR_IA32_P5_MC_ADDR:
	case MSR_IA32_P5_MC_TYPE:
	case MSR_IA32_MC0_CTL:
	case MSR_IA32_MCG_STATUS:
	case MSR_IA32_MCG_CAP:
	case MSR_IA32_MCG_CTL:
	case MSR_IA32_MC0_MISC:
	case MSR_IA32_MC0_MISC+4:
	case MSR_IA32_MC0_MISC+8:
	case MSR_IA32_MC0_MISC+12:
	case MSR_IA32_MC0_MISC+16:
	case MSR_IA32_MC0_MISC+20:
	case MSR_IA32_UCODE_REV:
	case MSR_IA32_EBL_CR_POWERON:
	case MSR_IA32_DEBUGCTLMSR:
	case MSR_IA32_LASTBRANCHFROMIP:
	case MSR_IA32_LASTBRANCHTOIP:
	case MSR_IA32_LASTINTFROMIP:
	case MSR_IA32_LASTINTTOIP:
	case MSR_VM_HSAVE_PA:
		data = 0;
		break;
	case MSR_MTRRcap:
		data = 0x500 | KVM_NR_VAR_MTRR;
		break;
	case 0x200 ... 0x2ff:
		return get_msr_mtrr(vcpu, msr, pdata);
	case 0xcd: /* fsb frequency */
		data = 3;
		break;
	case MSR_IA32_APICBASE:
		data = kvm_get_apic_base(vcpu);
		break;
	case MSR_IA32_MISC_ENABLE:
		data = vcpu->arch.ia32_misc_enable_msr;
		break;
	case MSR_IA32_PERF_STATUS:
		/* TSC increment by tick */
		data = 1000ULL;
		/* CPU multiplier */
		data |= (((uint64_t)4ULL) << 40);
		break;
	case MSR_EFER:
		data = vcpu->arch.shadow_efer;
		break;
	case MSR_KVM_WALL_CLOCK:
		data = vcpu->kvm->arch.wall_clock;
		break;
	case MSR_KVM_SYSTEM_TIME:
		data = vcpu->arch.time;
		break;
	default:
		pr_unimpl(vcpu, "unhandled rdmsr: 0x%x\n", msr);
		return 1;
	}
	*pdata = data;
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_get_msr_common);

/*
 * Read or write a bunch of msrs. All parameters are kernel addresses.
 *
 * @return number of msrs set successfully.
 */
static int __msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs *msrs,
		    struct kvm_msr_entry *entries,
		    int (*do_msr)(struct kvm_vcpu *vcpu,
				  unsigned index, u64 *data))
{
	int i;

	vcpu_load(vcpu);

	down_read(&vcpu->kvm->slots_lock);
	for (i = 0; i < msrs->nmsrs; ++i)
		if (do_msr(vcpu, entries[i].index, &entries[i].data))
			break;
	up_read(&vcpu->kvm->slots_lock);

	vcpu_put(vcpu);

	return i;
}

/*
 * Read or write a bunch of msrs. Parameters are user addresses.
 *
 * @return number of msrs set successfully.
 */
static int msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs __user *user_msrs,
		  int (*do_msr)(struct kvm_vcpu *vcpu,
				unsigned index, u64 *data),
		  int writeback)
{
	struct kvm_msrs msrs;
	struct kvm_msr_entry *entries;
	int r, n;
	unsigned size;

	r = -EFAULT;
	if (copy_from_user(&msrs, user_msrs, sizeof msrs))
		goto out;

	r = -E2BIG;
	if (msrs.nmsrs >= MAX_IO_MSRS)
		goto out;

	r = -ENOMEM;
	size = sizeof(struct kvm_msr_entry) * msrs.nmsrs;
	entries = vmalloc(size);
	if (!entries)
		goto out;

	r = -EFAULT;
	if (copy_from_user(entries, user_msrs->entries, size))
		goto out_free;

	r = n = __msr_io(vcpu, &msrs, entries, do_msr);
	if (r < 0)
		goto out_free;

	r = -EFAULT;
	if (writeback && copy_to_user(user_msrs->entries, entries, size))
		goto out_free;

	r = n;

out_free:
	vfree(entries);
out:
	return r;
}
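
/*
 * kvm_dev_ioctl_check_extension() is the x86 backend for KVM_CHECK_EXTENSION:
 * it reports whether, and to what degree, a capability is supported.  The
 * return value is either a boolean or a capability-specific constant such as
 * the maximum number of vcpus or memory slots.
 */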
int kvm_dev_ioctl_check_extension(long ext)
{
	int r;

	switch (ext) {
	case KVM_CAP_IRQCHIP:
	case KVM_CAP_HLT:
	case KVM_CAP_MMU_SHADOW_CACHE_CONTROL:
	case KVM_CAP_SET_TSS_ADDR:
	case KVM_CAP_EXT_CPUID:
	case KVM_CAP_CLOCKSOURCE:
	case KVM_CAP_PIT:
	case KVM_CAP_NOP_IO_DELAY:
	case KVM_CAP_MP_STATE:
	case KVM_CAP_SYNC_MMU:
	case KVM_CAP_REINJECT_CONTROL:
	case KVM_CAP_IRQ_INJECT_STATUS:
		r = 1;
		break;
	case KVM_CAP_COALESCED_MMIO:
		r = KVM_COALESCED_MMIO_PAGE_OFFSET;
		break;
	case KVM_CAP_VAPIC:
		r = !kvm_x86_ops->cpu_has_accelerated_tpr();
		break;
	case KVM_CAP_NR_VCPUS:
		r = KVM_MAX_VCPUS;
		break;
	case KVM_CAP_NR_MEMSLOTS:
		r = KVM_MEMORY_SLOTS;
		break;
	case KVM_CAP_PV_MMU:
		r = !tdp_enabled;
		break;
	case KVM_CAP_IOMMU:
		r = iommu_found();
		break;
	default:
		r = 0;
		break;
	}
	return r;
}
long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg)
{
	void __user *argp = (void __user *)arg;
	long r;

	switch (ioctl) {
	case KVM_GET_MSR_INDEX_LIST: {
		struct kvm_msr_list __user *user_msr_list = argp;
		struct kvm_msr_list msr_list;
		unsigned n;

		r = -EFAULT;
		if (copy_from_user(&msr_list, user_msr_list, sizeof msr_list))
			goto out;
		n = msr_list.nmsrs;
		msr_list.nmsrs = num_msrs_to_save + ARRAY_SIZE(emulated_msrs);
		if (copy_to_user(user_msr_list, &msr_list, sizeof msr_list))
			goto out;
		r = -E2BIG;
		if (n < num_msrs_to_save)
			goto out;
		r = -EFAULT;
		if (copy_to_user(user_msr_list->indices, &msrs_to_save,
				 num_msrs_to_save * sizeof(u32)))
			goto out;
		if (copy_to_user(user_msr_list->indices
				 + num_msrs_to_save * sizeof(u32),
				 &emulated_msrs,
				 ARRAY_SIZE(emulated_msrs) * sizeof(u32)))
			goto out;
		r = 0;
		break;
	}
	case KVM_GET_SUPPORTED_CPUID: {
		struct kvm_cpuid2 __user *cpuid_arg = argp;
		struct kvm_cpuid2 cpuid;

		r = -EFAULT;
		if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
			goto out;
		r = kvm_dev_ioctl_get_supported_cpuid(&cpuid,
						      cpuid_arg->entries);
		if (r)
			goto out;

		r = -EFAULT;
		if (copy_to_user(cpuid_arg, &cpuid, sizeof cpuid))
			goto out;
		r = 0;
		break;
	}
	default:
		r = -EINVAL;
	}
out:
	return r;
}
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	kvm_x86_ops->vcpu_load(vcpu, cpu);
	kvm_request_guest_time_update(vcpu);
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	kvm_x86_ops->vcpu_put(vcpu);
	kvm_put_guest_fpu(vcpu);
}

static int is_efer_nx(void)
{
	unsigned long long efer = 0;

	rdmsrl_safe(MSR_EFER, &efer);
	return efer & EFER_NX;
}
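
/*
 * cpuid_fix_nx_cap() hides the NX bit from the guest's 0x80000001 CPUID leaf
 * when the host runs with EFER.NX disabled, so the guest is not offered a
 * feature the host cannot back.
 */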
static void cpuid_fix_nx_cap(struct kvm_vcpu *vcpu)
{
	int i;
	struct kvm_cpuid_entry2 *e, *entry;

	entry = NULL;
	for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
		e = &vcpu->arch.cpuid_entries[i];
		if (e->function == 0x80000001) {
			entry = e;
			break;
		}
	}
	if (entry && (entry->edx & (1 << 20)) && !is_efer_nx()) {
		entry->edx &= ~(1 << 20);
		printk(KERN_INFO "kvm: guest NX capability removed\n");
	}
}

/* when an old userspace process fills a new kernel module */
static int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu,
				    struct kvm_cpuid *cpuid,
				    struct kvm_cpuid_entry __user *entries)
{
	int r, i;
	struct kvm_cpuid_entry *cpuid_entries;

	r = -E2BIG;
	if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
		goto out;
	r = -ENOMEM;
	cpuid_entries = vmalloc(sizeof(struct kvm_cpuid_entry) * cpuid->nent);
	if (!cpuid_entries)
		goto out;
	r = -EFAULT;
	if (copy_from_user(cpuid_entries, entries,
			   cpuid->nent * sizeof(struct kvm_cpuid_entry)))
		goto out_free;
	for (i = 0; i < cpuid->nent; i++) {
		vcpu->arch.cpuid_entries[i].function = cpuid_entries[i].function;
		vcpu->arch.cpuid_entries[i].eax = cpuid_entries[i].eax;
		vcpu->arch.cpuid_entries[i].ebx = cpuid_entries[i].ebx;
		vcpu->arch.cpuid_entries[i].ecx = cpuid_entries[i].ecx;
		vcpu->arch.cpuid_entries[i].edx = cpuid_entries[i].edx;
		vcpu->arch.cpuid_entries[i].index = 0;
		vcpu->arch.cpuid_entries[i].flags = 0;
		vcpu->arch.cpuid_entries[i].padding[0] = 0;
		vcpu->arch.cpuid_entries[i].padding[1] = 0;
		vcpu->arch.cpuid_entries[i].padding[2] = 0;
	}
	vcpu->arch.cpuid_nent = cpuid->nent;
	cpuid_fix_nx_cap(vcpu);
	r = 0;

out_free:
	vfree(cpuid_entries);
out:
	return r;
}

static int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
				     struct kvm_cpuid2 *cpuid,
				     struct kvm_cpuid_entry2 __user *entries)
{
	int r;

	r = -E2BIG;
	if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
		goto out;
	r = -EFAULT;
	if (copy_from_user(&vcpu->arch.cpuid_entries, entries,
			   cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
		goto out;
	vcpu->arch.cpuid_nent = cpuid->nent;
	return 0;

out:
	return r;
}

static int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
				     struct kvm_cpuid2 *cpuid,
				     struct kvm_cpuid_entry2 __user *entries)
{
	int r;

	r = -E2BIG;
	if (cpuid->nent < vcpu->arch.cpuid_nent)
		goto out;
	r = -EFAULT;
	if (copy_to_user(entries, &vcpu->arch.cpuid_entries,
			 vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
		goto out;
	return 0;

out:
	cpuid->nent = vcpu->arch.cpuid_nent;
	return r;
}
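
/*
 * do_cpuid_1_ent() executes the host CPUID instruction for the given
 * function/index and records the raw result in one kvm_cpuid_entry2;
 * do_cpuid_ent() then masks the feature words down to what KVM can expose
 * to guests and expands multi-index leaves into additional entries.
 */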
  1080. static void do_cpuid_1_ent(struct kvm_cpuid_entry2 *entry, u32 function,
  1081. u32 index)
  1082. {
  1083. entry->function = function;
  1084. entry->index = index;
  1085. cpuid_count(entry->function, entry->index,
  1086. &entry->eax, &entry->ebx, &entry->ecx, &entry->edx);
  1087. entry->flags = 0;
  1088. }
  1089. static void do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
  1090. u32 index, int *nent, int maxnent)
  1091. {
  1092. const u32 kvm_supported_word0_x86_features = bit(X86_FEATURE_FPU) |
  1093. bit(X86_FEATURE_VME) | bit(X86_FEATURE_DE) |
  1094. bit(X86_FEATURE_PSE) | bit(X86_FEATURE_TSC) |
  1095. bit(X86_FEATURE_MSR) | bit(X86_FEATURE_PAE) |
  1096. bit(X86_FEATURE_CX8) | bit(X86_FEATURE_APIC) |
  1097. bit(X86_FEATURE_SEP) | bit(X86_FEATURE_PGE) |
  1098. bit(X86_FEATURE_CMOV) | bit(X86_FEATURE_PSE36) |
  1099. bit(X86_FEATURE_CLFLSH) | bit(X86_FEATURE_MMX) |
  1100. bit(X86_FEATURE_FXSR) | bit(X86_FEATURE_XMM) |
  1101. bit(X86_FEATURE_XMM2) | bit(X86_FEATURE_SELFSNOOP);
  1102. const u32 kvm_supported_word1_x86_features = bit(X86_FEATURE_FPU) |
  1103. bit(X86_FEATURE_VME) | bit(X86_FEATURE_DE) |
  1104. bit(X86_FEATURE_PSE) | bit(X86_FEATURE_TSC) |
  1105. bit(X86_FEATURE_MSR) | bit(X86_FEATURE_PAE) |
  1106. bit(X86_FEATURE_CX8) | bit(X86_FEATURE_APIC) |
  1107. bit(X86_FEATURE_PGE) |
  1108. bit(X86_FEATURE_CMOV) | bit(X86_FEATURE_PSE36) |
  1109. bit(X86_FEATURE_MMX) | bit(X86_FEATURE_FXSR) |
  1110. bit(X86_FEATURE_SYSCALL) |
  1111. (is_efer_nx() ? bit(X86_FEATURE_NX) : 0) |
  1112. #ifdef CONFIG_X86_64
  1113. bit(X86_FEATURE_LM) |
  1114. #endif
  1115. bit(X86_FEATURE_FXSR_OPT) |
  1116. bit(X86_FEATURE_MMXEXT) |
  1117. bit(X86_FEATURE_3DNOWEXT) |
  1118. bit(X86_FEATURE_3DNOW);
  1119. const u32 kvm_supported_word3_x86_features =
  1120. bit(X86_FEATURE_XMM3) | bit(X86_FEATURE_CX16);
  1121. const u32 kvm_supported_word6_x86_features =
  1122. bit(X86_FEATURE_LAHF_LM) | bit(X86_FEATURE_CMP_LEGACY) |
  1123. bit(X86_FEATURE_SVM);
  1124. /* all calls to cpuid_count() should be made on the same cpu */
  1125. get_cpu();
  1126. do_cpuid_1_ent(entry, function, index);
  1127. ++*nent;
  1128. switch (function) {
  1129. case 0:
  1130. entry->eax = min(entry->eax, (u32)0xb);
  1131. break;
  1132. case 1:
  1133. entry->edx &= kvm_supported_word0_x86_features;
  1134. entry->ecx &= kvm_supported_word3_x86_features;
  1135. break;
  1136. /* function 2 entries are STATEFUL. That is, repeated cpuid commands
  1137. * may return different values. This forces us to get_cpu() before
  1138. * issuing the first command, and also to emulate this annoying behavior
  1139. * in kvm_emulate_cpuid() using KVM_CPUID_FLAG_STATE_READ_NEXT */
  1140. case 2: {
  1141. int t, times = entry->eax & 0xff;
  1142. entry->flags |= KVM_CPUID_FLAG_STATEFUL_FUNC;
  1143. entry->flags |= KVM_CPUID_FLAG_STATE_READ_NEXT;
  1144. for (t = 1; t < times && *nent < maxnent; ++t) {
  1145. do_cpuid_1_ent(&entry[t], function, 0);
  1146. entry[t].flags |= KVM_CPUID_FLAG_STATEFUL_FUNC;
  1147. ++*nent;
  1148. }
  1149. break;
  1150. }
  1151. /* function 4 and 0xb have additional index. */
  1152. case 4: {
  1153. int i, cache_type;
  1154. entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
  1155. /* read more entries until cache_type is zero */
  1156. for (i = 1; *nent < maxnent; ++i) {
  1157. cache_type = entry[i - 1].eax & 0x1f;
  1158. if (!cache_type)
  1159. break;
  1160. do_cpuid_1_ent(&entry[i], function, i);
  1161. entry[i].flags |=
  1162. KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
  1163. ++*nent;
  1164. }
  1165. break;
  1166. }
  1167. case 0xb: {
  1168. int i, level_type;
  1169. entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
  1170. /* read more entries until level_type is zero */
  1171. for (i = 1; *nent < maxnent; ++i) {
  1172. level_type = entry[i - 1].ecx & 0xff00;
  1173. if (!level_type)
  1174. break;
  1175. do_cpuid_1_ent(&entry[i], function, i);
  1176. entry[i].flags |=
  1177. KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
  1178. ++*nent;
  1179. }
  1180. break;
  1181. }
  1182. case 0x80000000:
  1183. entry->eax = min(entry->eax, 0x8000001a);
  1184. break;
  1185. case 0x80000001:
  1186. entry->edx &= kvm_supported_word1_x86_features;
  1187. entry->ecx &= kvm_supported_word6_x86_features;
  1188. break;
  1189. }
  1190. put_cpu();
  1191. }
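/*
 * Enumerate every CPUID leaf KVM can emulate (the KVM_GET_SUPPORTED_CPUID
 * path): walk the basic range (0 .. leaf 0's EAX) and the extended range
 * (0x80000000 .. its reported limit), then copy the resulting entries back
 * to userspace and update cpuid->nent.
 */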
  1192. static int kvm_dev_ioctl_get_supported_cpuid(struct kvm_cpuid2 *cpuid,
  1193. struct kvm_cpuid_entry2 __user *entries)
  1194. {
  1195. struct kvm_cpuid_entry2 *cpuid_entries;
  1196. int limit, nent = 0, r = -E2BIG;
  1197. u32 func;
  1198. if (cpuid->nent < 1)
  1199. goto out;
  1200. r = -ENOMEM;
  1201. cpuid_entries = vmalloc(sizeof(struct kvm_cpuid_entry2) * cpuid->nent);
  1202. if (!cpuid_entries)
  1203. goto out;
  1204. do_cpuid_ent(&cpuid_entries[0], 0, 0, &nent, cpuid->nent);
  1205. limit = cpuid_entries[0].eax;
  1206. for (func = 1; func <= limit && nent < cpuid->nent; ++func)
  1207. do_cpuid_ent(&cpuid_entries[nent], func, 0,
  1208. &nent, cpuid->nent);
  1209. r = -E2BIG;
  1210. if (nent >= cpuid->nent)
  1211. goto out_free;
  1212. do_cpuid_ent(&cpuid_entries[nent], 0x80000000, 0, &nent, cpuid->nent);
  1213. limit = cpuid_entries[nent - 1].eax;
  1214. for (func = 0x80000001; func <= limit && nent < cpuid->nent; ++func)
  1215. do_cpuid_ent(&cpuid_entries[nent], func, 0,
  1216. &nent, cpuid->nent);
  1217. r = -EFAULT;
  1218. if (copy_to_user(entries, cpuid_entries,
  1219. nent * sizeof(struct kvm_cpuid_entry2)))
  1220. goto out_free;
  1221. cpuid->nent = nent;
  1222. r = 0;
  1223. out_free:
  1224. vfree(cpuid_entries);
  1225. out:
  1226. return r;
  1227. }
  1228. static int kvm_vcpu_ioctl_get_lapic(struct kvm_vcpu *vcpu,
  1229. struct kvm_lapic_state *s)
  1230. {
  1231. vcpu_load(vcpu);
  1232. memcpy(s->regs, vcpu->arch.apic->regs, sizeof *s);
  1233. vcpu_put(vcpu);
  1234. return 0;
  1235. }
  1236. static int kvm_vcpu_ioctl_set_lapic(struct kvm_vcpu *vcpu,
  1237. struct kvm_lapic_state *s)
  1238. {
  1239. vcpu_load(vcpu);
  1240. memcpy(vcpu->arch.apic->regs, s->regs, sizeof *s);
  1241. kvm_apic_post_state_restore(vcpu);
  1242. vcpu_put(vcpu);
  1243. return 0;
  1244. }
  1245. static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
  1246. struct kvm_interrupt *irq)
  1247. {
  1248. if (irq->irq < 0 || irq->irq >= 256)
  1249. return -EINVAL;
  1250. if (irqchip_in_kernel(vcpu->kvm))
  1251. return -ENXIO;
  1252. vcpu_load(vcpu);
  1253. set_bit(irq->irq, vcpu->arch.irq_pending);
  1254. set_bit(irq->irq / BITS_PER_LONG, &vcpu->arch.irq_summary);
  1255. vcpu_put(vcpu);
  1256. return 0;
  1257. }
  1258. static int kvm_vcpu_ioctl_nmi(struct kvm_vcpu *vcpu)
  1259. {
  1260. vcpu_load(vcpu);
  1261. kvm_inject_nmi(vcpu);
  1262. vcpu_put(vcpu);
  1263. return 0;
  1264. }
  1265. static int vcpu_ioctl_tpr_access_reporting(struct kvm_vcpu *vcpu,
  1266. struct kvm_tpr_access_ctl *tac)
  1267. {
  1268. if (tac->flags)
  1269. return -EINVAL;
  1270. vcpu->arch.tpr_access_reporting = !!tac->enabled;
  1271. return 0;
  1272. }
  1273. long kvm_arch_vcpu_ioctl(struct file *filp,
  1274. unsigned int ioctl, unsigned long arg)
  1275. {
  1276. struct kvm_vcpu *vcpu = filp->private_data;
  1277. void __user *argp = (void __user *)arg;
  1278. int r;
  1279. struct kvm_lapic_state *lapic = NULL;
  1280. switch (ioctl) {
  1281. case KVM_GET_LAPIC: {
  1282. lapic = kzalloc(sizeof(struct kvm_lapic_state), GFP_KERNEL);
  1283. r = -ENOMEM;
  1284. if (!lapic)
  1285. goto out;
  1286. r = kvm_vcpu_ioctl_get_lapic(vcpu, lapic);
  1287. if (r)
  1288. goto out;
  1289. r = -EFAULT;
  1290. if (copy_to_user(argp, lapic, sizeof(struct kvm_lapic_state)))
  1291. goto out;
  1292. r = 0;
  1293. break;
  1294. }
  1295. case KVM_SET_LAPIC: {
  1296. lapic = kmalloc(sizeof(struct kvm_lapic_state), GFP_KERNEL);
  1297. r = -ENOMEM;
  1298. if (!lapic)
  1299. goto out;
  1300. r = -EFAULT;
  1301. if (copy_from_user(lapic, argp, sizeof(struct kvm_lapic_state)))
  1302. goto out;
  1303. r = kvm_vcpu_ioctl_set_lapic(vcpu, lapic);
  1304. if (r)
  1305. goto out;
  1306. r = 0;
  1307. break;
  1308. }
  1309. case KVM_INTERRUPT: {
  1310. struct kvm_interrupt irq;
  1311. r = -EFAULT;
  1312. if (copy_from_user(&irq, argp, sizeof irq))
  1313. goto out;
  1314. r = kvm_vcpu_ioctl_interrupt(vcpu, &irq);
  1315. if (r)
  1316. goto out;
  1317. r = 0;
  1318. break;
  1319. }
  1320. case KVM_NMI: {
  1321. r = kvm_vcpu_ioctl_nmi(vcpu);
  1322. if (r)
  1323. goto out;
  1324. r = 0;
  1325. break;
  1326. }
  1327. case KVM_SET_CPUID: {
  1328. struct kvm_cpuid __user *cpuid_arg = argp;
  1329. struct kvm_cpuid cpuid;
  1330. r = -EFAULT;
  1331. if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
  1332. goto out;
  1333. r = kvm_vcpu_ioctl_set_cpuid(vcpu, &cpuid, cpuid_arg->entries);
  1334. if (r)
  1335. goto out;
  1336. break;
  1337. }
  1338. case KVM_SET_CPUID2: {
  1339. struct kvm_cpuid2 __user *cpuid_arg = argp;
  1340. struct kvm_cpuid2 cpuid;
  1341. r = -EFAULT;
  1342. if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
  1343. goto out;
  1344. r = kvm_vcpu_ioctl_set_cpuid2(vcpu, &cpuid,
  1345. cpuid_arg->entries);
  1346. if (r)
  1347. goto out;
  1348. break;
  1349. }
  1350. case KVM_GET_CPUID2: {
  1351. struct kvm_cpuid2 __user *cpuid_arg = argp;
  1352. struct kvm_cpuid2 cpuid;
  1353. r = -EFAULT;
  1354. if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
  1355. goto out;
  1356. r = kvm_vcpu_ioctl_get_cpuid2(vcpu, &cpuid,
  1357. cpuid_arg->entries);
  1358. if (r)
  1359. goto out;
  1360. r = -EFAULT;
  1361. if (copy_to_user(cpuid_arg, &cpuid, sizeof cpuid))
  1362. goto out;
  1363. r = 0;
  1364. break;
  1365. }
  1366. case KVM_GET_MSRS:
  1367. r = msr_io(vcpu, argp, kvm_get_msr, 1);
  1368. break;
  1369. case KVM_SET_MSRS:
  1370. r = msr_io(vcpu, argp, do_set_msr, 0);
  1371. break;
  1372. case KVM_TPR_ACCESS_REPORTING: {
  1373. struct kvm_tpr_access_ctl tac;
  1374. r = -EFAULT;
  1375. if (copy_from_user(&tac, argp, sizeof tac))
  1376. goto out;
  1377. r = vcpu_ioctl_tpr_access_reporting(vcpu, &tac);
  1378. if (r)
  1379. goto out;
  1380. r = -EFAULT;
  1381. if (copy_to_user(argp, &tac, sizeof tac))
  1382. goto out;
  1383. r = 0;
  1384. break;
1385. }
  1386. case KVM_SET_VAPIC_ADDR: {
  1387. struct kvm_vapic_addr va;
  1388. r = -EINVAL;
  1389. if (!irqchip_in_kernel(vcpu->kvm))
  1390. goto out;
  1391. r = -EFAULT;
  1392. if (copy_from_user(&va, argp, sizeof va))
  1393. goto out;
  1394. r = 0;
  1395. kvm_lapic_set_vapic_addr(vcpu, va.vapic_addr);
  1396. break;
  1397. }
  1398. default:
  1399. r = -EINVAL;
  1400. }
  1401. out:
  1402. if (lapic)
  1403. kfree(lapic);
  1404. return r;
  1405. }
  1406. static int kvm_vm_ioctl_set_tss_addr(struct kvm *kvm, unsigned long addr)
  1407. {
  1408. int ret;
  1409. if (addr > (unsigned int)(-3 * PAGE_SIZE))
  1410. return -1;
  1411. ret = kvm_x86_ops->set_tss_addr(kvm, addr);
  1412. return ret;
  1413. }
  1414. static int kvm_vm_ioctl_set_nr_mmu_pages(struct kvm *kvm,
  1415. u32 kvm_nr_mmu_pages)
  1416. {
  1417. if (kvm_nr_mmu_pages < KVM_MIN_ALLOC_MMU_PAGES)
  1418. return -EINVAL;
  1419. down_write(&kvm->slots_lock);
  1420. kvm_mmu_change_mmu_pages(kvm, kvm_nr_mmu_pages);
  1421. kvm->arch.n_requested_mmu_pages = kvm_nr_mmu_pages;
  1422. up_write(&kvm->slots_lock);
  1423. return 0;
  1424. }
  1425. static int kvm_vm_ioctl_get_nr_mmu_pages(struct kvm *kvm)
  1426. {
  1427. return kvm->arch.n_alloc_mmu_pages;
  1428. }
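/*
 * Translate a guest frame number through the memory alias table: if gfn
 * falls inside an alias slot, return the corresponding target gfn,
 * otherwise return it unchanged.
 */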
  1429. gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn)
  1430. {
  1431. int i;
  1432. struct kvm_mem_alias *alias;
  1433. for (i = 0; i < kvm->arch.naliases; ++i) {
  1434. alias = &kvm->arch.aliases[i];
  1435. if (gfn >= alias->base_gfn
  1436. && gfn < alias->base_gfn + alias->npages)
  1437. return alias->target_gfn + gfn - alias->base_gfn;
  1438. }
  1439. return gfn;
  1440. }
  1441. /*
  1442. * Set a new alias region. Aliases map a portion of physical memory into
  1443. * another portion. This is useful for memory windows, for example the PC
  1444. * VGA region.
  1445. */
  1446. static int kvm_vm_ioctl_set_memory_alias(struct kvm *kvm,
  1447. struct kvm_memory_alias *alias)
  1448. {
  1449. int r, n;
  1450. struct kvm_mem_alias *p;
  1451. r = -EINVAL;
  1452. /* General sanity checks */
  1453. if (alias->memory_size & (PAGE_SIZE - 1))
  1454. goto out;
  1455. if (alias->guest_phys_addr & (PAGE_SIZE - 1))
  1456. goto out;
  1457. if (alias->slot >= KVM_ALIAS_SLOTS)
  1458. goto out;
  1459. if (alias->guest_phys_addr + alias->memory_size
  1460. < alias->guest_phys_addr)
  1461. goto out;
  1462. if (alias->target_phys_addr + alias->memory_size
  1463. < alias->target_phys_addr)
  1464. goto out;
  1465. down_write(&kvm->slots_lock);
  1466. spin_lock(&kvm->mmu_lock);
  1467. p = &kvm->arch.aliases[alias->slot];
  1468. p->base_gfn = alias->guest_phys_addr >> PAGE_SHIFT;
  1469. p->npages = alias->memory_size >> PAGE_SHIFT;
  1470. p->target_gfn = alias->target_phys_addr >> PAGE_SHIFT;
  1471. for (n = KVM_ALIAS_SLOTS; n > 0; --n)
  1472. if (kvm->arch.aliases[n - 1].npages)
  1473. break;
  1474. kvm->arch.naliases = n;
  1475. spin_unlock(&kvm->mmu_lock);
  1476. kvm_mmu_zap_all(kvm);
  1477. up_write(&kvm->slots_lock);
  1478. return 0;
  1479. out:
  1480. return r;
  1481. }
  1482. static int kvm_vm_ioctl_get_irqchip(struct kvm *kvm, struct kvm_irqchip *chip)
  1483. {
  1484. int r;
  1485. r = 0;
  1486. switch (chip->chip_id) {
  1487. case KVM_IRQCHIP_PIC_MASTER:
  1488. memcpy(&chip->chip.pic,
  1489. &pic_irqchip(kvm)->pics[0],
  1490. sizeof(struct kvm_pic_state));
  1491. break;
  1492. case KVM_IRQCHIP_PIC_SLAVE:
  1493. memcpy(&chip->chip.pic,
  1494. &pic_irqchip(kvm)->pics[1],
  1495. sizeof(struct kvm_pic_state));
  1496. break;
  1497. case KVM_IRQCHIP_IOAPIC:
  1498. memcpy(&chip->chip.ioapic,
  1499. ioapic_irqchip(kvm),
  1500. sizeof(struct kvm_ioapic_state));
  1501. break;
  1502. default:
  1503. r = -EINVAL;
  1504. break;
  1505. }
  1506. return r;
  1507. }
  1508. static int kvm_vm_ioctl_set_irqchip(struct kvm *kvm, struct kvm_irqchip *chip)
  1509. {
  1510. int r;
  1511. r = 0;
  1512. switch (chip->chip_id) {
  1513. case KVM_IRQCHIP_PIC_MASTER:
  1514. memcpy(&pic_irqchip(kvm)->pics[0],
  1515. &chip->chip.pic,
  1516. sizeof(struct kvm_pic_state));
  1517. break;
  1518. case KVM_IRQCHIP_PIC_SLAVE:
  1519. memcpy(&pic_irqchip(kvm)->pics[1],
  1520. &chip->chip.pic,
  1521. sizeof(struct kvm_pic_state));
  1522. break;
  1523. case KVM_IRQCHIP_IOAPIC:
  1524. memcpy(ioapic_irqchip(kvm),
  1525. &chip->chip.ioapic,
  1526. sizeof(struct kvm_ioapic_state));
  1527. break;
  1528. default:
  1529. r = -EINVAL;
  1530. break;
  1531. }
  1532. kvm_pic_update_irq(pic_irqchip(kvm));
  1533. return r;
  1534. }
  1535. static int kvm_vm_ioctl_get_pit(struct kvm *kvm, struct kvm_pit_state *ps)
  1536. {
  1537. int r = 0;
  1538. memcpy(ps, &kvm->arch.vpit->pit_state, sizeof(struct kvm_pit_state));
  1539. return r;
  1540. }
  1541. static int kvm_vm_ioctl_set_pit(struct kvm *kvm, struct kvm_pit_state *ps)
  1542. {
  1543. int r = 0;
  1544. memcpy(&kvm->arch.vpit->pit_state, ps, sizeof(struct kvm_pit_state));
  1545. kvm_pit_load_count(kvm, 0, ps->channels[0].count);
  1546. return r;
  1547. }
  1548. static int kvm_vm_ioctl_reinject(struct kvm *kvm,
  1549. struct kvm_reinject_control *control)
  1550. {
  1551. if (!kvm->arch.vpit)
  1552. return -ENXIO;
  1553. kvm->arch.vpit->pit_state.pit_timer.reinject = control->pit_reinject;
  1554. return 0;
  1555. }
  1556. /*
  1557. * Get (and clear) the dirty memory log for a memory slot.
  1558. */
  1559. int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
  1560. struct kvm_dirty_log *log)
  1561. {
  1562. int r;
  1563. int n;
  1564. struct kvm_memory_slot *memslot;
  1565. int is_dirty = 0;
  1566. down_write(&kvm->slots_lock);
  1567. r = kvm_get_dirty_log(kvm, log, &is_dirty);
  1568. if (r)
  1569. goto out;
  1570. /* If nothing is dirty, don't bother messing with page tables. */
  1571. if (is_dirty) {
  1572. kvm_mmu_slot_remove_write_access(kvm, log->slot);
  1573. kvm_flush_remote_tlbs(kvm);
  1574. memslot = &kvm->memslots[log->slot];
  1575. n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;
  1576. memset(memslot->dirty_bitmap, 0, n);
  1577. }
  1578. r = 0;
  1579. out:
  1580. up_write(&kvm->slots_lock);
  1581. return r;
  1582. }
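/*
 * Dispatcher for the x86-specific VM ioctls (TSS address, memory aliases,
 * in-kernel irqchip and PIT state, IRQ line injection, ...).
 */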
  1583. long kvm_arch_vm_ioctl(struct file *filp,
  1584. unsigned int ioctl, unsigned long arg)
  1585. {
  1586. struct kvm *kvm = filp->private_data;
  1587. void __user *argp = (void __user *)arg;
  1588. int r = -EINVAL;
  1589. /*
  1590. * This union makes it completely explicit to gcc-3.x
  1591. * that these two variables' stack usage should be
  1592. * combined, not added together.
  1593. */
  1594. union {
  1595. struct kvm_pit_state ps;
  1596. struct kvm_memory_alias alias;
  1597. } u;
  1598. switch (ioctl) {
  1599. case KVM_SET_TSS_ADDR:
  1600. r = kvm_vm_ioctl_set_tss_addr(kvm, arg);
  1601. if (r < 0)
  1602. goto out;
  1603. break;
  1604. case KVM_SET_MEMORY_REGION: {
  1605. struct kvm_memory_region kvm_mem;
  1606. struct kvm_userspace_memory_region kvm_userspace_mem;
  1607. r = -EFAULT;
  1608. if (copy_from_user(&kvm_mem, argp, sizeof kvm_mem))
  1609. goto out;
  1610. kvm_userspace_mem.slot = kvm_mem.slot;
  1611. kvm_userspace_mem.flags = kvm_mem.flags;
  1612. kvm_userspace_mem.guest_phys_addr = kvm_mem.guest_phys_addr;
  1613. kvm_userspace_mem.memory_size = kvm_mem.memory_size;
  1614. r = kvm_vm_ioctl_set_memory_region(kvm, &kvm_userspace_mem, 0);
  1615. if (r)
  1616. goto out;
  1617. break;
  1618. }
  1619. case KVM_SET_NR_MMU_PAGES:
  1620. r = kvm_vm_ioctl_set_nr_mmu_pages(kvm, arg);
  1621. if (r)
  1622. goto out;
  1623. break;
  1624. case KVM_GET_NR_MMU_PAGES:
  1625. r = kvm_vm_ioctl_get_nr_mmu_pages(kvm);
  1626. break;
  1627. case KVM_SET_MEMORY_ALIAS:
  1628. r = -EFAULT;
  1629. if (copy_from_user(&u.alias, argp, sizeof(struct kvm_memory_alias)))
  1630. goto out;
  1631. r = kvm_vm_ioctl_set_memory_alias(kvm, &u.alias);
  1632. if (r)
  1633. goto out;
  1634. break;
  1635. case KVM_CREATE_IRQCHIP:
  1636. r = -ENOMEM;
  1637. kvm->arch.vpic = kvm_create_pic(kvm);
  1638. if (kvm->arch.vpic) {
  1639. r = kvm_ioapic_init(kvm);
  1640. if (r) {
  1641. kfree(kvm->arch.vpic);
  1642. kvm->arch.vpic = NULL;
  1643. goto out;
  1644. }
  1645. } else
  1646. goto out;
  1647. r = kvm_setup_default_irq_routing(kvm);
  1648. if (r) {
  1649. kfree(kvm->arch.vpic);
  1650. kfree(kvm->arch.vioapic);
  1651. goto out;
  1652. }
  1653. break;
  1654. case KVM_CREATE_PIT:
  1655. mutex_lock(&kvm->lock);
  1656. r = -EEXIST;
  1657. if (kvm->arch.vpit)
  1658. goto create_pit_unlock;
  1659. r = -ENOMEM;
  1660. kvm->arch.vpit = kvm_create_pit(kvm);
  1661. if (kvm->arch.vpit)
  1662. r = 0;
  1663. create_pit_unlock:
  1664. mutex_unlock(&kvm->lock);
  1665. break;
  1666. case KVM_IRQ_LINE_STATUS:
  1667. case KVM_IRQ_LINE: {
  1668. struct kvm_irq_level irq_event;
  1669. r = -EFAULT;
  1670. if (copy_from_user(&irq_event, argp, sizeof irq_event))
  1671. goto out;
  1672. if (irqchip_in_kernel(kvm)) {
  1673. __s32 status;
  1674. mutex_lock(&kvm->lock);
  1675. status = kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID,
  1676. irq_event.irq, irq_event.level);
  1677. mutex_unlock(&kvm->lock);
  1678. if (ioctl == KVM_IRQ_LINE_STATUS) {
  1679. irq_event.status = status;
  1680. if (copy_to_user(argp, &irq_event,
  1681. sizeof irq_event))
  1682. goto out;
  1683. }
  1684. r = 0;
  1685. }
  1686. break;
  1687. }
  1688. case KVM_GET_IRQCHIP: {
  1689. /* 0: PIC master, 1: PIC slave, 2: IOAPIC */
  1690. struct kvm_irqchip *chip = kmalloc(sizeof(*chip), GFP_KERNEL);
  1691. r = -ENOMEM;
  1692. if (!chip)
  1693. goto out;
  1694. r = -EFAULT;
  1695. if (copy_from_user(chip, argp, sizeof *chip))
  1696. goto get_irqchip_out;
  1697. r = -ENXIO;
  1698. if (!irqchip_in_kernel(kvm))
  1699. goto get_irqchip_out;
  1700. r = kvm_vm_ioctl_get_irqchip(kvm, chip);
  1701. if (r)
  1702. goto get_irqchip_out;
  1703. r = -EFAULT;
  1704. if (copy_to_user(argp, chip, sizeof *chip))
  1705. goto get_irqchip_out;
  1706. r = 0;
  1707. get_irqchip_out:
  1708. kfree(chip);
  1709. if (r)
  1710. goto out;
  1711. break;
  1712. }
  1713. case KVM_SET_IRQCHIP: {
  1714. /* 0: PIC master, 1: PIC slave, 2: IOAPIC */
  1715. struct kvm_irqchip *chip = kmalloc(sizeof(*chip), GFP_KERNEL);
  1716. r = -ENOMEM;
  1717. if (!chip)
  1718. goto out;
  1719. r = -EFAULT;
  1720. if (copy_from_user(chip, argp, sizeof *chip))
  1721. goto set_irqchip_out;
  1722. r = -ENXIO;
  1723. if (!irqchip_in_kernel(kvm))
  1724. goto set_irqchip_out;
  1725. r = kvm_vm_ioctl_set_irqchip(kvm, chip);
  1726. if (r)
  1727. goto set_irqchip_out;
  1728. r = 0;
  1729. set_irqchip_out:
  1730. kfree(chip);
  1731. if (r)
  1732. goto out;
  1733. break;
  1734. }
  1735. case KVM_GET_PIT: {
  1736. r = -EFAULT;
  1737. if (copy_from_user(&u.ps, argp, sizeof(struct kvm_pit_state)))
  1738. goto out;
  1739. r = -ENXIO;
  1740. if (!kvm->arch.vpit)
  1741. goto out;
  1742. r = kvm_vm_ioctl_get_pit(kvm, &u.ps);
  1743. if (r)
  1744. goto out;
  1745. r = -EFAULT;
  1746. if (copy_to_user(argp, &u.ps, sizeof(struct kvm_pit_state)))
  1747. goto out;
  1748. r = 0;
  1749. break;
  1750. }
  1751. case KVM_SET_PIT: {
  1752. r = -EFAULT;
  1753. if (copy_from_user(&u.ps, argp, sizeof u.ps))
  1754. goto out;
  1755. r = -ENXIO;
  1756. if (!kvm->arch.vpit)
  1757. goto out;
  1758. r = kvm_vm_ioctl_set_pit(kvm, &u.ps);
  1759. if (r)
  1760. goto out;
  1761. r = 0;
  1762. break;
  1763. }
  1764. case KVM_REINJECT_CONTROL: {
  1765. struct kvm_reinject_control control;
  1766. r = -EFAULT;
  1767. if (copy_from_user(&control, argp, sizeof(control)))
  1768. goto out;
  1769. r = kvm_vm_ioctl_reinject(kvm, &control);
  1770. if (r)
  1771. goto out;
  1772. r = 0;
  1773. break;
  1774. }
  1775. default:
  1776. ;
  1777. }
  1778. out:
  1779. return r;
  1780. }
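/*
 * Probe msrs_to_save[] with rdmsr_safe() and compact the list so that it
 * only contains MSRs actually present on the host CPU.
 */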
  1781. static void kvm_init_msr_list(void)
  1782. {
  1783. u32 dummy[2];
  1784. unsigned i, j;
  1785. for (i = j = 0; i < ARRAY_SIZE(msrs_to_save); i++) {
  1786. if (rdmsr_safe(msrs_to_save[i], &dummy[0], &dummy[1]) < 0)
  1787. continue;
  1788. if (j < i)
  1789. msrs_to_save[j] = msrs_to_save[i];
  1790. j++;
  1791. }
  1792. num_msrs_to_save = j;
  1793. }
  1794. /*
1795. * Only the APIC needs an MMIO device hook, so take the shortcut now.
  1796. */
  1797. static struct kvm_io_device *vcpu_find_pervcpu_dev(struct kvm_vcpu *vcpu,
  1798. gpa_t addr, int len,
  1799. int is_write)
  1800. {
  1801. struct kvm_io_device *dev;
  1802. if (vcpu->arch.apic) {
  1803. dev = &vcpu->arch.apic->dev;
  1804. if (dev->in_range(dev, addr, len, is_write))
  1805. return dev;
  1806. }
  1807. return NULL;
  1808. }
  1809. static struct kvm_io_device *vcpu_find_mmio_dev(struct kvm_vcpu *vcpu,
  1810. gpa_t addr, int len,
  1811. int is_write)
  1812. {
  1813. struct kvm_io_device *dev;
  1814. dev = vcpu_find_pervcpu_dev(vcpu, addr, len, is_write);
  1815. if (dev == NULL)
  1816. dev = kvm_io_bus_find_dev(&vcpu->kvm->mmio_bus, addr, len,
  1817. is_write);
  1818. return dev;
  1819. }
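/*
 * Copy guest-virtual memory into a host buffer, translating one page at a
 * time through the guest MMU. Returns X86EMUL_PROPAGATE_FAULT for unmapped
 * addresses and X86EMUL_UNHANDLEABLE if the backing memory cannot be read.
 */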
  1820. static int kvm_read_guest_virt(gva_t addr, void *val, unsigned int bytes,
  1821. struct kvm_vcpu *vcpu)
  1822. {
  1823. void *data = val;
  1824. int r = X86EMUL_CONTINUE;
  1825. while (bytes) {
  1826. gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr);
  1827. unsigned offset = addr & (PAGE_SIZE-1);
  1828. unsigned toread = min(bytes, (unsigned)PAGE_SIZE - offset);
  1829. int ret;
  1830. if (gpa == UNMAPPED_GVA) {
  1831. r = X86EMUL_PROPAGATE_FAULT;
  1832. goto out;
  1833. }
  1834. ret = kvm_read_guest(vcpu->kvm, gpa, data, toread);
  1835. if (ret < 0) {
  1836. r = X86EMUL_UNHANDLEABLE;
  1837. goto out;
  1838. }
  1839. bytes -= toread;
  1840. data += toread;
  1841. addr += toread;
  1842. }
  1843. out:
  1844. return r;
  1845. }
  1846. static int kvm_write_guest_virt(gva_t addr, void *val, unsigned int bytes,
  1847. struct kvm_vcpu *vcpu)
  1848. {
  1849. void *data = val;
  1850. int r = X86EMUL_CONTINUE;
  1851. while (bytes) {
  1852. gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr);
  1853. unsigned offset = addr & (PAGE_SIZE-1);
  1854. unsigned towrite = min(bytes, (unsigned)PAGE_SIZE - offset);
  1855. int ret;
  1856. if (gpa == UNMAPPED_GVA) {
  1857. r = X86EMUL_PROPAGATE_FAULT;
  1858. goto out;
  1859. }
  1860. ret = kvm_write_guest(vcpu->kvm, gpa, data, towrite);
  1861. if (ret < 0) {
  1862. r = X86EMUL_UNHANDLEABLE;
  1863. goto out;
  1864. }
  1865. bytes -= towrite;
  1866. data += towrite;
  1867. addr += towrite;
  1868. }
  1869. out:
  1870. return r;
  1871. }
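/*
 * Emulator read callback: satisfy the read from guest memory when
 * possible, otherwise try an in-kernel MMIO device, and finally fall back
 * to a userspace MMIO exit (vcpu->mmio_needed).
 */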
  1872. static int emulator_read_emulated(unsigned long addr,
  1873. void *val,
  1874. unsigned int bytes,
  1875. struct kvm_vcpu *vcpu)
  1876. {
  1877. struct kvm_io_device *mmio_dev;
  1878. gpa_t gpa;
  1879. if (vcpu->mmio_read_completed) {
  1880. memcpy(val, vcpu->mmio_data, bytes);
  1881. vcpu->mmio_read_completed = 0;
  1882. return X86EMUL_CONTINUE;
  1883. }
  1884. gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr);
  1885. /* For APIC access vmexit */
  1886. if ((gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE)
  1887. goto mmio;
  1888. if (kvm_read_guest_virt(addr, val, bytes, vcpu)
  1889. == X86EMUL_CONTINUE)
  1890. return X86EMUL_CONTINUE;
  1891. if (gpa == UNMAPPED_GVA)
  1892. return X86EMUL_PROPAGATE_FAULT;
  1893. mmio:
  1894. /*
  1895. * Is this MMIO handled locally?
  1896. */
  1897. mutex_lock(&vcpu->kvm->lock);
  1898. mmio_dev = vcpu_find_mmio_dev(vcpu, gpa, bytes, 0);
  1899. if (mmio_dev) {
  1900. kvm_iodevice_read(mmio_dev, gpa, bytes, val);
  1901. mutex_unlock(&vcpu->kvm->lock);
  1902. return X86EMUL_CONTINUE;
  1903. }
  1904. mutex_unlock(&vcpu->kvm->lock);
  1905. vcpu->mmio_needed = 1;
  1906. vcpu->mmio_phys_addr = gpa;
  1907. vcpu->mmio_size = bytes;
  1908. vcpu->mmio_is_write = 0;
  1909. return X86EMUL_UNHANDLEABLE;
  1910. }
  1911. int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
  1912. const void *val, int bytes)
  1913. {
  1914. int ret;
  1915. ret = kvm_write_guest(vcpu->kvm, gpa, val, bytes);
  1916. if (ret < 0)
  1917. return 0;
  1918. kvm_mmu_pte_write(vcpu, gpa, val, bytes, 1);
  1919. return 1;
  1920. }
  1921. static int emulator_write_emulated_onepage(unsigned long addr,
  1922. const void *val,
  1923. unsigned int bytes,
  1924. struct kvm_vcpu *vcpu)
  1925. {
  1926. struct kvm_io_device *mmio_dev;
  1927. gpa_t gpa;
  1928. gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr);
  1929. if (gpa == UNMAPPED_GVA) {
  1930. kvm_inject_page_fault(vcpu, addr, 2);
  1931. return X86EMUL_PROPAGATE_FAULT;
  1932. }
  1933. /* For APIC access vmexit */
  1934. if ((gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE)
  1935. goto mmio;
  1936. if (emulator_write_phys(vcpu, gpa, val, bytes))
  1937. return X86EMUL_CONTINUE;
  1938. mmio:
  1939. /*
  1940. * Is this MMIO handled locally?
  1941. */
  1942. mutex_lock(&vcpu->kvm->lock);
  1943. mmio_dev = vcpu_find_mmio_dev(vcpu, gpa, bytes, 1);
  1944. if (mmio_dev) {
  1945. kvm_iodevice_write(mmio_dev, gpa, bytes, val);
  1946. mutex_unlock(&vcpu->kvm->lock);
  1947. return X86EMUL_CONTINUE;
  1948. }
  1949. mutex_unlock(&vcpu->kvm->lock);
  1950. vcpu->mmio_needed = 1;
  1951. vcpu->mmio_phys_addr = gpa;
  1952. vcpu->mmio_size = bytes;
  1953. vcpu->mmio_is_write = 1;
  1954. memcpy(vcpu->mmio_data, val, bytes);
  1955. return X86EMUL_CONTINUE;
  1956. }
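/*
 * Emulator write callback: split a write that crosses a page boundary into
 * two per-page writes so that emulator_write_emulated_onepage() only ever
 * deals with a single page.
 */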
  1957. int emulator_write_emulated(unsigned long addr,
  1958. const void *val,
  1959. unsigned int bytes,
  1960. struct kvm_vcpu *vcpu)
  1961. {
  1962. /* Crossing a page boundary? */
  1963. if (((addr + bytes - 1) ^ addr) & PAGE_MASK) {
  1964. int rc, now;
  1965. now = -addr & ~PAGE_MASK;
  1966. rc = emulator_write_emulated_onepage(addr, val, now, vcpu);
  1967. if (rc != X86EMUL_CONTINUE)
  1968. return rc;
  1969. addr += now;
  1970. val += now;
  1971. bytes -= now;
  1972. }
  1973. return emulator_write_emulated_onepage(addr, val, bytes, vcpu);
  1974. }
  1975. EXPORT_SYMBOL_GPL(emulator_write_emulated);
  1976. static int emulator_cmpxchg_emulated(unsigned long addr,
  1977. const void *old,
  1978. const void *new,
  1979. unsigned int bytes,
  1980. struct kvm_vcpu *vcpu)
  1981. {
  1982. static int reported;
  1983. if (!reported) {
  1984. reported = 1;
  1985. printk(KERN_WARNING "kvm: emulating exchange as write\n");
  1986. }
  1987. #ifndef CONFIG_X86_64
1988. /* a guest cmpxchg8b has to be emulated atomically */
  1989. if (bytes == 8) {
  1990. gpa_t gpa;
  1991. struct page *page;
  1992. char *kaddr;
  1993. u64 val;
  1994. gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr);
  1995. if (gpa == UNMAPPED_GVA ||
  1996. (gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE)
  1997. goto emul_write;
  1998. if (((gpa + bytes - 1) & PAGE_MASK) != (gpa & PAGE_MASK))
  1999. goto emul_write;
  2000. val = *(u64 *)new;
  2001. page = gfn_to_page(vcpu->kvm, gpa >> PAGE_SHIFT);
  2002. kaddr = kmap_atomic(page, KM_USER0);
  2003. set_64bit((u64 *)(kaddr + offset_in_page(gpa)), val);
  2004. kunmap_atomic(kaddr, KM_USER0);
  2005. kvm_release_page_dirty(page);
  2006. }
  2007. emul_write:
  2008. #endif
  2009. return emulator_write_emulated(addr, new, bytes, vcpu);
  2010. }
  2011. static unsigned long get_segment_base(struct kvm_vcpu *vcpu, int seg)
  2012. {
  2013. return kvm_x86_ops->get_segment_base(vcpu, seg);
  2014. }
  2015. int emulate_invlpg(struct kvm_vcpu *vcpu, gva_t address)
  2016. {
  2017. kvm_mmu_invlpg(vcpu, address);
  2018. return X86EMUL_CONTINUE;
  2019. }
  2020. int emulate_clts(struct kvm_vcpu *vcpu)
  2021. {
  2022. KVMTRACE_0D(CLTS, vcpu, handler);
  2023. kvm_x86_ops->set_cr0(vcpu, vcpu->arch.cr0 & ~X86_CR0_TS);
  2024. return X86EMUL_CONTINUE;
  2025. }
  2026. int emulator_get_dr(struct x86_emulate_ctxt *ctxt, int dr, unsigned long *dest)
  2027. {
  2028. struct kvm_vcpu *vcpu = ctxt->vcpu;
  2029. switch (dr) {
  2030. case 0 ... 3:
  2031. *dest = kvm_x86_ops->get_dr(vcpu, dr);
  2032. return X86EMUL_CONTINUE;
  2033. default:
  2034. pr_unimpl(vcpu, "%s: unexpected dr %u\n", __func__, dr);
  2035. return X86EMUL_UNHANDLEABLE;
  2036. }
  2037. }
  2038. int emulator_set_dr(struct x86_emulate_ctxt *ctxt, int dr, unsigned long value)
  2039. {
  2040. unsigned long mask = (ctxt->mode == X86EMUL_MODE_PROT64) ? ~0ULL : ~0U;
  2041. int exception;
  2042. kvm_x86_ops->set_dr(ctxt->vcpu, dr, value & mask, &exception);
  2043. if (exception) {
  2044. /* FIXME: better handling */
  2045. return X86EMUL_UNHANDLEABLE;
  2046. }
  2047. return X86EMUL_CONTINUE;
  2048. }
  2049. void kvm_report_emulation_failure(struct kvm_vcpu *vcpu, const char *context)
  2050. {
  2051. u8 opcodes[4];
  2052. unsigned long rip = kvm_rip_read(vcpu);
  2053. unsigned long rip_linear;
  2054. if (!printk_ratelimit())
  2055. return;
  2056. rip_linear = rip + get_segment_base(vcpu, VCPU_SREG_CS);
  2057. kvm_read_guest_virt(rip_linear, (void *)opcodes, 4, vcpu);
  2058. printk(KERN_ERR "emulation failed (%s) rip %lx %02x %02x %02x %02x\n",
  2059. context, rip, opcodes[0], opcodes[1], opcodes[2], opcodes[3]);
  2060. }
  2061. EXPORT_SYMBOL_GPL(kvm_report_emulation_failure);
  2062. static struct x86_emulate_ops emulate_ops = {
  2063. .read_std = kvm_read_guest_virt,
  2064. .read_emulated = emulator_read_emulated,
  2065. .write_emulated = emulator_write_emulated,
  2066. .cmpxchg_emulated = emulator_cmpxchg_emulated,
  2067. };
  2068. static void cache_all_regs(struct kvm_vcpu *vcpu)
  2069. {
  2070. kvm_register_read(vcpu, VCPU_REGS_RAX);
  2071. kvm_register_read(vcpu, VCPU_REGS_RSP);
  2072. kvm_register_read(vcpu, VCPU_REGS_RIP);
  2073. vcpu->arch.regs_dirty = ~0;
  2074. }
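/*
 * Top-level instruction emulation: decode the current instruction (unless
 * EMULTYPE_NO_DECODE), run it through the x86 emulator and translate the
 * result into EMULATE_DONE, EMULATE_DO_MMIO or EMULATE_FAIL.
 */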
  2075. int emulate_instruction(struct kvm_vcpu *vcpu,
  2076. struct kvm_run *run,
  2077. unsigned long cr2,
  2078. u16 error_code,
  2079. int emulation_type)
  2080. {
  2081. int r;
  2082. struct decode_cache *c;
  2083. kvm_clear_exception_queue(vcpu);
  2084. vcpu->arch.mmio_fault_cr2 = cr2;
  2085. /*
  2086. * TODO: fix x86_emulate.c to use guest_read/write_register
2087. * instead of direct ->regs accesses; this can save hundreds of
2088. * cycles on Intel for instructions that don't read/change RSP,
2089. * for example.
  2090. */
  2091. cache_all_regs(vcpu);
  2092. vcpu->mmio_is_write = 0;
  2093. vcpu->arch.pio.string = 0;
  2094. if (!(emulation_type & EMULTYPE_NO_DECODE)) {
  2095. int cs_db, cs_l;
  2096. kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
  2097. vcpu->arch.emulate_ctxt.vcpu = vcpu;
  2098. vcpu->arch.emulate_ctxt.eflags = kvm_x86_ops->get_rflags(vcpu);
  2099. vcpu->arch.emulate_ctxt.mode =
  2100. (vcpu->arch.emulate_ctxt.eflags & X86_EFLAGS_VM)
  2101. ? X86EMUL_MODE_REAL : cs_l
  2102. ? X86EMUL_MODE_PROT64 : cs_db
  2103. ? X86EMUL_MODE_PROT32 : X86EMUL_MODE_PROT16;
  2104. r = x86_decode_insn(&vcpu->arch.emulate_ctxt, &emulate_ops);
2105. /* Reject instructions other than VMCALL/VMMCALL when we are
2106. * asked to emulate an invalid opcode (EMULTYPE_TRAP_UD) */
  2107. c = &vcpu->arch.emulate_ctxt.decode;
  2108. if ((emulation_type & EMULTYPE_TRAP_UD) &&
  2109. (!(c->twobyte && c->b == 0x01 &&
  2110. (c->modrm_reg == 0 || c->modrm_reg == 3) &&
  2111. c->modrm_mod == 3 && c->modrm_rm == 1)))
  2112. return EMULATE_FAIL;
  2113. ++vcpu->stat.insn_emulation;
  2114. if (r) {
  2115. ++vcpu->stat.insn_emulation_fail;
  2116. if (kvm_mmu_unprotect_page_virt(vcpu, cr2))
  2117. return EMULATE_DONE;
  2118. return EMULATE_FAIL;
  2119. }
  2120. }
  2121. r = x86_emulate_insn(&vcpu->arch.emulate_ctxt, &emulate_ops);
  2122. if (vcpu->arch.pio.string)
  2123. return EMULATE_DO_MMIO;
  2124. if ((r || vcpu->mmio_is_write) && run) {
  2125. run->exit_reason = KVM_EXIT_MMIO;
  2126. run->mmio.phys_addr = vcpu->mmio_phys_addr;
  2127. memcpy(run->mmio.data, vcpu->mmio_data, 8);
  2128. run->mmio.len = vcpu->mmio_size;
  2129. run->mmio.is_write = vcpu->mmio_is_write;
  2130. }
  2131. if (r) {
  2132. if (kvm_mmu_unprotect_page_virt(vcpu, cr2))
  2133. return EMULATE_DONE;
  2134. if (!vcpu->mmio_needed) {
  2135. kvm_report_emulation_failure(vcpu, "mmio");
  2136. return EMULATE_FAIL;
  2137. }
  2138. return EMULATE_DO_MMIO;
  2139. }
  2140. kvm_x86_ops->set_rflags(vcpu, vcpu->arch.emulate_ctxt.eflags);
  2141. if (vcpu->mmio_is_write) {
  2142. vcpu->mmio_needed = 0;
  2143. return EMULATE_DO_MMIO;
  2144. }
  2145. return EMULATE_DONE;
  2146. }
  2147. EXPORT_SYMBOL_GPL(emulate_instruction);
  2148. static int pio_copy_data(struct kvm_vcpu *vcpu)
  2149. {
  2150. void *p = vcpu->arch.pio_data;
  2151. gva_t q = vcpu->arch.pio.guest_gva;
  2152. unsigned bytes;
  2153. int ret;
  2154. bytes = vcpu->arch.pio.size * vcpu->arch.pio.cur_count;
  2155. if (vcpu->arch.pio.in)
  2156. ret = kvm_write_guest_virt(q, p, bytes, vcpu);
  2157. else
  2158. ret = kvm_read_guest_virt(q, p, bytes, vcpu);
  2159. return ret;
  2160. }
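/*
 * Finish a port I/O operation once the data is available: for a simple
 * 'in' update RAX; for string I/O copy the data and advance RDI (in) or
 * RSI (out), and RCX for rep prefixes, by the number of completed
 * transfers.
 */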
  2161. int complete_pio(struct kvm_vcpu *vcpu)
  2162. {
  2163. struct kvm_pio_request *io = &vcpu->arch.pio;
  2164. long delta;
  2165. int r;
  2166. unsigned long val;
  2167. if (!io->string) {
  2168. if (io->in) {
  2169. val = kvm_register_read(vcpu, VCPU_REGS_RAX);
  2170. memcpy(&val, vcpu->arch.pio_data, io->size);
  2171. kvm_register_write(vcpu, VCPU_REGS_RAX, val);
  2172. }
  2173. } else {
  2174. if (io->in) {
  2175. r = pio_copy_data(vcpu);
  2176. if (r)
  2177. return r;
  2178. }
  2179. delta = 1;
  2180. if (io->rep) {
  2181. delta *= io->cur_count;
  2182. /*
  2183. * The size of the register should really depend on
  2184. * current address size.
  2185. */
  2186. val = kvm_register_read(vcpu, VCPU_REGS_RCX);
  2187. val -= delta;
  2188. kvm_register_write(vcpu, VCPU_REGS_RCX, val);
  2189. }
  2190. if (io->down)
  2191. delta = -delta;
  2192. delta *= io->size;
  2193. if (io->in) {
  2194. val = kvm_register_read(vcpu, VCPU_REGS_RDI);
  2195. val += delta;
  2196. kvm_register_write(vcpu, VCPU_REGS_RDI, val);
  2197. } else {
  2198. val = kvm_register_read(vcpu, VCPU_REGS_RSI);
  2199. val += delta;
  2200. kvm_register_write(vcpu, VCPU_REGS_RSI, val);
  2201. }
  2202. }
  2203. io->count -= io->cur_count;
  2204. io->cur_count = 0;
  2205. return 0;
  2206. }
  2207. static void kernel_pio(struct kvm_io_device *pio_dev,
  2208. struct kvm_vcpu *vcpu,
  2209. void *pd)
  2210. {
2211. /* TODO: string I/O for in-kernel devices */
  2212. mutex_lock(&vcpu->kvm->lock);
  2213. if (vcpu->arch.pio.in)
  2214. kvm_iodevice_read(pio_dev, vcpu->arch.pio.port,
  2215. vcpu->arch.pio.size,
  2216. pd);
  2217. else
  2218. kvm_iodevice_write(pio_dev, vcpu->arch.pio.port,
  2219. vcpu->arch.pio.size,
  2220. pd);
  2221. mutex_unlock(&vcpu->kvm->lock);
  2222. }
  2223. static void pio_string_write(struct kvm_io_device *pio_dev,
  2224. struct kvm_vcpu *vcpu)
  2225. {
  2226. struct kvm_pio_request *io = &vcpu->arch.pio;
  2227. void *pd = vcpu->arch.pio_data;
  2228. int i;
  2229. mutex_lock(&vcpu->kvm->lock);
  2230. for (i = 0; i < io->cur_count; i++) {
  2231. kvm_iodevice_write(pio_dev, io->port,
  2232. io->size,
  2233. pd);
  2234. pd += io->size;
  2235. }
  2236. mutex_unlock(&vcpu->kvm->lock);
  2237. }
  2238. static struct kvm_io_device *vcpu_find_pio_dev(struct kvm_vcpu *vcpu,
  2239. gpa_t addr, int len,
  2240. int is_write)
  2241. {
  2242. return kvm_io_bus_find_dev(&vcpu->kvm->pio_bus, addr, len, is_write);
  2243. }
  2244. int kvm_emulate_pio(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
  2245. int size, unsigned port)
  2246. {
  2247. struct kvm_io_device *pio_dev;
  2248. unsigned long val;
  2249. vcpu->run->exit_reason = KVM_EXIT_IO;
  2250. vcpu->run->io.direction = in ? KVM_EXIT_IO_IN : KVM_EXIT_IO_OUT;
  2251. vcpu->run->io.size = vcpu->arch.pio.size = size;
  2252. vcpu->run->io.data_offset = KVM_PIO_PAGE_OFFSET * PAGE_SIZE;
  2253. vcpu->run->io.count = vcpu->arch.pio.count = vcpu->arch.pio.cur_count = 1;
  2254. vcpu->run->io.port = vcpu->arch.pio.port = port;
  2255. vcpu->arch.pio.in = in;
  2256. vcpu->arch.pio.string = 0;
  2257. vcpu->arch.pio.down = 0;
  2258. vcpu->arch.pio.rep = 0;
  2259. if (vcpu->run->io.direction == KVM_EXIT_IO_IN)
  2260. KVMTRACE_2D(IO_READ, vcpu, vcpu->run->io.port, (u32)size,
  2261. handler);
  2262. else
  2263. KVMTRACE_2D(IO_WRITE, vcpu, vcpu->run->io.port, (u32)size,
  2264. handler);
  2265. val = kvm_register_read(vcpu, VCPU_REGS_RAX);
  2266. memcpy(vcpu->arch.pio_data, &val, 4);
  2267. pio_dev = vcpu_find_pio_dev(vcpu, port, size, !in);
  2268. if (pio_dev) {
  2269. kernel_pio(pio_dev, vcpu, vcpu->arch.pio_data);
  2270. complete_pio(vcpu);
  2271. return 1;
  2272. }
  2273. return 0;
  2274. }
  2275. EXPORT_SYMBOL_GPL(kvm_emulate_pio);
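/*
 * Handle a string port I/O instruction (ins/outs): set up the pio state,
 * clamp the transfer to the current page, and either complete it against
 * an in-kernel device or hand it to userspace via KVM_EXIT_IO.
 */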
  2276. int kvm_emulate_pio_string(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
  2277. int size, unsigned long count, int down,
  2278. gva_t address, int rep, unsigned port)
  2279. {
  2280. unsigned now, in_page;
  2281. int ret = 0;
  2282. struct kvm_io_device *pio_dev;
  2283. vcpu->run->exit_reason = KVM_EXIT_IO;
  2284. vcpu->run->io.direction = in ? KVM_EXIT_IO_IN : KVM_EXIT_IO_OUT;
  2285. vcpu->run->io.size = vcpu->arch.pio.size = size;
  2286. vcpu->run->io.data_offset = KVM_PIO_PAGE_OFFSET * PAGE_SIZE;
  2287. vcpu->run->io.count = vcpu->arch.pio.count = vcpu->arch.pio.cur_count = count;
  2288. vcpu->run->io.port = vcpu->arch.pio.port = port;
  2289. vcpu->arch.pio.in = in;
  2290. vcpu->arch.pio.string = 1;
  2291. vcpu->arch.pio.down = down;
  2292. vcpu->arch.pio.rep = rep;
  2293. if (vcpu->run->io.direction == KVM_EXIT_IO_IN)
  2294. KVMTRACE_2D(IO_READ, vcpu, vcpu->run->io.port, (u32)size,
  2295. handler);
  2296. else
  2297. KVMTRACE_2D(IO_WRITE, vcpu, vcpu->run->io.port, (u32)size,
  2298. handler);
  2299. if (!count) {
  2300. kvm_x86_ops->skip_emulated_instruction(vcpu);
  2301. return 1;
  2302. }
  2303. if (!down)
  2304. in_page = PAGE_SIZE - offset_in_page(address);
  2305. else
  2306. in_page = offset_in_page(address) + size;
  2307. now = min(count, (unsigned long)in_page / size);
  2308. if (!now)
  2309. now = 1;
  2310. if (down) {
  2311. /*
  2312. * String I/O in reverse. Yuck. Kill the guest, fix later.
  2313. */
  2314. pr_unimpl(vcpu, "guest string pio down\n");
  2315. kvm_inject_gp(vcpu, 0);
  2316. return 1;
  2317. }
  2318. vcpu->run->io.count = now;
  2319. vcpu->arch.pio.cur_count = now;
  2320. if (vcpu->arch.pio.cur_count == vcpu->arch.pio.count)
  2321. kvm_x86_ops->skip_emulated_instruction(vcpu);
  2322. vcpu->arch.pio.guest_gva = address;
  2323. pio_dev = vcpu_find_pio_dev(vcpu, port,
  2324. vcpu->arch.pio.cur_count,
  2325. !vcpu->arch.pio.in);
  2326. if (!vcpu->arch.pio.in) {
  2327. /* string PIO write */
  2328. ret = pio_copy_data(vcpu);
  2329. if (ret == X86EMUL_PROPAGATE_FAULT) {
  2330. kvm_inject_gp(vcpu, 0);
  2331. return 1;
  2332. }
  2333. if (ret == 0 && pio_dev) {
  2334. pio_string_write(pio_dev, vcpu);
  2335. complete_pio(vcpu);
  2336. if (vcpu->arch.pio.count == 0)
  2337. ret = 1;
  2338. }
  2339. } else if (pio_dev)
  2340. pr_unimpl(vcpu, "no string pio read support yet, "
  2341. "port %x size %d count %ld\n",
  2342. port, size, count);
  2343. return ret;
  2344. }
  2345. EXPORT_SYMBOL_GPL(kvm_emulate_pio_string);
  2346. static void bounce_off(void *info)
  2347. {
  2348. /* nothing */
  2349. }
  2350. static unsigned int ref_freq;
  2351. static unsigned long tsc_khz_ref;
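/*
 * cpufreq transition hook: rescale the per-cpu TSC frequency and request a
 * kvmclock update for every vcpu running on the affected cpu, kicking
 * remote cpus out of guest mode when the frequency goes up.
 */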
  2352. static int kvmclock_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
  2353. void *data)
  2354. {
  2355. struct cpufreq_freqs *freq = data;
  2356. struct kvm *kvm;
  2357. struct kvm_vcpu *vcpu;
  2358. int i, send_ipi = 0;
  2359. if (!ref_freq)
  2360. ref_freq = freq->old;
  2361. if (val == CPUFREQ_PRECHANGE && freq->old > freq->new)
  2362. return 0;
  2363. if (val == CPUFREQ_POSTCHANGE && freq->old < freq->new)
  2364. return 0;
  2365. per_cpu(cpu_tsc_khz, freq->cpu) = cpufreq_scale(tsc_khz_ref, ref_freq, freq->new);
  2366. spin_lock(&kvm_lock);
  2367. list_for_each_entry(kvm, &vm_list, vm_list) {
  2368. for (i = 0; i < KVM_MAX_VCPUS; ++i) {
  2369. vcpu = kvm->vcpus[i];
  2370. if (!vcpu)
  2371. continue;
  2372. if (vcpu->cpu != freq->cpu)
  2373. continue;
  2374. if (!kvm_request_guest_time_update(vcpu))
  2375. continue;
  2376. if (vcpu->cpu != smp_processor_id())
  2377. send_ipi++;
  2378. }
  2379. }
  2380. spin_unlock(&kvm_lock);
  2381. if (freq->old < freq->new && send_ipi) {
  2382. /*
2383. * The frequency is being scaled up. Make sure the guest
2384. * does not see old kvmclock values while running at the
2385. * new frequency, otherwise we risk the guest seeing time
2386. * go backwards.
  2387. *
  2388. * In case we update the frequency for another cpu
  2389. * (which might be in guest context) send an interrupt
  2390. * to kick the cpu out of guest context. Next time
  2391. * guest context is entered kvmclock will be updated,
  2392. * so the guest will not see stale values.
  2393. */
  2394. smp_call_function_single(freq->cpu, bounce_off, NULL, 1);
  2395. }
  2396. return 0;
  2397. }
  2398. static struct notifier_block kvmclock_cpufreq_notifier_block = {
  2399. .notifier_call = kvmclock_cpufreq_notifier
  2400. };
  2401. int kvm_arch_init(void *opaque)
  2402. {
  2403. int r, cpu;
  2404. struct kvm_x86_ops *ops = (struct kvm_x86_ops *)opaque;
  2405. if (kvm_x86_ops) {
  2406. printk(KERN_ERR "kvm: already loaded the other module\n");
  2407. r = -EEXIST;
  2408. goto out;
  2409. }
  2410. if (!ops->cpu_has_kvm_support()) {
  2411. printk(KERN_ERR "kvm: no hardware support\n");
  2412. r = -EOPNOTSUPP;
  2413. goto out;
  2414. }
  2415. if (ops->disabled_by_bios()) {
  2416. printk(KERN_ERR "kvm: disabled by bios\n");
  2417. r = -EOPNOTSUPP;
  2418. goto out;
  2419. }
  2420. r = kvm_mmu_module_init();
  2421. if (r)
  2422. goto out;
  2423. kvm_init_msr_list();
  2424. kvm_x86_ops = ops;
  2425. kvm_mmu_set_nonpresent_ptes(0ull, 0ull);
  2426. kvm_mmu_set_base_ptes(PT_PRESENT_MASK);
  2427. kvm_mmu_set_mask_ptes(PT_USER_MASK, PT_ACCESSED_MASK,
  2428. PT_DIRTY_MASK, PT64_NX_MASK, 0, 0);
  2429. for_each_possible_cpu(cpu)
  2430. per_cpu(cpu_tsc_khz, cpu) = tsc_khz;
  2431. if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) {
  2432. tsc_khz_ref = tsc_khz;
  2433. cpufreq_register_notifier(&kvmclock_cpufreq_notifier_block,
  2434. CPUFREQ_TRANSITION_NOTIFIER);
  2435. }
  2436. return 0;
  2437. out:
  2438. return r;
  2439. }
  2440. void kvm_arch_exit(void)
  2441. {
  2442. if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
  2443. cpufreq_unregister_notifier(&kvmclock_cpufreq_notifier_block,
  2444. CPUFREQ_TRANSITION_NOTIFIER);
  2445. kvm_x86_ops = NULL;
  2446. kvm_mmu_module_exit();
  2447. }
  2448. int kvm_emulate_halt(struct kvm_vcpu *vcpu)
  2449. {
  2450. ++vcpu->stat.halt_exits;
  2451. KVMTRACE_0D(HLT, vcpu, handler);
  2452. if (irqchip_in_kernel(vcpu->kvm)) {
  2453. vcpu->arch.mp_state = KVM_MP_STATE_HALTED;
  2454. return 1;
  2455. } else {
  2456. vcpu->run->exit_reason = KVM_EXIT_HLT;
  2457. return 0;
  2458. }
  2459. }
  2460. EXPORT_SYMBOL_GPL(kvm_emulate_halt);
  2461. static inline gpa_t hc_gpa(struct kvm_vcpu *vcpu, unsigned long a0,
  2462. unsigned long a1)
  2463. {
  2464. if (is_long_mode(vcpu))
  2465. return a0;
  2466. else
  2467. return a0 | ((gpa_t)a1 << 32);
  2468. }
  2469. int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
  2470. {
  2471. unsigned long nr, a0, a1, a2, a3, ret;
  2472. int r = 1;
  2473. nr = kvm_register_read(vcpu, VCPU_REGS_RAX);
  2474. a0 = kvm_register_read(vcpu, VCPU_REGS_RBX);
  2475. a1 = kvm_register_read(vcpu, VCPU_REGS_RCX);
  2476. a2 = kvm_register_read(vcpu, VCPU_REGS_RDX);
  2477. a3 = kvm_register_read(vcpu, VCPU_REGS_RSI);
  2478. KVMTRACE_1D(VMMCALL, vcpu, (u32)nr, handler);
  2479. if (!is_long_mode(vcpu)) {
  2480. nr &= 0xFFFFFFFF;
  2481. a0 &= 0xFFFFFFFF;
  2482. a1 &= 0xFFFFFFFF;
  2483. a2 &= 0xFFFFFFFF;
  2484. a3 &= 0xFFFFFFFF;
  2485. }
  2486. switch (nr) {
  2487. case KVM_HC_VAPIC_POLL_IRQ:
  2488. ret = 0;
  2489. break;
  2490. case KVM_HC_MMU_OP:
  2491. r = kvm_pv_mmu_op(vcpu, a0, hc_gpa(vcpu, a1, a2), &ret);
  2492. break;
  2493. default:
  2494. ret = -KVM_ENOSYS;
  2495. break;
  2496. }
  2497. kvm_register_write(vcpu, VCPU_REGS_RAX, ret);
  2498. ++vcpu->stat.hypercalls;
  2499. return r;
  2500. }
  2501. EXPORT_SYMBOL_GPL(kvm_emulate_hypercall);
  2502. int kvm_fix_hypercall(struct kvm_vcpu *vcpu)
  2503. {
  2504. char instruction[3];
  2505. int ret = 0;
  2506. unsigned long rip = kvm_rip_read(vcpu);
  2507. /*
2508. * Blow out the MMU so that no other VCPU keeps an active mapping of
2509. * the page being patched, ensuring that the updated hypercall appears
2510. * atomically across all VCPUs.
  2511. */
  2512. kvm_mmu_zap_all(vcpu->kvm);
  2513. kvm_x86_ops->patch_hypercall(vcpu, instruction);
  2514. if (emulator_write_emulated(rip, instruction, 3, vcpu)
  2515. != X86EMUL_CONTINUE)
  2516. ret = -EFAULT;
  2517. return ret;
  2518. }
  2519. static u64 mk_cr_64(u64 curr_cr, u32 new_val)
  2520. {
  2521. return (curr_cr & ~((1ULL << 32) - 1)) | new_val;
  2522. }
  2523. void realmode_lgdt(struct kvm_vcpu *vcpu, u16 limit, unsigned long base)
  2524. {
  2525. struct descriptor_table dt = { limit, base };
  2526. kvm_x86_ops->set_gdt(vcpu, &dt);
  2527. }
  2528. void realmode_lidt(struct kvm_vcpu *vcpu, u16 limit, unsigned long base)
  2529. {
  2530. struct descriptor_table dt = { limit, base };
  2531. kvm_x86_ops->set_idt(vcpu, &dt);
  2532. }
  2533. void realmode_lmsw(struct kvm_vcpu *vcpu, unsigned long msw,
  2534. unsigned long *rflags)
  2535. {
  2536. kvm_lmsw(vcpu, msw);
  2537. *rflags = kvm_x86_ops->get_rflags(vcpu);
  2538. }
  2539. unsigned long realmode_get_cr(struct kvm_vcpu *vcpu, int cr)
  2540. {
  2541. unsigned long value;
  2542. kvm_x86_ops->decache_cr4_guest_bits(vcpu);
  2543. switch (cr) {
  2544. case 0:
  2545. value = vcpu->arch.cr0;
  2546. break;
  2547. case 2:
  2548. value = vcpu->arch.cr2;
  2549. break;
  2550. case 3:
  2551. value = vcpu->arch.cr3;
  2552. break;
  2553. case 4:
  2554. value = vcpu->arch.cr4;
  2555. break;
  2556. case 8:
  2557. value = kvm_get_cr8(vcpu);
  2558. break;
  2559. default:
  2560. vcpu_printf(vcpu, "%s: unexpected cr %u\n", __func__, cr);
  2561. return 0;
  2562. }
  2563. KVMTRACE_3D(CR_READ, vcpu, (u32)cr, (u32)value,
  2564. (u32)((u64)value >> 32), handler);
  2565. return value;
  2566. }
  2567. void realmode_set_cr(struct kvm_vcpu *vcpu, int cr, unsigned long val,
  2568. unsigned long *rflags)
  2569. {
  2570. KVMTRACE_3D(CR_WRITE, vcpu, (u32)cr, (u32)val,
  2571. (u32)((u64)val >> 32), handler);
  2572. switch (cr) {
  2573. case 0:
  2574. kvm_set_cr0(vcpu, mk_cr_64(vcpu->arch.cr0, val));
  2575. *rflags = kvm_x86_ops->get_rflags(vcpu);
  2576. break;
  2577. case 2:
  2578. vcpu->arch.cr2 = val;
  2579. break;
  2580. case 3:
  2581. kvm_set_cr3(vcpu, val);
  2582. break;
  2583. case 4:
  2584. kvm_set_cr4(vcpu, mk_cr_64(vcpu->arch.cr4, val));
  2585. break;
  2586. case 8:
  2587. kvm_set_cr8(vcpu, val & 0xfUL);
  2588. break;
  2589. default:
  2590. vcpu_printf(vcpu, "%s: unexpected cr %u\n", __func__, cr);
  2591. }
  2592. }
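/*
 * Stateful CPUID leaves (function 2) are emulated by rotating the
 * STATE_READ_NEXT flag through the entries of the same function, so each
 * guest CPUID invocation sees the next entry in sequence.
 */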
  2593. static int move_to_next_stateful_cpuid_entry(struct kvm_vcpu *vcpu, int i)
  2594. {
  2595. struct kvm_cpuid_entry2 *e = &vcpu->arch.cpuid_entries[i];
  2596. int j, nent = vcpu->arch.cpuid_nent;
  2597. e->flags &= ~KVM_CPUID_FLAG_STATE_READ_NEXT;
  2598. /* when no next entry is found, the current entry[i] is reselected */
  2599. for (j = i + 1; ; j = (j + 1) % nent) {
  2600. struct kvm_cpuid_entry2 *ej = &vcpu->arch.cpuid_entries[j];
  2601. if (ej->function == e->function) {
  2602. ej->flags |= KVM_CPUID_FLAG_STATE_READ_NEXT;
  2603. return j;
  2604. }
  2605. }
  2606. return 0; /* silence gcc, even though control never reaches here */
  2607. }
  2608. /* find an entry with matching function, matching index (if needed), and that
  2609. * should be read next (if it's stateful) */
  2610. static int is_matching_cpuid_entry(struct kvm_cpuid_entry2 *e,
  2611. u32 function, u32 index)
  2612. {
  2613. if (e->function != function)
  2614. return 0;
  2615. if ((e->flags & KVM_CPUID_FLAG_SIGNIFCANT_INDEX) && e->index != index)
  2616. return 0;
  2617. if ((e->flags & KVM_CPUID_FLAG_STATEFUL_FUNC) &&
  2618. !(e->flags & KVM_CPUID_FLAG_STATE_READ_NEXT))
  2619. return 0;
  2620. return 1;
  2621. }
  2622. struct kvm_cpuid_entry2 *kvm_find_cpuid_entry(struct kvm_vcpu *vcpu,
  2623. u32 function, u32 index)
  2624. {
  2625. int i;
  2626. struct kvm_cpuid_entry2 *best = NULL;
  2627. for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
  2628. struct kvm_cpuid_entry2 *e;
  2629. e = &vcpu->arch.cpuid_entries[i];
  2630. if (is_matching_cpuid_entry(e, function, index)) {
  2631. if (e->flags & KVM_CPUID_FLAG_STATEFUL_FUNC)
  2632. move_to_next_stateful_cpuid_entry(vcpu, i);
  2633. best = e;
  2634. break;
  2635. }
  2636. /*
  2637. * Both basic or both extended?
  2638. */
  2639. if (((e->function ^ function) & 0x80000000) == 0)
  2640. if (!best || e->function > best->function)
  2641. best = e;
  2642. }
  2643. return best;
  2644. }
  2645. void kvm_emulate_cpuid(struct kvm_vcpu *vcpu)
  2646. {
  2647. u32 function, index;
  2648. struct kvm_cpuid_entry2 *best;
  2649. function = kvm_register_read(vcpu, VCPU_REGS_RAX);
  2650. index = kvm_register_read(vcpu, VCPU_REGS_RCX);
  2651. kvm_register_write(vcpu, VCPU_REGS_RAX, 0);
  2652. kvm_register_write(vcpu, VCPU_REGS_RBX, 0);
  2653. kvm_register_write(vcpu, VCPU_REGS_RCX, 0);
  2654. kvm_register_write(vcpu, VCPU_REGS_RDX, 0);
  2655. best = kvm_find_cpuid_entry(vcpu, function, index);
  2656. if (best) {
  2657. kvm_register_write(vcpu, VCPU_REGS_RAX, best->eax);
  2658. kvm_register_write(vcpu, VCPU_REGS_RBX, best->ebx);
  2659. kvm_register_write(vcpu, VCPU_REGS_RCX, best->ecx);
  2660. kvm_register_write(vcpu, VCPU_REGS_RDX, best->edx);
  2661. }
  2662. kvm_x86_ops->skip_emulated_instruction(vcpu);
  2663. KVMTRACE_5D(CPUID, vcpu, function,
  2664. (u32)kvm_register_read(vcpu, VCPU_REGS_RAX),
  2665. (u32)kvm_register_read(vcpu, VCPU_REGS_RBX),
  2666. (u32)kvm_register_read(vcpu, VCPU_REGS_RCX),
  2667. (u32)kvm_register_read(vcpu, VCPU_REGS_RDX), handler);
  2668. }
  2669. EXPORT_SYMBOL_GPL(kvm_emulate_cpuid);
  2670. /*
  2671. * Check if userspace requested an interrupt window, and that the
  2672. * interrupt window is open.
  2673. *
  2674. * No need to exit to userspace if we already have an interrupt queued.
  2675. */
  2676. static int dm_request_for_irq_injection(struct kvm_vcpu *vcpu,
  2677. struct kvm_run *kvm_run)
  2678. {
  2679. return (!vcpu->arch.irq_summary &&
  2680. kvm_run->request_interrupt_window &&
  2681. vcpu->arch.interrupt_window_open &&
  2682. (kvm_x86_ops->get_rflags(vcpu) & X86_EFLAGS_IF));
  2683. }
  2684. static void post_kvm_run_save(struct kvm_vcpu *vcpu,
  2685. struct kvm_run *kvm_run)
  2686. {
  2687. kvm_run->if_flag = (kvm_x86_ops->get_rflags(vcpu) & X86_EFLAGS_IF) != 0;
  2688. kvm_run->cr8 = kvm_get_cr8(vcpu);
  2689. kvm_run->apic_base = kvm_get_apic_base(vcpu);
  2690. if (irqchip_in_kernel(vcpu->kvm))
  2691. kvm_run->ready_for_interrupt_injection = 1;
  2692. else
  2693. kvm_run->ready_for_interrupt_injection =
  2694. (vcpu->arch.interrupt_window_open &&
  2695. vcpu->arch.irq_summary == 0);
  2696. }
  2697. static void vapic_enter(struct kvm_vcpu *vcpu)
  2698. {
  2699. struct kvm_lapic *apic = vcpu->arch.apic;
  2700. struct page *page;
  2701. if (!apic || !apic->vapic_addr)
  2702. return;
  2703. page = gfn_to_page(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT);
  2704. vcpu->arch.apic->vapic_page = page;
  2705. }
  2706. static void vapic_exit(struct kvm_vcpu *vcpu)
  2707. {
  2708. struct kvm_lapic *apic = vcpu->arch.apic;
  2709. if (!apic || !apic->vapic_addr)
  2710. return;
  2711. down_read(&vcpu->kvm->slots_lock);
  2712. kvm_release_page_dirty(apic->vapic_page);
  2713. mark_page_dirty(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT);
  2714. up_read(&vcpu->kvm->slots_lock);
  2715. }
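/*
 * Run the guest for one exit: service pending vcpu requests, inject any
 * queued exception or interrupt, swap the debug registers if needed, enter
 * the guest and finally hand the exit reason to kvm_x86_ops.
 */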
  2716. static int vcpu_enter_guest(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
  2717. {
  2718. int r;
  2719. if (vcpu->requests)
  2720. if (test_and_clear_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests))
  2721. kvm_mmu_unload(vcpu);
  2722. r = kvm_mmu_reload(vcpu);
  2723. if (unlikely(r))
  2724. goto out;
  2725. if (vcpu->requests) {
  2726. if (test_and_clear_bit(KVM_REQ_MIGRATE_TIMER, &vcpu->requests))
  2727. __kvm_migrate_timers(vcpu);
  2728. if (test_and_clear_bit(KVM_REQ_KVMCLOCK_UPDATE, &vcpu->requests))
  2729. kvm_write_guest_time(vcpu);
  2730. if (test_and_clear_bit(KVM_REQ_MMU_SYNC, &vcpu->requests))
  2731. kvm_mmu_sync_roots(vcpu);
  2732. if (test_and_clear_bit(KVM_REQ_TLB_FLUSH, &vcpu->requests))
  2733. kvm_x86_ops->tlb_flush(vcpu);
  2734. if (test_and_clear_bit(KVM_REQ_REPORT_TPR_ACCESS,
  2735. &vcpu->requests)) {
  2736. kvm_run->exit_reason = KVM_EXIT_TPR_ACCESS;
  2737. r = 0;
  2738. goto out;
  2739. }
  2740. if (test_and_clear_bit(KVM_REQ_TRIPLE_FAULT, &vcpu->requests)) {
  2741. kvm_run->exit_reason = KVM_EXIT_SHUTDOWN;
  2742. r = 0;
  2743. goto out;
  2744. }
  2745. }
  2746. clear_bit(KVM_REQ_PENDING_TIMER, &vcpu->requests);
  2747. kvm_inject_pending_timer_irqs(vcpu);
  2748. preempt_disable();
  2749. kvm_x86_ops->prepare_guest_switch(vcpu);
  2750. kvm_load_guest_fpu(vcpu);
  2751. local_irq_disable();
  2752. if (vcpu->requests || need_resched() || signal_pending(current)) {
  2753. local_irq_enable();
  2754. preempt_enable();
  2755. r = 1;
  2756. goto out;
  2757. }
  2758. vcpu->guest_mode = 1;
  2759. /*
  2760. * Make sure that guest_mode assignment won't happen after
  2761. * testing the pending IRQ vector bitmap.
  2762. */
  2763. smp_wmb();
  2764. if (vcpu->arch.exception.pending)
  2765. __queue_exception(vcpu);
  2766. else if (irqchip_in_kernel(vcpu->kvm))
  2767. kvm_x86_ops->inject_pending_irq(vcpu);
  2768. else
  2769. kvm_x86_ops->inject_pending_vectors(vcpu, kvm_run);
  2770. kvm_lapic_sync_to_vapic(vcpu);
  2771. up_read(&vcpu->kvm->slots_lock);
  2772. kvm_guest_enter();
  2773. get_debugreg(vcpu->arch.host_dr6, 6);
  2774. get_debugreg(vcpu->arch.host_dr7, 7);
  2775. if (unlikely(vcpu->arch.switch_db_regs)) {
  2776. get_debugreg(vcpu->arch.host_db[0], 0);
  2777. get_debugreg(vcpu->arch.host_db[1], 1);
  2778. get_debugreg(vcpu->arch.host_db[2], 2);
  2779. get_debugreg(vcpu->arch.host_db[3], 3);
  2780. set_debugreg(0, 7);
  2781. set_debugreg(vcpu->arch.eff_db[0], 0);
  2782. set_debugreg(vcpu->arch.eff_db[1], 1);
  2783. set_debugreg(vcpu->arch.eff_db[2], 2);
  2784. set_debugreg(vcpu->arch.eff_db[3], 3);
  2785. }
  2786. KVMTRACE_0D(VMENTRY, vcpu, entryexit);
  2787. kvm_x86_ops->run(vcpu, kvm_run);
  2788. if (unlikely(vcpu->arch.switch_db_regs)) {
  2789. set_debugreg(0, 7);
  2790. set_debugreg(vcpu->arch.host_db[0], 0);
  2791. set_debugreg(vcpu->arch.host_db[1], 1);
  2792. set_debugreg(vcpu->arch.host_db[2], 2);
  2793. set_debugreg(vcpu->arch.host_db[3], 3);
  2794. }
  2795. set_debugreg(vcpu->arch.host_dr6, 6);
  2796. set_debugreg(vcpu->arch.host_dr7, 7);
  2797. vcpu->guest_mode = 0;
  2798. local_irq_enable();
  2799. ++vcpu->stat.exits;
  2800. /*
  2801. * We must have an instruction between local_irq_enable() and
  2802. * kvm_guest_exit(), so the timer interrupt isn't delayed by
  2803. * the interrupt shadow. The stat.exits increment will do nicely.
  2804. * But we need to prevent reordering, hence this barrier():
  2805. */
  2806. barrier();
  2807. kvm_guest_exit();
  2808. preempt_enable();
  2809. down_read(&vcpu->kvm->slots_lock);
  2810. /*
  2811. * Profile KVM exit RIPs:
  2812. */
  2813. if (unlikely(prof_on == KVM_PROFILING)) {
  2814. unsigned long rip = kvm_rip_read(vcpu);
  2815. profile_hit(KVM_PROFILING, (void *)rip);
  2816. }
  2817. if (vcpu->arch.exception.pending && kvm_x86_ops->exception_injected(vcpu))
  2818. vcpu->arch.exception.pending = false;
  2819. kvm_lapic_sync_from_vapic(vcpu);
  2820. r = kvm_x86_ops->handle_exit(kvm_run, vcpu);
  2821. out:
  2822. return r;
  2823. }
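/*
 * Outer run loop: keep calling vcpu_enter_guest() while the vcpu is
 * runnable, block while it is halted, yield on need_resched(), and drop
 * back to userspace on signals or when userspace has requested an open
 * interrupt window.
 */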
  2824. static int __vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
  2825. {
  2826. int r;
  2827. if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_SIPI_RECEIVED)) {
  2828. pr_debug("vcpu %d received sipi with vector # %x\n",
  2829. vcpu->vcpu_id, vcpu->arch.sipi_vector);
  2830. kvm_lapic_reset(vcpu);
  2831. r = kvm_arch_vcpu_reset(vcpu);
  2832. if (r)
  2833. return r;
  2834. vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
  2835. }
  2836. down_read(&vcpu->kvm->slots_lock);
  2837. vapic_enter(vcpu);
  2838. r = 1;
  2839. while (r > 0) {
  2840. if (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE)
  2841. r = vcpu_enter_guest(vcpu, kvm_run);
  2842. else {
  2843. up_read(&vcpu->kvm->slots_lock);
  2844. kvm_vcpu_block(vcpu);
  2845. down_read(&vcpu->kvm->slots_lock);
  2846. if (test_and_clear_bit(KVM_REQ_UNHALT, &vcpu->requests))
  2847. if (vcpu->arch.mp_state == KVM_MP_STATE_HALTED)
  2848. vcpu->arch.mp_state =
  2849. KVM_MP_STATE_RUNNABLE;
  2850. if (vcpu->arch.mp_state != KVM_MP_STATE_RUNNABLE)
  2851. r = -EINTR;
  2852. }
  2853. if (r > 0) {
  2854. if (dm_request_for_irq_injection(vcpu, kvm_run)) {
  2855. r = -EINTR;
  2856. kvm_run->exit_reason = KVM_EXIT_INTR;
  2857. ++vcpu->stat.request_irq_exits;
  2858. }
  2859. if (signal_pending(current)) {
  2860. r = -EINTR;
  2861. kvm_run->exit_reason = KVM_EXIT_INTR;
  2862. ++vcpu->stat.signal_exits;
  2863. }
  2864. if (need_resched()) {
  2865. up_read(&vcpu->kvm->slots_lock);
  2866. kvm_resched(vcpu);
  2867. down_read(&vcpu->kvm->slots_lock);
  2868. }
  2869. }
  2870. }
  2871. up_read(&vcpu->kvm->slots_lock);
  2872. post_kvm_run_save(vcpu, kvm_run);
  2873. vapic_exit(vcpu);
  2874. return r;
  2875. }
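/*
 * KVM_RUN entry point: restore the user sigmask, complete any pending
 * pio/mmio operation left over from the previous exit, then enter the
 * vcpu run loop.
 */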
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	int r;
	sigset_t sigsaved;

	vcpu_load(vcpu);

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

	if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_UNINITIALIZED)) {
		kvm_vcpu_block(vcpu);
		clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
		r = -EAGAIN;
		goto out;
	}

	/* re-sync apic's tpr */
	if (!irqchip_in_kernel(vcpu->kvm))
		kvm_set_cr8(vcpu, kvm_run->cr8);

	if (vcpu->arch.pio.cur_count) {
		r = complete_pio(vcpu);
		if (r)
			goto out;
	}
#ifdef CONFIG_HAS_IOMEM
	if (vcpu->mmio_needed) {
		memcpy(vcpu->mmio_data, kvm_run->mmio.data, 8);
		vcpu->mmio_read_completed = 1;
		vcpu->mmio_needed = 0;

		down_read(&vcpu->kvm->slots_lock);
		r = emulate_instruction(vcpu, kvm_run,
					vcpu->arch.mmio_fault_cr2, 0,
					EMULTYPE_NO_DECODE);
		up_read(&vcpu->kvm->slots_lock);
		if (r == EMULATE_DO_MMIO) {
			/*
			 * Read-modify-write.  Back to userspace.
			 */
			r = 0;
			goto out;
		}
	}
#endif
	if (kvm_run->exit_reason == KVM_EXIT_HYPERCALL)
		kvm_register_write(vcpu, VCPU_REGS_RAX,
				   kvm_run->hypercall.ret);

	r = __vcpu_run(vcpu, kvm_run);

out:
	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

	vcpu_put(vcpu);
	return r;
}
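
/*
 * KVM_GET_REGS / KVM_SET_REGS: copy the general-purpose register file,
 * rip and rflags between the vcpu and userspace.  Single-step related
 * rflags bits are masked on read so debugger state does not leak to
 * userspace.
 */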
int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	vcpu_load(vcpu);

	regs->rax = kvm_register_read(vcpu, VCPU_REGS_RAX);
	regs->rbx = kvm_register_read(vcpu, VCPU_REGS_RBX);
	regs->rcx = kvm_register_read(vcpu, VCPU_REGS_RCX);
	regs->rdx = kvm_register_read(vcpu, VCPU_REGS_RDX);
	regs->rsi = kvm_register_read(vcpu, VCPU_REGS_RSI);
	regs->rdi = kvm_register_read(vcpu, VCPU_REGS_RDI);
	regs->rsp = kvm_register_read(vcpu, VCPU_REGS_RSP);
	regs->rbp = kvm_register_read(vcpu, VCPU_REGS_RBP);
#ifdef CONFIG_X86_64
	regs->r8 = kvm_register_read(vcpu, VCPU_REGS_R8);
	regs->r9 = kvm_register_read(vcpu, VCPU_REGS_R9);
	regs->r10 = kvm_register_read(vcpu, VCPU_REGS_R10);
	regs->r11 = kvm_register_read(vcpu, VCPU_REGS_R11);
	regs->r12 = kvm_register_read(vcpu, VCPU_REGS_R12);
	regs->r13 = kvm_register_read(vcpu, VCPU_REGS_R13);
	regs->r14 = kvm_register_read(vcpu, VCPU_REGS_R14);
	regs->r15 = kvm_register_read(vcpu, VCPU_REGS_R15);
#endif

	regs->rip = kvm_rip_read(vcpu);
	regs->rflags = kvm_x86_ops->get_rflags(vcpu);

	/*
	 * Don't leak debug flags in case they were set for guest debugging
	 */
	if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
		regs->rflags &= ~(X86_EFLAGS_TF | X86_EFLAGS_RF);

	vcpu_put(vcpu);

	return 0;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	vcpu_load(vcpu);

	kvm_register_write(vcpu, VCPU_REGS_RAX, regs->rax);
	kvm_register_write(vcpu, VCPU_REGS_RBX, regs->rbx);
	kvm_register_write(vcpu, VCPU_REGS_RCX, regs->rcx);
	kvm_register_write(vcpu, VCPU_REGS_RDX, regs->rdx);
	kvm_register_write(vcpu, VCPU_REGS_RSI, regs->rsi);
	kvm_register_write(vcpu, VCPU_REGS_RDI, regs->rdi);
	kvm_register_write(vcpu, VCPU_REGS_RSP, regs->rsp);
	kvm_register_write(vcpu, VCPU_REGS_RBP, regs->rbp);
#ifdef CONFIG_X86_64
	kvm_register_write(vcpu, VCPU_REGS_R8, regs->r8);
	kvm_register_write(vcpu, VCPU_REGS_R9, regs->r9);
	kvm_register_write(vcpu, VCPU_REGS_R10, regs->r10);
	kvm_register_write(vcpu, VCPU_REGS_R11, regs->r11);
	kvm_register_write(vcpu, VCPU_REGS_R12, regs->r12);
	kvm_register_write(vcpu, VCPU_REGS_R13, regs->r13);
	kvm_register_write(vcpu, VCPU_REGS_R14, regs->r14);
	kvm_register_write(vcpu, VCPU_REGS_R15, regs->r15);
#endif

	kvm_rip_write(vcpu, regs->rip);
	kvm_x86_ops->set_rflags(vcpu, regs->rflags);

	vcpu->arch.exception.pending = false;

	vcpu_put(vcpu);

	return 0;
}

void kvm_get_segment(struct kvm_vcpu *vcpu,
		     struct kvm_segment *var, int seg)
{
	kvm_x86_ops->get_segment(vcpu, var, seg);
}

void kvm_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l)
{
	struct kvm_segment cs;

	kvm_get_segment(vcpu, &cs, VCPU_SREG_CS);
	*db = cs.db;
	*l = cs.l;
}
EXPORT_SYMBOL_GPL(kvm_get_cs_db_l_bits);

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	struct descriptor_table dt;
	int pending_vec;

	vcpu_load(vcpu);

	kvm_get_segment(vcpu, &sregs->cs, VCPU_SREG_CS);
	kvm_get_segment(vcpu, &sregs->ds, VCPU_SREG_DS);
	kvm_get_segment(vcpu, &sregs->es, VCPU_SREG_ES);
	kvm_get_segment(vcpu, &sregs->fs, VCPU_SREG_FS);
	kvm_get_segment(vcpu, &sregs->gs, VCPU_SREG_GS);
	kvm_get_segment(vcpu, &sregs->ss, VCPU_SREG_SS);

	kvm_get_segment(vcpu, &sregs->tr, VCPU_SREG_TR);
	kvm_get_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR);

	kvm_x86_ops->get_idt(vcpu, &dt);
	sregs->idt.limit = dt.limit;
	sregs->idt.base = dt.base;
	kvm_x86_ops->get_gdt(vcpu, &dt);
	sregs->gdt.limit = dt.limit;
	sregs->gdt.base = dt.base;

	kvm_x86_ops->decache_cr4_guest_bits(vcpu);
	sregs->cr0 = vcpu->arch.cr0;
	sregs->cr2 = vcpu->arch.cr2;
	sregs->cr3 = vcpu->arch.cr3;
	sregs->cr4 = vcpu->arch.cr4;
	sregs->cr8 = kvm_get_cr8(vcpu);
	sregs->efer = vcpu->arch.shadow_efer;
	sregs->apic_base = kvm_get_apic_base(vcpu);

	if (irqchip_in_kernel(vcpu->kvm)) {
		memset(sregs->interrupt_bitmap, 0,
		       sizeof sregs->interrupt_bitmap);
		pending_vec = kvm_x86_ops->get_irq(vcpu);
		if (pending_vec >= 0)
			set_bit(pending_vec,
				(unsigned long *)sregs->interrupt_bitmap);
	} else
		memcpy(sregs->interrupt_bitmap, vcpu->arch.irq_pending,
		       sizeof sregs->interrupt_bitmap);

	vcpu_put(vcpu);

	return 0;
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	vcpu_load(vcpu);
	mp_state->mp_state = vcpu->arch.mp_state;
	vcpu_put(vcpu);
	return 0;
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	vcpu_load(vcpu);
	vcpu->arch.mp_state = mp_state->mp_state;
	vcpu_put(vcpu);
	return 0;
}

static void kvm_set_segment(struct kvm_vcpu *vcpu,
			struct kvm_segment *var, int seg)
{
	kvm_x86_ops->set_segment(vcpu, var, seg);
}
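
/*
 * Convert a raw GDT/LDT descriptor into the flat struct kvm_segment
 * representation used elsewhere in KVM: unpack base and limit, scale the
 * limit by the granularity bit, copy the attribute bits and mark a null
 * selector as unusable.
 */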
static void seg_desct_to_kvm_desct(struct desc_struct *seg_desc, u16 selector,
				   struct kvm_segment *kvm_desct)
{
	kvm_desct->base = seg_desc->base0;
	kvm_desct->base |= seg_desc->base1 << 16;
	kvm_desct->base |= seg_desc->base2 << 24;
	kvm_desct->limit = seg_desc->limit0;
	kvm_desct->limit |= seg_desc->limit << 16;
	if (seg_desc->g) {
		kvm_desct->limit <<= 12;
		kvm_desct->limit |= 0xfff;
	}
	kvm_desct->selector = selector;
	kvm_desct->type = seg_desc->type;
	kvm_desct->present = seg_desc->p;
	kvm_desct->dpl = seg_desc->dpl;
	kvm_desct->db = seg_desc->d;
	kvm_desct->s = seg_desc->s;
	kvm_desct->l = seg_desc->l;
	kvm_desct->g = seg_desc->g;
	kvm_desct->avl = seg_desc->avl;
	if (!selector)
		kvm_desct->unusable = 1;
	else
		kvm_desct->unusable = 0;
	kvm_desct->padding = 0;
}
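
/*
 * Pick the descriptor table a selector refers to: bit 2 of the selector
 * selects the LDT (taken from the vcpu's LDTR segment), otherwise the GDT.
 */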
static void get_segment_descriptor_dtable(struct kvm_vcpu *vcpu,
					  u16 selector,
					  struct descriptor_table *dtable)
{
	if (selector & 1 << 2) {
		struct kvm_segment kvm_seg;

		kvm_get_segment(vcpu, &kvm_seg, VCPU_SREG_LDTR);

		if (kvm_seg.unusable)
			dtable->limit = 0;
		else
			dtable->limit = kvm_seg.limit;
		dtable->base = kvm_seg.base;
	}
	else
		kvm_x86_ops->get_gdt(vcpu, dtable);
}

/* allowed just for 8-byte segment descriptors */
static int load_guest_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector,
					 struct desc_struct *seg_desc)
{
	gpa_t gpa;
	struct descriptor_table dtable;
	u16 index = selector >> 3;

	get_segment_descriptor_dtable(vcpu, selector, &dtable);

	if (dtable.limit < index * 8 + 7) {
		kvm_queue_exception_e(vcpu, GP_VECTOR, selector & 0xfffc);
		return 1;
	}
	gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, dtable.base);
	gpa += index * 8;
	return kvm_read_guest(vcpu->kvm, gpa, seg_desc, 8);
}

/* allowed just for 8-byte segment descriptors */
static int save_guest_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector,
					 struct desc_struct *seg_desc)
{
	gpa_t gpa;
	struct descriptor_table dtable;
	u16 index = selector >> 3;

	get_segment_descriptor_dtable(vcpu, selector, &dtable);

	if (dtable.limit < index * 8 + 7)
		return 1;
	gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, dtable.base);
	gpa += index * 8;
	return kvm_write_guest(vcpu->kvm, gpa, seg_desc, 8);
}

static u32 get_tss_base_addr(struct kvm_vcpu *vcpu,
			     struct desc_struct *seg_desc)
{
	u32 base_addr;

	base_addr = seg_desc->base0;
	base_addr |= (seg_desc->base1 << 16);
	base_addr |= (seg_desc->base2 << 24);

	return vcpu->arch.mmu.gva_to_gpa(vcpu, base_addr);
}

static u16 get_segment_selector(struct kvm_vcpu *vcpu, int seg)
{
	struct kvm_segment kvm_seg;

	kvm_get_segment(vcpu, &kvm_seg, seg);
	return kvm_seg.selector;
}

static int load_segment_descriptor_to_kvm_desct(struct kvm_vcpu *vcpu,
						u16 selector,
						struct kvm_segment *kvm_seg)
{
	struct desc_struct seg_desc;

	if (load_guest_segment_descriptor(vcpu, selector, &seg_desc))
		return 1;
	seg_desct_to_kvm_desct(&seg_desc, selector, kvm_seg);
	return 0;
}

static int kvm_load_realmode_segment(struct kvm_vcpu *vcpu, u16 selector, int seg)
{
	struct kvm_segment segvar = {
		.base = selector << 4,
		.limit = 0xffff,
		.selector = selector,
		.type = 3,
		.present = 1,
		.dpl = 3,
		.db = 0,
		.s = 1,
		.l = 0,
		.g = 0,
		.avl = 0,
		.unusable = 0,
	};
	kvm_x86_ops->set_segment(vcpu, &segvar, seg);
	return 0;
}
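
/*
 * Load a segment register from a selector.  In real mode the segment is
 * synthesized directly from the selector; in protected mode the descriptor
 * is fetched from the guest's GDT/LDT and converted, with the requested
 * type bits OR'ed in.  Returns non-zero if the descriptor could not be read.
 */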
int kvm_load_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector,
				int type_bits, int seg)
{
	struct kvm_segment kvm_seg;

	if (!(vcpu->arch.cr0 & X86_CR0_PE))
		return kvm_load_realmode_segment(vcpu, selector, seg);
	if (load_segment_descriptor_to_kvm_desct(vcpu, selector, &kvm_seg))
		return 1;
	kvm_seg.type |= type_bits;

	if (seg != VCPU_SREG_SS && seg != VCPU_SREG_CS &&
	    seg != VCPU_SREG_LDTR)
		if (!kvm_seg.s)
			kvm_seg.unusable = 1;

	kvm_set_segment(vcpu, &kvm_seg, seg);
	return 0;
}
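
/*
 * Helpers for emulated hardware task switching: dump the current register
 * and segment state into a 32-bit or 16-bit TSS image, and load state back
 * from one.  The load paths return non-zero when reloading any segment
 * from the TSS fails.
 */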
static void save_state_to_tss32(struct kvm_vcpu *vcpu,
				struct tss_segment_32 *tss)
{
	tss->cr3 = vcpu->arch.cr3;
	tss->eip = kvm_rip_read(vcpu);
	tss->eflags = kvm_x86_ops->get_rflags(vcpu);
	tss->eax = kvm_register_read(vcpu, VCPU_REGS_RAX);
	tss->ecx = kvm_register_read(vcpu, VCPU_REGS_RCX);
	tss->edx = kvm_register_read(vcpu, VCPU_REGS_RDX);
	tss->ebx = kvm_register_read(vcpu, VCPU_REGS_RBX);
	tss->esp = kvm_register_read(vcpu, VCPU_REGS_RSP);
	tss->ebp = kvm_register_read(vcpu, VCPU_REGS_RBP);
	tss->esi = kvm_register_read(vcpu, VCPU_REGS_RSI);
	tss->edi = kvm_register_read(vcpu, VCPU_REGS_RDI);
	tss->es = get_segment_selector(vcpu, VCPU_SREG_ES);
	tss->cs = get_segment_selector(vcpu, VCPU_SREG_CS);
	tss->ss = get_segment_selector(vcpu, VCPU_SREG_SS);
	tss->ds = get_segment_selector(vcpu, VCPU_SREG_DS);
	tss->fs = get_segment_selector(vcpu, VCPU_SREG_FS);
	tss->gs = get_segment_selector(vcpu, VCPU_SREG_GS);
	tss->ldt_selector = get_segment_selector(vcpu, VCPU_SREG_LDTR);
	tss->prev_task_link = get_segment_selector(vcpu, VCPU_SREG_TR);
}

static int load_state_from_tss32(struct kvm_vcpu *vcpu,
				 struct tss_segment_32 *tss)
{
	kvm_set_cr3(vcpu, tss->cr3);

	kvm_rip_write(vcpu, tss->eip);
	kvm_x86_ops->set_rflags(vcpu, tss->eflags | 2);

	kvm_register_write(vcpu, VCPU_REGS_RAX, tss->eax);
	kvm_register_write(vcpu, VCPU_REGS_RCX, tss->ecx);
	kvm_register_write(vcpu, VCPU_REGS_RDX, tss->edx);
	kvm_register_write(vcpu, VCPU_REGS_RBX, tss->ebx);
	kvm_register_write(vcpu, VCPU_REGS_RSP, tss->esp);
	kvm_register_write(vcpu, VCPU_REGS_RBP, tss->ebp);
	kvm_register_write(vcpu, VCPU_REGS_RSI, tss->esi);
	kvm_register_write(vcpu, VCPU_REGS_RDI, tss->edi);

	if (kvm_load_segment_descriptor(vcpu, tss->ldt_selector, 0, VCPU_SREG_LDTR))
		return 1;

	if (kvm_load_segment_descriptor(vcpu, tss->es, 1, VCPU_SREG_ES))
		return 1;

	if (kvm_load_segment_descriptor(vcpu, tss->cs, 9, VCPU_SREG_CS))
		return 1;

	if (kvm_load_segment_descriptor(vcpu, tss->ss, 1, VCPU_SREG_SS))
		return 1;

	if (kvm_load_segment_descriptor(vcpu, tss->ds, 1, VCPU_SREG_DS))
		return 1;

	if (kvm_load_segment_descriptor(vcpu, tss->fs, 1, VCPU_SREG_FS))
		return 1;

	if (kvm_load_segment_descriptor(vcpu, tss->gs, 1, VCPU_SREG_GS))
		return 1;
	return 0;
}

static void save_state_to_tss16(struct kvm_vcpu *vcpu,
				struct tss_segment_16 *tss)
{
	tss->ip = kvm_rip_read(vcpu);
	tss->flag = kvm_x86_ops->get_rflags(vcpu);
	tss->ax = kvm_register_read(vcpu, VCPU_REGS_RAX);
	tss->cx = kvm_register_read(vcpu, VCPU_REGS_RCX);
	tss->dx = kvm_register_read(vcpu, VCPU_REGS_RDX);
	tss->bx = kvm_register_read(vcpu, VCPU_REGS_RBX);
	tss->sp = kvm_register_read(vcpu, VCPU_REGS_RSP);
	tss->bp = kvm_register_read(vcpu, VCPU_REGS_RBP);
	tss->si = kvm_register_read(vcpu, VCPU_REGS_RSI);
	tss->di = kvm_register_read(vcpu, VCPU_REGS_RDI);

	tss->es = get_segment_selector(vcpu, VCPU_SREG_ES);
	tss->cs = get_segment_selector(vcpu, VCPU_SREG_CS);
	tss->ss = get_segment_selector(vcpu, VCPU_SREG_SS);
	tss->ds = get_segment_selector(vcpu, VCPU_SREG_DS);
	tss->ldt = get_segment_selector(vcpu, VCPU_SREG_LDTR);
	tss->prev_task_link = get_segment_selector(vcpu, VCPU_SREG_TR);
}

static int load_state_from_tss16(struct kvm_vcpu *vcpu,
				 struct tss_segment_16 *tss)
{
	kvm_rip_write(vcpu, tss->ip);
	kvm_x86_ops->set_rflags(vcpu, tss->flag | 2);
	kvm_register_write(vcpu, VCPU_REGS_RAX, tss->ax);
	kvm_register_write(vcpu, VCPU_REGS_RCX, tss->cx);
	kvm_register_write(vcpu, VCPU_REGS_RDX, tss->dx);
	kvm_register_write(vcpu, VCPU_REGS_RBX, tss->bx);
	kvm_register_write(vcpu, VCPU_REGS_RSP, tss->sp);
	kvm_register_write(vcpu, VCPU_REGS_RBP, tss->bp);
	kvm_register_write(vcpu, VCPU_REGS_RSI, tss->si);
	kvm_register_write(vcpu, VCPU_REGS_RDI, tss->di);

	if (kvm_load_segment_descriptor(vcpu, tss->ldt, 0, VCPU_SREG_LDTR))
		return 1;

	if (kvm_load_segment_descriptor(vcpu, tss->es, 1, VCPU_SREG_ES))
		return 1;

	if (kvm_load_segment_descriptor(vcpu, tss->cs, 9, VCPU_SREG_CS))
		return 1;

	if (kvm_load_segment_descriptor(vcpu, tss->ss, 1, VCPU_SREG_SS))
		return 1;

	if (kvm_load_segment_descriptor(vcpu, tss->ds, 1, VCPU_SREG_DS))
		return 1;
	return 0;
}
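
/*
 * Perform the memory side of a 16-bit or 32-bit task switch: write the
 * outgoing state into the old TSS, read the new TSS from guest memory and
 * load its state into the vcpu.  Returns 1 on success, 0 if any guest
 * memory access failed.
 */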
static int kvm_task_switch_16(struct kvm_vcpu *vcpu, u16 tss_selector,
			      u32 old_tss_base,
			      struct desc_struct *nseg_desc)
{
	struct tss_segment_16 tss_segment_16;
	int ret = 0;

	if (kvm_read_guest(vcpu->kvm, old_tss_base, &tss_segment_16,
			   sizeof tss_segment_16))
		goto out;

	save_state_to_tss16(vcpu, &tss_segment_16);

	if (kvm_write_guest(vcpu->kvm, old_tss_base, &tss_segment_16,
			    sizeof tss_segment_16))
		goto out;

	if (kvm_read_guest(vcpu->kvm, get_tss_base_addr(vcpu, nseg_desc),
			   &tss_segment_16, sizeof tss_segment_16))
		goto out;

	if (load_state_from_tss16(vcpu, &tss_segment_16))
		goto out;

	ret = 1;
out:
	return ret;
}

static int kvm_task_switch_32(struct kvm_vcpu *vcpu, u16 tss_selector,
			      u32 old_tss_base,
			      struct desc_struct *nseg_desc)
{
	struct tss_segment_32 tss_segment_32;
	int ret = 0;

	if (kvm_read_guest(vcpu->kvm, old_tss_base, &tss_segment_32,
			   sizeof tss_segment_32))
		goto out;

	save_state_to_tss32(vcpu, &tss_segment_32);

	if (kvm_write_guest(vcpu->kvm, old_tss_base, &tss_segment_32,
			    sizeof tss_segment_32))
		goto out;

	if (kvm_read_guest(vcpu->kvm, get_tss_base_addr(vcpu, nseg_desc),
			   &tss_segment_32, sizeof tss_segment_32))
		goto out;

	if (load_state_from_tss32(vcpu, &tss_segment_32))
		goto out;

	ret = 1;
out:
	return ret;
}
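
/*
 * Emulate a full task switch (CALL, JMP, IRET or interrupt through a task
 * gate): check privilege and TSS limits, update the busy bit and the NT
 * flag according to the switch reason, transfer state through the old and
 * new TSS, set CR0.TS and finally load the new task register.
 */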
int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason)
{
	struct kvm_segment tr_seg;
	struct desc_struct cseg_desc;
	struct desc_struct nseg_desc;
	int ret = 0;
	u32 old_tss_base = get_segment_base(vcpu, VCPU_SREG_TR);
	u16 old_tss_sel = get_segment_selector(vcpu, VCPU_SREG_TR);

	old_tss_base = vcpu->arch.mmu.gva_to_gpa(vcpu, old_tss_base);

	/* FIXME: Handle errors. Failure to read either TSS or their
	 * descriptors should generate a pagefault.
	 */
	if (load_guest_segment_descriptor(vcpu, tss_selector, &nseg_desc))
		goto out;

	if (load_guest_segment_descriptor(vcpu, old_tss_sel, &cseg_desc))
		goto out;

	if (reason != TASK_SWITCH_IRET) {
		int cpl;

		cpl = kvm_x86_ops->get_cpl(vcpu);
		if ((tss_selector & 3) > nseg_desc.dpl || cpl > nseg_desc.dpl) {
			kvm_queue_exception_e(vcpu, GP_VECTOR, 0);
			return 1;
		}
	}

	if (!nseg_desc.p || (nseg_desc.limit0 | nseg_desc.limit << 16) < 0x67) {
		kvm_queue_exception_e(vcpu, TS_VECTOR, tss_selector & 0xfffc);
		return 1;
	}

	if (reason == TASK_SWITCH_IRET || reason == TASK_SWITCH_JMP) {
		cseg_desc.type &= ~(1 << 1); /* clear the B flag */
		save_guest_segment_descriptor(vcpu, old_tss_sel, &cseg_desc);
	}

	if (reason == TASK_SWITCH_IRET) {
		u32 eflags = kvm_x86_ops->get_rflags(vcpu);
		kvm_x86_ops->set_rflags(vcpu, eflags & ~X86_EFLAGS_NT);
	}

	kvm_x86_ops->skip_emulated_instruction(vcpu);

	if (nseg_desc.type & 8)
		ret = kvm_task_switch_32(vcpu, tss_selector, old_tss_base,
					 &nseg_desc);
	else
		ret = kvm_task_switch_16(vcpu, tss_selector, old_tss_base,
					 &nseg_desc);

	if (reason == TASK_SWITCH_CALL || reason == TASK_SWITCH_GATE) {
		u32 eflags = kvm_x86_ops->get_rflags(vcpu);
		kvm_x86_ops->set_rflags(vcpu, eflags | X86_EFLAGS_NT);
	}

	if (reason != TASK_SWITCH_IRET) {
		nseg_desc.type |= (1 << 1);
		save_guest_segment_descriptor(vcpu, tss_selector,
					      &nseg_desc);
	}

	kvm_x86_ops->set_cr0(vcpu, vcpu->arch.cr0 | X86_CR0_TS);
	seg_desct_to_kvm_desct(&nseg_desc, tss_selector, &tr_seg);
	tr_seg.type = 11;
	kvm_set_segment(vcpu, &tr_seg, VCPU_SREG_TR);
out:
	return ret;
}
EXPORT_SYMBOL_GPL(kvm_task_switch);
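
/*
 * KVM_SET_SREGS: install the segment registers, descriptor tables and
 * control registers supplied by userspace, reload the PDPTRs for PAE
 * guests, reset the MMU context when paging-related state changed, and
 * re-inject any pending interrupt recorded in the interrupt bitmap.
 */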
int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	int mmu_reset_needed = 0;
	int i, pending_vec, max_bits;
	struct descriptor_table dt;

	vcpu_load(vcpu);

	dt.limit = sregs->idt.limit;
	dt.base = sregs->idt.base;
	kvm_x86_ops->set_idt(vcpu, &dt);
	dt.limit = sregs->gdt.limit;
	dt.base = sregs->gdt.base;
	kvm_x86_ops->set_gdt(vcpu, &dt);

	vcpu->arch.cr2 = sregs->cr2;
	mmu_reset_needed |= vcpu->arch.cr3 != sregs->cr3;
	vcpu->arch.cr3 = sregs->cr3;

	kvm_set_cr8(vcpu, sregs->cr8);

	mmu_reset_needed |= vcpu->arch.shadow_efer != sregs->efer;
	kvm_x86_ops->set_efer(vcpu, sregs->efer);
	kvm_set_apic_base(vcpu, sregs->apic_base);

	kvm_x86_ops->decache_cr4_guest_bits(vcpu);

	mmu_reset_needed |= vcpu->arch.cr0 != sregs->cr0;
	kvm_x86_ops->set_cr0(vcpu, sregs->cr0);
	vcpu->arch.cr0 = sregs->cr0;

	mmu_reset_needed |= vcpu->arch.cr4 != sregs->cr4;
	kvm_x86_ops->set_cr4(vcpu, sregs->cr4);
	if (!is_long_mode(vcpu) && is_pae(vcpu))
		load_pdptrs(vcpu, vcpu->arch.cr3);

	if (mmu_reset_needed)
		kvm_mmu_reset_context(vcpu);

	if (!irqchip_in_kernel(vcpu->kvm)) {
		memcpy(vcpu->arch.irq_pending, sregs->interrupt_bitmap,
		       sizeof vcpu->arch.irq_pending);
		vcpu->arch.irq_summary = 0;
		for (i = 0; i < ARRAY_SIZE(vcpu->arch.irq_pending); ++i)
			if (vcpu->arch.irq_pending[i])
				__set_bit(i, &vcpu->arch.irq_summary);
	} else {
		max_bits = (sizeof sregs->interrupt_bitmap) << 3;
		pending_vec = find_first_bit(
			(const unsigned long *)sregs->interrupt_bitmap,
			max_bits);
		/* Only pending external irq is handled here */
		if (pending_vec < max_bits) {
			kvm_x86_ops->set_irq(vcpu, pending_vec);
			pr_debug("Set back pending irq %d\n",
				 pending_vec);
		}
		kvm_pic_clear_isr_ack(vcpu->kvm);
	}

	kvm_set_segment(vcpu, &sregs->cs, VCPU_SREG_CS);
	kvm_set_segment(vcpu, &sregs->ds, VCPU_SREG_DS);
	kvm_set_segment(vcpu, &sregs->es, VCPU_SREG_ES);
	kvm_set_segment(vcpu, &sregs->fs, VCPU_SREG_FS);
	kvm_set_segment(vcpu, &sregs->gs, VCPU_SREG_GS);
	kvm_set_segment(vcpu, &sregs->ss, VCPU_SREG_SS);

	kvm_set_segment(vcpu, &sregs->tr, VCPU_SREG_TR);
	kvm_set_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR);

	/* Older userspace won't unhalt the vcpu on reset. */
	if (vcpu->vcpu_id == 0 && kvm_rip_read(vcpu) == 0xfff0 &&
	    sregs->cs.selector == 0xf000 && sregs->cs.base == 0xffff0000 &&
	    !(vcpu->arch.cr0 & X86_CR0_PE))
		vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;

	vcpu_put(vcpu);

	return 0;
}
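
/*
 * KVM_SET_GUEST_DEBUG: choose the effective debug registers (the values
 * supplied by the debugger when hardware breakpoints are requested,
 * otherwise the guest's own), hand the request to the vendor module, and
 * optionally queue a #DB or #BP exception for injection.
 */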
int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	int i, r;

	vcpu_load(vcpu);

	if ((dbg->control & (KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_HW_BP)) ==
	    (KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_HW_BP)) {
		for (i = 0; i < KVM_NR_DB_REGS; ++i)
			vcpu->arch.eff_db[i] = dbg->arch.debugreg[i];
		vcpu->arch.switch_db_regs =
			(dbg->arch.debugreg[7] & DR7_BP_EN_MASK);
	} else {
		for (i = 0; i < KVM_NR_DB_REGS; i++)
			vcpu->arch.eff_db[i] = vcpu->arch.db[i];
		vcpu->arch.switch_db_regs = (vcpu->arch.dr7 & DR7_BP_EN_MASK);
	}

	r = kvm_x86_ops->set_guest_debug(vcpu, dbg);

	if (dbg->control & KVM_GUESTDBG_INJECT_DB)
		kvm_queue_exception(vcpu, DB_VECTOR);
	else if (dbg->control & KVM_GUESTDBG_INJECT_BP)
		kvm_queue_exception(vcpu, BP_VECTOR);

	vcpu_put(vcpu);

	return r;
}

/*
 * fxsave fpu state.  Taken from x86_64/processor.h.  To be killed when
 * we have asm/x86/processor.h
 */
struct fxsave {
	u16	cwd;
	u16	swd;
	u16	twd;
	u16	fop;
	u64	rip;
	u64	rdp;
	u32	mxcsr;
	u32	mxcsr_mask;
	u32	st_space[32];	/* 8*16 bytes for each FP-reg = 128 bytes */
#ifdef CONFIG_X86_64
	u32	xmm_space[64];	/* 16*16 bytes for each XMM-reg = 256 bytes */
#else
	u32	xmm_space[32];	/* 8*16 bytes for each XMM-reg = 128 bytes */
#endif
};

/*
 * Translate a guest virtual address to a guest physical address.
 */
int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	unsigned long vaddr = tr->linear_address;
	gpa_t gpa;

	vcpu_load(vcpu);
	down_read(&vcpu->kvm->slots_lock);
	gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, vaddr);
	up_read(&vcpu->kvm->slots_lock);
	tr->physical_address = gpa;
	tr->valid = gpa != UNMAPPED_GVA;
	tr->writeable = 1;
	tr->usermode = 0;
	vcpu_put(vcpu);

	return 0;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	struct fxsave *fxsave = (struct fxsave *)&vcpu->arch.guest_fx_image;

	vcpu_load(vcpu);

	memcpy(fpu->fpr, fxsave->st_space, 128);
	fpu->fcw = fxsave->cwd;
	fpu->fsw = fxsave->swd;
	fpu->ftwx = fxsave->twd;
	fpu->last_opcode = fxsave->fop;
	fpu->last_ip = fxsave->rip;
	fpu->last_dp = fxsave->rdp;
	memcpy(fpu->xmm, fxsave->xmm_space, sizeof fxsave->xmm_space);

	vcpu_put(vcpu);

	return 0;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	struct fxsave *fxsave = (struct fxsave *)&vcpu->arch.guest_fx_image;

	vcpu_load(vcpu);

	memcpy(fxsave->st_space, fpu->fpr, 128);
	fxsave->cwd = fpu->fcw;
	fxsave->swd = fpu->fsw;
	fxsave->twd = fpu->ftwx;
	fxsave->fop = fpu->last_opcode;
	fxsave->rip = fpu->last_ip;
	fxsave->rdp = fpu->last_dp;
	memcpy(fxsave->xmm_space, fpu->xmm, sizeof fxsave->xmm_space);

	vcpu_put(vcpu);

	return 0;
}
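
/*
 * One-time FPU setup for a vcpu: save the host FPU state, reset the FPU to
 * initialise the guest image, then restore the host state.  Everything in
 * the guest image from st_space onward is cleared and MXCSR is given its
 * architectural reset value of 0x1f80.
 */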
void fx_init(struct kvm_vcpu *vcpu)
{
	unsigned after_mxcsr_mask;

	/*
	 * Touch the fpu the first time in non atomic context.  If this is
	 * the first fpu instruction, the exception handler will fire before
	 * the instruction returns and it'll have to allocate ram with
	 * GFP_KERNEL.
	 */
	if (!used_math())
		kvm_fx_save(&vcpu->arch.host_fx_image);

	/* Initialize guest FPU by resetting ours and saving into guest's */
	preempt_disable();
	kvm_fx_save(&vcpu->arch.host_fx_image);
	kvm_fx_finit();
	kvm_fx_save(&vcpu->arch.guest_fx_image);
	kvm_fx_restore(&vcpu->arch.host_fx_image);
	preempt_enable();

	vcpu->arch.cr0 |= X86_CR0_ET;
	after_mxcsr_mask = offsetof(struct i387_fxsave_struct, st_space);
	vcpu->arch.guest_fx_image.mxcsr = 0x1f80;
	memset((void *)&vcpu->arch.guest_fx_image + after_mxcsr_mask,
	       0, sizeof(struct i387_fxsave_struct) - after_mxcsr_mask);
}
EXPORT_SYMBOL_GPL(fx_init);
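
/*
 * Swap the host and guest FPU images around guest execution:
 * kvm_load_guest_fpu() installs the guest image only if the vcpu's FPU is
 * active and not already loaded; kvm_put_guest_fpu() saves it back,
 * restores the host image and counts the reload.
 */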
void kvm_load_guest_fpu(struct kvm_vcpu *vcpu)
{
	if (!vcpu->fpu_active || vcpu->guest_fpu_loaded)
		return;

	vcpu->guest_fpu_loaded = 1;
	kvm_fx_save(&vcpu->arch.host_fx_image);
	kvm_fx_restore(&vcpu->arch.guest_fx_image);
}
EXPORT_SYMBOL_GPL(kvm_load_guest_fpu);

void kvm_put_guest_fpu(struct kvm_vcpu *vcpu)
{
	if (!vcpu->guest_fpu_loaded)
		return;

	vcpu->guest_fpu_loaded = 0;
	kvm_fx_save(&vcpu->arch.guest_fx_image);
	kvm_fx_restore(&vcpu->arch.host_fx_image);
	++vcpu->stat.fpu_reload;
}
EXPORT_SYMBOL_GPL(kvm_put_guest_fpu);

void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
{
	if (vcpu->arch.time_page) {
		kvm_release_page_dirty(vcpu->arch.time_page);
		vcpu->arch.time_page = NULL;
	}

	kvm_x86_ops->vcpu_free(vcpu);
}

struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
						unsigned int id)
{
	return kvm_x86_ops->vcpu_create(kvm, id);
}

int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
	int r;

	/* We do fxsave: this must be aligned. */
	BUG_ON((unsigned long)&vcpu->arch.host_fx_image & 0xF);

	vcpu->arch.mtrr_state.have_fixed = 1;
	vcpu_load(vcpu);
	r = kvm_arch_vcpu_reset(vcpu);
	if (r == 0)
		r = kvm_mmu_setup(vcpu);
	vcpu_put(vcpu);
	if (r < 0)
		goto free_vcpu;

	return 0;
free_vcpu:
	kvm_x86_ops->vcpu_free(vcpu);
	return r;
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	vcpu_load(vcpu);
	kvm_mmu_unload(vcpu);
	vcpu_put(vcpu);

	kvm_x86_ops->vcpu_free(vcpu);
}

int kvm_arch_vcpu_reset(struct kvm_vcpu *vcpu)
{
	vcpu->arch.nmi_pending = false;
	vcpu->arch.nmi_injected = false;

	vcpu->arch.switch_db_regs = 0;
	memset(vcpu->arch.db, 0, sizeof(vcpu->arch.db));
	vcpu->arch.dr6 = DR6_FIXED_1;
	vcpu->arch.dr7 = DR7_FIXED_1;

	return kvm_x86_ops->vcpu_reset(vcpu);
}

void kvm_arch_hardware_enable(void *garbage)
{
	kvm_x86_ops->hardware_enable(garbage);
}

void kvm_arch_hardware_disable(void *garbage)
{
	kvm_x86_ops->hardware_disable(garbage);
}

int kvm_arch_hardware_setup(void)
{
	return kvm_x86_ops->hardware_setup();
}

void kvm_arch_hardware_unsetup(void)
{
	kvm_x86_ops->hardware_unsetup();
}

void kvm_arch_check_processor_compat(void *rtn)
{
	kvm_x86_ops->check_processor_compatibility(rtn);
}
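
/*
 * Architecture-specific vcpu construction: vcpu 0 (or any vcpu without an
 * in-kernel irqchip) starts RUNNABLE, other vcpus start UNINITIALIZED
 * until they receive INIT/SIPI.  Allocates the PIO bounce page, the MMU
 * structures and, with an in-kernel irqchip, the local APIC.
 */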
int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
	struct page *page;
	struct kvm *kvm;
	int r;

	BUG_ON(vcpu->kvm == NULL);
	kvm = vcpu->kvm;

	vcpu->arch.mmu.root_hpa = INVALID_PAGE;
	if (!irqchip_in_kernel(kvm) || vcpu->vcpu_id == 0)
		vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
	else
		vcpu->arch.mp_state = KVM_MP_STATE_UNINITIALIZED;

	page = alloc_page(GFP_KERNEL | __GFP_ZERO);
	if (!page) {
		r = -ENOMEM;
		goto fail;
	}
	vcpu->arch.pio_data = page_address(page);

	r = kvm_mmu_create(vcpu);
	if (r < 0)
		goto fail_free_pio_data;

	if (irqchip_in_kernel(kvm)) {
		r = kvm_create_lapic(vcpu);
		if (r < 0)
			goto fail_mmu_destroy;
	}

	return 0;

fail_mmu_destroy:
	kvm_mmu_destroy(vcpu);
fail_free_pio_data:
	free_page((unsigned long)vcpu->arch.pio_data);
fail:
	return r;
}

void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	kvm_free_lapic(vcpu);
	down_read(&vcpu->kvm->slots_lock);
	kvm_mmu_destroy(vcpu);
	up_read(&vcpu->kvm->slots_lock);
	free_page((unsigned long)vcpu->arch.pio_data);
}

struct kvm *kvm_arch_create_vm(void)
{
	struct kvm *kvm = kzalloc(sizeof(struct kvm), GFP_KERNEL);

	if (!kvm)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&kvm->arch.active_mmu_pages);
	INIT_LIST_HEAD(&kvm->arch.oos_global_pages);
	INIT_LIST_HEAD(&kvm->arch.assigned_dev_head);

	/* Reserve bit 0 of irq_sources_bitmap for userspace irq source */
	set_bit(KVM_USERSPACE_IRQ_SOURCE_ID, &kvm->arch.irq_sources_bitmap);

	rdtscll(kvm->arch.vm_init_tsc);

	return kvm;
}

static void kvm_unload_vcpu_mmu(struct kvm_vcpu *vcpu)
{
	vcpu_load(vcpu);
	kvm_mmu_unload(vcpu);
	vcpu_put(vcpu);
}

static void kvm_free_vcpus(struct kvm *kvm)
{
	unsigned int i;

	/*
	 * Unpin any mmu pages first.
	 */
	for (i = 0; i < KVM_MAX_VCPUS; ++i)
		if (kvm->vcpus[i])
			kvm_unload_vcpu_mmu(kvm->vcpus[i]);
	for (i = 0; i < KVM_MAX_VCPUS; ++i) {
		if (kvm->vcpus[i]) {
			kvm_arch_vcpu_free(kvm->vcpus[i]);
			kvm->vcpus[i] = NULL;
		}
	}
}

void kvm_arch_sync_events(struct kvm *kvm)
{
	kvm_free_all_assigned_devices(kvm);
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
	kvm_iommu_unmap_guest(kvm);
	kvm_free_pit(kvm);
	kfree(kvm->arch.vpic);
	kfree(kvm->arch.vioapic);
	kvm_free_vcpus(kvm);
	kvm_free_physmem(kvm);
	if (kvm->arch.apic_access_page)
		put_page(kvm->arch.apic_access_page);
	if (kvm->arch.ept_identity_pagetable)
		put_page(kvm->arch.ept_identity_pagetable);
	kfree(kvm);
}
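
/*
 * Architecture hook for memory slot changes.  For old userspace that does
 * not supply its own backing memory (!user_alloc), anonymous memory is
 * mmap'ed on slot creation and munmap'ed on deletion on its behalf.  The
 * shadow MMU page count is then recalculated, write access to the slot's
 * shadow entries is removed and remote TLBs are flushed.
 */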
int kvm_arch_set_memory_region(struct kvm *kvm,
				struct kvm_userspace_memory_region *mem,
				struct kvm_memory_slot old,
				int user_alloc)
{
	int npages = mem->memory_size >> PAGE_SHIFT;
	struct kvm_memory_slot *memslot = &kvm->memslots[mem->slot];

	/* To keep backward compatibility with older userspace,
	 * x86 needs to handle the !user_alloc case.
	 */
	if (!user_alloc) {
		if (npages && !old.rmap) {
			unsigned long userspace_addr;

			down_write(&current->mm->mmap_sem);
			userspace_addr = do_mmap(NULL, 0,
						 npages * PAGE_SIZE,
						 PROT_READ | PROT_WRITE,
						 MAP_PRIVATE | MAP_ANONYMOUS,
						 0);
			up_write(&current->mm->mmap_sem);

			if (IS_ERR((void *)userspace_addr))
				return PTR_ERR((void *)userspace_addr);

			/* set userspace_addr atomically for kvm_hva_to_rmapp */
			spin_lock(&kvm->mmu_lock);
			memslot->userspace_addr = userspace_addr;
			spin_unlock(&kvm->mmu_lock);
		} else {
			if (!old.user_alloc && old.rmap) {
				int ret;

				down_write(&current->mm->mmap_sem);
				ret = do_munmap(current->mm, old.userspace_addr,
						old.npages * PAGE_SIZE);
				up_write(&current->mm->mmap_sem);
				if (ret < 0)
					printk(KERN_WARNING
					       "kvm_vm_ioctl_set_memory_region: "
					       "failed to munmap memory\n");
			}
		}
	}

	if (!kvm->arch.n_requested_mmu_pages) {
		unsigned int nr_mmu_pages = kvm_mmu_calculate_mmu_pages(kvm);
		kvm_mmu_change_mmu_pages(kvm, nr_mmu_pages);
	}

	kvm_mmu_slot_remove_write_access(kvm, mem->slot);
	kvm_flush_remote_tlbs(kvm);

	return 0;
}

void kvm_arch_flush_shadow(struct kvm *kvm)
{
	kvm_mmu_zap_all(kvm);
}

int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE
	       || vcpu->arch.mp_state == KVM_MP_STATE_SIPI_RECEIVED
	       || vcpu->arch.nmi_pending;
}
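
/*
 * Kick a vcpu out of its wait queue or out of guest mode: wake it if it is
 * blocked in kvm_vcpu_block(), and otherwise send an IPI (with an empty
 * handler) to the physical CPU it is running on so the interrupt forces a
 * VM exit.
 */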
static void vcpu_kick_intr(void *info)
{
#ifdef DEBUG
	struct kvm_vcpu *vcpu = (struct kvm_vcpu *)info;
	printk(KERN_DEBUG "vcpu_kick_intr %p\n", vcpu);
#endif
}

void kvm_vcpu_kick(struct kvm_vcpu *vcpu)
{
	int ipi_pcpu = vcpu->cpu;
	int cpu = get_cpu();

	if (waitqueue_active(&vcpu->wq)) {
		wake_up_interruptible(&vcpu->wq);
		++vcpu->stat.halt_wakeup;
	}
	/*
	 * We may be called synchronously with irqs disabled in guest mode,
	 * so there is no need to call smp_call_function_single() in that
	 * case.
	 */
	if (vcpu->guest_mode && vcpu->cpu != cpu)
		smp_call_function_single(ipi_pcpu, vcpu_kick_intr, vcpu, 0);
	put_cpu();
}