/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * derived from drivers/kvm/kvm_main.c
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright (C) 2008 Qumranet, Inc.
 * Copyright IBM Corporation, 2008
 *
 * Authors:
 *   Avi Kivity   <avi@qumranet.com>
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Amit Shah    <amit.shah@qumranet.com>
 *   Ben-Ami Yassour <benami@il.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#include <linux/kvm_host.h>
#include "irq.h"
#include "mmu.h"
#include "i8254.h"
#include "tss.h"
#include "kvm_cache_regs.h"
#include "x86.h"

#include <linux/clocksource.h>
#include <linux/interrupt.h>
#include <linux/kvm.h>
#include <linux/fs.h>
#include <linux/vmalloc.h>
#include <linux/module.h>
#include <linux/mman.h>
#include <linux/highmem.h>
#include <linux/iommu.h>
#include <linux/intel-iommu.h>
#include <linux/cpufreq.h>

#include <asm/uaccess.h>
#include <asm/msr.h>
#include <asm/desc.h>
#include <asm/mtrr.h>

#define MAX_IO_MSRS 256
#define CR0_RESERVED_BITS						\
	(~(unsigned long)(X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS \
			  | X86_CR0_ET | X86_CR0_NE | X86_CR0_WP | X86_CR0_AM \
			  | X86_CR0_NW | X86_CR0_CD | X86_CR0_PG))
#define CR4_RESERVED_BITS						\
	(~(unsigned long)(X86_CR4_VME | X86_CR4_PVI | X86_CR4_TSD | X86_CR4_DE \
			  | X86_CR4_PSE | X86_CR4_PAE | X86_CR4_MCE	\
			  | X86_CR4_PGE | X86_CR4_PCE | X86_CR4_OSFXSR	\
			  | X86_CR4_OSXMMEXCPT | X86_CR4_VMXE))

#define CR8_RESERVED_BITS (~(unsigned long)X86_CR8_TPR)

/* EFER defaults:
 * - enable syscall per default because it's emulated by KVM
 * - enable LME and LMA per default on 64 bit KVM
 */
#ifdef CONFIG_X86_64
static u64 __read_mostly efer_reserved_bits = 0xfffffffffffffafeULL;
#else
static u64 __read_mostly efer_reserved_bits = 0xfffffffffffffffeULL;
#endif
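/*
 * Decoding the masks above: ~0xfffffffffffffafe leaves bits 0, 8 and 10
 * writable, i.e. EFER.SCE, EFER.LME and EFER.LMA on 64-bit hosts, while
 * 32-bit hosts only allow EFER.SCE.  Additional bits (such as EFER.NX)
 * are opened up at module load time via kvm_enable_efer_bits() below.
 */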
#define VM_STAT(x) offsetof(struct kvm, stat.x), KVM_STAT_VM
#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU

static int kvm_dev_ioctl_get_supported_cpuid(struct kvm_cpuid2 *cpuid,
				    struct kvm_cpuid_entry2 __user *entries);
struct kvm_cpuid_entry2 *kvm_find_cpuid_entry(struct kvm_vcpu *vcpu,
					      u32 function, u32 index);

struct kvm_x86_ops *kvm_x86_ops;
EXPORT_SYMBOL_GPL(kvm_x86_ops);

struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "pf_fixed", VCPU_STAT(pf_fixed) },
	{ "pf_guest", VCPU_STAT(pf_guest) },
	{ "tlb_flush", VCPU_STAT(tlb_flush) },
	{ "invlpg", VCPU_STAT(invlpg) },
	{ "exits", VCPU_STAT(exits) },
	{ "io_exits", VCPU_STAT(io_exits) },
	{ "mmio_exits", VCPU_STAT(mmio_exits) },
	{ "signal_exits", VCPU_STAT(signal_exits) },
	{ "irq_window", VCPU_STAT(irq_window_exits) },
	{ "nmi_window", VCPU_STAT(nmi_window_exits) },
	{ "halt_exits", VCPU_STAT(halt_exits) },
	{ "halt_wakeup", VCPU_STAT(halt_wakeup) },
	{ "hypercalls", VCPU_STAT(hypercalls) },
	{ "request_irq", VCPU_STAT(request_irq_exits) },
	{ "irq_exits", VCPU_STAT(irq_exits) },
	{ "host_state_reload", VCPU_STAT(host_state_reload) },
	{ "efer_reload", VCPU_STAT(efer_reload) },
	{ "fpu_reload", VCPU_STAT(fpu_reload) },
	{ "insn_emulation", VCPU_STAT(insn_emulation) },
	{ "insn_emulation_fail", VCPU_STAT(insn_emulation_fail) },
	{ "irq_injections", VCPU_STAT(irq_injections) },
	{ "nmi_injections", VCPU_STAT(nmi_injections) },
	{ "mmu_shadow_zapped", VM_STAT(mmu_shadow_zapped) },
	{ "mmu_pte_write", VM_STAT(mmu_pte_write) },
	{ "mmu_pte_updated", VM_STAT(mmu_pte_updated) },
	{ "mmu_pde_zapped", VM_STAT(mmu_pde_zapped) },
	{ "mmu_flooded", VM_STAT(mmu_flooded) },
	{ "mmu_recycled", VM_STAT(mmu_recycled) },
	{ "mmu_cache_miss", VM_STAT(mmu_cache_miss) },
	{ "mmu_unsync", VM_STAT(mmu_unsync) },
	{ "remote_tlb_flush", VM_STAT(remote_tlb_flush) },
	{ "largepages", VM_STAT(lpages) },
	{ NULL }
};

unsigned long segment_base(u16 selector)
{
	struct descriptor_table gdt;
	struct desc_struct *d;
	unsigned long table_base;
	unsigned long v;

	if (selector == 0)
		return 0;

	asm("sgdt %0" : "=m"(gdt));
	table_base = gdt.base;

	if (selector & 4) {		/* from ldt */
		u16 ldt_selector;

		asm("sldt %0" : "=g"(ldt_selector));
		table_base = segment_base(ldt_selector);
	}
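	/*
	 * The descriptor's base address is scattered across the entry:
	 * bits 0-15 live in base0, bits 16-23 in base1 and bits 24-31 in
	 * base2; 64-bit system descriptors (LDT/TSS) carry bits 32-63 in
	 * the base3 field of the expanded 16-byte format.
	 */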
	d = (struct desc_struct *)(table_base + (selector & ~7));
	v = d->base0 | ((unsigned long)d->base1 << 16) |
		((unsigned long)d->base2 << 24);
#ifdef CONFIG_X86_64
	if (d->s == 0 && (d->type == 2 || d->type == 9 || d->type == 11))
		v |= ((unsigned long)((struct ldttss_desc64 *)d)->base3) << 32;
#endif
	return v;
}
EXPORT_SYMBOL_GPL(segment_base);
u64 kvm_get_apic_base(struct kvm_vcpu *vcpu)
{
	/*
	 * The base is cached in vcpu->arch.apic_base whether the local
	 * APIC is emulated in the kernel or in userspace, so the same
	 * field is returned in both cases.
	 */
	return vcpu->arch.apic_base;
}
EXPORT_SYMBOL_GPL(kvm_get_apic_base);

void kvm_set_apic_base(struct kvm_vcpu *vcpu, u64 data)
{
	/* TODO: reserve bits check */
	if (irqchip_in_kernel(vcpu->kvm))
		kvm_lapic_set_base(vcpu, data);
	else
		vcpu->arch.apic_base = data;
}
EXPORT_SYMBOL_GPL(kvm_set_apic_base);

void kvm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr)
{
	WARN_ON(vcpu->arch.exception.pending);
	vcpu->arch.exception.pending = true;
	vcpu->arch.exception.has_error_code = false;
	vcpu->arch.exception.nr = nr;
}
EXPORT_SYMBOL_GPL(kvm_queue_exception);

void kvm_inject_page_fault(struct kvm_vcpu *vcpu, unsigned long addr,
			   u32 error_code)
{
	++vcpu->stat.pf_guest;

	if (vcpu->arch.exception.pending) {
		if (vcpu->arch.exception.nr == PF_VECTOR) {
			printk(KERN_DEBUG "kvm: inject_page_fault:"
			       " double fault 0x%lx\n", addr);
			vcpu->arch.exception.nr = DF_VECTOR;
			vcpu->arch.exception.error_code = 0;
		} else if (vcpu->arch.exception.nr == DF_VECTOR) {
			/* triple fault -> shutdown */
			set_bit(KVM_REQ_TRIPLE_FAULT, &vcpu->requests);
		}
		return;
	}
	vcpu->arch.cr2 = addr;
	kvm_queue_exception_e(vcpu, PF_VECTOR, error_code);
}

void kvm_inject_nmi(struct kvm_vcpu *vcpu)
{
	vcpu->arch.nmi_pending = 1;
}
EXPORT_SYMBOL_GPL(kvm_inject_nmi);

void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code)
{
	WARN_ON(vcpu->arch.exception.pending);
	vcpu->arch.exception.pending = true;
	vcpu->arch.exception.has_error_code = true;
	vcpu->arch.exception.nr = nr;
	vcpu->arch.exception.error_code = error_code;
}
EXPORT_SYMBOL_GPL(kvm_queue_exception_e);

static void __queue_exception(struct kvm_vcpu *vcpu)
{
	kvm_x86_ops->queue_exception(vcpu, vcpu->arch.exception.nr,
				     vcpu->arch.exception.has_error_code,
				     vcpu->arch.exception.error_code);
}

/*
 * Load the pae pdptrs.  Return true if they are all valid.
 */
int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3)
{
	gfn_t pdpt_gfn = cr3 >> PAGE_SHIFT;
	unsigned offset = ((cr3 & (PAGE_SIZE-1)) >> 5) << 2;
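	/*
	 * In PAE mode the PDPT is a 32-byte table of four 8-byte entries,
	 * aligned on a 32-byte boundary.  Bits 11:5 of CR3 select which
	 * such table within the page is used, so the table number times
	 * four gives the index of the first pdpte in u64 units.
	 */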
	int i;
	int ret;
	u64 pdpte[ARRAY_SIZE(vcpu->arch.pdptrs)];

	ret = kvm_read_guest_page(vcpu->kvm, pdpt_gfn, pdpte,
				  offset * sizeof(u64), sizeof(pdpte));
	if (ret < 0) {
		ret = 0;
		goto out;
	}
	for (i = 0; i < ARRAY_SIZE(pdpte); ++i) {
		if (is_present_pte(pdpte[i]) &&
		    (pdpte[i] & vcpu->arch.mmu.rsvd_bits_mask[0][2])) {
			ret = 0;
			goto out;
		}
	}
	ret = 1;

	memcpy(vcpu->arch.pdptrs, pdpte, sizeof(vcpu->arch.pdptrs));
out:
	return ret;
}
EXPORT_SYMBOL_GPL(load_pdptrs);

static bool pdptrs_changed(struct kvm_vcpu *vcpu)
{
	u64 pdpte[ARRAY_SIZE(vcpu->arch.pdptrs)];
	bool changed = true;
	int r;

	if (is_long_mode(vcpu) || !is_pae(vcpu))
		return false;
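	/* The PDPT is 32-byte aligned, so masking off the low five bits
	 * of CR3 yields its guest-physical address. */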
	r = kvm_read_guest(vcpu->kvm, vcpu->arch.cr3 & ~31u, pdpte, sizeof(pdpte));
	if (r < 0)
		goto out;
	changed = memcmp(pdpte, vcpu->arch.pdptrs, sizeof(pdpte)) != 0;
out:
	return changed;
}
void kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
{
	if (cr0 & CR0_RESERVED_BITS) {
		printk(KERN_DEBUG "set_cr0: 0x%lx #GP, reserved bits 0x%lx\n",
		       cr0, vcpu->arch.cr0);
		kvm_inject_gp(vcpu, 0);
		return;
	}

	if ((cr0 & X86_CR0_NW) && !(cr0 & X86_CR0_CD)) {
		printk(KERN_DEBUG "set_cr0: #GP, CD == 0 && NW == 1\n");
		kvm_inject_gp(vcpu, 0);
		return;
	}

	if ((cr0 & X86_CR0_PG) && !(cr0 & X86_CR0_PE)) {
		printk(KERN_DEBUG "set_cr0: #GP, set PG flag "
		       "and a clear PE flag\n");
		kvm_inject_gp(vcpu, 0);
		return;
	}

	if (!is_paging(vcpu) && (cr0 & X86_CR0_PG)) {
#ifdef CONFIG_X86_64
		if ((vcpu->arch.shadow_efer & EFER_LME)) {
			int cs_db, cs_l;

			if (!is_pae(vcpu)) {
				printk(KERN_DEBUG "set_cr0: #GP, start paging "
				       "in long mode while PAE is disabled\n");
				kvm_inject_gp(vcpu, 0);
				return;
			}
			kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
			if (cs_l) {
				printk(KERN_DEBUG "set_cr0: #GP, start paging "
				       "in long mode while CS.L == 1\n");
				kvm_inject_gp(vcpu, 0);
				return;
			}
		} else
#endif
		if (is_pae(vcpu) && !load_pdptrs(vcpu, vcpu->arch.cr3)) {
			printk(KERN_DEBUG "set_cr0: #GP, pdptrs "
			       "reserved bits\n");
			kvm_inject_gp(vcpu, 0);
			return;
		}
	}

	kvm_x86_ops->set_cr0(vcpu, cr0);
	vcpu->arch.cr0 = cr0;

	kvm_mmu_reset_context(vcpu);
	return;
}
EXPORT_SYMBOL_GPL(kvm_set_cr0);

void kvm_lmsw(struct kvm_vcpu *vcpu, unsigned long msw)
{
	kvm_set_cr0(vcpu, (vcpu->arch.cr0 & ~0x0ful) | (msw & 0x0f));
	KVMTRACE_1D(LMSW, vcpu,
		    (u32)((vcpu->arch.cr0 & ~0x0ful) | (msw & 0x0f)),
		    handler);
}
EXPORT_SYMBOL_GPL(kvm_lmsw);

void kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
{
	unsigned long old_cr4 = vcpu->arch.cr4;
	unsigned long pdptr_bits = X86_CR4_PGE | X86_CR4_PSE | X86_CR4_PAE;

	if (cr4 & CR4_RESERVED_BITS) {
		printk(KERN_DEBUG "set_cr4: #GP, reserved bits\n");
		kvm_inject_gp(vcpu, 0);
		return;
	}

	if (is_long_mode(vcpu)) {
		if (!(cr4 & X86_CR4_PAE)) {
			printk(KERN_DEBUG "set_cr4: #GP, clearing PAE while "
			       "in long mode\n");
			kvm_inject_gp(vcpu, 0);
			return;
		}
	} else if (is_paging(vcpu) && (cr4 & X86_CR4_PAE)
		   && ((cr4 ^ old_cr4) & pdptr_bits)
		   && !load_pdptrs(vcpu, vcpu->arch.cr3)) {
		printk(KERN_DEBUG "set_cr4: #GP, pdptrs reserved bits\n");
		kvm_inject_gp(vcpu, 0);
		return;
	}

	if (cr4 & X86_CR4_VMXE) {
		printk(KERN_DEBUG "set_cr4: #GP, setting VMXE\n");
		kvm_inject_gp(vcpu, 0);
		return;
	}
	kvm_x86_ops->set_cr4(vcpu, cr4);
	vcpu->arch.cr4 = cr4;
	vcpu->arch.mmu.base_role.cr4_pge = (cr4 & X86_CR4_PGE) && !tdp_enabled;
	kvm_mmu_reset_context(vcpu);
}
EXPORT_SYMBOL_GPL(kvm_set_cr4);

void kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
{
	if (cr3 == vcpu->arch.cr3 && !pdptrs_changed(vcpu)) {
		kvm_mmu_sync_roots(vcpu);
		kvm_mmu_flush_tlb(vcpu);
		return;
	}

	if (is_long_mode(vcpu)) {
		if (cr3 & CR3_L_MODE_RESERVED_BITS) {
			printk(KERN_DEBUG "set_cr3: #GP, reserved bits\n");
			kvm_inject_gp(vcpu, 0);
			return;
		}
	} else {
		if (is_pae(vcpu)) {
			if (cr3 & CR3_PAE_RESERVED_BITS) {
				printk(KERN_DEBUG
				       "set_cr3: #GP, reserved bits\n");
				kvm_inject_gp(vcpu, 0);
				return;
			}
			if (is_paging(vcpu) && !load_pdptrs(vcpu, cr3)) {
				printk(KERN_DEBUG "set_cr3: #GP, pdptrs "
				       "reserved bits\n");
				kvm_inject_gp(vcpu, 0);
				return;
			}
		}
		/*
		 * We don't check reserved bits in nonpae mode, because
		 * this isn't enforced, and VMware depends on this.
		 */
	}

	/*
	 * Does the new cr3 value map to physical memory? (Note, we
	 * catch an invalid cr3 even in real-mode, because it would
	 * cause trouble later on when we turn on paging anyway.)
	 *
	 * A real CPU would silently accept an invalid cr3 and would
	 * attempt to use it - with largely undefined (and often hard
	 * to debug) behavior on the guest side.
	 */
	if (unlikely(!gfn_to_memslot(vcpu->kvm, cr3 >> PAGE_SHIFT)))
		kvm_inject_gp(vcpu, 0);
	else {
		vcpu->arch.cr3 = cr3;
		vcpu->arch.mmu.new_cr3(vcpu);
	}
}
EXPORT_SYMBOL_GPL(kvm_set_cr3);

void kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8)
{
	if (cr8 & CR8_RESERVED_BITS) {
		printk(KERN_DEBUG "set_cr8: #GP, reserved bits 0x%lx\n", cr8);
		kvm_inject_gp(vcpu, 0);
		return;
	}
	if (irqchip_in_kernel(vcpu->kvm))
		kvm_lapic_set_tpr(vcpu, cr8);
	else
		vcpu->arch.cr8 = cr8;
}
EXPORT_SYMBOL_GPL(kvm_set_cr8);

unsigned long kvm_get_cr8(struct kvm_vcpu *vcpu)
{
	if (irqchip_in_kernel(vcpu->kvm))
		return kvm_lapic_get_cr8(vcpu);
	else
		return vcpu->arch.cr8;
}
EXPORT_SYMBOL_GPL(kvm_get_cr8);
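/*
 * X86_FEATURE_* values encode (cpuid word * 32 + bit), so masking the
 * shift count with 31 reduces a feature number to its bit position
 * within its own 32-bit word (and avoids an undefined shift by >= 32).
 */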
static inline u32 bit(int bitno)
{
	return 1 << (bitno & 31);
}

/*
 * List of msr numbers which we expose to userspace through KVM_GET_MSRS
 * and KVM_SET_MSRS, and KVM_GET_MSR_INDEX_LIST.
 *
 * This list is modified at module load time to reflect the
 * capabilities of the host cpu.
 */
static u32 msrs_to_save[] = {
	MSR_IA32_SYSENTER_CS, MSR_IA32_SYSENTER_ESP, MSR_IA32_SYSENTER_EIP,
	MSR_K6_STAR,
#ifdef CONFIG_X86_64
	MSR_CSTAR, MSR_KERNEL_GS_BASE, MSR_SYSCALL_MASK, MSR_LSTAR,
#endif
	MSR_IA32_TIME_STAMP_COUNTER, MSR_KVM_SYSTEM_TIME, MSR_KVM_WALL_CLOCK,
	MSR_IA32_PERF_STATUS, MSR_IA32_CR_PAT, MSR_VM_HSAVE_PA
};

static unsigned num_msrs_to_save;

static u32 emulated_msrs[] = {
	MSR_IA32_MISC_ENABLE,
};

static void set_efer(struct kvm_vcpu *vcpu, u64 efer)
{
	if (efer & efer_reserved_bits) {
		printk(KERN_DEBUG "set_efer: 0x%llx #GP, reserved bits\n",
		       efer);
		kvm_inject_gp(vcpu, 0);
		return;
	}

	if (is_paging(vcpu)
	    && (vcpu->arch.shadow_efer & EFER_LME) != (efer & EFER_LME)) {
		printk(KERN_DEBUG "set_efer: #GP, change LME while paging\n");
		kvm_inject_gp(vcpu, 0);
		return;
	}

	if (efer & EFER_FFXSR) {
		struct kvm_cpuid_entry2 *feat;

		feat = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
		if (!feat || !(feat->edx & bit(X86_FEATURE_FXSR_OPT))) {
			printk(KERN_DEBUG "set_efer: #GP, enable FFXSR w/o CPUID capability\n");
			kvm_inject_gp(vcpu, 0);
			return;
		}
	}

	if (efer & EFER_SVME) {
		struct kvm_cpuid_entry2 *feat;

		feat = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
		if (!feat || !(feat->ecx & bit(X86_FEATURE_SVM))) {
			printk(KERN_DEBUG "set_efer: #GP, enable SVM w/o SVM\n");
			kvm_inject_gp(vcpu, 0);
			return;
		}
	}

	kvm_x86_ops->set_efer(vcpu, efer);
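	/*
	 * EFER.LMA is set by the processor when long mode is activated,
	 * not by software; preserve the current LMA value rather than
	 * taking whatever the guest wrote.
	 */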
	efer &= ~EFER_LMA;
	efer |= vcpu->arch.shadow_efer & EFER_LMA;

	vcpu->arch.shadow_efer = efer;

	vcpu->arch.mmu.base_role.nxe = (efer & EFER_NX) && !tdp_enabled;
	kvm_mmu_reset_context(vcpu);
}

void kvm_enable_efer_bits(u64 mask)
{
	efer_reserved_bits &= ~mask;
}
EXPORT_SYMBOL_GPL(kvm_enable_efer_bits);

/*
 * Writes msr value into the appropriate "register".
 * Returns 0 on success, non-0 otherwise.
 * Assumes vcpu_load() was already called.
 */
int kvm_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
{
	return kvm_x86_ops->set_msr(vcpu, msr_index, data);
}

/*
 * Adapt set_msr() to msr_io()'s calling convention
 */
static int do_set_msr(struct kvm_vcpu *vcpu, unsigned index, u64 *data)
{
	return kvm_set_msr(vcpu, index, *data);
}

static void kvm_write_wall_clock(struct kvm *kvm, gpa_t wall_clock)
{
	static int version;
	struct pvclock_wall_clock wc;
	struct timespec now, sys, boot;
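	/*
	 * The version field follows a seqlock-like protocol: it is odd
	 * while an update is in flight and even once the update is
	 * complete, so a guest that reads an odd or changing version
	 * knows to retry.
	 */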
	if (!wall_clock)
		return;

	version++;

	kvm_write_guest(kvm, wall_clock, &version, sizeof(version));

	/*
	 * The guest calculates current wall clock time by adding
	 * system time (updated by kvm_write_guest_time below) to the
	 * wall clock specified here.  Guest system time equals host
	 * system time for us, thus we must fill in host boot time here.
	 */
	now = current_kernel_time();
	ktime_get_ts(&sys);
	boot = ns_to_timespec(timespec_to_ns(&now) - timespec_to_ns(&sys));

	wc.sec = boot.tv_sec;
	wc.nsec = boot.tv_nsec;
	wc.version = version;

	kvm_write_guest(kvm, wall_clock, &wc, sizeof(wc));

	version++;
	kvm_write_guest(kvm, wall_clock, &version, sizeof(version));
}

static uint32_t div_frac(uint32_t dividend, uint32_t divisor)
{
	uint32_t quotient, remainder;

	/* Don't try to replace with do_div(), this one calculates
	 * "(dividend << 32) / divisor" */
	__asm__ ( "divl %4"
		  : "=a" (quotient), "=d" (remainder)
		  : "0" (0), "1" (dividend), "r" (divisor) );
	return quotient;
}
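/*
 * kvm_set_time_scale() below normalises the TSC rate into the range
 * (1e9, 2e9] ticks per second, then derives a 0.32 fixed-point
 * multiplier.  Worked example (illustrative numbers): for a 2 GHz TSC,
 * tsc_khz = 2000000, no shifting is needed (tsc_shift = 0) and
 * tsc_to_system_mul = (1e9 << 32) / 2e9 = 0x80000000, i.e. 0.5 in
 * fixed point -- each TSC tick accounts for half a nanosecond.
 */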
static void kvm_set_time_scale(uint32_t tsc_khz, struct pvclock_vcpu_time_info *hv_clock)
{
	uint64_t nsecs = 1000000000LL;
	int32_t  shift = 0;
	uint64_t tps64;
	uint32_t tps32;

	tps64 = tsc_khz * 1000LL;
	while (tps64 > nsecs*2) {
		tps64 >>= 1;
		shift--;
	}

	tps32 = (uint32_t)tps64;
	while (tps32 <= (uint32_t)nsecs) {
		tps32 <<= 1;
		shift++;
	}

	hv_clock->tsc_shift = shift;
	hv_clock->tsc_to_system_mul = div_frac(nsecs, tps32);

	pr_debug("%s: tsc_khz %u, tsc_shift %d, tsc_mul %u\n",
		 __func__, tsc_khz, hv_clock->tsc_shift,
		 hv_clock->tsc_to_system_mul);
}

static DEFINE_PER_CPU(unsigned long, cpu_tsc_khz);

static void kvm_write_guest_time(struct kvm_vcpu *v)
{
	struct timespec ts;
	unsigned long flags;
	struct kvm_vcpu_arch *vcpu = &v->arch;
	void *shared_kaddr;
	unsigned long this_tsc_khz;

	if (!vcpu->time_page)
		return;

	this_tsc_khz = get_cpu_var(cpu_tsc_khz);
	if (unlikely(vcpu->hv_clock_tsc_khz != this_tsc_khz)) {
		kvm_set_time_scale(this_tsc_khz, &vcpu->hv_clock);
		vcpu->hv_clock_tsc_khz = this_tsc_khz;
	}
	put_cpu_var(cpu_tsc_khz);

	/* Keep irq disabled to prevent changes to the clock */
	local_irq_save(flags);
	kvm_get_msr(v, MSR_IA32_TIME_STAMP_COUNTER,
		    &vcpu->hv_clock.tsc_timestamp);
	ktime_get_ts(&ts);
	local_irq_restore(flags);

	/* With all the info we got, fill in the values */
	vcpu->hv_clock.system_time = ts.tv_nsec +
				     (NSEC_PER_SEC * (u64)ts.tv_sec);

	/*
	 * The interface expects us to write an even number signaling that the
	 * update is finished. Since the guest won't see the intermediate
	 * state, we just increase by 2 at the end.
	 */
	vcpu->hv_clock.version += 2;

	shared_kaddr = kmap_atomic(vcpu->time_page, KM_USER0);

	memcpy(shared_kaddr + vcpu->time_offset, &vcpu->hv_clock,
	       sizeof(vcpu->hv_clock));

	kunmap_atomic(shared_kaddr, KM_USER0);

	mark_page_dirty(v->kvm, vcpu->time >> PAGE_SHIFT);
}

static int kvm_request_guest_time_update(struct kvm_vcpu *v)
{
	struct kvm_vcpu_arch *vcpu = &v->arch;

	if (!vcpu->time_page)
		return 0;
	set_bit(KVM_REQ_KVMCLOCK_UPDATE, &v->requests);
	return 1;
}

static bool msr_mtrr_valid(unsigned msr)
{
	switch (msr) {
	case 0x200 ... 0x200 + 2 * KVM_NR_VAR_MTRR - 1:
	case MSR_MTRRfix64K_00000:
	case MSR_MTRRfix16K_80000:
	case MSR_MTRRfix16K_A0000:
	case MSR_MTRRfix4K_C0000:
	case MSR_MTRRfix4K_C8000:
	case MSR_MTRRfix4K_D0000:
	case MSR_MTRRfix4K_D8000:
	case MSR_MTRRfix4K_E0000:
	case MSR_MTRRfix4K_E8000:
	case MSR_MTRRfix4K_F0000:
	case MSR_MTRRfix4K_F8000:
	case MSR_MTRRdefType:
	case MSR_IA32_CR_PAT:
		return true;
	case 0x2f8:
		return true;
	}
	return false;
}

static int set_msr_mtrr(struct kvm_vcpu *vcpu, u32 msr, u64 data)
{
	u64 *p = (u64 *)&vcpu->arch.mtrr_state.fixed_ranges;

	if (!msr_mtrr_valid(msr))
		return 1;

	if (msr == MSR_MTRRdefType) {
		vcpu->arch.mtrr_state.def_type = data;
		vcpu->arch.mtrr_state.enabled = (data & 0xc00) >> 10;
	} else if (msr == MSR_MTRRfix64K_00000)
		p[0] = data;
	else if (msr == MSR_MTRRfix16K_80000 || msr == MSR_MTRRfix16K_A0000)
		p[1 + msr - MSR_MTRRfix16K_80000] = data;
	else if (msr >= MSR_MTRRfix4K_C0000 && msr <= MSR_MTRRfix4K_F8000)
		p[3 + msr - MSR_MTRRfix4K_C0000] = data;
	else if (msr == MSR_IA32_CR_PAT)
		vcpu->arch.pat = data;
	else {	/* Variable MTRRs */
		int idx, is_mtrr_mask;
		u64 *pt;
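		/*
		 * Variable-range MTRRs come as MSR pairs: MTRRphysBase(n)
		 * at 0x200 + 2n and MTRRphysMask(n) at 0x200 + 2n + 1, so
		 * the pair index and the base/mask selector fall straight
		 * out of the MSR number.
		 */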
		idx = (msr - 0x200) / 2;
		is_mtrr_mask = msr - 0x200 - 2 * idx;
		if (!is_mtrr_mask)
			pt =
			  (u64 *)&vcpu->arch.mtrr_state.var_ranges[idx].base_lo;
		else
			pt =
			  (u64 *)&vcpu->arch.mtrr_state.var_ranges[idx].mask_lo;
		*pt = data;
	}

	kvm_mmu_reset_context(vcpu);
	return 0;
}

int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)
{
	switch (msr) {
	case MSR_EFER:
		set_efer(vcpu, data);
		break;
	case MSR_IA32_MC0_STATUS:
		pr_unimpl(vcpu, "%s: MSR_IA32_MC0_STATUS 0x%llx, nop\n",
			  __func__, data);
		break;
	case MSR_IA32_MCG_STATUS:
		pr_unimpl(vcpu, "%s: MSR_IA32_MCG_STATUS 0x%llx, nop\n",
			  __func__, data);
		break;
	case MSR_IA32_MCG_CTL:
		pr_unimpl(vcpu, "%s: MSR_IA32_MCG_CTL 0x%llx, nop\n",
			  __func__, data);
		break;
	case MSR_IA32_DEBUGCTLMSR:
		if (!data) {
			/* We support the non-activated case already */
			break;
		} else if (data & ~(DEBUGCTLMSR_LBR | DEBUGCTLMSR_BTF)) {
			/* Values other than LBR and BTF are vendor-specific,
			   thus reserved and should throw a #GP */
			return 1;
		}
		pr_unimpl(vcpu, "%s: MSR_IA32_DEBUGCTLMSR 0x%llx, nop\n",
			  __func__, data);
		break;
	case MSR_IA32_UCODE_REV:
	case MSR_IA32_UCODE_WRITE:
	case MSR_VM_HSAVE_PA:
		break;
	case 0x200 ... 0x2ff:
		return set_msr_mtrr(vcpu, msr, data);
	case MSR_IA32_APICBASE:
		kvm_set_apic_base(vcpu, data);
		break;
	case MSR_IA32_MISC_ENABLE:
		vcpu->arch.ia32_misc_enable_msr = data;
		break;
	case MSR_KVM_WALL_CLOCK:
		vcpu->kvm->arch.wall_clock = data;
		kvm_write_wall_clock(vcpu->kvm, data);
		break;
	case MSR_KVM_SYSTEM_TIME: {
		if (vcpu->arch.time_page) {
			kvm_release_page_dirty(vcpu->arch.time_page);
			vcpu->arch.time_page = NULL;
		}

		vcpu->arch.time = data;

		/* we verify if the enable bit is set... */
		if (!(data & 1))
			break;

		/* ...but clean it before doing the actual write */
		vcpu->arch.time_offset = data & ~(PAGE_MASK | 1);

		vcpu->arch.time_page =
				gfn_to_page(vcpu->kvm, data >> PAGE_SHIFT);

		if (is_error_page(vcpu->arch.time_page)) {
			kvm_release_page_clean(vcpu->arch.time_page);
			vcpu->arch.time_page = NULL;
		}

		kvm_request_guest_time_update(vcpu);
		break;
	}
	default:
		pr_unimpl(vcpu, "unhandled wrmsr: 0x%x data %llx\n", msr, data);
		return 1;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_set_msr_common);

/*
 * Reads an msr value (of 'msr_index') into 'pdata'.
 * Returns 0 on success, non-0 otherwise.
 * Assumes vcpu_load() was already called.
 */
int kvm_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
{
	return kvm_x86_ops->get_msr(vcpu, msr_index, pdata);
}

static int get_msr_mtrr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
{
	u64 *p = (u64 *)&vcpu->arch.mtrr_state.fixed_ranges;

	if (!msr_mtrr_valid(msr))
		return 1;

	if (msr == MSR_MTRRdefType)
		*pdata = vcpu->arch.mtrr_state.def_type +
			 (vcpu->arch.mtrr_state.enabled << 10);
	else if (msr == MSR_MTRRfix64K_00000)
		*pdata = p[0];
	else if (msr == MSR_MTRRfix16K_80000 || msr == MSR_MTRRfix16K_A0000)
		*pdata = p[1 + msr - MSR_MTRRfix16K_80000];
	else if (msr >= MSR_MTRRfix4K_C0000 && msr <= MSR_MTRRfix4K_F8000)
		*pdata = p[3 + msr - MSR_MTRRfix4K_C0000];
	else if (msr == MSR_IA32_CR_PAT)
		*pdata = vcpu->arch.pat;
	else {	/* Variable MTRRs */
		int idx, is_mtrr_mask;
		u64 *pt;

		idx = (msr - 0x200) / 2;
		is_mtrr_mask = msr - 0x200 - 2 * idx;
		if (!is_mtrr_mask)
			pt =
			  (u64 *)&vcpu->arch.mtrr_state.var_ranges[idx].base_lo;
		else
			pt =
			  (u64 *)&vcpu->arch.mtrr_state.var_ranges[idx].mask_lo;
		*pdata = *pt;
	}

	return 0;
}

int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
{
	u64 data;

	switch (msr) {
	case 0xc0010010: /* SYSCFG */
	case 0xc0010015: /* HWCR */
	case MSR_IA32_PLATFORM_ID:
	case MSR_IA32_P5_MC_ADDR:
	case MSR_IA32_P5_MC_TYPE:
	case MSR_IA32_MC0_CTL:
	case MSR_IA32_MCG_STATUS:
	case MSR_IA32_MCG_CAP:
	case MSR_IA32_MCG_CTL:
	case MSR_IA32_MC0_MISC:
	case MSR_IA32_MC0_MISC+4:
	case MSR_IA32_MC0_MISC+8:
	case MSR_IA32_MC0_MISC+12:
	case MSR_IA32_MC0_MISC+16:
	case MSR_IA32_MC0_MISC+20:
	case MSR_IA32_UCODE_REV:
	case MSR_IA32_EBL_CR_POWERON:
	case MSR_IA32_DEBUGCTLMSR:
	case MSR_IA32_LASTBRANCHFROMIP:
	case MSR_IA32_LASTBRANCHTOIP:
	case MSR_IA32_LASTINTFROMIP:
	case MSR_IA32_LASTINTTOIP:
	case MSR_VM_HSAVE_PA:
	case MSR_P6_EVNTSEL0:
	case MSR_P6_EVNTSEL1:
		data = 0;
		break;
	case MSR_MTRRcap:
		data = 0x500 | KVM_NR_VAR_MTRR;
		break;
	case 0x200 ... 0x2ff:
		return get_msr_mtrr(vcpu, msr, pdata);
	case 0xcd: /* fsb frequency */
		data = 3;
		break;
	case MSR_IA32_APICBASE:
		data = kvm_get_apic_base(vcpu);
		break;
	case MSR_IA32_MISC_ENABLE:
		data = vcpu->arch.ia32_misc_enable_msr;
		break;
	case MSR_IA32_PERF_STATUS:
		/* TSC increment by tick */
		data = 1000ULL;
		/* CPU multiplier */
		data |= (((uint64_t)4ULL) << 40);
		break;
	case MSR_EFER:
		data = vcpu->arch.shadow_efer;
		break;
	case MSR_KVM_WALL_CLOCK:
		data = vcpu->kvm->arch.wall_clock;
		break;
	case MSR_KVM_SYSTEM_TIME:
		data = vcpu->arch.time;
		break;
	default:
		pr_unimpl(vcpu, "unhandled rdmsr: 0x%x\n", msr);
		return 1;
	}
	*pdata = data;
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_get_msr_common);

/*
 * Read or write a bunch of msrs.  All parameters are kernel addresses.
 *
 * @return number of msrs set successfully.
 */
static int __msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs *msrs,
		    struct kvm_msr_entry *entries,
		    int (*do_msr)(struct kvm_vcpu *vcpu,
				  unsigned index, u64 *data))
{
	int i;

	vcpu_load(vcpu);

	down_read(&vcpu->kvm->slots_lock);
	for (i = 0; i < msrs->nmsrs; ++i)
		if (do_msr(vcpu, entries[i].index, &entries[i].data))
			break;
	up_read(&vcpu->kvm->slots_lock);

	vcpu_put(vcpu);

	return i;
}

/*
 * Read or write a bunch of msrs.  Parameters are user addresses.
 *
 * @return number of msrs set successfully.
 */
static int msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs __user *user_msrs,
		  int (*do_msr)(struct kvm_vcpu *vcpu,
				unsigned index, u64 *data),
		  int writeback)
{
	struct kvm_msrs msrs;
	struct kvm_msr_entry *entries;
	int r, n;
	unsigned size;

	r = -EFAULT;
	if (copy_from_user(&msrs, user_msrs, sizeof msrs))
		goto out;

	r = -E2BIG;
	if (msrs.nmsrs >= MAX_IO_MSRS)
		goto out;

	r = -ENOMEM;
	size = sizeof(struct kvm_msr_entry) * msrs.nmsrs;
	entries = vmalloc(size);
	if (!entries)
		goto out;

	r = -EFAULT;
	if (copy_from_user(entries, user_msrs->entries, size))
		goto out_free;

	r = n = __msr_io(vcpu, &msrs, entries, do_msr);
	if (r < 0)
		goto out_free;

	r = -EFAULT;
	if (writeback && copy_to_user(user_msrs->entries, entries, size))
		goto out_free;

	r = n;

out_free:
	vfree(entries);
out:
	return r;
}

int kvm_dev_ioctl_check_extension(long ext)
{
	int r;

	switch (ext) {
	case KVM_CAP_IRQCHIP:
	case KVM_CAP_HLT:
	case KVM_CAP_MMU_SHADOW_CACHE_CONTROL:
	case KVM_CAP_SET_TSS_ADDR:
	case KVM_CAP_EXT_CPUID:
	case KVM_CAP_CLOCKSOURCE:
	case KVM_CAP_PIT:
	case KVM_CAP_NOP_IO_DELAY:
	case KVM_CAP_MP_STATE:
	case KVM_CAP_SYNC_MMU:
	case KVM_CAP_REINJECT_CONTROL:
	case KVM_CAP_IRQ_INJECT_STATUS:
	case KVM_CAP_ASSIGN_DEV_IRQ:
		r = 1;
		break;
	case KVM_CAP_COALESCED_MMIO:
		r = KVM_COALESCED_MMIO_PAGE_OFFSET;
		break;
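	/*
	 * Userspace VAPIC emulation is only needed when the CPU lacks
	 * hardware TPR acceleration, hence the negation below.
	 */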
	case KVM_CAP_VAPIC:
		r = !kvm_x86_ops->cpu_has_accelerated_tpr();
		break;
	case KVM_CAP_NR_VCPUS:
		r = KVM_MAX_VCPUS;
		break;
	case KVM_CAP_NR_MEMSLOTS:
		r = KVM_MEMORY_SLOTS;
		break;
	case KVM_CAP_PV_MMU:
		r = !tdp_enabled;
		break;
	case KVM_CAP_IOMMU:
		r = iommu_found();
		break;
	default:
		r = 0;
		break;
	}
	return r;
}

long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg)
{
	void __user *argp = (void __user *)arg;
	long r;

	switch (ioctl) {
	case KVM_GET_MSR_INDEX_LIST: {
		struct kvm_msr_list __user *user_msr_list = argp;
		struct kvm_msr_list msr_list;
		unsigned n;

		r = -EFAULT;
		if (copy_from_user(&msr_list, user_msr_list, sizeof msr_list))
			goto out;
		n = msr_list.nmsrs;
		msr_list.nmsrs = num_msrs_to_save + ARRAY_SIZE(emulated_msrs);
		if (copy_to_user(user_msr_list, &msr_list, sizeof msr_list))
			goto out;
		r = -E2BIG;
		/* the caller's buffer must hold both arrays copied below */
		if (n < msr_list.nmsrs)
			goto out;
  946. r = -EFAULT;
  947. if (copy_to_user(user_msr_list->indices, &msrs_to_save,
  948. num_msrs_to_save * sizeof(u32)))
  949. goto out;
  950. if (copy_to_user(user_msr_list->indices
  951. + num_msrs_to_save * sizeof(u32),
  952. &emulated_msrs,
  953. ARRAY_SIZE(emulated_msrs) * sizeof(u32)))
  954. goto out;
  955. r = 0;
  956. break;
  957. }
  958. case KVM_GET_SUPPORTED_CPUID: {
  959. struct kvm_cpuid2 __user *cpuid_arg = argp;
  960. struct kvm_cpuid2 cpuid;
  961. r = -EFAULT;
  962. if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
  963. goto out;
  964. r = kvm_dev_ioctl_get_supported_cpuid(&cpuid,
  965. cpuid_arg->entries);
  966. if (r)
  967. goto out;
  968. r = -EFAULT;
  969. if (copy_to_user(cpuid_arg, &cpuid, sizeof cpuid))
  970. goto out;
  971. r = 0;
  972. break;
  973. }
  974. default:
  975. r = -EINVAL;
  976. }
  977. out:
  978. return r;
  979. }
  980. void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
  981. {
  982. kvm_x86_ops->vcpu_load(vcpu, cpu);
  983. kvm_request_guest_time_update(vcpu);
  984. }
  985. void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
  986. {
  987. kvm_x86_ops->vcpu_put(vcpu);
  988. kvm_put_guest_fpu(vcpu);
  989. }
  990. static int is_efer_nx(void)
  991. {
  992. unsigned long long efer = 0;
  993. rdmsrl_safe(MSR_EFER, &efer);
  994. return efer & EFER_NX;
  995. }
  996. static void cpuid_fix_nx_cap(struct kvm_vcpu *vcpu)
  997. {
  998. int i;
  999. struct kvm_cpuid_entry2 *e, *entry;
  1000. entry = NULL;
  1001. for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
  1002. e = &vcpu->arch.cpuid_entries[i];
  1003. if (e->function == 0x80000001) {
  1004. entry = e;
  1005. break;
  1006. }
  1007. }
  1008. if (entry && (entry->edx & (1 << 20)) && !is_efer_nx()) {
  1009. entry->edx &= ~(1 << 20);
  1010. printk(KERN_INFO "kvm: guest NX capability removed\n");
  1011. }
  1012. }
  1013. /* when an old userspace process fills a new kernel module */
  1014. static int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu,
  1015. struct kvm_cpuid *cpuid,
  1016. struct kvm_cpuid_entry __user *entries)
  1017. {
  1018. int r, i;
  1019. struct kvm_cpuid_entry *cpuid_entries;
  1020. r = -E2BIG;
  1021. if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
  1022. goto out;
  1023. r = -ENOMEM;
  1024. cpuid_entries = vmalloc(sizeof(struct kvm_cpuid_entry) * cpuid->nent);
  1025. if (!cpuid_entries)
  1026. goto out;
  1027. r = -EFAULT;
  1028. if (copy_from_user(cpuid_entries, entries,
  1029. cpuid->nent * sizeof(struct kvm_cpuid_entry)))
  1030. goto out_free;
  1031. for (i = 0; i < cpuid->nent; i++) {
  1032. vcpu->arch.cpuid_entries[i].function = cpuid_entries[i].function;
  1033. vcpu->arch.cpuid_entries[i].eax = cpuid_entries[i].eax;
  1034. vcpu->arch.cpuid_entries[i].ebx = cpuid_entries[i].ebx;
  1035. vcpu->arch.cpuid_entries[i].ecx = cpuid_entries[i].ecx;
  1036. vcpu->arch.cpuid_entries[i].edx = cpuid_entries[i].edx;
  1037. vcpu->arch.cpuid_entries[i].index = 0;
  1038. vcpu->arch.cpuid_entries[i].flags = 0;
  1039. vcpu->arch.cpuid_entries[i].padding[0] = 0;
  1040. vcpu->arch.cpuid_entries[i].padding[1] = 0;
  1041. vcpu->arch.cpuid_entries[i].padding[2] = 0;
  1042. }
  1043. vcpu->arch.cpuid_nent = cpuid->nent;
  1044. cpuid_fix_nx_cap(vcpu);
  1045. r = 0;
  1046. out_free:
  1047. vfree(cpuid_entries);
  1048. out:
  1049. return r;
  1050. }
  1051. static int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
  1052. struct kvm_cpuid2 *cpuid,
  1053. struct kvm_cpuid_entry2 __user *entries)
  1054. {
  1055. int r;
  1056. r = -E2BIG;
  1057. if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
  1058. goto out;
  1059. r = -EFAULT;
  1060. if (copy_from_user(&vcpu->arch.cpuid_entries, entries,
  1061. cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
  1062. goto out;
  1063. vcpu->arch.cpuid_nent = cpuid->nent;
  1064. return 0;
  1065. out:
  1066. return r;
  1067. }
  1068. static int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
  1069. struct kvm_cpuid2 *cpuid,
  1070. struct kvm_cpuid_entry2 __user *entries)
  1071. {
  1072. int r;
  1073. r = -E2BIG;
  1074. if (cpuid->nent < vcpu->arch.cpuid_nent)
  1075. goto out;
  1076. r = -EFAULT;
  1077. if (copy_to_user(entries, &vcpu->arch.cpuid_entries,
  1078. vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
  1079. goto out;
  1080. return 0;
  1081. out:
  1082. cpuid->nent = vcpu->arch.cpuid_nent;
  1083. return r;
  1084. }
  1085. static void do_cpuid_1_ent(struct kvm_cpuid_entry2 *entry, u32 function,
  1086. u32 index)
  1087. {
  1088. entry->function = function;
  1089. entry->index = index;
  1090. cpuid_count(entry->function, entry->index,
  1091. &entry->eax, &entry->ebx, &entry->ecx, &entry->edx);
  1092. entry->flags = 0;
  1093. }
  1094. #define F(x) bit(X86_FEATURE_##x)
  1095. static void do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
  1096. u32 index, int *nent, int maxnent)
  1097. {
  1098. unsigned f_nx = is_efer_nx() ? F(NX) : 0;
  1099. #ifdef CONFIG_X86_64
  1100. unsigned f_lm = F(LM);
  1101. #else
  1102. unsigned f_lm = 0;
  1103. #endif
  1104. /* cpuid 1.edx */
  1105. const u32 kvm_supported_word0_x86_features =
  1106. F(FPU) | F(VME) | F(DE) | F(PSE) |
  1107. F(TSC) | F(MSR) | F(PAE) | F(MCE) |
  1108. F(CX8) | F(APIC) | 0 /* Reserved */ | F(SEP) |
  1109. F(MTRR) | F(PGE) | F(MCA) | F(CMOV) |
  1110. F(PAT) | F(PSE36) | 0 /* PSN */ | F(CLFLSH) |
  1111. 0 /* Reserved, DS, ACPI */ | F(MMX) |
  1112. F(FXSR) | F(XMM) | F(XMM2) | F(SELFSNOOP) |
  1113. 0 /* HTT, TM, Reserved, PBE */;
  1114. /* cpuid 0x80000001.edx */
  1115. const u32 kvm_supported_word1_x86_features =
  1116. F(FPU) | F(VME) | F(DE) | F(PSE) |
  1117. F(TSC) | F(MSR) | F(PAE) | F(MCE) |
  1118. F(CX8) | F(APIC) | 0 /* Reserved */ | F(SYSCALL) |
  1119. F(MTRR) | F(PGE) | F(MCA) | F(CMOV) |
  1120. F(PAT) | F(PSE36) | 0 /* Reserved */ |
  1121. f_nx | 0 /* Reserved */ | F(MMXEXT) | F(MMX) |
  1122. F(FXSR) | F(FXSR_OPT) | 0 /* GBPAGES */ | 0 /* RDTSCP */ |
  1123. 0 /* Reserved */ | f_lm | F(3DNOWEXT) | F(3DNOW);
  1124. /* cpuid 1.ecx */
  1125. const u32 kvm_supported_word4_x86_features =
  1126. F(XMM3) | F(CX16);
  1127. /* cpuid 0x80000001.ecx */
  1128. const u32 kvm_supported_word6_x86_features =
  1129. F(LAHF_LM) | F(CMP_LEGACY) | F(SVM) | 0 /* ExtApicSpace */ |
  1130. F(CR8_LEGACY) | F(ABM) | F(SSE4A) | F(MISALIGNSSE) |
  1131. F(3DNOWPREFETCH) | 0 /* OSVW */ | 0 /* IBS */ | F(SSE5) |
  1132. 0 /* SKINIT */ | 0 /* WDT */;
  1133. /* all calls to cpuid_count() should be made on the same cpu */
  1134. get_cpu();
  1135. do_cpuid_1_ent(entry, function, index);
  1136. ++*nent;
  1137. switch (function) {
  1138. case 0:
  1139. entry->eax = min(entry->eax, (u32)0xb);
  1140. break;
  1141. case 1:
  1142. entry->edx &= kvm_supported_word0_x86_features;
  1143. entry->ecx &= kvm_supported_word4_x86_features;
  1144. break;
  1145. /* function 2 entries are STATEFUL. That is, repeated cpuid commands
  1146. * may return different values. This forces us to get_cpu() before
  1147. * issuing the first command, and also to emulate this annoying behavior
  1148. * in kvm_emulate_cpuid() using KVM_CPUID_FLAG_STATE_READ_NEXT */
  1149. case 2: {
  1150. int t, times = entry->eax & 0xff;
  1151. entry->flags |= KVM_CPUID_FLAG_STATEFUL_FUNC;
  1152. entry->flags |= KVM_CPUID_FLAG_STATE_READ_NEXT;
  1153. for (t = 1; t < times && *nent < maxnent; ++t) {
  1154. do_cpuid_1_ent(&entry[t], function, 0);
  1155. entry[t].flags |= KVM_CPUID_FLAG_STATEFUL_FUNC;
  1156. ++*nent;
  1157. }
  1158. break;
  1159. }
1160. /* functions 4 and 0xb have an additional significant index. */
  1161. case 4: {
  1162. int i, cache_type;
  1163. entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
  1164. /* read more entries until cache_type is zero */
  1165. for (i = 1; *nent < maxnent; ++i) {
  1166. cache_type = entry[i - 1].eax & 0x1f;
  1167. if (!cache_type)
  1168. break;
  1169. do_cpuid_1_ent(&entry[i], function, i);
  1170. entry[i].flags |=
  1171. KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
  1172. ++*nent;
  1173. }
  1174. break;
  1175. }
  1176. case 0xb: {
  1177. int i, level_type;
  1178. entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
  1179. /* read more entries until level_type is zero */
  1180. for (i = 1; *nent < maxnent; ++i) {
  1181. level_type = entry[i - 1].ecx & 0xff00;
  1182. if (!level_type)
  1183. break;
  1184. do_cpuid_1_ent(&entry[i], function, i);
  1185. entry[i].flags |=
  1186. KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
  1187. ++*nent;
  1188. }
  1189. break;
  1190. }
  1191. case 0x80000000:
  1192. entry->eax = min(entry->eax, 0x8000001a);
  1193. break;
  1194. case 0x80000001:
  1195. entry->edx &= kvm_supported_word1_x86_features;
  1196. entry->ecx &= kvm_supported_word6_x86_features;
  1197. break;
  1198. }
  1199. put_cpu();
  1200. }
  1201. #undef F
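/*
 * Worked example of the F() masking above: kvm_supported_word4_x86_features
 * is F(XMM3) | F(CX16), i.e. CPUID.1:ECX bits 0 and 13, or 0x00002001. A
 * host reporting ECX = 0x0000e3fd therefore advertises only 0x00002001 to
 * the guest; every feature KVM cannot yet virtualize stays hidden.
 */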
  1202. static int kvm_dev_ioctl_get_supported_cpuid(struct kvm_cpuid2 *cpuid,
  1203. struct kvm_cpuid_entry2 __user *entries)
  1204. {
  1205. struct kvm_cpuid_entry2 *cpuid_entries;
  1206. int limit, nent = 0, r = -E2BIG;
  1207. u32 func;
  1208. if (cpuid->nent < 1)
  1209. goto out;
  1210. r = -ENOMEM;
  1211. cpuid_entries = vmalloc(sizeof(struct kvm_cpuid_entry2) * cpuid->nent);
  1212. if (!cpuid_entries)
  1213. goto out;
  1214. do_cpuid_ent(&cpuid_entries[0], 0, 0, &nent, cpuid->nent);
  1215. limit = cpuid_entries[0].eax;
  1216. for (func = 1; func <= limit && nent < cpuid->nent; ++func)
  1217. do_cpuid_ent(&cpuid_entries[nent], func, 0,
  1218. &nent, cpuid->nent);
  1219. r = -E2BIG;
  1220. if (nent >= cpuid->nent)
  1221. goto out_free;
  1222. do_cpuid_ent(&cpuid_entries[nent], 0x80000000, 0, &nent, cpuid->nent);
  1223. limit = cpuid_entries[nent - 1].eax;
  1224. for (func = 0x80000001; func <= limit && nent < cpuid->nent; ++func)
  1225. do_cpuid_ent(&cpuid_entries[nent], func, 0,
  1226. &nent, cpuid->nent);
  1227. r = -EFAULT;
  1228. if (copy_to_user(entries, cpuid_entries,
  1229. nent * sizeof(struct kvm_cpuid_entry2)))
  1230. goto out_free;
  1231. cpuid->nent = nent;
  1232. r = 0;
  1233. out_free:
  1234. vfree(cpuid_entries);
  1235. out:
  1236. return r;
  1237. }
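/*
 * Illustrative sketch (an assumption, not part of this file): userspace
 * reaches the routine above through the KVM_GET_SUPPORTED_CPUID ioctl on
 * the /dev/kvm fd, then typically trims the result and installs it with
 * KVM_SET_CPUID2 on each vCPU. kvm_fd is a hypothetical open handle on
 * /dev/kvm, and c a buffer sized for n entries as in the sketch further up.
 *
 *	c->nent = n;
 *	while (ioctl(kvm_fd, KVM_GET_SUPPORTED_CPUID, c) < 0 &&
 *	       errno == E2BIG) {
 *		n *= 2;
 *		c = realloc(c, sizeof(*c) + n * sizeof(c->entries[0]));
 *		c->nent = n;
 *	}
 */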
  1238. static int kvm_vcpu_ioctl_get_lapic(struct kvm_vcpu *vcpu,
  1239. struct kvm_lapic_state *s)
  1240. {
  1241. vcpu_load(vcpu);
  1242. memcpy(s->regs, vcpu->arch.apic->regs, sizeof *s);
  1243. vcpu_put(vcpu);
  1244. return 0;
  1245. }
  1246. static int kvm_vcpu_ioctl_set_lapic(struct kvm_vcpu *vcpu,
  1247. struct kvm_lapic_state *s)
  1248. {
  1249. vcpu_load(vcpu);
  1250. memcpy(vcpu->arch.apic->regs, s->regs, sizeof *s);
  1251. kvm_apic_post_state_restore(vcpu);
  1252. vcpu_put(vcpu);
  1253. return 0;
  1254. }
  1255. static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
  1256. struct kvm_interrupt *irq)
  1257. {
  1258. if (irq->irq < 0 || irq->irq >= 256)
  1259. return -EINVAL;
  1260. if (irqchip_in_kernel(vcpu->kvm))
  1261. return -ENXIO;
  1262. vcpu_load(vcpu);
  1263. set_bit(irq->irq, vcpu->arch.irq_pending);
  1264. set_bit(irq->irq / BITS_PER_LONG, &vcpu->arch.irq_summary);
  1265. vcpu_put(vcpu);
  1266. return 0;
  1267. }
  1268. static int kvm_vcpu_ioctl_nmi(struct kvm_vcpu *vcpu)
  1269. {
  1270. vcpu_load(vcpu);
  1271. kvm_inject_nmi(vcpu);
  1272. vcpu_put(vcpu);
  1273. return 0;
  1274. }
  1275. static int vcpu_ioctl_tpr_access_reporting(struct kvm_vcpu *vcpu,
  1276. struct kvm_tpr_access_ctl *tac)
  1277. {
  1278. if (tac->flags)
  1279. return -EINVAL;
  1280. vcpu->arch.tpr_access_reporting = !!tac->enabled;
  1281. return 0;
  1282. }
  1283. long kvm_arch_vcpu_ioctl(struct file *filp,
  1284. unsigned int ioctl, unsigned long arg)
  1285. {
  1286. struct kvm_vcpu *vcpu = filp->private_data;
  1287. void __user *argp = (void __user *)arg;
  1288. int r;
  1289. struct kvm_lapic_state *lapic = NULL;
  1290. switch (ioctl) {
  1291. case KVM_GET_LAPIC: {
  1292. lapic = kzalloc(sizeof(struct kvm_lapic_state), GFP_KERNEL);
  1293. r = -ENOMEM;
  1294. if (!lapic)
  1295. goto out;
  1296. r = kvm_vcpu_ioctl_get_lapic(vcpu, lapic);
  1297. if (r)
  1298. goto out;
  1299. r = -EFAULT;
  1300. if (copy_to_user(argp, lapic, sizeof(struct kvm_lapic_state)))
  1301. goto out;
  1302. r = 0;
  1303. break;
  1304. }
  1305. case KVM_SET_LAPIC: {
  1306. lapic = kmalloc(sizeof(struct kvm_lapic_state), GFP_KERNEL);
  1307. r = -ENOMEM;
  1308. if (!lapic)
  1309. goto out;
  1310. r = -EFAULT;
  1311. if (copy_from_user(lapic, argp, sizeof(struct kvm_lapic_state)))
  1312. goto out;
  1313. r = kvm_vcpu_ioctl_set_lapic(vcpu, lapic);
  1314. if (r)
  1315. goto out;
  1316. r = 0;
  1317. break;
  1318. }
  1319. case KVM_INTERRUPT: {
  1320. struct kvm_interrupt irq;
  1321. r = -EFAULT;
  1322. if (copy_from_user(&irq, argp, sizeof irq))
  1323. goto out;
  1324. r = kvm_vcpu_ioctl_interrupt(vcpu, &irq);
  1325. if (r)
  1326. goto out;
  1327. r = 0;
  1328. break;
  1329. }
  1330. case KVM_NMI: {
  1331. r = kvm_vcpu_ioctl_nmi(vcpu);
  1332. if (r)
  1333. goto out;
  1334. r = 0;
  1335. break;
  1336. }
  1337. case KVM_SET_CPUID: {
  1338. struct kvm_cpuid __user *cpuid_arg = argp;
  1339. struct kvm_cpuid cpuid;
  1340. r = -EFAULT;
  1341. if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
  1342. goto out;
  1343. r = kvm_vcpu_ioctl_set_cpuid(vcpu, &cpuid, cpuid_arg->entries);
  1344. if (r)
  1345. goto out;
  1346. break;
  1347. }
  1348. case KVM_SET_CPUID2: {
  1349. struct kvm_cpuid2 __user *cpuid_arg = argp;
  1350. struct kvm_cpuid2 cpuid;
  1351. r = -EFAULT;
  1352. if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
  1353. goto out;
  1354. r = kvm_vcpu_ioctl_set_cpuid2(vcpu, &cpuid,
  1355. cpuid_arg->entries);
  1356. if (r)
  1357. goto out;
  1358. break;
  1359. }
  1360. case KVM_GET_CPUID2: {
  1361. struct kvm_cpuid2 __user *cpuid_arg = argp;
  1362. struct kvm_cpuid2 cpuid;
  1363. r = -EFAULT;
  1364. if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
  1365. goto out;
  1366. r = kvm_vcpu_ioctl_get_cpuid2(vcpu, &cpuid,
  1367. cpuid_arg->entries);
  1368. if (r)
  1369. goto out;
  1370. r = -EFAULT;
  1371. if (copy_to_user(cpuid_arg, &cpuid, sizeof cpuid))
  1372. goto out;
  1373. r = 0;
  1374. break;
  1375. }
  1376. case KVM_GET_MSRS:
  1377. r = msr_io(vcpu, argp, kvm_get_msr, 1);
  1378. break;
  1379. case KVM_SET_MSRS:
  1380. r = msr_io(vcpu, argp, do_set_msr, 0);
  1381. break;
  1382. case KVM_TPR_ACCESS_REPORTING: {
  1383. struct kvm_tpr_access_ctl tac;
  1384. r = -EFAULT;
  1385. if (copy_from_user(&tac, argp, sizeof tac))
  1386. goto out;
  1387. r = vcpu_ioctl_tpr_access_reporting(vcpu, &tac);
  1388. if (r)
  1389. goto out;
  1390. r = -EFAULT;
  1391. if (copy_to_user(argp, &tac, sizeof tac))
  1392. goto out;
  1393. r = 0;
  1394. break;
1395. }
  1396. case KVM_SET_VAPIC_ADDR: {
  1397. struct kvm_vapic_addr va;
  1398. r = -EINVAL;
  1399. if (!irqchip_in_kernel(vcpu->kvm))
  1400. goto out;
  1401. r = -EFAULT;
  1402. if (copy_from_user(&va, argp, sizeof va))
  1403. goto out;
  1404. r = 0;
  1405. kvm_lapic_set_vapic_addr(vcpu, va.vapic_addr);
  1406. break;
  1407. }
  1408. default:
  1409. r = -EINVAL;
  1410. }
  1411. out:
  1412. kfree(lapic);
  1413. return r;
  1414. }
  1415. static int kvm_vm_ioctl_set_tss_addr(struct kvm *kvm, unsigned long addr)
  1416. {
  1417. int ret;
  1418. if (addr > (unsigned int)(-3 * PAGE_SIZE))
  1419. return -1;
  1420. ret = kvm_x86_ops->set_tss_addr(kvm, addr);
  1421. return ret;
  1422. }
  1423. static int kvm_vm_ioctl_set_nr_mmu_pages(struct kvm *kvm,
  1424. u32 kvm_nr_mmu_pages)
  1425. {
  1426. if (kvm_nr_mmu_pages < KVM_MIN_ALLOC_MMU_PAGES)
  1427. return -EINVAL;
  1428. down_write(&kvm->slots_lock);
  1429. kvm_mmu_change_mmu_pages(kvm, kvm_nr_mmu_pages);
  1430. kvm->arch.n_requested_mmu_pages = kvm_nr_mmu_pages;
  1431. up_write(&kvm->slots_lock);
  1432. return 0;
  1433. }
  1434. static int kvm_vm_ioctl_get_nr_mmu_pages(struct kvm *kvm)
  1435. {
  1436. return kvm->arch.n_alloc_mmu_pages;
  1437. }
  1438. gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn)
  1439. {
  1440. int i;
  1441. struct kvm_mem_alias *alias;
  1442. for (i = 0; i < kvm->arch.naliases; ++i) {
  1443. alias = &kvm->arch.aliases[i];
  1444. if (gfn >= alias->base_gfn
  1445. && gfn < alias->base_gfn + alias->npages)
  1446. return alias->target_gfn + gfn - alias->base_gfn;
  1447. }
  1448. return gfn;
  1449. }
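/*
 * Worked example: with a single alias slot { base_gfn = 0xa0,
 * npages = 0x10, target_gfn = 0x100 }, unalias_gfn(kvm, 0xa5) returns
 * 0x100 + (0xa5 - 0xa0) = 0x105, while gfn 0xb0 falls outside the
 * window and is returned unchanged.
 */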
  1450. /*
  1451. * Set a new alias region. Aliases map a portion of physical memory into
  1452. * another portion. This is useful for memory windows, for example the PC
  1453. * VGA region.
  1454. */
  1455. static int kvm_vm_ioctl_set_memory_alias(struct kvm *kvm,
  1456. struct kvm_memory_alias *alias)
  1457. {
  1458. int r, n;
  1459. struct kvm_mem_alias *p;
  1460. r = -EINVAL;
  1461. /* General sanity checks */
  1462. if (alias->memory_size & (PAGE_SIZE - 1))
  1463. goto out;
  1464. if (alias->guest_phys_addr & (PAGE_SIZE - 1))
  1465. goto out;
  1466. if (alias->slot >= KVM_ALIAS_SLOTS)
  1467. goto out;
  1468. if (alias->guest_phys_addr + alias->memory_size
  1469. < alias->guest_phys_addr)
  1470. goto out;
  1471. if (alias->target_phys_addr + alias->memory_size
  1472. < alias->target_phys_addr)
  1473. goto out;
  1474. down_write(&kvm->slots_lock);
  1475. spin_lock(&kvm->mmu_lock);
  1476. p = &kvm->arch.aliases[alias->slot];
  1477. p->base_gfn = alias->guest_phys_addr >> PAGE_SHIFT;
  1478. p->npages = alias->memory_size >> PAGE_SHIFT;
  1479. p->target_gfn = alias->target_phys_addr >> PAGE_SHIFT;
  1480. for (n = KVM_ALIAS_SLOTS; n > 0; --n)
  1481. if (kvm->arch.aliases[n - 1].npages)
  1482. break;
  1483. kvm->arch.naliases = n;
  1484. spin_unlock(&kvm->mmu_lock);
  1485. kvm_mmu_zap_all(kvm);
  1486. up_write(&kvm->slots_lock);
  1487. return 0;
  1488. out:
  1489. return r;
  1490. }
  1491. static int kvm_vm_ioctl_get_irqchip(struct kvm *kvm, struct kvm_irqchip *chip)
  1492. {
  1493. int r;
  1494. r = 0;
  1495. switch (chip->chip_id) {
  1496. case KVM_IRQCHIP_PIC_MASTER:
  1497. memcpy(&chip->chip.pic,
  1498. &pic_irqchip(kvm)->pics[0],
  1499. sizeof(struct kvm_pic_state));
  1500. break;
  1501. case KVM_IRQCHIP_PIC_SLAVE:
  1502. memcpy(&chip->chip.pic,
  1503. &pic_irqchip(kvm)->pics[1],
  1504. sizeof(struct kvm_pic_state));
  1505. break;
  1506. case KVM_IRQCHIP_IOAPIC:
  1507. memcpy(&chip->chip.ioapic,
  1508. ioapic_irqchip(kvm),
  1509. sizeof(struct kvm_ioapic_state));
  1510. break;
  1511. default:
  1512. r = -EINVAL;
  1513. break;
  1514. }
  1515. return r;
  1516. }
  1517. static int kvm_vm_ioctl_set_irqchip(struct kvm *kvm, struct kvm_irqchip *chip)
  1518. {
  1519. int r;
  1520. r = 0;
  1521. switch (chip->chip_id) {
  1522. case KVM_IRQCHIP_PIC_MASTER:
  1523. memcpy(&pic_irqchip(kvm)->pics[0],
  1524. &chip->chip.pic,
  1525. sizeof(struct kvm_pic_state));
  1526. break;
  1527. case KVM_IRQCHIP_PIC_SLAVE:
  1528. memcpy(&pic_irqchip(kvm)->pics[1],
  1529. &chip->chip.pic,
  1530. sizeof(struct kvm_pic_state));
  1531. break;
  1532. case KVM_IRQCHIP_IOAPIC:
  1533. memcpy(ioapic_irqchip(kvm),
  1534. &chip->chip.ioapic,
  1535. sizeof(struct kvm_ioapic_state));
  1536. break;
  1537. default:
  1538. r = -EINVAL;
  1539. break;
  1540. }
  1541. kvm_pic_update_irq(pic_irqchip(kvm));
  1542. return r;
  1543. }
  1544. static int kvm_vm_ioctl_get_pit(struct kvm *kvm, struct kvm_pit_state *ps)
  1545. {
  1546. int r = 0;
  1547. memcpy(ps, &kvm->arch.vpit->pit_state, sizeof(struct kvm_pit_state));
  1548. return r;
  1549. }
  1550. static int kvm_vm_ioctl_set_pit(struct kvm *kvm, struct kvm_pit_state *ps)
  1551. {
  1552. int r = 0;
  1553. memcpy(&kvm->arch.vpit->pit_state, ps, sizeof(struct kvm_pit_state));
  1554. kvm_pit_load_count(kvm, 0, ps->channels[0].count);
  1555. return r;
  1556. }
  1557. static int kvm_vm_ioctl_reinject(struct kvm *kvm,
  1558. struct kvm_reinject_control *control)
  1559. {
  1560. if (!kvm->arch.vpit)
  1561. return -ENXIO;
  1562. kvm->arch.vpit->pit_state.pit_timer.reinject = control->pit_reinject;
  1563. return 0;
  1564. }
  1565. /*
  1566. * Get (and clear) the dirty memory log for a memory slot.
  1567. */
  1568. int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
  1569. struct kvm_dirty_log *log)
  1570. {
  1571. int r;
  1572. int n;
  1573. struct kvm_memory_slot *memslot;
  1574. int is_dirty = 0;
  1575. down_write(&kvm->slots_lock);
  1576. r = kvm_get_dirty_log(kvm, log, &is_dirty);
  1577. if (r)
  1578. goto out;
  1579. /* If nothing is dirty, don't bother messing with page tables. */
  1580. if (is_dirty) {
  1581. kvm_mmu_slot_remove_write_access(kvm, log->slot);
  1582. kvm_flush_remote_tlbs(kvm);
  1583. memslot = &kvm->memslots[log->slot];
  1584. n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;
  1585. memset(memslot->dirty_bitmap, 0, n);
  1586. }
  1587. r = 0;
  1588. out:
  1589. up_write(&kvm->slots_lock);
  1590. return r;
  1591. }
  1592. long kvm_arch_vm_ioctl(struct file *filp,
  1593. unsigned int ioctl, unsigned long arg)
  1594. {
  1595. struct kvm *kvm = filp->private_data;
  1596. void __user *argp = (void __user *)arg;
  1597. int r = -EINVAL;
  1598. /*
  1599. * This union makes it completely explicit to gcc-3.x
  1600. * that these two variables' stack usage should be
  1601. * combined, not added together.
  1602. */
  1603. union {
  1604. struct kvm_pit_state ps;
  1605. struct kvm_memory_alias alias;
  1606. } u;
  1607. switch (ioctl) {
  1608. case KVM_SET_TSS_ADDR:
  1609. r = kvm_vm_ioctl_set_tss_addr(kvm, arg);
  1610. if (r < 0)
  1611. goto out;
  1612. break;
  1613. case KVM_SET_MEMORY_REGION: {
  1614. struct kvm_memory_region kvm_mem;
  1615. struct kvm_userspace_memory_region kvm_userspace_mem;
  1616. r = -EFAULT;
  1617. if (copy_from_user(&kvm_mem, argp, sizeof kvm_mem))
  1618. goto out;
  1619. kvm_userspace_mem.slot = kvm_mem.slot;
  1620. kvm_userspace_mem.flags = kvm_mem.flags;
  1621. kvm_userspace_mem.guest_phys_addr = kvm_mem.guest_phys_addr;
  1622. kvm_userspace_mem.memory_size = kvm_mem.memory_size;
  1623. r = kvm_vm_ioctl_set_memory_region(kvm, &kvm_userspace_mem, 0);
  1624. if (r)
  1625. goto out;
  1626. break;
  1627. }
  1628. case KVM_SET_NR_MMU_PAGES:
  1629. r = kvm_vm_ioctl_set_nr_mmu_pages(kvm, arg);
  1630. if (r)
  1631. goto out;
  1632. break;
  1633. case KVM_GET_NR_MMU_PAGES:
  1634. r = kvm_vm_ioctl_get_nr_mmu_pages(kvm);
  1635. break;
  1636. case KVM_SET_MEMORY_ALIAS:
  1637. r = -EFAULT;
  1638. if (copy_from_user(&u.alias, argp, sizeof(struct kvm_memory_alias)))
  1639. goto out;
  1640. r = kvm_vm_ioctl_set_memory_alias(kvm, &u.alias);
  1641. if (r)
  1642. goto out;
  1643. break;
  1644. case KVM_CREATE_IRQCHIP:
  1645. r = -ENOMEM;
  1646. kvm->arch.vpic = kvm_create_pic(kvm);
  1647. if (kvm->arch.vpic) {
  1648. r = kvm_ioapic_init(kvm);
  1649. if (r) {
  1650. kfree(kvm->arch.vpic);
  1651. kvm->arch.vpic = NULL;
  1652. goto out;
  1653. }
  1654. } else
  1655. goto out;
  1656. r = kvm_setup_default_irq_routing(kvm);
  1657. if (r) {
  1658. kfree(kvm->arch.vpic);
  1659. kfree(kvm->arch.vioapic);
  1660. goto out;
  1661. }
  1662. break;
  1663. case KVM_CREATE_PIT:
  1664. mutex_lock(&kvm->lock);
  1665. r = -EEXIST;
  1666. if (kvm->arch.vpit)
  1667. goto create_pit_unlock;
  1668. r = -ENOMEM;
  1669. kvm->arch.vpit = kvm_create_pit(kvm);
  1670. if (kvm->arch.vpit)
  1671. r = 0;
  1672. create_pit_unlock:
  1673. mutex_unlock(&kvm->lock);
  1674. break;
  1675. case KVM_IRQ_LINE_STATUS:
  1676. case KVM_IRQ_LINE: {
  1677. struct kvm_irq_level irq_event;
  1678. r = -EFAULT;
  1679. if (copy_from_user(&irq_event, argp, sizeof irq_event))
  1680. goto out;
  1681. if (irqchip_in_kernel(kvm)) {
  1682. __s32 status;
  1683. mutex_lock(&kvm->lock);
  1684. status = kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID,
  1685. irq_event.irq, irq_event.level);
  1686. mutex_unlock(&kvm->lock);
  1687. if (ioctl == KVM_IRQ_LINE_STATUS) {
  1688. irq_event.status = status;
  1689. if (copy_to_user(argp, &irq_event,
  1690. sizeof irq_event))
  1691. goto out;
  1692. }
  1693. r = 0;
  1694. }
  1695. break;
  1696. }
  1697. case KVM_GET_IRQCHIP: {
  1698. /* 0: PIC master, 1: PIC slave, 2: IOAPIC */
  1699. struct kvm_irqchip *chip = kmalloc(sizeof(*chip), GFP_KERNEL);
  1700. r = -ENOMEM;
  1701. if (!chip)
  1702. goto out;
  1703. r = -EFAULT;
  1704. if (copy_from_user(chip, argp, sizeof *chip))
  1705. goto get_irqchip_out;
  1706. r = -ENXIO;
  1707. if (!irqchip_in_kernel(kvm))
  1708. goto get_irqchip_out;
  1709. r = kvm_vm_ioctl_get_irqchip(kvm, chip);
  1710. if (r)
  1711. goto get_irqchip_out;
  1712. r = -EFAULT;
  1713. if (copy_to_user(argp, chip, sizeof *chip))
  1714. goto get_irqchip_out;
  1715. r = 0;
  1716. get_irqchip_out:
  1717. kfree(chip);
  1718. if (r)
  1719. goto out;
  1720. break;
  1721. }
  1722. case KVM_SET_IRQCHIP: {
  1723. /* 0: PIC master, 1: PIC slave, 2: IOAPIC */
  1724. struct kvm_irqchip *chip = kmalloc(sizeof(*chip), GFP_KERNEL);
  1725. r = -ENOMEM;
  1726. if (!chip)
  1727. goto out;
  1728. r = -EFAULT;
  1729. if (copy_from_user(chip, argp, sizeof *chip))
  1730. goto set_irqchip_out;
  1731. r = -ENXIO;
  1732. if (!irqchip_in_kernel(kvm))
  1733. goto set_irqchip_out;
  1734. r = kvm_vm_ioctl_set_irqchip(kvm, chip);
  1735. if (r)
  1736. goto set_irqchip_out;
  1737. r = 0;
  1738. set_irqchip_out:
  1739. kfree(chip);
  1740. if (r)
  1741. goto out;
  1742. break;
  1743. }
  1744. case KVM_GET_PIT: {
  1745. r = -EFAULT;
  1746. if (copy_from_user(&u.ps, argp, sizeof(struct kvm_pit_state)))
  1747. goto out;
  1748. r = -ENXIO;
  1749. if (!kvm->arch.vpit)
  1750. goto out;
  1751. r = kvm_vm_ioctl_get_pit(kvm, &u.ps);
  1752. if (r)
  1753. goto out;
  1754. r = -EFAULT;
  1755. if (copy_to_user(argp, &u.ps, sizeof(struct kvm_pit_state)))
  1756. goto out;
  1757. r = 0;
  1758. break;
  1759. }
  1760. case KVM_SET_PIT: {
  1761. r = -EFAULT;
  1762. if (copy_from_user(&u.ps, argp, sizeof u.ps))
  1763. goto out;
  1764. r = -ENXIO;
  1765. if (!kvm->arch.vpit)
  1766. goto out;
  1767. r = kvm_vm_ioctl_set_pit(kvm, &u.ps);
  1768. if (r)
  1769. goto out;
  1770. r = 0;
  1771. break;
  1772. }
  1773. case KVM_REINJECT_CONTROL: {
  1774. struct kvm_reinject_control control;
  1775. r = -EFAULT;
  1776. if (copy_from_user(&control, argp, sizeof(control)))
  1777. goto out;
  1778. r = kvm_vm_ioctl_reinject(kvm, &control);
  1779. if (r)
  1780. goto out;
  1781. r = 0;
  1782. break;
  1783. }
  1784. default:
  1785. ;
  1786. }
  1787. out:
  1788. return r;
  1789. }
  1790. static void kvm_init_msr_list(void)
  1791. {
  1792. u32 dummy[2];
  1793. unsigned i, j;
  1794. for (i = j = 0; i < ARRAY_SIZE(msrs_to_save); i++) {
  1795. if (rdmsr_safe(msrs_to_save[i], &dummy[0], &dummy[1]) < 0)
  1796. continue;
  1797. if (j < i)
  1798. msrs_to_save[j] = msrs_to_save[i];
  1799. j++;
  1800. }
  1801. num_msrs_to_save = j;
  1802. }
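/*
 * Example of the in-place compaction above: if msrs_to_save starts as
 * { MSR_A, MSR_B, MSR_C } and rdmsr_safe() faults on MSR_B because the
 * host lacks it, the surviving entries are shifted down to
 * { MSR_A, MSR_C } and num_msrs_to_save becomes 2. MSR_A/B/C are
 * placeholder names for illustration.
 */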
  1803. /*
1804. * Only the APIC needs an MMIO device hook, so take a shortcut for now.
  1805. */
  1806. static struct kvm_io_device *vcpu_find_pervcpu_dev(struct kvm_vcpu *vcpu,
  1807. gpa_t addr, int len,
  1808. int is_write)
  1809. {
  1810. struct kvm_io_device *dev;
  1811. if (vcpu->arch.apic) {
  1812. dev = &vcpu->arch.apic->dev;
  1813. if (dev->in_range(dev, addr, len, is_write))
  1814. return dev;
  1815. }
  1816. return NULL;
  1817. }
  1818. static struct kvm_io_device *vcpu_find_mmio_dev(struct kvm_vcpu *vcpu,
  1819. gpa_t addr, int len,
  1820. int is_write)
  1821. {
  1822. struct kvm_io_device *dev;
  1823. dev = vcpu_find_pervcpu_dev(vcpu, addr, len, is_write);
  1824. if (dev == NULL)
  1825. dev = kvm_io_bus_find_dev(&vcpu->kvm->mmio_bus, addr, len,
  1826. is_write);
  1827. return dev;
  1828. }
  1829. static int kvm_read_guest_virt(gva_t addr, void *val, unsigned int bytes,
  1830. struct kvm_vcpu *vcpu)
  1831. {
  1832. void *data = val;
  1833. int r = X86EMUL_CONTINUE;
  1834. while (bytes) {
  1835. gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr);
  1836. unsigned offset = addr & (PAGE_SIZE-1);
  1837. unsigned toread = min(bytes, (unsigned)PAGE_SIZE - offset);
  1838. int ret;
  1839. if (gpa == UNMAPPED_GVA) {
  1840. r = X86EMUL_PROPAGATE_FAULT;
  1841. goto out;
  1842. }
  1843. ret = kvm_read_guest(vcpu->kvm, gpa, data, toread);
  1844. if (ret < 0) {
  1845. r = X86EMUL_UNHANDLEABLE;
  1846. goto out;
  1847. }
  1848. bytes -= toread;
  1849. data += toread;
  1850. addr += toread;
  1851. }
  1852. out:
  1853. return r;
  1854. }
  1855. static int kvm_write_guest_virt(gva_t addr, void *val, unsigned int bytes,
  1856. struct kvm_vcpu *vcpu)
  1857. {
  1858. void *data = val;
  1859. int r = X86EMUL_CONTINUE;
  1860. while (bytes) {
  1861. gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr);
  1862. unsigned offset = addr & (PAGE_SIZE-1);
  1863. unsigned towrite = min(bytes, (unsigned)PAGE_SIZE - offset);
  1864. int ret;
  1865. if (gpa == UNMAPPED_GVA) {
  1866. r = X86EMUL_PROPAGATE_FAULT;
  1867. goto out;
  1868. }
  1869. ret = kvm_write_guest(vcpu->kvm, gpa, data, towrite);
  1870. if (ret < 0) {
  1871. r = X86EMUL_UNHANDLEABLE;
  1872. goto out;
  1873. }
  1874. bytes -= towrite;
  1875. data += towrite;
  1876. addr += towrite;
  1877. }
  1878. out:
  1879. return r;
  1880. }
  1881. static int emulator_read_emulated(unsigned long addr,
  1882. void *val,
  1883. unsigned int bytes,
  1884. struct kvm_vcpu *vcpu)
  1885. {
  1886. struct kvm_io_device *mmio_dev;
  1887. gpa_t gpa;
  1888. if (vcpu->mmio_read_completed) {
  1889. memcpy(val, vcpu->mmio_data, bytes);
  1890. vcpu->mmio_read_completed = 0;
  1891. return X86EMUL_CONTINUE;
  1892. }
  1893. gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr);
  1894. /* For APIC access vmexit */
  1895. if ((gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE)
  1896. goto mmio;
  1897. if (kvm_read_guest_virt(addr, val, bytes, vcpu)
  1898. == X86EMUL_CONTINUE)
  1899. return X86EMUL_CONTINUE;
  1900. if (gpa == UNMAPPED_GVA)
  1901. return X86EMUL_PROPAGATE_FAULT;
  1902. mmio:
  1903. /*
  1904. * Is this MMIO handled locally?
  1905. */
  1906. mutex_lock(&vcpu->kvm->lock);
  1907. mmio_dev = vcpu_find_mmio_dev(vcpu, gpa, bytes, 0);
  1908. if (mmio_dev) {
  1909. kvm_iodevice_read(mmio_dev, gpa, bytes, val);
  1910. mutex_unlock(&vcpu->kvm->lock);
  1911. return X86EMUL_CONTINUE;
  1912. }
  1913. mutex_unlock(&vcpu->kvm->lock);
  1914. vcpu->mmio_needed = 1;
  1915. vcpu->mmio_phys_addr = gpa;
  1916. vcpu->mmio_size = bytes;
  1917. vcpu->mmio_is_write = 0;
  1918. return X86EMUL_UNHANDLEABLE;
  1919. }
  1920. int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
  1921. const void *val, int bytes)
  1922. {
  1923. int ret;
  1924. ret = kvm_write_guest(vcpu->kvm, gpa, val, bytes);
  1925. if (ret < 0)
  1926. return 0;
  1927. kvm_mmu_pte_write(vcpu, gpa, val, bytes, 1);
  1928. return 1;
  1929. }
  1930. static int emulator_write_emulated_onepage(unsigned long addr,
  1931. const void *val,
  1932. unsigned int bytes,
  1933. struct kvm_vcpu *vcpu)
  1934. {
  1935. struct kvm_io_device *mmio_dev;
  1936. gpa_t gpa;
  1937. gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr);
  1938. if (gpa == UNMAPPED_GVA) {
  1939. kvm_inject_page_fault(vcpu, addr, 2);
  1940. return X86EMUL_PROPAGATE_FAULT;
  1941. }
  1942. /* For APIC access vmexit */
  1943. if ((gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE)
  1944. goto mmio;
  1945. if (emulator_write_phys(vcpu, gpa, val, bytes))
  1946. return X86EMUL_CONTINUE;
  1947. mmio:
  1948. /*
  1949. * Is this MMIO handled locally?
  1950. */
  1951. mutex_lock(&vcpu->kvm->lock);
  1952. mmio_dev = vcpu_find_mmio_dev(vcpu, gpa, bytes, 1);
  1953. if (mmio_dev) {
  1954. kvm_iodevice_write(mmio_dev, gpa, bytes, val);
  1955. mutex_unlock(&vcpu->kvm->lock);
  1956. return X86EMUL_CONTINUE;
  1957. }
  1958. mutex_unlock(&vcpu->kvm->lock);
  1959. vcpu->mmio_needed = 1;
  1960. vcpu->mmio_phys_addr = gpa;
  1961. vcpu->mmio_size = bytes;
  1962. vcpu->mmio_is_write = 1;
  1963. memcpy(vcpu->mmio_data, val, bytes);
  1964. return X86EMUL_CONTINUE;
  1965. }
  1966. int emulator_write_emulated(unsigned long addr,
  1967. const void *val,
  1968. unsigned int bytes,
  1969. struct kvm_vcpu *vcpu)
  1970. {
  1971. /* Crossing a page boundary? */
  1972. if (((addr + bytes - 1) ^ addr) & PAGE_MASK) {
  1973. int rc, now;
  1974. now = -addr & ~PAGE_MASK;
  1975. rc = emulator_write_emulated_onepage(addr, val, now, vcpu);
  1976. if (rc != X86EMUL_CONTINUE)
  1977. return rc;
  1978. addr += now;
  1979. val += now;
  1980. bytes -= now;
  1981. }
  1982. return emulator_write_emulated_onepage(addr, val, bytes, vcpu);
  1983. }
  1984. EXPORT_SYMBOL_GPL(emulator_write_emulated);
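/*
 * Worked example of the page-split arithmetic above (PAGE_SIZE = 4096,
 * so ~PAGE_MASK = 0xfff): for addr = 0x1ffe and bytes = 4,
 * now = -addr & ~PAGE_MASK = 2, so the first call writes the 2 bytes
 * that end the page at 0x1ffe-0x1fff and the second call writes the
 * remaining 2 bytes starting at 0x2000.
 */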
  1985. static int emulator_cmpxchg_emulated(unsigned long addr,
  1986. const void *old,
  1987. const void *new,
  1988. unsigned int bytes,
  1989. struct kvm_vcpu *vcpu)
  1990. {
  1991. static int reported;
  1992. if (!reported) {
  1993. reported = 1;
  1994. printk(KERN_WARNING "kvm: emulating exchange as write\n");
  1995. }
  1996. #ifndef CONFIG_X86_64
1997. /* a guest's cmpxchg8b has to be emulated atomically */
  1998. if (bytes == 8) {
  1999. gpa_t gpa;
  2000. struct page *page;
  2001. char *kaddr;
  2002. u64 val;
  2003. gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr);
  2004. if (gpa == UNMAPPED_GVA ||
  2005. (gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE)
  2006. goto emul_write;
  2007. if (((gpa + bytes - 1) & PAGE_MASK) != (gpa & PAGE_MASK))
  2008. goto emul_write;
  2009. val = *(u64 *)new;
  2010. page = gfn_to_page(vcpu->kvm, gpa >> PAGE_SHIFT);
  2011. kaddr = kmap_atomic(page, KM_USER0);
  2012. set_64bit((u64 *)(kaddr + offset_in_page(gpa)), val);
  2013. kunmap_atomic(kaddr, KM_USER0);
  2014. kvm_release_page_dirty(page);
  2015. }
  2016. emul_write:
  2017. #endif
  2018. return emulator_write_emulated(addr, new, bytes, vcpu);
  2019. }
  2020. static unsigned long get_segment_base(struct kvm_vcpu *vcpu, int seg)
  2021. {
  2022. return kvm_x86_ops->get_segment_base(vcpu, seg);
  2023. }
  2024. int emulate_invlpg(struct kvm_vcpu *vcpu, gva_t address)
  2025. {
  2026. kvm_mmu_invlpg(vcpu, address);
  2027. return X86EMUL_CONTINUE;
  2028. }
  2029. int emulate_clts(struct kvm_vcpu *vcpu)
  2030. {
  2031. KVMTRACE_0D(CLTS, vcpu, handler);
  2032. kvm_x86_ops->set_cr0(vcpu, vcpu->arch.cr0 & ~X86_CR0_TS);
  2033. return X86EMUL_CONTINUE;
  2034. }
  2035. int emulator_get_dr(struct x86_emulate_ctxt *ctxt, int dr, unsigned long *dest)
  2036. {
  2037. struct kvm_vcpu *vcpu = ctxt->vcpu;
  2038. switch (dr) {
  2039. case 0 ... 3:
  2040. *dest = kvm_x86_ops->get_dr(vcpu, dr);
  2041. return X86EMUL_CONTINUE;
  2042. default:
  2043. pr_unimpl(vcpu, "%s: unexpected dr %u\n", __func__, dr);
  2044. return X86EMUL_UNHANDLEABLE;
  2045. }
  2046. }
  2047. int emulator_set_dr(struct x86_emulate_ctxt *ctxt, int dr, unsigned long value)
  2048. {
  2049. unsigned long mask = (ctxt->mode == X86EMUL_MODE_PROT64) ? ~0ULL : ~0U;
  2050. int exception;
  2051. kvm_x86_ops->set_dr(ctxt->vcpu, dr, value & mask, &exception);
  2052. if (exception) {
  2053. /* FIXME: better handling */
  2054. return X86EMUL_UNHANDLEABLE;
  2055. }
  2056. return X86EMUL_CONTINUE;
  2057. }
  2058. void kvm_report_emulation_failure(struct kvm_vcpu *vcpu, const char *context)
  2059. {
  2060. u8 opcodes[4];
  2061. unsigned long rip = kvm_rip_read(vcpu);
  2062. unsigned long rip_linear;
  2063. if (!printk_ratelimit())
  2064. return;
  2065. rip_linear = rip + get_segment_base(vcpu, VCPU_SREG_CS);
  2066. kvm_read_guest_virt(rip_linear, (void *)opcodes, 4, vcpu);
  2067. printk(KERN_ERR "emulation failed (%s) rip %lx %02x %02x %02x %02x\n",
  2068. context, rip, opcodes[0], opcodes[1], opcodes[2], opcodes[3]);
  2069. }
  2070. EXPORT_SYMBOL_GPL(kvm_report_emulation_failure);
  2071. static struct x86_emulate_ops emulate_ops = {
  2072. .read_std = kvm_read_guest_virt,
  2073. .read_emulated = emulator_read_emulated,
  2074. .write_emulated = emulator_write_emulated,
  2075. .cmpxchg_emulated = emulator_cmpxchg_emulated,
  2076. };
  2077. static void cache_all_regs(struct kvm_vcpu *vcpu)
  2078. {
  2079. kvm_register_read(vcpu, VCPU_REGS_RAX);
  2080. kvm_register_read(vcpu, VCPU_REGS_RSP);
  2081. kvm_register_read(vcpu, VCPU_REGS_RIP);
  2082. vcpu->arch.regs_dirty = ~0;
  2083. }
  2084. int emulate_instruction(struct kvm_vcpu *vcpu,
  2085. struct kvm_run *run,
  2086. unsigned long cr2,
  2087. u16 error_code,
  2088. int emulation_type)
  2089. {
  2090. int r;
  2091. struct decode_cache *c;
  2092. kvm_clear_exception_queue(vcpu);
  2093. vcpu->arch.mmio_fault_cr2 = cr2;
  2094. /*
  2095. * TODO: fix x86_emulate.c to use guest_read/write_register
2096. * instead of direct ->regs accesses; this can save hundreds of
2097. * cycles on Intel for instructions that don't read/change RSP,
2098. * for example.
  2099. */
  2100. cache_all_regs(vcpu);
  2101. vcpu->mmio_is_write = 0;
  2102. vcpu->arch.pio.string = 0;
  2103. if (!(emulation_type & EMULTYPE_NO_DECODE)) {
  2104. int cs_db, cs_l;
  2105. kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
  2106. vcpu->arch.emulate_ctxt.vcpu = vcpu;
  2107. vcpu->arch.emulate_ctxt.eflags = kvm_x86_ops->get_rflags(vcpu);
  2108. vcpu->arch.emulate_ctxt.mode =
  2109. (vcpu->arch.emulate_ctxt.eflags & X86_EFLAGS_VM)
  2110. ? X86EMUL_MODE_REAL : cs_l
  2111. ? X86EMUL_MODE_PROT64 : cs_db
  2112. ? X86EMUL_MODE_PROT32 : X86EMUL_MODE_PROT16;
  2113. r = x86_decode_insn(&vcpu->arch.emulate_ctxt, &emulate_ops);
2114. /* Reject instructions other than VMCALL/VMMCALL when
2115. * trying to emulate an invalid opcode */
  2116. c = &vcpu->arch.emulate_ctxt.decode;
  2117. if ((emulation_type & EMULTYPE_TRAP_UD) &&
  2118. (!(c->twobyte && c->b == 0x01 &&
  2119. (c->modrm_reg == 0 || c->modrm_reg == 3) &&
  2120. c->modrm_mod == 3 && c->modrm_rm == 1)))
  2121. return EMULATE_FAIL;
  2122. ++vcpu->stat.insn_emulation;
  2123. if (r) {
  2124. ++vcpu->stat.insn_emulation_fail;
  2125. if (kvm_mmu_unprotect_page_virt(vcpu, cr2))
  2126. return EMULATE_DONE;
  2127. return EMULATE_FAIL;
  2128. }
  2129. }
  2130. if (emulation_type & EMULTYPE_SKIP) {
  2131. kvm_rip_write(vcpu, vcpu->arch.emulate_ctxt.decode.eip);
  2132. return EMULATE_DONE;
  2133. }
  2134. r = x86_emulate_insn(&vcpu->arch.emulate_ctxt, &emulate_ops);
  2135. if (vcpu->arch.pio.string)
  2136. return EMULATE_DO_MMIO;
  2137. if ((r || vcpu->mmio_is_write) && run) {
  2138. run->exit_reason = KVM_EXIT_MMIO;
  2139. run->mmio.phys_addr = vcpu->mmio_phys_addr;
  2140. memcpy(run->mmio.data, vcpu->mmio_data, 8);
  2141. run->mmio.len = vcpu->mmio_size;
  2142. run->mmio.is_write = vcpu->mmio_is_write;
  2143. }
  2144. if (r) {
  2145. if (kvm_mmu_unprotect_page_virt(vcpu, cr2))
  2146. return EMULATE_DONE;
  2147. if (!vcpu->mmio_needed) {
  2148. kvm_report_emulation_failure(vcpu, "mmio");
  2149. return EMULATE_FAIL;
  2150. }
  2151. return EMULATE_DO_MMIO;
  2152. }
  2153. kvm_x86_ops->set_rflags(vcpu, vcpu->arch.emulate_ctxt.eflags);
  2154. if (vcpu->mmio_is_write) {
  2155. vcpu->mmio_needed = 0;
  2156. return EMULATE_DO_MMIO;
  2157. }
  2158. return EMULATE_DONE;
  2159. }
  2160. EXPORT_SYMBOL_GPL(emulate_instruction);
  2161. static int pio_copy_data(struct kvm_vcpu *vcpu)
  2162. {
  2163. void *p = vcpu->arch.pio_data;
  2164. gva_t q = vcpu->arch.pio.guest_gva;
  2165. unsigned bytes;
  2166. int ret;
  2167. bytes = vcpu->arch.pio.size * vcpu->arch.pio.cur_count;
  2168. if (vcpu->arch.pio.in)
  2169. ret = kvm_write_guest_virt(q, p, bytes, vcpu);
  2170. else
  2171. ret = kvm_read_guest_virt(q, p, bytes, vcpu);
  2172. return ret;
  2173. }
  2174. int complete_pio(struct kvm_vcpu *vcpu)
  2175. {
  2176. struct kvm_pio_request *io = &vcpu->arch.pio;
  2177. long delta;
  2178. int r;
  2179. unsigned long val;
  2180. if (!io->string) {
  2181. if (io->in) {
  2182. val = kvm_register_read(vcpu, VCPU_REGS_RAX);
  2183. memcpy(&val, vcpu->arch.pio_data, io->size);
  2184. kvm_register_write(vcpu, VCPU_REGS_RAX, val);
  2185. }
  2186. } else {
  2187. if (io->in) {
  2188. r = pio_copy_data(vcpu);
  2189. if (r)
  2190. return r;
  2191. }
  2192. delta = 1;
  2193. if (io->rep) {
  2194. delta *= io->cur_count;
  2195. /*
  2196. * The size of the register should really depend on
  2197. * current address size.
  2198. */
  2199. val = kvm_register_read(vcpu, VCPU_REGS_RCX);
  2200. val -= delta;
  2201. kvm_register_write(vcpu, VCPU_REGS_RCX, val);
  2202. }
  2203. if (io->down)
  2204. delta = -delta;
  2205. delta *= io->size;
  2206. if (io->in) {
  2207. val = kvm_register_read(vcpu, VCPU_REGS_RDI);
  2208. val += delta;
  2209. kvm_register_write(vcpu, VCPU_REGS_RDI, val);
  2210. } else {
  2211. val = kvm_register_read(vcpu, VCPU_REGS_RSI);
  2212. val += delta;
  2213. kvm_register_write(vcpu, VCPU_REGS_RSI, val);
  2214. }
  2215. }
  2216. io->count -= io->cur_count;
  2217. io->cur_count = 0;
  2218. return 0;
  2219. }
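/*
 * Example of the register fixups above: after a "rep insb" that moved
 * cur_count = 5 one-byte items (io->size = 1, io->in = 1, io->down = 0),
 * complete_pio() subtracts 5 from RCX and adds 5 * 1 to RDI, mirroring
 * what the hardware instruction would have done.
 */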
  2220. static void kernel_pio(struct kvm_io_device *pio_dev,
  2221. struct kvm_vcpu *vcpu,
  2222. void *pd)
  2223. {
2224. /* TODO: string I/O for in-kernel devices */
  2225. mutex_lock(&vcpu->kvm->lock);
  2226. if (vcpu->arch.pio.in)
  2227. kvm_iodevice_read(pio_dev, vcpu->arch.pio.port,
  2228. vcpu->arch.pio.size,
  2229. pd);
  2230. else
  2231. kvm_iodevice_write(pio_dev, vcpu->arch.pio.port,
  2232. vcpu->arch.pio.size,
  2233. pd);
  2234. mutex_unlock(&vcpu->kvm->lock);
  2235. }
  2236. static void pio_string_write(struct kvm_io_device *pio_dev,
  2237. struct kvm_vcpu *vcpu)
  2238. {
  2239. struct kvm_pio_request *io = &vcpu->arch.pio;
  2240. void *pd = vcpu->arch.pio_data;
  2241. int i;
  2242. mutex_lock(&vcpu->kvm->lock);
  2243. for (i = 0; i < io->cur_count; i++) {
  2244. kvm_iodevice_write(pio_dev, io->port,
  2245. io->size,
  2246. pd);
  2247. pd += io->size;
  2248. }
  2249. mutex_unlock(&vcpu->kvm->lock);
  2250. }
  2251. static struct kvm_io_device *vcpu_find_pio_dev(struct kvm_vcpu *vcpu,
  2252. gpa_t addr, int len,
  2253. int is_write)
  2254. {
  2255. return kvm_io_bus_find_dev(&vcpu->kvm->pio_bus, addr, len, is_write);
  2256. }
  2257. int kvm_emulate_pio(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
  2258. int size, unsigned port)
  2259. {
  2260. struct kvm_io_device *pio_dev;
  2261. unsigned long val;
  2262. vcpu->run->exit_reason = KVM_EXIT_IO;
  2263. vcpu->run->io.direction = in ? KVM_EXIT_IO_IN : KVM_EXIT_IO_OUT;
  2264. vcpu->run->io.size = vcpu->arch.pio.size = size;
  2265. vcpu->run->io.data_offset = KVM_PIO_PAGE_OFFSET * PAGE_SIZE;
  2266. vcpu->run->io.count = vcpu->arch.pio.count = vcpu->arch.pio.cur_count = 1;
  2267. vcpu->run->io.port = vcpu->arch.pio.port = port;
  2268. vcpu->arch.pio.in = in;
  2269. vcpu->arch.pio.string = 0;
  2270. vcpu->arch.pio.down = 0;
  2271. vcpu->arch.pio.rep = 0;
  2272. if (vcpu->run->io.direction == KVM_EXIT_IO_IN)
  2273. KVMTRACE_2D(IO_READ, vcpu, vcpu->run->io.port, (u32)size,
  2274. handler);
  2275. else
  2276. KVMTRACE_2D(IO_WRITE, vcpu, vcpu->run->io.port, (u32)size,
  2277. handler);
  2278. val = kvm_register_read(vcpu, VCPU_REGS_RAX);
  2279. memcpy(vcpu->arch.pio_data, &val, 4);
  2280. pio_dev = vcpu_find_pio_dev(vcpu, port, size, !in);
  2281. if (pio_dev) {
  2282. kernel_pio(pio_dev, vcpu, vcpu->arch.pio_data);
  2283. complete_pio(vcpu);
  2284. return 1;
  2285. }
  2286. return 0;
  2287. }
  2288. EXPORT_SYMBOL_GPL(kvm_emulate_pio);
  2289. int kvm_emulate_pio_string(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
  2290. int size, unsigned long count, int down,
  2291. gva_t address, int rep, unsigned port)
  2292. {
  2293. unsigned now, in_page;
  2294. int ret = 0;
  2295. struct kvm_io_device *pio_dev;
  2296. vcpu->run->exit_reason = KVM_EXIT_IO;
  2297. vcpu->run->io.direction = in ? KVM_EXIT_IO_IN : KVM_EXIT_IO_OUT;
  2298. vcpu->run->io.size = vcpu->arch.pio.size = size;
  2299. vcpu->run->io.data_offset = KVM_PIO_PAGE_OFFSET * PAGE_SIZE;
  2300. vcpu->run->io.count = vcpu->arch.pio.count = vcpu->arch.pio.cur_count = count;
  2301. vcpu->run->io.port = vcpu->arch.pio.port = port;
  2302. vcpu->arch.pio.in = in;
  2303. vcpu->arch.pio.string = 1;
  2304. vcpu->arch.pio.down = down;
  2305. vcpu->arch.pio.rep = rep;
  2306. if (vcpu->run->io.direction == KVM_EXIT_IO_IN)
  2307. KVMTRACE_2D(IO_READ, vcpu, vcpu->run->io.port, (u32)size,
  2308. handler);
  2309. else
  2310. KVMTRACE_2D(IO_WRITE, vcpu, vcpu->run->io.port, (u32)size,
  2311. handler);
  2312. if (!count) {
  2313. kvm_x86_ops->skip_emulated_instruction(vcpu);
  2314. return 1;
  2315. }
  2316. if (!down)
  2317. in_page = PAGE_SIZE - offset_in_page(address);
  2318. else
  2319. in_page = offset_in_page(address) + size;
  2320. now = min(count, (unsigned long)in_page / size);
  2321. if (!now)
  2322. now = 1;
  2323. if (down) {
  2324. /*
  2325. * String I/O in reverse. Yuck. Kill the guest, fix later.
  2326. */
  2327. pr_unimpl(vcpu, "guest string pio down\n");
  2328. kvm_inject_gp(vcpu, 0);
  2329. return 1;
  2330. }
  2331. vcpu->run->io.count = now;
  2332. vcpu->arch.pio.cur_count = now;
  2333. if (vcpu->arch.pio.cur_count == vcpu->arch.pio.count)
  2334. kvm_x86_ops->skip_emulated_instruction(vcpu);
  2335. vcpu->arch.pio.guest_gva = address;
  2336. pio_dev = vcpu_find_pio_dev(vcpu, port,
  2337. vcpu->arch.pio.cur_count,
  2338. !vcpu->arch.pio.in);
  2339. if (!vcpu->arch.pio.in) {
  2340. /* string PIO write */
  2341. ret = pio_copy_data(vcpu);
  2342. if (ret == X86EMUL_PROPAGATE_FAULT) {
  2343. kvm_inject_gp(vcpu, 0);
  2344. return 1;
  2345. }
  2346. if (ret == 0 && pio_dev) {
  2347. pio_string_write(pio_dev, vcpu);
  2348. complete_pio(vcpu);
  2349. if (vcpu->arch.pio.count == 0)
  2350. ret = 1;
  2351. }
  2352. } else if (pio_dev)
  2353. pr_unimpl(vcpu, "no string pio read support yet, "
  2354. "port %x size %d count %ld\n",
  2355. port, size, count);
  2356. return ret;
  2357. }
  2358. EXPORT_SYMBOL_GPL(kvm_emulate_pio_string);
  2359. static void bounce_off(void *info)
  2360. {
  2361. /* nothing */
  2362. }
  2363. static unsigned int ref_freq;
  2364. static unsigned long tsc_khz_ref;
  2365. static int kvmclock_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
  2366. void *data)
  2367. {
  2368. struct cpufreq_freqs *freq = data;
  2369. struct kvm *kvm;
  2370. struct kvm_vcpu *vcpu;
  2371. int i, send_ipi = 0;
  2372. if (!ref_freq)
  2373. ref_freq = freq->old;
  2374. if (val == CPUFREQ_PRECHANGE && freq->old > freq->new)
  2375. return 0;
  2376. if (val == CPUFREQ_POSTCHANGE && freq->old < freq->new)
  2377. return 0;
  2378. per_cpu(cpu_tsc_khz, freq->cpu) = cpufreq_scale(tsc_khz_ref, ref_freq, freq->new);
  2379. spin_lock(&kvm_lock);
  2380. list_for_each_entry(kvm, &vm_list, vm_list) {
  2381. for (i = 0; i < KVM_MAX_VCPUS; ++i) {
  2382. vcpu = kvm->vcpus[i];
  2383. if (!vcpu)
  2384. continue;
  2385. if (vcpu->cpu != freq->cpu)
  2386. continue;
  2387. if (!kvm_request_guest_time_update(vcpu))
  2388. continue;
  2389. if (vcpu->cpu != smp_processor_id())
  2390. send_ipi++;
  2391. }
  2392. }
  2393. spin_unlock(&kvm_lock);
  2394. if (freq->old < freq->new && send_ipi) {
  2395. /*
2396. * We upscale the frequency. We must make sure the guest
2397. * doesn't see old kvmclock values while running with
2398. * the new frequency, otherwise we risk that the guest
2399. * sees time go backwards.
  2400. *
  2401. * In case we update the frequency for another cpu
  2402. * (which might be in guest context) send an interrupt
  2403. * to kick the cpu out of guest context. Next time
  2404. * guest context is entered kvmclock will be updated,
  2405. * so the guest will not see stale values.
  2406. */
  2407. smp_call_function_single(freq->cpu, bounce_off, NULL, 1);
  2408. }
  2409. return 0;
  2410. }
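/*
 * The scaling above is plain proportionality: the per-cpu TSC rate is
 * recomputed as tsc_khz_ref * freq->new / ref_freq. For example, a host
 * whose TSC was calibrated at 2400000 kHz when ref_freq = 2400000 kHz
 * and that now runs at freq->new = 1600000 kHz gets
 * cpu_tsc_khz = 1600000.
 */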
  2411. static struct notifier_block kvmclock_cpufreq_notifier_block = {
  2412. .notifier_call = kvmclock_cpufreq_notifier
  2413. };
  2414. int kvm_arch_init(void *opaque)
  2415. {
  2416. int r, cpu;
  2417. struct kvm_x86_ops *ops = (struct kvm_x86_ops *)opaque;
  2418. if (kvm_x86_ops) {
  2419. printk(KERN_ERR "kvm: already loaded the other module\n");
  2420. r = -EEXIST;
  2421. goto out;
  2422. }
  2423. if (!ops->cpu_has_kvm_support()) {
  2424. printk(KERN_ERR "kvm: no hardware support\n");
  2425. r = -EOPNOTSUPP;
  2426. goto out;
  2427. }
  2428. if (ops->disabled_by_bios()) {
  2429. printk(KERN_ERR "kvm: disabled by bios\n");
  2430. r = -EOPNOTSUPP;
  2431. goto out;
  2432. }
  2433. r = kvm_mmu_module_init();
  2434. if (r)
  2435. goto out;
  2436. kvm_init_msr_list();
  2437. kvm_x86_ops = ops;
  2438. kvm_mmu_set_nonpresent_ptes(0ull, 0ull);
  2439. kvm_mmu_set_base_ptes(PT_PRESENT_MASK);
  2440. kvm_mmu_set_mask_ptes(PT_USER_MASK, PT_ACCESSED_MASK,
  2441. PT_DIRTY_MASK, PT64_NX_MASK, 0);
  2442. for_each_possible_cpu(cpu)
  2443. per_cpu(cpu_tsc_khz, cpu) = tsc_khz;
  2444. if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) {
  2445. tsc_khz_ref = tsc_khz;
  2446. cpufreq_register_notifier(&kvmclock_cpufreq_notifier_block,
  2447. CPUFREQ_TRANSITION_NOTIFIER);
  2448. }
  2449. return 0;
  2450. out:
  2451. return r;
  2452. }
  2453. void kvm_arch_exit(void)
  2454. {
  2455. if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
  2456. cpufreq_unregister_notifier(&kvmclock_cpufreq_notifier_block,
  2457. CPUFREQ_TRANSITION_NOTIFIER);
  2458. kvm_x86_ops = NULL;
  2459. kvm_mmu_module_exit();
  2460. }
  2461. int kvm_emulate_halt(struct kvm_vcpu *vcpu)
  2462. {
  2463. ++vcpu->stat.halt_exits;
  2464. KVMTRACE_0D(HLT, vcpu, handler);
  2465. if (irqchip_in_kernel(vcpu->kvm)) {
  2466. vcpu->arch.mp_state = KVM_MP_STATE_HALTED;
  2467. return 1;
  2468. } else {
  2469. vcpu->run->exit_reason = KVM_EXIT_HLT;
  2470. return 0;
  2471. }
  2472. }
  2473. EXPORT_SYMBOL_GPL(kvm_emulate_halt);
  2474. static inline gpa_t hc_gpa(struct kvm_vcpu *vcpu, unsigned long a0,
  2475. unsigned long a1)
  2476. {
  2477. if (is_long_mode(vcpu))
  2478. return a0;
  2479. else
  2480. return a0 | ((gpa_t)a1 << 32);
  2481. }
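/*
 * Worked example: a guest outside long mode passing a0 = 0x1000 and
 * a1 = 0x2 yields the 64-bit gpa (0x2ULL << 32) | 0x1000 = 0x200001000;
 * in long mode a0 already carries the full address and a1 is ignored.
 */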
  2482. int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
  2483. {
  2484. unsigned long nr, a0, a1, a2, a3, ret;
  2485. int r = 1;
  2486. nr = kvm_register_read(vcpu, VCPU_REGS_RAX);
  2487. a0 = kvm_register_read(vcpu, VCPU_REGS_RBX);
  2488. a1 = kvm_register_read(vcpu, VCPU_REGS_RCX);
  2489. a2 = kvm_register_read(vcpu, VCPU_REGS_RDX);
  2490. a3 = kvm_register_read(vcpu, VCPU_REGS_RSI);
  2491. KVMTRACE_1D(VMMCALL, vcpu, (u32)nr, handler);
  2492. if (!is_long_mode(vcpu)) {
  2493. nr &= 0xFFFFFFFF;
  2494. a0 &= 0xFFFFFFFF;
  2495. a1 &= 0xFFFFFFFF;
  2496. a2 &= 0xFFFFFFFF;
  2497. a3 &= 0xFFFFFFFF;
  2498. }
  2499. switch (nr) {
  2500. case KVM_HC_VAPIC_POLL_IRQ:
  2501. ret = 0;
  2502. break;
  2503. case KVM_HC_MMU_OP:
  2504. r = kvm_pv_mmu_op(vcpu, a0, hc_gpa(vcpu, a1, a2), &ret);
  2505. break;
  2506. default:
  2507. ret = -KVM_ENOSYS;
  2508. break;
  2509. }
  2510. kvm_register_write(vcpu, VCPU_REGS_RAX, ret);
  2511. ++vcpu->stat.hypercalls;
  2512. return r;
  2513. }
  2514. EXPORT_SYMBOL_GPL(kvm_emulate_hypercall);
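/*
 * Guest-side sketch (an assumption, not part of this file) mirroring the
 * register convention read above: the hypercall number goes in RAX and
 * the return value comes back in RAX. The instruction is vmcall on Intel
 * and vmmcall on AMD; kvm_fix_hypercall() below patches one into the
 * other when the guest uses the wrong one. example_kvm_hypercall0 is a
 * hypothetical name.
 *
 *	static inline long example_kvm_hypercall0(unsigned int nr)
 *	{
 *		long ret;
 *		asm volatile("vmcall" : "=a"(ret) : "a"(nr) : "memory");
 *		return ret;
 *	}
 */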
  2515. int kvm_fix_hypercall(struct kvm_vcpu *vcpu)
  2516. {
  2517. char instruction[3];
  2518. int ret = 0;
  2519. unsigned long rip = kvm_rip_read(vcpu);
  2520. /*
2521. * Blow out the MMU so that no other VCPU keeps an active mapping;
2522. * this ensures that the updated hypercall appears atomically across
2523. * all VCPUs.
  2524. */
  2525. kvm_mmu_zap_all(vcpu->kvm);
  2526. kvm_x86_ops->patch_hypercall(vcpu, instruction);
  2527. if (emulator_write_emulated(rip, instruction, 3, vcpu)
  2528. != X86EMUL_CONTINUE)
  2529. ret = -EFAULT;
  2530. return ret;
  2531. }
  2532. static u64 mk_cr_64(u64 curr_cr, u32 new_val)
  2533. {
  2534. return (curr_cr & ~((1ULL << 32) - 1)) | new_val;
  2535. }
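/*
 * Worked example: mk_cr_64(0x1234567800000001, 0x80050033) keeps the
 * upper 32 bits of the current value and substitutes the new low half,
 * giving 0x1234567880050033.
 */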
  2536. void realmode_lgdt(struct kvm_vcpu *vcpu, u16 limit, unsigned long base)
  2537. {
  2538. struct descriptor_table dt = { limit, base };
  2539. kvm_x86_ops->set_gdt(vcpu, &dt);
  2540. }
  2541. void realmode_lidt(struct kvm_vcpu *vcpu, u16 limit, unsigned long base)
  2542. {
  2543. struct descriptor_table dt = { limit, base };
  2544. kvm_x86_ops->set_idt(vcpu, &dt);
  2545. }
  2546. void realmode_lmsw(struct kvm_vcpu *vcpu, unsigned long msw,
  2547. unsigned long *rflags)
  2548. {
  2549. kvm_lmsw(vcpu, msw);
  2550. *rflags = kvm_x86_ops->get_rflags(vcpu);
  2551. }
  2552. unsigned long realmode_get_cr(struct kvm_vcpu *vcpu, int cr)
  2553. {
  2554. unsigned long value;
  2555. kvm_x86_ops->decache_cr4_guest_bits(vcpu);
  2556. switch (cr) {
  2557. case 0:
  2558. value = vcpu->arch.cr0;
  2559. break;
  2560. case 2:
  2561. value = vcpu->arch.cr2;
  2562. break;
  2563. case 3:
  2564. value = vcpu->arch.cr3;
  2565. break;
  2566. case 4:
  2567. value = vcpu->arch.cr4;
  2568. break;
  2569. case 8:
  2570. value = kvm_get_cr8(vcpu);
  2571. break;
  2572. default:
  2573. vcpu_printf(vcpu, "%s: unexpected cr %u\n", __func__, cr);
  2574. return 0;
  2575. }
  2576. KVMTRACE_3D(CR_READ, vcpu, (u32)cr, (u32)value,
  2577. (u32)((u64)value >> 32), handler);
  2578. return value;
  2579. }
  2580. void realmode_set_cr(struct kvm_vcpu *vcpu, int cr, unsigned long val,
  2581. unsigned long *rflags)
  2582. {
  2583. KVMTRACE_3D(CR_WRITE, vcpu, (u32)cr, (u32)val,
  2584. (u32)((u64)val >> 32), handler);
  2585. switch (cr) {
  2586. case 0:
  2587. kvm_set_cr0(vcpu, mk_cr_64(vcpu->arch.cr0, val));
  2588. *rflags = kvm_x86_ops->get_rflags(vcpu);
  2589. break;
  2590. case 2:
  2591. vcpu->arch.cr2 = val;
  2592. break;
  2593. case 3:
  2594. kvm_set_cr3(vcpu, val);
  2595. break;
  2596. case 4:
  2597. kvm_set_cr4(vcpu, mk_cr_64(vcpu->arch.cr4, val));
  2598. break;
  2599. case 8:
  2600. kvm_set_cr8(vcpu, val & 0xfUL);
  2601. break;
  2602. default:
  2603. vcpu_printf(vcpu, "%s: unexpected cr %u\n", __func__, cr);
  2604. }
  2605. }
  2606. static int move_to_next_stateful_cpuid_entry(struct kvm_vcpu *vcpu, int i)
  2607. {
  2608. struct kvm_cpuid_entry2 *e = &vcpu->arch.cpuid_entries[i];
  2609. int j, nent = vcpu->arch.cpuid_nent;
  2610. e->flags &= ~KVM_CPUID_FLAG_STATE_READ_NEXT;
  2611. /* when no next entry is found, the current entry[i] is reselected */
2612. for (j = (i + 1) % nent; ; j = (j + 1) % nent) {
  2613. struct kvm_cpuid_entry2 *ej = &vcpu->arch.cpuid_entries[j];
  2614. if (ej->function == e->function) {
  2615. ej->flags |= KVM_CPUID_FLAG_STATE_READ_NEXT;
  2616. return j;
  2617. }
  2618. }
  2619. return 0; /* silence gcc, even though control never reaches here */
  2620. }
  2621. /* find an entry with matching function, matching index (if needed), and that
  2622. * should be read next (if it's stateful) */
  2623. static int is_matching_cpuid_entry(struct kvm_cpuid_entry2 *e,
  2624. u32 function, u32 index)
  2625. {
  2626. if (e->function != function)
  2627. return 0;
  2628. if ((e->flags & KVM_CPUID_FLAG_SIGNIFCANT_INDEX) && e->index != index)
  2629. return 0;
  2630. if ((e->flags & KVM_CPUID_FLAG_STATEFUL_FUNC) &&
  2631. !(e->flags & KVM_CPUID_FLAG_STATE_READ_NEXT))
  2632. return 0;
  2633. return 1;
  2634. }
  2635. struct kvm_cpuid_entry2 *kvm_find_cpuid_entry(struct kvm_vcpu *vcpu,
  2636. u32 function, u32 index)
  2637. {
  2638. int i;
  2639. struct kvm_cpuid_entry2 *best = NULL;
  2640. for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
  2641. struct kvm_cpuid_entry2 *e;
  2642. e = &vcpu->arch.cpuid_entries[i];
  2643. if (is_matching_cpuid_entry(e, function, index)) {
  2644. if (e->flags & KVM_CPUID_FLAG_STATEFUL_FUNC)
  2645. move_to_next_stateful_cpuid_entry(vcpu, i);
  2646. best = e;
  2647. break;
  2648. }
  2649. /*
2650. * Are both functions in the same class (basic vs. extended)?
  2651. */
  2652. if (((e->function ^ function) & 0x80000000) == 0)
  2653. if (!best || e->function > best->function)
  2654. best = e;
  2655. }
  2656. return best;
  2657. }
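/*
 * Example of the fallback above: if the guest queries basic leaf 6 but
 * the table only holds functions 0, 1 and a few 0x8000xxxx leaves, no
 * entry matches exactly, so the loop returns the same-class entry with
 * the highest function number (here leaf 1), roughly matching real
 * hardware's clamping of out-of-range basic leaves.
 */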
  2658. int cpuid_maxphyaddr(struct kvm_vcpu *vcpu)
  2659. {
  2660. struct kvm_cpuid_entry2 *best;
  2661. best = kvm_find_cpuid_entry(vcpu, 0x80000008, 0);
  2662. if (best)
  2663. return best->eax & 0xff;
  2664. return 36;
  2665. }
  2666. void kvm_emulate_cpuid(struct kvm_vcpu *vcpu)
  2667. {
  2668. u32 function, index;
  2669. struct kvm_cpuid_entry2 *best;
  2670. function = kvm_register_read(vcpu, VCPU_REGS_RAX);
  2671. index = kvm_register_read(vcpu, VCPU_REGS_RCX);
  2672. kvm_register_write(vcpu, VCPU_REGS_RAX, 0);
  2673. kvm_register_write(vcpu, VCPU_REGS_RBX, 0);
  2674. kvm_register_write(vcpu, VCPU_REGS_RCX, 0);
  2675. kvm_register_write(vcpu, VCPU_REGS_RDX, 0);
  2676. best = kvm_find_cpuid_entry(vcpu, function, index);
  2677. if (best) {
  2678. kvm_register_write(vcpu, VCPU_REGS_RAX, best->eax);
  2679. kvm_register_write(vcpu, VCPU_REGS_RBX, best->ebx);
  2680. kvm_register_write(vcpu, VCPU_REGS_RCX, best->ecx);
  2681. kvm_register_write(vcpu, VCPU_REGS_RDX, best->edx);
  2682. }
  2683. kvm_x86_ops->skip_emulated_instruction(vcpu);
  2684. KVMTRACE_5D(CPUID, vcpu, function,
  2685. (u32)kvm_register_read(vcpu, VCPU_REGS_RAX),
  2686. (u32)kvm_register_read(vcpu, VCPU_REGS_RBX),
  2687. (u32)kvm_register_read(vcpu, VCPU_REGS_RCX),
  2688. (u32)kvm_register_read(vcpu, VCPU_REGS_RDX), handler);
  2689. }
  2690. EXPORT_SYMBOL_GPL(kvm_emulate_cpuid);
  2691. /*
2692. * Check whether userspace requested an interrupt window and whether
2693. * the interrupt window is open.
  2694. *
  2695. * No need to exit to userspace if we already have an interrupt queued.
  2696. */
  2697. static int dm_request_for_irq_injection(struct kvm_vcpu *vcpu,
  2698. struct kvm_run *kvm_run)
  2699. {
  2700. return (!irqchip_in_kernel(vcpu->kvm) && !kvm_cpu_has_interrupt(vcpu) &&
  2701. kvm_run->request_interrupt_window &&
  2702. kvm_arch_interrupt_allowed(vcpu));
  2703. }
  2704. static void post_kvm_run_save(struct kvm_vcpu *vcpu,
  2705. struct kvm_run *kvm_run)
  2706. {
  2707. kvm_run->if_flag = (kvm_x86_ops->get_rflags(vcpu) & X86_EFLAGS_IF) != 0;
  2708. kvm_run->cr8 = kvm_get_cr8(vcpu);
  2709. kvm_run->apic_base = kvm_get_apic_base(vcpu);
  2710. if (irqchip_in_kernel(vcpu->kvm))
  2711. kvm_run->ready_for_interrupt_injection = 1;
  2712. else
  2713. kvm_run->ready_for_interrupt_injection =
  2714. (kvm_arch_interrupt_allowed(vcpu) &&
  2715. !kvm_cpu_has_interrupt(vcpu));
  2716. }
  2717. static void vapic_enter(struct kvm_vcpu *vcpu)
  2718. {
  2719. struct kvm_lapic *apic = vcpu->arch.apic;
  2720. struct page *page;
  2721. if (!apic || !apic->vapic_addr)
  2722. return;
  2723. page = gfn_to_page(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT);
  2724. vcpu->arch.apic->vapic_page = page;
  2725. }
  2726. static void vapic_exit(struct kvm_vcpu *vcpu)
  2727. {
  2728. struct kvm_lapic *apic = vcpu->arch.apic;
  2729. if (!apic || !apic->vapic_addr)
  2730. return;
  2731. down_read(&vcpu->kvm->slots_lock);
  2732. kvm_release_page_dirty(apic->vapic_page);
  2733. mark_page_dirty(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT);
  2734. up_read(&vcpu->kvm->slots_lock);
  2735. }
  2736. static void update_cr8_intercept(struct kvm_vcpu *vcpu)
  2737. {
  2738. int max_irr, tpr;
  2739. if (!kvm_x86_ops->update_cr8_intercept)
  2740. return;
  2741. max_irr = kvm_lapic_find_highest_irr(vcpu);
  2742. if (max_irr != -1)
  2743. max_irr >>= 4;
  2744. tpr = kvm_lapic_get_cr8(vcpu);
  2745. kvm_x86_ops->update_cr8_intercept(vcpu, tpr, max_irr);
  2746. }
  2747. static void inject_irq(struct kvm_vcpu *vcpu)
  2748. {
  2749. /* try to reinject previous events if any */
  2750. if (vcpu->arch.nmi_injected) {
  2751. kvm_x86_ops->set_nmi(vcpu);
  2752. return;
  2753. }
  2754. if (vcpu->arch.interrupt.pending) {
  2755. kvm_x86_ops->set_irq(vcpu, vcpu->arch.interrupt.nr);
  2756. return;
  2757. }
  2758. /* try to inject new event if pending */
  2759. if (vcpu->arch.nmi_pending) {
  2760. if (kvm_x86_ops->nmi_allowed(vcpu)) {
  2761. vcpu->arch.nmi_pending = false;
  2762. vcpu->arch.nmi_injected = true;
  2763. kvm_x86_ops->set_nmi(vcpu);
  2764. }
  2765. } else if (kvm_cpu_has_interrupt(vcpu)) {
  2766. if (kvm_x86_ops->interrupt_allowed(vcpu)) {
  2767. kvm_queue_interrupt(vcpu, kvm_cpu_get_interrupt(vcpu));
  2768. kvm_x86_ops->set_irq(vcpu, vcpu->arch.interrupt.nr);
  2769. }
  2770. }
  2771. }
  2772. static void inject_pending_irq(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
  2773. {
  2774. bool req_int_win = !irqchip_in_kernel(vcpu->kvm) &&
  2775. kvm_run->request_interrupt_window;
  2776. if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
  2777. kvm_x86_ops->drop_interrupt_shadow(vcpu);
  2778. inject_irq(vcpu);
  2779. /* enable NMI/IRQ window open exits if needed */
  2780. if (vcpu->arch.nmi_pending)
  2781. kvm_x86_ops->enable_nmi_window(vcpu);
  2782. else if (kvm_cpu_has_interrupt(vcpu) || req_int_win)
  2783. kvm_x86_ops->enable_irq_window(vcpu);
  2784. }
static int vcpu_enter_guest(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
        int r;

        if (vcpu->requests)
                if (test_and_clear_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests))
                        kvm_mmu_unload(vcpu);

        r = kvm_mmu_reload(vcpu);
        if (unlikely(r))
                goto out;

        if (vcpu->requests) {
                if (test_and_clear_bit(KVM_REQ_MIGRATE_TIMER, &vcpu->requests))
                        __kvm_migrate_timers(vcpu);
                if (test_and_clear_bit(KVM_REQ_KVMCLOCK_UPDATE, &vcpu->requests))
                        kvm_write_guest_time(vcpu);
                if (test_and_clear_bit(KVM_REQ_MMU_SYNC, &vcpu->requests))
                        kvm_mmu_sync_roots(vcpu);
                if (test_and_clear_bit(KVM_REQ_TLB_FLUSH, &vcpu->requests))
                        kvm_x86_ops->tlb_flush(vcpu);
                if (test_and_clear_bit(KVM_REQ_REPORT_TPR_ACCESS,
                                       &vcpu->requests)) {
                        kvm_run->exit_reason = KVM_EXIT_TPR_ACCESS;
                        r = 0;
                        goto out;
                }
                if (test_and_clear_bit(KVM_REQ_TRIPLE_FAULT, &vcpu->requests)) {
                        kvm_run->exit_reason = KVM_EXIT_SHUTDOWN;
                        r = 0;
                        goto out;
                }
        }

        preempt_disable();

        kvm_x86_ops->prepare_guest_switch(vcpu);
        kvm_load_guest_fpu(vcpu);

        local_irq_disable();

        if (vcpu->requests || need_resched() || signal_pending(current)) {
                local_irq_enable();
                preempt_enable();
                r = 1;
                goto out;
        }

        vcpu->guest_mode = 1;
        /*
         * Make sure that guest_mode assignment won't happen after
         * testing the pending IRQ vector bitmap.
         */
        smp_wmb();

        if (vcpu->arch.exception.pending)
                __queue_exception(vcpu);
        else
                inject_pending_irq(vcpu, kvm_run);

        if (kvm_lapic_enabled(vcpu)) {
                if (!vcpu->arch.apic->vapic_addr)
                        update_cr8_intercept(vcpu);
                else
                        kvm_lapic_sync_to_vapic(vcpu);
        }

        up_read(&vcpu->kvm->slots_lock);

        kvm_guest_enter();

        get_debugreg(vcpu->arch.host_dr6, 6);
        get_debugreg(vcpu->arch.host_dr7, 7);
        if (unlikely(vcpu->arch.switch_db_regs)) {
                get_debugreg(vcpu->arch.host_db[0], 0);
                get_debugreg(vcpu->arch.host_db[1], 1);
                get_debugreg(vcpu->arch.host_db[2], 2);
                get_debugreg(vcpu->arch.host_db[3], 3);

                set_debugreg(0, 7);
                set_debugreg(vcpu->arch.eff_db[0], 0);
                set_debugreg(vcpu->arch.eff_db[1], 1);
                set_debugreg(vcpu->arch.eff_db[2], 2);
                set_debugreg(vcpu->arch.eff_db[3], 3);
        }

        KVMTRACE_0D(VMENTRY, vcpu, entryexit);
        kvm_x86_ops->run(vcpu, kvm_run);

        if (unlikely(vcpu->arch.switch_db_regs)) {
                set_debugreg(0, 7);
                set_debugreg(vcpu->arch.host_db[0], 0);
                set_debugreg(vcpu->arch.host_db[1], 1);
                set_debugreg(vcpu->arch.host_db[2], 2);
                set_debugreg(vcpu->arch.host_db[3], 3);
        }
        set_debugreg(vcpu->arch.host_dr6, 6);
        set_debugreg(vcpu->arch.host_dr7, 7);

        vcpu->guest_mode = 0;
        local_irq_enable();

        ++vcpu->stat.exits;

        /*
         * We must have an instruction between local_irq_enable() and
         * kvm_guest_exit(), so the timer interrupt isn't delayed by
         * the interrupt shadow.  The stat.exits increment will do nicely.
         * But we need to prevent reordering, hence this barrier():
         */
        barrier();

        kvm_guest_exit();

        preempt_enable();

        down_read(&vcpu->kvm->slots_lock);

        /*
         * Profile KVM exit RIPs:
         */
        if (unlikely(prof_on == KVM_PROFILING)) {
                unsigned long rip = kvm_rip_read(vcpu);
                profile_hit(KVM_PROFILING, (void *)rip);
        }

        kvm_lapic_sync_from_vapic(vcpu);

        r = kvm_x86_ops->handle_exit(kvm_run, vcpu);
out:
        return r;
}

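/*
 * Outer run loop: alternates between vcpu_enter_guest() while the vcpu
 * is RUNNABLE and kvm_vcpu_block() while it is halted, injecting timer
 * interrupts as they become pending and breaking out to userspace on
 * signals, userspace interrupt-injection requests, or errors.
 */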
static int __vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
        int r;

        if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_SIPI_RECEIVED)) {
                pr_debug("vcpu %d received sipi with vector # %x\n",
                         vcpu->vcpu_id, vcpu->arch.sipi_vector);
                kvm_lapic_reset(vcpu);
                r = kvm_arch_vcpu_reset(vcpu);
                if (r)
                        return r;
                vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
        }

        down_read(&vcpu->kvm->slots_lock);
        vapic_enter(vcpu);

        r = 1;
        while (r > 0) {
                if (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE)
                        r = vcpu_enter_guest(vcpu, kvm_run);
                else {
                        up_read(&vcpu->kvm->slots_lock);
                        kvm_vcpu_block(vcpu);
                        down_read(&vcpu->kvm->slots_lock);
                        if (test_and_clear_bit(KVM_REQ_UNHALT, &vcpu->requests)) {
                                switch (vcpu->arch.mp_state) {
                                case KVM_MP_STATE_HALTED:
                                        vcpu->arch.mp_state =
                                                KVM_MP_STATE_RUNNABLE;
                                        /* fall through */
                                case KVM_MP_STATE_RUNNABLE:
                                        break;
                                case KVM_MP_STATE_SIPI_RECEIVED:
                                default:
                                        r = -EINTR;
                                        break;
                                }
                        }
                }

                if (r <= 0)
                        break;

                clear_bit(KVM_REQ_PENDING_TIMER, &vcpu->requests);
                if (kvm_cpu_has_pending_timer(vcpu))
                        kvm_inject_pending_timer_irqs(vcpu);

                if (dm_request_for_irq_injection(vcpu, kvm_run)) {
                        r = -EINTR;
                        kvm_run->exit_reason = KVM_EXIT_INTR;
                        ++vcpu->stat.request_irq_exits;
                }
                if (signal_pending(current)) {
                        r = -EINTR;
                        kvm_run->exit_reason = KVM_EXIT_INTR;
                        ++vcpu->stat.signal_exits;
                }
                if (need_resched()) {
                        up_read(&vcpu->kvm->slots_lock);
                        kvm_resched(vcpu);
                        down_read(&vcpu->kvm->slots_lock);
                }
        }

        up_read(&vcpu->kvm->slots_lock);
        post_kvm_run_save(vcpu, kvm_run);

        vapic_exit(vcpu);

        return r;
}

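/*
 * Entry point for the KVM_RUN ioctl.  A typical userspace caller drives
 * this in a loop roughly like the sketch below (illustrative only --
 * error handling and the mmap of the kvm_run structure are omitted, and
 * "vcpu_fd" is a hypothetical vcpu file descriptor):
 *
 *        for (;;) {
 *                ioctl(vcpu_fd, KVM_RUN, 0);
 *                switch (run->exit_reason) {
 *                case KVM_EXIT_IO:   ... emulate the port I/O ...   break;
 *                case KVM_EXIT_MMIO: ... emulate the access ...     break;
 *                case KVM_EXIT_INTR: ... interrupted, retry ...     break;
 *                }
 *        }
 */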
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
        int r;
        sigset_t sigsaved;

        vcpu_load(vcpu);

        if (vcpu->sigset_active)
                sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

        if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_UNINITIALIZED)) {
                kvm_vcpu_block(vcpu);
                clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
                r = -EAGAIN;
                goto out;
        }

        /* re-sync apic's tpr */
        if (!irqchip_in_kernel(vcpu->kvm))
                kvm_set_cr8(vcpu, kvm_run->cr8);

        if (vcpu->arch.pio.cur_count) {
                r = complete_pio(vcpu);
                if (r)
                        goto out;
        }
#ifdef CONFIG_HAS_IOMEM
        if (vcpu->mmio_needed) {
                memcpy(vcpu->mmio_data, kvm_run->mmio.data, 8);
                vcpu->mmio_read_completed = 1;
                vcpu->mmio_needed = 0;

                down_read(&vcpu->kvm->slots_lock);
                r = emulate_instruction(vcpu, kvm_run,
                                        vcpu->arch.mmio_fault_cr2, 0,
                                        EMULTYPE_NO_DECODE);
                up_read(&vcpu->kvm->slots_lock);
                if (r == EMULATE_DO_MMIO) {
                        /*
                         * Read-modify-write.  Back to userspace.
                         */
                        r = 0;
                        goto out;
                }
        }
#endif
        if (kvm_run->exit_reason == KVM_EXIT_HYPERCALL)
                kvm_register_write(vcpu, VCPU_REGS_RAX,
                                   kvm_run->hypercall.ret);

        r = __vcpu_run(vcpu, kvm_run);

out:
        if (vcpu->sigset_active)
                sigprocmask(SIG_SETMASK, &sigsaved, NULL);

        vcpu_put(vcpu);
        return r;
}

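/*
 * Read out the guest's general-purpose register file for KVM_GET_REGS.
 * Illustrative userspace usage (assuming an open, hypothetical vcpu_fd;
 * no error handling):
 *
 *        struct kvm_regs regs;
 *        if (ioctl(vcpu_fd, KVM_GET_REGS, &regs) == 0)
 *                printf("rip = %llx\n", (unsigned long long)regs.rip);
 */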
int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
        vcpu_load(vcpu);

        regs->rax = kvm_register_read(vcpu, VCPU_REGS_RAX);
        regs->rbx = kvm_register_read(vcpu, VCPU_REGS_RBX);
        regs->rcx = kvm_register_read(vcpu, VCPU_REGS_RCX);
        regs->rdx = kvm_register_read(vcpu, VCPU_REGS_RDX);
        regs->rsi = kvm_register_read(vcpu, VCPU_REGS_RSI);
        regs->rdi = kvm_register_read(vcpu, VCPU_REGS_RDI);
        regs->rsp = kvm_register_read(vcpu, VCPU_REGS_RSP);
        regs->rbp = kvm_register_read(vcpu, VCPU_REGS_RBP);
#ifdef CONFIG_X86_64
        regs->r8 = kvm_register_read(vcpu, VCPU_REGS_R8);
        regs->r9 = kvm_register_read(vcpu, VCPU_REGS_R9);
        regs->r10 = kvm_register_read(vcpu, VCPU_REGS_R10);
        regs->r11 = kvm_register_read(vcpu, VCPU_REGS_R11);
        regs->r12 = kvm_register_read(vcpu, VCPU_REGS_R12);
        regs->r13 = kvm_register_read(vcpu, VCPU_REGS_R13);
        regs->r14 = kvm_register_read(vcpu, VCPU_REGS_R14);
        regs->r15 = kvm_register_read(vcpu, VCPU_REGS_R15);
#endif

        regs->rip = kvm_rip_read(vcpu);
        regs->rflags = kvm_x86_ops->get_rflags(vcpu);

        /*
         * Don't leak debug flags in case they were set for guest debugging
         */
        if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
                regs->rflags &= ~(X86_EFLAGS_TF | X86_EFLAGS_RF);

        vcpu_put(vcpu);

        return 0;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
        vcpu_load(vcpu);

        kvm_register_write(vcpu, VCPU_REGS_RAX, regs->rax);
        kvm_register_write(vcpu, VCPU_REGS_RBX, regs->rbx);
        kvm_register_write(vcpu, VCPU_REGS_RCX, regs->rcx);
        kvm_register_write(vcpu, VCPU_REGS_RDX, regs->rdx);
        kvm_register_write(vcpu, VCPU_REGS_RSI, regs->rsi);
        kvm_register_write(vcpu, VCPU_REGS_RDI, regs->rdi);
        kvm_register_write(vcpu, VCPU_REGS_RSP, regs->rsp);
        kvm_register_write(vcpu, VCPU_REGS_RBP, regs->rbp);
#ifdef CONFIG_X86_64
        kvm_register_write(vcpu, VCPU_REGS_R8, regs->r8);
        kvm_register_write(vcpu, VCPU_REGS_R9, regs->r9);
        kvm_register_write(vcpu, VCPU_REGS_R10, regs->r10);
        kvm_register_write(vcpu, VCPU_REGS_R11, regs->r11);
        kvm_register_write(vcpu, VCPU_REGS_R12, regs->r12);
        kvm_register_write(vcpu, VCPU_REGS_R13, regs->r13);
        kvm_register_write(vcpu, VCPU_REGS_R14, regs->r14);
        kvm_register_write(vcpu, VCPU_REGS_R15, regs->r15);
#endif

        kvm_rip_write(vcpu, regs->rip);
        kvm_x86_ops->set_rflags(vcpu, regs->rflags);

        vcpu->arch.exception.pending = false;

        vcpu_put(vcpu);

        return 0;
}

void kvm_get_segment(struct kvm_vcpu *vcpu,
                     struct kvm_segment *var, int seg)
{
        kvm_x86_ops->get_segment(vcpu, var, seg);
}

void kvm_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l)
{
        struct kvm_segment cs;

        kvm_get_segment(vcpu, &cs, VCPU_SREG_CS);
        *db = cs.db;
        *l = cs.l;
}
EXPORT_SYMBOL_GPL(kvm_get_cs_db_l_bits);

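/*
 * Fill a struct kvm_sregs snapshot for KVM_GET_SREGS: segment and
 * descriptor-table registers, control registers, EFER, the APIC base,
 * and the pending-interrupt bitmap (only meaningful with a userspace
 * irqchip).
 */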
int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
                                  struct kvm_sregs *sregs)
{
        struct descriptor_table dt;

        vcpu_load(vcpu);

        kvm_get_segment(vcpu, &sregs->cs, VCPU_SREG_CS);
        kvm_get_segment(vcpu, &sregs->ds, VCPU_SREG_DS);
        kvm_get_segment(vcpu, &sregs->es, VCPU_SREG_ES);
        kvm_get_segment(vcpu, &sregs->fs, VCPU_SREG_FS);
        kvm_get_segment(vcpu, &sregs->gs, VCPU_SREG_GS);
        kvm_get_segment(vcpu, &sregs->ss, VCPU_SREG_SS);

        kvm_get_segment(vcpu, &sregs->tr, VCPU_SREG_TR);
        kvm_get_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR);

        kvm_x86_ops->get_idt(vcpu, &dt);
        sregs->idt.limit = dt.limit;
        sregs->idt.base = dt.base;
        kvm_x86_ops->get_gdt(vcpu, &dt);
        sregs->gdt.limit = dt.limit;
        sregs->gdt.base = dt.base;

        kvm_x86_ops->decache_cr4_guest_bits(vcpu);
        sregs->cr0 = vcpu->arch.cr0;
        sregs->cr2 = vcpu->arch.cr2;
        sregs->cr3 = vcpu->arch.cr3;
        sregs->cr4 = vcpu->arch.cr4;
        sregs->cr8 = kvm_get_cr8(vcpu);
        sregs->efer = vcpu->arch.shadow_efer;
        sregs->apic_base = kvm_get_apic_base(vcpu);

        if (irqchip_in_kernel(vcpu->kvm))
                memset(sregs->interrupt_bitmap, 0,
                       sizeof sregs->interrupt_bitmap);
        else
                memcpy(sregs->interrupt_bitmap, vcpu->arch.irq_pending,
                       sizeof sregs->interrupt_bitmap);

        if (vcpu->arch.interrupt.pending)
                set_bit(vcpu->arch.interrupt.nr,
                        (unsigned long *)sregs->interrupt_bitmap);

        vcpu_put(vcpu);

        return 0;
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
                                    struct kvm_mp_state *mp_state)
{
        vcpu_load(vcpu);
        mp_state->mp_state = vcpu->arch.mp_state;
        vcpu_put(vcpu);
        return 0;
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
                                    struct kvm_mp_state *mp_state)
{
        vcpu_load(vcpu);
        vcpu->arch.mp_state = mp_state->mp_state;
        vcpu_put(vcpu);
        return 0;
}

static void kvm_set_segment(struct kvm_vcpu *vcpu,
                            struct kvm_segment *var, int seg)
{
        kvm_x86_ops->set_segment(vcpu, var, seg);
}

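/*
 * Unpack an 8-byte GDT/LDT descriptor into the flat kvm_segment
 * layout: the base is scattered across base0/base1/base2, the limit
 * across limit0 and the high nibble, and a set granularity bit scales
 * the limit to 4K units (hence the << 12 | 0xfff below).
 */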
static void seg_desct_to_kvm_desct(struct desc_struct *seg_desc, u16 selector,
                                   struct kvm_segment *kvm_desct)
{
        kvm_desct->base = seg_desc->base0;
        kvm_desct->base |= seg_desc->base1 << 16;
        kvm_desct->base |= seg_desc->base2 << 24;
        kvm_desct->limit = seg_desc->limit0;
        kvm_desct->limit |= seg_desc->limit << 16;
        if (seg_desc->g) {
                kvm_desct->limit <<= 12;
                kvm_desct->limit |= 0xfff;
        }
        kvm_desct->selector = selector;
        kvm_desct->type = seg_desc->type;
        kvm_desct->present = seg_desc->p;
        kvm_desct->dpl = seg_desc->dpl;
        kvm_desct->db = seg_desc->d;
        kvm_desct->s = seg_desc->s;
        kvm_desct->l = seg_desc->l;
        kvm_desct->g = seg_desc->g;
        kvm_desct->avl = seg_desc->avl;
        if (!selector)
                kvm_desct->unusable = 1;
        else
                kvm_desct->unusable = 0;
        kvm_desct->padding = 0;
}

static void get_segment_descriptor_dtable(struct kvm_vcpu *vcpu,
                                          u16 selector,
                                          struct descriptor_table *dtable)
{
        if (selector & 1 << 2) {
                struct kvm_segment kvm_seg;

                kvm_get_segment(vcpu, &kvm_seg, VCPU_SREG_LDTR);

                if (kvm_seg.unusable)
                        dtable->limit = 0;
                else
                        dtable->limit = kvm_seg.limit;
                dtable->base = kvm_seg.base;
        } else
                kvm_x86_ops->get_gdt(vcpu, dtable);
}

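/*
 * Descriptor-table helpers for task-switch emulation: pick the GDT or
 * the LDT (selector bit 2 selects the LDT) and read or write the
 * 8-byte descriptor through the guest page tables.
 */
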
/* allowed only for 8-byte segment descriptors */
static int load_guest_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector,
                                         struct desc_struct *seg_desc)
{
        gpa_t gpa;
        struct descriptor_table dtable;
        u16 index = selector >> 3;

        get_segment_descriptor_dtable(vcpu, selector, &dtable);

        if (dtable.limit < index * 8 + 7) {
                kvm_queue_exception_e(vcpu, GP_VECTOR, selector & 0xfffc);
                return 1;
        }
        gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, dtable.base);
        gpa += index * 8;
        return kvm_read_guest(vcpu->kvm, gpa, seg_desc, 8);
}

/* allowed only for 8-byte segment descriptors */
static int save_guest_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector,
                                         struct desc_struct *seg_desc)
{
        gpa_t gpa;
        struct descriptor_table dtable;
        u16 index = selector >> 3;

        get_segment_descriptor_dtable(vcpu, selector, &dtable);

        if (dtable.limit < index * 8 + 7)
                return 1;
        gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, dtable.base);
        gpa += index * 8;
        return kvm_write_guest(vcpu->kvm, gpa, seg_desc, 8);
}

static u32 get_tss_base_addr(struct kvm_vcpu *vcpu,
                             struct desc_struct *seg_desc)
{
        u32 base_addr;

        base_addr = seg_desc->base0;
        base_addr |= (seg_desc->base1 << 16);
        base_addr |= (seg_desc->base2 << 24);

        return vcpu->arch.mmu.gva_to_gpa(vcpu, base_addr);
}

static u16 get_segment_selector(struct kvm_vcpu *vcpu, int seg)
{
        struct kvm_segment kvm_seg;

        kvm_get_segment(vcpu, &kvm_seg, seg);
        return kvm_seg.selector;
}

static int load_segment_descriptor_to_kvm_desct(struct kvm_vcpu *vcpu,
                                                u16 selector,
                                                struct kvm_segment *kvm_seg)
{
        struct desc_struct seg_desc;

        if (load_guest_segment_descriptor(vcpu, selector, &seg_desc))
                return 1;
        seg_desct_to_kvm_desct(&seg_desc, selector, kvm_seg);
        return 0;
}

static int kvm_load_realmode_segment(struct kvm_vcpu *vcpu, u16 selector, int seg)
{
        struct kvm_segment segvar = {
                .base = selector << 4,
                .limit = 0xffff,
                .selector = selector,
                .type = 3,
                .present = 1,
                .dpl = 3,
                .db = 0,
                .s = 1,
                .l = 0,
                .g = 0,
                .avl = 0,
                .unusable = 0,
        };
        kvm_x86_ops->set_segment(vcpu, &segvar, seg);
        return 0;
}

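/*
 * Load a segment register from a selector on behalf of the task-switch
 * code: in protected mode the descriptor is fetched and converted, in
 * real mode the segment is synthesized from the selector
 * (base = selector << 4).
 */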
int kvm_load_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector,
                                int type_bits, int seg)
{
        struct kvm_segment kvm_seg;

        if (!(vcpu->arch.cr0 & X86_CR0_PE))
                return kvm_load_realmode_segment(vcpu, selector, seg);
        if (load_segment_descriptor_to_kvm_desct(vcpu, selector, &kvm_seg))
                return 1;
        kvm_seg.type |= type_bits;

        if (seg != VCPU_SREG_SS && seg != VCPU_SREG_CS &&
            seg != VCPU_SREG_LDTR)
                if (!kvm_seg.s)
                        kvm_seg.unusable = 1;

        kvm_set_segment(vcpu, &kvm_seg, seg);
        return 0;
}

static void save_state_to_tss32(struct kvm_vcpu *vcpu,
                                struct tss_segment_32 *tss)
{
        tss->cr3 = vcpu->arch.cr3;
        tss->eip = kvm_rip_read(vcpu);
        tss->eflags = kvm_x86_ops->get_rflags(vcpu);
        tss->eax = kvm_register_read(vcpu, VCPU_REGS_RAX);
        tss->ecx = kvm_register_read(vcpu, VCPU_REGS_RCX);
        tss->edx = kvm_register_read(vcpu, VCPU_REGS_RDX);
        tss->ebx = kvm_register_read(vcpu, VCPU_REGS_RBX);
        tss->esp = kvm_register_read(vcpu, VCPU_REGS_RSP);
        tss->ebp = kvm_register_read(vcpu, VCPU_REGS_RBP);
        tss->esi = kvm_register_read(vcpu, VCPU_REGS_RSI);
        tss->edi = kvm_register_read(vcpu, VCPU_REGS_RDI);
        tss->es = get_segment_selector(vcpu, VCPU_SREG_ES);
        tss->cs = get_segment_selector(vcpu, VCPU_SREG_CS);
        tss->ss = get_segment_selector(vcpu, VCPU_SREG_SS);
        tss->ds = get_segment_selector(vcpu, VCPU_SREG_DS);
        tss->fs = get_segment_selector(vcpu, VCPU_SREG_FS);
        tss->gs = get_segment_selector(vcpu, VCPU_SREG_GS);
        tss->ldt_selector = get_segment_selector(vcpu, VCPU_SREG_LDTR);
}

static int load_state_from_tss32(struct kvm_vcpu *vcpu,
                                 struct tss_segment_32 *tss)
{
        kvm_set_cr3(vcpu, tss->cr3);

        kvm_rip_write(vcpu, tss->eip);
        kvm_x86_ops->set_rflags(vcpu, tss->eflags | 2);

        kvm_register_write(vcpu, VCPU_REGS_RAX, tss->eax);
        kvm_register_write(vcpu, VCPU_REGS_RCX, tss->ecx);
        kvm_register_write(vcpu, VCPU_REGS_RDX, tss->edx);
        kvm_register_write(vcpu, VCPU_REGS_RBX, tss->ebx);
        kvm_register_write(vcpu, VCPU_REGS_RSP, tss->esp);
        kvm_register_write(vcpu, VCPU_REGS_RBP, tss->ebp);
        kvm_register_write(vcpu, VCPU_REGS_RSI, tss->esi);
        kvm_register_write(vcpu, VCPU_REGS_RDI, tss->edi);

        if (kvm_load_segment_descriptor(vcpu, tss->ldt_selector, 0, VCPU_SREG_LDTR))
                return 1;

        if (kvm_load_segment_descriptor(vcpu, tss->es, 1, VCPU_SREG_ES))
                return 1;

        if (kvm_load_segment_descriptor(vcpu, tss->cs, 9, VCPU_SREG_CS))
                return 1;

        if (kvm_load_segment_descriptor(vcpu, tss->ss, 1, VCPU_SREG_SS))
                return 1;

        if (kvm_load_segment_descriptor(vcpu, tss->ds, 1, VCPU_SREG_DS))
                return 1;

        if (kvm_load_segment_descriptor(vcpu, tss->fs, 1, VCPU_SREG_FS))
                return 1;

        if (kvm_load_segment_descriptor(vcpu, tss->gs, 1, VCPU_SREG_GS))
                return 1;
        return 0;
}

static void save_state_to_tss16(struct kvm_vcpu *vcpu,
                                struct tss_segment_16 *tss)
{
        tss->ip = kvm_rip_read(vcpu);
        tss->flag = kvm_x86_ops->get_rflags(vcpu);
        tss->ax = kvm_register_read(vcpu, VCPU_REGS_RAX);
        tss->cx = kvm_register_read(vcpu, VCPU_REGS_RCX);
        tss->dx = kvm_register_read(vcpu, VCPU_REGS_RDX);
        tss->bx = kvm_register_read(vcpu, VCPU_REGS_RBX);
        tss->sp = kvm_register_read(vcpu, VCPU_REGS_RSP);
        tss->bp = kvm_register_read(vcpu, VCPU_REGS_RBP);
        tss->si = kvm_register_read(vcpu, VCPU_REGS_RSI);
        tss->di = kvm_register_read(vcpu, VCPU_REGS_RDI);

        tss->es = get_segment_selector(vcpu, VCPU_SREG_ES);
        tss->cs = get_segment_selector(vcpu, VCPU_SREG_CS);
        tss->ss = get_segment_selector(vcpu, VCPU_SREG_SS);
        tss->ds = get_segment_selector(vcpu, VCPU_SREG_DS);
        tss->ldt = get_segment_selector(vcpu, VCPU_SREG_LDTR);
        tss->prev_task_link = get_segment_selector(vcpu, VCPU_SREG_TR);
}

static int load_state_from_tss16(struct kvm_vcpu *vcpu,
                                 struct tss_segment_16 *tss)
{
        kvm_rip_write(vcpu, tss->ip);
        kvm_x86_ops->set_rflags(vcpu, tss->flag | 2);
        kvm_register_write(vcpu, VCPU_REGS_RAX, tss->ax);
        kvm_register_write(vcpu, VCPU_REGS_RCX, tss->cx);
        kvm_register_write(vcpu, VCPU_REGS_RDX, tss->dx);
        kvm_register_write(vcpu, VCPU_REGS_RBX, tss->bx);
        kvm_register_write(vcpu, VCPU_REGS_RSP, tss->sp);
        kvm_register_write(vcpu, VCPU_REGS_RBP, tss->bp);
        kvm_register_write(vcpu, VCPU_REGS_RSI, tss->si);
        kvm_register_write(vcpu, VCPU_REGS_RDI, tss->di);

        if (kvm_load_segment_descriptor(vcpu, tss->ldt, 0, VCPU_SREG_LDTR))
                return 1;

        if (kvm_load_segment_descriptor(vcpu, tss->es, 1, VCPU_SREG_ES))
                return 1;

        if (kvm_load_segment_descriptor(vcpu, tss->cs, 9, VCPU_SREG_CS))
                return 1;

        if (kvm_load_segment_descriptor(vcpu, tss->ss, 1, VCPU_SREG_SS))
                return 1;

        if (kvm_load_segment_descriptor(vcpu, tss->ds, 1, VCPU_SREG_DS))
                return 1;
        return 0;
}

static int kvm_task_switch_16(struct kvm_vcpu *vcpu, u16 tss_selector,
                              u16 old_tss_sel, u32 old_tss_base,
                              struct desc_struct *nseg_desc)
{
        struct tss_segment_16 tss_segment_16;
        int ret = 0;

        if (kvm_read_guest(vcpu->kvm, old_tss_base, &tss_segment_16,
                           sizeof tss_segment_16))
                goto out;

        save_state_to_tss16(vcpu, &tss_segment_16);

        if (kvm_write_guest(vcpu->kvm, old_tss_base, &tss_segment_16,
                            sizeof tss_segment_16))
                goto out;

        if (kvm_read_guest(vcpu->kvm, get_tss_base_addr(vcpu, nseg_desc),
                           &tss_segment_16, sizeof tss_segment_16))
                goto out;

        if (old_tss_sel != 0xffff) {
                tss_segment_16.prev_task_link = old_tss_sel;

                if (kvm_write_guest(vcpu->kvm,
                                    get_tss_base_addr(vcpu, nseg_desc),
                                    &tss_segment_16.prev_task_link,
                                    sizeof tss_segment_16.prev_task_link))
                        goto out;
        }

        if (load_state_from_tss16(vcpu, &tss_segment_16))
                goto out;

        ret = 1;
out:
        return ret;
}

static int kvm_task_switch_32(struct kvm_vcpu *vcpu, u16 tss_selector,
                              u16 old_tss_sel, u32 old_tss_base,
                              struct desc_struct *nseg_desc)
{
        struct tss_segment_32 tss_segment_32;
        int ret = 0;

        if (kvm_read_guest(vcpu->kvm, old_tss_base, &tss_segment_32,
                           sizeof tss_segment_32))
                goto out;

        save_state_to_tss32(vcpu, &tss_segment_32);

        if (kvm_write_guest(vcpu->kvm, old_tss_base, &tss_segment_32,
                            sizeof tss_segment_32))
                goto out;

        if (kvm_read_guest(vcpu->kvm, get_tss_base_addr(vcpu, nseg_desc),
                           &tss_segment_32, sizeof tss_segment_32))
                goto out;

        if (old_tss_sel != 0xffff) {
                tss_segment_32.prev_task_link = old_tss_sel;

                if (kvm_write_guest(vcpu->kvm,
                                    get_tss_base_addr(vcpu, nseg_desc),
                                    &tss_segment_32.prev_task_link,
                                    sizeof tss_segment_32.prev_task_link))
                        goto out;
        }

        if (load_state_from_tss32(vcpu, &tss_segment_32))
                goto out;

        ret = 1;
out:
        return ret;
}

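/*
 * Software emulation of a hardware task switch (CALL/JMP/IRET through a
 * TSS selector or task gate).  Saves the current register state into
 * the outgoing TSS, loads the incoming one, and maintains the busy bit,
 * the TSS back link and EFLAGS.NT according to the switch reason.
 */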
int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason)
{
        struct kvm_segment tr_seg;
        struct desc_struct cseg_desc;
        struct desc_struct nseg_desc;
        int ret = 0;
        u32 old_tss_base = get_segment_base(vcpu, VCPU_SREG_TR);
        u16 old_tss_sel = get_segment_selector(vcpu, VCPU_SREG_TR);

        old_tss_base = vcpu->arch.mmu.gva_to_gpa(vcpu, old_tss_base);

        /* FIXME: Handle errors. Failure to read either TSS or their
         * descriptors should generate a pagefault.
         */
        if (load_guest_segment_descriptor(vcpu, tss_selector, &nseg_desc))
                goto out;

        if (load_guest_segment_descriptor(vcpu, old_tss_sel, &cseg_desc))
                goto out;

        if (reason != TASK_SWITCH_IRET) {
                int cpl;

                cpl = kvm_x86_ops->get_cpl(vcpu);
                if ((tss_selector & 3) > nseg_desc.dpl || cpl > nseg_desc.dpl) {
                        kvm_queue_exception_e(vcpu, GP_VECTOR, 0);
                        return 1;
                }
        }

        if (!nseg_desc.p || (nseg_desc.limit0 | nseg_desc.limit << 16) < 0x67) {
                kvm_queue_exception_e(vcpu, TS_VECTOR, tss_selector & 0xfffc);
                return 1;
        }

        if (reason == TASK_SWITCH_IRET || reason == TASK_SWITCH_JMP) {
                cseg_desc.type &= ~(1 << 1); /* clear the B flag */
                save_guest_segment_descriptor(vcpu, old_tss_sel, &cseg_desc);
        }

        if (reason == TASK_SWITCH_IRET) {
                u32 eflags = kvm_x86_ops->get_rflags(vcpu);
                kvm_x86_ops->set_rflags(vcpu, eflags & ~X86_EFLAGS_NT);
        }

        /* set back link to prev task only if NT bit is set in eflags
           note that old_tss_sel is not used after this point */
        if (reason != TASK_SWITCH_CALL && reason != TASK_SWITCH_GATE)
                old_tss_sel = 0xffff;

        if (nseg_desc.type & 8)
                ret = kvm_task_switch_32(vcpu, tss_selector, old_tss_sel,
                                         old_tss_base, &nseg_desc);
        else
                ret = kvm_task_switch_16(vcpu, tss_selector, old_tss_sel,
                                         old_tss_base, &nseg_desc);

        if (reason == TASK_SWITCH_CALL || reason == TASK_SWITCH_GATE) {
                u32 eflags = kvm_x86_ops->get_rflags(vcpu);
                kvm_x86_ops->set_rflags(vcpu, eflags | X86_EFLAGS_NT);
        }

        if (reason != TASK_SWITCH_IRET) {
                nseg_desc.type |= (1 << 1);
                save_guest_segment_descriptor(vcpu, tss_selector,
                                              &nseg_desc);
        }

        kvm_x86_ops->set_cr0(vcpu, vcpu->arch.cr0 | X86_CR0_TS);
        seg_desct_to_kvm_desct(&nseg_desc, tss_selector, &tr_seg);
        tr_seg.type = 11;
        kvm_set_segment(vcpu, &tr_seg, VCPU_SREG_TR);
out:
        return ret;
}
EXPORT_SYMBOL_GPL(kvm_task_switch);

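/*
 * Counterpart of kvm_arch_vcpu_ioctl_get_sregs() for KVM_SET_SREGS:
 * installs descriptor tables, control registers and segments, reloads
 * the PDPTRs when PAE paging is active, and resets the MMU context if
 * a paging-relevant register changed.
 */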
int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
                                  struct kvm_sregs *sregs)
{
        int mmu_reset_needed = 0;
        int i, pending_vec, max_bits;
        struct descriptor_table dt;

        vcpu_load(vcpu);

        dt.limit = sregs->idt.limit;
        dt.base = sregs->idt.base;
        kvm_x86_ops->set_idt(vcpu, &dt);
        dt.limit = sregs->gdt.limit;
        dt.base = sregs->gdt.base;
        kvm_x86_ops->set_gdt(vcpu, &dt);

        vcpu->arch.cr2 = sregs->cr2;
        mmu_reset_needed |= vcpu->arch.cr3 != sregs->cr3;

        down_read(&vcpu->kvm->slots_lock);
        if (gfn_to_memslot(vcpu->kvm, sregs->cr3 >> PAGE_SHIFT))
                vcpu->arch.cr3 = sregs->cr3;
        else
                set_bit(KVM_REQ_TRIPLE_FAULT, &vcpu->requests);
        up_read(&vcpu->kvm->slots_lock);

        kvm_set_cr8(vcpu, sregs->cr8);

        mmu_reset_needed |= vcpu->arch.shadow_efer != sregs->efer;
        kvm_x86_ops->set_efer(vcpu, sregs->efer);
        kvm_set_apic_base(vcpu, sregs->apic_base);

        kvm_x86_ops->decache_cr4_guest_bits(vcpu);

        mmu_reset_needed |= vcpu->arch.cr0 != sregs->cr0;
        kvm_x86_ops->set_cr0(vcpu, sregs->cr0);
        vcpu->arch.cr0 = sregs->cr0;

        mmu_reset_needed |= vcpu->arch.cr4 != sregs->cr4;
        kvm_x86_ops->set_cr4(vcpu, sregs->cr4);
        if (!is_long_mode(vcpu) && is_pae(vcpu))
                load_pdptrs(vcpu, vcpu->arch.cr3);

        if (mmu_reset_needed)
                kvm_mmu_reset_context(vcpu);

        if (!irqchip_in_kernel(vcpu->kvm)) {
                memcpy(vcpu->arch.irq_pending, sregs->interrupt_bitmap,
                       sizeof vcpu->arch.irq_pending);
                vcpu->arch.irq_summary = 0;
                for (i = 0; i < ARRAY_SIZE(vcpu->arch.irq_pending); ++i)
                        if (vcpu->arch.irq_pending[i])
                                __set_bit(i, &vcpu->arch.irq_summary);
        } else {
                max_bits = (sizeof sregs->interrupt_bitmap) << 3;
                pending_vec = find_first_bit(
                        (const unsigned long *)sregs->interrupt_bitmap,
                        max_bits);
                /* Only pending external irq is handled here */
                if (pending_vec < max_bits) {
                        kvm_queue_interrupt(vcpu, pending_vec);
                        pr_debug("Set back pending irq %d\n", pending_vec);
                }
                kvm_pic_clear_isr_ack(vcpu->kvm);
        }

        kvm_set_segment(vcpu, &sregs->cs, VCPU_SREG_CS);
        kvm_set_segment(vcpu, &sregs->ds, VCPU_SREG_DS);
        kvm_set_segment(vcpu, &sregs->es, VCPU_SREG_ES);
        kvm_set_segment(vcpu, &sregs->fs, VCPU_SREG_FS);
        kvm_set_segment(vcpu, &sregs->gs, VCPU_SREG_GS);
        kvm_set_segment(vcpu, &sregs->ss, VCPU_SREG_SS);

        kvm_set_segment(vcpu, &sregs->tr, VCPU_SREG_TR);
        kvm_set_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR);

        /* Older userspace won't unhalt the vcpu on reset. */
        if (vcpu->vcpu_id == 0 && kvm_rip_read(vcpu) == 0xfff0 &&
            sregs->cs.selector == 0xf000 && sregs->cs.base == 0xffff0000 &&
            !(vcpu->arch.cr0 & X86_CR0_PE))
                vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;

        vcpu_put(vcpu);

        return 0;
}

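/*
 * KVM_SET_GUEST_DEBUG: choose which debug registers take effect while
 * the guest runs (the host debugger's or the guest's own) and
 * optionally queue a #DB or #BP into the guest.  Illustrative
 * userspace usage for a single-step request (hypothetical vcpu_fd,
 * no error handling):
 *
 *        struct kvm_guest_debug dbg = {
 *                .control = KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_SINGLESTEP,
 *        };
 *        ioctl(vcpu_fd, KVM_SET_GUEST_DEBUG, &dbg);
 */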
int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
                                        struct kvm_guest_debug *dbg)
{
        int i, r;

        vcpu_load(vcpu);

        if ((dbg->control & (KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_HW_BP)) ==
            (KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_HW_BP)) {
                for (i = 0; i < KVM_NR_DB_REGS; ++i)
                        vcpu->arch.eff_db[i] = dbg->arch.debugreg[i];
                vcpu->arch.switch_db_regs =
                        (dbg->arch.debugreg[7] & DR7_BP_EN_MASK);
        } else {
                for (i = 0; i < KVM_NR_DB_REGS; i++)
                        vcpu->arch.eff_db[i] = vcpu->arch.db[i];
                vcpu->arch.switch_db_regs = (vcpu->arch.dr7 & DR7_BP_EN_MASK);
        }

        r = kvm_x86_ops->set_guest_debug(vcpu, dbg);

        if (dbg->control & KVM_GUESTDBG_INJECT_DB)
                kvm_queue_exception(vcpu, DB_VECTOR);
        else if (dbg->control & KVM_GUESTDBG_INJECT_BP)
                kvm_queue_exception(vcpu, BP_VECTOR);

        vcpu_put(vcpu);

        return r;
}

/*
 * fxsave fpu state.  Taken from x86_64/processor.h.  To be killed when
 * we have asm/x86/processor.h
 */
struct fxsave {
        u16     cwd;
        u16     swd;
        u16     twd;
        u16     fop;
        u64     rip;
        u64     rdp;
        u32     mxcsr;
        u32     mxcsr_mask;
        u32     st_space[32];   /* 8*16 bytes for each FP-reg = 128 bytes */
#ifdef CONFIG_X86_64
        u32     xmm_space[64];  /* 16*16 bytes for each XMM-reg = 256 bytes */
#else
        u32     xmm_space[32];  /* 8*16 bytes for each XMM-reg = 128 bytes */
#endif
};

/*
 * Translate a guest virtual address to a guest physical address.
 */
int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
                                  struct kvm_translation *tr)
{
        unsigned long vaddr = tr->linear_address;
        gpa_t gpa;

        vcpu_load(vcpu);
        down_read(&vcpu->kvm->slots_lock);
        gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, vaddr);
        up_read(&vcpu->kvm->slots_lock);
        tr->physical_address = gpa;
        tr->valid = gpa != UNMAPPED_GVA;
        tr->writeable = 1;
        tr->usermode = 0;
        vcpu_put(vcpu);

        return 0;
}

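/*
 * The KVM_GET_FPU/KVM_SET_FPU accessors below copy between the guest
 * fxsave image and the ioctl layout; the 128-byte memcpy covers the
 * eight 16-byte x87 registers in st_space (see struct fxsave above).
 */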
int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
        struct fxsave *fxsave = (struct fxsave *)&vcpu->arch.guest_fx_image;

        vcpu_load(vcpu);

        memcpy(fpu->fpr, fxsave->st_space, 128);
        fpu->fcw = fxsave->cwd;
        fpu->fsw = fxsave->swd;
        fpu->ftwx = fxsave->twd;
        fpu->last_opcode = fxsave->fop;
        fpu->last_ip = fxsave->rip;
        fpu->last_dp = fxsave->rdp;
        memcpy(fpu->xmm, fxsave->xmm_space, sizeof fxsave->xmm_space);

        vcpu_put(vcpu);

        return 0;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
        struct fxsave *fxsave = (struct fxsave *)&vcpu->arch.guest_fx_image;

        vcpu_load(vcpu);

        memcpy(fxsave->st_space, fpu->fpr, 128);
        fxsave->cwd = fpu->fcw;
        fxsave->swd = fpu->fsw;
        fxsave->twd = fpu->ftwx;
        fxsave->fop = fpu->last_opcode;
        fxsave->rip = fpu->last_ip;
        fxsave->rdp = fpu->last_dp;
        memcpy(fxsave->xmm_space, fpu->xmm, sizeof fxsave->xmm_space);

        vcpu_put(vcpu);

        return 0;
}

void fx_init(struct kvm_vcpu *vcpu)
{
        unsigned after_mxcsr_mask;

        /*
         * Touch the fpu the first time in a non-atomic context: if this
         * is the first fpu instruction, the exception handler will fire
         * before the instruction returns and it'll have to allocate RAM
         * with GFP_KERNEL.
         */
        if (!used_math())
                kvm_fx_save(&vcpu->arch.host_fx_image);

        /* Initialize guest FPU by resetting ours and saving into guest's */
        preempt_disable();
        kvm_fx_save(&vcpu->arch.host_fx_image);
        kvm_fx_finit();
        kvm_fx_save(&vcpu->arch.guest_fx_image);
        kvm_fx_restore(&vcpu->arch.host_fx_image);
        preempt_enable();

        vcpu->arch.cr0 |= X86_CR0_ET;
        after_mxcsr_mask = offsetof(struct i387_fxsave_struct, st_space);
        vcpu->arch.guest_fx_image.mxcsr = 0x1f80;
        memset((void *)&vcpu->arch.guest_fx_image + after_mxcsr_mask,
               0, sizeof(struct i387_fxsave_struct) - after_mxcsr_mask);
}
EXPORT_SYMBOL_GPL(fx_init);

void kvm_load_guest_fpu(struct kvm_vcpu *vcpu)
{
        if (!vcpu->fpu_active || vcpu->guest_fpu_loaded)
                return;

        vcpu->guest_fpu_loaded = 1;
        kvm_fx_save(&vcpu->arch.host_fx_image);
        kvm_fx_restore(&vcpu->arch.guest_fx_image);
}
EXPORT_SYMBOL_GPL(kvm_load_guest_fpu);

void kvm_put_guest_fpu(struct kvm_vcpu *vcpu)
{
        if (!vcpu->guest_fpu_loaded)
                return;

        vcpu->guest_fpu_loaded = 0;
        kvm_fx_save(&vcpu->arch.guest_fx_image);
        kvm_fx_restore(&vcpu->arch.host_fx_image);
        ++vcpu->stat.fpu_reload;
}
EXPORT_SYMBOL_GPL(kvm_put_guest_fpu);

void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
{
        if (vcpu->arch.time_page) {
                kvm_release_page_dirty(vcpu->arch.time_page);
                vcpu->arch.time_page = NULL;
        }

        kvm_x86_ops->vcpu_free(vcpu);
}

struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
                                      unsigned int id)
{
        return kvm_x86_ops->vcpu_create(kvm, id);
}

int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
        int r;

        /* We do fxsave: this must be aligned. */
        BUG_ON((unsigned long)&vcpu->arch.host_fx_image & 0xF);

        vcpu->arch.mtrr_state.have_fixed = 1;
        vcpu_load(vcpu);
        r = kvm_arch_vcpu_reset(vcpu);
        if (r == 0)
                r = kvm_mmu_setup(vcpu);
        vcpu_put(vcpu);
        if (r < 0)
                goto free_vcpu;

        return 0;
free_vcpu:
        kvm_x86_ops->vcpu_free(vcpu);
        return r;
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
        vcpu_load(vcpu);
        kvm_mmu_unload(vcpu);
        vcpu_put(vcpu);

        kvm_x86_ops->vcpu_free(vcpu);
}

int kvm_arch_vcpu_reset(struct kvm_vcpu *vcpu)
{
        vcpu->arch.nmi_pending = false;
        vcpu->arch.nmi_injected = false;

        vcpu->arch.switch_db_regs = 0;
        memset(vcpu->arch.db, 0, sizeof(vcpu->arch.db));
        vcpu->arch.dr6 = DR6_FIXED_1;
        vcpu->arch.dr7 = DR7_FIXED_1;

        return kvm_x86_ops->vcpu_reset(vcpu);
}

void kvm_arch_hardware_enable(void *garbage)
{
        kvm_x86_ops->hardware_enable(garbage);
}

void kvm_arch_hardware_disable(void *garbage)
{
        kvm_x86_ops->hardware_disable(garbage);
}

int kvm_arch_hardware_setup(void)
{
        return kvm_x86_ops->hardware_setup();
}

void kvm_arch_hardware_unsetup(void)
{
        kvm_x86_ops->hardware_unsetup();
}

void kvm_arch_check_processor_compat(void *rtn)
{
        kvm_x86_ops->check_processor_compatibility(rtn);
}

int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
        struct page *page;
        struct kvm *kvm;
        int r;

        BUG_ON(vcpu->kvm == NULL);
        kvm = vcpu->kvm;

        vcpu->arch.mmu.root_hpa = INVALID_PAGE;
        if (!irqchip_in_kernel(kvm) || vcpu->vcpu_id == 0)
                vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
        else
                vcpu->arch.mp_state = KVM_MP_STATE_UNINITIALIZED;

        page = alloc_page(GFP_KERNEL | __GFP_ZERO);
        if (!page) {
                r = -ENOMEM;
                goto fail;
        }
        vcpu->arch.pio_data = page_address(page);

        r = kvm_mmu_create(vcpu);
        if (r < 0)
                goto fail_free_pio_data;

        if (irqchip_in_kernel(kvm)) {
                r = kvm_create_lapic(vcpu);
                if (r < 0)
                        goto fail_mmu_destroy;
        }

        return 0;

fail_mmu_destroy:
        kvm_mmu_destroy(vcpu);
fail_free_pio_data:
        free_page((unsigned long)vcpu->arch.pio_data);
fail:
        return r;
}

void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
        kvm_free_lapic(vcpu);
        down_read(&vcpu->kvm->slots_lock);
        kvm_mmu_destroy(vcpu);
        up_read(&vcpu->kvm->slots_lock);
        free_page((unsigned long)vcpu->arch.pio_data);
}

struct kvm *kvm_arch_create_vm(void)
{
        struct kvm *kvm = kzalloc(sizeof(struct kvm), GFP_KERNEL);

        if (!kvm)
                return ERR_PTR(-ENOMEM);

        INIT_LIST_HEAD(&kvm->arch.active_mmu_pages);
        INIT_LIST_HEAD(&kvm->arch.assigned_dev_head);

        /* Reserve bit 0 of irq_sources_bitmap for userspace irq source */
        set_bit(KVM_USERSPACE_IRQ_SOURCE_ID, &kvm->arch.irq_sources_bitmap);

        rdtscll(kvm->arch.vm_init_tsc);

        return kvm;
}

static void kvm_unload_vcpu_mmu(struct kvm_vcpu *vcpu)
{
        vcpu_load(vcpu);
        kvm_mmu_unload(vcpu);
        vcpu_put(vcpu);
}

static void kvm_free_vcpus(struct kvm *kvm)
{
        unsigned int i;

        /*
         * Unpin any mmu pages first.
         */
        for (i = 0; i < KVM_MAX_VCPUS; ++i)
                if (kvm->vcpus[i])
                        kvm_unload_vcpu_mmu(kvm->vcpus[i]);
        for (i = 0; i < KVM_MAX_VCPUS; ++i) {
                if (kvm->vcpus[i]) {
                        kvm_arch_vcpu_free(kvm->vcpus[i]);
                        kvm->vcpus[i] = NULL;
                }
        }
}

void kvm_arch_sync_events(struct kvm *kvm)
{
        kvm_free_all_assigned_devices(kvm);
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
        kvm_iommu_unmap_guest(kvm);
        kvm_free_pit(kvm);
        kfree(kvm->arch.vpic);
        kfree(kvm->arch.vioapic);
        kvm_free_vcpus(kvm);
        kvm_free_physmem(kvm);
        if (kvm->arch.apic_access_page)
                put_page(kvm->arch.apic_access_page);
        if (kvm->arch.ept_identity_pagetable)
                put_page(kvm->arch.ept_identity_pagetable);
        kfree(kvm);
}

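/*
 * Arch-specific memory slot update.  For legacy (!user_alloc) slots the
 * kernel mmaps/munmaps anonymous memory on userspace's behalf; in all
 * cases the shadow-page budget is recalculated and the slot's mappings
 * are write-protected, with remote TLBs flushed afterwards.
 */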
int kvm_arch_set_memory_region(struct kvm *kvm,
                               struct kvm_userspace_memory_region *mem,
                               struct kvm_memory_slot old,
                               int user_alloc)
{
        int npages = mem->memory_size >> PAGE_SHIFT;
        struct kvm_memory_slot *memslot = &kvm->memslots[mem->slot];

        /* To keep backward compatibility with older userspace,
         * x86 needs to handle the !user_alloc case.
         */
        if (!user_alloc) {
                if (npages && !old.rmap) {
                        unsigned long userspace_addr;

                        down_write(&current->mm->mmap_sem);
                        userspace_addr = do_mmap(NULL, 0,
                                                 npages * PAGE_SIZE,
                                                 PROT_READ | PROT_WRITE,
                                                 MAP_PRIVATE | MAP_ANONYMOUS,
                                                 0);
                        up_write(&current->mm->mmap_sem);

                        if (IS_ERR((void *)userspace_addr))
                                return PTR_ERR((void *)userspace_addr);

                        /* set userspace_addr atomically for kvm_hva_to_rmapp */
                        spin_lock(&kvm->mmu_lock);
                        memslot->userspace_addr = userspace_addr;
                        spin_unlock(&kvm->mmu_lock);
                } else {
                        if (!old.user_alloc && old.rmap) {
                                int ret;

                                down_write(&current->mm->mmap_sem);
                                ret = do_munmap(current->mm, old.userspace_addr,
                                                old.npages * PAGE_SIZE);
                                up_write(&current->mm->mmap_sem);
                                if (ret < 0)
                                        printk(KERN_WARNING
                                               "kvm_vm_ioctl_set_memory_region: "
                                               "failed to munmap memory\n");
                        }
                }
        }

        if (!kvm->arch.n_requested_mmu_pages) {
                unsigned int nr_mmu_pages = kvm_mmu_calculate_mmu_pages(kvm);
                kvm_mmu_change_mmu_pages(kvm, nr_mmu_pages);
        }

        kvm_mmu_slot_remove_write_access(kvm, mem->slot);
        kvm_flush_remote_tlbs(kvm);

        return 0;
}

void kvm_arch_flush_shadow(struct kvm *kvm)
{
        kvm_mmu_zap_all(kvm);
}

int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
        return vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE
               || vcpu->arch.mp_state == KVM_MP_STATE_SIPI_RECEIVED
               || vcpu->arch.nmi_pending;
}

static void vcpu_kick_intr(void *info)
{
#ifdef DEBUG
        struct kvm_vcpu *vcpu = (struct kvm_vcpu *)info;
        printk(KERN_DEBUG "vcpu_kick_intr %p\n", vcpu);
#endif
}

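/*
 * Kick a vcpu out of guest mode so it re-evaluates pending events: wake
 * any waiter blocked in kvm_vcpu_block(), then nudge a remotely-running
 * vcpu with an IPI.  The VM exit taken on interrupt delivery is the
 * whole point; vcpu_kick_intr() itself does nothing.
 */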
void kvm_vcpu_kick(struct kvm_vcpu *vcpu)
{
        int ipi_pcpu = vcpu->cpu;
        int cpu;

        if (waitqueue_active(&vcpu->wq)) {
                wake_up_interruptible(&vcpu->wq);
                ++vcpu->stat.halt_wakeup;
        }
        /*
         * We may be called synchronously with irqs disabled in guest mode,
         * so there is no need to call smp_call_function_single() in that
         * case.
         */
        cpu = get_cpu();
        if (vcpu->guest_mode && vcpu->cpu != cpu)
                smp_call_function_single(ipi_pcpu, vcpu_kick_intr, vcpu, 0);
        put_cpu();
}

int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu)
{
        return kvm_x86_ops->interrupt_allowed(vcpu);
}