kvm_main.c
/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This module enables machines with Intel VT-x extensions to run virtual
 * machines without emulation or binary translation.
 *
 * Copyright (C) 2006 Qumranet, Inc.
 *
 * Authors:
 *   Avi Kivity   <avi@qumranet.com>
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 */
#include "kvm.h"
#include "x86_emulate.h"
#include "segment_descriptor.h"
#include "irq.h"

#include <linux/kvm.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/percpu.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/miscdevice.h>
#include <linux/vmalloc.h>
#include <linux/reboot.h>
#include <linux/debugfs.h>
#include <linux/highmem.h>
#include <linux/file.h>
#include <linux/sysdev.h>
#include <linux/cpu.h>
#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/smp.h>
#include <linux/anon_inodes.h>
#include <linux/profile.h>
#include <linux/kvm_para.h>

#include <asm/processor.h>
#include <asm/msr.h>
#include <asm/io.h>
#include <asm/uaccess.h>
#include <asm/desc.h>

MODULE_AUTHOR("Qumranet");
MODULE_LICENSE("GPL");
static DEFINE_SPINLOCK(kvm_lock);
static LIST_HEAD(vm_list);

static cpumask_t cpus_hardware_enabled;

struct kvm_x86_ops *kvm_x86_ops;
struct kmem_cache *kvm_vcpu_cache;
EXPORT_SYMBOL_GPL(kvm_vcpu_cache);

static __read_mostly struct preempt_ops kvm_preempt_ops;

#define STAT_OFFSET(x) offsetof(struct kvm_vcpu, stat.x)

static struct kvm_stats_debugfs_item {
	const char *name;
	int offset;
	struct dentry *dentry;
} debugfs_entries[] = {
	{ "pf_fixed", STAT_OFFSET(pf_fixed) },
	{ "pf_guest", STAT_OFFSET(pf_guest) },
	{ "tlb_flush", STAT_OFFSET(tlb_flush) },
	{ "invlpg", STAT_OFFSET(invlpg) },
	{ "exits", STAT_OFFSET(exits) },
	{ "io_exits", STAT_OFFSET(io_exits) },
	{ "mmio_exits", STAT_OFFSET(mmio_exits) },
	{ "signal_exits", STAT_OFFSET(signal_exits) },
	{ "irq_window", STAT_OFFSET(irq_window_exits) },
	{ "halt_exits", STAT_OFFSET(halt_exits) },
	{ "halt_wakeup", STAT_OFFSET(halt_wakeup) },
	{ "request_irq", STAT_OFFSET(request_irq_exits) },
	{ "irq_exits", STAT_OFFSET(irq_exits) },
	{ "light_exits", STAT_OFFSET(light_exits) },
	{ "efer_reload", STAT_OFFSET(efer_reload) },
	{ NULL }
};

static struct dentry *debugfs_dir;

#define MAX_IO_MSRS 256

#define CR0_RESERVED_BITS \
	(~(unsigned long)(X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS \
			  | X86_CR0_ET | X86_CR0_NE | X86_CR0_WP | X86_CR0_AM \
			  | X86_CR0_NW | X86_CR0_CD | X86_CR0_PG))
#define CR4_RESERVED_BITS \
	(~(unsigned long)(X86_CR4_VME | X86_CR4_PVI | X86_CR4_TSD | X86_CR4_DE\
			  | X86_CR4_PSE | X86_CR4_PAE | X86_CR4_MCE \
			  | X86_CR4_PGE | X86_CR4_PCE | X86_CR4_OSFXSR \
			  | X86_CR4_OSXMMEXCPT | X86_CR4_VMXE))

#define CR8_RESERVED_BITS (~(unsigned long)X86_CR8_TPR)
#define EFER_RESERVED_BITS 0xfffffffffffff2fe

#ifdef CONFIG_X86_64
/* LDT or TSS descriptor in the GDT. 16 bytes. */
struct segment_descriptor_64 {
	struct segment_descriptor s;
	u32 base_higher;
	u32 pad_zero;
};
#endif

static long kvm_vcpu_ioctl(struct file *file, unsigned int ioctl,
			   unsigned long arg);
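
/*
 * Return the linear base address of the segment named by @selector, read
 * from its descriptor in the current GDT (or LDT, when the selector's
 * table-indicator bit is set).
 */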
unsigned long segment_base(u16 selector)
{
	struct descriptor_table gdt;
	struct segment_descriptor *d;
	unsigned long table_base;
	typedef unsigned long ul;
	unsigned long v;

	if (selector == 0)
		return 0;

	asm("sgdt %0" : "=m"(gdt));
	table_base = gdt.base;

	if (selector & 4) {		/* from ldt */
		u16 ldt_selector;

		asm("sldt %0" : "=g"(ldt_selector));
		table_base = segment_base(ldt_selector);
	}
	d = (struct segment_descriptor *)(table_base + (selector & ~7));
	v = d->base_low | ((ul)d->base_mid << 16) | ((ul)d->base_high << 24);
#ifdef CONFIG_X86_64
	if (d->system == 0
	    && (d->type == 2 || d->type == 9 || d->type == 11))
		v |= ((ul)((struct segment_descriptor_64 *)d)->base_higher) << 32;
#endif
	return v;
}
EXPORT_SYMBOL_GPL(segment_base);

static inline int valid_vcpu(int n)
{
	return likely(n >= 0 && n < KVM_MAX_VCPUS);
}
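
/*
 * Lazy FPU switching: on first guest use, save the host FPU state and load
 * the guest's; kvm_put_guest_fpu() performs the reverse switch.
 */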
void kvm_load_guest_fpu(struct kvm_vcpu *vcpu)
{
	if (!vcpu->fpu_active || vcpu->guest_fpu_loaded)
		return;

	vcpu->guest_fpu_loaded = 1;
	fx_save(&vcpu->host_fx_image);
	fx_restore(&vcpu->guest_fx_image);
}
EXPORT_SYMBOL_GPL(kvm_load_guest_fpu);

void kvm_put_guest_fpu(struct kvm_vcpu *vcpu)
{
	if (!vcpu->guest_fpu_loaded)
		return;

	vcpu->guest_fpu_loaded = 0;
	fx_save(&vcpu->guest_fx_image);
	fx_restore(&vcpu->host_fx_image);
}
EXPORT_SYMBOL_GPL(kvm_put_guest_fpu);

/*
 * Switches to specified vcpu, until a matching vcpu_put()
 */
static void vcpu_load(struct kvm_vcpu *vcpu)
{
	int cpu;

	mutex_lock(&vcpu->mutex);
	cpu = get_cpu();
	preempt_notifier_register(&vcpu->preempt_notifier);
	kvm_x86_ops->vcpu_load(vcpu, cpu);
	put_cpu();
}

static void vcpu_put(struct kvm_vcpu *vcpu)
{
	preempt_disable();
	kvm_x86_ops->vcpu_put(vcpu);
	preempt_notifier_unregister(&vcpu->preempt_notifier);
	preempt_enable();
	mutex_unlock(&vcpu->mutex);
}

static void ack_flush(void *_completed)
{
}
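
/*
 * Mark every vcpu as needing a TLB flush, then send a synchronous IPI (via
 * the empty ack_flush() handler) to each cpu currently running a vcpu, so
 * the request is honored before the guest is re-entered.
 */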
void kvm_flush_remote_tlbs(struct kvm *kvm)
{
	int i, cpu;
	cpumask_t cpus;
	struct kvm_vcpu *vcpu;

	cpus_clear(cpus);
	for (i = 0; i < KVM_MAX_VCPUS; ++i) {
		vcpu = kvm->vcpus[i];
		if (!vcpu)
			continue;
		if (test_and_set_bit(KVM_TLB_FLUSH, &vcpu->requests))
			continue;
		cpu = vcpu->cpu;
		if (cpu != -1 && cpu != raw_smp_processor_id())
			cpu_set(cpu, cpus);
	}
	smp_call_function_mask(cpus, ack_flush, NULL, 1);
}
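
/*
 * Architecture-independent vcpu setup.  The kvm_run and pio_data pages
 * allocated here form the vcpu's communication area with userspace.
 */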
int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id)
{
	struct page *page;
	int r;

	mutex_init(&vcpu->mutex);
	vcpu->cpu = -1;
	vcpu->mmu.root_hpa = INVALID_PAGE;
	vcpu->kvm = kvm;
	vcpu->vcpu_id = id;
	if (!irqchip_in_kernel(kvm) || id == 0)
		vcpu->mp_state = VCPU_MP_STATE_RUNNABLE;
	else
		vcpu->mp_state = VCPU_MP_STATE_UNINITIALIZED;
	init_waitqueue_head(&vcpu->wq);

	page = alloc_page(GFP_KERNEL | __GFP_ZERO);
	if (!page) {
		r = -ENOMEM;
		goto fail;
	}
	vcpu->run = page_address(page);

	page = alloc_page(GFP_KERNEL | __GFP_ZERO);
	if (!page) {
		r = -ENOMEM;
		goto fail_free_run;
	}
	vcpu->pio_data = page_address(page);

	r = kvm_mmu_create(vcpu);
	if (r < 0)
		goto fail_free_pio_data;

	return 0;

fail_free_pio_data:
	free_page((unsigned long)vcpu->pio_data);
fail_free_run:
	free_page((unsigned long)vcpu->run);
fail:
	return -ENOMEM;
}
EXPORT_SYMBOL_GPL(kvm_vcpu_init);

void kvm_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	kvm_mmu_destroy(vcpu);
	if (vcpu->apic)
		hrtimer_cancel(&vcpu->apic->timer.dev);
	kvm_free_apic(vcpu->apic);
	free_page((unsigned long)vcpu->pio_data);
	free_page((unsigned long)vcpu->run);
}
EXPORT_SYMBOL_GPL(kvm_vcpu_uninit);
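
/*
 * Allocate and minimally initialize a VM, registering it on the global
 * vm_list under kvm_lock.
 */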
static struct kvm *kvm_create_vm(void)
{
	struct kvm *kvm = kzalloc(sizeof(struct kvm), GFP_KERNEL);

	if (!kvm)
		return ERR_PTR(-ENOMEM);

	kvm_io_bus_init(&kvm->pio_bus);
	mutex_init(&kvm->lock);
	INIT_LIST_HEAD(&kvm->active_mmu_pages);
	kvm_io_bus_init(&kvm->mmio_bus);
	spin_lock(&kvm_lock);
	list_add(&kvm->vm_list, &vm_list);
	spin_unlock(&kvm_lock);
	return kvm;
}

/*
 * Free any memory in @free but not in @dont.
 */
static void kvm_free_physmem_slot(struct kvm_memory_slot *free,
				  struct kvm_memory_slot *dont)
{
	int i;

	if (!dont || free->phys_mem != dont->phys_mem)
		if (free->phys_mem) {
			for (i = 0; i < free->npages; ++i)
				if (free->phys_mem[i])
					__free_page(free->phys_mem[i]);
			vfree(free->phys_mem);
		}

	if (!dont || free->dirty_bitmap != dont->dirty_bitmap)
		vfree(free->dirty_bitmap);

	free->phys_mem = NULL;
	free->npages = 0;
	free->dirty_bitmap = NULL;
}

static void kvm_free_physmem(struct kvm *kvm)
{
	int i;

	for (i = 0; i < kvm->nmemslots; ++i)
		kvm_free_physmem_slot(&kvm->memslots[i], NULL);
}

static void free_pio_guest_pages(struct kvm_vcpu *vcpu)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(vcpu->pio.guest_pages); ++i)
		if (vcpu->pio.guest_pages[i]) {
			__free_page(vcpu->pio.guest_pages[i]);
			vcpu->pio.guest_pages[i] = NULL;
		}
}

static void kvm_unload_vcpu_mmu(struct kvm_vcpu *vcpu)
{
	vcpu_load(vcpu);
	kvm_mmu_unload(vcpu);
	vcpu_put(vcpu);
}

static void kvm_free_vcpus(struct kvm *kvm)
{
	unsigned int i;

	/*
	 * Unpin any mmu pages first.
	 */
	for (i = 0; i < KVM_MAX_VCPUS; ++i)
		if (kvm->vcpus[i])
			kvm_unload_vcpu_mmu(kvm->vcpus[i]);
	for (i = 0; i < KVM_MAX_VCPUS; ++i) {
		if (kvm->vcpus[i]) {
			kvm_x86_ops->vcpu_free(kvm->vcpus[i]);
			kvm->vcpus[i] = NULL;
		}
	}
}

static void kvm_destroy_vm(struct kvm *kvm)
{
	spin_lock(&kvm_lock);
	list_del(&kvm->vm_list);
	spin_unlock(&kvm_lock);
	kvm_io_bus_destroy(&kvm->pio_bus);
	kvm_io_bus_destroy(&kvm->mmio_bus);
	kfree(kvm->vpic);
	kfree(kvm->vioapic);
	kvm_free_vcpus(kvm);
	kvm_free_physmem(kvm);
	kfree(kvm);
}

static int kvm_vm_release(struct inode *inode, struct file *filp)
{
	struct kvm *kvm = filp->private_data;

	kvm_destroy_vm(kvm);
	return 0;
}
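
/*
 * Inject a general-protection fault (#GP, error code 0) into the guest.
 */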
static void inject_gp(struct kvm_vcpu *vcpu)
{
	kvm_x86_ops->inject_gp(vcpu, 0);
}

/*
 * Load the pae pdptrs.  Return true if they are all valid.
 */
static int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3)
{
	gfn_t pdpt_gfn = cr3 >> PAGE_SHIFT;
	unsigned offset = ((cr3 & (PAGE_SIZE-1)) >> 5) << 2;
	int i;
	u64 *pdpt;
	int ret;
	struct page *page;
	u64 pdpte[ARRAY_SIZE(vcpu->pdptrs)];

	mutex_lock(&vcpu->kvm->lock);
	page = gfn_to_page(vcpu->kvm, pdpt_gfn);
	if (!page) {
		ret = 0;
		goto out;
	}

	pdpt = kmap_atomic(page, KM_USER0);
	memcpy(pdpte, pdpt+offset, sizeof(pdpte));
	kunmap_atomic(pdpt, KM_USER0);
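
	/* A present pdpte must have no reserved bits set. */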
	for (i = 0; i < ARRAY_SIZE(pdpte); ++i) {
		if ((pdpte[i] & 1) && (pdpte[i] & 0xfffffff0000001e6ull)) {
			ret = 0;
			goto out;
		}
	}
	ret = 1;

	memcpy(vcpu->pdptrs, pdpte, sizeof(vcpu->pdptrs));
out:
	mutex_unlock(&vcpu->kvm->lock);

	return ret;
}
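
/*
 * Emulate a guest write to cr0: reserved bits and architecturally invalid
 * combinations (NW without CD, PG without PE, entering paged long mode
 * without PAE or with CS.L set) draw #GP; otherwise the new value is handed
 * to the vendor module and the shadow MMU context is rebuilt.
 */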
void set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
{
	if (cr0 & CR0_RESERVED_BITS) {
		printk(KERN_DEBUG "set_cr0: 0x%lx #GP, reserved bits 0x%lx\n",
		       cr0, vcpu->cr0);
		inject_gp(vcpu);
		return;
	}

	if ((cr0 & X86_CR0_NW) && !(cr0 & X86_CR0_CD)) {
		printk(KERN_DEBUG "set_cr0: #GP, CD == 0 && NW == 1\n");
		inject_gp(vcpu);
		return;
	}

	if ((cr0 & X86_CR0_PG) && !(cr0 & X86_CR0_PE)) {
		printk(KERN_DEBUG "set_cr0: #GP, set PG flag "
		       "and a clear PE flag\n");
		inject_gp(vcpu);
		return;
	}

	if (!is_paging(vcpu) && (cr0 & X86_CR0_PG)) {
#ifdef CONFIG_X86_64
		if ((vcpu->shadow_efer & EFER_LME)) {
			int cs_db, cs_l;

			if (!is_pae(vcpu)) {
				printk(KERN_DEBUG "set_cr0: #GP, start paging "
				       "in long mode while PAE is disabled\n");
				inject_gp(vcpu);
				return;
			}
			kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
			if (cs_l) {
				printk(KERN_DEBUG "set_cr0: #GP, start paging "
				       "in long mode while CS.L == 1\n");
				inject_gp(vcpu);
				return;
			}
		} else
#endif
		if (is_pae(vcpu) && !load_pdptrs(vcpu, vcpu->cr3)) {
			printk(KERN_DEBUG "set_cr0: #GP, pdptrs "
			       "reserved bits\n");
			inject_gp(vcpu);
			return;
		}
	}

	kvm_x86_ops->set_cr0(vcpu, cr0);
	vcpu->cr0 = cr0;

	mutex_lock(&vcpu->kvm->lock);
	kvm_mmu_reset_context(vcpu);
	mutex_unlock(&vcpu->kvm->lock);
	return;
}
EXPORT_SYMBOL_GPL(set_cr0);

void lmsw(struct kvm_vcpu *vcpu, unsigned long msw)
{
	set_cr0(vcpu, (vcpu->cr0 & ~0x0ful) | (msw & 0x0f));
}
EXPORT_SYMBOL_GPL(lmsw);

void set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
{
	if (cr4 & CR4_RESERVED_BITS) {
		printk(KERN_DEBUG "set_cr4: #GP, reserved bits\n");
		inject_gp(vcpu);
		return;
	}

	if (is_long_mode(vcpu)) {
		if (!(cr4 & X86_CR4_PAE)) {
			printk(KERN_DEBUG "set_cr4: #GP, clearing PAE while "
			       "in long mode\n");
			inject_gp(vcpu);
			return;
		}
	} else if (is_paging(vcpu) && !is_pae(vcpu) && (cr4 & X86_CR4_PAE)
		   && !load_pdptrs(vcpu, vcpu->cr3)) {
		printk(KERN_DEBUG "set_cr4: #GP, pdptrs reserved bits\n");
		inject_gp(vcpu);
		return;
	}

	if (cr4 & X86_CR4_VMXE) {
		printk(KERN_DEBUG "set_cr4: #GP, setting VMXE\n");
		inject_gp(vcpu);
		return;
	}
	kvm_x86_ops->set_cr4(vcpu, cr4);
	vcpu->cr4 = cr4;
	mutex_lock(&vcpu->kvm->lock);
	kvm_mmu_reset_context(vcpu);
	mutex_unlock(&vcpu->kvm->lock);
}
EXPORT_SYMBOL_GPL(set_cr4);

void set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
{
	if (is_long_mode(vcpu)) {
		if (cr3 & CR3_L_MODE_RESERVED_BITS) {
			printk(KERN_DEBUG "set_cr3: #GP, reserved bits\n");
			inject_gp(vcpu);
			return;
		}
	} else {
		if (is_pae(vcpu)) {
			if (cr3 & CR3_PAE_RESERVED_BITS) {
				printk(KERN_DEBUG
				       "set_cr3: #GP, reserved bits\n");
				inject_gp(vcpu);
				return;
			}
			if (is_paging(vcpu) && !load_pdptrs(vcpu, cr3)) {
				printk(KERN_DEBUG "set_cr3: #GP, pdptrs "
				       "reserved bits\n");
				inject_gp(vcpu);
				return;
			}
		} else {
			if (cr3 & CR3_NONPAE_RESERVED_BITS) {
				printk(KERN_DEBUG
				       "set_cr3: #GP, reserved bits\n");
				inject_gp(vcpu);
				return;
			}
		}
	}

	mutex_lock(&vcpu->kvm->lock);
	/*
	 * Does the new cr3 value map to physical memory? (Note, we
	 * catch an invalid cr3 even in real-mode, because it would
	 * cause trouble later on when we turn on paging anyway.)
	 *
	 * A real CPU would silently accept an invalid cr3 and would
	 * attempt to use it - with largely undefined (and often hard
	 * to debug) behavior on the guest side.
	 */
	if (unlikely(!gfn_to_memslot(vcpu->kvm, cr3 >> PAGE_SHIFT)))
		inject_gp(vcpu);
	else {
		vcpu->cr3 = cr3;
		vcpu->mmu.new_cr3(vcpu);
	}
	mutex_unlock(&vcpu->kvm->lock);
}
EXPORT_SYMBOL_GPL(set_cr3);
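
/*
 * cr8 mirrors the local APIC's task-priority register: with the in-kernel
 * irqchip it is forwarded to the lapic, otherwise it is cached in the vcpu.
 */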
void set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8)
{
	if (cr8 & CR8_RESERVED_BITS) {
		printk(KERN_DEBUG "set_cr8: #GP, reserved bits 0x%lx\n", cr8);
		inject_gp(vcpu);
		return;
	}
	if (irqchip_in_kernel(vcpu->kvm))
		kvm_lapic_set_tpr(vcpu, cr8);
	else
		vcpu->cr8 = cr8;
}
EXPORT_SYMBOL_GPL(set_cr8);

unsigned long get_cr8(struct kvm_vcpu *vcpu)
{
	if (irqchip_in_kernel(vcpu->kvm))
		return kvm_lapic_get_cr8(vcpu);
	else
		return vcpu->cr8;
}
EXPORT_SYMBOL_GPL(get_cr8);

u64 kvm_get_apic_base(struct kvm_vcpu *vcpu)
{
	/*
	 * vcpu->apic_base tracks the architectural MSR whether or not the
	 * irqchip is emulated in the kernel, so both cases read the same
	 * field.
	 */
	return vcpu->apic_base;
}
EXPORT_SYMBOL_GPL(kvm_get_apic_base);

void kvm_set_apic_base(struct kvm_vcpu *vcpu, u64 data)
{
	/* TODO: reserve bits check */
	if (irqchip_in_kernel(vcpu->kvm))
		kvm_lapic_set_base(vcpu, data);
	else
		vcpu->apic_base = data;
}
EXPORT_SYMBOL_GPL(kvm_set_apic_base);
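
/*
 * Reset the guest FPU image to power-on defaults (0x1f80 is the
 * architectural MXCSR reset value) by reinitializing the host FPU and
 * saving the resulting clean state into the guest image.
 */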
void fx_init(struct kvm_vcpu *vcpu)
{
	unsigned after_mxcsr_mask;

	/* Initialize guest FPU by resetting ours and saving into guest's */
	preempt_disable();
	fx_save(&vcpu->host_fx_image);
	fpu_init();
	fx_save(&vcpu->guest_fx_image);
	fx_restore(&vcpu->host_fx_image);
	preempt_enable();

	vcpu->cr0 |= X86_CR0_ET;
	after_mxcsr_mask = offsetof(struct i387_fxsave_struct, st_space);
	vcpu->guest_fx_image.mxcsr = 0x1f80;
	memset((void *)&vcpu->guest_fx_image + after_mxcsr_mask,
	       0, sizeof(struct i387_fxsave_struct) - after_mxcsr_mask);
}
EXPORT_SYMBOL_GPL(fx_init);

/*
 * Allocate some memory and give it an address in the guest physical address
 * space.
 *
 * Discontiguous memory is allowed, mostly for framebuffers.
 */
static int kvm_vm_ioctl_set_memory_region(struct kvm *kvm,
					  struct kvm_memory_region *mem)
{
	int r;
	gfn_t base_gfn;
	unsigned long npages;
	unsigned long i;
	struct kvm_memory_slot *memslot;
	struct kvm_memory_slot old, new;

	r = -EINVAL;
	/* General sanity checks */
	if (mem->memory_size & (PAGE_SIZE - 1))
		goto out;
	if (mem->guest_phys_addr & (PAGE_SIZE - 1))
		goto out;
	if (mem->slot >= KVM_MEMORY_SLOTS)
		goto out;
	if (mem->guest_phys_addr + mem->memory_size < mem->guest_phys_addr)
		goto out;

	memslot = &kvm->memslots[mem->slot];
	base_gfn = mem->guest_phys_addr >> PAGE_SHIFT;
	npages = mem->memory_size >> PAGE_SHIFT;

	if (!npages)
		mem->flags &= ~KVM_MEM_LOG_DIRTY_PAGES;

	mutex_lock(&kvm->lock);

	new = old = *memslot;

	new.base_gfn = base_gfn;
	new.npages = npages;
	new.flags = mem->flags;

	/* Disallow changing a memory slot's size. */
	r = -EINVAL;
	if (npages && old.npages && npages != old.npages)
		goto out_unlock;

	/* Check for overlaps */
	r = -EEXIST;
	for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
		struct kvm_memory_slot *s = &kvm->memslots[i];

		if (s == memslot)
			continue;
		if (!((base_gfn + npages <= s->base_gfn) ||
		      (base_gfn >= s->base_gfn + s->npages)))
			goto out_unlock;
	}

	/* Deallocate if slot is being removed */
	if (!npages)
		new.phys_mem = NULL;

	/* Free page dirty bitmap if unneeded */
	if (!(new.flags & KVM_MEM_LOG_DIRTY_PAGES))
		new.dirty_bitmap = NULL;

	r = -ENOMEM;

	/* Allocate if a slot is being created */
	if (npages && !new.phys_mem) {
		new.phys_mem = vmalloc(npages * sizeof(struct page *));

		if (!new.phys_mem)
			goto out_unlock;

		memset(new.phys_mem, 0, npages * sizeof(struct page *));
		for (i = 0; i < npages; ++i) {
			new.phys_mem[i] = alloc_page(GFP_HIGHUSER
						     | __GFP_ZERO);
			if (!new.phys_mem[i])
				goto out_unlock;
			set_page_private(new.phys_mem[i], 0);
		}
	}

	/* Allocate page dirty bitmap if needed */
	if ((new.flags & KVM_MEM_LOG_DIRTY_PAGES) && !new.dirty_bitmap) {
		unsigned dirty_bytes = ALIGN(npages, BITS_PER_LONG) / 8;

		new.dirty_bitmap = vmalloc(dirty_bytes);
		if (!new.dirty_bitmap)
			goto out_unlock;
		memset(new.dirty_bitmap, 0, dirty_bytes);
	}

	if (mem->slot >= kvm->nmemslots)
		kvm->nmemslots = mem->slot + 1;

	*memslot = new;

	kvm_mmu_slot_remove_write_access(kvm, mem->slot);
	kvm_flush_remote_tlbs(kvm);

	mutex_unlock(&kvm->lock);

	kvm_free_physmem_slot(&old, &new);
	return 0;

out_unlock:
	mutex_unlock(&kvm->lock);
	kvm_free_physmem_slot(&new, &old);
out:
	return r;
}

/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
static int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
				      struct kvm_dirty_log *log)
{
	struct kvm_memory_slot *memslot;
	int r, i;
	int n;
	unsigned long any = 0;

	mutex_lock(&kvm->lock);

	r = -EINVAL;
	if (log->slot >= KVM_MEMORY_SLOTS)
		goto out;

	memslot = &kvm->memslots[log->slot];
	r = -ENOENT;
	if (!memslot->dirty_bitmap)
		goto out;

	n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;

	for (i = 0; !any && i < n/sizeof(long); ++i)
		any = memslot->dirty_bitmap[i];

	r = -EFAULT;
	if (copy_to_user(log->dirty_bitmap, memslot->dirty_bitmap, n))
		goto out;

	/* If nothing is dirty, don't bother messing with page tables. */
	if (any) {
		kvm_mmu_slot_remove_write_access(kvm, log->slot);
		kvm_flush_remote_tlbs(kvm);
		memset(memslot->dirty_bitmap, 0, n);
	}

	r = 0;

out:
	mutex_unlock(&kvm->lock);
	return r;
}

/*
 * Set a new alias region.  Aliases map a portion of physical memory into
 * another portion.  This is useful for memory windows, for example the PC
 * VGA region.
 */
static int kvm_vm_ioctl_set_memory_alias(struct kvm *kvm,
					 struct kvm_memory_alias *alias)
{
	int r, n;
	struct kvm_mem_alias *p;

	r = -EINVAL;
	/* General sanity checks */
	if (alias->memory_size & (PAGE_SIZE - 1))
		goto out;
	if (alias->guest_phys_addr & (PAGE_SIZE - 1))
		goto out;
	if (alias->slot >= KVM_ALIAS_SLOTS)
		goto out;
	if (alias->guest_phys_addr + alias->memory_size
	    < alias->guest_phys_addr)
		goto out;
	if (alias->target_phys_addr + alias->memory_size
	    < alias->target_phys_addr)
		goto out;

	mutex_lock(&kvm->lock);

	p = &kvm->aliases[alias->slot];
	p->base_gfn = alias->guest_phys_addr >> PAGE_SHIFT;
	p->npages = alias->memory_size >> PAGE_SHIFT;
	p->target_gfn = alias->target_phys_addr >> PAGE_SHIFT;

	for (n = KVM_ALIAS_SLOTS; n > 0; --n)
		if (kvm->aliases[n - 1].npages)
			break;
	kvm->naliases = n;

	kvm_mmu_zap_all(kvm);

	mutex_unlock(&kvm->lock);

	return 0;

out:
	return r;
}
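
/*
 * Copy the register state of one of the emulated interrupt controllers
 * (master PIC, slave PIC, or IOAPIC) out to a kvm_irqchip structure for
 * userspace; kvm_vm_ioctl_set_irqchip() below performs the reverse copy.
 */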
static int kvm_vm_ioctl_get_irqchip(struct kvm *kvm, struct kvm_irqchip *chip)
{
	int r;

	r = 0;
	switch (chip->chip_id) {
	case KVM_IRQCHIP_PIC_MASTER:
		memcpy(&chip->chip.pic,
		       &pic_irqchip(kvm)->pics[0],
		       sizeof(struct kvm_pic_state));
		break;
	case KVM_IRQCHIP_PIC_SLAVE:
		memcpy(&chip->chip.pic,
		       &pic_irqchip(kvm)->pics[1],
		       sizeof(struct kvm_pic_state));
		break;
	case KVM_IRQCHIP_IOAPIC:
		memcpy(&chip->chip.ioapic,
		       ioapic_irqchip(kvm),
		       sizeof(struct kvm_ioapic_state));
		break;
	default:
		r = -EINVAL;
		break;
	}
	return r;
}

static int kvm_vm_ioctl_set_irqchip(struct kvm *kvm, struct kvm_irqchip *chip)
{
	int r;

	r = 0;
	switch (chip->chip_id) {
	case KVM_IRQCHIP_PIC_MASTER:
		memcpy(&pic_irqchip(kvm)->pics[0],
		       &chip->chip.pic,
		       sizeof(struct kvm_pic_state));
		break;
	case KVM_IRQCHIP_PIC_SLAVE:
		memcpy(&pic_irqchip(kvm)->pics[1],
		       &chip->chip.pic,
		       sizeof(struct kvm_pic_state));
		break;
	case KVM_IRQCHIP_IOAPIC:
		memcpy(ioapic_irqchip(kvm),
		       &chip->chip.ioapic,
		       sizeof(struct kvm_ioapic_state));
		break;
	default:
		r = -EINVAL;
		break;
	}
	kvm_pic_update_irq(pic_irqchip(kvm));
	return r;
}
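
/*
 * Translate a gfn through the alias table; gfns outside every alias region
 * map to themselves.
 */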
static gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn)
{
	int i;
	struct kvm_mem_alias *alias;

	for (i = 0; i < kvm->naliases; ++i) {
		alias = &kvm->aliases[i];
		if (gfn >= alias->base_gfn
		    && gfn < alias->base_gfn + alias->npages)
			return alias->target_gfn + gfn - alias->base_gfn;
	}
	return gfn;
}

static struct kvm_memory_slot *__gfn_to_memslot(struct kvm *kvm, gfn_t gfn)
{
	int i;

	for (i = 0; i < kvm->nmemslots; ++i) {
		struct kvm_memory_slot *memslot = &kvm->memslots[i];

		if (gfn >= memslot->base_gfn
		    && gfn < memslot->base_gfn + memslot->npages)
			return memslot;
	}
	return NULL;
}

struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn)
{
	gfn = unalias_gfn(kvm, gfn);
	return __gfn_to_memslot(kvm, gfn);
}

struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn)
{
	struct kvm_memory_slot *slot;

	gfn = unalias_gfn(kvm, gfn);
	slot = __gfn_to_memslot(kvm, gfn);
	if (!slot)
		return NULL;
	return slot->phys_mem[gfn - slot->base_gfn];
}
EXPORT_SYMBOL_GPL(gfn_to_page);

/* WARNING: Does not work on aliased pages. */
void mark_page_dirty(struct kvm *kvm, gfn_t gfn)
{
	struct kvm_memory_slot *memslot;

	memslot = __gfn_to_memslot(kvm, gfn);
	if (memslot && memslot->dirty_bitmap) {
		unsigned long rel_gfn = gfn - memslot->base_gfn;

		/* avoid RMW */
		if (!test_bit(rel_gfn, memslot->dirty_bitmap))
			set_bit(rel_gfn, memslot->dirty_bitmap);
	}
}
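
/*
 * Copy @bytes from guest virtual address @addr, translating through the
 * vcpu's mmu one page at a time so reads may span discontiguous physical
 * pages.
 */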
int emulator_read_std(unsigned long addr,
		      void *val,
		      unsigned int bytes,
		      struct kvm_vcpu *vcpu)
{
	void *data = val;

	while (bytes) {
		gpa_t gpa = vcpu->mmu.gva_to_gpa(vcpu, addr);
		unsigned offset = addr & (PAGE_SIZE-1);
		unsigned tocopy = min(bytes, (unsigned)PAGE_SIZE - offset);
		unsigned long pfn;
		struct page *page;
		void *page_virt;

		if (gpa == UNMAPPED_GVA)
			return X86EMUL_PROPAGATE_FAULT;
		pfn = gpa >> PAGE_SHIFT;
		page = gfn_to_page(vcpu->kvm, pfn);
		if (!page)
			return X86EMUL_UNHANDLEABLE;
		page_virt = kmap_atomic(page, KM_USER0);

		memcpy(data, page_virt + offset, tocopy);

		kunmap_atomic(page_virt, KM_USER0);

		bytes -= tocopy;
		data += tocopy;
		addr += tocopy;
	}

	return X86EMUL_CONTINUE;
}
EXPORT_SYMBOL_GPL(emulator_read_std);

static int emulator_write_std(unsigned long addr,
			      const void *val,
			      unsigned int bytes,
			      struct kvm_vcpu *vcpu)
{
	pr_unimpl(vcpu, "emulator_write_std: addr %lx n %d\n", addr, bytes);
	return X86EMUL_UNHANDLEABLE;
}

/*
 * Only the apic needs a per-vcpu MMIO device hook, so take a shortcut here.
 */
static struct kvm_io_device *vcpu_find_pervcpu_dev(struct kvm_vcpu *vcpu,
						   gpa_t addr)
{
	struct kvm_io_device *dev;

	if (vcpu->apic) {
		dev = &vcpu->apic->dev;
		if (dev->in_range(dev, addr))
			return dev;
	}
	return NULL;
}

static struct kvm_io_device *vcpu_find_mmio_dev(struct kvm_vcpu *vcpu,
						gpa_t addr)
{
	struct kvm_io_device *dev;

	dev = vcpu_find_pervcpu_dev(vcpu, addr);
	if (dev == NULL)
		dev = kvm_io_bus_find_dev(&vcpu->kvm->mmio_bus, addr);
	return dev;
}

static struct kvm_io_device *vcpu_find_pio_dev(struct kvm_vcpu *vcpu,
					       gpa_t addr)
{
	return kvm_io_bus_find_dev(&vcpu->kvm->pio_bus, addr);
}
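
/*
 * Emulated read path: first drain a completed userspace MMIO read, then try
 * ordinary guest memory, then an in-kernel MMIO device; otherwise record
 * the access in the vcpu and punt to userspace as an MMIO exit.
 */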
static int emulator_read_emulated(unsigned long addr,
				  void *val,
				  unsigned int bytes,
				  struct kvm_vcpu *vcpu)
{
	struct kvm_io_device *mmio_dev;
	gpa_t gpa;

	if (vcpu->mmio_read_completed) {
		memcpy(val, vcpu->mmio_data, bytes);
		vcpu->mmio_read_completed = 0;
		return X86EMUL_CONTINUE;
	} else if (emulator_read_std(addr, val, bytes, vcpu)
		   == X86EMUL_CONTINUE)
		return X86EMUL_CONTINUE;

	gpa = vcpu->mmu.gva_to_gpa(vcpu, addr);
	if (gpa == UNMAPPED_GVA)
		return X86EMUL_PROPAGATE_FAULT;

	/*
	 * Is this MMIO handled locally?
	 */
	mmio_dev = vcpu_find_mmio_dev(vcpu, gpa);
	if (mmio_dev) {
		kvm_iodevice_read(mmio_dev, gpa, bytes, val);
		return X86EMUL_CONTINUE;
	}

	vcpu->mmio_needed = 1;
	vcpu->mmio_phys_addr = gpa;
	vcpu->mmio_size = bytes;
	vcpu->mmio_is_write = 0;

	return X86EMUL_UNHANDLEABLE;
}
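
/*
 * Write @bytes directly into a guest physical page.  kvm_mmu_pte_write()
 * is called first so shadowed (write-protected) page tables stay coherent.
 * Returns 0 if the write crosses a page boundary or the gfn has no backing
 * page, letting the caller fall back to MMIO handling.
 */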
static int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
			       const void *val, int bytes)
{
	struct page *page;
	void *virt;

	if (((gpa + bytes - 1) >> PAGE_SHIFT) != (gpa >> PAGE_SHIFT))
		return 0;
	page = gfn_to_page(vcpu->kvm, gpa >> PAGE_SHIFT);
	if (!page)
		return 0;
	mark_page_dirty(vcpu->kvm, gpa >> PAGE_SHIFT);
	virt = kmap_atomic(page, KM_USER0);
	kvm_mmu_pte_write(vcpu, gpa, val, bytes);
	memcpy(virt + offset_in_page(gpa), val, bytes);
	kunmap_atomic(virt, KM_USER0);
	return 1;
}

static int emulator_write_emulated_onepage(unsigned long addr,
					   const void *val,
					   unsigned int bytes,
					   struct kvm_vcpu *vcpu)
{
	struct kvm_io_device *mmio_dev;
	gpa_t gpa = vcpu->mmu.gva_to_gpa(vcpu, addr);

	if (gpa == UNMAPPED_GVA) {
		kvm_x86_ops->inject_page_fault(vcpu, addr, 2);
		return X86EMUL_PROPAGATE_FAULT;
	}

	if (emulator_write_phys(vcpu, gpa, val, bytes))
		return X86EMUL_CONTINUE;

	/*
	 * Is this MMIO handled locally?
	 */
	mmio_dev = vcpu_find_mmio_dev(vcpu, gpa);
	if (mmio_dev) {
		kvm_iodevice_write(mmio_dev, gpa, bytes, val);
		return X86EMUL_CONTINUE;
	}

	vcpu->mmio_needed = 1;
	vcpu->mmio_phys_addr = gpa;
	vcpu->mmio_size = bytes;
	vcpu->mmio_is_write = 1;
	memcpy(vcpu->mmio_data, val, bytes);

	return X86EMUL_CONTINUE;
}
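
/*
 * Split an emulated write that straddles a page boundary into two
 * single-page writes, since a gva->gpa translation is only valid within
 * one page.
 */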
int emulator_write_emulated(unsigned long addr,
			    const void *val,
			    unsigned int bytes,
			    struct kvm_vcpu *vcpu)
{
	/* Crossing a page boundary? */
	if (((addr + bytes - 1) ^ addr) & PAGE_MASK) {
		int rc, now;

		now = -addr & ~PAGE_MASK;
		rc = emulator_write_emulated_onepage(addr, val, now, vcpu);
		if (rc != X86EMUL_CONTINUE)
			return rc;
		addr += now;
		val += now;
		bytes -= now;
	}
	return emulator_write_emulated_onepage(addr, val, bytes, vcpu);
}
EXPORT_SYMBOL_GPL(emulator_write_emulated);

static int emulator_cmpxchg_emulated(unsigned long addr,
				     const void *old,
				     const void *new,
				     unsigned int bytes,
				     struct kvm_vcpu *vcpu)
{
	static int reported;

	if (!reported) {
		reported = 1;
		printk(KERN_WARNING "kvm: emulating exchange as write\n");
	}
	return emulator_write_emulated(addr, new, bytes, vcpu);
}

static unsigned long get_segment_base(struct kvm_vcpu *vcpu, int seg)
{
	return kvm_x86_ops->get_segment_base(vcpu, seg);
}

int emulate_invlpg(struct kvm_vcpu *vcpu, gva_t address)
{
	return X86EMUL_CONTINUE;
}

int emulate_clts(struct kvm_vcpu *vcpu)
{
	kvm_x86_ops->set_cr0(vcpu, vcpu->cr0 & ~X86_CR0_TS);
	return X86EMUL_CONTINUE;
}

int emulator_get_dr(struct x86_emulate_ctxt *ctxt, int dr, unsigned long *dest)
{
	struct kvm_vcpu *vcpu = ctxt->vcpu;

	switch (dr) {
	case 0 ... 3:
		*dest = kvm_x86_ops->get_dr(vcpu, dr);
		return X86EMUL_CONTINUE;
	default:
		pr_unimpl(vcpu, "%s: unexpected dr %u\n", __FUNCTION__, dr);
		return X86EMUL_UNHANDLEABLE;
	}
}

int emulator_set_dr(struct x86_emulate_ctxt *ctxt, int dr, unsigned long value)
{
	unsigned long mask = (ctxt->mode == X86EMUL_MODE_PROT64) ? ~0ULL : ~0U;
	int exception;

	kvm_x86_ops->set_dr(ctxt->vcpu, dr, value & mask, &exception);
	if (exception) {
		/* FIXME: better handling */
		return X86EMUL_UNHANDLEABLE;
	}
	return X86EMUL_CONTINUE;
}

void kvm_report_emulation_failure(struct kvm_vcpu *vcpu, const char *context)
{
	static int reported;
	u8 opcodes[4];
	unsigned long rip = vcpu->rip;
	unsigned long rip_linear;

	rip_linear = rip + get_segment_base(vcpu, VCPU_SREG_CS);

	if (reported)
		return;

	emulator_read_std(rip_linear, (void *)opcodes, 4, vcpu);

	printk(KERN_ERR "emulation failed (%s) rip %lx %02x %02x %02x %02x\n",
	       context, rip, opcodes[0], opcodes[1], opcodes[2], opcodes[3]);
	reported = 1;
}
EXPORT_SYMBOL_GPL(kvm_report_emulation_failure);

struct x86_emulate_ops emulate_ops = {
	.read_std = emulator_read_std,
	.write_std = emulator_write_std,
	.read_emulated = emulator_read_emulated,
	.write_emulated = emulator_write_emulated,
	.cmpxchg_emulated = emulator_cmpxchg_emulated,
};

int emulate_instruction(struct kvm_vcpu *vcpu,
			struct kvm_run *run,
			unsigned long cr2,
			u16 error_code)
{
	struct x86_emulate_ctxt emulate_ctxt;
	int r;
	int cs_db, cs_l;

	vcpu->mmio_fault_cr2 = cr2;
	kvm_x86_ops->cache_regs(vcpu);

	kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);

	emulate_ctxt.vcpu = vcpu;
	emulate_ctxt.eflags = kvm_x86_ops->get_rflags(vcpu);
	emulate_ctxt.cr2 = cr2;
	emulate_ctxt.mode = (emulate_ctxt.eflags & X86_EFLAGS_VM)
		? X86EMUL_MODE_REAL : cs_l
		? X86EMUL_MODE_PROT64 : cs_db
		? X86EMUL_MODE_PROT32 : X86EMUL_MODE_PROT16;

	if (emulate_ctxt.mode == X86EMUL_MODE_PROT64) {
		emulate_ctxt.cs_base = 0;
		emulate_ctxt.ds_base = 0;
		emulate_ctxt.es_base = 0;
		emulate_ctxt.ss_base = 0;
	} else {
		emulate_ctxt.cs_base = get_segment_base(vcpu, VCPU_SREG_CS);
		emulate_ctxt.ds_base = get_segment_base(vcpu, VCPU_SREG_DS);
		emulate_ctxt.es_base = get_segment_base(vcpu, VCPU_SREG_ES);
		emulate_ctxt.ss_base = get_segment_base(vcpu, VCPU_SREG_SS);
	}

	emulate_ctxt.gs_base = get_segment_base(vcpu, VCPU_SREG_GS);
	emulate_ctxt.fs_base = get_segment_base(vcpu, VCPU_SREG_FS);

	vcpu->mmio_is_write = 0;
	vcpu->pio.string = 0;
	r = x86_decode_insn(&emulate_ctxt, &emulate_ops);
	if (r == 0)
		r = x86_emulate_insn(&emulate_ctxt, &emulate_ops);

	if (vcpu->pio.string)
		return EMULATE_DO_MMIO;

	if ((r || vcpu->mmio_is_write) && run) {
		run->exit_reason = KVM_EXIT_MMIO;
		run->mmio.phys_addr = vcpu->mmio_phys_addr;
		memcpy(run->mmio.data, vcpu->mmio_data, 8);
		run->mmio.len = vcpu->mmio_size;
		run->mmio.is_write = vcpu->mmio_is_write;
	}

	if (r) {
		if (kvm_mmu_unprotect_page_virt(vcpu, cr2))
			return EMULATE_DONE;
		if (!vcpu->mmio_needed) {
			kvm_report_emulation_failure(vcpu, "mmio");
			return EMULATE_FAIL;
		}
		return EMULATE_DO_MMIO;
	}

	kvm_x86_ops->decache_regs(vcpu);
	kvm_x86_ops->set_rflags(vcpu, emulate_ctxt.eflags);

	if (vcpu->mmio_is_write) {
		vcpu->mmio_needed = 0;
		return EMULATE_DO_MMIO;
	}

	return EMULATE_DONE;
}
EXPORT_SYMBOL_GPL(emulate_instruction);

/*
 * The vCPU has executed an HLT instruction with in-kernel mode enabled.
 */
static void kvm_vcpu_block(struct kvm_vcpu *vcpu)
{
	DECLARE_WAITQUEUE(wait, current);

	add_wait_queue(&vcpu->wq, &wait);

	/*
	 * We will block until either an interrupt or a signal wakes us up
	 */
	while (!kvm_cpu_has_interrupt(vcpu)
	       && !signal_pending(current)
	       && vcpu->mp_state != VCPU_MP_STATE_RUNNABLE
	       && vcpu->mp_state != VCPU_MP_STATE_SIPI_RECEIVED) {
		set_current_state(TASK_INTERRUPTIBLE);
		vcpu_put(vcpu);
		schedule();
		vcpu_load(vcpu);
	}

	__set_current_state(TASK_RUNNING);
	remove_wait_queue(&vcpu->wq, &wait);
}

int kvm_emulate_halt(struct kvm_vcpu *vcpu)
{
	++vcpu->stat.halt_exits;
	if (irqchip_in_kernel(vcpu->kvm)) {
		vcpu->mp_state = VCPU_MP_STATE_HALTED;
		kvm_vcpu_block(vcpu);
		if (vcpu->mp_state != VCPU_MP_STATE_RUNNABLE)
			return -EINTR;
		return 1;
	} else {
		vcpu->run->exit_reason = KVM_EXIT_HLT;
		return 0;
	}
}
EXPORT_SYMBOL_GPL(kvm_emulate_halt);
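
/*
 * Hypercall ABI: the call number arrives in RAX with arguments in RBX,
 * RCX, RDX and RSI (all truncated to 32 bits outside long mode) and the
 * result is returned in RAX.  No hypercalls are defined yet, so every
 * number currently returns -KVM_ENOSYS.
 */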
int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
{
	unsigned long nr, a0, a1, a2, a3, ret;

	kvm_x86_ops->cache_regs(vcpu);

	nr = vcpu->regs[VCPU_REGS_RAX];
	a0 = vcpu->regs[VCPU_REGS_RBX];
	a1 = vcpu->regs[VCPU_REGS_RCX];
	a2 = vcpu->regs[VCPU_REGS_RDX];
	a3 = vcpu->regs[VCPU_REGS_RSI];

	if (!is_long_mode(vcpu)) {
		nr &= 0xFFFFFFFF;
		a0 &= 0xFFFFFFFF;
		a1 &= 0xFFFFFFFF;
		a2 &= 0xFFFFFFFF;
		a3 &= 0xFFFFFFFF;
	}

	switch (nr) {
	default:
		ret = -KVM_ENOSYS;
		break;
	}
	vcpu->regs[VCPU_REGS_RAX] = ret;
	kvm_x86_ops->decache_regs(vcpu);
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_emulate_hypercall);
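
/*
 * Rewrite the hypercall instruction at the guest's current rip with the
 * sequence the vendor module considers correct for this host CPU.
 */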
  1186. int kvm_fix_hypercall(struct kvm_vcpu *vcpu)
  1187. {
  1188. char instruction[3];
  1189. int ret = 0;
  1190. mutex_lock(&vcpu->kvm->lock);
  1191. /*
  1192. * Blow out the MMU to ensure that no other VCPU has an active mapping
  1193. * to ensure that the updated hypercall appears atomically across all
  1194. * VCPUs.
  1195. */
  1196. kvm_mmu_zap_all(vcpu->kvm);
  1197. kvm_x86_ops->cache_regs(vcpu);
  1198. kvm_x86_ops->patch_hypercall(vcpu, instruction);
  1199. if (emulator_write_emulated(vcpu->rip, instruction, 3, vcpu)
  1200. != X86EMUL_CONTINUE)
  1201. ret = -EFAULT;
  1202. mutex_unlock(&vcpu->kvm->lock);
  1203. return ret;
  1204. }
  1205. static u64 mk_cr_64(u64 curr_cr, u32 new_val)
  1206. {
  1207. return (curr_cr & ~((1ULL << 32) - 1)) | new_val;
  1208. }
  1209. void realmode_lgdt(struct kvm_vcpu *vcpu, u16 limit, unsigned long base)
  1210. {
  1211. struct descriptor_table dt = { limit, base };
  1212. kvm_x86_ops->set_gdt(vcpu, &dt);
  1213. }
  1214. void realmode_lidt(struct kvm_vcpu *vcpu, u16 limit, unsigned long base)
  1215. {
  1216. struct descriptor_table dt = { limit, base };
  1217. kvm_x86_ops->set_idt(vcpu, &dt);
  1218. }
  1219. void realmode_lmsw(struct kvm_vcpu *vcpu, unsigned long msw,
  1220. unsigned long *rflags)
  1221. {
  1222. lmsw(vcpu, msw);
  1223. *rflags = kvm_x86_ops->get_rflags(vcpu);
  1224. }
  1225. unsigned long realmode_get_cr(struct kvm_vcpu *vcpu, int cr)
  1226. {
  1227. kvm_x86_ops->decache_cr4_guest_bits(vcpu);
  1228. switch (cr) {
  1229. case 0:
  1230. return vcpu->cr0;
  1231. case 2:
  1232. return vcpu->cr2;
  1233. case 3:
  1234. return vcpu->cr3;
  1235. case 4:
  1236. return vcpu->cr4;
  1237. default:
  1238. vcpu_printf(vcpu, "%s: unexpected cr %u\n", __FUNCTION__, cr);
  1239. return 0;
  1240. }
  1241. }
  1242. void realmode_set_cr(struct kvm_vcpu *vcpu, int cr, unsigned long val,
  1243. unsigned long *rflags)
  1244. {
  1245. switch (cr) {
  1246. case 0:
  1247. set_cr0(vcpu, mk_cr_64(vcpu->cr0, val));
  1248. *rflags = kvm_x86_ops->get_rflags(vcpu);
  1249. break;
  1250. case 2:
  1251. vcpu->cr2 = val;
  1252. break;
  1253. case 3:
  1254. set_cr3(vcpu, val);
  1255. break;
  1256. case 4:
  1257. set_cr4(vcpu, mk_cr_64(vcpu->cr4, val));
  1258. break;
  1259. default:
  1260. vcpu_printf(vcpu, "%s: unexpected cr %u\n", __FUNCTION__, cr);
  1261. }
  1262. }
  1263. int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
  1264. {
  1265. u64 data;
  1266. switch (msr) {
  1267. case 0xc0010010: /* SYSCFG */
  1268. case 0xc0010015: /* HWCR */
  1269. case MSR_IA32_PLATFORM_ID:
  1270. case MSR_IA32_P5_MC_ADDR:
  1271. case MSR_IA32_P5_MC_TYPE:
  1272. case MSR_IA32_MC0_CTL:
  1273. case MSR_IA32_MCG_STATUS:
  1274. case MSR_IA32_MCG_CAP:
  1275. case MSR_IA32_MC0_MISC:
  1276. case MSR_IA32_MC0_MISC+4:
  1277. case MSR_IA32_MC0_MISC+8:
  1278. case MSR_IA32_MC0_MISC+12:
  1279. case MSR_IA32_MC0_MISC+16:
  1280. case MSR_IA32_UCODE_REV:
  1281. case MSR_IA32_PERF_STATUS:
  1282. case MSR_IA32_EBL_CR_POWERON:
  1283. /* MTRR registers */
  1284. case 0xfe:
  1285. case 0x200 ... 0x2ff:
  1286. data = 0;
  1287. break;
  1288. case 0xcd: /* fsb frequency */
  1289. data = 3;
  1290. break;
  1291. case MSR_IA32_APICBASE:
  1292. data = kvm_get_apic_base(vcpu);
  1293. break;
  1294. case MSR_IA32_MISC_ENABLE:
  1295. data = vcpu->ia32_misc_enable_msr;
  1296. break;
  1297. #ifdef CONFIG_X86_64
  1298. case MSR_EFER:
  1299. data = vcpu->shadow_efer;
  1300. break;
  1301. #endif
  1302. default:
  1303. pr_unimpl(vcpu, "unhandled rdmsr: 0x%x\n", msr);
  1304. return 1;
  1305. }
  1306. *pdata = data;
  1307. return 0;
  1308. }
  1309. EXPORT_SYMBOL_GPL(kvm_get_msr_common);
  1310. /*
  1311. * Reads an msr value (of 'msr_index') into 'pdata'.
  1312. * Returns 0 on success, non-0 otherwise.
  1313. * Assumes vcpu_load() was already called.
  1314. */
  1315. int kvm_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
  1316. {
  1317. return kvm_x86_ops->get_msr(vcpu, msr_index, pdata);
  1318. }
  1319. #ifdef CONFIG_X86_64
static void set_efer(struct kvm_vcpu *vcpu, u64 efer)
{
        if (efer & EFER_RESERVED_BITS) {
                printk(KERN_DEBUG "set_efer: 0x%llx #GP, reserved bits\n",
                       efer);
                inject_gp(vcpu);
                return;
        }

        if (is_paging(vcpu)
            && (vcpu->shadow_efer & EFER_LME) != (efer & EFER_LME)) {
                printk(KERN_DEBUG "set_efer: #GP, change LME while paging\n");
                inject_gp(vcpu);
                return;
        }

        kvm_x86_ops->set_efer(vcpu, efer);

        efer &= ~EFER_LMA;
        efer |= vcpu->shadow_efer & EFER_LMA;

        vcpu->shadow_efer = efer;
}
#endif

int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)
{
        switch (msr) {
#ifdef CONFIG_X86_64
        case MSR_EFER:
                set_efer(vcpu, data);
                break;
#endif
        case MSR_IA32_MC0_STATUS:
                pr_unimpl(vcpu, "%s: MSR_IA32_MC0_STATUS 0x%llx, nop\n",
                          __FUNCTION__, data);
                break;
        case MSR_IA32_MCG_STATUS:
                pr_unimpl(vcpu, "%s: MSR_IA32_MCG_STATUS 0x%llx, nop\n",
                          __FUNCTION__, data);
                break;
        case MSR_IA32_UCODE_REV:
        case MSR_IA32_UCODE_WRITE:
        case 0x200 ... 0x2ff: /* MTRRs */
                break;
        case MSR_IA32_APICBASE:
                kvm_set_apic_base(vcpu, data);
                break;
        case MSR_IA32_MISC_ENABLE:
                vcpu->ia32_misc_enable_msr = data;
                break;
        default:
                pr_unimpl(vcpu, "unhandled wrmsr: 0x%x\n", msr);
                return 1;
        }
        return 0;
}
EXPORT_SYMBOL_GPL(kvm_set_msr_common);

/*
 * Writes msr value into the appropriate "register".
 * Returns 0 on success, non-0 otherwise.
 * Assumes vcpu_load() was already called.
 */
int kvm_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
{
        return kvm_x86_ops->set_msr(vcpu, msr_index, data);
}

void kvm_resched(struct kvm_vcpu *vcpu)
{
        if (!need_resched())
                return;
        cond_resched();
}
EXPORT_SYMBOL_GPL(kvm_resched);
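
/*
 * Emulate the CPUID instruction from the guest's cpuid table: prefer an
 * exact function match, otherwise fall back to the highest entry in the
 * same class (basic vs. extended, distinguished by bit 31).
 */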
void kvm_emulate_cpuid(struct kvm_vcpu *vcpu)
{
        int i;
        u32 function;
        struct kvm_cpuid_entry *e, *best;

        kvm_x86_ops->cache_regs(vcpu);
        function = vcpu->regs[VCPU_REGS_RAX];
        vcpu->regs[VCPU_REGS_RAX] = 0;
        vcpu->regs[VCPU_REGS_RBX] = 0;
        vcpu->regs[VCPU_REGS_RCX] = 0;
        vcpu->regs[VCPU_REGS_RDX] = 0;
        best = NULL;
        for (i = 0; i < vcpu->cpuid_nent; ++i) {
                e = &vcpu->cpuid_entries[i];
                if (e->function == function) {
                        best = e;
                        break;
                }
                /*
                 * Both basic or both extended?
                 */
                if (((e->function ^ function) & 0x80000000) == 0)
                        if (!best || e->function > best->function)
                                best = e;
        }
        if (best) {
                vcpu->regs[VCPU_REGS_RAX] = best->eax;
                vcpu->regs[VCPU_REGS_RBX] = best->ebx;
                vcpu->regs[VCPU_REGS_RCX] = best->ecx;
                vcpu->regs[VCPU_REGS_RDX] = best->edx;
        }
        kvm_x86_ops->decache_regs(vcpu);
        kvm_x86_ops->skip_emulated_instruction(vcpu);
}
EXPORT_SYMBOL_GPL(kvm_emulate_cpuid);
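
/*
 * Copy PIO data between the vcpu's pio_data page and the pinned guest
 * pages, mapping the latter with vmap().  For an 'in' operation data
 * flows to the guest; for an 'out' operation it flows from the guest.
 */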
static int pio_copy_data(struct kvm_vcpu *vcpu)
{
        void *p = vcpu->pio_data;
        void *q;
        unsigned bytes;
        int nr_pages = vcpu->pio.guest_pages[1] ? 2 : 1;

        q = vmap(vcpu->pio.guest_pages, nr_pages, VM_READ|VM_WRITE,
                 PAGE_KERNEL);
        if (!q) {
                free_pio_guest_pages(vcpu);
                return -ENOMEM;
        }
        q += vcpu->pio.guest_page_offset;
        bytes = vcpu->pio.size * vcpu->pio.cur_count;
        if (vcpu->pio.in)
                memcpy(q, p, bytes);
        else
                memcpy(p, q, bytes);
        q -= vcpu->pio.guest_page_offset;
        vunmap(q);
        free_pio_guest_pages(vcpu);
        return 0;
}
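
/*
 * Finish a (possibly partial) PIO operation: for non-string I/O the
 * result lands in RAX; for string I/O, RSI/RDI (and RCX for rep) are
 * advanced by the number of bytes transferred.
 */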
static int complete_pio(struct kvm_vcpu *vcpu)
{
        struct kvm_pio_request *io = &vcpu->pio;
        long delta;
        int r;

        kvm_x86_ops->cache_regs(vcpu);

        if (!io->string) {
                if (io->in)
                        memcpy(&vcpu->regs[VCPU_REGS_RAX], vcpu->pio_data,
                               io->size);
        } else {
                if (io->in) {
                        r = pio_copy_data(vcpu);
                        if (r) {
                                /* write the registers back before bailing */
                                kvm_x86_ops->decache_regs(vcpu);
                                return r;
                        }
                }

                delta = 1;
                if (io->rep) {
                        delta *= io->cur_count;
                        /*
                         * The size of the register should really depend on
                         * current address size.
                         */
                        vcpu->regs[VCPU_REGS_RCX] -= delta;
                }
                if (io->down)
                        delta = -delta;
                delta *= io->size;
                if (io->in)
                        vcpu->regs[VCPU_REGS_RDI] += delta;
                else
                        vcpu->regs[VCPU_REGS_RSI] += delta;
        }

        kvm_x86_ops->decache_regs(vcpu);

        io->count -= io->cur_count;
        io->cur_count = 0;

        return 0;
}

static void kernel_pio(struct kvm_io_device *pio_dev,
                       struct kvm_vcpu *vcpu,
                       void *pd)
{
        /* TODO: String I/O for in kernel device */

        mutex_lock(&vcpu->kvm->lock);
        if (vcpu->pio.in)
                kvm_iodevice_read(pio_dev, vcpu->pio.port,
                                  vcpu->pio.size,
                                  pd);
        else
                kvm_iodevice_write(pio_dev, vcpu->pio.port,
                                   vcpu->pio.size,
                                   pd);
        mutex_unlock(&vcpu->kvm->lock);
}

static void pio_string_write(struct kvm_io_device *pio_dev,
                             struct kvm_vcpu *vcpu)
{
        struct kvm_pio_request *io = &vcpu->pio;
        void *pd = vcpu->pio_data;
        int i;

        mutex_lock(&vcpu->kvm->lock);
        for (i = 0; i < io->cur_count; i++) {
                kvm_iodevice_write(pio_dev, io->port,
                                   io->size,
                                   pd);
                pd += io->size;
        }
        mutex_unlock(&vcpu->kvm->lock);
}

int kvm_emulate_pio(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
                    int size, unsigned port)
{
        struct kvm_io_device *pio_dev;

        vcpu->run->exit_reason = KVM_EXIT_IO;
        vcpu->run->io.direction = in ? KVM_EXIT_IO_IN : KVM_EXIT_IO_OUT;
        vcpu->run->io.size = vcpu->pio.size = size;
        vcpu->run->io.data_offset = KVM_PIO_PAGE_OFFSET * PAGE_SIZE;
        vcpu->run->io.count = vcpu->pio.count = vcpu->pio.cur_count = 1;
        vcpu->run->io.port = vcpu->pio.port = port;
        vcpu->pio.in = in;
        vcpu->pio.string = 0;
        vcpu->pio.down = 0;
        vcpu->pio.guest_page_offset = 0;
        vcpu->pio.rep = 0;

        kvm_x86_ops->cache_regs(vcpu);
        memcpy(vcpu->pio_data, &vcpu->regs[VCPU_REGS_RAX], 4);
        kvm_x86_ops->decache_regs(vcpu);

        kvm_x86_ops->skip_emulated_instruction(vcpu);

        pio_dev = vcpu_find_pio_dev(vcpu, port);
        if (pio_dev) {
                kernel_pio(pio_dev, vcpu, vcpu->pio_data);
                complete_pio(vcpu);
                return 1;
        }
        return 0;
}
EXPORT_SYMBOL_GPL(kvm_emulate_pio);
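
/*
 * Emulate a string PIO instruction (ins/outs).  At most one or two guest
 * pages are pinned per exit, so a long rep transfer is split into
 * several smaller transactions.
 */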
int kvm_emulate_pio_string(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
                           int size, unsigned long count, int down,
                           gva_t address, int rep, unsigned port)
{
        unsigned now, in_page;
        int i, ret = 0;
        int nr_pages = 1;
        struct page *page;
        struct kvm_io_device *pio_dev;

        vcpu->run->exit_reason = KVM_EXIT_IO;
        vcpu->run->io.direction = in ? KVM_EXIT_IO_IN : KVM_EXIT_IO_OUT;
        vcpu->run->io.size = vcpu->pio.size = size;
        vcpu->run->io.data_offset = KVM_PIO_PAGE_OFFSET * PAGE_SIZE;
        vcpu->run->io.count = vcpu->pio.count = vcpu->pio.cur_count = count;
        vcpu->run->io.port = vcpu->pio.port = port;
        vcpu->pio.in = in;
        vcpu->pio.string = 1;
        vcpu->pio.down = down;
        vcpu->pio.guest_page_offset = offset_in_page(address);
        vcpu->pio.rep = rep;

        if (!count) {
                kvm_x86_ops->skip_emulated_instruction(vcpu);
                return 1;
        }

        if (!down)
                in_page = PAGE_SIZE - offset_in_page(address);
        else
                in_page = offset_in_page(address) + size;
        now = min(count, (unsigned long)in_page / size);
        if (!now) {
                /*
                 * String I/O straddles page boundary.  Pin two guest pages
                 * so that we satisfy atomicity constraints.  Do just one
                 * transaction to avoid complexity.
                 */
                nr_pages = 2;
                now = 1;
        }
        if (down) {
                /*
                 * String I/O in reverse.  Yuck.  Kill the guest, fix later.
                 */
                pr_unimpl(vcpu, "guest string pio down\n");
                inject_gp(vcpu);
                return 1;
        }
        vcpu->run->io.count = now;
        vcpu->pio.cur_count = now;

        if (vcpu->pio.cur_count == vcpu->pio.count)
                kvm_x86_ops->skip_emulated_instruction(vcpu);

        for (i = 0; i < nr_pages; ++i) {
                mutex_lock(&vcpu->kvm->lock);
                page = gva_to_page(vcpu, address + i * PAGE_SIZE);
                if (page)
                        get_page(page);
                vcpu->pio.guest_pages[i] = page;
                mutex_unlock(&vcpu->kvm->lock);
                if (!page) {
                        inject_gp(vcpu);
                        free_pio_guest_pages(vcpu);
                        return 1;
                }
        }

        pio_dev = vcpu_find_pio_dev(vcpu, port);
        if (!vcpu->pio.in) {
                /* string PIO write */
                ret = pio_copy_data(vcpu);
                if (ret >= 0 && pio_dev) {
                        pio_string_write(pio_dev, vcpu);
                        complete_pio(vcpu);
                        if (vcpu->pio.count == 0)
                                ret = 1;
                }
        } else if (pio_dev)
                pr_unimpl(vcpu, "no string pio read support yet, "
                          "port %x size %d count %ld\n",
                          port, size, count);

        return ret;
}
EXPORT_SYMBOL_GPL(kvm_emulate_pio_string);

/*
 * Check if userspace requested an interrupt window, and that the
 * interrupt window is open.
 *
 * No need to exit to userspace if we already have an interrupt queued.
 */
static int dm_request_for_irq_injection(struct kvm_vcpu *vcpu,
                                        struct kvm_run *kvm_run)
{
        return (!vcpu->irq_summary &&
                kvm_run->request_interrupt_window &&
                vcpu->interrupt_window_open &&
                (kvm_x86_ops->get_rflags(vcpu) & X86_EFLAGS_IF));
}

static void post_kvm_run_save(struct kvm_vcpu *vcpu,
                              struct kvm_run *kvm_run)
{
        kvm_run->if_flag = (kvm_x86_ops->get_rflags(vcpu) & X86_EFLAGS_IF) != 0;
        kvm_run->cr8 = get_cr8(vcpu);
        kvm_run->apic_base = kvm_get_apic_base(vcpu);
        if (irqchip_in_kernel(vcpu->kvm))
                kvm_run->ready_for_interrupt_injection = 1;
        else
                kvm_run->ready_for_interrupt_injection =
                        (vcpu->interrupt_window_open &&
                         vcpu->irq_summary == 0);
}
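
/*
 * The inner run loop: reload the mmu, inject pending events, enter the
 * guest with interrupts and preemption disabled, then hand the exit to
 * the backend.  Lightweight exits loop here without returning to
 * userspace.
 */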
static int __vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
        int r;

        if (unlikely(vcpu->mp_state == VCPU_MP_STATE_SIPI_RECEIVED)) {
                printk(KERN_DEBUG "vcpu %d received sipi with vector # %x\n",
                       vcpu->vcpu_id, vcpu->sipi_vector);
                kvm_lapic_reset(vcpu);
                kvm_x86_ops->vcpu_reset(vcpu);
                vcpu->mp_state = VCPU_MP_STATE_RUNNABLE;
        }

preempted:
        if (vcpu->guest_debug.enabled)
                kvm_x86_ops->guest_debug_pre(vcpu);

again:
        r = kvm_mmu_reload(vcpu);
        if (unlikely(r))
                goto out;

        preempt_disable();

        kvm_x86_ops->prepare_guest_switch(vcpu);
        kvm_load_guest_fpu(vcpu);

        local_irq_disable();

        if (signal_pending(current)) {
                local_irq_enable();
                preempt_enable();
                r = -EINTR;
                kvm_run->exit_reason = KVM_EXIT_INTR;
                ++vcpu->stat.signal_exits;
                goto out;
        }

        if (irqchip_in_kernel(vcpu->kvm))
                kvm_x86_ops->inject_pending_irq(vcpu);
        else if (!vcpu->mmio_read_completed)
                kvm_x86_ops->inject_pending_vectors(vcpu, kvm_run);

        vcpu->guest_mode = 1;
        kvm_guest_enter();

        if (vcpu->requests)
                if (test_and_clear_bit(KVM_TLB_FLUSH, &vcpu->requests))
                        kvm_x86_ops->tlb_flush(vcpu);

        kvm_x86_ops->run(vcpu, kvm_run);

        vcpu->guest_mode = 0;
        local_irq_enable();

        ++vcpu->stat.exits;

        /*
         * We must have an instruction between local_irq_enable() and
         * kvm_guest_exit(), so the timer interrupt isn't delayed by
         * the interrupt shadow.  The stat.exits increment will do nicely.
         * But we need to prevent reordering, hence this barrier():
         */
        barrier();

        kvm_guest_exit();

        preempt_enable();

        /*
         * Profile KVM exit RIPs:
         */
        if (unlikely(prof_on == KVM_PROFILING)) {
                kvm_x86_ops->cache_regs(vcpu);
                profile_hit(KVM_PROFILING, (void *)vcpu->rip);
        }

        r = kvm_x86_ops->handle_exit(kvm_run, vcpu);

        if (r > 0) {
                if (dm_request_for_irq_injection(vcpu, kvm_run)) {
                        r = -EINTR;
                        kvm_run->exit_reason = KVM_EXIT_INTR;
                        ++vcpu->stat.request_irq_exits;
                        goto out;
                }
                if (!need_resched()) {
                        ++vcpu->stat.light_exits;
                        goto again;
                }
        }

out:
        if (r > 0) {
                kvm_resched(vcpu);
                goto preempted;
        }

        post_kvm_run_save(vcpu, kvm_run);

        return r;
}

static int kvm_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
        int r;
        sigset_t sigsaved;

        vcpu_load(vcpu);

        if (unlikely(vcpu->mp_state == VCPU_MP_STATE_UNINITIALIZED)) {
                kvm_vcpu_block(vcpu);
                vcpu_put(vcpu);
                return -EAGAIN;
        }

        if (vcpu->sigset_active)
                sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

        /* re-sync apic's tpr */
        if (!irqchip_in_kernel(vcpu->kvm))
                set_cr8(vcpu, kvm_run->cr8);

        if (vcpu->pio.cur_count) {
                r = complete_pio(vcpu);
                if (r)
                        goto out;
        }

        if (vcpu->mmio_needed) {
                memcpy(vcpu->mmio_data, kvm_run->mmio.data, 8);
                vcpu->mmio_read_completed = 1;
                vcpu->mmio_needed = 0;
                r = emulate_instruction(vcpu, kvm_run,
                                        vcpu->mmio_fault_cr2, 0);
                if (r == EMULATE_DO_MMIO) {
                        /*
                         * Read-modify-write.  Back to userspace.
                         */
                        r = 0;
                        goto out;
                }
        }

        if (kvm_run->exit_reason == KVM_EXIT_HYPERCALL) {
                kvm_x86_ops->cache_regs(vcpu);
                vcpu->regs[VCPU_REGS_RAX] = kvm_run->hypercall.ret;
                kvm_x86_ops->decache_regs(vcpu);
        }

        r = __vcpu_run(vcpu, kvm_run);

out:
        if (vcpu->sigset_active)
                sigprocmask(SIG_SETMASK, &sigsaved, NULL);

        vcpu_put(vcpu);
        return r;
}

static int kvm_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu,
                                   struct kvm_regs *regs)
{
        vcpu_load(vcpu);

        kvm_x86_ops->cache_regs(vcpu);

        regs->rax = vcpu->regs[VCPU_REGS_RAX];
        regs->rbx = vcpu->regs[VCPU_REGS_RBX];
        regs->rcx = vcpu->regs[VCPU_REGS_RCX];
        regs->rdx = vcpu->regs[VCPU_REGS_RDX];
        regs->rsi = vcpu->regs[VCPU_REGS_RSI];
        regs->rdi = vcpu->regs[VCPU_REGS_RDI];
        regs->rsp = vcpu->regs[VCPU_REGS_RSP];
        regs->rbp = vcpu->regs[VCPU_REGS_RBP];
#ifdef CONFIG_X86_64
        regs->r8 = vcpu->regs[VCPU_REGS_R8];
        regs->r9 = vcpu->regs[VCPU_REGS_R9];
        regs->r10 = vcpu->regs[VCPU_REGS_R10];
        regs->r11 = vcpu->regs[VCPU_REGS_R11];
        regs->r12 = vcpu->regs[VCPU_REGS_R12];
        regs->r13 = vcpu->regs[VCPU_REGS_R13];
        regs->r14 = vcpu->regs[VCPU_REGS_R14];
        regs->r15 = vcpu->regs[VCPU_REGS_R15];
#endif

        regs->rip = vcpu->rip;
        regs->rflags = kvm_x86_ops->get_rflags(vcpu);

        /*
         * Don't leak debug flags in case they were set for guest debugging
         */
        if (vcpu->guest_debug.enabled && vcpu->guest_debug.singlestep)
                regs->rflags &= ~(X86_EFLAGS_TF | X86_EFLAGS_RF);

        vcpu_put(vcpu);

        return 0;
}

static int kvm_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu,
                                   struct kvm_regs *regs)
{
        vcpu_load(vcpu);

        vcpu->regs[VCPU_REGS_RAX] = regs->rax;
        vcpu->regs[VCPU_REGS_RBX] = regs->rbx;
        vcpu->regs[VCPU_REGS_RCX] = regs->rcx;
        vcpu->regs[VCPU_REGS_RDX] = regs->rdx;
        vcpu->regs[VCPU_REGS_RSI] = regs->rsi;
        vcpu->regs[VCPU_REGS_RDI] = regs->rdi;
        vcpu->regs[VCPU_REGS_RSP] = regs->rsp;
        vcpu->regs[VCPU_REGS_RBP] = regs->rbp;
#ifdef CONFIG_X86_64
        vcpu->regs[VCPU_REGS_R8] = regs->r8;
        vcpu->regs[VCPU_REGS_R9] = regs->r9;
        vcpu->regs[VCPU_REGS_R10] = regs->r10;
        vcpu->regs[VCPU_REGS_R11] = regs->r11;
        vcpu->regs[VCPU_REGS_R12] = regs->r12;
        vcpu->regs[VCPU_REGS_R13] = regs->r13;
        vcpu->regs[VCPU_REGS_R14] = regs->r14;
        vcpu->regs[VCPU_REGS_R15] = regs->r15;
#endif

        vcpu->rip = regs->rip;
        kvm_x86_ops->set_rflags(vcpu, regs->rflags);

        kvm_x86_ops->decache_regs(vcpu);

        vcpu_put(vcpu);

        return 0;
}

static void get_segment(struct kvm_vcpu *vcpu,
                        struct kvm_segment *var, int seg)
{
        return kvm_x86_ops->get_segment(vcpu, var, seg);
}

static int kvm_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
                                    struct kvm_sregs *sregs)
{
        struct descriptor_table dt;
        int pending_vec;

        vcpu_load(vcpu);

        get_segment(vcpu, &sregs->cs, VCPU_SREG_CS);
        get_segment(vcpu, &sregs->ds, VCPU_SREG_DS);
        get_segment(vcpu, &sregs->es, VCPU_SREG_ES);
        get_segment(vcpu, &sregs->fs, VCPU_SREG_FS);
        get_segment(vcpu, &sregs->gs, VCPU_SREG_GS);
        get_segment(vcpu, &sregs->ss, VCPU_SREG_SS);

        get_segment(vcpu, &sregs->tr, VCPU_SREG_TR);
        get_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR);

        kvm_x86_ops->get_idt(vcpu, &dt);
        sregs->idt.limit = dt.limit;
        sregs->idt.base = dt.base;
        kvm_x86_ops->get_gdt(vcpu, &dt);
        sregs->gdt.limit = dt.limit;
        sregs->gdt.base = dt.base;

        kvm_x86_ops->decache_cr4_guest_bits(vcpu);
        sregs->cr0 = vcpu->cr0;
        sregs->cr2 = vcpu->cr2;
        sregs->cr3 = vcpu->cr3;
        sregs->cr4 = vcpu->cr4;
        sregs->cr8 = get_cr8(vcpu);
        sregs->efer = vcpu->shadow_efer;
        sregs->apic_base = kvm_get_apic_base(vcpu);

        if (irqchip_in_kernel(vcpu->kvm)) {
                memset(sregs->interrupt_bitmap, 0,
                       sizeof sregs->interrupt_bitmap);
                pending_vec = kvm_x86_ops->get_irq(vcpu);
                if (pending_vec >= 0)
                        set_bit(pending_vec,
                                (unsigned long *)sregs->interrupt_bitmap);
        } else
                memcpy(sregs->interrupt_bitmap, vcpu->irq_pending,
                       sizeof sregs->interrupt_bitmap);

        vcpu_put(vcpu);

        return 0;
}

static void set_segment(struct kvm_vcpu *vcpu,
                        struct kvm_segment *var, int seg)
{
        return kvm_x86_ops->set_segment(vcpu, var, seg);
}

static int kvm_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
                                    struct kvm_sregs *sregs)
{
        int mmu_reset_needed = 0;
        int i, pending_vec, max_bits;
        struct descriptor_table dt;

        vcpu_load(vcpu);

        dt.limit = sregs->idt.limit;
        dt.base = sregs->idt.base;
        kvm_x86_ops->set_idt(vcpu, &dt);
        dt.limit = sregs->gdt.limit;
        dt.base = sregs->gdt.base;
        kvm_x86_ops->set_gdt(vcpu, &dt);

        vcpu->cr2 = sregs->cr2;
        mmu_reset_needed |= vcpu->cr3 != sregs->cr3;
        vcpu->cr3 = sregs->cr3;

        set_cr8(vcpu, sregs->cr8);

        mmu_reset_needed |= vcpu->shadow_efer != sregs->efer;
#ifdef CONFIG_X86_64
        kvm_x86_ops->set_efer(vcpu, sregs->efer);
#endif
        kvm_set_apic_base(vcpu, sregs->apic_base);

        kvm_x86_ops->decache_cr4_guest_bits(vcpu);

        mmu_reset_needed |= vcpu->cr0 != sregs->cr0;
        vcpu->cr0 = sregs->cr0;
        kvm_x86_ops->set_cr0(vcpu, sregs->cr0);

        mmu_reset_needed |= vcpu->cr4 != sregs->cr4;
        kvm_x86_ops->set_cr4(vcpu, sregs->cr4);
        if (!is_long_mode(vcpu) && is_pae(vcpu))
                load_pdptrs(vcpu, vcpu->cr3);

        if (mmu_reset_needed)
                kvm_mmu_reset_context(vcpu);

        if (!irqchip_in_kernel(vcpu->kvm)) {
                memcpy(vcpu->irq_pending, sregs->interrupt_bitmap,
                       sizeof vcpu->irq_pending);
                vcpu->irq_summary = 0;
                for (i = 0; i < ARRAY_SIZE(vcpu->irq_pending); ++i)
                        if (vcpu->irq_pending[i])
                                __set_bit(i, &vcpu->irq_summary);
        } else {
                max_bits = (sizeof sregs->interrupt_bitmap) << 3;
                pending_vec = find_first_bit(
                        (const unsigned long *)sregs->interrupt_bitmap,
                        max_bits);
                /* Only pending external irq is handled here */
                if (pending_vec < max_bits) {
                        kvm_x86_ops->set_irq(vcpu, pending_vec);
                        printk(KERN_DEBUG "Set back pending irq %d\n",
                               pending_vec);
                }
        }

        set_segment(vcpu, &sregs->cs, VCPU_SREG_CS);
        set_segment(vcpu, &sregs->ds, VCPU_SREG_DS);
        set_segment(vcpu, &sregs->es, VCPU_SREG_ES);
        set_segment(vcpu, &sregs->fs, VCPU_SREG_FS);
        set_segment(vcpu, &sregs->gs, VCPU_SREG_GS);
        set_segment(vcpu, &sregs->ss, VCPU_SREG_SS);

        set_segment(vcpu, &sregs->tr, VCPU_SREG_TR);
        set_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR);

        vcpu_put(vcpu);

        return 0;
}

void kvm_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l)
{
        struct kvm_segment cs;

        get_segment(vcpu, &cs, VCPU_SREG_CS);
        *db = cs.db;
        *l = cs.l;
}
EXPORT_SYMBOL_GPL(kvm_get_cs_db_l_bits);

/*
 * List of msr numbers which we expose to userspace through KVM_GET_MSRS,
 * KVM_SET_MSRS, and KVM_GET_MSR_INDEX_LIST.
 *
 * This list is modified at module load time to reflect the
 * capabilities of the host cpu.
 */
static u32 msrs_to_save[] = {
        MSR_IA32_SYSENTER_CS, MSR_IA32_SYSENTER_ESP, MSR_IA32_SYSENTER_EIP,
        MSR_K6_STAR,
#ifdef CONFIG_X86_64
        MSR_CSTAR, MSR_KERNEL_GS_BASE, MSR_SYSCALL_MASK, MSR_LSTAR,
#endif
        MSR_IA32_TIME_STAMP_COUNTER,
};

static unsigned num_msrs_to_save;

static u32 emulated_msrs[] = {
        MSR_IA32_MISC_ENABLE,
};
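
/*
 * Probe each MSR in msrs_to_save with rdmsr_safe() and compact the list
 * down to those the host actually supports.
 */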
static __init void kvm_init_msr_list(void)
{
        u32 dummy[2];
        unsigned i, j;

        for (i = j = 0; i < ARRAY_SIZE(msrs_to_save); i++) {
                if (rdmsr_safe(msrs_to_save[i], &dummy[0], &dummy[1]) < 0)
                        continue;
                if (j < i)
                        msrs_to_save[j] = msrs_to_save[i];
                j++;
        }
        num_msrs_to_save = j;
}

/*
 * Adapt set_msr() to msr_io()'s calling convention
 */
static int do_set_msr(struct kvm_vcpu *vcpu, unsigned index, u64 *data)
{
        return kvm_set_msr(vcpu, index, *data);
}

/*
 * Read or write a bunch of msrs.  All parameters are kernel addresses.
 *
 * @return number of msrs set successfully.
 */
static int __msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs *msrs,
                    struct kvm_msr_entry *entries,
                    int (*do_msr)(struct kvm_vcpu *vcpu,
                                  unsigned index, u64 *data))
{
        int i;

        vcpu_load(vcpu);

        for (i = 0; i < msrs->nmsrs; ++i)
                if (do_msr(vcpu, entries[i].index, &entries[i].data))
                        break;

        vcpu_put(vcpu);

        return i;
}

/*
 * Read or write a bunch of msrs.  Parameters are user addresses.
 *
 * @return number of msrs set successfully.
 */
static int msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs __user *user_msrs,
                  int (*do_msr)(struct kvm_vcpu *vcpu,
                                unsigned index, u64 *data),
                  int writeback)
{
        struct kvm_msrs msrs;
        struct kvm_msr_entry *entries;
        int r, n;
        unsigned size;

        r = -EFAULT;
        if (copy_from_user(&msrs, user_msrs, sizeof msrs))
                goto out;

        r = -E2BIG;
        if (msrs.nmsrs >= MAX_IO_MSRS)
                goto out;

        r = -ENOMEM;
        size = sizeof(struct kvm_msr_entry) * msrs.nmsrs;
        entries = vmalloc(size);
        if (!entries)
                goto out;

        r = -EFAULT;
        if (copy_from_user(entries, user_msrs->entries, size))
                goto out_free;

        r = n = __msr_io(vcpu, &msrs, entries, do_msr);
        if (r < 0)
                goto out_free;

        r = -EFAULT;
        if (writeback && copy_to_user(user_msrs->entries, entries, size))
                goto out_free;

        r = n;

out_free:
        vfree(entries);
out:
        return r;
}

/*
 * Translate a guest virtual address to a guest physical address.
 */
static int kvm_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
                                    struct kvm_translation *tr)
{
        unsigned long vaddr = tr->linear_address;
        gpa_t gpa;

        vcpu_load(vcpu);
        mutex_lock(&vcpu->kvm->lock);
        gpa = vcpu->mmu.gva_to_gpa(vcpu, vaddr);
        tr->physical_address = gpa;
        tr->valid = gpa != UNMAPPED_GVA;
        tr->writeable = 1;
        tr->usermode = 0;
        mutex_unlock(&vcpu->kvm->lock);
        vcpu_put(vcpu);

        return 0;
}

static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
                                    struct kvm_interrupt *irq)
{
        if (irq->irq < 0 || irq->irq >= 256)
                return -EINVAL;
        if (irqchip_in_kernel(vcpu->kvm))
                return -ENXIO;
        vcpu_load(vcpu);

        set_bit(irq->irq, vcpu->irq_pending);
        set_bit(irq->irq / BITS_PER_LONG, &vcpu->irq_summary);

        vcpu_put(vcpu);

        return 0;
}

static int kvm_vcpu_ioctl_debug_guest(struct kvm_vcpu *vcpu,
                                      struct kvm_debug_guest *dbg)
{
        int r;

        vcpu_load(vcpu);
        r = kvm_x86_ops->set_guest_debug(vcpu, dbg);
        vcpu_put(vcpu);

        return r;
}
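
/*
 * Fault handler for the vcpu mmap: page 0 maps the kvm_run structure,
 * page KVM_PIO_PAGE_OFFSET maps the PIO data area.
 */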
static struct page *kvm_vcpu_nopage(struct vm_area_struct *vma,
                                    unsigned long address,
                                    int *type)
{
        struct kvm_vcpu *vcpu = vma->vm_file->private_data;
        unsigned long pgoff;
        struct page *page;

        pgoff = ((address - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
        if (pgoff == 0)
                page = virt_to_page(vcpu->run);
        else if (pgoff == KVM_PIO_PAGE_OFFSET)
                page = virt_to_page(vcpu->pio_data);
        else
                return NOPAGE_SIGBUS;
        get_page(page);
        if (type != NULL)
                *type = VM_FAULT_MINOR;

        return page;
}

static struct vm_operations_struct kvm_vcpu_vm_ops = {
        .nopage = kvm_vcpu_nopage,
};

static int kvm_vcpu_mmap(struct file *file, struct vm_area_struct *vma)
{
        vma->vm_ops = &kvm_vcpu_vm_ops;
        return 0;
}

static int kvm_vcpu_release(struct inode *inode, struct file *filp)
{
        struct kvm_vcpu *vcpu = filp->private_data;

        fput(vcpu->kvm->filp);
        return 0;
}

static struct file_operations kvm_vcpu_fops = {
        .release        = kvm_vcpu_release,
        .unlocked_ioctl = kvm_vcpu_ioctl,
        .compat_ioctl   = kvm_vcpu_ioctl,
        .mmap           = kvm_vcpu_mmap,
};

/*
 * Allocates an inode for the vcpu.
 */
static int create_vcpu_fd(struct kvm_vcpu *vcpu)
{
        int fd, r;
        struct inode *inode;
        struct file *file;

        r = anon_inode_getfd(&fd, &inode, &file,
                             "kvm-vcpu", &kvm_vcpu_fops, vcpu);
        if (r)
                return r;
        atomic_inc(&vcpu->kvm->filp->f_count);
        return fd;
}

/*
 * Creates some virtual cpus.  Good luck creating more than one.
 */
static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, int n)
{
        int r;
        struct kvm_vcpu *vcpu;

        if (!valid_vcpu(n))
                return -EINVAL;

        vcpu = kvm_x86_ops->vcpu_create(kvm, n);
        if (IS_ERR(vcpu))
                return PTR_ERR(vcpu);

        preempt_notifier_init(&vcpu->preempt_notifier, &kvm_preempt_ops);

        /* We do fxsave: this must be aligned. */
        BUG_ON((unsigned long)&vcpu->host_fx_image & 0xF);

        vcpu_load(vcpu);
        r = kvm_mmu_setup(vcpu);
        vcpu_put(vcpu);
        if (r < 0)
                goto free_vcpu;

        mutex_lock(&kvm->lock);
        if (kvm->vcpus[n]) {
                r = -EEXIST;
                mutex_unlock(&kvm->lock);
                goto mmu_unload;
        }
        kvm->vcpus[n] = vcpu;
        mutex_unlock(&kvm->lock);

        /* Now it's all set up, let userspace reach it */
        r = create_vcpu_fd(vcpu);
        if (r < 0)
                goto unlink;
        return r;

unlink:
        mutex_lock(&kvm->lock);
        kvm->vcpus[n] = NULL;
        mutex_unlock(&kvm->lock);

mmu_unload:
        vcpu_load(vcpu);
        kvm_mmu_unload(vcpu);
        vcpu_put(vcpu);

free_vcpu:
        kvm_x86_ops->vcpu_free(vcpu);
        return r;
}

static void cpuid_fix_nx_cap(struct kvm_vcpu *vcpu)
{
        u64 efer;
        int i;
        struct kvm_cpuid_entry *e, *entry;

        rdmsrl(MSR_EFER, efer);
        entry = NULL;
        for (i = 0; i < vcpu->cpuid_nent; ++i) {
                e = &vcpu->cpuid_entries[i];
                if (e->function == 0x80000001) {
                        entry = e;
                        break;
                }
        }
        if (entry && (entry->edx & (1 << 20)) && !(efer & EFER_NX)) {
                entry->edx &= ~(1 << 20);
                printk(KERN_INFO "kvm: guest NX capability removed\n");
        }
}

static int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu,
                                    struct kvm_cpuid *cpuid,
                                    struct kvm_cpuid_entry __user *entries)
{
        int r;

        r = -E2BIG;
        if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
                goto out;
        r = -EFAULT;
        if (copy_from_user(&vcpu->cpuid_entries, entries,
                           cpuid->nent * sizeof(struct kvm_cpuid_entry)))
                goto out;
        vcpu->cpuid_nent = cpuid->nent;
        cpuid_fix_nx_cap(vcpu);
        return 0;

out:
        return r;
}

static int kvm_vcpu_ioctl_set_sigmask(struct kvm_vcpu *vcpu, sigset_t *sigset)
{
        if (sigset) {
                sigdelsetmask(sigset, sigmask(SIGKILL)|sigmask(SIGSTOP));
                vcpu->sigset_active = 1;
                vcpu->sigset = *sigset;
        } else
                vcpu->sigset_active = 0;
        return 0;
}

/*
 * fxsave fpu state.  Taken from x86_64/processor.h.  To be killed when
 * we have asm/x86/processor.h
 */
struct fxsave {
        u16     cwd;
        u16     swd;
        u16     twd;
        u16     fop;
        u64     rip;
        u64     rdp;
        u32     mxcsr;
        u32     mxcsr_mask;
        u32     st_space[32];   /* 8*16 bytes for each FP-reg = 128 bytes */
#ifdef CONFIG_X86_64
        u32     xmm_space[64];  /* 16*16 bytes for each XMM-reg = 256 bytes */
#else
        u32     xmm_space[32];  /* 8*16 bytes for each XMM-reg = 128 bytes */
#endif
};

static int kvm_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
        struct fxsave *fxsave = (struct fxsave *)&vcpu->guest_fx_image;

        vcpu_load(vcpu);

        memcpy(fpu->fpr, fxsave->st_space, 128);
        fpu->fcw = fxsave->cwd;
        fpu->fsw = fxsave->swd;
        fpu->ftwx = fxsave->twd;
        fpu->last_opcode = fxsave->fop;
        fpu->last_ip = fxsave->rip;
        fpu->last_dp = fxsave->rdp;
        memcpy(fpu->xmm, fxsave->xmm_space, sizeof fxsave->xmm_space);

        vcpu_put(vcpu);

        return 0;
}

static int kvm_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
        struct fxsave *fxsave = (struct fxsave *)&vcpu->guest_fx_image;

        vcpu_load(vcpu);

        memcpy(fxsave->st_space, fpu->fpr, 128);
        fxsave->cwd = fpu->fcw;
        fxsave->swd = fpu->fsw;
        fxsave->twd = fpu->ftwx;
        fxsave->fop = fpu->last_opcode;
        fxsave->rip = fpu->last_ip;
        fxsave->rdp = fpu->last_dp;
        memcpy(fxsave->xmm_space, fpu->xmm, sizeof fxsave->xmm_space);

        vcpu_put(vcpu);

        return 0;
}

static int kvm_vcpu_ioctl_get_lapic(struct kvm_vcpu *vcpu,
                                    struct kvm_lapic_state *s)
{
        vcpu_load(vcpu);
        memcpy(s->regs, vcpu->apic->regs, sizeof *s);
        vcpu_put(vcpu);

        return 0;
}

static int kvm_vcpu_ioctl_set_lapic(struct kvm_vcpu *vcpu,
                                    struct kvm_lapic_state *s)
{
        vcpu_load(vcpu);
        memcpy(vcpu->apic->regs, s->regs, sizeof *s);
        kvm_apic_post_state_restore(vcpu);
        vcpu_put(vcpu);

        return 0;
}
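
/*
 * Dispatcher for the per-vcpu ioctls (KVM_RUN, register, sreg and MSR
 * access, signal masks, FPU and local APIC state).
 */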
static long kvm_vcpu_ioctl(struct file *filp,
                           unsigned int ioctl, unsigned long arg)
{
        struct kvm_vcpu *vcpu = filp->private_data;
        void __user *argp = (void __user *)arg;
        int r = -EINVAL;

        switch (ioctl) {
        case KVM_RUN:
                r = -EINVAL;
                if (arg)
                        goto out;
                r = kvm_vcpu_ioctl_run(vcpu, vcpu->run);
                break;
        case KVM_GET_REGS: {
                struct kvm_regs kvm_regs;

                memset(&kvm_regs, 0, sizeof kvm_regs);
                r = kvm_vcpu_ioctl_get_regs(vcpu, &kvm_regs);
                if (r)
                        goto out;
                r = -EFAULT;
                if (copy_to_user(argp, &kvm_regs, sizeof kvm_regs))
                        goto out;
                r = 0;
                break;
        }
        case KVM_SET_REGS: {
                struct kvm_regs kvm_regs;

                r = -EFAULT;
                if (copy_from_user(&kvm_regs, argp, sizeof kvm_regs))
                        goto out;
                r = kvm_vcpu_ioctl_set_regs(vcpu, &kvm_regs);
                if (r)
                        goto out;
                r = 0;
                break;
        }
        case KVM_GET_SREGS: {
                struct kvm_sregs kvm_sregs;

                memset(&kvm_sregs, 0, sizeof kvm_sregs);
                r = kvm_vcpu_ioctl_get_sregs(vcpu, &kvm_sregs);
                if (r)
                        goto out;
                r = -EFAULT;
                if (copy_to_user(argp, &kvm_sregs, sizeof kvm_sregs))
                        goto out;
                r = 0;
                break;
        }
        case KVM_SET_SREGS: {
                struct kvm_sregs kvm_sregs;

                r = -EFAULT;
                if (copy_from_user(&kvm_sregs, argp, sizeof kvm_sregs))
                        goto out;
                r = kvm_vcpu_ioctl_set_sregs(vcpu, &kvm_sregs);
                if (r)
                        goto out;
                r = 0;
                break;
        }
        case KVM_TRANSLATE: {
                struct kvm_translation tr;

                r = -EFAULT;
                if (copy_from_user(&tr, argp, sizeof tr))
                        goto out;
                r = kvm_vcpu_ioctl_translate(vcpu, &tr);
                if (r)
                        goto out;
                r = -EFAULT;
                if (copy_to_user(argp, &tr, sizeof tr))
                        goto out;
                r = 0;
                break;
        }
        case KVM_INTERRUPT: {
                struct kvm_interrupt irq;

                r = -EFAULT;
                if (copy_from_user(&irq, argp, sizeof irq))
                        goto out;
                r = kvm_vcpu_ioctl_interrupt(vcpu, &irq);
                if (r)
                        goto out;
                r = 0;
                break;
        }
        case KVM_DEBUG_GUEST: {
                struct kvm_debug_guest dbg;

                r = -EFAULT;
                if (copy_from_user(&dbg, argp, sizeof dbg))
                        goto out;
                r = kvm_vcpu_ioctl_debug_guest(vcpu, &dbg);
                if (r)
                        goto out;
                r = 0;
                break;
        }
        case KVM_GET_MSRS:
                r = msr_io(vcpu, argp, kvm_get_msr, 1);
                break;
        case KVM_SET_MSRS:
                r = msr_io(vcpu, argp, do_set_msr, 0);
                break;
        case KVM_SET_CPUID: {
                struct kvm_cpuid __user *cpuid_arg = argp;
                struct kvm_cpuid cpuid;

                r = -EFAULT;
                if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
                        goto out;
                r = kvm_vcpu_ioctl_set_cpuid(vcpu, &cpuid, cpuid_arg->entries);
                if (r)
                        goto out;
                break;
        }
        case KVM_SET_SIGNAL_MASK: {
                struct kvm_signal_mask __user *sigmask_arg = argp;
                struct kvm_signal_mask kvm_sigmask;
                sigset_t sigset, *p;

                p = NULL;
                if (argp) {
                        r = -EFAULT;
                        if (copy_from_user(&kvm_sigmask, argp,
                                           sizeof kvm_sigmask))
                                goto out;
                        r = -EINVAL;
                        if (kvm_sigmask.len != sizeof sigset)
                                goto out;
                        r = -EFAULT;
                        if (copy_from_user(&sigset, sigmask_arg->sigset,
                                           sizeof sigset))
                                goto out;
                        p = &sigset;
                }
                r = kvm_vcpu_ioctl_set_sigmask(vcpu, p);
                break;
        }
        case KVM_GET_FPU: {
                struct kvm_fpu fpu;

                memset(&fpu, 0, sizeof fpu);
                r = kvm_vcpu_ioctl_get_fpu(vcpu, &fpu);
                if (r)
                        goto out;
                r = -EFAULT;
                if (copy_to_user(argp, &fpu, sizeof fpu))
                        goto out;
                r = 0;
                break;
        }
        case KVM_SET_FPU: {
                struct kvm_fpu fpu;

                r = -EFAULT;
                if (copy_from_user(&fpu, argp, sizeof fpu))
                        goto out;
                r = kvm_vcpu_ioctl_set_fpu(vcpu, &fpu);
                if (r)
                        goto out;
                r = 0;
                break;
        }
        case KVM_GET_LAPIC: {
                struct kvm_lapic_state lapic;

                memset(&lapic, 0, sizeof lapic);
                r = kvm_vcpu_ioctl_get_lapic(vcpu, &lapic);
                if (r)
                        goto out;
                r = -EFAULT;
                if (copy_to_user(argp, &lapic, sizeof lapic))
                        goto out;
                r = 0;
                break;
        }
        case KVM_SET_LAPIC: {
                struct kvm_lapic_state lapic;

                r = -EFAULT;
                if (copy_from_user(&lapic, argp, sizeof lapic))
                        goto out;
                r = kvm_vcpu_ioctl_set_lapic(vcpu, &lapic);
                if (r)
                        goto out;
                r = 0;
                break;
        }
        default:
                ;
        }
out:
        return r;
}
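
/*
 * Dispatcher for the per-vm ioctls: vcpu creation, memory region setup,
 * dirty logging and the in-kernel interrupt chips.
 */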
static long kvm_vm_ioctl(struct file *filp,
                         unsigned int ioctl, unsigned long arg)
{
        struct kvm *kvm = filp->private_data;
        void __user *argp = (void __user *)arg;
        int r = -EINVAL;

        switch (ioctl) {
        case KVM_CREATE_VCPU:
                r = kvm_vm_ioctl_create_vcpu(kvm, arg);
                if (r < 0)
                        goto out;
                break;
        case KVM_SET_MEMORY_REGION: {
                struct kvm_memory_region kvm_mem;

                r = -EFAULT;
                if (copy_from_user(&kvm_mem, argp, sizeof kvm_mem))
                        goto out;
                r = kvm_vm_ioctl_set_memory_region(kvm, &kvm_mem);
                if (r)
                        goto out;
                break;
        }
        case KVM_GET_DIRTY_LOG: {
                struct kvm_dirty_log log;

                r = -EFAULT;
                if (copy_from_user(&log, argp, sizeof log))
                        goto out;
                r = kvm_vm_ioctl_get_dirty_log(kvm, &log);
                if (r)
                        goto out;
                break;
        }
        case KVM_SET_MEMORY_ALIAS: {
                struct kvm_memory_alias alias;

                r = -EFAULT;
                if (copy_from_user(&alias, argp, sizeof alias))
                        goto out;
                r = kvm_vm_ioctl_set_memory_alias(kvm, &alias);
                if (r)
                        goto out;
                break;
        }
        case KVM_CREATE_IRQCHIP:
                r = -ENOMEM;
                kvm->vpic = kvm_create_pic(kvm);
                if (kvm->vpic) {
                        r = kvm_ioapic_init(kvm);
                        if (r) {
                                kfree(kvm->vpic);
                                kvm->vpic = NULL;
                                goto out;
                        }
                } else
                        goto out;
                break;
        case KVM_IRQ_LINE: {
                struct kvm_irq_level irq_event;

                r = -EFAULT;
                if (copy_from_user(&irq_event, argp, sizeof irq_event))
                        goto out;
                if (irqchip_in_kernel(kvm)) {
                        mutex_lock(&kvm->lock);
                        if (irq_event.irq < 16)
                                kvm_pic_set_irq(pic_irqchip(kvm),
                                                irq_event.irq,
                                                irq_event.level);
                        kvm_ioapic_set_irq(kvm->vioapic,
                                           irq_event.irq,
                                           irq_event.level);
                        mutex_unlock(&kvm->lock);
                        r = 0;
                }
                break;
        }
        case KVM_GET_IRQCHIP: {
                /* 0: PIC master, 1: PIC slave, 2: IOAPIC */
                struct kvm_irqchip chip;

                r = -EFAULT;
                if (copy_from_user(&chip, argp, sizeof chip))
                        goto out;
                r = -ENXIO;
                if (!irqchip_in_kernel(kvm))
                        goto out;
                r = kvm_vm_ioctl_get_irqchip(kvm, &chip);
                if (r)
                        goto out;
                r = -EFAULT;
                if (copy_to_user(argp, &chip, sizeof chip))
                        goto out;
                r = 0;
                break;
        }
        case KVM_SET_IRQCHIP: {
                /* 0: PIC master, 1: PIC slave, 2: IOAPIC */
                struct kvm_irqchip chip;

                r = -EFAULT;
                if (copy_from_user(&chip, argp, sizeof chip))
                        goto out;
                r = -ENXIO;
                if (!irqchip_in_kernel(kvm))
                        goto out;
                r = kvm_vm_ioctl_set_irqchip(kvm, &chip);
                if (r)
                        goto out;
                r = 0;
                break;
        }
        default:
                ;
        }
out:
        return r;
}

static struct page *kvm_vm_nopage(struct vm_area_struct *vma,
                                  unsigned long address,
                                  int *type)
{
        struct kvm *kvm = vma->vm_file->private_data;
        unsigned long pgoff;
        struct page *page;

        pgoff = ((address - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
        page = gfn_to_page(kvm, pgoff);
        if (!page)
                return NOPAGE_SIGBUS;
        get_page(page);
        if (type != NULL)
                *type = VM_FAULT_MINOR;

        return page;
}

static struct vm_operations_struct kvm_vm_vm_ops = {
        .nopage = kvm_vm_nopage,
};

static int kvm_vm_mmap(struct file *file, struct vm_area_struct *vma)
{
        vma->vm_ops = &kvm_vm_vm_ops;
        return 0;
}

static struct file_operations kvm_vm_fops = {
        .release        = kvm_vm_release,
        .unlocked_ioctl = kvm_vm_ioctl,
        .compat_ioctl   = kvm_vm_ioctl,
        .mmap           = kvm_vm_mmap,
};

static int kvm_dev_ioctl_create_vm(void)
{
        int fd, r;
        struct inode *inode;
        struct file *file;
        struct kvm *kvm;

        kvm = kvm_create_vm();
        if (IS_ERR(kvm))
                return PTR_ERR(kvm);
        r = anon_inode_getfd(&fd, &inode, &file, "kvm-vm", &kvm_vm_fops, kvm);
        if (r) {
                kvm_destroy_vm(kvm);
                return r;
        }
        kvm->filp = file;

        return fd;
}

static long kvm_dev_ioctl(struct file *filp,
                          unsigned int ioctl, unsigned long arg)
{
        void __user *argp = (void __user *)arg;
        long r = -EINVAL;

        switch (ioctl) {
        case KVM_GET_API_VERSION:
                r = -EINVAL;
                if (arg)
                        goto out;
                r = KVM_API_VERSION;
                break;
        case KVM_CREATE_VM:
                r = -EINVAL;
                if (arg)
                        goto out;
                r = kvm_dev_ioctl_create_vm();
                break;
        case KVM_GET_MSR_INDEX_LIST: {
                struct kvm_msr_list __user *user_msr_list = argp;
                struct kvm_msr_list msr_list;
                unsigned n;

                r = -EFAULT;
                if (copy_from_user(&msr_list, user_msr_list, sizeof msr_list))
                        goto out;
                n = msr_list.nmsrs;
                msr_list.nmsrs = num_msrs_to_save + ARRAY_SIZE(emulated_msrs);
                if (copy_to_user(user_msr_list, &msr_list, sizeof msr_list))
                        goto out;
                r = -E2BIG;
                if (n < num_msrs_to_save)
                        goto out;
                r = -EFAULT;
                if (copy_to_user(user_msr_list->indices, &msrs_to_save,
                                 num_msrs_to_save * sizeof(u32)))
                        goto out;
                /* indices is a u32 array, so advance by elements, not bytes */
                if (copy_to_user(user_msr_list->indices + num_msrs_to_save,
                                 &emulated_msrs,
                                 ARRAY_SIZE(emulated_msrs) * sizeof(u32)))
                        goto out;
                r = 0;
                break;
        }
        case KVM_CHECK_EXTENSION: {
                int ext = (long)argp;

                switch (ext) {
                case KVM_CAP_IRQCHIP:
                case KVM_CAP_HLT:
                        r = 1;
                        break;
                default:
                        r = 0;
                        break;
                }
                break;
        }
        case KVM_GET_VCPU_MMAP_SIZE:
                r = -EINVAL;
                if (arg)
                        goto out;
                r = 2 * PAGE_SIZE;
                break;
        default:
                ;
        }
out:
        return r;
}

static struct file_operations kvm_chardev_ops = {
        .unlocked_ioctl = kvm_dev_ioctl,
        .compat_ioctl   = kvm_dev_ioctl,
};

static struct miscdevice kvm_dev = {
        KVM_MINOR,
        "kvm",
        &kvm_chardev_ops,
};
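
/*
 * For orientation, a sketch of how userspace typically drives the device
 * above (illustrative only, not part of this file; error handling and the
 * memory/register setup ioctls are omitted):
 *
 *      int kvm = open("/dev/kvm", O_RDWR);
 *      assert(ioctl(kvm, KVM_GET_API_VERSION, 0) == KVM_API_VERSION);
 *      int vm = ioctl(kvm, KVM_CREATE_VM, 0);
 *      int vcpu = ioctl(vm, KVM_CREATE_VCPU, 0);
 *      long sz = ioctl(kvm, KVM_GET_VCPU_MMAP_SIZE, 0);
 *      struct kvm_run *run = mmap(NULL, sz, PROT_READ | PROT_WRITE,
 *                                 MAP_SHARED, vcpu, 0);
 *      ioctl(vcpu, KVM_RUN, 0);
 *      switch (run->exit_reason) { ... }
 *
 * Each ioctl above is dispatched by kvm_dev_ioctl(), kvm_vm_ioctl() or
 * kvm_vcpu_ioctl() in this file.
 */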

/*
 * Make sure that a cpu that is being hot-unplugged does not have any vcpus
 * cached on it.
 */
static void decache_vcpus_on_cpu(int cpu)
{
        struct kvm *vm;
        struct kvm_vcpu *vcpu;
        int i;

        spin_lock(&kvm_lock);
        list_for_each_entry(vm, &vm_list, vm_list)
                for (i = 0; i < KVM_MAX_VCPUS; ++i) {
                        vcpu = vm->vcpus[i];
                        if (!vcpu)
                                continue;
                        /*
                         * If the vcpu is locked, then it is running on some
                         * other cpu and therefore it is not cached on the
                         * cpu in question.
                         *
                         * If it's not locked, check the last cpu it executed
                         * on.
                         */
                        if (mutex_trylock(&vcpu->mutex)) {
                                if (vcpu->cpu == cpu) {
                                        kvm_x86_ops->vcpu_decache(vcpu);
                                        vcpu->cpu = -1;
                                }
                                mutex_unlock(&vcpu->mutex);
                        }
                }
        spin_unlock(&kvm_lock);
}
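
/*
 * Enable/disable the virtualization extensions on the current cpu.  The
 * cpus_hardware_enabled mask keeps both operations idempotent, which
 * matters because they are invoked from hotplug, reboot and suspend
 * paths as well as module init/exit.
 */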
static void hardware_enable(void *junk)
{
        int cpu = raw_smp_processor_id();

        if (cpu_isset(cpu, cpus_hardware_enabled))
                return;
        cpu_set(cpu, cpus_hardware_enabled);
        kvm_x86_ops->hardware_enable(NULL);
}

static void hardware_disable(void *junk)
{
        int cpu = raw_smp_processor_id();

        if (!cpu_isset(cpu, cpus_hardware_enabled))
                return;
        cpu_clear(cpu, cpus_hardware_enabled);
        decache_vcpus_on_cpu(cpu);
        kvm_x86_ops->hardware_disable(NULL);
}

static int kvm_cpu_hotplug(struct notifier_block *notifier, unsigned long val,
                           void *v)
{
        int cpu = (long)v;

        switch (val) {
        case CPU_DYING:
        case CPU_DYING_FROZEN:
                printk(KERN_INFO "kvm: disabling virtualization on CPU%d\n",
                       cpu);
                hardware_disable(NULL);
                break;
        case CPU_UP_CANCELED:
        case CPU_UP_CANCELED_FROZEN:
                printk(KERN_INFO "kvm: disabling virtualization on CPU%d\n",
                       cpu);
                smp_call_function_single(cpu, hardware_disable, NULL, 0, 1);
                break;
        case CPU_ONLINE:
        case CPU_ONLINE_FROZEN:
                printk(KERN_INFO "kvm: enabling virtualization on CPU%d\n",
                       cpu);
                smp_call_function_single(cpu, hardware_enable, NULL, 0, 1);
                break;
        }
        return NOTIFY_OK;
}

static int kvm_reboot(struct notifier_block *notifier, unsigned long val,
                      void *v)
{
        if (val == SYS_RESTART) {
                /*
                 * Some (well, at least mine) BIOSes hang on reboot if
                 * in vmx root mode.
                 */
                printk(KERN_INFO "kvm: exiting hardware virtualization\n");
                on_each_cpu(hardware_disable, NULL, 0, 1);
        }
        return NOTIFY_OK;
}

static struct notifier_block kvm_reboot_notifier = {
        .notifier_call = kvm_reboot,
        .priority = 0,
};

void kvm_io_bus_init(struct kvm_io_bus *bus)
{
        memset(bus, 0, sizeof(*bus));
}

void kvm_io_bus_destroy(struct kvm_io_bus *bus)
{
        int i;

        for (i = 0; i < bus->dev_count; i++) {
                struct kvm_io_device *pos = bus->devs[i];

                kvm_iodevice_destructor(pos);
        }
}

struct kvm_io_device *kvm_io_bus_find_dev(struct kvm_io_bus *bus, gpa_t addr)
{
        int i;

        for (i = 0; i < bus->dev_count; i++) {
                struct kvm_io_device *pos = bus->devs[i];

                if (pos->in_range(pos, addr))
                        return pos;
        }

        return NULL;
}

void kvm_io_bus_register_dev(struct kvm_io_bus *bus, struct kvm_io_device *dev)
{
        BUG_ON(bus->dev_count > (NR_IOBUS_DEVS-1));

        bus->devs[bus->dev_count++] = dev;
}

static struct notifier_block kvm_cpu_notifier = {
        .notifier_call = kvm_cpu_hotplug,
        .priority = 20, /* must be > scheduler priority */
};
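
/*
 * debugfs backend: sum a per-vcpu u32 statistic, located 'offset' bytes
 * into struct kvm_vcpu, across all vms and vcpus.
 */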
static u64 stat_get(void *_offset)
{
        unsigned offset = (long)_offset;
        u64 total = 0;
        struct kvm *kvm;
        struct kvm_vcpu *vcpu;
        int i;

        spin_lock(&kvm_lock);
        list_for_each_entry(kvm, &vm_list, vm_list)
                for (i = 0; i < KVM_MAX_VCPUS; ++i) {
                        vcpu = kvm->vcpus[i];
                        if (vcpu)
                                total += *(u32 *)((void *)vcpu + offset);
                }
        spin_unlock(&kvm_lock);
        return total;
}

DEFINE_SIMPLE_ATTRIBUTE(stat_fops, stat_get, NULL, "%llu\n");

static __init void kvm_init_debug(void)
{
        struct kvm_stats_debugfs_item *p;

        debugfs_dir = debugfs_create_dir("kvm", NULL);
        for (p = debugfs_entries; p->name; ++p)
                p->dentry = debugfs_create_file(p->name, 0444, debugfs_dir,
                                                (void *)(long)p->offset,
                                                &stat_fops);
}

static void kvm_exit_debug(void)
{
        struct kvm_stats_debugfs_item *p;

        for (p = debugfs_entries; p->name; ++p)
                debugfs_remove(p->dentry);
        debugfs_remove(debugfs_dir);
}

static int kvm_suspend(struct sys_device *dev, pm_message_t state)
{
        hardware_disable(NULL);
        return 0;
}

static int kvm_resume(struct sys_device *dev)
{
        hardware_enable(NULL);
        return 0;
}

static struct sysdev_class kvm_sysdev_class = {
        .name = "kvm",
        .suspend = kvm_suspend,
        .resume = kvm_resume,
};

static struct sys_device kvm_sysdev = {
        .id = 0,
        .cls = &kvm_sysdev_class,
};

hpa_t bad_page_address;

static inline
struct kvm_vcpu *preempt_notifier_to_vcpu(struct preempt_notifier *pn)
{
        return container_of(pn, struct kvm_vcpu, preempt_notifier);
}

static void kvm_sched_in(struct preempt_notifier *pn, int cpu)
{
        struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);

        kvm_x86_ops->vcpu_load(vcpu, cpu);
}

static void kvm_sched_out(struct preempt_notifier *pn,
                          struct task_struct *next)
{
        struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);

        kvm_x86_ops->vcpu_put(vcpu);
}

int kvm_init_x86(struct kvm_x86_ops *ops, unsigned int vcpu_size,
                 struct module *module)
{
        int r;
        int cpu;

        if (kvm_x86_ops) {
                printk(KERN_ERR "kvm: already loaded the other module\n");
                return -EEXIST;
        }

        if (!ops->cpu_has_kvm_support()) {
                printk(KERN_ERR "kvm: no hardware support\n");
                return -EOPNOTSUPP;
        }
        if (ops->disabled_by_bios()) {
                printk(KERN_ERR "kvm: disabled by bios\n");
                return -EOPNOTSUPP;
        }

        kvm_x86_ops = ops;

        r = kvm_x86_ops->hardware_setup();
        if (r < 0)
                goto out;

        for_each_online_cpu(cpu) {
                smp_call_function_single(cpu,
                                kvm_x86_ops->check_processor_compatibility,
                                &r, 0, 1);
                if (r < 0)
                        goto out_free_0;
        }

        on_each_cpu(hardware_enable, NULL, 0, 1);
        r = register_cpu_notifier(&kvm_cpu_notifier);
        if (r)
                goto out_free_1;
        register_reboot_notifier(&kvm_reboot_notifier);

        r = sysdev_class_register(&kvm_sysdev_class);
        if (r)
                goto out_free_2;

        r = sysdev_register(&kvm_sysdev);
        if (r)
                goto out_free_3;

        /* A kmem cache lets us meet the alignment requirements of fx_save. */
        kvm_vcpu_cache = kmem_cache_create("kvm_vcpu", vcpu_size,
                                           __alignof__(struct kvm_vcpu),
                                           0, 0);
        if (!kvm_vcpu_cache) {
                r = -ENOMEM;
                goto out_free_4;
        }

        kvm_chardev_ops.owner = module;

        r = misc_register(&kvm_dev);
        if (r) {
                printk(KERN_ERR "kvm: misc device register failed\n");
                goto out_free;
        }

        kvm_preempt_ops.sched_in = kvm_sched_in;
        kvm_preempt_ops.sched_out = kvm_sched_out;

        return r;

out_free:
        kmem_cache_destroy(kvm_vcpu_cache);
out_free_4:
        sysdev_unregister(&kvm_sysdev);
out_free_3:
        sysdev_class_unregister(&kvm_sysdev_class);
out_free_2:
        unregister_reboot_notifier(&kvm_reboot_notifier);
        unregister_cpu_notifier(&kvm_cpu_notifier);
out_free_1:
        on_each_cpu(hardware_disable, NULL, 0, 1);
out_free_0:
        kvm_x86_ops->hardware_unsetup();
out:
        kvm_x86_ops = NULL;
        return r;
}

void kvm_exit_x86(void)
{
        misc_deregister(&kvm_dev);
        kmem_cache_destroy(kvm_vcpu_cache);
        sysdev_unregister(&kvm_sysdev);
        sysdev_class_unregister(&kvm_sysdev_class);
        unregister_reboot_notifier(&kvm_reboot_notifier);
        unregister_cpu_notifier(&kvm_cpu_notifier);
        on_each_cpu(hardware_disable, NULL, 0, 1);
        kvm_x86_ops->hardware_unsetup();
        kvm_x86_ops = NULL;
}

static __init int kvm_init(void)
{
        static struct page *bad_page;
        int r;

        r = kvm_mmu_module_init();
        if (r)
                goto out4;

        kvm_init_debug();

        kvm_init_msr_list();

        if ((bad_page = alloc_page(GFP_KERNEL)) == NULL) {
                r = -ENOMEM;
                goto out;
        }

        bad_page_address = page_to_pfn(bad_page) << PAGE_SHIFT;
        memset(__va(bad_page_address), 0, PAGE_SIZE);

        return 0;

out:
        kvm_exit_debug();
        kvm_mmu_module_exit();
out4:
        return r;
}

static __exit void kvm_exit(void)
{
        kvm_exit_debug();
        __free_page(pfn_to_page(bad_page_address >> PAGE_SHIFT));
        kvm_mmu_module_exit();
}

module_init(kvm_init)
module_exit(kvm_exit)

EXPORT_SYMBOL_GPL(kvm_init_x86);
EXPORT_SYMBOL_GPL(kvm_exit_x86);