/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * derived from drivers/kvm/kvm_main.c
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright (C) 2008 Qumranet, Inc.
 * Copyright IBM Corporation, 2008
 *
 * Authors:
 *   Avi Kivity   <avi@qumranet.com>
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Amit Shah    <amit.shah@qumranet.com>
 *   Ben-Ami Yassour <benami@il.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#include <linux/kvm_host.h>
#include "irq.h"
#include "mmu.h"
#include "i8254.h"
#include "tss.h"
#include "kvm_cache_regs.h"
#include "x86.h"

#include <linux/clocksource.h>
#include <linux/interrupt.h>
#include <linux/kvm.h>
#include <linux/fs.h>
#include <linux/vmalloc.h>
#include <linux/module.h>
#include <linux/mman.h>
#include <linux/highmem.h>
#include <linux/intel-iommu.h>

#include <asm/uaccess.h>
#include <asm/msr.h>
#include <asm/desc.h>
#include <asm/mtrr.h>

#define MAX_IO_MSRS 256
#define CR0_RESERVED_BITS						\
	(~(unsigned long)(X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS \
			  | X86_CR0_ET | X86_CR0_NE | X86_CR0_WP | X86_CR0_AM \
			  | X86_CR0_NW | X86_CR0_CD | X86_CR0_PG))
#define CR4_RESERVED_BITS						\
	(~(unsigned long)(X86_CR4_VME | X86_CR4_PVI | X86_CR4_TSD | X86_CR4_DE\
			  | X86_CR4_PSE | X86_CR4_PAE | X86_CR4_MCE	\
			  | X86_CR4_PGE | X86_CR4_PCE | X86_CR4_OSFXSR	\
			  | X86_CR4_OSXMMEXCPT | X86_CR4_VMXE))

#define CR8_RESERVED_BITS (~(unsigned long)X86_CR8_TPR)
/* EFER defaults:
 * - enable syscall per default because it's emulated by KVM
 * - enable LME and LMA per default on 64 bit KVM
 */
#ifdef CONFIG_X86_64
static u64 __read_mostly efer_reserved_bits = 0xfffffffffffffafeULL;
#else
static u64 __read_mostly efer_reserved_bits = 0xfffffffffffffffeULL;
#endif

#define VM_STAT(x) offsetof(struct kvm, stat.x), KVM_STAT_VM
#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU

static int kvm_dev_ioctl_get_supported_cpuid(struct kvm_cpuid2 *cpuid,
				    struct kvm_cpuid_entry2 __user *entries);

struct kvm_x86_ops *kvm_x86_ops;
EXPORT_SYMBOL_GPL(kvm_x86_ops);

struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "pf_fixed", VCPU_STAT(pf_fixed) },
	{ "pf_guest", VCPU_STAT(pf_guest) },
	{ "tlb_flush", VCPU_STAT(tlb_flush) },
	{ "invlpg", VCPU_STAT(invlpg) },
	{ "exits", VCPU_STAT(exits) },
	{ "io_exits", VCPU_STAT(io_exits) },
	{ "mmio_exits", VCPU_STAT(mmio_exits) },
	{ "signal_exits", VCPU_STAT(signal_exits) },
	{ "irq_window", VCPU_STAT(irq_window_exits) },
	{ "nmi_window", VCPU_STAT(nmi_window_exits) },
	{ "halt_exits", VCPU_STAT(halt_exits) },
	{ "halt_wakeup", VCPU_STAT(halt_wakeup) },
	{ "hypercalls", VCPU_STAT(hypercalls) },
	{ "request_irq", VCPU_STAT(request_irq_exits) },
	{ "request_nmi", VCPU_STAT(request_nmi_exits) },
	{ "irq_exits", VCPU_STAT(irq_exits) },
	{ "host_state_reload", VCPU_STAT(host_state_reload) },
	{ "efer_reload", VCPU_STAT(efer_reload) },
	{ "fpu_reload", VCPU_STAT(fpu_reload) },
	{ "insn_emulation", VCPU_STAT(insn_emulation) },
	{ "insn_emulation_fail", VCPU_STAT(insn_emulation_fail) },
	{ "irq_injections", VCPU_STAT(irq_injections) },
	{ "nmi_injections", VCPU_STAT(nmi_injections) },
	{ "mmu_shadow_zapped", VM_STAT(mmu_shadow_zapped) },
	{ "mmu_pte_write", VM_STAT(mmu_pte_write) },
	{ "mmu_pte_updated", VM_STAT(mmu_pte_updated) },
	{ "mmu_pde_zapped", VM_STAT(mmu_pde_zapped) },
	{ "mmu_flooded", VM_STAT(mmu_flooded) },
	{ "mmu_recycled", VM_STAT(mmu_recycled) },
	{ "mmu_cache_miss", VM_STAT(mmu_cache_miss) },
	{ "mmu_unsync", VM_STAT(mmu_unsync) },
	{ "remote_tlb_flush", VM_STAT(remote_tlb_flush) },
	{ "largepages", VM_STAT(lpages) },
	{ NULL }
};
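/*
 * Note: each entry above is surfaced by the generic KVM code as a file
 * under debugfs (typically /sys/kernel/debug/kvm/), exposing the per-VM
 * or per-vcpu counter selected by the VM_STAT()/VCPU_STAT() markers.
 */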
unsigned long segment_base(u16 selector)
{
	struct descriptor_table gdt;
	struct desc_struct *d;
	unsigned long table_base;
	unsigned long v;

	if (selector == 0)
		return 0;

	asm("sgdt %0" : "=m"(gdt));
	table_base = gdt.base;

	if (selector & 4) {		/* from ldt */
		u16 ldt_selector;

		asm("sldt %0" : "=g"(ldt_selector));
		table_base = segment_base(ldt_selector);
	}
	d = (struct desc_struct *)(table_base + (selector & ~7));
	v = d->base0 | ((unsigned long)d->base1 << 16) |
		((unsigned long)d->base2 << 24);
#ifdef CONFIG_X86_64
	if (d->s == 0 && (d->type == 2 || d->type == 9 || d->type == 11))
		v |= ((unsigned long)((struct ldttss_desc64 *)d)->base3) << 32;
#endif
	return v;
}
EXPORT_SYMBOL_GPL(segment_base);
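/*
 * Note on the selector decoding in segment_base(): bit 2 of a segment
 * selector is the table indicator (0 = GDT, 1 = LDT) and bits 0-1 are
 * the RPL, so "selector & ~7" yields the byte offset of the descriptor
 * within its table.  The 64-bit branch covers system descriptors that
 * are 16 bytes wide in long mode -- type 2 (LDT) and types 9/11
 * (available/busy TSS) -- which carry an extra base3 dword.
 */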
u64 kvm_get_apic_base(struct kvm_vcpu *vcpu)
{
	/*
	 * The base is cached in vcpu->arch.apic_base whether the local
	 * APIC is emulated in the kernel or in userspace, so the
	 * irqchip_in_kernel() and userspace cases read the same field.
	 */
	return vcpu->arch.apic_base;
}
EXPORT_SYMBOL_GPL(kvm_get_apic_base);

void kvm_set_apic_base(struct kvm_vcpu *vcpu, u64 data)
{
	/* TODO: reserve bits check */
	if (irqchip_in_kernel(vcpu->kvm))
		kvm_lapic_set_base(vcpu, data);
	else
		vcpu->arch.apic_base = data;
}
EXPORT_SYMBOL_GPL(kvm_set_apic_base);

void kvm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr)
{
	WARN_ON(vcpu->arch.exception.pending);
	vcpu->arch.exception.pending = true;
	vcpu->arch.exception.has_error_code = false;
	vcpu->arch.exception.nr = nr;
}
EXPORT_SYMBOL_GPL(kvm_queue_exception);

void kvm_inject_page_fault(struct kvm_vcpu *vcpu, unsigned long addr,
			   u32 error_code)
{
	++vcpu->stat.pf_guest;
	if (vcpu->arch.exception.pending) {
		if (vcpu->arch.exception.nr == PF_VECTOR) {
			printk(KERN_DEBUG "kvm: inject_page_fault:"
			       " double fault 0x%lx\n", addr);
			vcpu->arch.exception.nr = DF_VECTOR;
			vcpu->arch.exception.error_code = 0;
		} else if (vcpu->arch.exception.nr == DF_VECTOR) {
			/* triple fault -> shutdown */
			set_bit(KVM_REQ_TRIPLE_FAULT, &vcpu->requests);
		}
		return;
	}
	vcpu->arch.cr2 = addr;
	kvm_queue_exception_e(vcpu, PF_VECTOR, error_code);
}
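/*
 * The escalation above mirrors what real hardware does with contributory
 * exceptions: a #PF raised while another #PF is still pending becomes a
 * double fault (#DF), and a fault while #DF is pending is a triple fault,
 * which KVM turns into a shutdown request for the guest.
 */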
void kvm_inject_nmi(struct kvm_vcpu *vcpu)
{
	vcpu->arch.nmi_pending = 1;
}
EXPORT_SYMBOL_GPL(kvm_inject_nmi);

void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code)
{
	WARN_ON(vcpu->arch.exception.pending);
	vcpu->arch.exception.pending = true;
	vcpu->arch.exception.has_error_code = true;
	vcpu->arch.exception.nr = nr;
	vcpu->arch.exception.error_code = error_code;
}
EXPORT_SYMBOL_GPL(kvm_queue_exception_e);

static void __queue_exception(struct kvm_vcpu *vcpu)
{
	kvm_x86_ops->queue_exception(vcpu, vcpu->arch.exception.nr,
				     vcpu->arch.exception.has_error_code,
				     vcpu->arch.exception.error_code);
}
/*
 * Load the pae pdptrs.  Return true if they are all valid.
 */
int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3)
{
	gfn_t pdpt_gfn = cr3 >> PAGE_SHIFT;
	unsigned offset = ((cr3 & (PAGE_SIZE-1)) >> 5) << 2;
	int i;
	int ret;
	u64 pdpte[ARRAY_SIZE(vcpu->arch.pdptrs)];

	ret = kvm_read_guest_page(vcpu->kvm, pdpt_gfn, pdpte,
				  offset * sizeof(u64), sizeof(pdpte));
	if (ret < 0) {
		ret = 0;
		goto out;
	}
	for (i = 0; i < ARRAY_SIZE(pdpte); ++i) {
		if ((pdpte[i] & 1) && (pdpte[i] & 0xfffffff0000001e6ull)) {
			ret = 0;
			goto out;
		}
	}
	ret = 1;

	memcpy(vcpu->arch.pdptrs, pdpte, sizeof(vcpu->arch.pdptrs));
out:

	return ret;
}
EXPORT_SYMBOL_GPL(load_pdptrs);
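/*
 * The loop above tests each present PDPTE (bit 0 set) against the
 * reserved-bit mask 0xfffffff0000001e6: bits 1-2 and 5-8, which are
 * reserved in a PAE PDPTE, plus the high physical-address bits 36-63,
 * which must be zero given the physical address width assumed here.
 * One set reserved bit invalidates the whole load.
 */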
static bool pdptrs_changed(struct kvm_vcpu *vcpu)
{
	u64 pdpte[ARRAY_SIZE(vcpu->arch.pdptrs)];
	bool changed = true;
	int r;

	if (is_long_mode(vcpu) || !is_pae(vcpu))
		return false;

	r = kvm_read_guest(vcpu->kvm, vcpu->arch.cr3 & ~31u, pdpte, sizeof(pdpte));
	if (r < 0)
		goto out;
	changed = memcmp(pdpte, vcpu->arch.pdptrs, sizeof(pdpte)) != 0;
out:

	return changed;
}
void kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
{
	if (cr0 & CR0_RESERVED_BITS) {
		printk(KERN_DEBUG "set_cr0: 0x%lx #GP, reserved bits 0x%lx\n",
		       cr0, vcpu->arch.cr0);
		kvm_inject_gp(vcpu, 0);
		return;
	}

	if ((cr0 & X86_CR0_NW) && !(cr0 & X86_CR0_CD)) {
		printk(KERN_DEBUG "set_cr0: #GP, CD == 0 && NW == 1\n");
		kvm_inject_gp(vcpu, 0);
		return;
	}

	if ((cr0 & X86_CR0_PG) && !(cr0 & X86_CR0_PE)) {
		printk(KERN_DEBUG "set_cr0: #GP, set PG flag "
		       "and a clear PE flag\n");
		kvm_inject_gp(vcpu, 0);
		return;
	}

	if (!is_paging(vcpu) && (cr0 & X86_CR0_PG)) {
#ifdef CONFIG_X86_64
		if ((vcpu->arch.shadow_efer & EFER_LME)) {
			int cs_db, cs_l;

			if (!is_pae(vcpu)) {
				printk(KERN_DEBUG "set_cr0: #GP, start paging "
				       "in long mode while PAE is disabled\n");
				kvm_inject_gp(vcpu, 0);
				return;
			}
			kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
			if (cs_l) {
				printk(KERN_DEBUG "set_cr0: #GP, start paging "
				       "in long mode while CS.L == 1\n");
				kvm_inject_gp(vcpu, 0);
				return;
			}
		} else
#endif
		if (is_pae(vcpu) && !load_pdptrs(vcpu, vcpu->arch.cr3)) {
			printk(KERN_DEBUG "set_cr0: #GP, pdptrs "
			       "reserved bits\n");
			kvm_inject_gp(vcpu, 0);
			return;
		}
	}

	kvm_x86_ops->set_cr0(vcpu, cr0);
	vcpu->arch.cr0 = cr0;

	kvm_mmu_reset_context(vcpu);
	return;
}
EXPORT_SYMBOL_GPL(kvm_set_cr0);
void kvm_lmsw(struct kvm_vcpu *vcpu, unsigned long msw)
{
	kvm_set_cr0(vcpu, (vcpu->arch.cr0 & ~0x0ful) | (msw & 0x0f));
	KVMTRACE_1D(LMSW, vcpu,
		    (u32)((vcpu->arch.cr0 & ~0x0ful) | (msw & 0x0f)),
		    handler);
}
EXPORT_SYMBOL_GPL(kvm_lmsw);
void kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
{
	if (cr4 & CR4_RESERVED_BITS) {
		printk(KERN_DEBUG "set_cr4: #GP, reserved bits\n");
		kvm_inject_gp(vcpu, 0);
		return;
	}

	if (is_long_mode(vcpu)) {
		if (!(cr4 & X86_CR4_PAE)) {
			printk(KERN_DEBUG "set_cr4: #GP, clearing PAE while "
			       "in long mode\n");
			kvm_inject_gp(vcpu, 0);
			return;
		}
	} else if (is_paging(vcpu) && !is_pae(vcpu) && (cr4 & X86_CR4_PAE)
		   && !load_pdptrs(vcpu, vcpu->arch.cr3)) {
		printk(KERN_DEBUG "set_cr4: #GP, pdptrs reserved bits\n");
		kvm_inject_gp(vcpu, 0);
		return;
	}

	if (cr4 & X86_CR4_VMXE) {
		printk(KERN_DEBUG "set_cr4: #GP, setting VMXE\n");
		kvm_inject_gp(vcpu, 0);
		return;
	}
	kvm_x86_ops->set_cr4(vcpu, cr4);
	vcpu->arch.cr4 = cr4;
	kvm_mmu_reset_context(vcpu);
}
EXPORT_SYMBOL_GPL(kvm_set_cr4);
void kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
{
	if (cr3 == vcpu->arch.cr3 && !pdptrs_changed(vcpu)) {
		kvm_mmu_sync_roots(vcpu);
		kvm_mmu_flush_tlb(vcpu);
		return;
	}

	if (is_long_mode(vcpu)) {
		if (cr3 & CR3_L_MODE_RESERVED_BITS) {
			printk(KERN_DEBUG "set_cr3: #GP, reserved bits\n");
			kvm_inject_gp(vcpu, 0);
			return;
		}
	} else {
		if (is_pae(vcpu)) {
			if (cr3 & CR3_PAE_RESERVED_BITS) {
				printk(KERN_DEBUG
				       "set_cr3: #GP, reserved bits\n");
				kvm_inject_gp(vcpu, 0);
				return;
			}
			if (is_paging(vcpu) && !load_pdptrs(vcpu, cr3)) {
				printk(KERN_DEBUG "set_cr3: #GP, pdptrs "
				       "reserved bits\n");
				kvm_inject_gp(vcpu, 0);
				return;
			}
		}
		/*
		 * We don't check reserved bits in nonpae mode, because
		 * this isn't enforced, and VMware depends on this.
		 */
	}

	/*
	 * Does the new cr3 value map to physical memory? (Note, we
	 * catch an invalid cr3 even in real-mode, because it would
	 * cause trouble later on when we turn on paging anyway.)
	 *
	 * A real CPU would silently accept an invalid cr3 and would
	 * attempt to use it - with largely undefined (and often hard
	 * to debug) behavior on the guest side.
	 */
	if (unlikely(!gfn_to_memslot(vcpu->kvm, cr3 >> PAGE_SHIFT)))
		kvm_inject_gp(vcpu, 0);
	else {
		vcpu->arch.cr3 = cr3;
		vcpu->arch.mmu.new_cr3(vcpu);
	}
}
EXPORT_SYMBOL_GPL(kvm_set_cr3);
void kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8)
{
	if (cr8 & CR8_RESERVED_BITS) {
		printk(KERN_DEBUG "set_cr8: #GP, reserved bits 0x%lx\n", cr8);
		kvm_inject_gp(vcpu, 0);
		return;
	}
	if (irqchip_in_kernel(vcpu->kvm))
		kvm_lapic_set_tpr(vcpu, cr8);
	else
		vcpu->arch.cr8 = cr8;
}
EXPORT_SYMBOL_GPL(kvm_set_cr8);

unsigned long kvm_get_cr8(struct kvm_vcpu *vcpu)
{
	if (irqchip_in_kernel(vcpu->kvm))
		return kvm_lapic_get_cr8(vcpu);
	else
		return vcpu->arch.cr8;
}
EXPORT_SYMBOL_GPL(kvm_get_cr8);
/*
 * List of msr numbers which we expose to userspace through KVM_GET_MSRS
 * and KVM_SET_MSRS, and KVM_GET_MSR_INDEX_LIST.
 *
 * This list is modified at module load time to reflect the
 * capabilities of the host cpu.
 */
static u32 msrs_to_save[] = {
	MSR_IA32_SYSENTER_CS, MSR_IA32_SYSENTER_ESP, MSR_IA32_SYSENTER_EIP,
	MSR_K6_STAR,
#ifdef CONFIG_X86_64
	MSR_CSTAR, MSR_KERNEL_GS_BASE, MSR_SYSCALL_MASK, MSR_LSTAR,
#endif
	MSR_IA32_TIME_STAMP_COUNTER, MSR_KVM_SYSTEM_TIME, MSR_KVM_WALL_CLOCK,
	MSR_IA32_PERF_STATUS, MSR_IA32_CR_PAT
};

static unsigned num_msrs_to_save;

static u32 emulated_msrs[] = {
	MSR_IA32_MISC_ENABLE,
};

static void set_efer(struct kvm_vcpu *vcpu, u64 efer)
{
	if (efer & efer_reserved_bits) {
		printk(KERN_DEBUG "set_efer: 0x%llx #GP, reserved bits\n",
		       efer);
		kvm_inject_gp(vcpu, 0);
		return;
	}

	if (is_paging(vcpu)
	    && (vcpu->arch.shadow_efer & EFER_LME) != (efer & EFER_LME)) {
		printk(KERN_DEBUG "set_efer: #GP, change LME while paging\n");
		kvm_inject_gp(vcpu, 0);
		return;
	}

	kvm_x86_ops->set_efer(vcpu, efer);

	efer &= ~EFER_LMA;
	efer |= vcpu->arch.shadow_efer & EFER_LMA;

	vcpu->arch.shadow_efer = efer;
}

void kvm_enable_efer_bits(u64 mask)
{
	efer_reserved_bits &= ~mask;
}
EXPORT_SYMBOL_GPL(kvm_enable_efer_bits);
/*
 * Writes msr value into the appropriate "register".
 * Returns 0 on success, non-0 otherwise.
 * Assumes vcpu_load() was already called.
 */
int kvm_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
{
	return kvm_x86_ops->set_msr(vcpu, msr_index, data);
}

/*
 * Adapt set_msr() to msr_io()'s calling convention
 */
static int do_set_msr(struct kvm_vcpu *vcpu, unsigned index, u64 *data)
{
	return kvm_set_msr(vcpu, index, *data);
}
static void kvm_write_wall_clock(struct kvm *kvm, gpa_t wall_clock)
{
	static int version;
	struct pvclock_wall_clock wc;
	struct timespec now, sys, boot;

	if (!wall_clock)
		return;

	version++;

	kvm_write_guest(kvm, wall_clock, &version, sizeof(version));

	/*
	 * The guest calculates current wall clock time by adding
	 * system time (updated by kvm_write_guest_time below) to the
	 * wall clock specified here.  guest system time equals host
	 * system time for us, thus we must fill in host boot time here.
	 */
	now = current_kernel_time();
	ktime_get_ts(&sys);
	boot = ns_to_timespec(timespec_to_ns(&now) - timespec_to_ns(&sys));

	wc.sec = boot.tv_sec;
	wc.nsec = boot.tv_nsec;
	wc.version = version;

	kvm_write_guest(kvm, wall_clock, &wc, sizeof(wc));

	version++;
	kvm_write_guest(kvm, wall_clock, &version, sizeof(version));
}
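/*
 * The two version bumps above implement a simple seqlock: version is odd
 * while the structure is being rewritten and even once it is consistent.
 * A guest-side reader would do something like (sketch, not the actual
 * pvclock code):
 *
 *	do {
 *		v = wc->version;
 *		rmb();
 *		sec = wc->sec;
 *		nsec = wc->nsec;
 *		rmb();
 *	} while ((v & 1) || v != wc->version);
 */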
static uint32_t div_frac(uint32_t dividend, uint32_t divisor)
{
	uint32_t quotient, remainder;

	/* Don't try to replace with do_div(), this one calculates
	 * "(dividend << 32) / divisor" */
	__asm__ ( "divl %4"
		  : "=a" (quotient), "=d" (remainder)
		  : "0" (0), "1" (dividend), "r" (divisor) );
	return quotient;
}
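/*
 * div_frac() thus returns dividend/divisor as a 0.32 fixed-point
 * fraction: the constraints load dividend into %edx and zero into %eax,
 * so EDX:EAX holds (dividend << 32) before the divl.  For example,
 * div_frac(1, 3) yields 0x55555555, i.e. approximately 1/3 scaled
 * by 2^32.
 */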
static void kvm_set_time_scale(uint32_t tsc_khz, struct pvclock_vcpu_time_info *hv_clock)
{
	uint64_t nsecs = 1000000000LL;
	int32_t  shift = 0;
	uint64_t tps64;
	uint32_t tps32;

	tps64 = tsc_khz * 1000LL;
	while (tps64 > nsecs * 2) {
		tps64 >>= 1;
		shift--;
	}

	tps32 = (uint32_t)tps64;
	while (tps32 <= (uint32_t)nsecs) {
		tps32 <<= 1;
		shift++;
	}

	hv_clock->tsc_shift = shift;
	hv_clock->tsc_to_system_mul = div_frac(nsecs, tps32);

	pr_debug("%s: tsc_khz %u, tsc_shift %d, tsc_mul %u\n",
		 __func__, tsc_khz, hv_clock->tsc_shift,
		 hv_clock->tsc_to_system_mul);
}
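/*
 * Worked example: on a 1 GHz TSC (tsc_khz = 1000000), tps64 starts at
 * 10^9 ticks per second.  That is not above 2 * 10^9, so the first loop
 * leaves it alone; the second loop doubles it once to 2 * 10^9, giving
 * shift = 1 and tsc_to_system_mul = (10^9 << 32) / (2 * 10^9) = 2^31.
 * The guest shifts a tick delta left by one and multiplies by 2^31/2^32,
 * which comes out to exactly 1 ns per tick, as expected at 1 GHz.
 */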
static void kvm_write_guest_time(struct kvm_vcpu *v)
{
	struct timespec ts;
	unsigned long flags;
	struct kvm_vcpu_arch *vcpu = &v->arch;
	void *shared_kaddr;

	if (!vcpu->time_page)
		return;

	if (unlikely(vcpu->hv_clock_tsc_khz != tsc_khz)) {
		kvm_set_time_scale(tsc_khz, &vcpu->hv_clock);
		vcpu->hv_clock_tsc_khz = tsc_khz;
	}

	/* Keep irq disabled to prevent changes to the clock */
	local_irq_save(flags);
	kvm_get_msr(v, MSR_IA32_TIME_STAMP_COUNTER,
		    &vcpu->hv_clock.tsc_timestamp);
	ktime_get_ts(&ts);
	local_irq_restore(flags);

	/* With all the info we got, fill in the values */
	vcpu->hv_clock.system_time = ts.tv_nsec +
				     (NSEC_PER_SEC * (u64)ts.tv_sec);
	/*
	 * The interface expects us to write an even number signaling that the
	 * update is finished. Since the guest won't see the intermediate
	 * state, we just increase by 2 at the end.
	 */
	vcpu->hv_clock.version += 2;

	shared_kaddr = kmap_atomic(vcpu->time_page, KM_USER0);

	memcpy(shared_kaddr + vcpu->time_offset, &vcpu->hv_clock,
	       sizeof(vcpu->hv_clock));

	kunmap_atomic(shared_kaddr, KM_USER0);

	mark_page_dirty(v->kvm, vcpu->time >> PAGE_SHIFT);
}
static bool msr_mtrr_valid(unsigned msr)
{
	switch (msr) {
	case 0x200 ... 0x200 + 2 * KVM_NR_VAR_MTRR - 1:
	case MSR_MTRRfix64K_00000:
	case MSR_MTRRfix16K_80000:
	case MSR_MTRRfix16K_A0000:
	case MSR_MTRRfix4K_C0000:
	case MSR_MTRRfix4K_C8000:
	case MSR_MTRRfix4K_D0000:
	case MSR_MTRRfix4K_D8000:
	case MSR_MTRRfix4K_E0000:
	case MSR_MTRRfix4K_E8000:
	case MSR_MTRRfix4K_F0000:
	case MSR_MTRRfix4K_F8000:
	case MSR_MTRRdefType:
	case MSR_IA32_CR_PAT:
		return true;
	case 0x2f8:
		return true;
	}
	return false;
}
static int set_msr_mtrr(struct kvm_vcpu *vcpu, u32 msr, u64 data)
{
	u64 *p = (u64 *)&vcpu->arch.mtrr_state.fixed_ranges;

	if (!msr_mtrr_valid(msr))
		return 1;

	if (msr == MSR_MTRRdefType) {
		vcpu->arch.mtrr_state.def_type = data;
		vcpu->arch.mtrr_state.enabled = (data & 0xc00) >> 10;
	} else if (msr == MSR_MTRRfix64K_00000)
		p[0] = data;
	else if (msr == MSR_MTRRfix16K_80000 || msr == MSR_MTRRfix16K_A0000)
		p[1 + msr - MSR_MTRRfix16K_80000] = data;
	else if (msr >= MSR_MTRRfix4K_C0000 && msr <= MSR_MTRRfix4K_F8000)
		p[3 + msr - MSR_MTRRfix4K_C0000] = data;
	else if (msr == MSR_IA32_CR_PAT)
		vcpu->arch.pat = data;
	else {	/* Variable MTRRs */
		int idx, is_mtrr_mask;
		u64 *pt;

		idx = (msr - 0x200) / 2;
		is_mtrr_mask = msr - 0x200 - 2 * idx;
		if (!is_mtrr_mask)
			pt =
			  (u64 *)&vcpu->arch.mtrr_state.var_ranges[idx].base_lo;
		else
			pt =
			  (u64 *)&vcpu->arch.mtrr_state.var_ranges[idx].mask_lo;
		*pt = data;
	}

	kvm_mmu_reset_context(vcpu);
	return 0;
}
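/*
 * Variable-range MTRRs live in MSR pairs starting at 0x200:
 * MTRRphysBase(n) at 0x200 + 2n and MTRRphysMask(n) at 0x200 + 2n + 1.
 * So for a write to, say, 0x205: idx = (0x205 - 0x200) / 2 = 2 and
 * is_mtrr_mask = 1, selecting the mask register of variable range 2.
 */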
int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)
{
	switch (msr) {
	case MSR_EFER:
		set_efer(vcpu, data);
		break;
	case MSR_IA32_MC0_STATUS:
		pr_unimpl(vcpu, "%s: MSR_IA32_MC0_STATUS 0x%llx, nop\n",
			  __func__, data);
		break;
	case MSR_IA32_MCG_STATUS:
		pr_unimpl(vcpu, "%s: MSR_IA32_MCG_STATUS 0x%llx, nop\n",
			  __func__, data);
		break;
	case MSR_IA32_MCG_CTL:
		pr_unimpl(vcpu, "%s: MSR_IA32_MCG_CTL 0x%llx, nop\n",
			  __func__, data);
		break;
	case MSR_IA32_DEBUGCTLMSR:
		if (!data) {
			/* We support the non-activated case already */
			break;
		} else if (data & ~(DEBUGCTLMSR_LBR | DEBUGCTLMSR_BTF)) {
			/* Values other than LBR and BTF are vendor-specific,
			 * thus reserved and should throw a #GP */
			return 1;
		}
		pr_unimpl(vcpu, "%s: MSR_IA32_DEBUGCTLMSR 0x%llx, nop\n",
			  __func__, data);
		break;
	case MSR_IA32_UCODE_REV:
	case MSR_IA32_UCODE_WRITE:
		break;
	case 0x200 ... 0x2ff:
		return set_msr_mtrr(vcpu, msr, data);
	case MSR_IA32_APICBASE:
		kvm_set_apic_base(vcpu, data);
		break;
	case MSR_IA32_MISC_ENABLE:
		vcpu->arch.ia32_misc_enable_msr = data;
		break;
	case MSR_KVM_WALL_CLOCK:
		vcpu->kvm->arch.wall_clock = data;
		kvm_write_wall_clock(vcpu->kvm, data);
		break;
	case MSR_KVM_SYSTEM_TIME: {
		if (vcpu->arch.time_page) {
			kvm_release_page_dirty(vcpu->arch.time_page);
			vcpu->arch.time_page = NULL;
		}

		vcpu->arch.time = data;

		/* we verify if the enable bit is set... */
		if (!(data & 1))
			break;

		/* ...but clean it before doing the actual write */
		vcpu->arch.time_offset = data & ~(PAGE_MASK | 1);

		vcpu->arch.time_page =
				gfn_to_page(vcpu->kvm, data >> PAGE_SHIFT);

		if (is_error_page(vcpu->arch.time_page)) {
			kvm_release_page_clean(vcpu->arch.time_page);
			vcpu->arch.time_page = NULL;
		}

		kvm_write_guest_time(vcpu);
		break;
	}
	default:
		pr_unimpl(vcpu, "unhandled wrmsr: 0x%x data %llx\n", msr, data);
		return 1;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_set_msr_common);
/*
 * Reads an msr value (of 'msr_index') into 'pdata'.
 * Returns 0 on success, non-0 otherwise.
 * Assumes vcpu_load() was already called.
 */
int kvm_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
{
	return kvm_x86_ops->get_msr(vcpu, msr_index, pdata);
}

static int get_msr_mtrr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
{
	u64 *p = (u64 *)&vcpu->arch.mtrr_state.fixed_ranges;

	if (!msr_mtrr_valid(msr))
		return 1;

	if (msr == MSR_MTRRdefType)
		*pdata = vcpu->arch.mtrr_state.def_type +
			 (vcpu->arch.mtrr_state.enabled << 10);
	else if (msr == MSR_MTRRfix64K_00000)
		*pdata = p[0];
	else if (msr == MSR_MTRRfix16K_80000 || msr == MSR_MTRRfix16K_A0000)
		*pdata = p[1 + msr - MSR_MTRRfix16K_80000];
	else if (msr >= MSR_MTRRfix4K_C0000 && msr <= MSR_MTRRfix4K_F8000)
		*pdata = p[3 + msr - MSR_MTRRfix4K_C0000];
	else if (msr == MSR_IA32_CR_PAT)
		*pdata = vcpu->arch.pat;
	else {	/* Variable MTRRs */
		int idx, is_mtrr_mask;
		u64 *pt;

		idx = (msr - 0x200) / 2;
		is_mtrr_mask = msr - 0x200 - 2 * idx;
		if (!is_mtrr_mask)
			pt =
			  (u64 *)&vcpu->arch.mtrr_state.var_ranges[idx].base_lo;
		else
			pt =
			  (u64 *)&vcpu->arch.mtrr_state.var_ranges[idx].mask_lo;
		*pdata = *pt;
	}

	return 0;
}
int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
{
	u64 data;

	switch (msr) {
	case 0xc0010010: /* SYSCFG */
	case 0xc0010015: /* HWCR */
	case MSR_IA32_PLATFORM_ID:
	case MSR_IA32_P5_MC_ADDR:
	case MSR_IA32_P5_MC_TYPE:
	case MSR_IA32_MC0_CTL:
	case MSR_IA32_MCG_STATUS:
	case MSR_IA32_MCG_CAP:
	case MSR_IA32_MCG_CTL:
	case MSR_IA32_MC0_MISC:
	case MSR_IA32_MC0_MISC+4:
	case MSR_IA32_MC0_MISC+8:
	case MSR_IA32_MC0_MISC+12:
	case MSR_IA32_MC0_MISC+16:
	case MSR_IA32_MC0_MISC+20:
	case MSR_IA32_UCODE_REV:
	case MSR_IA32_EBL_CR_POWERON:
	case MSR_IA32_DEBUGCTLMSR:
	case MSR_IA32_LASTBRANCHFROMIP:
	case MSR_IA32_LASTBRANCHTOIP:
	case MSR_IA32_LASTINTFROMIP:
	case MSR_IA32_LASTINTTOIP:
		data = 0;
		break;
	case MSR_MTRRcap:
		data = 0x500 | KVM_NR_VAR_MTRR;
		break;
	case 0x200 ... 0x2ff:
		return get_msr_mtrr(vcpu, msr, pdata);
	case 0xcd: /* fsb frequency */
		data = 3;
		break;
	case MSR_IA32_APICBASE:
		data = kvm_get_apic_base(vcpu);
		break;
	case MSR_IA32_MISC_ENABLE:
		data = vcpu->arch.ia32_misc_enable_msr;
		break;
	case MSR_IA32_PERF_STATUS:
		/* TSC increment by tick */
		data = 1000ULL;
		/* CPU multiplier */
		data |= (((uint64_t)4ULL) << 40);
		break;
	case MSR_EFER:
		data = vcpu->arch.shadow_efer;
		break;
	case MSR_KVM_WALL_CLOCK:
		data = vcpu->kvm->arch.wall_clock;
		break;
	case MSR_KVM_SYSTEM_TIME:
		data = vcpu->arch.time;
		break;
	default:
		pr_unimpl(vcpu, "unhandled rdmsr: 0x%x\n", msr);
		return 1;
	}
	*pdata = data;
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_get_msr_common);
/*
 * Read or write a bunch of msrs.  All parameters are kernel addresses.
 *
 * @return number of msrs set successfully.
 */
static int __msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs *msrs,
		    struct kvm_msr_entry *entries,
		    int (*do_msr)(struct kvm_vcpu *vcpu,
				  unsigned index, u64 *data))
{
	int i;

	vcpu_load(vcpu);

	down_read(&vcpu->kvm->slots_lock);
	for (i = 0; i < msrs->nmsrs; ++i)
		if (do_msr(vcpu, entries[i].index, &entries[i].data))
			break;
	up_read(&vcpu->kvm->slots_lock);

	vcpu_put(vcpu);

	return i;
}
/*
 * Read or write a bunch of msrs.  Parameters are user addresses.
 *
 * @return number of msrs set successfully.
 */
static int msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs __user *user_msrs,
		  int (*do_msr)(struct kvm_vcpu *vcpu,
				unsigned index, u64 *data),
		  int writeback)
{
	struct kvm_msrs msrs;
	struct kvm_msr_entry *entries;
	int r, n;
	unsigned size;

	r = -EFAULT;
	if (copy_from_user(&msrs, user_msrs, sizeof msrs))
		goto out;

	r = -E2BIG;
	if (msrs.nmsrs >= MAX_IO_MSRS)
		goto out;

	r = -ENOMEM;
	size = sizeof(struct kvm_msr_entry) * msrs.nmsrs;
	entries = vmalloc(size);
	if (!entries)
		goto out;

	r = -EFAULT;
	if (copy_from_user(entries, user_msrs->entries, size))
		goto out_free;

	r = n = __msr_io(vcpu, &msrs, entries, do_msr);
	if (r < 0)
		goto out_free;

	r = -EFAULT;
	if (writeback && copy_to_user(user_msrs->entries, entries, size))
		goto out_free;

	r = n;

out_free:
	vfree(entries);
out:
	return r;
}
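/*
 * Userspace drives this through the KVM_GET_MSRS/KVM_SET_MSRS vcpu
 * ioctls, passing a struct kvm_msrs header with the entries appended
 * inline.  A minimal caller might look like (sketch, error handling
 * omitted):
 *
 *	struct { struct kvm_msrs hdr; struct kvm_msr_entry e[1]; } buf = {
 *		.hdr = { .nmsrs = 1 },
 *		.e   = { { .index = MSR_IA32_APICBASE } },
 *	};
 *	ioctl(vcpu_fd, KVM_GET_MSRS, &buf);  // buf.e[0].data holds the value
 *
 * Note the return value is the number of MSRs processed, not just
 * 0/-errno, which is why __msr_io() stops at the first failing entry.
 */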
int kvm_dev_ioctl_check_extension(long ext)
{
	int r;

	switch (ext) {
	case KVM_CAP_IRQCHIP:
	case KVM_CAP_HLT:
	case KVM_CAP_MMU_SHADOW_CACHE_CONTROL:
	case KVM_CAP_USER_MEMORY:
	case KVM_CAP_SET_TSS_ADDR:
	case KVM_CAP_EXT_CPUID:
	case KVM_CAP_CLOCKSOURCE:
	case KVM_CAP_PIT:
	case KVM_CAP_NOP_IO_DELAY:
	case KVM_CAP_MP_STATE:
	case KVM_CAP_SYNC_MMU:
		r = 1;
		break;
	case KVM_CAP_COALESCED_MMIO:
		r = KVM_COALESCED_MMIO_PAGE_OFFSET;
		break;
	case KVM_CAP_VAPIC:
		r = !kvm_x86_ops->cpu_has_accelerated_tpr();
		break;
	case KVM_CAP_NR_VCPUS:
		r = KVM_MAX_VCPUS;
		break;
	case KVM_CAP_NR_MEMSLOTS:
		r = KVM_MEMORY_SLOTS;
		break;
	case KVM_CAP_PV_MMU:
		r = !tdp_enabled;
		break;
	case KVM_CAP_IOMMU:
		r = intel_iommu_found();
		break;
	default:
		r = 0;
		break;
	}
	return r;
}
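/*
 * Userspace probes these capabilities with the KVM_CHECK_EXTENSION ioctl
 * on the /dev/kvm fd, e.g. (sketch):
 *
 *	if (ioctl(kvm_fd, KVM_CHECK_EXTENSION, KVM_CAP_USER_MEMORY) > 0)
 *		... capability is available ...
 *
 * A zero return means "not supported"; positive returns may also carry
 * extra information, as with KVM_CAP_NR_VCPUS above.
 */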
long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg)
{
	void __user *argp = (void __user *)arg;
	long r;

	switch (ioctl) {
	case KVM_GET_MSR_INDEX_LIST: {
		struct kvm_msr_list __user *user_msr_list = argp;
		struct kvm_msr_list msr_list;
		unsigned n;

		r = -EFAULT;
		if (copy_from_user(&msr_list, user_msr_list, sizeof msr_list))
			goto out;
		n = msr_list.nmsrs;
		msr_list.nmsrs = num_msrs_to_save + ARRAY_SIZE(emulated_msrs);
		if (copy_to_user(user_msr_list, &msr_list, sizeof msr_list))
			goto out;
		r = -E2BIG;
		if (n < num_msrs_to_save)
			goto out;
		r = -EFAULT;
		if (copy_to_user(user_msr_list->indices, &msrs_to_save,
				 num_msrs_to_save * sizeof(u32)))
			goto out;
		if (copy_to_user(user_msr_list->indices
				 + num_msrs_to_save * sizeof(u32),
				 &emulated_msrs,
				 ARRAY_SIZE(emulated_msrs) * sizeof(u32)))
			goto out;
		r = 0;
		break;
	}
	case KVM_GET_SUPPORTED_CPUID: {
		struct kvm_cpuid2 __user *cpuid_arg = argp;
		struct kvm_cpuid2 cpuid;

		r = -EFAULT;
		if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
			goto out;
		r = kvm_dev_ioctl_get_supported_cpuid(&cpuid,
						      cpuid_arg->entries);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(cpuid_arg, &cpuid, sizeof cpuid))
			goto out;
		r = 0;
		break;
	}
	default:
		r = -EINVAL;
	}
out:
	return r;
}
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	kvm_x86_ops->vcpu_load(vcpu, cpu);
	kvm_write_guest_time(vcpu);
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	kvm_x86_ops->vcpu_put(vcpu);
	kvm_put_guest_fpu(vcpu);
}

static int is_efer_nx(void)
{
	u64 efer;

	rdmsrl(MSR_EFER, efer);
	return efer & EFER_NX;
}

static void cpuid_fix_nx_cap(struct kvm_vcpu *vcpu)
{
	int i;
	struct kvm_cpuid_entry2 *e, *entry;

	entry = NULL;
	for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
		e = &vcpu->arch.cpuid_entries[i];
		if (e->function == 0x80000001) {
			entry = e;
			break;
		}
	}
	if (entry && (entry->edx & (1 << 20)) && !is_efer_nx()) {
		entry->edx &= ~(1 << 20);
		printk(KERN_INFO "kvm: guest NX capability removed\n");
	}
}
/* when an old userspace process fills a new kernel module */
static int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu,
				    struct kvm_cpuid *cpuid,
				    struct kvm_cpuid_entry __user *entries)
{
	int r, i;
	struct kvm_cpuid_entry *cpuid_entries;

	r = -E2BIG;
	if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
		goto out;
	r = -ENOMEM;
	cpuid_entries = vmalloc(sizeof(struct kvm_cpuid_entry) * cpuid->nent);
	if (!cpuid_entries)
		goto out;
	r = -EFAULT;
	if (copy_from_user(cpuid_entries, entries,
			   cpuid->nent * sizeof(struct kvm_cpuid_entry)))
		goto out_free;
	for (i = 0; i < cpuid->nent; i++) {
		vcpu->arch.cpuid_entries[i].function = cpuid_entries[i].function;
		vcpu->arch.cpuid_entries[i].eax = cpuid_entries[i].eax;
		vcpu->arch.cpuid_entries[i].ebx = cpuid_entries[i].ebx;
		vcpu->arch.cpuid_entries[i].ecx = cpuid_entries[i].ecx;
		vcpu->arch.cpuid_entries[i].edx = cpuid_entries[i].edx;
		vcpu->arch.cpuid_entries[i].index = 0;
		vcpu->arch.cpuid_entries[i].flags = 0;
		vcpu->arch.cpuid_entries[i].padding[0] = 0;
		vcpu->arch.cpuid_entries[i].padding[1] = 0;
		vcpu->arch.cpuid_entries[i].padding[2] = 0;
	}
	vcpu->arch.cpuid_nent = cpuid->nent;
	cpuid_fix_nx_cap(vcpu);
	r = 0;

out_free:
	vfree(cpuid_entries);
out:
	return r;
}
static int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
				     struct kvm_cpuid2 *cpuid,
				     struct kvm_cpuid_entry2 __user *entries)
{
	int r;

	r = -E2BIG;
	if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
		goto out;
	r = -EFAULT;
	if (copy_from_user(&vcpu->arch.cpuid_entries, entries,
			   cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
		goto out;
	vcpu->arch.cpuid_nent = cpuid->nent;
	return 0;

out:
	return r;
}

static int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
				     struct kvm_cpuid2 *cpuid,
				     struct kvm_cpuid_entry2 __user *entries)
{
	int r;

	r = -E2BIG;
	if (cpuid->nent < vcpu->arch.cpuid_nent)
		goto out;
	r = -EFAULT;
	if (copy_to_user(entries, &vcpu->arch.cpuid_entries,
			 vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
		goto out;
	return 0;

out:
	cpuid->nent = vcpu->arch.cpuid_nent;
	return r;
}
static inline u32 bit(int bitno)
{
	return 1 << (bitno & 31);
}

static void do_cpuid_1_ent(struct kvm_cpuid_entry2 *entry, u32 function,
			   u32 index)
{
	entry->function = function;
	entry->index = index;
	cpuid_count(entry->function, entry->index,
		    &entry->eax, &entry->ebx, &entry->ecx, &entry->edx);
	entry->flags = 0;
}

static void do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
			 u32 index, int *nent, int maxnent)
{
	const u32 kvm_supported_word0_x86_features = bit(X86_FEATURE_FPU) |
		bit(X86_FEATURE_VME) | bit(X86_FEATURE_DE) |
		bit(X86_FEATURE_PSE) | bit(X86_FEATURE_TSC) |
		bit(X86_FEATURE_MSR) | bit(X86_FEATURE_PAE) |
		bit(X86_FEATURE_CX8) | bit(X86_FEATURE_APIC) |
		bit(X86_FEATURE_SEP) | bit(X86_FEATURE_PGE) |
		bit(X86_FEATURE_CMOV) | bit(X86_FEATURE_PSE36) |
		bit(X86_FEATURE_CLFLSH) | bit(X86_FEATURE_MMX) |
		bit(X86_FEATURE_FXSR) | bit(X86_FEATURE_XMM) |
		bit(X86_FEATURE_XMM2) | bit(X86_FEATURE_SELFSNOOP);
	const u32 kvm_supported_word1_x86_features = bit(X86_FEATURE_FPU) |
		bit(X86_FEATURE_VME) | bit(X86_FEATURE_DE) |
		bit(X86_FEATURE_PSE) | bit(X86_FEATURE_TSC) |
		bit(X86_FEATURE_MSR) | bit(X86_FEATURE_PAE) |
		bit(X86_FEATURE_CX8) | bit(X86_FEATURE_APIC) |
		bit(X86_FEATURE_PGE) |
		bit(X86_FEATURE_CMOV) | bit(X86_FEATURE_PSE36) |
		bit(X86_FEATURE_MMX) | bit(X86_FEATURE_FXSR) |
		bit(X86_FEATURE_SYSCALL) |
		/* was "bit(X86_FEATURE_NX) && is_efer_nx()", which collapses
		 * to 0/1 instead of the NX feature bit */
		(is_efer_nx() ? bit(X86_FEATURE_NX) : 0) |
#ifdef CONFIG_X86_64
		bit(X86_FEATURE_LM) |
#endif
		bit(X86_FEATURE_MMXEXT) |
		bit(X86_FEATURE_3DNOWEXT) |
		bit(X86_FEATURE_3DNOW);
	const u32 kvm_supported_word3_x86_features =
		bit(X86_FEATURE_XMM3) | bit(X86_FEATURE_CX16);
	const u32 kvm_supported_word6_x86_features =
		bit(X86_FEATURE_LAHF_LM) | bit(X86_FEATURE_CMP_LEGACY);

	/* all func 2 cpuid_count() should be called on the same cpu */
	get_cpu();
	do_cpuid_1_ent(entry, function, index);
	++*nent;
	switch (function) {
	case 0:
		entry->eax = min(entry->eax, (u32)0xb);
		break;
	case 1:
		entry->edx &= kvm_supported_word0_x86_features;
		entry->ecx &= kvm_supported_word3_x86_features;
		break;
	/* function 2 entries are STATEFUL. That is, repeated cpuid commands
	 * may return different values. This forces us to get_cpu() before
	 * issuing the first command, and also to emulate this annoying behavior
	 * in kvm_emulate_cpuid() using KVM_CPUID_FLAG_STATE_READ_NEXT */
	case 2: {
		int t, times = entry->eax & 0xff;

		entry->flags |= KVM_CPUID_FLAG_STATEFUL_FUNC;
		for (t = 1; t < times && *nent < maxnent; ++t) {
			do_cpuid_1_ent(&entry[t], function, 0);
			entry[t].flags |= KVM_CPUID_FLAG_STATEFUL_FUNC;
			++*nent;
		}
		break;
	}
	/* function 4 and 0xb have additional index. */
	case 4: {
		int i, cache_type;

		entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
		/* read more entries until cache_type is zero */
		for (i = 1; *nent < maxnent; ++i) {
			cache_type = entry[i - 1].eax & 0x1f;
			if (!cache_type)
				break;
			do_cpuid_1_ent(&entry[i], function, i);
			entry[i].flags |=
			       KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
			++*nent;
		}
		break;
	}
	case 0xb: {
		int i, level_type;

		entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
		/* read more entries until level_type is zero */
		for (i = 1; *nent < maxnent; ++i) {
			level_type = entry[i - 1].ecx & 0xff;
			if (!level_type)
				break;
			do_cpuid_1_ent(&entry[i], function, i);
			entry[i].flags |=
			       KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
			++*nent;
		}
		break;
	}
	case 0x80000000:
		entry->eax = min(entry->eax, 0x8000001a);
		break;
	case 0x80000001:
		entry->edx &= kvm_supported_word1_x86_features;
		entry->ecx &= kvm_supported_word6_x86_features;
		break;
	}
	put_cpu();
}
static int kvm_dev_ioctl_get_supported_cpuid(struct kvm_cpuid2 *cpuid,
				      struct kvm_cpuid_entry2 __user *entries)
{
	struct kvm_cpuid_entry2 *cpuid_entries;
	int limit, nent = 0, r = -E2BIG;
	u32 func;

	if (cpuid->nent < 1)
		goto out;
	r = -ENOMEM;
	cpuid_entries = vmalloc(sizeof(struct kvm_cpuid_entry2) * cpuid->nent);
	if (!cpuid_entries)
		goto out;

	do_cpuid_ent(&cpuid_entries[0], 0, 0, &nent, cpuid->nent);
	limit = cpuid_entries[0].eax;
	for (func = 1; func <= limit && nent < cpuid->nent; ++func)
		do_cpuid_ent(&cpuid_entries[nent], func, 0,
			     &nent, cpuid->nent);
	r = -E2BIG;
	if (nent >= cpuid->nent)
		goto out_free;

	do_cpuid_ent(&cpuid_entries[nent], 0x80000000, 0, &nent, cpuid->nent);
	limit = cpuid_entries[nent - 1].eax;
	for (func = 0x80000001; func <= limit && nent < cpuid->nent; ++func)
		do_cpuid_ent(&cpuid_entries[nent], func, 0,
			     &nent, cpuid->nent);
	r = -EFAULT;
	if (copy_to_user(entries, cpuid_entries,
			 nent * sizeof(struct kvm_cpuid_entry2)))
		goto out_free;
	cpuid->nent = nent;
	r = 0;

out_free:
	vfree(cpuid_entries);
out:
	return r;
}
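/*
 * Enumeration runs in two passes because CPUID has two disjoint leaf
 * ranges: standard leaves 0..CPUID(0).EAX and extended leaves
 * 0x80000000..CPUID(0x80000000).EAX.  Each pass queries the range's
 * maximum supported leaf first, then walks up to it.
 */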
static int kvm_vcpu_ioctl_get_lapic(struct kvm_vcpu *vcpu,
				    struct kvm_lapic_state *s)
{
	vcpu_load(vcpu);
	memcpy(s->regs, vcpu->arch.apic->regs, sizeof *s);
	vcpu_put(vcpu);

	return 0;
}

static int kvm_vcpu_ioctl_set_lapic(struct kvm_vcpu *vcpu,
				    struct kvm_lapic_state *s)
{
	vcpu_load(vcpu);
	memcpy(vcpu->arch.apic->regs, s->regs, sizeof *s);
	kvm_apic_post_state_restore(vcpu);
	vcpu_put(vcpu);

	return 0;
}
static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
				    struct kvm_interrupt *irq)
{
	if (irq->irq < 0 || irq->irq >= 256)
		return -EINVAL;
	if (irqchip_in_kernel(vcpu->kvm))
		return -ENXIO;
	vcpu_load(vcpu);

	set_bit(irq->irq, vcpu->arch.irq_pending);
	set_bit(irq->irq / BITS_PER_LONG, &vcpu->arch.irq_summary);

	vcpu_put(vcpu);

	return 0;
}

static int kvm_vcpu_ioctl_nmi(struct kvm_vcpu *vcpu)
{
	vcpu_load(vcpu);
	kvm_inject_nmi(vcpu);
	vcpu_put(vcpu);

	return 0;
}
  1221. static int vcpu_ioctl_tpr_access_reporting(struct kvm_vcpu *vcpu,
  1222. struct kvm_tpr_access_ctl *tac)
  1223. {
  1224. if (tac->flags)
  1225. return -EINVAL;
  1226. vcpu->arch.tpr_access_reporting = !!tac->enabled;
  1227. return 0;
  1228. }
  1229. long kvm_arch_vcpu_ioctl(struct file *filp,
  1230. unsigned int ioctl, unsigned long arg)
  1231. {
  1232. struct kvm_vcpu *vcpu = filp->private_data;
  1233. void __user *argp = (void __user *)arg;
  1234. int r;
  1235. struct kvm_lapic_state *lapic = NULL;
  1236. switch (ioctl) {
  1237. case KVM_GET_LAPIC: {
  1238. lapic = kzalloc(sizeof(struct kvm_lapic_state), GFP_KERNEL);
  1239. r = -ENOMEM;
  1240. if (!lapic)
  1241. goto out;
  1242. r = kvm_vcpu_ioctl_get_lapic(vcpu, lapic);
  1243. if (r)
  1244. goto out;
  1245. r = -EFAULT;
  1246. if (copy_to_user(argp, lapic, sizeof(struct kvm_lapic_state)))
  1247. goto out;
  1248. r = 0;
  1249. break;
  1250. }
  1251. case KVM_SET_LAPIC: {
  1252. lapic = kmalloc(sizeof(struct kvm_lapic_state), GFP_KERNEL);
  1253. r = -ENOMEM;
  1254. if (!lapic)
  1255. goto out;
  1256. r = -EFAULT;
  1257. if (copy_from_user(lapic, argp, sizeof(struct kvm_lapic_state)))
  1258. goto out;
  1259. r = kvm_vcpu_ioctl_set_lapic(vcpu, lapic);
  1260. if (r)
  1261. goto out;
  1262. r = 0;
  1263. break;
  1264. }
  1265. case KVM_INTERRUPT: {
  1266. struct kvm_interrupt irq;
  1267. r = -EFAULT;
  1268. if (copy_from_user(&irq, argp, sizeof irq))
  1269. goto out;
  1270. r = kvm_vcpu_ioctl_interrupt(vcpu, &irq);
  1271. if (r)
  1272. goto out;
  1273. r = 0;
  1274. break;
  1275. }
  1276. case KVM_NMI: {
  1277. r = kvm_vcpu_ioctl_nmi(vcpu);
  1278. if (r)
  1279. goto out;
  1280. r = 0;
  1281. break;
  1282. }
  1283. case KVM_SET_CPUID: {
  1284. struct kvm_cpuid __user *cpuid_arg = argp;
  1285. struct kvm_cpuid cpuid;
  1286. r = -EFAULT;
  1287. if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
  1288. goto out;
  1289. r = kvm_vcpu_ioctl_set_cpuid(vcpu, &cpuid, cpuid_arg->entries);
  1290. if (r)
  1291. goto out;
  1292. break;
  1293. }
  1294. case KVM_SET_CPUID2: {
  1295. struct kvm_cpuid2 __user *cpuid_arg = argp;
  1296. struct kvm_cpuid2 cpuid;
  1297. r = -EFAULT;
  1298. if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
  1299. goto out;
  1300. r = kvm_vcpu_ioctl_set_cpuid2(vcpu, &cpuid,
  1301. cpuid_arg->entries);
  1302. if (r)
  1303. goto out;
  1304. break;
  1305. }
  1306. case KVM_GET_CPUID2: {
  1307. struct kvm_cpuid2 __user *cpuid_arg = argp;
  1308. struct kvm_cpuid2 cpuid;
  1309. r = -EFAULT;
  1310. if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
  1311. goto out;
  1312. r = kvm_vcpu_ioctl_get_cpuid2(vcpu, &cpuid,
  1313. cpuid_arg->entries);
  1314. if (r)
  1315. goto out;
  1316. r = -EFAULT;
  1317. if (copy_to_user(cpuid_arg, &cpuid, sizeof cpuid))
  1318. goto out;
  1319. r = 0;
  1320. break;
  1321. }
  1322. case KVM_GET_MSRS:
  1323. r = msr_io(vcpu, argp, kvm_get_msr, 1);
  1324. break;
  1325. case KVM_SET_MSRS:
  1326. r = msr_io(vcpu, argp, do_set_msr, 0);
  1327. break;
  1328. case KVM_TPR_ACCESS_REPORTING: {
  1329. struct kvm_tpr_access_ctl tac;
  1330. r = -EFAULT;
  1331. if (copy_from_user(&tac, argp, sizeof tac))
  1332. goto out;
  1333. r = vcpu_ioctl_tpr_access_reporting(vcpu, &tac);
  1334. if (r)
  1335. goto out;
  1336. r = -EFAULT;
  1337. if (copy_to_user(argp, &tac, sizeof tac))
  1338. goto out;
  1339. r = 0;
  1340. break;
  1341. }
  1342. case KVM_SET_VAPIC_ADDR: {
  1343. struct kvm_vapic_addr va;
  1344. r = -EINVAL;
  1345. if (!irqchip_in_kernel(vcpu->kvm))
  1346. goto out;
  1347. r = -EFAULT;
  1348. if (copy_from_user(&va, argp, sizeof va))
  1349. goto out;
  1350. r = 0;
  1351. kvm_lapic_set_vapic_addr(vcpu, va.vapic_addr);
  1352. break;
  1353. }
  1354. default:
  1355. r = -EINVAL;
  1356. }
  1357. out:
  1358. if (lapic)
  1359. kfree(lapic);
  1360. return r;
  1361. }
  1362. static int kvm_vm_ioctl_set_tss_addr(struct kvm *kvm, unsigned long addr)
  1363. {
  1364. int ret;
  1365. if (addr > (unsigned int)(-3 * PAGE_SIZE))
  1366. return -1;
  1367. ret = kvm_x86_ops->set_tss_addr(kvm, addr);
  1368. return ret;
  1369. }
  1370. static int kvm_vm_ioctl_set_nr_mmu_pages(struct kvm *kvm,
  1371. u32 kvm_nr_mmu_pages)
  1372. {
  1373. if (kvm_nr_mmu_pages < KVM_MIN_ALLOC_MMU_PAGES)
  1374. return -EINVAL;
  1375. down_write(&kvm->slots_lock);
  1376. kvm_mmu_change_mmu_pages(kvm, kvm_nr_mmu_pages);
  1377. kvm->arch.n_requested_mmu_pages = kvm_nr_mmu_pages;
  1378. up_write(&kvm->slots_lock);
  1379. return 0;
  1380. }
  1381. static int kvm_vm_ioctl_get_nr_mmu_pages(struct kvm *kvm)
  1382. {
  1383. return kvm->arch.n_alloc_mmu_pages;
  1384. }
  1385. gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn)
  1386. {
  1387. int i;
  1388. struct kvm_mem_alias *alias;
  1389. for (i = 0; i < kvm->arch.naliases; ++i) {
  1390. alias = &kvm->arch.aliases[i];
  1391. if (gfn >= alias->base_gfn
  1392. && gfn < alias->base_gfn + alias->npages)
  1393. return alias->target_gfn + gfn - alias->base_gfn;
  1394. }
  1395. return gfn;
  1396. }
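/*
 * Worked example (editor's note): with a single alias of base_gfn = 0xa0,
 * npages = 0x10, target_gfn = 0x200, a lookup of gfn 0xa5 falls inside the
 * window and returns 0x200 + (0xa5 - 0xa0) = 0x205, while gfn 0xb0 is just
 * past the window and is returned unchanged.
 */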
  1397. /*
  1398. * Set a new alias region. Aliases map a portion of physical memory into
  1399. * another portion. This is useful for memory windows, for example the PC
  1400. * VGA region.
  1401. */
  1402. static int kvm_vm_ioctl_set_memory_alias(struct kvm *kvm,
  1403. struct kvm_memory_alias *alias)
  1404. {
  1405. int r, n;
  1406. struct kvm_mem_alias *p;
  1407. r = -EINVAL;
  1408. /* General sanity checks */
  1409. if (alias->memory_size & (PAGE_SIZE - 1))
  1410. goto out;
  1411. if (alias->guest_phys_addr & (PAGE_SIZE - 1))
  1412. goto out;
  1413. if (alias->slot >= KVM_ALIAS_SLOTS)
  1414. goto out;
  1415. if (alias->guest_phys_addr + alias->memory_size
  1416. < alias->guest_phys_addr)
  1417. goto out;
  1418. if (alias->target_phys_addr + alias->memory_size
  1419. < alias->target_phys_addr)
  1420. goto out;
  1421. down_write(&kvm->slots_lock);
  1422. spin_lock(&kvm->mmu_lock);
  1423. p = &kvm->arch.aliases[alias->slot];
  1424. p->base_gfn = alias->guest_phys_addr >> PAGE_SHIFT;
  1425. p->npages = alias->memory_size >> PAGE_SHIFT;
  1426. p->target_gfn = alias->target_phys_addr >> PAGE_SHIFT;
  1427. for (n = KVM_ALIAS_SLOTS; n > 0; --n)
  1428. if (kvm->arch.aliases[n - 1].npages)
  1429. break;
  1430. kvm->arch.naliases = n;
  1431. spin_unlock(&kvm->mmu_lock);
  1432. kvm_mmu_zap_all(kvm);
  1433. up_write(&kvm->slots_lock);
  1434. return 0;
  1435. out:
  1436. return r;
  1437. }
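/*
 * Editor's note on the loop above: naliases is recomputed as one past the
 * highest slot still in use. For example, with slots {used, empty, used,
 * empty} the backwards scan stops at slot 2 (npages != 0), so naliases
 * becomes 3 and unalias_gfn() never iterates over the trailing empty slot.
 */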
  1438. static int kvm_vm_ioctl_get_irqchip(struct kvm *kvm, struct kvm_irqchip *chip)
  1439. {
  1440. int r;
  1441. r = 0;
  1442. switch (chip->chip_id) {
  1443. case KVM_IRQCHIP_PIC_MASTER:
  1444. memcpy(&chip->chip.pic,
  1445. &pic_irqchip(kvm)->pics[0],
  1446. sizeof(struct kvm_pic_state));
  1447. break;
  1448. case KVM_IRQCHIP_PIC_SLAVE:
  1449. memcpy(&chip->chip.pic,
  1450. &pic_irqchip(kvm)->pics[1],
  1451. sizeof(struct kvm_pic_state));
  1452. break;
  1453. case KVM_IRQCHIP_IOAPIC:
  1454. memcpy(&chip->chip.ioapic,
  1455. ioapic_irqchip(kvm),
  1456. sizeof(struct kvm_ioapic_state));
  1457. break;
  1458. default:
  1459. r = -EINVAL;
  1460. break;
  1461. }
  1462. return r;
  1463. }
  1464. static int kvm_vm_ioctl_set_irqchip(struct kvm *kvm, struct kvm_irqchip *chip)
  1465. {
  1466. int r;
  1467. r = 0;
  1468. switch (chip->chip_id) {
  1469. case KVM_IRQCHIP_PIC_MASTER:
  1470. memcpy(&pic_irqchip(kvm)->pics[0],
  1471. &chip->chip.pic,
  1472. sizeof(struct kvm_pic_state));
  1473. break;
  1474. case KVM_IRQCHIP_PIC_SLAVE:
  1475. memcpy(&pic_irqchip(kvm)->pics[1],
  1476. &chip->chip.pic,
  1477. sizeof(struct kvm_pic_state));
  1478. break;
  1479. case KVM_IRQCHIP_IOAPIC:
  1480. memcpy(ioapic_irqchip(kvm),
  1481. &chip->chip.ioapic,
  1482. sizeof(struct kvm_ioapic_state));
  1483. break;
  1484. default:
  1485. r = -EINVAL;
  1486. break;
  1487. }
  1488. kvm_pic_update_irq(pic_irqchip(kvm));
  1489. return r;
  1490. }
  1491. static int kvm_vm_ioctl_get_pit(struct kvm *kvm, struct kvm_pit_state *ps)
  1492. {
  1493. int r = 0;
  1494. memcpy(ps, &kvm->arch.vpit->pit_state, sizeof(struct kvm_pit_state));
  1495. return r;
  1496. }
  1497. static int kvm_vm_ioctl_set_pit(struct kvm *kvm, struct kvm_pit_state *ps)
  1498. {
  1499. int r = 0;
  1500. memcpy(&kvm->arch.vpit->pit_state, ps, sizeof(struct kvm_pit_state));
  1501. kvm_pit_load_count(kvm, 0, ps->channels[0].count);
  1502. return r;
  1503. }
  1504. /*
  1505. * Get (and clear) the dirty memory log for a memory slot.
  1506. */
  1507. int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
  1508. struct kvm_dirty_log *log)
  1509. {
  1510. int r;
  1511. int n;
  1512. struct kvm_memory_slot *memslot;
  1513. int is_dirty = 0;
  1514. down_write(&kvm->slots_lock);
  1515. r = kvm_get_dirty_log(kvm, log, &is_dirty);
  1516. if (r)
  1517. goto out;
  1518. /* If nothing is dirty, don't bother messing with page tables. */
  1519. if (is_dirty) {
  1520. kvm_mmu_slot_remove_write_access(kvm, log->slot);
  1521. kvm_flush_remote_tlbs(kvm);
  1522. memslot = &kvm->memslots[log->slot];
  1523. n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;
  1524. memset(memslot->dirty_bitmap, 0, n);
  1525. }
  1526. r = 0;
  1527. out:
  1528. up_write(&kvm->slots_lock);
  1529. return r;
  1530. }
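/*
 * Editor's sketch (illustrative, not from the original file): a userspace
 * consumer of KVM_GET_DIRTY_LOG. The bitmap holds one bit per page of the
 * slot, and the handler above clears the kernel-side copy, so each call
 * reports pages dirtied since the previous one. Assumes 64-bit longs,
 * <linux/kvm.h>, and a hypothetical sync_page() callback.
 *
 *   struct kvm_dirty_log log = { .slot = slot };
 *   size_t words = (npages + 63) / 64;
 *   unsigned long *bitmap = calloc(words, sizeof(*bitmap));
 *
 *   log.dirty_bitmap = bitmap;
 *   if (ioctl(vm_fd, KVM_GET_DIRTY_LOG, &log) == 0)
 *           for (unsigned long i = 0; i < npages; i++)
 *                   if (bitmap[i / 64] & (1UL << (i % 64)))
 *                           sync_page(slot, i);  // e.g. feed a migration stream
 */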
  1531. long kvm_arch_vm_ioctl(struct file *filp,
  1532. unsigned int ioctl, unsigned long arg)
  1533. {
  1534. struct kvm *kvm = filp->private_data;
  1535. void __user *argp = (void __user *)arg;
  1536. int r = -EINVAL;
  1537. /*
  1538. * This union makes it completely explicit to gcc-3.x
  1539. * that these two variables' stack usage should be
  1540. * combined, not added together.
  1541. */
  1542. union {
  1543. struct kvm_pit_state ps;
  1544. struct kvm_memory_alias alias;
  1545. } u;
  1546. switch (ioctl) {
  1547. case KVM_SET_TSS_ADDR:
  1548. r = kvm_vm_ioctl_set_tss_addr(kvm, arg);
  1549. if (r < 0)
  1550. goto out;
  1551. break;
  1552. case KVM_SET_MEMORY_REGION: {
  1553. struct kvm_memory_region kvm_mem;
  1554. struct kvm_userspace_memory_region kvm_userspace_mem;
  1555. r = -EFAULT;
  1556. if (copy_from_user(&kvm_mem, argp, sizeof kvm_mem))
  1557. goto out;
  1558. kvm_userspace_mem.slot = kvm_mem.slot;
  1559. kvm_userspace_mem.flags = kvm_mem.flags;
  1560. kvm_userspace_mem.guest_phys_addr = kvm_mem.guest_phys_addr;
  1561. kvm_userspace_mem.memory_size = kvm_mem.memory_size;
  1562. r = kvm_vm_ioctl_set_memory_region(kvm, &kvm_userspace_mem, 0);
  1563. if (r)
  1564. goto out;
  1565. break;
  1566. }
  1567. case KVM_SET_NR_MMU_PAGES:
  1568. r = kvm_vm_ioctl_set_nr_mmu_pages(kvm, arg);
  1569. if (r)
  1570. goto out;
  1571. break;
  1572. case KVM_GET_NR_MMU_PAGES:
  1573. r = kvm_vm_ioctl_get_nr_mmu_pages(kvm);
  1574. break;
  1575. case KVM_SET_MEMORY_ALIAS:
  1576. r = -EFAULT;
  1577. if (copy_from_user(&u.alias, argp, sizeof(struct kvm_memory_alias)))
  1578. goto out;
  1579. r = kvm_vm_ioctl_set_memory_alias(kvm, &u.alias);
  1580. if (r)
  1581. goto out;
  1582. break;
  1583. case KVM_CREATE_IRQCHIP:
  1584. r = -ENOMEM;
  1585. kvm->arch.vpic = kvm_create_pic(kvm);
  1586. if (kvm->arch.vpic) {
  1587. r = kvm_ioapic_init(kvm);
  1588. if (r) {
  1589. kfree(kvm->arch.vpic);
  1590. kvm->arch.vpic = NULL;
  1591. goto out;
  1592. }
  1593. } else
  1594. goto out;
  1595. break;
  1596. case KVM_CREATE_PIT:
  1597. r = -ENOMEM;
  1598. kvm->arch.vpit = kvm_create_pit(kvm);
  1599. if (kvm->arch.vpit)
  1600. r = 0;
  1601. break;
  1602. case KVM_IRQ_LINE: {
  1603. struct kvm_irq_level irq_event;
  1604. r = -EFAULT;
  1605. if (copy_from_user(&irq_event, argp, sizeof irq_event))
  1606. goto out;
  1607. if (irqchip_in_kernel(kvm)) {
  1608. mutex_lock(&kvm->lock);
  1609. kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID,
  1610. irq_event.irq, irq_event.level);
  1611. mutex_unlock(&kvm->lock);
  1612. r = 0;
  1613. }
  1614. break;
  1615. }
  1616. case KVM_GET_IRQCHIP: {
  1617. /* 0: PIC master, 1: PIC slave, 2: IOAPIC */
  1618. struct kvm_irqchip *chip = kmalloc(sizeof(*chip), GFP_KERNEL);
  1619. r = -ENOMEM;
  1620. if (!chip)
  1621. goto out;
  1622. r = -EFAULT;
  1623. if (copy_from_user(chip, argp, sizeof *chip))
  1624. goto get_irqchip_out;
  1625. r = -ENXIO;
  1626. if (!irqchip_in_kernel(kvm))
  1627. goto get_irqchip_out;
  1628. r = kvm_vm_ioctl_get_irqchip(kvm, chip);
  1629. if (r)
  1630. goto get_irqchip_out;
  1631. r = -EFAULT;
  1632. if (copy_to_user(argp, chip, sizeof *chip))
  1633. goto get_irqchip_out;
  1634. r = 0;
  1635. get_irqchip_out:
  1636. kfree(chip);
  1637. if (r)
  1638. goto out;
  1639. break;
  1640. }
  1641. case KVM_SET_IRQCHIP: {
  1642. /* 0: PIC master, 1: PIC slave, 2: IOAPIC */
  1643. struct kvm_irqchip *chip = kmalloc(sizeof(*chip), GFP_KERNEL);
  1644. r = -ENOMEM;
  1645. if (!chip)
  1646. goto out;
  1647. r = -EFAULT;
  1648. if (copy_from_user(chip, argp, sizeof *chip))
  1649. goto set_irqchip_out;
  1650. r = -ENXIO;
  1651. if (!irqchip_in_kernel(kvm))
  1652. goto set_irqchip_out;
  1653. r = kvm_vm_ioctl_set_irqchip(kvm, chip);
  1654. if (r)
  1655. goto set_irqchip_out;
  1656. r = 0;
  1657. set_irqchip_out:
  1658. kfree(chip);
  1659. if (r)
  1660. goto out;
  1661. break;
  1662. }
  1663. case KVM_GET_PIT: {
  1664. r = -EFAULT;
  1665. if (copy_from_user(&u.ps, argp, sizeof(struct kvm_pit_state)))
  1666. goto out;
  1667. r = -ENXIO;
  1668. if (!kvm->arch.vpit)
  1669. goto out;
  1670. r = kvm_vm_ioctl_get_pit(kvm, &u.ps);
  1671. if (r)
  1672. goto out;
  1673. r = -EFAULT;
  1674. if (copy_to_user(argp, &u.ps, sizeof(struct kvm_pit_state)))
  1675. goto out;
  1676. r = 0;
  1677. break;
  1678. }
  1679. case KVM_SET_PIT: {
  1680. r = -EFAULT;
  1681. if (copy_from_user(&u.ps, argp, sizeof u.ps))
  1682. goto out;
  1683. r = -ENXIO;
  1684. if (!kvm->arch.vpit)
  1685. goto out;
  1686. r = kvm_vm_ioctl_set_pit(kvm, &u.ps);
  1687. if (r)
  1688. goto out;
  1689. r = 0;
  1690. break;
  1691. }
  1692. default:
  1693. ;
  1694. }
  1695. out:
  1696. return r;
  1697. }
  1698. static void kvm_init_msr_list(void)
  1699. {
  1700. u32 dummy[2];
  1701. unsigned i, j;
  1702. for (i = j = 0; i < ARRAY_SIZE(msrs_to_save); i++) {
  1703. if (rdmsr_safe(msrs_to_save[i], &dummy[0], &dummy[1]) < 0)
  1704. continue;
  1705. if (j < i)
  1706. msrs_to_save[j] = msrs_to_save[i];
  1707. j++;
  1708. }
  1709. num_msrs_to_save = j;
  1710. }
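/*
 * Editor's note: the loop above is the classic in-place filter idiom --
 * read index i scans every candidate while write index j trails behind,
 * keeping only the MSRs that rdmsr_safe() can actually read on this host,
 * so msrs_to_save is compacted without a second array. For example, if the
 * table holds {A, B, C} and reading B faults, the result is {A, C} with
 * num_msrs_to_save = 2.
 */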
  1711. /*
  1712. * Only the APIC needs an MMIO device hook, so shortcut now.
  1713. */
  1714. static struct kvm_io_device *vcpu_find_pervcpu_dev(struct kvm_vcpu *vcpu,
  1715. gpa_t addr, int len,
  1716. int is_write)
  1717. {
  1718. struct kvm_io_device *dev;
  1719. if (vcpu->arch.apic) {
  1720. dev = &vcpu->arch.apic->dev;
  1721. if (dev->in_range(dev, addr, len, is_write))
  1722. return dev;
  1723. }
  1724. return NULL;
  1725. }
  1726. static struct kvm_io_device *vcpu_find_mmio_dev(struct kvm_vcpu *vcpu,
  1727. gpa_t addr, int len,
  1728. int is_write)
  1729. {
  1730. struct kvm_io_device *dev;
  1731. dev = vcpu_find_pervcpu_dev(vcpu, addr, len, is_write);
  1732. if (dev == NULL)
  1733. dev = kvm_io_bus_find_dev(&vcpu->kvm->mmio_bus, addr, len,
  1734. is_write);
  1735. return dev;
  1736. }
  1737. int emulator_read_std(unsigned long addr,
  1738. void *val,
  1739. unsigned int bytes,
  1740. struct kvm_vcpu *vcpu)
  1741. {
  1742. void *data = val;
  1743. int r = X86EMUL_CONTINUE;
  1744. while (bytes) {
  1745. gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr);
  1746. unsigned offset = addr & (PAGE_SIZE-1);
  1747. unsigned tocopy = min(bytes, (unsigned)PAGE_SIZE - offset);
  1748. int ret;
  1749. if (gpa == UNMAPPED_GVA) {
  1750. r = X86EMUL_PROPAGATE_FAULT;
  1751. goto out;
  1752. }
  1753. ret = kvm_read_guest(vcpu->kvm, gpa, data, tocopy);
  1754. if (ret < 0) {
  1755. r = X86EMUL_UNHANDLEABLE;
  1756. goto out;
  1757. }
  1758. bytes -= tocopy;
  1759. data += tocopy;
  1760. addr += tocopy;
  1761. }
  1762. out:
  1763. return r;
  1764. }
  1765. EXPORT_SYMBOL_GPL(emulator_read_std);
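/*
 * Worked example (editor's note): for addr = 0x1ff8 and bytes = 16 with
 * 4 KiB pages, the first iteration has offset = 0xff8, so tocopy =
 * min(16, 0x1000 - 0xff8) = 8; the remaining 8 bytes are translated and
 * copied in a second iteration starting at 0x2000. Each page is translated
 * separately because contiguous GVAs need not map to contiguous GPAs.
 */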
  1766. static int emulator_read_emulated(unsigned long addr,
  1767. void *val,
  1768. unsigned int bytes,
  1769. struct kvm_vcpu *vcpu)
  1770. {
  1771. struct kvm_io_device *mmio_dev;
  1772. gpa_t gpa;
  1773. if (vcpu->mmio_read_completed) {
  1774. memcpy(val, vcpu->mmio_data, bytes);
  1775. vcpu->mmio_read_completed = 0;
  1776. return X86EMUL_CONTINUE;
  1777. }
  1778. gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr);
  1779. /* For APIC access vmexit */
  1780. if ((gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE)
  1781. goto mmio;
  1782. if (emulator_read_std(addr, val, bytes, vcpu)
  1783. == X86EMUL_CONTINUE)
  1784. return X86EMUL_CONTINUE;
  1785. if (gpa == UNMAPPED_GVA)
  1786. return X86EMUL_PROPAGATE_FAULT;
  1787. mmio:
  1788. /*
  1789. * Is this MMIO handled locally?
  1790. */
  1791. mutex_lock(&vcpu->kvm->lock);
  1792. mmio_dev = vcpu_find_mmio_dev(vcpu, gpa, bytes, 0);
  1793. if (mmio_dev) {
  1794. kvm_iodevice_read(mmio_dev, gpa, bytes, val);
  1795. mutex_unlock(&vcpu->kvm->lock);
  1796. return X86EMUL_CONTINUE;
  1797. }
  1798. mutex_unlock(&vcpu->kvm->lock);
  1799. vcpu->mmio_needed = 1;
  1800. vcpu->mmio_phys_addr = gpa;
  1801. vcpu->mmio_size = bytes;
  1802. vcpu->mmio_is_write = 0;
  1803. return X86EMUL_UNHANDLEABLE;
  1804. }
  1805. int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
  1806. const void *val, int bytes)
  1807. {
  1808. int ret;
  1809. ret = kvm_write_guest(vcpu->kvm, gpa, val, bytes);
  1810. if (ret < 0)
  1811. return 0;
  1812. kvm_mmu_pte_write(vcpu, gpa, val, bytes);
  1813. return 1;
  1814. }
  1815. static int emulator_write_emulated_onepage(unsigned long addr,
  1816. const void *val,
  1817. unsigned int bytes,
  1818. struct kvm_vcpu *vcpu)
  1819. {
  1820. struct kvm_io_device *mmio_dev;
  1821. gpa_t gpa;
  1822. gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr);
  1823. if (gpa == UNMAPPED_GVA) {
  1824. kvm_inject_page_fault(vcpu, addr, 2);
  1825. return X86EMUL_PROPAGATE_FAULT;
  1826. }
  1827. /* For APIC access vmexit */
  1828. if ((gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE)
  1829. goto mmio;
  1830. if (emulator_write_phys(vcpu, gpa, val, bytes))
  1831. return X86EMUL_CONTINUE;
  1832. mmio:
  1833. /*
  1834. * Is this MMIO handled locally?
  1835. */
  1836. mutex_lock(&vcpu->kvm->lock);
  1837. mmio_dev = vcpu_find_mmio_dev(vcpu, gpa, bytes, 1);
  1838. if (mmio_dev) {
  1839. kvm_iodevice_write(mmio_dev, gpa, bytes, val);
  1840. mutex_unlock(&vcpu->kvm->lock);
  1841. return X86EMUL_CONTINUE;
  1842. }
  1843. mutex_unlock(&vcpu->kvm->lock);
  1844. vcpu->mmio_needed = 1;
  1845. vcpu->mmio_phys_addr = gpa;
  1846. vcpu->mmio_size = bytes;
  1847. vcpu->mmio_is_write = 1;
  1848. memcpy(vcpu->mmio_data, val, bytes);
  1849. return X86EMUL_CONTINUE;
  1850. }
  1851. int emulator_write_emulated(unsigned long addr,
  1852. const void *val,
  1853. unsigned int bytes,
  1854. struct kvm_vcpu *vcpu)
  1855. {
  1856. /* Crossing a page boundary? */
  1857. if (((addr + bytes - 1) ^ addr) & PAGE_MASK) {
  1858. int rc, now;
  1859. now = -addr & ~PAGE_MASK;
  1860. rc = emulator_write_emulated_onepage(addr, val, now, vcpu);
  1861. if (rc != X86EMUL_CONTINUE)
  1862. return rc;
  1863. addr += now;
  1864. val += now;
  1865. bytes -= now;
  1866. }
  1867. return emulator_write_emulated_onepage(addr, val, bytes, vcpu);
  1868. }
  1869. EXPORT_SYMBOL_GPL(emulator_write_emulated);
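/*
 * Worked example (editor's note): "now = -addr & ~PAGE_MASK" is the number
 * of bytes left in addr's page. With 4 KiB pages, ~PAGE_MASK = 0xfff, so
 * for addr = 0x1ffe the low bits of -addr are 0x002 and now = 2: the first
 * two bytes go to the current page and the tail is handed to a second
 * emulator_write_emulated_onepage() call at 0x2000.
 */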
  1870. static int emulator_cmpxchg_emulated(unsigned long addr,
  1871. const void *old,
  1872. const void *new,
  1873. unsigned int bytes,
  1874. struct kvm_vcpu *vcpu)
  1875. {
  1876. static int reported;
  1877. if (!reported) {
  1878. reported = 1;
  1879. printk(KERN_WARNING "kvm: emulating exchange as write\n");
  1880. }
  1881. #ifndef CONFIG_X86_64
  1882. /* a guest's cmpxchg8b has to be emulated atomically */
  1883. if (bytes == 8) {
  1884. gpa_t gpa;
  1885. struct page *page;
  1886. char *kaddr;
  1887. u64 val;
  1888. gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr);
  1889. if (gpa == UNMAPPED_GVA ||
  1890. (gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE)
  1891. goto emul_write;
  1892. if (((gpa + bytes - 1) & PAGE_MASK) != (gpa & PAGE_MASK))
  1893. goto emul_write;
  1894. val = *(u64 *)new;
  1895. page = gfn_to_page(vcpu->kvm, gpa >> PAGE_SHIFT);
  1896. kaddr = kmap_atomic(page, KM_USER0);
  1897. set_64bit((u64 *)(kaddr + offset_in_page(gpa)), val);
  1898. kunmap_atomic(kaddr, KM_USER0);
  1899. kvm_release_page_dirty(page);
  1900. }
  1901. emul_write:
  1902. #endif
  1903. return emulator_write_emulated(addr, new, bytes, vcpu);
  1904. }
  1905. static unsigned long get_segment_base(struct kvm_vcpu *vcpu, int seg)
  1906. {
  1907. return kvm_x86_ops->get_segment_base(vcpu, seg);
  1908. }
  1909. int emulate_invlpg(struct kvm_vcpu *vcpu, gva_t address)
  1910. {
  1911. kvm_mmu_invlpg(vcpu, address);
  1912. return X86EMUL_CONTINUE;
  1913. }
  1914. int emulate_clts(struct kvm_vcpu *vcpu)
  1915. {
  1916. KVMTRACE_0D(CLTS, vcpu, handler);
  1917. kvm_x86_ops->set_cr0(vcpu, vcpu->arch.cr0 & ~X86_CR0_TS);
  1918. return X86EMUL_CONTINUE;
  1919. }
  1920. int emulator_get_dr(struct x86_emulate_ctxt *ctxt, int dr, unsigned long *dest)
  1921. {
  1922. struct kvm_vcpu *vcpu = ctxt->vcpu;
  1923. switch (dr) {
  1924. case 0 ... 3:
  1925. *dest = kvm_x86_ops->get_dr(vcpu, dr);
  1926. return X86EMUL_CONTINUE;
  1927. default:
  1928. pr_unimpl(vcpu, "%s: unexpected dr %u\n", __func__, dr);
  1929. return X86EMUL_UNHANDLEABLE;
  1930. }
  1931. }
  1932. int emulator_set_dr(struct x86_emulate_ctxt *ctxt, int dr, unsigned long value)
  1933. {
  1934. unsigned long mask = (ctxt->mode == X86EMUL_MODE_PROT64) ? ~0ULL : ~0U;
  1935. int exception;
  1936. kvm_x86_ops->set_dr(ctxt->vcpu, dr, value & mask, &exception);
  1937. if (exception) {
  1938. /* FIXME: better handling */
  1939. return X86EMUL_UNHANDLEABLE;
  1940. }
  1941. return X86EMUL_CONTINUE;
  1942. }
  1943. void kvm_report_emulation_failure(struct kvm_vcpu *vcpu, const char *context)
  1944. {
  1945. u8 opcodes[4];
  1946. unsigned long rip = kvm_rip_read(vcpu);
  1947. unsigned long rip_linear;
  1948. if (!printk_ratelimit())
  1949. return;
  1950. rip_linear = rip + get_segment_base(vcpu, VCPU_SREG_CS);
  1951. emulator_read_std(rip_linear, (void *)opcodes, 4, vcpu);
  1952. printk(KERN_ERR "emulation failed (%s) rip %lx %02x %02x %02x %02x\n",
  1953. context, rip, opcodes[0], opcodes[1], opcodes[2], opcodes[3]);
  1954. }
  1955. EXPORT_SYMBOL_GPL(kvm_report_emulation_failure);
  1956. static struct x86_emulate_ops emulate_ops = {
  1957. .read_std = emulator_read_std,
  1958. .read_emulated = emulator_read_emulated,
  1959. .write_emulated = emulator_write_emulated,
  1960. .cmpxchg_emulated = emulator_cmpxchg_emulated,
  1961. };
  1962. static void cache_all_regs(struct kvm_vcpu *vcpu)
  1963. {
  1964. kvm_register_read(vcpu, VCPU_REGS_RAX);
  1965. kvm_register_read(vcpu, VCPU_REGS_RSP);
  1966. kvm_register_read(vcpu, VCPU_REGS_RIP);
  1967. vcpu->arch.regs_dirty = ~0;
  1968. }
  1969. int emulate_instruction(struct kvm_vcpu *vcpu,
  1970. struct kvm_run *run,
  1971. unsigned long cr2,
  1972. u16 error_code,
  1973. int emulation_type)
  1974. {
  1975. int r;
  1976. struct decode_cache *c;
  1977. kvm_clear_exception_queue(vcpu);
  1978. vcpu->arch.mmio_fault_cr2 = cr2;
  1979. /*
  1980. * TODO: fix x86_emulate.c to use guest_read/write_register
  1981. * instead of direct ->regs accesses, can save hundred cycles
  1982. * on Intel for instructions that don't read/change RSP,
  1983. * for example.
  1984. */
  1985. cache_all_regs(vcpu);
  1986. vcpu->mmio_is_write = 0;
  1987. vcpu->arch.pio.string = 0;
  1988. if (!(emulation_type & EMULTYPE_NO_DECODE)) {
  1989. int cs_db, cs_l;
  1990. kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
  1991. vcpu->arch.emulate_ctxt.vcpu = vcpu;
  1992. vcpu->arch.emulate_ctxt.eflags = kvm_x86_ops->get_rflags(vcpu);
  1993. vcpu->arch.emulate_ctxt.mode =
  1994. (vcpu->arch.emulate_ctxt.eflags & X86_EFLAGS_VM)
  1995. ? X86EMUL_MODE_REAL : cs_l
  1996. ? X86EMUL_MODE_PROT64 : cs_db
  1997. ? X86EMUL_MODE_PROT32 : X86EMUL_MODE_PROT16;
  1998. r = x86_decode_insn(&vcpu->arch.emulate_ctxt, &emulate_ops);
  1999. /* Reject instructions other than VMCALL/VMMCALL when
  2000. * trying to emulate an invalid opcode */
  2001. c = &vcpu->arch.emulate_ctxt.decode;
  2002. if ((emulation_type & EMULTYPE_TRAP_UD) &&
  2003. (!(c->twobyte && c->b == 0x01 &&
  2004. (c->modrm_reg == 0 || c->modrm_reg == 3) &&
  2005. c->modrm_mod == 3 && c->modrm_rm == 1)))
  2006. return EMULATE_FAIL;
  2007. ++vcpu->stat.insn_emulation;
  2008. if (r) {
  2009. ++vcpu->stat.insn_emulation_fail;
  2010. if (kvm_mmu_unprotect_page_virt(vcpu, cr2))
  2011. return EMULATE_DONE;
  2012. return EMULATE_FAIL;
  2013. }
  2014. }
  2015. r = x86_emulate_insn(&vcpu->arch.emulate_ctxt, &emulate_ops);
  2016. if (vcpu->arch.pio.string)
  2017. return EMULATE_DO_MMIO;
  2018. if ((r || vcpu->mmio_is_write) && run) {
  2019. run->exit_reason = KVM_EXIT_MMIO;
  2020. run->mmio.phys_addr = vcpu->mmio_phys_addr;
  2021. memcpy(run->mmio.data, vcpu->mmio_data, 8);
  2022. run->mmio.len = vcpu->mmio_size;
  2023. run->mmio.is_write = vcpu->mmio_is_write;
  2024. }
  2025. if (r) {
  2026. if (kvm_mmu_unprotect_page_virt(vcpu, cr2))
  2027. return EMULATE_DONE;
  2028. if (!vcpu->mmio_needed) {
  2029. kvm_report_emulation_failure(vcpu, "mmio");
  2030. return EMULATE_FAIL;
  2031. }
  2032. return EMULATE_DO_MMIO;
  2033. }
  2034. kvm_x86_ops->set_rflags(vcpu, vcpu->arch.emulate_ctxt.eflags);
  2035. if (vcpu->mmio_is_write) {
  2036. vcpu->mmio_needed = 0;
  2037. return EMULATE_DO_MMIO;
  2038. }
  2039. return EMULATE_DONE;
  2040. }
  2041. EXPORT_SYMBOL_GPL(emulate_instruction);
  2042. static void free_pio_guest_pages(struct kvm_vcpu *vcpu)
  2043. {
  2044. int i;
  2045. for (i = 0; i < ARRAY_SIZE(vcpu->arch.pio.guest_pages); ++i)
  2046. if (vcpu->arch.pio.guest_pages[i]) {
  2047. kvm_release_page_dirty(vcpu->arch.pio.guest_pages[i]);
  2048. vcpu->arch.pio.guest_pages[i] = NULL;
  2049. }
  2050. }
  2051. static int pio_copy_data(struct kvm_vcpu *vcpu)
  2052. {
  2053. void *p = vcpu->arch.pio_data;
  2054. void *q;
  2055. unsigned bytes;
  2056. int nr_pages = vcpu->arch.pio.guest_pages[1] ? 2 : 1;
  2057. q = vmap(vcpu->arch.pio.guest_pages, nr_pages, VM_READ|VM_WRITE,
  2058. PAGE_KERNEL);
  2059. if (!q) {
  2060. free_pio_guest_pages(vcpu);
  2061. return -ENOMEM;
  2062. }
  2063. q += vcpu->arch.pio.guest_page_offset;
  2064. bytes = vcpu->arch.pio.size * vcpu->arch.pio.cur_count;
  2065. if (vcpu->arch.pio.in)
  2066. memcpy(q, p, bytes);
  2067. else
  2068. memcpy(p, q, bytes);
  2069. q -= vcpu->arch.pio.guest_page_offset;
  2070. vunmap(q);
  2071. free_pio_guest_pages(vcpu);
  2072. return 0;
  2073. }
  2074. int complete_pio(struct kvm_vcpu *vcpu)
  2075. {
  2076. struct kvm_pio_request *io = &vcpu->arch.pio;
  2077. long delta;
  2078. int r;
  2079. unsigned long val;
  2080. if (!io->string) {
  2081. if (io->in) {
  2082. val = kvm_register_read(vcpu, VCPU_REGS_RAX);
  2083. memcpy(&val, vcpu->arch.pio_data, io->size);
  2084. kvm_register_write(vcpu, VCPU_REGS_RAX, val);
  2085. }
  2086. } else {
  2087. if (io->in) {
  2088. r = pio_copy_data(vcpu);
  2089. if (r)
  2090. return r;
  2091. }
  2092. delta = 1;
  2093. if (io->rep) {
  2094. delta *= io->cur_count;
  2095. /*
  2096. * The size of the register should really depend on
  2097. * the current address size.
  2098. */
  2099. val = kvm_register_read(vcpu, VCPU_REGS_RCX);
  2100. val -= delta;
  2101. kvm_register_write(vcpu, VCPU_REGS_RCX, val);
  2102. }
  2103. if (io->down)
  2104. delta = -delta;
  2105. delta *= io->size;
  2106. if (io->in) {
  2107. val = kvm_register_read(vcpu, VCPU_REGS_RDI);
  2108. val += delta;
  2109. kvm_register_write(vcpu, VCPU_REGS_RDI, val);
  2110. } else {
  2111. val = kvm_register_read(vcpu, VCPU_REGS_RSI);
  2112. val += delta;
  2113. kvm_register_write(vcpu, VCPU_REGS_RSI, val);
  2114. }
  2115. }
  2116. io->count -= io->cur_count;
  2117. io->cur_count = 0;
  2118. return 0;
  2119. }
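/*
 * Worked example (editor's note): for a "rep outsb" that moved
 * cur_count = 10 one-byte items, delta starts at 1, becomes 10 via the rep
 * scaling, RCX is reduced by 10, and since io->down is clear RSI advances
 * by delta * io->size = 10. For the "ins" direction RDI would be adjusted
 * instead, and with the direction flag set (io->down) the registers would
 * move backwards.
 */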
  2120. static void kernel_pio(struct kvm_io_device *pio_dev,
  2121. struct kvm_vcpu *vcpu,
  2122. void *pd)
  2123. {
  2124. /* TODO: String I/O for in-kernel devices */
  2125. mutex_lock(&vcpu->kvm->lock);
  2126. if (vcpu->arch.pio.in)
  2127. kvm_iodevice_read(pio_dev, vcpu->arch.pio.port,
  2128. vcpu->arch.pio.size,
  2129. pd);
  2130. else
  2131. kvm_iodevice_write(pio_dev, vcpu->arch.pio.port,
  2132. vcpu->arch.pio.size,
  2133. pd);
  2134. mutex_unlock(&vcpu->kvm->lock);
  2135. }
  2136. static void pio_string_write(struct kvm_io_device *pio_dev,
  2137. struct kvm_vcpu *vcpu)
  2138. {
  2139. struct kvm_pio_request *io = &vcpu->arch.pio;
  2140. void *pd = vcpu->arch.pio_data;
  2141. int i;
  2142. mutex_lock(&vcpu->kvm->lock);
  2143. for (i = 0; i < io->cur_count; i++) {
  2144. kvm_iodevice_write(pio_dev, io->port,
  2145. io->size,
  2146. pd);
  2147. pd += io->size;
  2148. }
  2149. mutex_unlock(&vcpu->kvm->lock);
  2150. }
  2151. static struct kvm_io_device *vcpu_find_pio_dev(struct kvm_vcpu *vcpu,
  2152. gpa_t addr, int len,
  2153. int is_write)
  2154. {
  2155. return kvm_io_bus_find_dev(&vcpu->kvm->pio_bus, addr, len, is_write);
  2156. }
  2157. int kvm_emulate_pio(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
  2158. int size, unsigned port)
  2159. {
  2160. struct kvm_io_device *pio_dev;
  2161. unsigned long val;
  2162. vcpu->run->exit_reason = KVM_EXIT_IO;
  2163. vcpu->run->io.direction = in ? KVM_EXIT_IO_IN : KVM_EXIT_IO_OUT;
  2164. vcpu->run->io.size = vcpu->arch.pio.size = size;
  2165. vcpu->run->io.data_offset = KVM_PIO_PAGE_OFFSET * PAGE_SIZE;
  2166. vcpu->run->io.count = vcpu->arch.pio.count = vcpu->arch.pio.cur_count = 1;
  2167. vcpu->run->io.port = vcpu->arch.pio.port = port;
  2168. vcpu->arch.pio.in = in;
  2169. vcpu->arch.pio.string = 0;
  2170. vcpu->arch.pio.down = 0;
  2171. vcpu->arch.pio.guest_page_offset = 0;
  2172. vcpu->arch.pio.rep = 0;
  2173. if (vcpu->run->io.direction == KVM_EXIT_IO_IN)
  2174. KVMTRACE_2D(IO_READ, vcpu, vcpu->run->io.port, (u32)size,
  2175. handler);
  2176. else
  2177. KVMTRACE_2D(IO_WRITE, vcpu, vcpu->run->io.port, (u32)size,
  2178. handler);
  2179. val = kvm_register_read(vcpu, VCPU_REGS_RAX);
  2180. memcpy(vcpu->arch.pio_data, &val, 4);
  2181. pio_dev = vcpu_find_pio_dev(vcpu, port, size, !in);
  2182. if (pio_dev) {
  2183. kernel_pio(pio_dev, vcpu, vcpu->arch.pio_data);
  2184. complete_pio(vcpu);
  2185. return 1;
  2186. }
  2187. return 0;
  2188. }
  2189. EXPORT_SYMBOL_GPL(kvm_emulate_pio);
  2190. int kvm_emulate_pio_string(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
  2191. int size, unsigned long count, int down,
  2192. gva_t address, int rep, unsigned port)
  2193. {
  2194. unsigned now, in_page;
  2195. int i, ret = 0;
  2196. int nr_pages = 1;
  2197. struct page *page;
  2198. struct kvm_io_device *pio_dev;
  2199. vcpu->run->exit_reason = KVM_EXIT_IO;
  2200. vcpu->run->io.direction = in ? KVM_EXIT_IO_IN : KVM_EXIT_IO_OUT;
  2201. vcpu->run->io.size = vcpu->arch.pio.size = size;
  2202. vcpu->run->io.data_offset = KVM_PIO_PAGE_OFFSET * PAGE_SIZE;
  2203. vcpu->run->io.count = vcpu->arch.pio.count = vcpu->arch.pio.cur_count = count;
  2204. vcpu->run->io.port = vcpu->arch.pio.port = port;
  2205. vcpu->arch.pio.in = in;
  2206. vcpu->arch.pio.string = 1;
  2207. vcpu->arch.pio.down = down;
  2208. vcpu->arch.pio.guest_page_offset = offset_in_page(address);
  2209. vcpu->arch.pio.rep = rep;
  2210. if (vcpu->run->io.direction == KVM_EXIT_IO_IN)
  2211. KVMTRACE_2D(IO_READ, vcpu, vcpu->run->io.port, (u32)size,
  2212. handler);
  2213. else
  2214. KVMTRACE_2D(IO_WRITE, vcpu, vcpu->run->io.port, (u32)size,
  2215. handler);
  2216. if (!count) {
  2217. kvm_x86_ops->skip_emulated_instruction(vcpu);
  2218. return 1;
  2219. }
  2220. if (!down)
  2221. in_page = PAGE_SIZE - offset_in_page(address);
  2222. else
  2223. in_page = offset_in_page(address) + size;
  2224. now = min(count, (unsigned long)in_page / size);
  2225. if (!now) {
  2226. /*
  2227. * String I/O straddles page boundary. Pin two guest pages
  2228. * so that we satisfy atomicity constraints. Do just one
  2229. * transaction to avoid complexity.
  2230. */
  2231. nr_pages = 2;
  2232. now = 1;
  2233. }
  2234. if (down) {
  2235. /*
  2236. * String I/O in reverse. Yuck. Kill the guest, fix later.
  2237. */
  2238. pr_unimpl(vcpu, "guest string pio down\n");
  2239. kvm_inject_gp(vcpu, 0);
  2240. return 1;
  2241. }
  2242. vcpu->run->io.count = now;
  2243. vcpu->arch.pio.cur_count = now;
  2244. if (vcpu->arch.pio.cur_count == vcpu->arch.pio.count)
  2245. kvm_x86_ops->skip_emulated_instruction(vcpu);
  2246. for (i = 0; i < nr_pages; ++i) {
  2247. page = gva_to_page(vcpu, address + i * PAGE_SIZE);
  2248. vcpu->arch.pio.guest_pages[i] = page;
  2249. if (!page) {
  2250. kvm_inject_gp(vcpu, 0);
  2251. free_pio_guest_pages(vcpu);
  2252. return 1;
  2253. }
  2254. }
  2255. pio_dev = vcpu_find_pio_dev(vcpu, port,
  2256. vcpu->arch.pio.cur_count,
  2257. !vcpu->arch.pio.in);
  2258. if (!vcpu->arch.pio.in) {
  2259. /* string PIO write */
  2260. ret = pio_copy_data(vcpu);
  2261. if (ret >= 0 && pio_dev) {
  2262. pio_string_write(pio_dev, vcpu);
  2263. complete_pio(vcpu);
  2264. if (vcpu->arch.pio.count == 0)
  2265. ret = 1;
  2266. }
  2267. } else if (pio_dev)
  2268. pr_unimpl(vcpu, "no string pio read support yet, "
  2269. "port %x size %d count %ld\n",
  2270. port, size, count);
  2271. return ret;
  2272. }
  2273. EXPORT_SYMBOL_GPL(kvm_emulate_pio_string);
  2274. int kvm_arch_init(void *opaque)
  2275. {
  2276. int r;
  2277. struct kvm_x86_ops *ops = (struct kvm_x86_ops *)opaque;
  2278. if (kvm_x86_ops) {
  2279. printk(KERN_ERR "kvm: already loaded the other module\n");
  2280. r = -EEXIST;
  2281. goto out;
  2282. }
  2283. if (!ops->cpu_has_kvm_support()) {
  2284. printk(KERN_ERR "kvm: no hardware support\n");
  2285. r = -EOPNOTSUPP;
  2286. goto out;
  2287. }
  2288. if (ops->disabled_by_bios()) {
  2289. printk(KERN_ERR "kvm: disabled by bios\n");
  2290. r = -EOPNOTSUPP;
  2291. goto out;
  2292. }
  2293. r = kvm_mmu_module_init();
  2294. if (r)
  2295. goto out;
  2296. kvm_init_msr_list();
  2297. kvm_x86_ops = ops;
  2298. kvm_mmu_set_nonpresent_ptes(0ull, 0ull);
  2299. kvm_mmu_set_base_ptes(PT_PRESENT_MASK);
  2300. kvm_mmu_set_mask_ptes(PT_USER_MASK, PT_ACCESSED_MASK,
  2301. PT_DIRTY_MASK, PT64_NX_MASK, 0, 0);
  2302. return 0;
  2303. out:
  2304. return r;
  2305. }
  2306. void kvm_arch_exit(void)
  2307. {
  2308. kvm_x86_ops = NULL;
  2309. kvm_mmu_module_exit();
  2310. }
  2311. int kvm_emulate_halt(struct kvm_vcpu *vcpu)
  2312. {
  2313. ++vcpu->stat.halt_exits;
  2314. KVMTRACE_0D(HLT, vcpu, handler);
  2315. if (irqchip_in_kernel(vcpu->kvm)) {
  2316. vcpu->arch.mp_state = KVM_MP_STATE_HALTED;
  2317. return 1;
  2318. } else {
  2319. vcpu->run->exit_reason = KVM_EXIT_HLT;
  2320. return 0;
  2321. }
  2322. }
  2323. EXPORT_SYMBOL_GPL(kvm_emulate_halt);
  2324. static inline gpa_t hc_gpa(struct kvm_vcpu *vcpu, unsigned long a0,
  2325. unsigned long a1)
  2326. {
  2327. if (is_long_mode(vcpu))
  2328. return a0;
  2329. else
  2330. return a0 | ((gpa_t)a1 << 32);
  2331. }
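/*
 * Worked example (editor's note): a 32-bit guest cannot pass a 64-bit gpa
 * in one register, so it splits it: for gpa 0x123456000 it passes
 * a0 = 0x23456000 and a1 = 0x1, and hc_gpa() reassembles
 * a0 | ((gpa_t)a1 << 32) = 0x123456000. A long-mode guest passes the full
 * value in a0 and a1 is ignored.
 */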
  2332. int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
  2333. {
  2334. unsigned long nr, a0, a1, a2, a3, ret;
  2335. int r = 1;
  2336. nr = kvm_register_read(vcpu, VCPU_REGS_RAX);
  2337. a0 = kvm_register_read(vcpu, VCPU_REGS_RBX);
  2338. a1 = kvm_register_read(vcpu, VCPU_REGS_RCX);
  2339. a2 = kvm_register_read(vcpu, VCPU_REGS_RDX);
  2340. a3 = kvm_register_read(vcpu, VCPU_REGS_RSI);
  2341. KVMTRACE_1D(VMMCALL, vcpu, (u32)nr, handler);
  2342. if (!is_long_mode(vcpu)) {
  2343. nr &= 0xFFFFFFFF;
  2344. a0 &= 0xFFFFFFFF;
  2345. a1 &= 0xFFFFFFFF;
  2346. a2 &= 0xFFFFFFFF;
  2347. a3 &= 0xFFFFFFFF;
  2348. }
  2349. switch (nr) {
  2350. case KVM_HC_VAPIC_POLL_IRQ:
  2351. ret = 0;
  2352. break;
  2353. case KVM_HC_MMU_OP:
  2354. r = kvm_pv_mmu_op(vcpu, a0, hc_gpa(vcpu, a1, a2), &ret);
  2355. break;
  2356. default:
  2357. ret = -KVM_ENOSYS;
  2358. break;
  2359. }
  2360. kvm_register_write(vcpu, VCPU_REGS_RAX, ret);
  2361. ++vcpu->stat.hypercalls;
  2362. return r;
  2363. }
  2364. EXPORT_SYMBOL_GPL(kvm_emulate_hypercall);
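/*
 * Editor's sketch (illustrative): the guest side of this interface. The
 * hypercall number travels in RAX and up to four arguments in
 * RBX/RCX/RDX/RSI, matching the kvm_register_read() calls above; the
 * result comes back in RAX. "vmcall" is the Intel mnemonic -- AMD uses
 * "vmmcall", which is why kvm_fix_hypercall() below patches in the right
 * instruction. A minimal one-argument guest-mode wrapper might read:
 *
 *   static unsigned long kvm_hypercall1(unsigned long nr, unsigned long a0)
 *   {
 *           unsigned long ret;
 *           asm volatile("vmcall"
 *                        : "=a"(ret)
 *                        : "a"(nr), "b"(a0)
 *                        : "memory");
 *           return ret;
 *   }
 */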
  2365. int kvm_fix_hypercall(struct kvm_vcpu *vcpu)
  2366. {
  2367. char instruction[3];
  2368. int ret = 0;
  2369. unsigned long rip = kvm_rip_read(vcpu);
  2370. /*
  2371. * Blow out the MMU so that no other VCPU has an active mapping,
  2372. * ensuring that the updated hypercall appears atomically across all
  2373. * VCPUs.
  2374. */
  2375. kvm_mmu_zap_all(vcpu->kvm);
  2376. kvm_x86_ops->patch_hypercall(vcpu, instruction);
  2377. if (emulator_write_emulated(rip, instruction, 3, vcpu)
  2378. != X86EMUL_CONTINUE)
  2379. ret = -EFAULT;
  2380. return ret;
  2381. }
  2382. static u64 mk_cr_64(u64 curr_cr, u32 new_val)
  2383. {
  2384. return (curr_cr & ~((1ULL << 32) - 1)) | new_val;
  2385. }
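/*
 * Worked example (editor's note): mk_cr_64() splices a 32-bit value into
 * the low half of a control register while preserving the upper half.
 * With curr_cr = 0xabcd00000033 and new_val = 0x80050033, the result is
 * 0xabcd80050033.
 */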
  2386. void realmode_lgdt(struct kvm_vcpu *vcpu, u16 limit, unsigned long base)
  2387. {
  2388. struct descriptor_table dt = { limit, base };
  2389. kvm_x86_ops->set_gdt(vcpu, &dt);
  2390. }
  2391. void realmode_lidt(struct kvm_vcpu *vcpu, u16 limit, unsigned long base)
  2392. {
  2393. struct descriptor_table dt = { limit, base };
  2394. kvm_x86_ops->set_idt(vcpu, &dt);
  2395. }
  2396. void realmode_lmsw(struct kvm_vcpu *vcpu, unsigned long msw,
  2397. unsigned long *rflags)
  2398. {
  2399. kvm_lmsw(vcpu, msw);
  2400. *rflags = kvm_x86_ops->get_rflags(vcpu);
  2401. }
  2402. unsigned long realmode_get_cr(struct kvm_vcpu *vcpu, int cr)
  2403. {
  2404. unsigned long value;
  2405. kvm_x86_ops->decache_cr4_guest_bits(vcpu);
  2406. switch (cr) {
  2407. case 0:
  2408. value = vcpu->arch.cr0;
  2409. break;
  2410. case 2:
  2411. value = vcpu->arch.cr2;
  2412. break;
  2413. case 3:
  2414. value = vcpu->arch.cr3;
  2415. break;
  2416. case 4:
  2417. value = vcpu->arch.cr4;
  2418. break;
  2419. case 8:
  2420. value = kvm_get_cr8(vcpu);
  2421. break;
  2422. default:
  2423. vcpu_printf(vcpu, "%s: unexpected cr %u\n", __func__, cr);
  2424. return 0;
  2425. }
  2426. KVMTRACE_3D(CR_READ, vcpu, (u32)cr, (u32)value,
  2427. (u32)((u64)value >> 32), handler);
  2428. return value;
  2429. }
  2430. void realmode_set_cr(struct kvm_vcpu *vcpu, int cr, unsigned long val,
  2431. unsigned long *rflags)
  2432. {
  2433. KVMTRACE_3D(CR_WRITE, vcpu, (u32)cr, (u32)val,
  2434. (u32)((u64)val >> 32), handler);
  2435. switch (cr) {
  2436. case 0:
  2437. kvm_set_cr0(vcpu, mk_cr_64(vcpu->arch.cr0, val));
  2438. *rflags = kvm_x86_ops->get_rflags(vcpu);
  2439. break;
  2440. case 2:
  2441. vcpu->arch.cr2 = val;
  2442. break;
  2443. case 3:
  2444. kvm_set_cr3(vcpu, val);
  2445. break;
  2446. case 4:
  2447. kvm_set_cr4(vcpu, mk_cr_64(vcpu->arch.cr4, val));
  2448. break;
  2449. case 8:
  2450. kvm_set_cr8(vcpu, val & 0xfUL);
  2451. break;
  2452. default:
  2453. vcpu_printf(vcpu, "%s: unexpected cr %u\n", __func__, cr);
  2454. }
  2455. }
  2456. static int move_to_next_stateful_cpuid_entry(struct kvm_vcpu *vcpu, int i)
  2457. {
  2458. struct kvm_cpuid_entry2 *e = &vcpu->arch.cpuid_entries[i];
  2459. int j, nent = vcpu->arch.cpuid_nent;
  2460. e->flags &= ~KVM_CPUID_FLAG_STATE_READ_NEXT;
  2461. /* when no next entry is found, the current entry[i] is reselected */
  2462. for (j = i + 1; ; j = (j + 1) % nent) {
  2463. struct kvm_cpuid_entry2 *ej = &vcpu->arch.cpuid_entries[j];
  2464. if (ej->function == e->function) {
  2465. ej->flags |= KVM_CPUID_FLAG_STATE_READ_NEXT;
  2466. return j;
  2467. }
  2468. }
  2469. return 0; /* silence gcc, even though control never reaches here */
  2470. }
  2471. /* find an entry with matching function, matching index (if needed), and that
  2472. * should be read next (if it's stateful) */
  2473. static int is_matching_cpuid_entry(struct kvm_cpuid_entry2 *e,
  2474. u32 function, u32 index)
  2475. {
  2476. if (e->function != function)
  2477. return 0;
  2478. if ((e->flags & KVM_CPUID_FLAG_SIGNIFCANT_INDEX) && e->index != index)
  2479. return 0;
  2480. if ((e->flags & KVM_CPUID_FLAG_STATEFUL_FUNC) &&
  2481. !(e->flags & KVM_CPUID_FLAG_STATE_READ_NEXT))
  2482. return 0;
  2483. return 1;
  2484. }
  2485. void kvm_emulate_cpuid(struct kvm_vcpu *vcpu)
  2486. {
  2487. int i;
  2488. u32 function, index;
  2489. struct kvm_cpuid_entry2 *e, *best;
  2490. function = kvm_register_read(vcpu, VCPU_REGS_RAX);
  2491. index = kvm_register_read(vcpu, VCPU_REGS_RCX);
  2492. kvm_register_write(vcpu, VCPU_REGS_RAX, 0);
  2493. kvm_register_write(vcpu, VCPU_REGS_RBX, 0);
  2494. kvm_register_write(vcpu, VCPU_REGS_RCX, 0);
  2495. kvm_register_write(vcpu, VCPU_REGS_RDX, 0);
  2496. best = NULL;
  2497. for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
  2498. e = &vcpu->arch.cpuid_entries[i];
  2499. if (is_matching_cpuid_entry(e, function, index)) {
  2500. if (e->flags & KVM_CPUID_FLAG_STATEFUL_FUNC)
  2501. move_to_next_stateful_cpuid_entry(vcpu, i);
  2502. best = e;
  2503. break;
  2504. }
  2505. /*
  2506. * Both basic or both extended?
  2507. */
  2508. if (((e->function ^ function) & 0x80000000) == 0)
  2509. if (!best || e->function > best->function)
  2510. best = e;
  2511. }
  2512. if (best) {
  2513. kvm_register_write(vcpu, VCPU_REGS_RAX, best->eax);
  2514. kvm_register_write(vcpu, VCPU_REGS_RBX, best->ebx);
  2515. kvm_register_write(vcpu, VCPU_REGS_RCX, best->ecx);
  2516. kvm_register_write(vcpu, VCPU_REGS_RDX, best->edx);
  2517. }
  2518. kvm_x86_ops->skip_emulated_instruction(vcpu);
  2519. KVMTRACE_5D(CPUID, vcpu, function,
  2520. (u32)kvm_register_read(vcpu, VCPU_REGS_RAX),
  2521. (u32)kvm_register_read(vcpu, VCPU_REGS_RBX),
  2522. (u32)kvm_register_read(vcpu, VCPU_REGS_RCX),
  2523. (u32)kvm_register_read(vcpu, VCPU_REGS_RDX), handler);
  2524. }
  2525. EXPORT_SYMBOL_GPL(kvm_emulate_cpuid);
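/*
 * Editor's note linking this back to the stateful-CPUID machinery above:
 * CPUID leaf 2 (cache descriptors) reports in AL how many times the leaf
 * must be executed, so do_cpuid_ent() materializes one entry per required
 * iteration and marks them KVM_CPUID_FLAG_STATEFUL_FUNC. Each guest CPUID
 * here consumes the entry flagged KVM_CPUID_FLAG_STATE_READ_NEXT, and
 * move_to_next_stateful_cpuid_entry() advances the flag round-robin, so
 * repeated executions walk the set just as they would on hardware.
 */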
  2526. /*
  2527. * Check if userspace requested an interrupt window, and that the
  2528. * interrupt window is open.
  2529. *
  2530. * No need to exit to userspace if we already have an interrupt queued.
  2531. */
  2532. static int dm_request_for_irq_injection(struct kvm_vcpu *vcpu,
  2533. struct kvm_run *kvm_run)
  2534. {
  2535. return (!vcpu->arch.irq_summary &&
  2536. kvm_run->request_interrupt_window &&
  2537. vcpu->arch.interrupt_window_open &&
  2538. (kvm_x86_ops->get_rflags(vcpu) & X86_EFLAGS_IF));
  2539. }
  2540. /*
  2541. * Check if userspace requested an NMI window, and that the NMI window
  2542. * is open.
  2543. *
  2544. * No need to exit to userspace if we already have an NMI queued.
  2545. */
  2546. static int dm_request_for_nmi_injection(struct kvm_vcpu *vcpu,
  2547. struct kvm_run *kvm_run)
  2548. {
  2549. return (!vcpu->arch.nmi_pending &&
  2550. kvm_run->request_nmi_window &&
  2551. vcpu->arch.nmi_window_open);
  2552. }
  2553. static void post_kvm_run_save(struct kvm_vcpu *vcpu,
  2554. struct kvm_run *kvm_run)
  2555. {
  2556. kvm_run->if_flag = (kvm_x86_ops->get_rflags(vcpu) & X86_EFLAGS_IF) != 0;
  2557. kvm_run->cr8 = kvm_get_cr8(vcpu);
  2558. kvm_run->apic_base = kvm_get_apic_base(vcpu);
  2559. if (irqchip_in_kernel(vcpu->kvm)) {
  2560. kvm_run->ready_for_interrupt_injection = 1;
  2561. kvm_run->ready_for_nmi_injection = 1;
  2562. } else {
  2563. kvm_run->ready_for_interrupt_injection =
  2564. (vcpu->arch.interrupt_window_open &&
  2565. vcpu->arch.irq_summary == 0);
  2566. kvm_run->ready_for_nmi_injection =
  2567. (vcpu->arch.nmi_window_open &&
  2568. vcpu->arch.nmi_pending == 0);
  2569. }
  2570. }
  2571. static void vapic_enter(struct kvm_vcpu *vcpu)
  2572. {
  2573. struct kvm_lapic *apic = vcpu->arch.apic;
  2574. struct page *page;
  2575. if (!apic || !apic->vapic_addr)
  2576. return;
  2577. page = gfn_to_page(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT);
  2578. vcpu->arch.apic->vapic_page = page;
  2579. }
  2580. static void vapic_exit(struct kvm_vcpu *vcpu)
  2581. {
  2582. struct kvm_lapic *apic = vcpu->arch.apic;
  2583. if (!apic || !apic->vapic_addr)
  2584. return;
  2585. down_read(&vcpu->kvm->slots_lock);
  2586. kvm_release_page_dirty(apic->vapic_page);
  2587. mark_page_dirty(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT);
  2588. up_read(&vcpu->kvm->slots_lock);
  2589. }
  2590. static int vcpu_enter_guest(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
  2591. {
  2592. int r;
  2593. if (vcpu->requests)
  2594. if (test_and_clear_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests))
  2595. kvm_mmu_unload(vcpu);
  2596. r = kvm_mmu_reload(vcpu);
  2597. if (unlikely(r))
  2598. goto out;
  2599. if (vcpu->requests) {
  2600. if (test_and_clear_bit(KVM_REQ_MIGRATE_TIMER, &vcpu->requests))
  2601. __kvm_migrate_timers(vcpu);
  2602. if (test_and_clear_bit(KVM_REQ_MMU_SYNC, &vcpu->requests))
  2603. kvm_mmu_sync_roots(vcpu);
  2604. if (test_and_clear_bit(KVM_REQ_TLB_FLUSH, &vcpu->requests))
  2605. kvm_x86_ops->tlb_flush(vcpu);
  2606. if (test_and_clear_bit(KVM_REQ_REPORT_TPR_ACCESS,
  2607. &vcpu->requests)) {
  2608. kvm_run->exit_reason = KVM_EXIT_TPR_ACCESS;
  2609. r = 0;
  2610. goto out;
  2611. }
  2612. if (test_and_clear_bit(KVM_REQ_TRIPLE_FAULT, &vcpu->requests)) {
  2613. kvm_run->exit_reason = KVM_EXIT_SHUTDOWN;
  2614. r = 0;
  2615. goto out;
  2616. }
  2617. }
  2618. clear_bit(KVM_REQ_PENDING_TIMER, &vcpu->requests);
  2619. kvm_inject_pending_timer_irqs(vcpu);
  2620. preempt_disable();
  2621. kvm_x86_ops->prepare_guest_switch(vcpu);
  2622. kvm_load_guest_fpu(vcpu);
  2623. local_irq_disable();
  2624. if (vcpu->requests || need_resched() || signal_pending(current)) {
  2625. local_irq_enable();
  2626. preempt_enable();
  2627. r = 1;
  2628. goto out;
  2629. }
  2630. if (vcpu->guest_debug.enabled)
  2631. kvm_x86_ops->guest_debug_pre(vcpu);
  2632. vcpu->guest_mode = 1;
  2633. /*
  2634. * Make sure that guest_mode assignment won't happen after
  2635. * testing the pending IRQ vector bitmap.
  2636. */
  2637. smp_wmb();
  2638. if (vcpu->arch.exception.pending)
  2639. __queue_exception(vcpu);
  2640. else if (irqchip_in_kernel(vcpu->kvm))
  2641. kvm_x86_ops->inject_pending_irq(vcpu);
  2642. else
  2643. kvm_x86_ops->inject_pending_vectors(vcpu, kvm_run);
  2644. kvm_lapic_sync_to_vapic(vcpu);
  2645. up_read(&vcpu->kvm->slots_lock);
  2646. kvm_guest_enter();
  2647. KVMTRACE_0D(VMENTRY, vcpu, entryexit);
  2648. kvm_x86_ops->run(vcpu, kvm_run);
  2649. vcpu->guest_mode = 0;
  2650. local_irq_enable();
  2651. ++vcpu->stat.exits;
  2652. /*
  2653. * We must have an instruction between local_irq_enable() and
  2654. * kvm_guest_exit(), so the timer interrupt isn't delayed by
  2655. * the interrupt shadow. The stat.exits increment will do nicely.
  2656. * But we need to prevent reordering, hence this barrier():
  2657. */
  2658. barrier();
  2659. kvm_guest_exit();
  2660. preempt_enable();
  2661. down_read(&vcpu->kvm->slots_lock);
  2662. /*
  2663. * Profile KVM exit RIPs:
  2664. */
  2665. if (unlikely(prof_on == KVM_PROFILING)) {
  2666. unsigned long rip = kvm_rip_read(vcpu);
  2667. profile_hit(KVM_PROFILING, (void *)rip);
  2668. }
  2669. if (vcpu->arch.exception.pending && kvm_x86_ops->exception_injected(vcpu))
  2670. vcpu->arch.exception.pending = false;
  2671. kvm_lapic_sync_from_vapic(vcpu);
  2672. r = kvm_x86_ops->handle_exit(kvm_run, vcpu);
  2673. out:
  2674. return r;
  2675. }
  2676. static int __vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
  2677. {
  2678. int r;
  2679. if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_SIPI_RECEIVED)) {
  2680. pr_debug("vcpu %d received sipi with vector # %x\n",
  2681. vcpu->vcpu_id, vcpu->arch.sipi_vector);
  2682. kvm_lapic_reset(vcpu);
  2683. r = kvm_arch_vcpu_reset(vcpu);
  2684. if (r)
  2685. return r;
  2686. vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
  2687. }
  2688. down_read(&vcpu->kvm->slots_lock);
  2689. vapic_enter(vcpu);
  2690. r = 1;
  2691. while (r > 0) {
  2692. if (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE)
  2693. r = vcpu_enter_guest(vcpu, kvm_run);
  2694. else {
  2695. up_read(&vcpu->kvm->slots_lock);
  2696. kvm_vcpu_block(vcpu);
  2697. down_read(&vcpu->kvm->slots_lock);
  2698. if (test_and_clear_bit(KVM_REQ_UNHALT, &vcpu->requests))
  2699. if (vcpu->arch.mp_state == KVM_MP_STATE_HALTED)
  2700. vcpu->arch.mp_state =
  2701. KVM_MP_STATE_RUNNABLE;
  2702. if (vcpu->arch.mp_state != KVM_MP_STATE_RUNNABLE)
  2703. r = -EINTR;
  2704. }
  2705. if (r > 0) {
  2706. if (dm_request_for_nmi_injection(vcpu, kvm_run)) {
  2707. r = -EINTR;
  2708. kvm_run->exit_reason = KVM_EXIT_NMI;
  2709. ++vcpu->stat.request_nmi_exits;
  2710. }
  2711. if (dm_request_for_irq_injection(vcpu, kvm_run)) {
  2712. r = -EINTR;
  2713. kvm_run->exit_reason = KVM_EXIT_INTR;
  2714. ++vcpu->stat.request_irq_exits;
  2715. }
  2716. if (signal_pending(current)) {
  2717. r = -EINTR;
  2718. kvm_run->exit_reason = KVM_EXIT_INTR;
  2719. ++vcpu->stat.signal_exits;
  2720. }
  2721. if (need_resched()) {
  2722. up_read(&vcpu->kvm->slots_lock);
  2723. kvm_resched(vcpu);
  2724. down_read(&vcpu->kvm->slots_lock);
  2725. }
  2726. }
  2727. }
  2728. up_read(&vcpu->kvm->slots_lock);
  2729. post_kvm_run_save(vcpu, kvm_run);
  2730. vapic_exit(vcpu);
  2731. return r;
  2732. }
  2733. int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
  2734. {
  2735. int r;
  2736. sigset_t sigsaved;
  2737. vcpu_load(vcpu);
  2738. if (vcpu->sigset_active)
  2739. sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
  2740. if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_UNINITIALIZED)) {
  2741. kvm_vcpu_block(vcpu);
  2742. clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
  2743. r = -EAGAIN;
  2744. goto out;
  2745. }
  2746. /* re-sync apic's tpr */
  2747. if (!irqchip_in_kernel(vcpu->kvm))
  2748. kvm_set_cr8(vcpu, kvm_run->cr8);
  2749. if (vcpu->arch.pio.cur_count) {
  2750. r = complete_pio(vcpu);
  2751. if (r)
  2752. goto out;
  2753. }
  2754. #ifdef CONFIG_HAS_IOMEM
  2755. if (vcpu->mmio_needed) {
  2756. memcpy(vcpu->mmio_data, kvm_run->mmio.data, 8);
  2757. vcpu->mmio_read_completed = 1;
  2758. vcpu->mmio_needed = 0;
  2759. down_read(&vcpu->kvm->slots_lock);
  2760. r = emulate_instruction(vcpu, kvm_run,
  2761. vcpu->arch.mmio_fault_cr2, 0,
  2762. EMULTYPE_NO_DECODE);
  2763. up_read(&vcpu->kvm->slots_lock);
  2764. if (r == EMULATE_DO_MMIO) {
  2765. /*
  2766. * Read-modify-write. Back to userspace.
  2767. */
  2768. r = 0;
  2769. goto out;
  2770. }
  2771. }
  2772. #endif
  2773. if (kvm_run->exit_reason == KVM_EXIT_HYPERCALL)
  2774. kvm_register_write(vcpu, VCPU_REGS_RAX,
  2775. kvm_run->hypercall.ret);
  2776. r = __vcpu_run(vcpu, kvm_run);
  2777. out:
  2778. if (vcpu->sigset_active)
  2779. sigprocmask(SIG_SETMASK, &sigsaved, NULL);
  2780. vcpu_put(vcpu);
  2781. return r;
  2782. }
  2783. int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
  2784. {
  2785. vcpu_load(vcpu);
  2786. regs->rax = kvm_register_read(vcpu, VCPU_REGS_RAX);
  2787. regs->rbx = kvm_register_read(vcpu, VCPU_REGS_RBX);
  2788. regs->rcx = kvm_register_read(vcpu, VCPU_REGS_RCX);
  2789. regs->rdx = kvm_register_read(vcpu, VCPU_REGS_RDX);
  2790. regs->rsi = kvm_register_read(vcpu, VCPU_REGS_RSI);
  2791. regs->rdi = kvm_register_read(vcpu, VCPU_REGS_RDI);
  2792. regs->rsp = kvm_register_read(vcpu, VCPU_REGS_RSP);
  2793. regs->rbp = kvm_register_read(vcpu, VCPU_REGS_RBP);
  2794. #ifdef CONFIG_X86_64
  2795. regs->r8 = kvm_register_read(vcpu, VCPU_REGS_R8);
  2796. regs->r9 = kvm_register_read(vcpu, VCPU_REGS_R9);
  2797. regs->r10 = kvm_register_read(vcpu, VCPU_REGS_R10);
  2798. regs->r11 = kvm_register_read(vcpu, VCPU_REGS_R11);
  2799. regs->r12 = kvm_register_read(vcpu, VCPU_REGS_R12);
  2800. regs->r13 = kvm_register_read(vcpu, VCPU_REGS_R13);
  2801. regs->r14 = kvm_register_read(vcpu, VCPU_REGS_R14);
  2802. regs->r15 = kvm_register_read(vcpu, VCPU_REGS_R15);
  2803. #endif
  2804. regs->rip = kvm_rip_read(vcpu);
  2805. regs->rflags = kvm_x86_ops->get_rflags(vcpu);
  2806. /*
  2807. * Don't leak debug flags in case they were set for guest debugging
  2808. */
  2809. if (vcpu->guest_debug.enabled && vcpu->guest_debug.singlestep)
  2810. regs->rflags &= ~(X86_EFLAGS_TF | X86_EFLAGS_RF);
  2811. vcpu_put(vcpu);
  2812. return 0;
  2813. }
  2814. int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
  2815. {
  2816. vcpu_load(vcpu);
  2817. kvm_register_write(vcpu, VCPU_REGS_RAX, regs->rax);
  2818. kvm_register_write(vcpu, VCPU_REGS_RBX, regs->rbx);
  2819. kvm_register_write(vcpu, VCPU_REGS_RCX, regs->rcx);
  2820. kvm_register_write(vcpu, VCPU_REGS_RDX, regs->rdx);
  2821. kvm_register_write(vcpu, VCPU_REGS_RSI, regs->rsi);
  2822. kvm_register_write(vcpu, VCPU_REGS_RDI, regs->rdi);
  2823. kvm_register_write(vcpu, VCPU_REGS_RSP, regs->rsp);
  2824. kvm_register_write(vcpu, VCPU_REGS_RBP, regs->rbp);
  2825. #ifdef CONFIG_X86_64
  2826. kvm_register_write(vcpu, VCPU_REGS_R8, regs->r8);
  2827. kvm_register_write(vcpu, VCPU_REGS_R9, regs->r9);
  2828. kvm_register_write(vcpu, VCPU_REGS_R10, regs->r10);
  2829. kvm_register_write(vcpu, VCPU_REGS_R11, regs->r11);
  2830. kvm_register_write(vcpu, VCPU_REGS_R12, regs->r12);
  2831. kvm_register_write(vcpu, VCPU_REGS_R13, regs->r13);
  2832. kvm_register_write(vcpu, VCPU_REGS_R14, regs->r14);
  2833. kvm_register_write(vcpu, VCPU_REGS_R15, regs->r15);
  2834. #endif
  2835. kvm_rip_write(vcpu, regs->rip);
  2836. kvm_x86_ops->set_rflags(vcpu, regs->rflags);
  2837. vcpu->arch.exception.pending = false;
  2838. vcpu_put(vcpu);
  2839. return 0;
  2840. }
  2841. void kvm_get_segment(struct kvm_vcpu *vcpu,
  2842. struct kvm_segment *var, int seg)
  2843. {
  2844. kvm_x86_ops->get_segment(vcpu, var, seg);
  2845. }
  2846. void kvm_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l)
  2847. {
  2848. struct kvm_segment cs;
  2849. kvm_get_segment(vcpu, &cs, VCPU_SREG_CS);
  2850. *db = cs.db;
  2851. *l = cs.l;
  2852. }
  2853. EXPORT_SYMBOL_GPL(kvm_get_cs_db_l_bits);
int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	struct descriptor_table dt;
	int pending_vec;

	vcpu_load(vcpu);

	kvm_get_segment(vcpu, &sregs->cs, VCPU_SREG_CS);
	kvm_get_segment(vcpu, &sregs->ds, VCPU_SREG_DS);
	kvm_get_segment(vcpu, &sregs->es, VCPU_SREG_ES);
	kvm_get_segment(vcpu, &sregs->fs, VCPU_SREG_FS);
	kvm_get_segment(vcpu, &sregs->gs, VCPU_SREG_GS);
	kvm_get_segment(vcpu, &sregs->ss, VCPU_SREG_SS);

	kvm_get_segment(vcpu, &sregs->tr, VCPU_SREG_TR);
	kvm_get_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR);

	kvm_x86_ops->get_idt(vcpu, &dt);
	sregs->idt.limit = dt.limit;
	sregs->idt.base = dt.base;
	kvm_x86_ops->get_gdt(vcpu, &dt);
	sregs->gdt.limit = dt.limit;
	sregs->gdt.base = dt.base;

	kvm_x86_ops->decache_cr4_guest_bits(vcpu);
	sregs->cr0 = vcpu->arch.cr0;
	sregs->cr2 = vcpu->arch.cr2;
	sregs->cr3 = vcpu->arch.cr3;
	sregs->cr4 = vcpu->arch.cr4;
	sregs->cr8 = kvm_get_cr8(vcpu);
	sregs->efer = vcpu->arch.shadow_efer;
	sregs->apic_base = kvm_get_apic_base(vcpu);

	if (irqchip_in_kernel(vcpu->kvm)) {
		memset(sregs->interrupt_bitmap, 0,
		       sizeof sregs->interrupt_bitmap);
		pending_vec = kvm_x86_ops->get_irq(vcpu);
		if (pending_vec >= 0)
			set_bit(pending_vec,
				(unsigned long *)sregs->interrupt_bitmap);
	} else
		memcpy(sregs->interrupt_bitmap, vcpu->arch.irq_pending,
		       sizeof sregs->interrupt_bitmap);

	vcpu_put(vcpu);

	return 0;
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	vcpu_load(vcpu);
	mp_state->mp_state = vcpu->arch.mp_state;
	vcpu_put(vcpu);
	return 0;
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	vcpu_load(vcpu);
	vcpu->arch.mp_state = mp_state->mp_state;
	vcpu_put(vcpu);
	return 0;
}

static void kvm_set_segment(struct kvm_vcpu *vcpu,
			    struct kvm_segment *var, int seg)
{
	kvm_x86_ops->set_segment(vcpu, var, seg);
}

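/*
 * Unpack an 8-byte hardware segment descriptor into the flat
 * kvm_segment layout.  The base is scattered over three descriptor
 * fields (bits 0-15, 16-23 and 24-31) and the 20-bit limit over two;
 * when the granularity bit is set the limit counts 4K pages, so it is
 * shifted up by 12 with the low 0xfff filled in to get a byte limit.
 */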
static void seg_desct_to_kvm_desct(struct desc_struct *seg_desc, u16 selector,
				   struct kvm_segment *kvm_desct)
{
	kvm_desct->base = seg_desc->base0;
	kvm_desct->base |= seg_desc->base1 << 16;
	kvm_desct->base |= seg_desc->base2 << 24;
	kvm_desct->limit = seg_desc->limit0;
	kvm_desct->limit |= seg_desc->limit << 16;
	if (seg_desc->g) {
		kvm_desct->limit <<= 12;
		kvm_desct->limit |= 0xfff;
	}
	kvm_desct->selector = selector;
	kvm_desct->type = seg_desc->type;
	kvm_desct->present = seg_desc->p;
	kvm_desct->dpl = seg_desc->dpl;
	kvm_desct->db = seg_desc->d;
	kvm_desct->s = seg_desc->s;
	kvm_desct->l = seg_desc->l;
	kvm_desct->g = seg_desc->g;
	kvm_desct->avl = seg_desc->avl;
	if (!selector)
		kvm_desct->unusable = 1;
	else
		kvm_desct->unusable = 0;
	kvm_desct->padding = 0;
}

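/*
 * Bit 2 of a selector is the TI (table indicator) flag: set means the
 * selector indexes the LDT, clear means the GDT.  An unusable LDT is
 * reported with limit 0, so any descriptor lookup through it will fail
 * the limit check in the helpers below.
 */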
static void get_segment_descriptor_dtable(struct kvm_vcpu *vcpu,
					  u16 selector,
					  struct descriptor_table *dtable)
{
	if (selector & 1 << 2) {
		struct kvm_segment kvm_seg;

		kvm_get_segment(vcpu, &kvm_seg, VCPU_SREG_LDTR);

		if (kvm_seg.unusable)
			dtable->limit = 0;
		else
			dtable->limit = kvm_seg.limit;
		dtable->base = kvm_seg.base;
	} else
		kvm_x86_ops->get_gdt(vcpu, dtable);
}

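/*
 * The upper 13 bits of a selector are the descriptor index, hence the
 * ">> 3" below.  A table limit is the offset of the table's last valid
 * byte, so an 8-byte descriptor at `index' fits only when
 * limit >= index * 8 + 7; on the load path a failed check emulates the
 * #GP (with the selector as error code) that real hardware would raise.
 */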
/* allowed just for 8-byte segments */
static int load_guest_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector,
					 struct desc_struct *seg_desc)
{
	gpa_t gpa;
	struct descriptor_table dtable;
	u16 index = selector >> 3;

	get_segment_descriptor_dtable(vcpu, selector, &dtable);

	if (dtable.limit < index * 8 + 7) {
		kvm_queue_exception_e(vcpu, GP_VECTOR, selector & 0xfffc);
		return 1;
	}
	gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, dtable.base);
	gpa += index * 8;
	return kvm_read_guest(vcpu->kvm, gpa, seg_desc, 8);
}

/* allowed just for 8-byte segments */
static int save_guest_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector,
					 struct desc_struct *seg_desc)
{
	gpa_t gpa;
	struct descriptor_table dtable;
	u16 index = selector >> 3;

	get_segment_descriptor_dtable(vcpu, selector, &dtable);

	if (dtable.limit < index * 8 + 7)
		return 1;
	gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, dtable.base);
	gpa += index * 8;
	return kvm_write_guest(vcpu->kvm, gpa, seg_desc, 8);
}

static u32 get_tss_base_addr(struct kvm_vcpu *vcpu,
			     struct desc_struct *seg_desc)
{
	u32 base_addr;

	base_addr = seg_desc->base0;
	base_addr |= (seg_desc->base1 << 16);
	base_addr |= (seg_desc->base2 << 24);

	return vcpu->arch.mmu.gva_to_gpa(vcpu, base_addr);
}

static u16 get_segment_selector(struct kvm_vcpu *vcpu, int seg)
{
	struct kvm_segment kvm_seg;

	kvm_get_segment(vcpu, &kvm_seg, seg);
	return kvm_seg.selector;
}

static int load_segment_descriptor_to_kvm_desct(struct kvm_vcpu *vcpu,
						u16 selector,
						struct kvm_segment *kvm_seg)
{
	struct desc_struct seg_desc;

	if (load_guest_segment_descriptor(vcpu, selector, &seg_desc))
		return 1;
	seg_desct_to_kvm_desct(&seg_desc, selector, kvm_seg);
	return 0;
}

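/*
 * Outside protected mode there are no descriptor tables to consult: a
 * real-mode segment is synthesized directly from the selector, with
 * base = selector << 4 and a fixed 64K limit, matching how the CPU
 * forms real-mode linear addresses.
 */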
static int kvm_load_realmode_segment(struct kvm_vcpu *vcpu, u16 selector, int seg)
{
	struct kvm_segment segvar = {
		.base = selector << 4,
		.limit = 0xffff,
		.selector = selector,
		.type = 3,
		.present = 1,
		.dpl = 3,
		.db = 0,
		.s = 1,
		.l = 0,
		.g = 0,
		.avl = 0,
		.unusable = 0,
	};
	kvm_x86_ops->set_segment(vcpu, &segvar, seg);
	return 0;
}

int kvm_load_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector,
				int type_bits, int seg)
{
	struct kvm_segment kvm_seg;

	if (!(vcpu->arch.cr0 & X86_CR0_PE))
		return kvm_load_realmode_segment(vcpu, selector, seg);
	if (load_segment_descriptor_to_kvm_desct(vcpu, selector, &kvm_seg))
		return 1;
	kvm_seg.type |= type_bits;

	if (seg != VCPU_SREG_SS && seg != VCPU_SREG_CS &&
	    seg != VCPU_SREG_LDTR)
		if (!kvm_seg.s)
			kvm_seg.unusable = 1;

	kvm_set_segment(vcpu, &kvm_seg, seg);
	return 0;
}

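/*
 * Hardware task switching moves the whole register file through the
 * TSS.  The helpers below capture the outgoing context into a TSS
 * image and restore the incoming context from one; bit 1 of EFLAGS is
 * architecturally always set, hence the "| 2" when flags are reloaded.
 */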
static void save_state_to_tss32(struct kvm_vcpu *vcpu,
				struct tss_segment_32 *tss)
{
	tss->cr3 = vcpu->arch.cr3;
	tss->eip = kvm_rip_read(vcpu);
	tss->eflags = kvm_x86_ops->get_rflags(vcpu);
	tss->eax = kvm_register_read(vcpu, VCPU_REGS_RAX);
	tss->ecx = kvm_register_read(vcpu, VCPU_REGS_RCX);
	tss->edx = kvm_register_read(vcpu, VCPU_REGS_RDX);
	tss->ebx = kvm_register_read(vcpu, VCPU_REGS_RBX);
	tss->esp = kvm_register_read(vcpu, VCPU_REGS_RSP);
	tss->ebp = kvm_register_read(vcpu, VCPU_REGS_RBP);
	tss->esi = kvm_register_read(vcpu, VCPU_REGS_RSI);
	tss->edi = kvm_register_read(vcpu, VCPU_REGS_RDI);
	tss->es = get_segment_selector(vcpu, VCPU_SREG_ES);
	tss->cs = get_segment_selector(vcpu, VCPU_SREG_CS);
	tss->ss = get_segment_selector(vcpu, VCPU_SREG_SS);
	tss->ds = get_segment_selector(vcpu, VCPU_SREG_DS);
	tss->fs = get_segment_selector(vcpu, VCPU_SREG_FS);
	tss->gs = get_segment_selector(vcpu, VCPU_SREG_GS);
	tss->ldt_selector = get_segment_selector(vcpu, VCPU_SREG_LDTR);
	tss->prev_task_link = get_segment_selector(vcpu, VCPU_SREG_TR);
}

static int load_state_from_tss32(struct kvm_vcpu *vcpu,
				 struct tss_segment_32 *tss)
{
	kvm_set_cr3(vcpu, tss->cr3);

	kvm_rip_write(vcpu, tss->eip);
	kvm_x86_ops->set_rflags(vcpu, tss->eflags | 2);

	kvm_register_write(vcpu, VCPU_REGS_RAX, tss->eax);
	kvm_register_write(vcpu, VCPU_REGS_RCX, tss->ecx);
	kvm_register_write(vcpu, VCPU_REGS_RDX, tss->edx);
	kvm_register_write(vcpu, VCPU_REGS_RBX, tss->ebx);
	kvm_register_write(vcpu, VCPU_REGS_RSP, tss->esp);
	kvm_register_write(vcpu, VCPU_REGS_RBP, tss->ebp);
	kvm_register_write(vcpu, VCPU_REGS_RSI, tss->esi);
	kvm_register_write(vcpu, VCPU_REGS_RDI, tss->edi);

	if (kvm_load_segment_descriptor(vcpu, tss->ldt_selector, 0, VCPU_SREG_LDTR))
		return 1;

	if (kvm_load_segment_descriptor(vcpu, tss->es, 1, VCPU_SREG_ES))
		return 1;

	if (kvm_load_segment_descriptor(vcpu, tss->cs, 9, VCPU_SREG_CS))
		return 1;

	if (kvm_load_segment_descriptor(vcpu, tss->ss, 1, VCPU_SREG_SS))
		return 1;

	if (kvm_load_segment_descriptor(vcpu, tss->ds, 1, VCPU_SREG_DS))
		return 1;

	if (kvm_load_segment_descriptor(vcpu, tss->fs, 1, VCPU_SREG_FS))
		return 1;

	if (kvm_load_segment_descriptor(vcpu, tss->gs, 1, VCPU_SREG_GS))
		return 1;
	return 0;
}

static void save_state_to_tss16(struct kvm_vcpu *vcpu,
				struct tss_segment_16 *tss)
{
	tss->ip = kvm_rip_read(vcpu);
	tss->flag = kvm_x86_ops->get_rflags(vcpu);
	tss->ax = kvm_register_read(vcpu, VCPU_REGS_RAX);
	tss->cx = kvm_register_read(vcpu, VCPU_REGS_RCX);
	tss->dx = kvm_register_read(vcpu, VCPU_REGS_RDX);
	tss->bx = kvm_register_read(vcpu, VCPU_REGS_RBX);
	tss->sp = kvm_register_read(vcpu, VCPU_REGS_RSP);
	tss->bp = kvm_register_read(vcpu, VCPU_REGS_RBP);
	tss->si = kvm_register_read(vcpu, VCPU_REGS_RSI);
	tss->di = kvm_register_read(vcpu, VCPU_REGS_RDI);
	tss->es = get_segment_selector(vcpu, VCPU_SREG_ES);
	tss->cs = get_segment_selector(vcpu, VCPU_SREG_CS);
	tss->ss = get_segment_selector(vcpu, VCPU_SREG_SS);
	tss->ds = get_segment_selector(vcpu, VCPU_SREG_DS);
	tss->ldt = get_segment_selector(vcpu, VCPU_SREG_LDTR);
	tss->prev_task_link = get_segment_selector(vcpu, VCPU_SREG_TR);
}

static int load_state_from_tss16(struct kvm_vcpu *vcpu,
				 struct tss_segment_16 *tss)
{
	kvm_rip_write(vcpu, tss->ip);
	kvm_x86_ops->set_rflags(vcpu, tss->flag | 2);
	kvm_register_write(vcpu, VCPU_REGS_RAX, tss->ax);
	kvm_register_write(vcpu, VCPU_REGS_RCX, tss->cx);
	kvm_register_write(vcpu, VCPU_REGS_RDX, tss->dx);
	kvm_register_write(vcpu, VCPU_REGS_RBX, tss->bx);
	kvm_register_write(vcpu, VCPU_REGS_RSP, tss->sp);
	kvm_register_write(vcpu, VCPU_REGS_RBP, tss->bp);
	kvm_register_write(vcpu, VCPU_REGS_RSI, tss->si);
	kvm_register_write(vcpu, VCPU_REGS_RDI, tss->di);

	if (kvm_load_segment_descriptor(vcpu, tss->ldt, 0, VCPU_SREG_LDTR))
		return 1;

	if (kvm_load_segment_descriptor(vcpu, tss->es, 1, VCPU_SREG_ES))
		return 1;

	if (kvm_load_segment_descriptor(vcpu, tss->cs, 9, VCPU_SREG_CS))
		return 1;

	if (kvm_load_segment_descriptor(vcpu, tss->ss, 1, VCPU_SREG_SS))
		return 1;

	if (kvm_load_segment_descriptor(vcpu, tss->ds, 1, VCPU_SREG_DS))
		return 1;
	return 0;
}

static int kvm_task_switch_16(struct kvm_vcpu *vcpu, u16 tss_selector,
			      u32 old_tss_base,
			      struct desc_struct *nseg_desc)
{
	struct tss_segment_16 tss_segment_16;
	int ret = 0;

	if (kvm_read_guest(vcpu->kvm, old_tss_base, &tss_segment_16,
			   sizeof tss_segment_16))
		goto out;

	save_state_to_tss16(vcpu, &tss_segment_16);

	if (kvm_write_guest(vcpu->kvm, old_tss_base, &tss_segment_16,
			    sizeof tss_segment_16))
		goto out;

	if (kvm_read_guest(vcpu->kvm, get_tss_base_addr(vcpu, nseg_desc),
			   &tss_segment_16, sizeof tss_segment_16))
		goto out;

	if (load_state_from_tss16(vcpu, &tss_segment_16))
		goto out;

	ret = 1;
out:
	return ret;
}

static int kvm_task_switch_32(struct kvm_vcpu *vcpu, u16 tss_selector,
			      u32 old_tss_base,
			      struct desc_struct *nseg_desc)
{
	struct tss_segment_32 tss_segment_32;
	int ret = 0;

	if (kvm_read_guest(vcpu->kvm, old_tss_base, &tss_segment_32,
			   sizeof tss_segment_32))
		goto out;

	save_state_to_tss32(vcpu, &tss_segment_32);

	if (kvm_write_guest(vcpu->kvm, old_tss_base, &tss_segment_32,
			    sizeof tss_segment_32))
		goto out;

	if (kvm_read_guest(vcpu->kvm, get_tss_base_addr(vcpu, nseg_desc),
			   &tss_segment_32, sizeof tss_segment_32))
		goto out;

	if (load_state_from_tss32(vcpu, &tss_segment_32))
		goto out;

	ret = 1;
out:
	return ret;
}

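/*
 * Emulated hardware task switch: privilege checks, the minimum TSS
 * limit of 0x67, busy-bit bookkeeping in the old and new TSS
 * descriptors, and NT flag handling follow the architectural
 * task-switch sequence.  `reason' distinguishes IRET, JMP, CALL and
 * gate-initiated switches, which differ in exactly those steps.
 */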
int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason)
{
	struct kvm_segment tr_seg;
	struct desc_struct cseg_desc;
	struct desc_struct nseg_desc;
	int ret = 0;
	u32 old_tss_base = get_segment_base(vcpu, VCPU_SREG_TR);
	u16 old_tss_sel = get_segment_selector(vcpu, VCPU_SREG_TR);

	old_tss_base = vcpu->arch.mmu.gva_to_gpa(vcpu, old_tss_base);

	/* FIXME: Handle errors. Failure to read either TSS or their
	 * descriptors should generate a page fault.
	 */
	if (load_guest_segment_descriptor(vcpu, tss_selector, &nseg_desc))
		goto out;

	if (load_guest_segment_descriptor(vcpu, old_tss_sel, &cseg_desc))
		goto out;

	if (reason != TASK_SWITCH_IRET) {
		int cpl;

		cpl = kvm_x86_ops->get_cpl(vcpu);
		if ((tss_selector & 3) > nseg_desc.dpl || cpl > nseg_desc.dpl) {
			kvm_queue_exception_e(vcpu, GP_VECTOR, 0);
			return 1;
		}
	}

	if (!nseg_desc.p || (nseg_desc.limit0 | nseg_desc.limit << 16) < 0x67) {
		kvm_queue_exception_e(vcpu, TS_VECTOR, tss_selector & 0xfffc);
		return 1;
	}

	if (reason == TASK_SWITCH_IRET || reason == TASK_SWITCH_JMP) {
		cseg_desc.type &= ~(1 << 1); /* clear the B (busy) flag */
		save_guest_segment_descriptor(vcpu, old_tss_sel, &cseg_desc);
	}

	if (reason == TASK_SWITCH_IRET) {
		u32 eflags = kvm_x86_ops->get_rflags(vcpu);
		kvm_x86_ops->set_rflags(vcpu, eflags & ~X86_EFLAGS_NT);
	}

	kvm_x86_ops->skip_emulated_instruction(vcpu);

	if (nseg_desc.type & 8)
		ret = kvm_task_switch_32(vcpu, tss_selector, old_tss_base,
					 &nseg_desc);
	else
		ret = kvm_task_switch_16(vcpu, tss_selector, old_tss_base,
					 &nseg_desc);

	if (reason == TASK_SWITCH_CALL || reason == TASK_SWITCH_GATE) {
		u32 eflags = kvm_x86_ops->get_rflags(vcpu);
		kvm_x86_ops->set_rflags(vcpu, eflags | X86_EFLAGS_NT);
	}

	if (reason != TASK_SWITCH_IRET) {
		nseg_desc.type |= (1 << 1); /* mark the new TSS busy */
		save_guest_segment_descriptor(vcpu, tss_selector,
					      &nseg_desc);
	}

	kvm_x86_ops->set_cr0(vcpu, vcpu->arch.cr0 | X86_CR0_TS);
	seg_desct_to_kvm_desct(&nseg_desc, tss_selector, &tr_seg);
	tr_seg.type = 11;
	kvm_set_segment(vcpu, &tr_seg, VCPU_SREG_TR);
out:
	return ret;
}
EXPORT_SYMBOL_GPL(kvm_task_switch);

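/*
 * Counterpart of the get_sregs path above.  Note that any change to a
 * register that affects the paging mode (CR0, CR3, CR4, EFER) sets
 * mmu_reset_needed, forcing the shadow MMU context to be rebuilt
 * against the new configuration.
 */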
int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	int mmu_reset_needed = 0;
	int i, pending_vec, max_bits;
	struct descriptor_table dt;

	vcpu_load(vcpu);

	dt.limit = sregs->idt.limit;
	dt.base = sregs->idt.base;
	kvm_x86_ops->set_idt(vcpu, &dt);
	dt.limit = sregs->gdt.limit;
	dt.base = sregs->gdt.base;
	kvm_x86_ops->set_gdt(vcpu, &dt);

	vcpu->arch.cr2 = sregs->cr2;
	mmu_reset_needed |= vcpu->arch.cr3 != sregs->cr3;
	vcpu->arch.cr3 = sregs->cr3;

	kvm_set_cr8(vcpu, sregs->cr8);

	mmu_reset_needed |= vcpu->arch.shadow_efer != sregs->efer;
	kvm_x86_ops->set_efer(vcpu, sregs->efer);
	kvm_set_apic_base(vcpu, sregs->apic_base);

	kvm_x86_ops->decache_cr4_guest_bits(vcpu);

	mmu_reset_needed |= vcpu->arch.cr0 != sregs->cr0;
	kvm_x86_ops->set_cr0(vcpu, sregs->cr0);
	vcpu->arch.cr0 = sregs->cr0;

	mmu_reset_needed |= vcpu->arch.cr4 != sregs->cr4;
	kvm_x86_ops->set_cr4(vcpu, sregs->cr4);
	if (!is_long_mode(vcpu) && is_pae(vcpu))
		load_pdptrs(vcpu, vcpu->arch.cr3);

	if (mmu_reset_needed)
		kvm_mmu_reset_context(vcpu);

	if (!irqchip_in_kernel(vcpu->kvm)) {
		memcpy(vcpu->arch.irq_pending, sregs->interrupt_bitmap,
		       sizeof vcpu->arch.irq_pending);
		vcpu->arch.irq_summary = 0;
		for (i = 0; i < ARRAY_SIZE(vcpu->arch.irq_pending); ++i)
			if (vcpu->arch.irq_pending[i])
				__set_bit(i, &vcpu->arch.irq_summary);
	} else {
		max_bits = (sizeof sregs->interrupt_bitmap) << 3;
		pending_vec = find_first_bit(
			(const unsigned long *)sregs->interrupt_bitmap,
			max_bits);
		/* Only pending external irq is handled here */
		if (pending_vec < max_bits) {
			kvm_x86_ops->set_irq(vcpu, pending_vec);
			pr_debug("Set back pending irq %d\n", pending_vec);
		}
		kvm_pic_clear_isr_ack(vcpu->kvm);
	}

	kvm_set_segment(vcpu, &sregs->cs, VCPU_SREG_CS);
	kvm_set_segment(vcpu, &sregs->ds, VCPU_SREG_DS);
	kvm_set_segment(vcpu, &sregs->es, VCPU_SREG_ES);
	kvm_set_segment(vcpu, &sregs->fs, VCPU_SREG_FS);
	kvm_set_segment(vcpu, &sregs->gs, VCPU_SREG_GS);
	kvm_set_segment(vcpu, &sregs->ss, VCPU_SREG_SS);

	kvm_set_segment(vcpu, &sregs->tr, VCPU_SREG_TR);
	kvm_set_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR);

	/* Older userspace won't unhalt the vcpu on reset. */
	if (vcpu->vcpu_id == 0 && kvm_rip_read(vcpu) == 0xfff0 &&
	    sregs->cs.selector == 0xf000 && sregs->cs.base == 0xffff0000 &&
	    !(vcpu->arch.cr0 & X86_CR0_PE))
		vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;

	vcpu_put(vcpu);

	return 0;
}

int kvm_arch_vcpu_ioctl_debug_guest(struct kvm_vcpu *vcpu,
				    struct kvm_debug_guest *dbg)
{
	int r;

	vcpu_load(vcpu);

	r = kvm_x86_ops->set_guest_debug(vcpu, dbg);

	vcpu_put(vcpu);

	return r;
}

/*
 * fxsave fpu state.  Taken from x86_64/processor.h.  To be killed when
 * we have asm/x86/processor.h
 */
struct fxsave {
	u16	cwd;
	u16	swd;
	u16	twd;
	u16	fop;
	u64	rip;
	u64	rdp;
	u32	mxcsr;
	u32	mxcsr_mask;
	u32	st_space[32];	/* 8*16 bytes for each FP-reg = 128 bytes */
#ifdef CONFIG_X86_64
	u32	xmm_space[64];	/* 16*16 bytes for each XMM-reg = 256 bytes */
#else
	u32	xmm_space[32];	/* 8*16 bytes for each XMM-reg = 128 bytes */
#endif
};

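/*
 * The struct above mirrors the leading portion of the 512-byte memory
 * image written by the fxsave instruction, in which every FP and XMM
 * register occupies a 16-byte slot; hence 128 bytes of st_space for
 * the 8 FP registers.
 */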
/*
 * Translate a guest virtual address to a guest physical address.
 */
int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	unsigned long vaddr = tr->linear_address;
	gpa_t gpa;

	vcpu_load(vcpu);
	down_read(&vcpu->kvm->slots_lock);
	gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, vaddr);
	up_read(&vcpu->kvm->slots_lock);
	tr->physical_address = gpa;
	tr->valid = gpa != UNMAPPED_GVA;
	tr->writeable = 1;
	tr->usermode = 0;
	vcpu_put(vcpu);

	return 0;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	struct fxsave *fxsave = (struct fxsave *)&vcpu->arch.guest_fx_image;

	vcpu_load(vcpu);

	memcpy(fpu->fpr, fxsave->st_space, 128);
	fpu->fcw = fxsave->cwd;
	fpu->fsw = fxsave->swd;
	fpu->ftwx = fxsave->twd;
	fpu->last_opcode = fxsave->fop;
	fpu->last_ip = fxsave->rip;
	fpu->last_dp = fxsave->rdp;
	memcpy(fpu->xmm, fxsave->xmm_space, sizeof fxsave->xmm_space);

	vcpu_put(vcpu);

	return 0;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	struct fxsave *fxsave = (struct fxsave *)&vcpu->arch.guest_fx_image;

	vcpu_load(vcpu);

	memcpy(fxsave->st_space, fpu->fpr, 128);
	fxsave->cwd = fpu->fcw;
	fxsave->swd = fpu->fsw;
	fxsave->twd = fpu->ftwx;
	fxsave->fop = fpu->last_opcode;
	fxsave->rip = fpu->last_ip;
	fxsave->rdp = fpu->last_dp;
	memcpy(fxsave->xmm_space, fpu->xmm, sizeof fxsave->xmm_space);

	vcpu_put(vcpu);

	return 0;
}

void fx_init(struct kvm_vcpu *vcpu)
{
	unsigned after_mxcsr_mask;

	/*
	 * Touch the FPU the first time in a non-atomic context: if this
	 * is the first FPU instruction, the exception handler will fire
	 * before the instruction returns and will have to allocate RAM
	 * with GFP_KERNEL.
	 */
	if (!used_math())
		kvm_fx_save(&vcpu->arch.host_fx_image);

	/* Initialize guest FPU by resetting ours and saving into guest's */
	preempt_disable();
	kvm_fx_save(&vcpu->arch.host_fx_image);
	kvm_fx_finit();
	kvm_fx_save(&vcpu->arch.guest_fx_image);
	kvm_fx_restore(&vcpu->arch.host_fx_image);
	preempt_enable();

	vcpu->arch.cr0 |= X86_CR0_ET;
	after_mxcsr_mask = offsetof(struct i387_fxsave_struct, st_space);
	vcpu->arch.guest_fx_image.mxcsr = 0x1f80;
	memset((void *)&vcpu->arch.guest_fx_image + after_mxcsr_mask,
	       0, sizeof(struct i387_fxsave_struct) - after_mxcsr_mask);
}
EXPORT_SYMBOL_GPL(fx_init);

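/*
 * Lazy FPU switching: the guest FPU image is loaded onto the CPU only
 * when the guest actually needs it (guest_fpu_loaded guards against
 * double loads), with the host image stashed so kvm_put_guest_fpu()
 * can restore it on the way out; the fpu_reload stat counts how often
 * that round trip happens.
 */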
void kvm_load_guest_fpu(struct kvm_vcpu *vcpu)
{
	if (!vcpu->fpu_active || vcpu->guest_fpu_loaded)
		return;

	vcpu->guest_fpu_loaded = 1;
	kvm_fx_save(&vcpu->arch.host_fx_image);
	kvm_fx_restore(&vcpu->arch.guest_fx_image);
}
EXPORT_SYMBOL_GPL(kvm_load_guest_fpu);

void kvm_put_guest_fpu(struct kvm_vcpu *vcpu)
{
	if (!vcpu->guest_fpu_loaded)
		return;

	vcpu->guest_fpu_loaded = 0;
	kvm_fx_save(&vcpu->arch.guest_fx_image);
	kvm_fx_restore(&vcpu->arch.host_fx_image);
	++vcpu->stat.fpu_reload;
}
EXPORT_SYMBOL_GPL(kvm_put_guest_fpu);

void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
{
	kvm_x86_ops->vcpu_free(vcpu);
}

struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
				      unsigned int id)
{
	return kvm_x86_ops->vcpu_create(kvm, id);
}

int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
	int r;

	/* We do fxsave: this must be aligned. */
	BUG_ON((unsigned long)&vcpu->arch.host_fx_image & 0xF);

	vcpu->arch.mtrr_state.have_fixed = 1;
	vcpu_load(vcpu);
	r = kvm_arch_vcpu_reset(vcpu);
	if (r == 0)
		r = kvm_mmu_setup(vcpu);
	vcpu_put(vcpu);
	if (r < 0)
		goto free_vcpu;

	return 0;
free_vcpu:
	kvm_x86_ops->vcpu_free(vcpu);
	return r;
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	vcpu_load(vcpu);
	kvm_mmu_unload(vcpu);
	vcpu_put(vcpu);

	kvm_x86_ops->vcpu_free(vcpu);
}

int kvm_arch_vcpu_reset(struct kvm_vcpu *vcpu)
{
	vcpu->arch.nmi_pending = false;
	vcpu->arch.nmi_injected = false;

	return kvm_x86_ops->vcpu_reset(vcpu);
}

void kvm_arch_hardware_enable(void *garbage)
{
	kvm_x86_ops->hardware_enable(garbage);
}

void kvm_arch_hardware_disable(void *garbage)
{
	kvm_x86_ops->hardware_disable(garbage);
}

int kvm_arch_hardware_setup(void)
{
	return kvm_x86_ops->hardware_setup();
}

void kvm_arch_hardware_unsetup(void)
{
	kvm_x86_ops->hardware_unsetup();
}

void kvm_arch_check_processor_compat(void *rtn)
{
	kvm_x86_ops->check_processor_compatibility(rtn);
}

int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
	struct page *page;
	struct kvm *kvm;
	int r;

	BUG_ON(vcpu->kvm == NULL);
	kvm = vcpu->kvm;

	vcpu->arch.mmu.root_hpa = INVALID_PAGE;
	if (!irqchip_in_kernel(kvm) || vcpu->vcpu_id == 0)
		vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
	else
		vcpu->arch.mp_state = KVM_MP_STATE_UNINITIALIZED;

	page = alloc_page(GFP_KERNEL | __GFP_ZERO);
	if (!page) {
		r = -ENOMEM;
		goto fail;
	}
	vcpu->arch.pio_data = page_address(page);

	r = kvm_mmu_create(vcpu);
	if (r < 0)
		goto fail_free_pio_data;

	if (irqchip_in_kernel(kvm)) {
		r = kvm_create_lapic(vcpu);
		if (r < 0)
			goto fail_mmu_destroy;
	}

	return 0;

fail_mmu_destroy:
	kvm_mmu_destroy(vcpu);
fail_free_pio_data:
	free_page((unsigned long)vcpu->arch.pio_data);
fail:
	return r;
}

void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	kvm_free_lapic(vcpu);
	down_read(&vcpu->kvm->slots_lock);
	kvm_mmu_destroy(vcpu);
	up_read(&vcpu->kvm->slots_lock);
	free_page((unsigned long)vcpu->arch.pio_data);
}

struct kvm *kvm_arch_create_vm(void)
{
	struct kvm *kvm = kzalloc(sizeof(struct kvm), GFP_KERNEL);

	if (!kvm)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&kvm->arch.active_mmu_pages);
	INIT_LIST_HEAD(&kvm->arch.assigned_dev_head);

	/* Reserve bit 0 of irq_sources_bitmap for userspace irq source */
	set_bit(KVM_USERSPACE_IRQ_SOURCE_ID, &kvm->arch.irq_sources_bitmap);

	return kvm;
}

static void kvm_unload_vcpu_mmu(struct kvm_vcpu *vcpu)
{
	vcpu_load(vcpu);
	kvm_mmu_unload(vcpu);
	vcpu_put(vcpu);
}

static void kvm_free_vcpus(struct kvm *kvm)
{
	unsigned int i;

	/*
	 * Unpin any mmu pages first.
	 */
	for (i = 0; i < KVM_MAX_VCPUS; ++i)
		if (kvm->vcpus[i])
			kvm_unload_vcpu_mmu(kvm->vcpus[i]);
	for (i = 0; i < KVM_MAX_VCPUS; ++i) {
		if (kvm->vcpus[i]) {
			kvm_arch_vcpu_free(kvm->vcpus[i]);
			kvm->vcpus[i] = NULL;
		}
	}
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
	kvm_free_all_assigned_devices(kvm);
	kvm_iommu_unmap_guest(kvm);
	kvm_free_pit(kvm);
	kfree(kvm->arch.vpic);
	kfree(kvm->arch.vioapic);
	kvm_free_vcpus(kvm);
	kvm_free_physmem(kvm);
	if (kvm->arch.apic_access_page)
		put_page(kvm->arch.apic_access_page);
	if (kvm->arch.ept_identity_pagetable)
		put_page(kvm->arch.ept_identity_pagetable);
	kfree(kvm);
}

int kvm_arch_set_memory_region(struct kvm *kvm,
			       struct kvm_userspace_memory_region *mem,
			       struct kvm_memory_slot old,
			       int user_alloc)
{
	int npages = mem->memory_size >> PAGE_SHIFT;
	struct kvm_memory_slot *memslot = &kvm->memslots[mem->slot];

	/*
	 * To keep backward compatibility with older userspace, x86 needs
	 * to handle the !user_alloc case.
	 */
	if (!user_alloc) {
		if (npages && !old.rmap) {
			unsigned long userspace_addr;

			down_write(&current->mm->mmap_sem);
			userspace_addr = do_mmap(NULL, 0,
						 npages * PAGE_SIZE,
						 PROT_READ | PROT_WRITE,
						 MAP_PRIVATE | MAP_ANONYMOUS,
						 0);
			up_write(&current->mm->mmap_sem);

			if (IS_ERR((void *)userspace_addr))
				return PTR_ERR((void *)userspace_addr);

			/* set userspace_addr atomically for kvm_hva_to_rmapp */
			spin_lock(&kvm->mmu_lock);
			memslot->userspace_addr = userspace_addr;
			spin_unlock(&kvm->mmu_lock);
		} else {
			if (!old.user_alloc && old.rmap) {
				int ret;

				down_write(&current->mm->mmap_sem);
				ret = do_munmap(current->mm, old.userspace_addr,
						old.npages * PAGE_SIZE);
				up_write(&current->mm->mmap_sem);
				if (ret < 0)
					printk(KERN_WARNING
					       "kvm_vm_ioctl_set_memory_region: "
					       "failed to munmap memory\n");
			}
		}
	}

	if (!kvm->arch.n_requested_mmu_pages) {
		unsigned int nr_mmu_pages = kvm_mmu_calculate_mmu_pages(kvm);
		kvm_mmu_change_mmu_pages(kvm, nr_mmu_pages);
	}

	kvm_mmu_slot_remove_write_access(kvm, mem->slot);
	kvm_flush_remote_tlbs(kvm);

	return 0;
}

void kvm_arch_flush_shadow(struct kvm *kvm)
{
	kvm_mmu_zap_all(kvm);
}

int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE
	       || vcpu->arch.mp_state == KVM_MP_STATE_SIPI_RECEIVED
	       || vcpu->arch.nmi_pending;
}

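/*
 * The IPI handler below is deliberately (almost) empty: the only point
 * of sending the interrupt is to force a vcpu running in guest mode to
 * exit and notice pending work.  A halted vcpu is handled by the
 * waitqueue wakeup instead.
 */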
static void vcpu_kick_intr(void *info)
{
#ifdef DEBUG
	struct kvm_vcpu *vcpu = (struct kvm_vcpu *)info;
	printk(KERN_DEBUG "vcpu_kick_intr %p\n", vcpu);
#endif
}

void kvm_vcpu_kick(struct kvm_vcpu *vcpu)
{
	int ipi_pcpu = vcpu->cpu;
	int cpu = get_cpu();

	if (waitqueue_active(&vcpu->wq)) {
		wake_up_interruptible(&vcpu->wq);
		++vcpu->stat.halt_wakeup;
	}
	/*
	 * We may be called synchronously with irqs disabled in guest mode,
	 * so we cannot use smp_call_function_single() in that case: only
	 * kick a vcpu that is in guest mode on a different cpu.
	 */
	if (vcpu->guest_mode && vcpu->cpu != cpu)
		smp_call_function_single(ipi_pcpu, vcpu_kick_intr, vcpu, 0);
	put_cpu();
}