svm.c

  1. /*
  2. * Kernel-based Virtual Machine driver for Linux
  3. *
  4. * AMD SVM support
  5. *
  6. * Copyright (C) 2006 Qumranet, Inc.
  7. *
  8. * Authors:
  9. * Yaniv Kamay <yaniv@qumranet.com>
  10. * Avi Kivity <avi@qumranet.com>
  11. *
  12. * This work is licensed under the terms of the GNU GPL, version 2. See
  13. * the COPYING file in the top-level directory.
  14. *
  15. */
  16. #include <linux/kvm_host.h>
  17. #include "irq.h"
  18. #include "mmu.h"
  19. #include "kvm_cache_regs.h"
  20. #include "x86.h"
  21. #include <linux/module.h>
  22. #include <linux/kernel.h>
  23. #include <linux/vmalloc.h>
  24. #include <linux/highmem.h>
  25. #include <linux/sched.h>
  26. #include <linux/ftrace_event.h>
  27. #include <linux/slab.h>
  28. #include <asm/desc.h>
  29. #include <asm/virtext.h>
  30. #include "trace.h"
  31. #define __ex(x) __kvm_handle_fault_on_reboot(x)
  32. MODULE_AUTHOR("Qumranet");
  33. MODULE_LICENSE("GPL");
  34. #define IOPM_ALLOC_ORDER 2
  35. #define MSRPM_ALLOC_ORDER 1
  36. #define SEG_TYPE_LDT 2
  37. #define SEG_TYPE_BUSY_TSS16 3
  38. #define SVM_FEATURE_NPT (1 << 0)
  39. #define SVM_FEATURE_LBRV (1 << 1)
  40. #define SVM_FEATURE_SVML (1 << 2)
  41. #define SVM_FEATURE_NRIP (1 << 3)
  42. #define SVM_FEATURE_PAUSE_FILTER (1 << 10)
  43. #define NESTED_EXIT_HOST 0 /* Exit handled on host level */
  44. #define NESTED_EXIT_DONE 1 /* Exit caused nested vmexit */
  45. #define NESTED_EXIT_CONTINUE 2 /* Further checks needed */
  46. #define DEBUGCTL_RESERVED_BITS (~(0x3fULL))
  47. static const u32 host_save_user_msrs[] = {
  48. #ifdef CONFIG_X86_64
  49. MSR_STAR, MSR_LSTAR, MSR_CSTAR, MSR_SYSCALL_MASK, MSR_KERNEL_GS_BASE,
  50. MSR_FS_BASE,
  51. #endif
  52. MSR_IA32_SYSENTER_CS, MSR_IA32_SYSENTER_ESP, MSR_IA32_SYSENTER_EIP,
  53. };
  54. #define NR_HOST_SAVE_USER_MSRS ARRAY_SIZE(host_save_user_msrs)
  55. struct kvm_vcpu;
  56. struct nested_state {
  57. struct vmcb *hsave;
  58. u64 hsave_msr;
  59. u64 vm_cr_msr;
  60. u64 vmcb;
  61. /* These are the merged vectors */
  62. u32 *msrpm;
  63. /* gpa pointers to the real vectors */
  64. u64 vmcb_msrpm;
  65. u64 vmcb_iopm;
  66. /* A VMEXIT is required but not yet emulated */
  67. bool exit_required;
  68. /* cache for intercepts of the guest */
  69. u16 intercept_cr_read;
  70. u16 intercept_cr_write;
  71. u16 intercept_dr_read;
  72. u16 intercept_dr_write;
  73. u32 intercept_exceptions;
  74. u64 intercept;
  75. };
  76. #define MSRPM_OFFSETS 16
  77. static u32 msrpm_offsets[MSRPM_OFFSETS] __read_mostly;
  78. struct vcpu_svm {
  79. struct kvm_vcpu vcpu;
  80. struct vmcb *vmcb;
  81. unsigned long vmcb_pa;
  82. struct svm_cpu_data *svm_data;
  83. uint64_t asid_generation;
  84. uint64_t sysenter_esp;
  85. uint64_t sysenter_eip;
  86. u64 next_rip;
  87. u64 host_user_msrs[NR_HOST_SAVE_USER_MSRS];
  88. u64 host_gs_base;
  89. u32 *msrpm;
  90. struct nested_state nested;
  91. bool nmi_singlestep;
  92. unsigned int3_injected;
  93. unsigned long int3_rip;
  94. };
  95. #define MSR_INVALID 0xffffffffU
  96. static struct svm_direct_access_msrs {
  97. u32 index; /* Index of the MSR */
  98. bool always; /* True if intercept is always on */
  99. } direct_access_msrs[] = {
  100. { .index = MSR_K6_STAR, .always = true },
  101. { .index = MSR_IA32_SYSENTER_CS, .always = true },
  102. #ifdef CONFIG_X86_64
  103. { .index = MSR_GS_BASE, .always = true },
  104. { .index = MSR_FS_BASE, .always = true },
  105. { .index = MSR_KERNEL_GS_BASE, .always = true },
  106. { .index = MSR_LSTAR, .always = true },
  107. { .index = MSR_CSTAR, .always = true },
  108. { .index = MSR_SYSCALL_MASK, .always = true },
  109. #endif
  110. { .index = MSR_IA32_LASTBRANCHFROMIP, .always = false },
  111. { .index = MSR_IA32_LASTBRANCHTOIP, .always = false },
  112. { .index = MSR_IA32_LASTINTFROMIP, .always = false },
  113. { .index = MSR_IA32_LASTINTTOIP, .always = false },
  114. { .index = MSR_INVALID, .always = false },
  115. };
  116. /* enable NPT for AMD64 and X86 with PAE */
  117. #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
  118. static bool npt_enabled = true;
  119. #else
  120. static bool npt_enabled;
  121. #endif
  122. static int npt = 1;
  123. module_param(npt, int, S_IRUGO);
  124. static int nested = 1;
  125. module_param(nested, int, S_IRUGO);
  126. static void svm_flush_tlb(struct kvm_vcpu *vcpu);
  127. static void svm_complete_interrupts(struct vcpu_svm *svm);
  128. static int nested_svm_exit_handled(struct vcpu_svm *svm);
  129. static int nested_svm_intercept(struct vcpu_svm *svm);
  130. static int nested_svm_vmexit(struct vcpu_svm *svm);
  131. static int nested_svm_check_exception(struct vcpu_svm *svm, unsigned nr,
  132. bool has_error_code, u32 error_code);
  133. static inline struct vcpu_svm *to_svm(struct kvm_vcpu *vcpu)
  134. {
  135. return container_of(vcpu, struct vcpu_svm, vcpu);
  136. }
  137. static inline bool is_nested(struct vcpu_svm *svm)
  138. {
  139. return svm->nested.vmcb;
  140. }
  141. static inline void enable_gif(struct vcpu_svm *svm)
  142. {
  143. svm->vcpu.arch.hflags |= HF_GIF_MASK;
  144. }
  145. static inline void disable_gif(struct vcpu_svm *svm)
  146. {
  147. svm->vcpu.arch.hflags &= ~HF_GIF_MASK;
  148. }
  149. static inline bool gif_set(struct vcpu_svm *svm)
  150. {
  151. return !!(svm->vcpu.arch.hflags & HF_GIF_MASK);
  152. }
  153. static unsigned long iopm_base;
  154. struct kvm_ldttss_desc {
  155. u16 limit0;
  156. u16 base0;
  157. unsigned base1:8, type:5, dpl:2, p:1;
  158. unsigned limit1:4, zero0:3, g:1, base2:8;
  159. u32 base3;
  160. u32 zero1;
  161. } __attribute__((packed));
  162. struct svm_cpu_data {
  163. int cpu;
  164. u64 asid_generation;
  165. u32 max_asid;
  166. u32 next_asid;
  167. struct kvm_ldttss_desc *tss_desc;
  168. struct page *save_area;
  169. };
  170. static DEFINE_PER_CPU(struct svm_cpu_data *, svm_data);
  171. static uint32_t svm_features;
  172. struct svm_init_data {
  173. int cpu;
  174. int r;
  175. };
  176. static u32 msrpm_ranges[] = {0, 0xc0000000, 0xc0010000};
  177. #define NUM_MSR_MAPS ARRAY_SIZE(msrpm_ranges)
  178. #define MSRS_RANGE_SIZE 2048
  179. #define MSRS_IN_RANGE (MSRS_RANGE_SIZE * 8 / 2)
  180. static u32 svm_msrpm_offset(u32 msr)
  181. {
  182. u32 offset;
  183. int i;
  184. for (i = 0; i < NUM_MSR_MAPS; i++) {
  185. if (msr < msrpm_ranges[i] ||
  186. msr >= msrpm_ranges[i] + MSRS_IN_RANGE)
  187. continue;
  188. offset = (msr - msrpm_ranges[i]) / 4; /* 4 msrs per u8 */
  189. offset += (i * MSRS_RANGE_SIZE); /* add range offset */
  190. /* Now we have the u8 offset - but need the u32 offset */
  191. return offset / 4;
  192. }
  193. /* MSR not in any range */
  194. return MSR_INVALID;
  195. }
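/*
 * Editor's note (illustrative, not part of the original source): a worked
 * example of the calculation above. MSR_LSTAR (0xc0000082) falls into
 * msrpm_ranges[1] (0xc0000000 - 0xc0001fff, since MSRS_IN_RANGE = 8192):
 *
 *   offset  = (0xc0000082 - 0xc0000000) / 4  = 0x020   byte offset, 4 MSRs per byte
 *   offset += 1 * MSRS_RANGE_SIZE            = 0x820   add the 2048-byte range offset
 *   return    0x820 / 4                      = 0x208   u32 index into the msrpm
 */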
  196. #define MAX_INST_SIZE 15
  197. static inline u32 svm_has(u32 feat)
  198. {
  199. return svm_features & feat;
  200. }
  201. static inline void clgi(void)
  202. {
  203. asm volatile (__ex(SVM_CLGI));
  204. }
  205. static inline void stgi(void)
  206. {
  207. asm volatile (__ex(SVM_STGI));
  208. }
  209. static inline void invlpga(unsigned long addr, u32 asid)
  210. {
  211. asm volatile (__ex(SVM_INVLPGA) : : "a"(addr), "c"(asid));
  212. }
  213. static inline void force_new_asid(struct kvm_vcpu *vcpu)
  214. {
  215. to_svm(vcpu)->asid_generation--;
  216. }
  217. static inline void flush_guest_tlb(struct kvm_vcpu *vcpu)
  218. {
  219. force_new_asid(vcpu);
  220. }
  221. static void svm_set_efer(struct kvm_vcpu *vcpu, u64 efer)
  222. {
  223. if (!npt_enabled && !(efer & EFER_LMA))
  224. efer &= ~EFER_LME;
  225. to_svm(vcpu)->vmcb->save.efer = efer | EFER_SVME;
  226. vcpu->arch.efer = efer;
  227. }
  228. static int is_external_interrupt(u32 info)
  229. {
  230. info &= SVM_EVTINJ_TYPE_MASK | SVM_EVTINJ_VALID;
  231. return info == (SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_INTR);
  232. }
  233. static u32 svm_get_interrupt_shadow(struct kvm_vcpu *vcpu, int mask)
  234. {
  235. struct vcpu_svm *svm = to_svm(vcpu);
  236. u32 ret = 0;
  237. if (svm->vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK)
  238. ret |= KVM_X86_SHADOW_INT_STI | KVM_X86_SHADOW_INT_MOV_SS;
  239. return ret & mask;
  240. }
  241. static void svm_set_interrupt_shadow(struct kvm_vcpu *vcpu, int mask)
  242. {
  243. struct vcpu_svm *svm = to_svm(vcpu);
  244. if (mask == 0)
  245. svm->vmcb->control.int_state &= ~SVM_INTERRUPT_SHADOW_MASK;
  246. else
  247. svm->vmcb->control.int_state |= SVM_INTERRUPT_SHADOW_MASK;
  248. }
  249. static void skip_emulated_instruction(struct kvm_vcpu *vcpu)
  250. {
  251. struct vcpu_svm *svm = to_svm(vcpu);
  252. if (svm->vmcb->control.next_rip != 0)
  253. svm->next_rip = svm->vmcb->control.next_rip;
  254. if (!svm->next_rip) {
  255. if (emulate_instruction(vcpu, 0, 0, EMULTYPE_SKIP) !=
  256. EMULATE_DONE)
  257. printk(KERN_DEBUG "%s: NOP\n", __func__);
  258. return;
  259. }
  260. if (svm->next_rip - kvm_rip_read(vcpu) > MAX_INST_SIZE)
  261. printk(KERN_ERR "%s: ip 0x%lx next 0x%llx\n",
  262. __func__, kvm_rip_read(vcpu), svm->next_rip);
  263. kvm_rip_write(vcpu, svm->next_rip);
  264. svm_set_interrupt_shadow(vcpu, 0);
  265. }
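/*
 * Editor's note: on CPUs with the nRIP-save feature the hardware fills
 * vmcb->control.next_rip with the address of the instruction following
 * the intercepted one, so the function above can skip it directly. On
 * older CPUs next_rip stays 0 and the instruction emulator is invoked
 * with EMULTYPE_SKIP to compute the new RIP instead.
 */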
  266. static void svm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr,
  267. bool has_error_code, u32 error_code,
  268. bool reinject)
  269. {
  270. struct vcpu_svm *svm = to_svm(vcpu);
  271. /*
  272. * If we are within a nested VM we'd better #VMEXIT and let the guest
  273. * handle the exception
  274. */
  275. if (!reinject &&
  276. nested_svm_check_exception(svm, nr, has_error_code, error_code))
  277. return;
  278. if (nr == BP_VECTOR && !svm_has(SVM_FEATURE_NRIP)) {
  279. unsigned long rip, old_rip = kvm_rip_read(&svm->vcpu);
  280. /*
  281. * For guest debugging where we have to reinject #BP if some
  282. * INT3 is guest-owned:
  283. * Emulate nRIP by moving RIP forward. Will fail if injection
  284. * raises a fault that is not intercepted. Still better than
  285. * failing in all cases.
  286. */
  287. skip_emulated_instruction(&svm->vcpu);
  288. rip = kvm_rip_read(&svm->vcpu);
  289. svm->int3_rip = rip + svm->vmcb->save.cs.base;
  290. svm->int3_injected = rip - old_rip;
  291. }
  292. svm->vmcb->control.event_inj = nr
  293. | SVM_EVTINJ_VALID
  294. | (has_error_code ? SVM_EVTINJ_VALID_ERR : 0)
  295. | SVM_EVTINJ_TYPE_EXEPT;
  296. svm->vmcb->control.event_inj_err = error_code;
  297. }
  298. static int has_svm(void)
  299. {
  300. const char *msg;
  301. if (!cpu_has_svm(&msg)) {
  302. printk(KERN_INFO "has_svm: %s\n", msg);
  303. return 0;
  304. }
  305. return 1;
  306. }
  307. static void svm_hardware_disable(void *garbage)
  308. {
  309. cpu_svm_disable();
  310. }
  311. static int svm_hardware_enable(void *garbage)
  312. {
  313. struct svm_cpu_data *sd;
  314. uint64_t efer;
  315. struct desc_ptr gdt_descr;
  316. struct desc_struct *gdt;
  317. int me = raw_smp_processor_id();
  318. rdmsrl(MSR_EFER, efer);
  319. if (efer & EFER_SVME)
  320. return -EBUSY;
  321. if (!has_svm()) {
  322. printk(KERN_ERR "svm_hardware_enable: err EOPNOTSUPP on %d\n",
  323. me);
  324. return -EINVAL;
  325. }
  326. sd = per_cpu(svm_data, me);
  327. if (!sd) {
  328. printk(KERN_ERR "svm_hardware_enable: svm_data is NULL on %d\n",
  329. me);
  330. return -EINVAL;
  331. }
  332. sd->asid_generation = 1;
  333. sd->max_asid = cpuid_ebx(SVM_CPUID_FUNC) - 1;
  334. sd->next_asid = sd->max_asid + 1;
  335. native_store_gdt(&gdt_descr);
  336. gdt = (struct desc_struct *)gdt_descr.address;
  337. sd->tss_desc = (struct kvm_ldttss_desc *)(gdt + GDT_ENTRY_TSS);
  338. wrmsrl(MSR_EFER, efer | EFER_SVME);
  339. wrmsrl(MSR_VM_HSAVE_PA, page_to_pfn(sd->save_area) << PAGE_SHIFT);
  340. return 0;
  341. }
  342. static void svm_cpu_uninit(int cpu)
  343. {
  344. struct svm_cpu_data *sd = per_cpu(svm_data, raw_smp_processor_id());
  345. if (!sd)
  346. return;
  347. per_cpu(svm_data, raw_smp_processor_id()) = NULL;
  348. __free_page(sd->save_area);
  349. kfree(sd);
  350. }
  351. static int svm_cpu_init(int cpu)
  352. {
  353. struct svm_cpu_data *sd;
  354. int r;
  355. sd = kzalloc(sizeof(struct svm_cpu_data), GFP_KERNEL);
  356. if (!sd)
  357. return -ENOMEM;
  358. sd->cpu = cpu;
  359. sd->save_area = alloc_page(GFP_KERNEL);
  360. r = -ENOMEM;
  361. if (!sd->save_area)
  362. goto err_1;
  363. per_cpu(svm_data, cpu) = sd;
  364. return 0;
  365. err_1:
  366. kfree(sd);
  367. return r;
  368. }
  369. static bool valid_msr_intercept(u32 index)
  370. {
  371. int i;
  372. for (i = 0; direct_access_msrs[i].index != MSR_INVALID; i++)
  373. if (direct_access_msrs[i].index == index)
  374. return true;
  375. return false;
  376. }
  377. static void set_msr_interception(u32 *msrpm, unsigned msr,
  378. int read, int write)
  379. {
  380. u8 bit_read, bit_write;
  381. unsigned long tmp;
  382. u32 offset;
  383. /*
  384. * If this warning triggers, extend the direct_access_msrs list at the
  385. * beginning of the file
  386. */
  387. WARN_ON(!valid_msr_intercept(msr));
  388. offset = svm_msrpm_offset(msr);
  389. bit_read = 2 * (msr & 0x0f);
  390. bit_write = 2 * (msr & 0x0f) + 1;
  391. tmp = msrpm[offset];
  392. BUG_ON(offset == MSR_INVALID);
  393. read ? clear_bit(bit_read, &tmp) : set_bit(bit_read, &tmp);
  394. write ? clear_bit(bit_write, &tmp) : set_bit(bit_write, &tmp);
  395. msrpm[offset] = tmp;
  396. }
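/*
 * Editor's note: the MSR permission map uses two bits per MSR. The low
 * four bits of the MSR number select one of the 16 two-bit slots inside
 * the u32 at msrpm[offset]: bit 2*(msr & 0xf) is the read-intercept bit
 * and the following bit is the write-intercept bit. A set bit means
 * "intercept", which is why passing read/write == 1 clears the bit to
 * give the guest direct access.
 */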
  397. static void svm_vcpu_init_msrpm(u32 *msrpm)
  398. {
  399. int i;
  400. memset(msrpm, 0xff, PAGE_SIZE * (1 << MSRPM_ALLOC_ORDER));
  401. for (i = 0; direct_access_msrs[i].index != MSR_INVALID; i++) {
  402. if (!direct_access_msrs[i].always)
  403. continue;
  404. set_msr_interception(msrpm, direct_access_msrs[i].index, 1, 1);
  405. }
  406. }
  407. static void add_msr_offset(u32 offset)
  408. {
  409. int i;
  410. for (i = 0; i < MSRPM_OFFSETS; ++i) {
  411. /* Offset already in list? */
  412. if (msrpm_offsets[i] == offset)
  413. return;
  414. /* Slot used by another offset? */
  415. if (msrpm_offsets[i] != MSR_INVALID)
  416. continue;
  417. /* Add offset to list */
  418. msrpm_offsets[i] = offset;
  419. return;
  420. }
  421. /*
  422. * If this BUG triggers, the msrpm_offsets table has overflowed. Just
  423. * increase MSRPM_OFFSETS in this case.
  424. */
  425. BUG();
  426. }
  427. static void init_msrpm_offsets(void)
  428. {
  429. int i;
  430. memset(msrpm_offsets, 0xff, sizeof(msrpm_offsets));
  431. for (i = 0; direct_access_msrs[i].index != MSR_INVALID; i++) {
  432. u32 offset;
  433. offset = svm_msrpm_offset(direct_access_msrs[i].index);
  434. BUG_ON(offset == MSR_INVALID);
  435. add_msr_offset(offset);
  436. }
  437. }
  438. static void svm_enable_lbrv(struct vcpu_svm *svm)
  439. {
  440. u32 *msrpm = svm->msrpm;
  441. svm->vmcb->control.lbr_ctl = 1;
  442. set_msr_interception(msrpm, MSR_IA32_LASTBRANCHFROMIP, 1, 1);
  443. set_msr_interception(msrpm, MSR_IA32_LASTBRANCHTOIP, 1, 1);
  444. set_msr_interception(msrpm, MSR_IA32_LASTINTFROMIP, 1, 1);
  445. set_msr_interception(msrpm, MSR_IA32_LASTINTTOIP, 1, 1);
  446. }
  447. static void svm_disable_lbrv(struct vcpu_svm *svm)
  448. {
  449. u32 *msrpm = svm->msrpm;
  450. svm->vmcb->control.lbr_ctl = 0;
  451. set_msr_interception(msrpm, MSR_IA32_LASTBRANCHFROMIP, 0, 0);
  452. set_msr_interception(msrpm, MSR_IA32_LASTBRANCHTOIP, 0, 0);
  453. set_msr_interception(msrpm, MSR_IA32_LASTINTFROMIP, 0, 0);
  454. set_msr_interception(msrpm, MSR_IA32_LASTINTTOIP, 0, 0);
  455. }
  456. static __init int svm_hardware_setup(void)
  457. {
  458. int cpu;
  459. struct page *iopm_pages;
  460. void *iopm_va;
  461. int r;
  462. iopm_pages = alloc_pages(GFP_KERNEL, IOPM_ALLOC_ORDER);
  463. if (!iopm_pages)
  464. return -ENOMEM;
  465. iopm_va = page_address(iopm_pages);
  466. memset(iopm_va, 0xff, PAGE_SIZE * (1 << IOPM_ALLOC_ORDER));
  467. iopm_base = page_to_pfn(iopm_pages) << PAGE_SHIFT;
  468. init_msrpm_offsets();
  469. if (boot_cpu_has(X86_FEATURE_NX))
  470. kvm_enable_efer_bits(EFER_NX);
  471. if (boot_cpu_has(X86_FEATURE_FXSR_OPT))
  472. kvm_enable_efer_bits(EFER_FFXSR);
  473. if (nested) {
  474. printk(KERN_INFO "kvm: Nested Virtualization enabled\n");
  475. kvm_enable_efer_bits(EFER_SVME);
  476. }
  477. for_each_possible_cpu(cpu) {
  478. r = svm_cpu_init(cpu);
  479. if (r)
  480. goto err;
  481. }
  482. svm_features = cpuid_edx(SVM_CPUID_FUNC);
  483. if (!svm_has(SVM_FEATURE_NPT))
  484. npt_enabled = false;
  485. if (npt_enabled && !npt) {
  486. printk(KERN_INFO "kvm: Nested Paging disabled\n");
  487. npt_enabled = false;
  488. }
  489. if (npt_enabled) {
  490. printk(KERN_INFO "kvm: Nested Paging enabled\n");
  491. kvm_enable_tdp();
  492. } else
  493. kvm_disable_tdp();
  494. return 0;
  495. err:
  496. __free_pages(iopm_pages, IOPM_ALLOC_ORDER);
  497. iopm_base = 0;
  498. return r;
  499. }
  500. static __exit void svm_hardware_unsetup(void)
  501. {
  502. int cpu;
  503. for_each_possible_cpu(cpu)
  504. svm_cpu_uninit(cpu);
  505. __free_pages(pfn_to_page(iopm_base >> PAGE_SHIFT), IOPM_ALLOC_ORDER);
  506. iopm_base = 0;
  507. }
  508. static void init_seg(struct vmcb_seg *seg)
  509. {
  510. seg->selector = 0;
  511. seg->attrib = SVM_SELECTOR_P_MASK | SVM_SELECTOR_S_MASK |
  512. SVM_SELECTOR_WRITE_MASK; /* Read/Write Data Segment */
  513. seg->limit = 0xffff;
  514. seg->base = 0;
  515. }
  516. static void init_sys_seg(struct vmcb_seg *seg, uint32_t type)
  517. {
  518. seg->selector = 0;
  519. seg->attrib = SVM_SELECTOR_P_MASK | type;
  520. seg->limit = 0xffff;
  521. seg->base = 0;
  522. }
  523. static void init_vmcb(struct vcpu_svm *svm)
  524. {
  525. struct vmcb_control_area *control = &svm->vmcb->control;
  526. struct vmcb_save_area *save = &svm->vmcb->save;
  527. svm->vcpu.fpu_active = 1;
  528. control->intercept_cr_read = INTERCEPT_CR0_MASK |
  529. INTERCEPT_CR3_MASK |
  530. INTERCEPT_CR4_MASK;
  531. control->intercept_cr_write = INTERCEPT_CR0_MASK |
  532. INTERCEPT_CR3_MASK |
  533. INTERCEPT_CR4_MASK |
  534. INTERCEPT_CR8_MASK;
  535. control->intercept_dr_read = INTERCEPT_DR0_MASK |
  536. INTERCEPT_DR1_MASK |
  537. INTERCEPT_DR2_MASK |
  538. INTERCEPT_DR3_MASK |
  539. INTERCEPT_DR4_MASK |
  540. INTERCEPT_DR5_MASK |
  541. INTERCEPT_DR6_MASK |
  542. INTERCEPT_DR7_MASK;
  543. control->intercept_dr_write = INTERCEPT_DR0_MASK |
  544. INTERCEPT_DR1_MASK |
  545. INTERCEPT_DR2_MASK |
  546. INTERCEPT_DR3_MASK |
  547. INTERCEPT_DR4_MASK |
  548. INTERCEPT_DR5_MASK |
  549. INTERCEPT_DR6_MASK |
  550. INTERCEPT_DR7_MASK;
  551. control->intercept_exceptions = (1 << PF_VECTOR) |
  552. (1 << UD_VECTOR) |
  553. (1 << MC_VECTOR);
  554. control->intercept = (1ULL << INTERCEPT_INTR) |
  555. (1ULL << INTERCEPT_NMI) |
  556. (1ULL << INTERCEPT_SMI) |
  557. (1ULL << INTERCEPT_SELECTIVE_CR0) |
  558. (1ULL << INTERCEPT_CPUID) |
  559. (1ULL << INTERCEPT_INVD) |
  560. (1ULL << INTERCEPT_HLT) |
  561. (1ULL << INTERCEPT_INVLPG) |
  562. (1ULL << INTERCEPT_INVLPGA) |
  563. (1ULL << INTERCEPT_IOIO_PROT) |
  564. (1ULL << INTERCEPT_MSR_PROT) |
  565. (1ULL << INTERCEPT_TASK_SWITCH) |
  566. (1ULL << INTERCEPT_SHUTDOWN) |
  567. (1ULL << INTERCEPT_VMRUN) |
  568. (1ULL << INTERCEPT_VMMCALL) |
  569. (1ULL << INTERCEPT_VMLOAD) |
  570. (1ULL << INTERCEPT_VMSAVE) |
  571. (1ULL << INTERCEPT_STGI) |
  572. (1ULL << INTERCEPT_CLGI) |
  573. (1ULL << INTERCEPT_SKINIT) |
  574. (1ULL << INTERCEPT_WBINVD) |
  575. (1ULL << INTERCEPT_MONITOR) |
  576. (1ULL << INTERCEPT_MWAIT);
  577. control->iopm_base_pa = iopm_base;
  578. control->msrpm_base_pa = __pa(svm->msrpm);
  579. control->tsc_offset = 0;
  580. control->int_ctl = V_INTR_MASKING_MASK;
  581. init_seg(&save->es);
  582. init_seg(&save->ss);
  583. init_seg(&save->ds);
  584. init_seg(&save->fs);
  585. init_seg(&save->gs);
  586. save->cs.selector = 0xf000;
  587. /* Executable/Readable Code Segment */
  588. save->cs.attrib = SVM_SELECTOR_READ_MASK | SVM_SELECTOR_P_MASK |
  589. SVM_SELECTOR_S_MASK | SVM_SELECTOR_CODE_MASK;
  590. save->cs.limit = 0xffff;
  591. /*
  592. * cs.base should really be 0xffff0000, but vmx can't handle that, so
  593. * be consistent with it.
  594. *
  595. * Replace when we have real mode working for vmx.
  596. */
  597. save->cs.base = 0xf0000;
  598. save->gdtr.limit = 0xffff;
  599. save->idtr.limit = 0xffff;
  600. init_sys_seg(&save->ldtr, SEG_TYPE_LDT);
  601. init_sys_seg(&save->tr, SEG_TYPE_BUSY_TSS16);
  602. save->efer = EFER_SVME;
  603. save->dr6 = 0xffff0ff0;
  604. save->dr7 = 0x400;
  605. save->rflags = 2;
  606. save->rip = 0x0000fff0;
  607. svm->vcpu.arch.regs[VCPU_REGS_RIP] = save->rip;
  608. /*
  609. * This is the guest-visible cr0 value.
  610. * svm_set_cr0() sets PG and WP and clears NW and CD on save->cr0.
  611. */
  612. svm->vcpu.arch.cr0 = X86_CR0_NW | X86_CR0_CD | X86_CR0_ET;
  613. kvm_set_cr0(&svm->vcpu, svm->vcpu.arch.cr0);
  614. save->cr4 = X86_CR4_PAE;
  615. /* rdx = ?? */
  616. if (npt_enabled) {
  617. /* Setup VMCB for Nested Paging */
  618. control->nested_ctl = 1;
  619. control->intercept &= ~((1ULL << INTERCEPT_TASK_SWITCH) |
  620. (1ULL << INTERCEPT_INVLPG));
  621. control->intercept_exceptions &= ~(1 << PF_VECTOR);
  622. control->intercept_cr_read &= ~INTERCEPT_CR3_MASK;
  623. control->intercept_cr_write &= ~INTERCEPT_CR3_MASK;
  624. save->g_pat = 0x0007040600070406ULL;
  625. save->cr3 = 0;
  626. save->cr4 = 0;
  627. }
  628. force_new_asid(&svm->vcpu);
  629. svm->nested.vmcb = 0;
  630. svm->vcpu.arch.hflags = 0;
  631. if (svm_has(SVM_FEATURE_PAUSE_FILTER)) {
  632. control->pause_filter_count = 3000;
  633. control->intercept |= (1ULL << INTERCEPT_PAUSE);
  634. }
  635. enable_gif(svm);
  636. }
  637. static int svm_vcpu_reset(struct kvm_vcpu *vcpu)
  638. {
  639. struct vcpu_svm *svm = to_svm(vcpu);
  640. init_vmcb(svm);
  641. if (!kvm_vcpu_is_bsp(vcpu)) {
  642. kvm_rip_write(vcpu, 0);
  643. svm->vmcb->save.cs.base = svm->vcpu.arch.sipi_vector << 12;
  644. svm->vmcb->save.cs.selector = svm->vcpu.arch.sipi_vector << 8;
  645. }
  646. vcpu->arch.regs_avail = ~0;
  647. vcpu->arch.regs_dirty = ~0;
  648. return 0;
  649. }
  650. static struct kvm_vcpu *svm_create_vcpu(struct kvm *kvm, unsigned int id)
  651. {
  652. struct vcpu_svm *svm;
  653. struct page *page;
  654. struct page *msrpm_pages;
  655. struct page *hsave_page;
  656. struct page *nested_msrpm_pages;
  657. int err;
  658. svm = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
  659. if (!svm) {
  660. err = -ENOMEM;
  661. goto out;
  662. }
  663. err = kvm_vcpu_init(&svm->vcpu, kvm, id);
  664. if (err)
  665. goto free_svm;
  666. err = -ENOMEM;
  667. page = alloc_page(GFP_KERNEL);
  668. if (!page)
  669. goto uninit;
  670. msrpm_pages = alloc_pages(GFP_KERNEL, MSRPM_ALLOC_ORDER);
  671. if (!msrpm_pages)
  672. goto free_page1;
  673. nested_msrpm_pages = alloc_pages(GFP_KERNEL, MSRPM_ALLOC_ORDER);
  674. if (!nested_msrpm_pages)
  675. goto free_page2;
  676. hsave_page = alloc_page(GFP_KERNEL);
  677. if (!hsave_page)
  678. goto free_page3;
  679. svm->nested.hsave = page_address(hsave_page);
  680. svm->msrpm = page_address(msrpm_pages);
  681. svm_vcpu_init_msrpm(svm->msrpm);
  682. svm->nested.msrpm = page_address(nested_msrpm_pages);
  683. svm_vcpu_init_msrpm(svm->nested.msrpm);
  684. svm->vmcb = page_address(page);
  685. clear_page(svm->vmcb);
  686. svm->vmcb_pa = page_to_pfn(page) << PAGE_SHIFT;
  687. svm->asid_generation = 0;
  688. init_vmcb(svm);
  689. fx_init(&svm->vcpu);
  690. svm->vcpu.arch.apic_base = 0xfee00000 | MSR_IA32_APICBASE_ENABLE;
  691. if (kvm_vcpu_is_bsp(&svm->vcpu))
  692. svm->vcpu.arch.apic_base |= MSR_IA32_APICBASE_BSP;
  693. return &svm->vcpu;
  694. free_page3:
  695. __free_pages(nested_msrpm_pages, MSRPM_ALLOC_ORDER);
  696. free_page2:
  697. __free_pages(msrpm_pages, MSRPM_ALLOC_ORDER);
  698. free_page1:
  699. __free_page(page);
  700. uninit:
  701. kvm_vcpu_uninit(&svm->vcpu);
  702. free_svm:
  703. kmem_cache_free(kvm_vcpu_cache, svm);
  704. out:
  705. return ERR_PTR(err);
  706. }
  707. static void svm_free_vcpu(struct kvm_vcpu *vcpu)
  708. {
  709. struct vcpu_svm *svm = to_svm(vcpu);
  710. __free_page(pfn_to_page(svm->vmcb_pa >> PAGE_SHIFT));
  711. __free_pages(virt_to_page(svm->msrpm), MSRPM_ALLOC_ORDER);
  712. __free_page(virt_to_page(svm->nested.hsave));
  713. __free_pages(virt_to_page(svm->nested.msrpm), MSRPM_ALLOC_ORDER);
  714. kvm_vcpu_uninit(vcpu);
  715. kmem_cache_free(kvm_vcpu_cache, svm);
  716. }
  717. static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
  718. {
  719. struct vcpu_svm *svm = to_svm(vcpu);
  720. int i;
  721. if (unlikely(cpu != vcpu->cpu)) {
  722. u64 delta;
  723. if (check_tsc_unstable()) {
  724. /*
  725. * Make sure that the guest sees a monotonically
  726. * increasing TSC.
  727. */
  728. delta = vcpu->arch.host_tsc - native_read_tsc();
  729. svm->vmcb->control.tsc_offset += delta;
  730. if (is_nested(svm))
  731. svm->nested.hsave->control.tsc_offset += delta;
  732. }
  733. vcpu->cpu = cpu;
  734. kvm_migrate_timers(vcpu);
  735. svm->asid_generation = 0;
  736. }
  737. for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++)
  738. rdmsrl(host_save_user_msrs[i], svm->host_user_msrs[i]);
  739. }
  740. static void svm_vcpu_put(struct kvm_vcpu *vcpu)
  741. {
  742. struct vcpu_svm *svm = to_svm(vcpu);
  743. int i;
  744. ++vcpu->stat.host_state_reload;
  745. for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++)
  746. wrmsrl(host_save_user_msrs[i], svm->host_user_msrs[i]);
  747. vcpu->arch.host_tsc = native_read_tsc();
  748. }
  749. static unsigned long svm_get_rflags(struct kvm_vcpu *vcpu)
  750. {
  751. return to_svm(vcpu)->vmcb->save.rflags;
  752. }
  753. static void svm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
  754. {
  755. to_svm(vcpu)->vmcb->save.rflags = rflags;
  756. }
  757. static void svm_cache_reg(struct kvm_vcpu *vcpu, enum kvm_reg reg)
  758. {
  759. switch (reg) {
  760. case VCPU_EXREG_PDPTR:
  761. BUG_ON(!npt_enabled);
  762. load_pdptrs(vcpu, vcpu->arch.cr3);
  763. break;
  764. default:
  765. BUG();
  766. }
  767. }
  768. static void svm_set_vintr(struct vcpu_svm *svm)
  769. {
  770. svm->vmcb->control.intercept |= 1ULL << INTERCEPT_VINTR;
  771. }
  772. static void svm_clear_vintr(struct vcpu_svm *svm)
  773. {
  774. svm->vmcb->control.intercept &= ~(1ULL << INTERCEPT_VINTR);
  775. }
  776. static struct vmcb_seg *svm_seg(struct kvm_vcpu *vcpu, int seg)
  777. {
  778. struct vmcb_save_area *save = &to_svm(vcpu)->vmcb->save;
  779. switch (seg) {
  780. case VCPU_SREG_CS: return &save->cs;
  781. case VCPU_SREG_DS: return &save->ds;
  782. case VCPU_SREG_ES: return &save->es;
  783. case VCPU_SREG_FS: return &save->fs;
  784. case VCPU_SREG_GS: return &save->gs;
  785. case VCPU_SREG_SS: return &save->ss;
  786. case VCPU_SREG_TR: return &save->tr;
  787. case VCPU_SREG_LDTR: return &save->ldtr;
  788. }
  789. BUG();
  790. return NULL;
  791. }
  792. static u64 svm_get_segment_base(struct kvm_vcpu *vcpu, int seg)
  793. {
  794. struct vmcb_seg *s = svm_seg(vcpu, seg);
  795. return s->base;
  796. }
  797. static void svm_get_segment(struct kvm_vcpu *vcpu,
  798. struct kvm_segment *var, int seg)
  799. {
  800. struct vmcb_seg *s = svm_seg(vcpu, seg);
  801. var->base = s->base;
  802. var->limit = s->limit;
  803. var->selector = s->selector;
  804. var->type = s->attrib & SVM_SELECTOR_TYPE_MASK;
  805. var->s = (s->attrib >> SVM_SELECTOR_S_SHIFT) & 1;
  806. var->dpl = (s->attrib >> SVM_SELECTOR_DPL_SHIFT) & 3;
  807. var->present = (s->attrib >> SVM_SELECTOR_P_SHIFT) & 1;
  808. var->avl = (s->attrib >> SVM_SELECTOR_AVL_SHIFT) & 1;
  809. var->l = (s->attrib >> SVM_SELECTOR_L_SHIFT) & 1;
  810. var->db = (s->attrib >> SVM_SELECTOR_DB_SHIFT) & 1;
  811. var->g = (s->attrib >> SVM_SELECTOR_G_SHIFT) & 1;
  812. /*
  813. * AMD's VMCB does not have an explicit unusable field, so emulate it
  814. * for cross vendor migration purposes by "not present"
  815. */
  816. var->unusable = !var->present || (var->type == 0);
  817. switch (seg) {
  818. case VCPU_SREG_CS:
  819. /*
  820. * SVM always stores 0 for the 'G' bit in the CS selector in
  821. * the VMCB on a VMEXIT. This hurts cross-vendor migration:
  822. * Intel's VMENTRY has a check on the 'G' bit.
  823. */
  824. var->g = s->limit > 0xfffff;
  825. break;
  826. case VCPU_SREG_TR:
  827. /*
  828. * Work around a bug where the busy flag in the tr selector
  829. * isn't exposed
  830. */
  831. var->type |= 0x2;
  832. break;
  833. case VCPU_SREG_DS:
  834. case VCPU_SREG_ES:
  835. case VCPU_SREG_FS:
  836. case VCPU_SREG_GS:
  837. /*
  838. * The accessed bit must always be set in the segment
  839. * descriptor cache; although it can be cleared in the
  840. * descriptor itself, the cached bit always remains 1. Since
  841. * Intel has a check on this, set it here to support
  842. * cross-vendor migration.
  843. */
  844. if (!var->unusable)
  845. var->type |= 0x1;
  846. break;
  847. case VCPU_SREG_SS:
  848. /*
  849. * On AMD CPUs sometimes the DB bit in the segment
  850. * descriptor is left as 1, although the whole segment has
  851. * been made unusable. Clear it here to pass an Intel VMX
  852. * entry check when cross vendor migrating.
  853. */
  854. if (var->unusable)
  855. var->db = 0;
  856. break;
  857. }
  858. }
  859. static int svm_get_cpl(struct kvm_vcpu *vcpu)
  860. {
  861. struct vmcb_save_area *save = &to_svm(vcpu)->vmcb->save;
  862. return save->cpl;
  863. }
  864. static void svm_get_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
  865. {
  866. struct vcpu_svm *svm = to_svm(vcpu);
  867. dt->size = svm->vmcb->save.idtr.limit;
  868. dt->address = svm->vmcb->save.idtr.base;
  869. }
  870. static void svm_set_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
  871. {
  872. struct vcpu_svm *svm = to_svm(vcpu);
  873. svm->vmcb->save.idtr.limit = dt->size;
  874. svm->vmcb->save.idtr.base = dt->address;
  875. }
  876. static void svm_get_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
  877. {
  878. struct vcpu_svm *svm = to_svm(vcpu);
  879. dt->size = svm->vmcb->save.gdtr.limit;
  880. dt->address = svm->vmcb->save.gdtr.base;
  881. }
  882. static void svm_set_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
  883. {
  884. struct vcpu_svm *svm = to_svm(vcpu);
  885. svm->vmcb->save.gdtr.limit = dt->size;
  886. svm->vmcb->save.gdtr.base = dt->address;
  887. }
  888. static void svm_decache_cr0_guest_bits(struct kvm_vcpu *vcpu)
  889. {
  890. }
  891. static void svm_decache_cr4_guest_bits(struct kvm_vcpu *vcpu)
  892. {
  893. }
  894. static void update_cr0_intercept(struct vcpu_svm *svm)
  895. {
  896. struct vmcb *vmcb = svm->vmcb;
  897. ulong gcr0 = svm->vcpu.arch.cr0;
  898. u64 *hcr0 = &svm->vmcb->save.cr0;
  899. if (!svm->vcpu.fpu_active)
  900. *hcr0 |= SVM_CR0_SELECTIVE_MASK;
  901. else
  902. *hcr0 = (*hcr0 & ~SVM_CR0_SELECTIVE_MASK)
  903. | (gcr0 & SVM_CR0_SELECTIVE_MASK);
  904. if (gcr0 == *hcr0 && svm->vcpu.fpu_active) {
  905. vmcb->control.intercept_cr_read &= ~INTERCEPT_CR0_MASK;
  906. vmcb->control.intercept_cr_write &= ~INTERCEPT_CR0_MASK;
  907. if (is_nested(svm)) {
  908. struct vmcb *hsave = svm->nested.hsave;
  909. hsave->control.intercept_cr_read &= ~INTERCEPT_CR0_MASK;
  910. hsave->control.intercept_cr_write &= ~INTERCEPT_CR0_MASK;
  911. vmcb->control.intercept_cr_read |= svm->nested.intercept_cr_read;
  912. vmcb->control.intercept_cr_write |= svm->nested.intercept_cr_write;
  913. }
  914. } else {
  915. svm->vmcb->control.intercept_cr_read |= INTERCEPT_CR0_MASK;
  916. svm->vmcb->control.intercept_cr_write |= INTERCEPT_CR0_MASK;
  917. if (is_nested(svm)) {
  918. struct vmcb *hsave = svm->nested.hsave;
  919. hsave->control.intercept_cr_read |= INTERCEPT_CR0_MASK;
  920. hsave->control.intercept_cr_write |= INTERCEPT_CR0_MASK;
  921. }
  922. }
  923. }
  924. static void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
  925. {
  926. struct vcpu_svm *svm = to_svm(vcpu);
  927. if (is_nested(svm)) {
  928. /*
  929. * We are here because we run in nested mode: the host kvm
  930. * intercepts cr0 writes, but the L1 hypervisor does not.
  931. * But the L1 hypervisor may intercept selective cr0 writes.
  932. * This needs to be checked here.
  933. */
  934. unsigned long old, new;
  935. /* Remove bits that would trigger a real cr0 write intercept */
  936. old = vcpu->arch.cr0 & SVM_CR0_SELECTIVE_MASK;
  937. new = cr0 & SVM_CR0_SELECTIVE_MASK;
  938. if (old == new) {
  939. /* cr0 write with ts and mp unchanged */
  940. svm->vmcb->control.exit_code = SVM_EXIT_CR0_SEL_WRITE;
  941. if (nested_svm_exit_handled(svm) == NESTED_EXIT_DONE)
  942. return;
  943. }
  944. }
  945. #ifdef CONFIG_X86_64
  946. if (vcpu->arch.efer & EFER_LME) {
  947. if (!is_paging(vcpu) && (cr0 & X86_CR0_PG)) {
  948. vcpu->arch.efer |= EFER_LMA;
  949. svm->vmcb->save.efer |= EFER_LMA | EFER_LME;
  950. }
  951. if (is_paging(vcpu) && !(cr0 & X86_CR0_PG)) {
  952. vcpu->arch.efer &= ~EFER_LMA;
  953. svm->vmcb->save.efer &= ~(EFER_LMA | EFER_LME);
  954. }
  955. }
  956. #endif
  957. vcpu->arch.cr0 = cr0;
  958. if (!npt_enabled)
  959. cr0 |= X86_CR0_PG | X86_CR0_WP;
  960. if (!vcpu->fpu_active)
  961. cr0 |= X86_CR0_TS;
  962. /*
  963. * re-enable caching here because the QEMU bios
  964. * does not do it - this results in some delay at
  965. * reboot
  966. */
  967. cr0 &= ~(X86_CR0_CD | X86_CR0_NW);
  968. svm->vmcb->save.cr0 = cr0;
  969. update_cr0_intercept(svm);
  970. }
  971. static void svm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
  972. {
  973. unsigned long host_cr4_mce = read_cr4() & X86_CR4_MCE;
  974. unsigned long old_cr4 = to_svm(vcpu)->vmcb->save.cr4;
  975. if (npt_enabled && ((old_cr4 ^ cr4) & X86_CR4_PGE))
  976. force_new_asid(vcpu);
  977. vcpu->arch.cr4 = cr4;
  978. if (!npt_enabled)
  979. cr4 |= X86_CR4_PAE;
  980. cr4 |= host_cr4_mce;
  981. to_svm(vcpu)->vmcb->save.cr4 = cr4;
  982. }
  983. static void svm_set_segment(struct kvm_vcpu *vcpu,
  984. struct kvm_segment *var, int seg)
  985. {
  986. struct vcpu_svm *svm = to_svm(vcpu);
  987. struct vmcb_seg *s = svm_seg(vcpu, seg);
  988. s->base = var->base;
  989. s->limit = var->limit;
  990. s->selector = var->selector;
  991. if (var->unusable)
  992. s->attrib = 0;
  993. else {
  994. s->attrib = (var->type & SVM_SELECTOR_TYPE_MASK);
  995. s->attrib |= (var->s & 1) << SVM_SELECTOR_S_SHIFT;
  996. s->attrib |= (var->dpl & 3) << SVM_SELECTOR_DPL_SHIFT;
  997. s->attrib |= (var->present & 1) << SVM_SELECTOR_P_SHIFT;
  998. s->attrib |= (var->avl & 1) << SVM_SELECTOR_AVL_SHIFT;
  999. s->attrib |= (var->l & 1) << SVM_SELECTOR_L_SHIFT;
  1000. s->attrib |= (var->db & 1) << SVM_SELECTOR_DB_SHIFT;
  1001. s->attrib |= (var->g & 1) << SVM_SELECTOR_G_SHIFT;
  1002. }
  1003. if (seg == VCPU_SREG_CS)
  1004. svm->vmcb->save.cpl
  1005. = (svm->vmcb->save.cs.attrib
  1006. >> SVM_SELECTOR_DPL_SHIFT) & 3;
  1007. }
  1008. static void update_db_intercept(struct kvm_vcpu *vcpu)
  1009. {
  1010. struct vcpu_svm *svm = to_svm(vcpu);
  1011. svm->vmcb->control.intercept_exceptions &=
  1012. ~((1 << DB_VECTOR) | (1 << BP_VECTOR));
  1013. if (svm->nmi_singlestep)
  1014. svm->vmcb->control.intercept_exceptions |= (1 << DB_VECTOR);
  1015. if (vcpu->guest_debug & KVM_GUESTDBG_ENABLE) {
  1016. if (vcpu->guest_debug &
  1017. (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP))
  1018. svm->vmcb->control.intercept_exceptions |=
  1019. 1 << DB_VECTOR;
  1020. if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP)
  1021. svm->vmcb->control.intercept_exceptions |=
  1022. 1 << BP_VECTOR;
  1023. } else
  1024. vcpu->guest_debug = 0;
  1025. }
  1026. static void svm_guest_debug(struct kvm_vcpu *vcpu, struct kvm_guest_debug *dbg)
  1027. {
  1028. struct vcpu_svm *svm = to_svm(vcpu);
  1029. if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)
  1030. svm->vmcb->save.dr7 = dbg->arch.debugreg[7];
  1031. else
  1032. svm->vmcb->save.dr7 = vcpu->arch.dr7;
  1033. update_db_intercept(vcpu);
  1034. }
  1035. static void load_host_msrs(struct kvm_vcpu *vcpu)
  1036. {
  1037. #ifdef CONFIG_X86_64
  1038. wrmsrl(MSR_GS_BASE, to_svm(vcpu)->host_gs_base);
  1039. #endif
  1040. }
  1041. static void save_host_msrs(struct kvm_vcpu *vcpu)
  1042. {
  1043. #ifdef CONFIG_X86_64
  1044. rdmsrl(MSR_GS_BASE, to_svm(vcpu)->host_gs_base);
  1045. #endif
  1046. }
  1047. static void new_asid(struct vcpu_svm *svm, struct svm_cpu_data *sd)
  1048. {
  1049. if (sd->next_asid > sd->max_asid) {
  1050. ++sd->asid_generation;
  1051. sd->next_asid = 1;
  1052. svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ALL_ASID;
  1053. }
  1054. svm->asid_generation = sd->asid_generation;
  1055. svm->vmcb->control.asid = sd->next_asid++;
  1056. }
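/*
 * Editor's note: ASIDs are handed out per physical CPU. Each allocation
 * takes the next value from next_asid; once the pool is exhausted the
 * per-CPU generation counter is bumped, allocation restarts at 1 and
 * TLB_CONTROL_FLUSH_ALL_ASID asks the hardware to flush all ASIDs on the
 * next VMRUN. svm->asid_generation lets the run path detect a stale ASID
 * (after a generation bump or a vcpu migration) and request a new one.
 */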
  1057. static void svm_set_dr7(struct kvm_vcpu *vcpu, unsigned long value)
  1058. {
  1059. struct vcpu_svm *svm = to_svm(vcpu);
  1060. svm->vmcb->save.dr7 = value;
  1061. }
  1062. static int pf_interception(struct vcpu_svm *svm)
  1063. {
  1064. u64 fault_address;
  1065. u32 error_code;
  1066. fault_address = svm->vmcb->control.exit_info_2;
  1067. error_code = svm->vmcb->control.exit_info_1;
  1068. trace_kvm_page_fault(fault_address, error_code);
  1069. if (!npt_enabled && kvm_event_needs_reinjection(&svm->vcpu))
  1070. kvm_mmu_unprotect_page_virt(&svm->vcpu, fault_address);
  1071. return kvm_mmu_page_fault(&svm->vcpu, fault_address, error_code);
  1072. }
  1073. static int db_interception(struct vcpu_svm *svm)
  1074. {
  1075. struct kvm_run *kvm_run = svm->vcpu.run;
  1076. if (!(svm->vcpu.guest_debug &
  1077. (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP)) &&
  1078. !svm->nmi_singlestep) {
  1079. kvm_queue_exception(&svm->vcpu, DB_VECTOR);
  1080. return 1;
  1081. }
  1082. if (svm->nmi_singlestep) {
  1083. svm->nmi_singlestep = false;
  1084. if (!(svm->vcpu.guest_debug & KVM_GUESTDBG_SINGLESTEP))
  1085. svm->vmcb->save.rflags &=
  1086. ~(X86_EFLAGS_TF | X86_EFLAGS_RF);
  1087. update_db_intercept(&svm->vcpu);
  1088. }
  1089. if (svm->vcpu.guest_debug &
  1090. (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP)) {
  1091. kvm_run->exit_reason = KVM_EXIT_DEBUG;
  1092. kvm_run->debug.arch.pc =
  1093. svm->vmcb->save.cs.base + svm->vmcb->save.rip;
  1094. kvm_run->debug.arch.exception = DB_VECTOR;
  1095. return 0;
  1096. }
  1097. return 1;
  1098. }
  1099. static int bp_interception(struct vcpu_svm *svm)
  1100. {
  1101. struct kvm_run *kvm_run = svm->vcpu.run;
  1102. kvm_run->exit_reason = KVM_EXIT_DEBUG;
  1103. kvm_run->debug.arch.pc = svm->vmcb->save.cs.base + svm->vmcb->save.rip;
  1104. kvm_run->debug.arch.exception = BP_VECTOR;
  1105. return 0;
  1106. }
  1107. static int ud_interception(struct vcpu_svm *svm)
  1108. {
  1109. int er;
  1110. er = emulate_instruction(&svm->vcpu, 0, 0, EMULTYPE_TRAP_UD);
  1111. if (er != EMULATE_DONE)
  1112. kvm_queue_exception(&svm->vcpu, UD_VECTOR);
  1113. return 1;
  1114. }
  1115. static void svm_fpu_activate(struct kvm_vcpu *vcpu)
  1116. {
  1117. struct vcpu_svm *svm = to_svm(vcpu);
  1118. u32 excp;
  1119. if (is_nested(svm)) {
  1120. u32 h_excp, n_excp;
  1121. h_excp = svm->nested.hsave->control.intercept_exceptions;
  1122. n_excp = svm->nested.intercept_exceptions;
  1123. h_excp &= ~(1 << NM_VECTOR);
  1124. excp = h_excp | n_excp;
  1125. } else {
  1126. excp = svm->vmcb->control.intercept_exceptions;
  1127. excp &= ~(1 << NM_VECTOR);
  1128. }
  1129. svm->vmcb->control.intercept_exceptions = excp;
  1130. svm->vcpu.fpu_active = 1;
  1131. update_cr0_intercept(svm);
  1132. }
  1133. static int nm_interception(struct vcpu_svm *svm)
  1134. {
  1135. svm_fpu_activate(&svm->vcpu);
  1136. return 1;
  1137. }
  1138. static int mc_interception(struct vcpu_svm *svm)
  1139. {
  1140. /*
  1141. * On an #MC intercept the MCE handler is not called automatically in
  1142. * the host. So do it by hand here.
  1143. */
  1144. asm volatile (
  1145. "int $0x12\n");
  1146. /* not sure if we ever come back to this point */
  1147. return 1;
  1148. }
  1149. static int shutdown_interception(struct vcpu_svm *svm)
  1150. {
  1151. struct kvm_run *kvm_run = svm->vcpu.run;
  1152. /*
  1153. * VMCB is undefined after a SHUTDOWN intercept
  1154. * so reinitialize it.
  1155. */
  1156. clear_page(svm->vmcb);
  1157. init_vmcb(svm);
  1158. kvm_run->exit_reason = KVM_EXIT_SHUTDOWN;
  1159. return 0;
  1160. }
  1161. static int io_interception(struct vcpu_svm *svm)
  1162. {
  1163. struct kvm_vcpu *vcpu = &svm->vcpu;
  1164. u32 io_info = svm->vmcb->control.exit_info_1; /* address size bug? */
  1165. int size, in, string;
  1166. unsigned port;
  1167. ++svm->vcpu.stat.io_exits;
  1168. string = (io_info & SVM_IOIO_STR_MASK) != 0;
  1169. in = (io_info & SVM_IOIO_TYPE_MASK) != 0;
  1170. if (string || in)
  1171. return !(emulate_instruction(vcpu, 0, 0, 0) == EMULATE_DO_MMIO);
  1172. port = io_info >> 16;
  1173. size = (io_info & SVM_IOIO_SIZE_MASK) >> SVM_IOIO_SIZE_SHIFT;
  1174. svm->next_rip = svm->vmcb->control.exit_info_2;
  1175. skip_emulated_instruction(&svm->vcpu);
  1176. return kvm_fast_pio_out(vcpu, size, port);
  1177. }
  1178. static int nmi_interception(struct vcpu_svm *svm)
  1179. {
  1180. return 1;
  1181. }
  1182. static int intr_interception(struct vcpu_svm *svm)
  1183. {
  1184. ++svm->vcpu.stat.irq_exits;
  1185. return 1;
  1186. }
  1187. static int nop_on_interception(struct vcpu_svm *svm)
  1188. {
  1189. return 1;
  1190. }
  1191. static int halt_interception(struct vcpu_svm *svm)
  1192. {
  1193. svm->next_rip = kvm_rip_read(&svm->vcpu) + 1;
  1194. skip_emulated_instruction(&svm->vcpu);
  1195. return kvm_emulate_halt(&svm->vcpu);
  1196. }
  1197. static int vmmcall_interception(struct vcpu_svm *svm)
  1198. {
  1199. svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
  1200. skip_emulated_instruction(&svm->vcpu);
  1201. kvm_emulate_hypercall(&svm->vcpu);
  1202. return 1;
  1203. }
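/*
 * Editor's note: the two handlers above advance next_rip by hand because
 * the instruction lengths are fixed: HLT is a 1-byte opcode (0xf4) and
 * VMMCALL is 3 bytes (0f 01 d9), so neither nRIP support nor emulation
 * is needed here.
 */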
  1204. static int nested_svm_check_permissions(struct vcpu_svm *svm)
  1205. {
  1206. if (!(svm->vcpu.arch.efer & EFER_SVME)
  1207. || !is_paging(&svm->vcpu)) {
  1208. kvm_queue_exception(&svm->vcpu, UD_VECTOR);
  1209. return 1;
  1210. }
  1211. if (svm->vmcb->save.cpl) {
  1212. kvm_inject_gp(&svm->vcpu, 0);
  1213. return 1;
  1214. }
  1215. return 0;
  1216. }
  1217. static int nested_svm_check_exception(struct vcpu_svm *svm, unsigned nr,
  1218. bool has_error_code, u32 error_code)
  1219. {
  1220. int vmexit;
  1221. if (!is_nested(svm))
  1222. return 0;
  1223. svm->vmcb->control.exit_code = SVM_EXIT_EXCP_BASE + nr;
  1224. svm->vmcb->control.exit_code_hi = 0;
  1225. svm->vmcb->control.exit_info_1 = error_code;
  1226. svm->vmcb->control.exit_info_2 = svm->vcpu.arch.cr2;
  1227. vmexit = nested_svm_intercept(svm);
  1228. if (vmexit == NESTED_EXIT_DONE)
  1229. svm->nested.exit_required = true;
  1230. return vmexit;
  1231. }
  1232. /* This function returns true if it is safe to enable the irq window */
  1233. static inline bool nested_svm_intr(struct vcpu_svm *svm)
  1234. {
  1235. if (!is_nested(svm))
  1236. return true;
  1237. if (!(svm->vcpu.arch.hflags & HF_VINTR_MASK))
  1238. return true;
  1239. if (!(svm->vcpu.arch.hflags & HF_HIF_MASK))
  1240. return false;
  1241. svm->vmcb->control.exit_code = SVM_EXIT_INTR;
  1242. svm->vmcb->control.exit_info_1 = 0;
  1243. svm->vmcb->control.exit_info_2 = 0;
  1244. if (svm->nested.intercept & 1ULL) {
  1245. /*
  1246. * The #vmexit can't be emulated here directly because this
  1247. * code path runs with irqs and preemption disabled. A
  1248. * #vmexit emulation might sleep. Only signal request for
  1249. * the #vmexit here.
  1250. */
  1251. svm->nested.exit_required = true;
  1252. trace_kvm_nested_intr_vmexit(svm->vmcb->save.rip);
  1253. return false;
  1254. }
  1255. return true;
  1256. }
  1257. /* This function returns true if it is safe to enable the nmi window */
  1258. static inline bool nested_svm_nmi(struct vcpu_svm *svm)
  1259. {
  1260. if (!is_nested(svm))
  1261. return true;
  1262. if (!(svm->nested.intercept & (1ULL << INTERCEPT_NMI)))
  1263. return true;
  1264. svm->vmcb->control.exit_code = SVM_EXIT_NMI;
  1265. svm->nested.exit_required = true;
  1266. return false;
  1267. }
  1268. static void *nested_svm_map(struct vcpu_svm *svm, u64 gpa, struct page **_page)
  1269. {
  1270. struct page *page;
  1271. might_sleep();
  1272. page = gfn_to_page(svm->vcpu.kvm, gpa >> PAGE_SHIFT);
  1273. if (is_error_page(page))
  1274. goto error;
  1275. *_page = page;
  1276. return kmap(page);
  1277. error:
  1278. kvm_release_page_clean(page);
  1279. kvm_inject_gp(&svm->vcpu, 0);
  1280. return NULL;
  1281. }
  1282. static void nested_svm_unmap(struct page *page)
  1283. {
  1284. kunmap(page);
  1285. kvm_release_page_dirty(page);
  1286. }
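/*
 * Consult the nested guest's IO permission bitmap to decide whether an
 * IO intercept belongs to the L1 hypervisor (NESTED_EXIT_DONE) or can
 * be handled by the host (NESTED_EXIT_HOST).
 */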
  1287. static int nested_svm_intercept_ioio(struct vcpu_svm *svm)
  1288. {
  1289. unsigned port;
  1290. u8 val, bit;
  1291. u64 gpa;
  1292. if (!(svm->nested.intercept & (1ULL << INTERCEPT_IOIO_PROT)))
  1293. return NESTED_EXIT_HOST;
  1294. port = svm->vmcb->control.exit_info_1 >> 16;
  1295. gpa = svm->nested.vmcb_iopm + (port / 8);
  1296. bit = port % 8;
val = 0;
/*
 * kvm_read_guest() returns non-zero on failure; in that case reflect the
 * exit to the nested hypervisor instead of silently handling it in the
 * host.  On success, test only the bit that belongs to this port.
 */
if (kvm_read_guest(svm->vcpu.kvm, gpa, &val, 1))
return NESTED_EXIT_DONE;
val &= (1 << bit);
return val ? NESTED_EXIT_DONE : NESTED_EXIT_HOST;
  1301. }
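/*
 * Same idea for MSR accesses: look up the bit for the MSR in RCX in the
 * nested MSR permission bitmap.  Unknown MSRs and failed bitmap reads are
 * reflected to the L1 hypervisor to stay on the safe side.
 */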
  1302. static int nested_svm_exit_handled_msr(struct vcpu_svm *svm)
  1303. {
  1304. u32 offset, msr, value;
  1305. int write, mask;
  1306. if (!(svm->nested.intercept & (1ULL << INTERCEPT_MSR_PROT)))
  1307. return NESTED_EXIT_HOST;
  1308. msr = svm->vcpu.arch.regs[VCPU_REGS_RCX];
  1309. offset = svm_msrpm_offset(msr);
  1310. write = svm->vmcb->control.exit_info_1 & 1;
  1311. mask = 1 << ((2 * (msr & 0xf)) + write);
  1312. if (offset == MSR_INVALID)
  1313. return NESTED_EXIT_DONE;
/* The offset is given in 32-bit word units, but we need a byte offset */
  1315. offset *= 4;
  1316. if (kvm_read_guest(svm->vcpu.kvm, svm->nested.vmcb_msrpm + offset, &value, 4))
  1317. return NESTED_EXIT_DONE;
  1318. return (value & mask) ? NESTED_EXIT_DONE : NESTED_EXIT_HOST;
  1319. }
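/*
 * Exits the host always wants to handle itself, regardless of what the
 * nested hypervisor intercepts: physical interrupts, NMIs, machine
 * checks and - depending on whether NPT or shadow paging is used -
 * page faults.
 */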
  1320. static int nested_svm_exit_special(struct vcpu_svm *svm)
  1321. {
  1322. u32 exit_code = svm->vmcb->control.exit_code;
  1323. switch (exit_code) {
  1324. case SVM_EXIT_INTR:
  1325. case SVM_EXIT_NMI:
  1326. case SVM_EXIT_EXCP_BASE + MC_VECTOR:
  1327. return NESTED_EXIT_HOST;
  1328. case SVM_EXIT_NPF:
  1329. /* For now we are always handling NPFs when using them */
  1330. if (npt_enabled)
  1331. return NESTED_EXIT_HOST;
  1332. break;
  1333. case SVM_EXIT_EXCP_BASE + PF_VECTOR:
  1334. /* When we're shadowing, trap PFs */
  1335. if (!npt_enabled)
  1336. return NESTED_EXIT_HOST;
  1337. break;
  1338. case SVM_EXIT_EXCP_BASE + NM_VECTOR:
  1339. nm_interception(svm);
  1340. break;
  1341. default:
  1342. break;
  1343. }
  1344. return NESTED_EXIT_CONTINUE;
  1345. }
/*
 * Decide whether a #vmexit is claimed by the nested hypervisor; returns
 * NESTED_EXIT_DONE if the exit must be reflected to it, NESTED_EXIT_HOST
 * otherwise.
 */
  1349. static int nested_svm_intercept(struct vcpu_svm *svm)
  1350. {
  1351. u32 exit_code = svm->vmcb->control.exit_code;
  1352. int vmexit = NESTED_EXIT_HOST;
  1353. switch (exit_code) {
  1354. case SVM_EXIT_MSR:
  1355. vmexit = nested_svm_exit_handled_msr(svm);
  1356. break;
  1357. case SVM_EXIT_IOIO:
  1358. vmexit = nested_svm_intercept_ioio(svm);
  1359. break;
  1360. case SVM_EXIT_READ_CR0 ... SVM_EXIT_READ_CR8: {
  1361. u32 cr_bits = 1 << (exit_code - SVM_EXIT_READ_CR0);
  1362. if (svm->nested.intercept_cr_read & cr_bits)
  1363. vmexit = NESTED_EXIT_DONE;
  1364. break;
  1365. }
  1366. case SVM_EXIT_WRITE_CR0 ... SVM_EXIT_WRITE_CR8: {
  1367. u32 cr_bits = 1 << (exit_code - SVM_EXIT_WRITE_CR0);
  1368. if (svm->nested.intercept_cr_write & cr_bits)
  1369. vmexit = NESTED_EXIT_DONE;
  1370. break;
  1371. }
  1372. case SVM_EXIT_READ_DR0 ... SVM_EXIT_READ_DR7: {
  1373. u32 dr_bits = 1 << (exit_code - SVM_EXIT_READ_DR0);
  1374. if (svm->nested.intercept_dr_read & dr_bits)
  1375. vmexit = NESTED_EXIT_DONE;
  1376. break;
  1377. }
  1378. case SVM_EXIT_WRITE_DR0 ... SVM_EXIT_WRITE_DR7: {
  1379. u32 dr_bits = 1 << (exit_code - SVM_EXIT_WRITE_DR0);
  1380. if (svm->nested.intercept_dr_write & dr_bits)
  1381. vmexit = NESTED_EXIT_DONE;
  1382. break;
  1383. }
  1384. case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 0x1f: {
  1385. u32 excp_bits = 1 << (exit_code - SVM_EXIT_EXCP_BASE);
  1386. if (svm->nested.intercept_exceptions & excp_bits)
  1387. vmexit = NESTED_EXIT_DONE;
  1388. break;
  1389. }
  1390. case SVM_EXIT_ERR: {
  1391. vmexit = NESTED_EXIT_DONE;
  1392. break;
  1393. }
  1394. default: {
  1395. u64 exit_bits = 1ULL << (exit_code - SVM_EXIT_INTR);
  1396. if (svm->nested.intercept & exit_bits)
  1397. vmexit = NESTED_EXIT_DONE;
  1398. }
  1399. }
  1400. return vmexit;
  1401. }
  1402. static int nested_svm_exit_handled(struct vcpu_svm *svm)
  1403. {
  1404. int vmexit;
  1405. vmexit = nested_svm_intercept(svm);
  1406. if (vmexit == NESTED_EXIT_DONE)
  1407. nested_svm_vmexit(svm);
  1408. return vmexit;
  1409. }
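/* Copy the VMCB control area field by field; the save area is handled separately. */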
  1410. static inline void copy_vmcb_control_area(struct vmcb *dst_vmcb, struct vmcb *from_vmcb)
  1411. {
  1412. struct vmcb_control_area *dst = &dst_vmcb->control;
  1413. struct vmcb_control_area *from = &from_vmcb->control;
  1414. dst->intercept_cr_read = from->intercept_cr_read;
  1415. dst->intercept_cr_write = from->intercept_cr_write;
  1416. dst->intercept_dr_read = from->intercept_dr_read;
  1417. dst->intercept_dr_write = from->intercept_dr_write;
  1418. dst->intercept_exceptions = from->intercept_exceptions;
  1419. dst->intercept = from->intercept;
  1420. dst->iopm_base_pa = from->iopm_base_pa;
  1421. dst->msrpm_base_pa = from->msrpm_base_pa;
  1422. dst->tsc_offset = from->tsc_offset;
  1423. dst->asid = from->asid;
  1424. dst->tlb_ctl = from->tlb_ctl;
  1425. dst->int_ctl = from->int_ctl;
  1426. dst->int_vector = from->int_vector;
  1427. dst->int_state = from->int_state;
  1428. dst->exit_code = from->exit_code;
  1429. dst->exit_code_hi = from->exit_code_hi;
  1430. dst->exit_info_1 = from->exit_info_1;
  1431. dst->exit_info_2 = from->exit_info_2;
  1432. dst->exit_int_info = from->exit_int_info;
  1433. dst->exit_int_info_err = from->exit_int_info_err;
  1434. dst->nested_ctl = from->nested_ctl;
  1435. dst->event_inj = from->event_inj;
  1436. dst->event_inj_err = from->event_inj_err;
  1437. dst->nested_cr3 = from->nested_cr3;
  1438. dst->lbr_ctl = from->lbr_ctl;
  1439. }
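/*
 * Emulate a #vmexit to the L1 hypervisor: copy the current guest state
 * into the nested VMCB, restore the host state saved at VMRUN time and
 * leave nested mode (svm->nested.vmcb = 0).
 */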
  1440. static int nested_svm_vmexit(struct vcpu_svm *svm)
  1441. {
  1442. struct vmcb *nested_vmcb;
  1443. struct vmcb *hsave = svm->nested.hsave;
  1444. struct vmcb *vmcb = svm->vmcb;
  1445. struct page *page;
  1446. trace_kvm_nested_vmexit_inject(vmcb->control.exit_code,
  1447. vmcb->control.exit_info_1,
  1448. vmcb->control.exit_info_2,
  1449. vmcb->control.exit_int_info,
  1450. vmcb->control.exit_int_info_err);
  1451. nested_vmcb = nested_svm_map(svm, svm->nested.vmcb, &page);
  1452. if (!nested_vmcb)
  1453. return 1;
  1454. /* Exit nested SVM mode */
  1455. svm->nested.vmcb = 0;
  1456. /* Give the current vmcb to the guest */
  1457. disable_gif(svm);
  1458. nested_vmcb->save.es = vmcb->save.es;
  1459. nested_vmcb->save.cs = vmcb->save.cs;
  1460. nested_vmcb->save.ss = vmcb->save.ss;
  1461. nested_vmcb->save.ds = vmcb->save.ds;
  1462. nested_vmcb->save.gdtr = vmcb->save.gdtr;
  1463. nested_vmcb->save.idtr = vmcb->save.idtr;
  1464. nested_vmcb->save.cr0 = kvm_read_cr0(&svm->vcpu);
  1465. nested_vmcb->save.cr3 = svm->vcpu.arch.cr3;
  1466. nested_vmcb->save.cr2 = vmcb->save.cr2;
  1467. nested_vmcb->save.cr4 = svm->vcpu.arch.cr4;
  1468. nested_vmcb->save.rflags = vmcb->save.rflags;
  1469. nested_vmcb->save.rip = vmcb->save.rip;
  1470. nested_vmcb->save.rsp = vmcb->save.rsp;
  1471. nested_vmcb->save.rax = vmcb->save.rax;
  1472. nested_vmcb->save.dr7 = vmcb->save.dr7;
  1473. nested_vmcb->save.dr6 = vmcb->save.dr6;
  1474. nested_vmcb->save.cpl = vmcb->save.cpl;
  1475. nested_vmcb->control.int_ctl = vmcb->control.int_ctl;
  1476. nested_vmcb->control.int_vector = vmcb->control.int_vector;
  1477. nested_vmcb->control.int_state = vmcb->control.int_state;
  1478. nested_vmcb->control.exit_code = vmcb->control.exit_code;
  1479. nested_vmcb->control.exit_code_hi = vmcb->control.exit_code_hi;
  1480. nested_vmcb->control.exit_info_1 = vmcb->control.exit_info_1;
  1481. nested_vmcb->control.exit_info_2 = vmcb->control.exit_info_2;
  1482. nested_vmcb->control.exit_int_info = vmcb->control.exit_int_info;
  1483. nested_vmcb->control.exit_int_info_err = vmcb->control.exit_int_info_err;
/*
 * If we emulate a VMRUN/#VMEXIT in the same host #vmexit cycle we have
 * to make sure that we do not lose injected events.  So check event_inj
 * here and copy it to exit_int_info if it is valid.
 * exit_int_info and event_inj cannot both be valid, because this case
 * only happens on a VMRUN instruction intercept, which has no valid
 * exit_int_info set.
 */
  1492. if (vmcb->control.event_inj & SVM_EVTINJ_VALID) {
  1493. struct vmcb_control_area *nc = &nested_vmcb->control;
  1494. nc->exit_int_info = vmcb->control.event_inj;
  1495. nc->exit_int_info_err = vmcb->control.event_inj_err;
  1496. }
  1497. nested_vmcb->control.tlb_ctl = 0;
  1498. nested_vmcb->control.event_inj = 0;
  1499. nested_vmcb->control.event_inj_err = 0;
  1500. /* We always set V_INTR_MASKING and remember the old value in hflags */
  1501. if (!(svm->vcpu.arch.hflags & HF_VINTR_MASK))
  1502. nested_vmcb->control.int_ctl &= ~V_INTR_MASKING_MASK;
  1503. /* Restore the original control entries */
  1504. copy_vmcb_control_area(vmcb, hsave);
  1505. kvm_clear_exception_queue(&svm->vcpu);
  1506. kvm_clear_interrupt_queue(&svm->vcpu);
  1507. /* Restore selected save entries */
  1508. svm->vmcb->save.es = hsave->save.es;
  1509. svm->vmcb->save.cs = hsave->save.cs;
  1510. svm->vmcb->save.ss = hsave->save.ss;
  1511. svm->vmcb->save.ds = hsave->save.ds;
  1512. svm->vmcb->save.gdtr = hsave->save.gdtr;
  1513. svm->vmcb->save.idtr = hsave->save.idtr;
  1514. svm->vmcb->save.rflags = hsave->save.rflags;
  1515. svm_set_efer(&svm->vcpu, hsave->save.efer);
  1516. svm_set_cr0(&svm->vcpu, hsave->save.cr0 | X86_CR0_PE);
  1517. svm_set_cr4(&svm->vcpu, hsave->save.cr4);
  1518. if (npt_enabled) {
  1519. svm->vmcb->save.cr3 = hsave->save.cr3;
  1520. svm->vcpu.arch.cr3 = hsave->save.cr3;
  1521. } else {
  1522. kvm_set_cr3(&svm->vcpu, hsave->save.cr3);
  1523. }
  1524. kvm_register_write(&svm->vcpu, VCPU_REGS_RAX, hsave->save.rax);
  1525. kvm_register_write(&svm->vcpu, VCPU_REGS_RSP, hsave->save.rsp);
  1526. kvm_register_write(&svm->vcpu, VCPU_REGS_RIP, hsave->save.rip);
  1527. svm->vmcb->save.dr7 = 0;
  1528. svm->vmcb->save.cpl = 0;
  1529. svm->vmcb->control.exit_int_info = 0;
  1530. nested_svm_unmap(page);
  1531. kvm_mmu_reset_context(&svm->vcpu);
  1532. kvm_mmu_load(&svm->vcpu);
  1533. return 0;
  1534. }
  1535. static bool nested_svm_vmrun_msrpm(struct vcpu_svm *svm)
  1536. {
/*
 * This function merges the msr permission bitmaps of kvm and the
 * nested vmcb.  It is optimized in that it only merges the parts where
 * the kvm msr permission bitmap may contain zero bits.
 */
  1542. int i;
  1543. if (!(svm->nested.intercept & (1ULL << INTERCEPT_MSR_PROT)))
  1544. return true;
  1545. for (i = 0; i < MSRPM_OFFSETS; i++) {
  1546. u32 value, p;
  1547. u64 offset;
  1548. if (msrpm_offsets[i] == 0xffffffff)
  1549. break;
  1550. p = msrpm_offsets[i];
  1551. offset = svm->nested.vmcb_msrpm + (p * 4);
  1552. if (kvm_read_guest(svm->vcpu.kvm, offset, &value, 4))
  1553. return false;
  1554. svm->nested.msrpm[p] = svm->msrpm[p] | value;
  1555. }
  1556. svm->vmcb->control.msrpm_base_pa = __pa(svm->nested.msrpm);
  1557. return true;
  1558. }
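/*
 * Emulate VMRUN: save the current (L1) state into the host-save area,
 * load guest state and control fields from the nested VMCB at RAX, and
 * merge the intercept bitmaps so the nested guest can never intercept
 * less than L1 does.
 */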
  1559. static bool nested_svm_vmrun(struct vcpu_svm *svm)
  1560. {
  1561. struct vmcb *nested_vmcb;
  1562. struct vmcb *hsave = svm->nested.hsave;
  1563. struct vmcb *vmcb = svm->vmcb;
  1564. struct page *page;
  1565. u64 vmcb_gpa;
  1566. vmcb_gpa = svm->vmcb->save.rax;
  1567. nested_vmcb = nested_svm_map(svm, svm->vmcb->save.rax, &page);
  1568. if (!nested_vmcb)
  1569. return false;
  1570. trace_kvm_nested_vmrun(svm->vmcb->save.rip - 3, vmcb_gpa,
  1571. nested_vmcb->save.rip,
  1572. nested_vmcb->control.int_ctl,
  1573. nested_vmcb->control.event_inj,
  1574. nested_vmcb->control.nested_ctl);
  1575. trace_kvm_nested_intercepts(nested_vmcb->control.intercept_cr_read,
  1576. nested_vmcb->control.intercept_cr_write,
  1577. nested_vmcb->control.intercept_exceptions,
  1578. nested_vmcb->control.intercept);
  1579. /* Clear internal status */
  1580. kvm_clear_exception_queue(&svm->vcpu);
  1581. kvm_clear_interrupt_queue(&svm->vcpu);
  1582. /*
  1583. * Save the old vmcb, so we don't need to pick what we save, but can
  1584. * restore everything when a VMEXIT occurs
  1585. */
  1586. hsave->save.es = vmcb->save.es;
  1587. hsave->save.cs = vmcb->save.cs;
  1588. hsave->save.ss = vmcb->save.ss;
  1589. hsave->save.ds = vmcb->save.ds;
  1590. hsave->save.gdtr = vmcb->save.gdtr;
  1591. hsave->save.idtr = vmcb->save.idtr;
  1592. hsave->save.efer = svm->vcpu.arch.efer;
  1593. hsave->save.cr0 = kvm_read_cr0(&svm->vcpu);
  1594. hsave->save.cr4 = svm->vcpu.arch.cr4;
  1595. hsave->save.rflags = vmcb->save.rflags;
  1596. hsave->save.rip = svm->next_rip;
  1597. hsave->save.rsp = vmcb->save.rsp;
  1598. hsave->save.rax = vmcb->save.rax;
  1599. if (npt_enabled)
  1600. hsave->save.cr3 = vmcb->save.cr3;
  1601. else
  1602. hsave->save.cr3 = svm->vcpu.arch.cr3;
  1603. copy_vmcb_control_area(hsave, vmcb);
  1604. if (svm->vmcb->save.rflags & X86_EFLAGS_IF)
  1605. svm->vcpu.arch.hflags |= HF_HIF_MASK;
  1606. else
  1607. svm->vcpu.arch.hflags &= ~HF_HIF_MASK;
  1608. /* Load the nested guest state */
  1609. svm->vmcb->save.es = nested_vmcb->save.es;
  1610. svm->vmcb->save.cs = nested_vmcb->save.cs;
  1611. svm->vmcb->save.ss = nested_vmcb->save.ss;
  1612. svm->vmcb->save.ds = nested_vmcb->save.ds;
  1613. svm->vmcb->save.gdtr = nested_vmcb->save.gdtr;
  1614. svm->vmcb->save.idtr = nested_vmcb->save.idtr;
  1615. svm->vmcb->save.rflags = nested_vmcb->save.rflags;
  1616. svm_set_efer(&svm->vcpu, nested_vmcb->save.efer);
  1617. svm_set_cr0(&svm->vcpu, nested_vmcb->save.cr0);
  1618. svm_set_cr4(&svm->vcpu, nested_vmcb->save.cr4);
  1619. if (npt_enabled) {
  1620. svm->vmcb->save.cr3 = nested_vmcb->save.cr3;
  1621. svm->vcpu.arch.cr3 = nested_vmcb->save.cr3;
  1622. } else
  1623. kvm_set_cr3(&svm->vcpu, nested_vmcb->save.cr3);
  1624. /* Guest paging mode is active - reset mmu */
  1625. kvm_mmu_reset_context(&svm->vcpu);
  1626. svm->vmcb->save.cr2 = svm->vcpu.arch.cr2 = nested_vmcb->save.cr2;
  1627. kvm_register_write(&svm->vcpu, VCPU_REGS_RAX, nested_vmcb->save.rax);
  1628. kvm_register_write(&svm->vcpu, VCPU_REGS_RSP, nested_vmcb->save.rsp);
  1629. kvm_register_write(&svm->vcpu, VCPU_REGS_RIP, nested_vmcb->save.rip);
/* Also update the VMCB directly, in case we never reach vcpu_run (which would otherwise sync these fields) */
  1631. svm->vmcb->save.rax = nested_vmcb->save.rax;
  1632. svm->vmcb->save.rsp = nested_vmcb->save.rsp;
  1633. svm->vmcb->save.rip = nested_vmcb->save.rip;
  1634. svm->vmcb->save.dr7 = nested_vmcb->save.dr7;
  1635. svm->vmcb->save.dr6 = nested_vmcb->save.dr6;
  1636. svm->vmcb->save.cpl = nested_vmcb->save.cpl;
  1637. svm->nested.vmcb_msrpm = nested_vmcb->control.msrpm_base_pa & ~0x0fffULL;
  1638. svm->nested.vmcb_iopm = nested_vmcb->control.iopm_base_pa & ~0x0fffULL;
  1639. /* cache intercepts */
  1640. svm->nested.intercept_cr_read = nested_vmcb->control.intercept_cr_read;
  1641. svm->nested.intercept_cr_write = nested_vmcb->control.intercept_cr_write;
  1642. svm->nested.intercept_dr_read = nested_vmcb->control.intercept_dr_read;
  1643. svm->nested.intercept_dr_write = nested_vmcb->control.intercept_dr_write;
  1644. svm->nested.intercept_exceptions = nested_vmcb->control.intercept_exceptions;
  1645. svm->nested.intercept = nested_vmcb->control.intercept;
  1646. force_new_asid(&svm->vcpu);
  1647. svm->vmcb->control.int_ctl = nested_vmcb->control.int_ctl | V_INTR_MASKING_MASK;
  1648. if (nested_vmcb->control.int_ctl & V_INTR_MASKING_MASK)
  1649. svm->vcpu.arch.hflags |= HF_VINTR_MASK;
  1650. else
  1651. svm->vcpu.arch.hflags &= ~HF_VINTR_MASK;
  1652. if (svm->vcpu.arch.hflags & HF_VINTR_MASK) {
  1653. /* We only want the cr8 intercept bits of the guest */
  1654. svm->vmcb->control.intercept_cr_read &= ~INTERCEPT_CR8_MASK;
  1655. svm->vmcb->control.intercept_cr_write &= ~INTERCEPT_CR8_MASK;
  1656. }
  1657. /* We don't want to see VMMCALLs from a nested guest */
  1658. svm->vmcb->control.intercept &= ~(1ULL << INTERCEPT_VMMCALL);
  1659. /*
  1660. * We don't want a nested guest to be more powerful than the guest, so
  1661. * all intercepts are ORed
  1662. */
  1663. svm->vmcb->control.intercept_cr_read |=
  1664. nested_vmcb->control.intercept_cr_read;
  1665. svm->vmcb->control.intercept_cr_write |=
  1666. nested_vmcb->control.intercept_cr_write;
  1667. svm->vmcb->control.intercept_dr_read |=
  1668. nested_vmcb->control.intercept_dr_read;
  1669. svm->vmcb->control.intercept_dr_write |=
  1670. nested_vmcb->control.intercept_dr_write;
  1671. svm->vmcb->control.intercept_exceptions |=
  1672. nested_vmcb->control.intercept_exceptions;
  1673. svm->vmcb->control.intercept |= nested_vmcb->control.intercept;
  1674. svm->vmcb->control.lbr_ctl = nested_vmcb->control.lbr_ctl;
  1675. svm->vmcb->control.int_vector = nested_vmcb->control.int_vector;
  1676. svm->vmcb->control.int_state = nested_vmcb->control.int_state;
  1677. svm->vmcb->control.tsc_offset += nested_vmcb->control.tsc_offset;
  1678. svm->vmcb->control.event_inj = nested_vmcb->control.event_inj;
  1679. svm->vmcb->control.event_inj_err = nested_vmcb->control.event_inj_err;
  1680. nested_svm_unmap(page);
  1681. /* nested_vmcb is our indicator if nested SVM is activated */
  1682. svm->nested.vmcb = vmcb_gpa;
  1683. enable_gif(svm);
  1684. return true;
  1685. }
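/*
 * VMLOAD/VMSAVE only transfer the "hidden" state that VMRUN does not
 * touch: FS/GS/TR/LDTR, KERNEL_GS_BASE and the SYSCALL/SYSENTER MSRs.
 */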
  1686. static void nested_svm_vmloadsave(struct vmcb *from_vmcb, struct vmcb *to_vmcb)
  1687. {
  1688. to_vmcb->save.fs = from_vmcb->save.fs;
  1689. to_vmcb->save.gs = from_vmcb->save.gs;
  1690. to_vmcb->save.tr = from_vmcb->save.tr;
  1691. to_vmcb->save.ldtr = from_vmcb->save.ldtr;
  1692. to_vmcb->save.kernel_gs_base = from_vmcb->save.kernel_gs_base;
  1693. to_vmcb->save.star = from_vmcb->save.star;
  1694. to_vmcb->save.lstar = from_vmcb->save.lstar;
  1695. to_vmcb->save.cstar = from_vmcb->save.cstar;
  1696. to_vmcb->save.sfmask = from_vmcb->save.sfmask;
  1697. to_vmcb->save.sysenter_cs = from_vmcb->save.sysenter_cs;
  1698. to_vmcb->save.sysenter_esp = from_vmcb->save.sysenter_esp;
  1699. to_vmcb->save.sysenter_eip = from_vmcb->save.sysenter_eip;
  1700. }
  1701. static int vmload_interception(struct vcpu_svm *svm)
  1702. {
  1703. struct vmcb *nested_vmcb;
  1704. struct page *page;
  1705. if (nested_svm_check_permissions(svm))
  1706. return 1;
  1707. svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
  1708. skip_emulated_instruction(&svm->vcpu);
  1709. nested_vmcb = nested_svm_map(svm, svm->vmcb->save.rax, &page);
  1710. if (!nested_vmcb)
  1711. return 1;
  1712. nested_svm_vmloadsave(nested_vmcb, svm->vmcb);
  1713. nested_svm_unmap(page);
  1714. return 1;
  1715. }
  1716. static int vmsave_interception(struct vcpu_svm *svm)
  1717. {
  1718. struct vmcb *nested_vmcb;
  1719. struct page *page;
  1720. if (nested_svm_check_permissions(svm))
  1721. return 1;
  1722. svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
  1723. skip_emulated_instruction(&svm->vcpu);
  1724. nested_vmcb = nested_svm_map(svm, svm->vmcb->save.rax, &page);
  1725. if (!nested_vmcb)
  1726. return 1;
  1727. nested_svm_vmloadsave(svm->vmcb, nested_vmcb);
  1728. nested_svm_unmap(page);
  1729. return 1;
  1730. }
  1731. static int vmrun_interception(struct vcpu_svm *svm)
  1732. {
  1733. if (nested_svm_check_permissions(svm))
  1734. return 1;
  1735. svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
  1736. skip_emulated_instruction(&svm->vcpu);
  1737. if (!nested_svm_vmrun(svm))
  1738. return 1;
  1739. if (!nested_svm_vmrun_msrpm(svm))
  1740. goto failed;
  1741. return 1;
  1742. failed:
  1743. svm->vmcb->control.exit_code = SVM_EXIT_ERR;
  1744. svm->vmcb->control.exit_code_hi = 0;
  1745. svm->vmcb->control.exit_info_1 = 0;
  1746. svm->vmcb->control.exit_info_2 = 0;
  1747. nested_svm_vmexit(svm);
  1748. return 1;
  1749. }
  1750. static int stgi_interception(struct vcpu_svm *svm)
  1751. {
  1752. if (nested_svm_check_permissions(svm))
  1753. return 1;
  1754. svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
  1755. skip_emulated_instruction(&svm->vcpu);
  1756. enable_gif(svm);
  1757. return 1;
  1758. }
  1759. static int clgi_interception(struct vcpu_svm *svm)
  1760. {
  1761. if (nested_svm_check_permissions(svm))
  1762. return 1;
  1763. svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
  1764. skip_emulated_instruction(&svm->vcpu);
  1765. disable_gif(svm);
  1766. /* After a CLGI no interrupts should come */
  1767. svm_clear_vintr(svm);
  1768. svm->vmcb->control.int_ctl &= ~V_IRQ_MASK;
  1769. return 1;
  1770. }
  1771. static int invlpga_interception(struct vcpu_svm *svm)
  1772. {
  1773. struct kvm_vcpu *vcpu = &svm->vcpu;
  1774. trace_kvm_invlpga(svm->vmcb->save.rip, vcpu->arch.regs[VCPU_REGS_RCX],
  1775. vcpu->arch.regs[VCPU_REGS_RAX]);
  1776. /* Let's treat INVLPGA the same as INVLPG (can be optimized!) */
  1777. kvm_mmu_invlpg(vcpu, vcpu->arch.regs[VCPU_REGS_RAX]);
  1778. svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
  1779. skip_emulated_instruction(&svm->vcpu);
  1780. return 1;
  1781. }
  1782. static int skinit_interception(struct vcpu_svm *svm)
  1783. {
  1784. trace_kvm_skinit(svm->vmcb->save.rip, svm->vcpu.arch.regs[VCPU_REGS_RAX]);
  1785. kvm_queue_exception(&svm->vcpu, UD_VECTOR);
  1786. return 1;
  1787. }
  1788. static int invalid_op_interception(struct vcpu_svm *svm)
  1789. {
  1790. kvm_queue_exception(&svm->vcpu, UD_VECTOR);
  1791. return 1;
  1792. }
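/*
 * A task switch intercept: decode the switch reason (IRET, JMP, gate or
 * call) from exit_info_2 and exit_int_info, fix up any pending event
 * state, and hand the heavy lifting to kvm_task_switch().
 */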
  1793. static int task_switch_interception(struct vcpu_svm *svm)
  1794. {
  1795. u16 tss_selector;
  1796. int reason;
  1797. int int_type = svm->vmcb->control.exit_int_info &
  1798. SVM_EXITINTINFO_TYPE_MASK;
  1799. int int_vec = svm->vmcb->control.exit_int_info & SVM_EVTINJ_VEC_MASK;
  1800. uint32_t type =
  1801. svm->vmcb->control.exit_int_info & SVM_EXITINTINFO_TYPE_MASK;
  1802. uint32_t idt_v =
  1803. svm->vmcb->control.exit_int_info & SVM_EXITINTINFO_VALID;
  1804. bool has_error_code = false;
  1805. u32 error_code = 0;
  1806. tss_selector = (u16)svm->vmcb->control.exit_info_1;
  1807. if (svm->vmcb->control.exit_info_2 &
  1808. (1ULL << SVM_EXITINFOSHIFT_TS_REASON_IRET))
  1809. reason = TASK_SWITCH_IRET;
  1810. else if (svm->vmcb->control.exit_info_2 &
  1811. (1ULL << SVM_EXITINFOSHIFT_TS_REASON_JMP))
  1812. reason = TASK_SWITCH_JMP;
  1813. else if (idt_v)
  1814. reason = TASK_SWITCH_GATE;
  1815. else
  1816. reason = TASK_SWITCH_CALL;
  1817. if (reason == TASK_SWITCH_GATE) {
  1818. switch (type) {
  1819. case SVM_EXITINTINFO_TYPE_NMI:
  1820. svm->vcpu.arch.nmi_injected = false;
  1821. break;
  1822. case SVM_EXITINTINFO_TYPE_EXEPT:
  1823. if (svm->vmcb->control.exit_info_2 &
  1824. (1ULL << SVM_EXITINFOSHIFT_TS_HAS_ERROR_CODE)) {
  1825. has_error_code = true;
  1826. error_code =
  1827. (u32)svm->vmcb->control.exit_info_2;
  1828. }
  1829. kvm_clear_exception_queue(&svm->vcpu);
  1830. break;
  1831. case SVM_EXITINTINFO_TYPE_INTR:
  1832. kvm_clear_interrupt_queue(&svm->vcpu);
  1833. break;
  1834. default:
  1835. break;
  1836. }
  1837. }
  1838. if (reason != TASK_SWITCH_GATE ||
  1839. int_type == SVM_EXITINTINFO_TYPE_SOFT ||
  1840. (int_type == SVM_EXITINTINFO_TYPE_EXEPT &&
  1841. (int_vec == OF_VECTOR || int_vec == BP_VECTOR)))
  1842. skip_emulated_instruction(&svm->vcpu);
  1843. if (kvm_task_switch(&svm->vcpu, tss_selector, reason,
  1844. has_error_code, error_code) == EMULATE_FAIL) {
  1845. svm->vcpu.run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
  1846. svm->vcpu.run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
  1847. svm->vcpu.run->internal.ndata = 0;
  1848. return 0;
  1849. }
  1850. return 1;
  1851. }
  1852. static int cpuid_interception(struct vcpu_svm *svm)
  1853. {
  1854. svm->next_rip = kvm_rip_read(&svm->vcpu) + 2;
  1855. kvm_emulate_cpuid(&svm->vcpu);
  1856. return 1;
  1857. }
  1858. static int iret_interception(struct vcpu_svm *svm)
  1859. {
  1860. ++svm->vcpu.stat.nmi_window_exits;
  1861. svm->vmcb->control.intercept &= ~(1ULL << INTERCEPT_IRET);
  1862. svm->vcpu.arch.hflags |= HF_IRET_MASK;
  1863. return 1;
  1864. }
  1865. static int invlpg_interception(struct vcpu_svm *svm)
  1866. {
  1867. if (emulate_instruction(&svm->vcpu, 0, 0, 0) != EMULATE_DONE)
  1868. pr_unimpl(&svm->vcpu, "%s: failed\n", __func__);
  1869. return 1;
  1870. }
  1871. static int emulate_on_interception(struct vcpu_svm *svm)
  1872. {
  1873. if (emulate_instruction(&svm->vcpu, 0, 0, 0) != EMULATE_DONE)
  1874. pr_unimpl(&svm->vcpu, "%s: failed\n", __func__);
  1875. return 1;
  1876. }
  1877. static int cr8_write_interception(struct vcpu_svm *svm)
  1878. {
  1879. struct kvm_run *kvm_run = svm->vcpu.run;
  1880. u8 cr8_prev = kvm_get_cr8(&svm->vcpu);
  1881. /* instruction emulation calls kvm_set_cr8() */
  1882. emulate_instruction(&svm->vcpu, 0, 0, 0);
  1883. if (irqchip_in_kernel(svm->vcpu.kvm)) {
  1884. svm->vmcb->control.intercept_cr_write &= ~INTERCEPT_CR8_MASK;
  1885. return 1;
  1886. }
  1887. if (cr8_prev <= kvm_get_cr8(&svm->vcpu))
  1888. return 1;
  1889. kvm_run->exit_reason = KVM_EXIT_SET_TPR;
  1890. return 0;
  1891. }
  1892. static int svm_get_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 *data)
  1893. {
  1894. struct vcpu_svm *svm = to_svm(vcpu);
  1895. switch (ecx) {
  1896. case MSR_IA32_TSC: {
  1897. u64 tsc_offset;
  1898. if (is_nested(svm))
  1899. tsc_offset = svm->nested.hsave->control.tsc_offset;
  1900. else
  1901. tsc_offset = svm->vmcb->control.tsc_offset;
  1902. *data = tsc_offset + native_read_tsc();
  1903. break;
  1904. }
  1905. case MSR_K6_STAR:
  1906. *data = svm->vmcb->save.star;
  1907. break;
  1908. #ifdef CONFIG_X86_64
  1909. case MSR_LSTAR:
  1910. *data = svm->vmcb->save.lstar;
  1911. break;
  1912. case MSR_CSTAR:
  1913. *data = svm->vmcb->save.cstar;
  1914. break;
  1915. case MSR_KERNEL_GS_BASE:
  1916. *data = svm->vmcb->save.kernel_gs_base;
  1917. break;
  1918. case MSR_SYSCALL_MASK:
  1919. *data = svm->vmcb->save.sfmask;
  1920. break;
  1921. #endif
  1922. case MSR_IA32_SYSENTER_CS:
  1923. *data = svm->vmcb->save.sysenter_cs;
  1924. break;
  1925. case MSR_IA32_SYSENTER_EIP:
  1926. *data = svm->sysenter_eip;
  1927. break;
  1928. case MSR_IA32_SYSENTER_ESP:
  1929. *data = svm->sysenter_esp;
  1930. break;
  1931. /*
  1932. * Nobody will change the following 5 values in the VMCB so we can
  1933. * safely return them on rdmsr. They will always be 0 until LBRV is
  1934. * implemented.
  1935. */
  1936. case MSR_IA32_DEBUGCTLMSR:
  1937. *data = svm->vmcb->save.dbgctl;
  1938. break;
  1939. case MSR_IA32_LASTBRANCHFROMIP:
  1940. *data = svm->vmcb->save.br_from;
  1941. break;
  1942. case MSR_IA32_LASTBRANCHTOIP:
  1943. *data = svm->vmcb->save.br_to;
  1944. break;
  1945. case MSR_IA32_LASTINTFROMIP:
  1946. *data = svm->vmcb->save.last_excp_from;
  1947. break;
  1948. case MSR_IA32_LASTINTTOIP:
  1949. *data = svm->vmcb->save.last_excp_to;
  1950. break;
  1951. case MSR_VM_HSAVE_PA:
  1952. *data = svm->nested.hsave_msr;
  1953. break;
  1954. case MSR_VM_CR:
  1955. *data = svm->nested.vm_cr_msr;
  1956. break;
  1957. case MSR_IA32_UCODE_REV:
  1958. *data = 0x01000065;
  1959. break;
  1960. default:
  1961. return kvm_get_msr_common(vcpu, ecx, data);
  1962. }
  1963. return 0;
  1964. }
  1965. static int rdmsr_interception(struct vcpu_svm *svm)
  1966. {
  1967. u32 ecx = svm->vcpu.arch.regs[VCPU_REGS_RCX];
  1968. u64 data;
  1969. if (svm_get_msr(&svm->vcpu, ecx, &data)) {
  1970. trace_kvm_msr_read_ex(ecx);
  1971. kvm_inject_gp(&svm->vcpu, 0);
  1972. } else {
  1973. trace_kvm_msr_read(ecx, data);
  1974. svm->vcpu.arch.regs[VCPU_REGS_RAX] = data & 0xffffffff;
  1975. svm->vcpu.arch.regs[VCPU_REGS_RDX] = data >> 32;
  1976. svm->next_rip = kvm_rip_read(&svm->vcpu) + 2;
  1977. skip_emulated_instruction(&svm->vcpu);
  1978. }
  1979. return 1;
  1980. }
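/*
 * Handle writes to MSR_VM_CR: only architecturally defined bits may be
 * set, SVM_LOCK and SVM_DIS become read-only once SVM_DIS is set, and
 * SVM_DIS cannot be turned on while EFER.SVME is still set.
 */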
  1981. static int svm_set_vm_cr(struct kvm_vcpu *vcpu, u64 data)
  1982. {
  1983. struct vcpu_svm *svm = to_svm(vcpu);
  1984. int svm_dis, chg_mask;
  1985. if (data & ~SVM_VM_CR_VALID_MASK)
  1986. return 1;
  1987. chg_mask = SVM_VM_CR_VALID_MASK;
  1988. if (svm->nested.vm_cr_msr & SVM_VM_CR_SVM_DIS_MASK)
  1989. chg_mask &= ~(SVM_VM_CR_SVM_LOCK_MASK | SVM_VM_CR_SVM_DIS_MASK);
  1990. svm->nested.vm_cr_msr &= ~chg_mask;
  1991. svm->nested.vm_cr_msr |= (data & chg_mask);
  1992. svm_dis = svm->nested.vm_cr_msr & SVM_VM_CR_SVM_DIS_MASK;
  1993. /* check for svm_disable while efer.svme is set */
  1994. if (svm_dis && (vcpu->arch.efer & EFER_SVME))
  1995. return 1;
  1996. return 0;
  1997. }
  1998. static int svm_set_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 data)
  1999. {
  2000. struct vcpu_svm *svm = to_svm(vcpu);
  2001. switch (ecx) {
  2002. case MSR_IA32_TSC: {
  2003. u64 tsc_offset = data - native_read_tsc();
  2004. u64 g_tsc_offset = 0;
  2005. if (is_nested(svm)) {
  2006. g_tsc_offset = svm->vmcb->control.tsc_offset -
  2007. svm->nested.hsave->control.tsc_offset;
  2008. svm->nested.hsave->control.tsc_offset = tsc_offset;
  2009. }
  2010. svm->vmcb->control.tsc_offset = tsc_offset + g_tsc_offset;
  2011. break;
  2012. }
  2013. case MSR_K6_STAR:
  2014. svm->vmcb->save.star = data;
  2015. break;
  2016. #ifdef CONFIG_X86_64
  2017. case MSR_LSTAR:
  2018. svm->vmcb->save.lstar = data;
  2019. break;
  2020. case MSR_CSTAR:
  2021. svm->vmcb->save.cstar = data;
  2022. break;
  2023. case MSR_KERNEL_GS_BASE:
  2024. svm->vmcb->save.kernel_gs_base = data;
  2025. break;
  2026. case MSR_SYSCALL_MASK:
  2027. svm->vmcb->save.sfmask = data;
  2028. break;
  2029. #endif
  2030. case MSR_IA32_SYSENTER_CS:
  2031. svm->vmcb->save.sysenter_cs = data;
  2032. break;
  2033. case MSR_IA32_SYSENTER_EIP:
  2034. svm->sysenter_eip = data;
  2035. svm->vmcb->save.sysenter_eip = data;
  2036. break;
  2037. case MSR_IA32_SYSENTER_ESP:
  2038. svm->sysenter_esp = data;
  2039. svm->vmcb->save.sysenter_esp = data;
  2040. break;
  2041. case MSR_IA32_DEBUGCTLMSR:
  2042. if (!svm_has(SVM_FEATURE_LBRV)) {
  2043. pr_unimpl(vcpu, "%s: MSR_IA32_DEBUGCTL 0x%llx, nop\n",
  2044. __func__, data);
  2045. break;
  2046. }
  2047. if (data & DEBUGCTL_RESERVED_BITS)
  2048. return 1;
  2049. svm->vmcb->save.dbgctl = data;
  2050. if (data & (1ULL<<0))
  2051. svm_enable_lbrv(svm);
  2052. else
  2053. svm_disable_lbrv(svm);
  2054. break;
  2055. case MSR_VM_HSAVE_PA:
  2056. svm->nested.hsave_msr = data;
  2057. break;
  2058. case MSR_VM_CR:
  2059. return svm_set_vm_cr(vcpu, data);
  2060. case MSR_VM_IGNNE:
  2061. pr_unimpl(vcpu, "unimplemented wrmsr: 0x%x data 0x%llx\n", ecx, data);
  2062. break;
  2063. default:
  2064. return kvm_set_msr_common(vcpu, ecx, data);
  2065. }
  2066. return 0;
  2067. }
  2068. static int wrmsr_interception(struct vcpu_svm *svm)
  2069. {
  2070. u32 ecx = svm->vcpu.arch.regs[VCPU_REGS_RCX];
  2071. u64 data = (svm->vcpu.arch.regs[VCPU_REGS_RAX] & -1u)
  2072. | ((u64)(svm->vcpu.arch.regs[VCPU_REGS_RDX] & -1u) << 32);
  2073. svm->next_rip = kvm_rip_read(&svm->vcpu) + 2;
  2074. if (svm_set_msr(&svm->vcpu, ecx, data)) {
  2075. trace_kvm_msr_write_ex(ecx, data);
  2076. kvm_inject_gp(&svm->vcpu, 0);
  2077. } else {
  2078. trace_kvm_msr_write(ecx, data);
  2079. skip_emulated_instruction(&svm->vcpu);
  2080. }
  2081. return 1;
  2082. }
  2083. static int msr_interception(struct vcpu_svm *svm)
  2084. {
  2085. if (svm->vmcb->control.exit_info_1)
  2086. return wrmsr_interception(svm);
  2087. else
  2088. return rdmsr_interception(svm);
  2089. }
  2090. static int interrupt_window_interception(struct vcpu_svm *svm)
  2091. {
  2092. struct kvm_run *kvm_run = svm->vcpu.run;
  2093. svm_clear_vintr(svm);
  2094. svm->vmcb->control.int_ctl &= ~V_IRQ_MASK;
/*
 * If user space is waiting to inject interrupts, exit as soon as
 * possible.
 */
  2099. if (!irqchip_in_kernel(svm->vcpu.kvm) &&
  2100. kvm_run->request_interrupt_window &&
  2101. !kvm_cpu_has_interrupt(&svm->vcpu)) {
  2102. ++svm->vcpu.stat.irq_window_exits;
  2103. kvm_run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN;
  2104. return 0;
  2105. }
  2106. return 1;
  2107. }
  2108. static int pause_interception(struct vcpu_svm *svm)
  2109. {
  2110. kvm_vcpu_on_spin(&(svm->vcpu));
  2111. return 1;
  2112. }
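/*
 * Dispatch table indexed by the VMCB exit code.  Most simple exits are
 * punted to the instruction emulator; the interesting ones have their
 * own handlers above.
 */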
  2113. static int (*svm_exit_handlers[])(struct vcpu_svm *svm) = {
  2114. [SVM_EXIT_READ_CR0] = emulate_on_interception,
  2115. [SVM_EXIT_READ_CR3] = emulate_on_interception,
  2116. [SVM_EXIT_READ_CR4] = emulate_on_interception,
  2117. [SVM_EXIT_READ_CR8] = emulate_on_interception,
  2118. [SVM_EXIT_CR0_SEL_WRITE] = emulate_on_interception,
  2119. [SVM_EXIT_WRITE_CR0] = emulate_on_interception,
  2120. [SVM_EXIT_WRITE_CR3] = emulate_on_interception,
  2121. [SVM_EXIT_WRITE_CR4] = emulate_on_interception,
  2122. [SVM_EXIT_WRITE_CR8] = cr8_write_interception,
  2123. [SVM_EXIT_READ_DR0] = emulate_on_interception,
  2124. [SVM_EXIT_READ_DR1] = emulate_on_interception,
  2125. [SVM_EXIT_READ_DR2] = emulate_on_interception,
  2126. [SVM_EXIT_READ_DR3] = emulate_on_interception,
  2127. [SVM_EXIT_READ_DR4] = emulate_on_interception,
  2128. [SVM_EXIT_READ_DR5] = emulate_on_interception,
  2129. [SVM_EXIT_READ_DR6] = emulate_on_interception,
  2130. [SVM_EXIT_READ_DR7] = emulate_on_interception,
  2131. [SVM_EXIT_WRITE_DR0] = emulate_on_interception,
  2132. [SVM_EXIT_WRITE_DR1] = emulate_on_interception,
  2133. [SVM_EXIT_WRITE_DR2] = emulate_on_interception,
  2134. [SVM_EXIT_WRITE_DR3] = emulate_on_interception,
  2135. [SVM_EXIT_WRITE_DR4] = emulate_on_interception,
  2136. [SVM_EXIT_WRITE_DR5] = emulate_on_interception,
  2137. [SVM_EXIT_WRITE_DR6] = emulate_on_interception,
  2138. [SVM_EXIT_WRITE_DR7] = emulate_on_interception,
  2139. [SVM_EXIT_EXCP_BASE + DB_VECTOR] = db_interception,
  2140. [SVM_EXIT_EXCP_BASE + BP_VECTOR] = bp_interception,
  2141. [SVM_EXIT_EXCP_BASE + UD_VECTOR] = ud_interception,
  2142. [SVM_EXIT_EXCP_BASE + PF_VECTOR] = pf_interception,
  2143. [SVM_EXIT_EXCP_BASE + NM_VECTOR] = nm_interception,
  2144. [SVM_EXIT_EXCP_BASE + MC_VECTOR] = mc_interception,
  2145. [SVM_EXIT_INTR] = intr_interception,
  2146. [SVM_EXIT_NMI] = nmi_interception,
  2147. [SVM_EXIT_SMI] = nop_on_interception,
  2148. [SVM_EXIT_INIT] = nop_on_interception,
  2149. [SVM_EXIT_VINTR] = interrupt_window_interception,
  2150. [SVM_EXIT_CPUID] = cpuid_interception,
  2151. [SVM_EXIT_IRET] = iret_interception,
  2152. [SVM_EXIT_INVD] = emulate_on_interception,
  2153. [SVM_EXIT_PAUSE] = pause_interception,
  2154. [SVM_EXIT_HLT] = halt_interception,
  2155. [SVM_EXIT_INVLPG] = invlpg_interception,
  2156. [SVM_EXIT_INVLPGA] = invlpga_interception,
  2157. [SVM_EXIT_IOIO] = io_interception,
  2158. [SVM_EXIT_MSR] = msr_interception,
  2159. [SVM_EXIT_TASK_SWITCH] = task_switch_interception,
  2160. [SVM_EXIT_SHUTDOWN] = shutdown_interception,
  2161. [SVM_EXIT_VMRUN] = vmrun_interception,
  2162. [SVM_EXIT_VMMCALL] = vmmcall_interception,
  2163. [SVM_EXIT_VMLOAD] = vmload_interception,
  2164. [SVM_EXIT_VMSAVE] = vmsave_interception,
  2165. [SVM_EXIT_STGI] = stgi_interception,
  2166. [SVM_EXIT_CLGI] = clgi_interception,
  2167. [SVM_EXIT_SKINIT] = skinit_interception,
  2168. [SVM_EXIT_WBINVD] = emulate_on_interception,
  2169. [SVM_EXIT_MONITOR] = invalid_op_interception,
  2170. [SVM_EXIT_MWAIT] = invalid_op_interception,
  2171. [SVM_EXIT_NPF] = pf_interception,
  2172. };
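/*
 * Top-level #vmexit handler: first give a nested hypervisor the chance
 * to claim the exit, then complete any pending event injection and
 * finally dispatch through svm_exit_handlers[].
 */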
  2173. static int handle_exit(struct kvm_vcpu *vcpu)
  2174. {
  2175. struct vcpu_svm *svm = to_svm(vcpu);
  2176. struct kvm_run *kvm_run = vcpu->run;
  2177. u32 exit_code = svm->vmcb->control.exit_code;
  2178. trace_kvm_exit(exit_code, vcpu);
  2179. if (!(svm->vmcb->control.intercept_cr_write & INTERCEPT_CR0_MASK))
  2180. vcpu->arch.cr0 = svm->vmcb->save.cr0;
  2181. if (npt_enabled)
  2182. vcpu->arch.cr3 = svm->vmcb->save.cr3;
  2183. if (unlikely(svm->nested.exit_required)) {
  2184. nested_svm_vmexit(svm);
  2185. svm->nested.exit_required = false;
  2186. return 1;
  2187. }
  2188. if (is_nested(svm)) {
  2189. int vmexit;
  2190. trace_kvm_nested_vmexit(svm->vmcb->save.rip, exit_code,
  2191. svm->vmcb->control.exit_info_1,
  2192. svm->vmcb->control.exit_info_2,
  2193. svm->vmcb->control.exit_int_info,
  2194. svm->vmcb->control.exit_int_info_err);
  2195. vmexit = nested_svm_exit_special(svm);
  2196. if (vmexit == NESTED_EXIT_CONTINUE)
  2197. vmexit = nested_svm_exit_handled(svm);
  2198. if (vmexit == NESTED_EXIT_DONE)
  2199. return 1;
  2200. }
  2201. svm_complete_interrupts(svm);
  2202. if (svm->vmcb->control.exit_code == SVM_EXIT_ERR) {
  2203. kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY;
  2204. kvm_run->fail_entry.hardware_entry_failure_reason
  2205. = svm->vmcb->control.exit_code;
  2206. return 0;
  2207. }
  2208. if (is_external_interrupt(svm->vmcb->control.exit_int_info) &&
  2209. exit_code != SVM_EXIT_EXCP_BASE + PF_VECTOR &&
  2210. exit_code != SVM_EXIT_NPF && exit_code != SVM_EXIT_TASK_SWITCH)
printk(KERN_ERR "%s: unexpected exit_int_info 0x%x "
"exit_code 0x%x\n",
__func__, svm->vmcb->control.exit_int_info,
exit_code);
  2215. if (exit_code >= ARRAY_SIZE(svm_exit_handlers)
  2216. || !svm_exit_handlers[exit_code]) {
  2217. kvm_run->exit_reason = KVM_EXIT_UNKNOWN;
  2218. kvm_run->hw.hardware_exit_reason = exit_code;
  2219. return 0;
  2220. }
  2221. return svm_exit_handlers[exit_code](svm);
  2222. }
  2223. static void reload_tss(struct kvm_vcpu *vcpu)
  2224. {
  2225. int cpu = raw_smp_processor_id();
  2226. struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
  2227. sd->tss_desc->type = 9; /* available 32/64-bit TSS */
  2228. load_TR_desc();
  2229. }
  2230. static void pre_svm_run(struct vcpu_svm *svm)
  2231. {
  2232. int cpu = raw_smp_processor_id();
  2233. struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
  2234. svm->vmcb->control.tlb_ctl = TLB_CONTROL_DO_NOTHING;
  2235. /* FIXME: handle wraparound of asid_generation */
  2236. if (svm->asid_generation != sd->asid_generation)
  2237. new_asid(svm, sd);
  2238. }
  2239. static void svm_inject_nmi(struct kvm_vcpu *vcpu)
  2240. {
  2241. struct vcpu_svm *svm = to_svm(vcpu);
  2242. svm->vmcb->control.event_inj = SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_NMI;
  2243. vcpu->arch.hflags |= HF_NMI_MASK;
  2244. svm->vmcb->control.intercept |= (1ULL << INTERCEPT_IRET);
  2245. ++vcpu->stat.nmi_injections;
  2246. }
  2247. static inline void svm_inject_irq(struct vcpu_svm *svm, int irq)
  2248. {
  2249. struct vmcb_control_area *control;
  2250. trace_kvm_inj_virq(irq);
  2251. ++svm->vcpu.stat.irq_injections;
  2252. control = &svm->vmcb->control;
  2253. control->int_vector = irq;
  2254. control->int_ctl &= ~V_INTR_PRIO_MASK;
  2255. control->int_ctl |= V_IRQ_MASK |
  2256. ((/*control->int_vector >> 4*/ 0xf) << V_INTR_PRIO_SHIFT);
  2257. }
  2258. static void svm_set_irq(struct kvm_vcpu *vcpu)
  2259. {
  2260. struct vcpu_svm *svm = to_svm(vcpu);
  2261. BUG_ON(!(gif_set(svm)));
  2262. svm->vmcb->control.event_inj = vcpu->arch.interrupt.nr |
  2263. SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_INTR;
  2264. }
  2265. static void update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr)
  2266. {
  2267. struct vcpu_svm *svm = to_svm(vcpu);
  2268. if (is_nested(svm) && (vcpu->arch.hflags & HF_VINTR_MASK))
  2269. return;
  2270. if (irr == -1)
  2271. return;
  2272. if (tpr >= irr)
  2273. svm->vmcb->control.intercept_cr_write |= INTERCEPT_CR8_MASK;
  2274. }
  2275. static int svm_nmi_allowed(struct kvm_vcpu *vcpu)
  2276. {
  2277. struct vcpu_svm *svm = to_svm(vcpu);
  2278. struct vmcb *vmcb = svm->vmcb;
  2279. int ret;
  2280. ret = !(vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK) &&
  2281. !(svm->vcpu.arch.hflags & HF_NMI_MASK);
  2282. ret = ret && gif_set(svm) && nested_svm_nmi(svm);
  2283. return ret;
  2284. }
  2285. static bool svm_get_nmi_mask(struct kvm_vcpu *vcpu)
  2286. {
  2287. struct vcpu_svm *svm = to_svm(vcpu);
  2288. return !!(svm->vcpu.arch.hflags & HF_NMI_MASK);
  2289. }
  2290. static void svm_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked)
  2291. {
  2292. struct vcpu_svm *svm = to_svm(vcpu);
  2293. if (masked) {
  2294. svm->vcpu.arch.hflags |= HF_NMI_MASK;
  2295. svm->vmcb->control.intercept |= (1ULL << INTERCEPT_IRET);
  2296. } else {
  2297. svm->vcpu.arch.hflags &= ~HF_NMI_MASK;
  2298. svm->vmcb->control.intercept &= ~(1ULL << INTERCEPT_IRET);
  2299. }
  2300. }
  2301. static int svm_interrupt_allowed(struct kvm_vcpu *vcpu)
  2302. {
  2303. struct vcpu_svm *svm = to_svm(vcpu);
  2304. struct vmcb *vmcb = svm->vmcb;
  2305. int ret;
  2306. if (!gif_set(svm) ||
  2307. (vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK))
  2308. return 0;
  2309. ret = !!(vmcb->save.rflags & X86_EFLAGS_IF);
  2310. if (is_nested(svm))
  2311. return ret && !(svm->vcpu.arch.hflags & HF_VINTR_MASK);
  2312. return ret;
  2313. }
  2314. static void enable_irq_window(struct kvm_vcpu *vcpu)
  2315. {
  2316. struct vcpu_svm *svm = to_svm(vcpu);
  2317. /*
  2318. * In case GIF=0 we can't rely on the CPU to tell us when GIF becomes
  2319. * 1, because that's a separate STGI/VMRUN intercept. The next time we
  2320. * get that intercept, this function will be called again though and
  2321. * we'll get the vintr intercept.
  2322. */
  2323. if (gif_set(svm) && nested_svm_intr(svm)) {
  2324. svm_set_vintr(svm);
  2325. svm_inject_irq(svm, 0x0);
  2326. }
  2327. }
  2328. static void enable_nmi_window(struct kvm_vcpu *vcpu)
  2329. {
  2330. struct vcpu_svm *svm = to_svm(vcpu);
  2331. if ((svm->vcpu.arch.hflags & (HF_NMI_MASK | HF_IRET_MASK))
  2332. == HF_NMI_MASK)
  2333. return; /* IRET will cause a vm exit */
/*
 * Something prevents the NMI from being injected.  Single-step over the
 * possible problem (IRET, exception injection or interrupt shadow).
 */
  2338. svm->nmi_singlestep = true;
  2339. svm->vmcb->save.rflags |= (X86_EFLAGS_TF | X86_EFLAGS_RF);
  2340. update_db_intercept(vcpu);
  2341. }
  2342. static int svm_set_tss_addr(struct kvm *kvm, unsigned int addr)
  2343. {
  2344. return 0;
  2345. }
  2346. static void svm_flush_tlb(struct kvm_vcpu *vcpu)
  2347. {
  2348. force_new_asid(vcpu);
  2349. }
  2350. static void svm_prepare_guest_switch(struct kvm_vcpu *vcpu)
  2351. {
  2352. }
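/*
 * Keep the APIC TPR and the VMCB's V_TPR field in sync in both
 * directions around a guest run, unless a nested guest controls virtual
 * interrupt masking itself.
 */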
  2353. static inline void sync_cr8_to_lapic(struct kvm_vcpu *vcpu)
  2354. {
  2355. struct vcpu_svm *svm = to_svm(vcpu);
  2356. if (is_nested(svm) && (vcpu->arch.hflags & HF_VINTR_MASK))
  2357. return;
  2358. if (!(svm->vmcb->control.intercept_cr_write & INTERCEPT_CR8_MASK)) {
  2359. int cr8 = svm->vmcb->control.int_ctl & V_TPR_MASK;
  2360. kvm_set_cr8(vcpu, cr8);
  2361. }
  2362. }
  2363. static inline void sync_lapic_to_cr8(struct kvm_vcpu *vcpu)
  2364. {
  2365. struct vcpu_svm *svm = to_svm(vcpu);
  2366. u64 cr8;
  2367. if (is_nested(svm) && (vcpu->arch.hflags & HF_VINTR_MASK))
  2368. return;
  2369. cr8 = kvm_get_cr8(vcpu);
  2370. svm->vmcb->control.int_ctl &= ~V_TPR_MASK;
  2371. svm->vmcb->control.int_ctl |= cr8 & V_TPR_MASK;
  2372. }
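/*
 * After a #vmexit, re-queue whatever event was in flight (NMI, exception
 * or external interrupt) so it is injected again on the next entry.
 * Software exceptions are re-executed rather than reinjected.
 */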
  2373. static void svm_complete_interrupts(struct vcpu_svm *svm)
  2374. {
  2375. u8 vector;
  2376. int type;
  2377. u32 exitintinfo = svm->vmcb->control.exit_int_info;
  2378. unsigned int3_injected = svm->int3_injected;
  2379. svm->int3_injected = 0;
  2380. if (svm->vcpu.arch.hflags & HF_IRET_MASK)
  2381. svm->vcpu.arch.hflags &= ~(HF_NMI_MASK | HF_IRET_MASK);
  2382. svm->vcpu.arch.nmi_injected = false;
  2383. kvm_clear_exception_queue(&svm->vcpu);
  2384. kvm_clear_interrupt_queue(&svm->vcpu);
  2385. if (!(exitintinfo & SVM_EXITINTINFO_VALID))
  2386. return;
  2387. vector = exitintinfo & SVM_EXITINTINFO_VEC_MASK;
  2388. type = exitintinfo & SVM_EXITINTINFO_TYPE_MASK;
  2389. switch (type) {
  2390. case SVM_EXITINTINFO_TYPE_NMI:
  2391. svm->vcpu.arch.nmi_injected = true;
  2392. break;
  2393. case SVM_EXITINTINFO_TYPE_EXEPT:
  2394. /*
  2395. * In case of software exceptions, do not reinject the vector,
  2396. * but re-execute the instruction instead. Rewind RIP first
  2397. * if we emulated INT3 before.
  2398. */
  2399. if (kvm_exception_is_soft(vector)) {
  2400. if (vector == BP_VECTOR && int3_injected &&
  2401. kvm_is_linear_rip(&svm->vcpu, svm->int3_rip))
  2402. kvm_rip_write(&svm->vcpu,
  2403. kvm_rip_read(&svm->vcpu) -
  2404. int3_injected);
  2405. break;
  2406. }
  2407. if (exitintinfo & SVM_EXITINTINFO_VALID_ERR) {
  2408. u32 err = svm->vmcb->control.exit_int_info_err;
  2409. kvm_requeue_exception_e(&svm->vcpu, vector, err);
  2410. } else
  2411. kvm_requeue_exception(&svm->vcpu, vector);
  2412. break;
  2413. case SVM_EXITINTINFO_TYPE_INTR:
  2414. kvm_queue_interrupt(&svm->vcpu, vector, false);
  2415. break;
  2416. default:
  2417. break;
  2418. }
  2419. }
  2420. #ifdef CONFIG_X86_64
  2421. #define R "r"
  2422. #else
  2423. #define R "e"
  2424. #endif
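/*
 * The guest entry/exit path: world switch via VMLOAD/VMRUN/VMSAVE with
 * GIF cleared around it.  Guest GPRs are shuffled in and out by hand
 * because the hardware only switches RAX/RSP/RIP through the VMCB.
 */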
  2425. static void svm_vcpu_run(struct kvm_vcpu *vcpu)
  2426. {
  2427. struct vcpu_svm *svm = to_svm(vcpu);
  2428. u16 fs_selector;
  2429. u16 gs_selector;
  2430. u16 ldt_selector;
  2431. svm->vmcb->save.rax = vcpu->arch.regs[VCPU_REGS_RAX];
  2432. svm->vmcb->save.rsp = vcpu->arch.regs[VCPU_REGS_RSP];
  2433. svm->vmcb->save.rip = vcpu->arch.regs[VCPU_REGS_RIP];
  2434. /*
  2435. * A vmexit emulation is required before the vcpu can be executed
  2436. * again.
  2437. */
  2438. if (unlikely(svm->nested.exit_required))
  2439. return;
  2440. pre_svm_run(svm);
  2441. sync_lapic_to_cr8(vcpu);
  2442. save_host_msrs(vcpu);
  2443. fs_selector = kvm_read_fs();
  2444. gs_selector = kvm_read_gs();
  2445. ldt_selector = kvm_read_ldt();
  2446. svm->vmcb->save.cr2 = vcpu->arch.cr2;
  2447. /* required for live migration with NPT */
  2448. if (npt_enabled)
  2449. svm->vmcb->save.cr3 = vcpu->arch.cr3;
  2450. clgi();
  2451. local_irq_enable();
  2452. asm volatile (
  2453. "push %%"R"bp; \n\t"
  2454. "mov %c[rbx](%[svm]), %%"R"bx \n\t"
  2455. "mov %c[rcx](%[svm]), %%"R"cx \n\t"
  2456. "mov %c[rdx](%[svm]), %%"R"dx \n\t"
  2457. "mov %c[rsi](%[svm]), %%"R"si \n\t"
  2458. "mov %c[rdi](%[svm]), %%"R"di \n\t"
  2459. "mov %c[rbp](%[svm]), %%"R"bp \n\t"
  2460. #ifdef CONFIG_X86_64
  2461. "mov %c[r8](%[svm]), %%r8 \n\t"
  2462. "mov %c[r9](%[svm]), %%r9 \n\t"
  2463. "mov %c[r10](%[svm]), %%r10 \n\t"
  2464. "mov %c[r11](%[svm]), %%r11 \n\t"
  2465. "mov %c[r12](%[svm]), %%r12 \n\t"
  2466. "mov %c[r13](%[svm]), %%r13 \n\t"
  2467. "mov %c[r14](%[svm]), %%r14 \n\t"
  2468. "mov %c[r15](%[svm]), %%r15 \n\t"
  2469. #endif
  2470. /* Enter guest mode */
  2471. "push %%"R"ax \n\t"
  2472. "mov %c[vmcb](%[svm]), %%"R"ax \n\t"
  2473. __ex(SVM_VMLOAD) "\n\t"
  2474. __ex(SVM_VMRUN) "\n\t"
  2475. __ex(SVM_VMSAVE) "\n\t"
  2476. "pop %%"R"ax \n\t"
  2477. /* Save guest registers, load host registers */
  2478. "mov %%"R"bx, %c[rbx](%[svm]) \n\t"
  2479. "mov %%"R"cx, %c[rcx](%[svm]) \n\t"
  2480. "mov %%"R"dx, %c[rdx](%[svm]) \n\t"
  2481. "mov %%"R"si, %c[rsi](%[svm]) \n\t"
  2482. "mov %%"R"di, %c[rdi](%[svm]) \n\t"
  2483. "mov %%"R"bp, %c[rbp](%[svm]) \n\t"
  2484. #ifdef CONFIG_X86_64
  2485. "mov %%r8, %c[r8](%[svm]) \n\t"
  2486. "mov %%r9, %c[r9](%[svm]) \n\t"
  2487. "mov %%r10, %c[r10](%[svm]) \n\t"
  2488. "mov %%r11, %c[r11](%[svm]) \n\t"
  2489. "mov %%r12, %c[r12](%[svm]) \n\t"
  2490. "mov %%r13, %c[r13](%[svm]) \n\t"
  2491. "mov %%r14, %c[r14](%[svm]) \n\t"
  2492. "mov %%r15, %c[r15](%[svm]) \n\t"
  2493. #endif
  2494. "pop %%"R"bp"
  2495. :
  2496. : [svm]"a"(svm),
  2497. [vmcb]"i"(offsetof(struct vcpu_svm, vmcb_pa)),
  2498. [rbx]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RBX])),
  2499. [rcx]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RCX])),
  2500. [rdx]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RDX])),
  2501. [rsi]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RSI])),
  2502. [rdi]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RDI])),
  2503. [rbp]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RBP]))
  2504. #ifdef CONFIG_X86_64
  2505. , [r8]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R8])),
  2506. [r9]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R9])),
  2507. [r10]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R10])),
  2508. [r11]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R11])),
  2509. [r12]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R12])),
  2510. [r13]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R13])),
  2511. [r14]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R14])),
  2512. [r15]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R15]))
  2513. #endif
  2514. : "cc", "memory"
  2515. , R"bx", R"cx", R"dx", R"si", R"di"
  2516. #ifdef CONFIG_X86_64
  2517. , "r8", "r9", "r10", "r11" , "r12", "r13", "r14", "r15"
  2518. #endif
  2519. );
  2520. vcpu->arch.cr2 = svm->vmcb->save.cr2;
  2521. vcpu->arch.regs[VCPU_REGS_RAX] = svm->vmcb->save.rax;
  2522. vcpu->arch.regs[VCPU_REGS_RSP] = svm->vmcb->save.rsp;
  2523. vcpu->arch.regs[VCPU_REGS_RIP] = svm->vmcb->save.rip;
  2524. kvm_load_fs(fs_selector);
  2525. kvm_load_gs(gs_selector);
  2526. kvm_load_ldt(ldt_selector);
  2527. load_host_msrs(vcpu);
  2528. reload_tss(vcpu);
  2529. local_irq_disable();
  2530. stgi();
  2531. sync_cr8_to_lapic(vcpu);
  2532. svm->next_rip = 0;
  2533. if (npt_enabled) {
  2534. vcpu->arch.regs_avail &= ~(1 << VCPU_EXREG_PDPTR);
  2535. vcpu->arch.regs_dirty &= ~(1 << VCPU_EXREG_PDPTR);
  2536. }
  2537. }
  2538. #undef R
  2539. static void svm_set_cr3(struct kvm_vcpu *vcpu, unsigned long root)
  2540. {
  2541. struct vcpu_svm *svm = to_svm(vcpu);
  2542. if (npt_enabled) {
  2543. svm->vmcb->control.nested_cr3 = root;
  2544. force_new_asid(vcpu);
  2545. return;
  2546. }
  2547. svm->vmcb->save.cr3 = root;
  2548. force_new_asid(vcpu);
  2549. }
  2550. static int is_disabled(void)
  2551. {
  2552. u64 vm_cr;
  2553. rdmsrl(MSR_VM_CR, vm_cr);
  2554. if (vm_cr & (1 << SVM_VM_CR_SVM_DISABLE))
  2555. return 1;
  2556. return 0;
  2557. }
  2558. static void
  2559. svm_patch_hypercall(struct kvm_vcpu *vcpu, unsigned char *hypercall)
  2560. {
  2561. /*
  2562. * Patch in the VMMCALL instruction:
  2563. */
  2564. hypercall[0] = 0x0f;
  2565. hypercall[1] = 0x01;
  2566. hypercall[2] = 0xd9;
  2567. }
  2568. static void svm_check_processor_compat(void *rtn)
  2569. {
  2570. *(int *)rtn = 0;
  2571. }
  2572. static bool svm_cpu_has_accelerated_tpr(void)
  2573. {
  2574. return false;
  2575. }
  2576. static int get_npt_level(void)
  2577. {
  2578. #ifdef CONFIG_X86_64
  2579. return PT64_ROOT_LEVEL;
  2580. #else
  2581. return PT32E_ROOT_LEVEL;
  2582. #endif
  2583. }
  2584. static u64 svm_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio)
  2585. {
  2586. return 0;
  2587. }
  2588. static void svm_cpuid_update(struct kvm_vcpu *vcpu)
  2589. {
  2590. }
  2591. static void svm_set_supported_cpuid(u32 func, struct kvm_cpuid_entry2 *entry)
  2592. {
  2593. switch (func) {
  2594. case 0x8000000A:
  2595. entry->eax = 1; /* SVM revision 1 */
entry->ebx = 8; /* Let's support 8 ASIDs in case we add proper
ASID emulation to nested SVM */
  2598. entry->ecx = 0; /* Reserved */
  2599. entry->edx = 0; /* Do not support any additional features */
  2600. break;
  2601. }
  2602. }
  2603. static const struct trace_print_flags svm_exit_reasons_str[] = {
  2604. { SVM_EXIT_READ_CR0, "read_cr0" },
  2605. { SVM_EXIT_READ_CR3, "read_cr3" },
  2606. { SVM_EXIT_READ_CR4, "read_cr4" },
  2607. { SVM_EXIT_READ_CR8, "read_cr8" },
  2608. { SVM_EXIT_WRITE_CR0, "write_cr0" },
  2609. { SVM_EXIT_WRITE_CR3, "write_cr3" },
  2610. { SVM_EXIT_WRITE_CR4, "write_cr4" },
  2611. { SVM_EXIT_WRITE_CR8, "write_cr8" },
  2612. { SVM_EXIT_READ_DR0, "read_dr0" },
  2613. { SVM_EXIT_READ_DR1, "read_dr1" },
  2614. { SVM_EXIT_READ_DR2, "read_dr2" },
  2615. { SVM_EXIT_READ_DR3, "read_dr3" },
  2616. { SVM_EXIT_WRITE_DR0, "write_dr0" },
  2617. { SVM_EXIT_WRITE_DR1, "write_dr1" },
  2618. { SVM_EXIT_WRITE_DR2, "write_dr2" },
  2619. { SVM_EXIT_WRITE_DR3, "write_dr3" },
  2620. { SVM_EXIT_WRITE_DR5, "write_dr5" },
  2621. { SVM_EXIT_WRITE_DR7, "write_dr7" },
  2622. { SVM_EXIT_EXCP_BASE + DB_VECTOR, "DB excp" },
  2623. { SVM_EXIT_EXCP_BASE + BP_VECTOR, "BP excp" },
  2624. { SVM_EXIT_EXCP_BASE + UD_VECTOR, "UD excp" },
  2625. { SVM_EXIT_EXCP_BASE + PF_VECTOR, "PF excp" },
  2626. { SVM_EXIT_EXCP_BASE + NM_VECTOR, "NM excp" },
  2627. { SVM_EXIT_EXCP_BASE + MC_VECTOR, "MC excp" },
  2628. { SVM_EXIT_INTR, "interrupt" },
  2629. { SVM_EXIT_NMI, "nmi" },
  2630. { SVM_EXIT_SMI, "smi" },
  2631. { SVM_EXIT_INIT, "init" },
  2632. { SVM_EXIT_VINTR, "vintr" },
  2633. { SVM_EXIT_CPUID, "cpuid" },
  2634. { SVM_EXIT_INVD, "invd" },
  2635. { SVM_EXIT_HLT, "hlt" },
  2636. { SVM_EXIT_INVLPG, "invlpg" },
  2637. { SVM_EXIT_INVLPGA, "invlpga" },
  2638. { SVM_EXIT_IOIO, "io" },
  2639. { SVM_EXIT_MSR, "msr" },
  2640. { SVM_EXIT_TASK_SWITCH, "task_switch" },
  2641. { SVM_EXIT_SHUTDOWN, "shutdown" },
  2642. { SVM_EXIT_VMRUN, "vmrun" },
  2643. { SVM_EXIT_VMMCALL, "hypercall" },
  2644. { SVM_EXIT_VMLOAD, "vmload" },
  2645. { SVM_EXIT_VMSAVE, "vmsave" },
  2646. { SVM_EXIT_STGI, "stgi" },
  2647. { SVM_EXIT_CLGI, "clgi" },
  2648. { SVM_EXIT_SKINIT, "skinit" },
  2649. { SVM_EXIT_WBINVD, "wbinvd" },
  2650. { SVM_EXIT_MONITOR, "monitor" },
  2651. { SVM_EXIT_MWAIT, "mwait" },
  2652. { SVM_EXIT_NPF, "npf" },
  2653. { -1, NULL }
  2654. };
  2655. static int svm_get_lpage_level(void)
  2656. {
  2657. return PT_PDPE_LEVEL;
  2658. }
  2659. static bool svm_rdtscp_supported(void)
  2660. {
  2661. return false;
  2662. }
  2663. static void svm_fpu_deactivate(struct kvm_vcpu *vcpu)
  2664. {
  2665. struct vcpu_svm *svm = to_svm(vcpu);
  2666. svm->vmcb->control.intercept_exceptions |= 1 << NM_VECTOR;
  2667. if (is_nested(svm))
  2668. svm->nested.hsave->control.intercept_exceptions |= 1 << NM_VECTOR;
  2669. update_cr0_intercept(svm);
  2670. }
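/* The kvm_x86_ops instance that hooks SVM support into the generic KVM code. */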
  2671. static struct kvm_x86_ops svm_x86_ops = {
  2672. .cpu_has_kvm_support = has_svm,
  2673. .disabled_by_bios = is_disabled,
  2674. .hardware_setup = svm_hardware_setup,
  2675. .hardware_unsetup = svm_hardware_unsetup,
  2676. .check_processor_compatibility = svm_check_processor_compat,
  2677. .hardware_enable = svm_hardware_enable,
  2678. .hardware_disable = svm_hardware_disable,
  2679. .cpu_has_accelerated_tpr = svm_cpu_has_accelerated_tpr,
  2680. .vcpu_create = svm_create_vcpu,
  2681. .vcpu_free = svm_free_vcpu,
  2682. .vcpu_reset = svm_vcpu_reset,
  2683. .prepare_guest_switch = svm_prepare_guest_switch,
  2684. .vcpu_load = svm_vcpu_load,
  2685. .vcpu_put = svm_vcpu_put,
  2686. .set_guest_debug = svm_guest_debug,
  2687. .get_msr = svm_get_msr,
  2688. .set_msr = svm_set_msr,
  2689. .get_segment_base = svm_get_segment_base,
  2690. .get_segment = svm_get_segment,
  2691. .set_segment = svm_set_segment,
  2692. .get_cpl = svm_get_cpl,
  2693. .get_cs_db_l_bits = kvm_get_cs_db_l_bits,
  2694. .decache_cr0_guest_bits = svm_decache_cr0_guest_bits,
  2695. .decache_cr4_guest_bits = svm_decache_cr4_guest_bits,
  2696. .set_cr0 = svm_set_cr0,
  2697. .set_cr3 = svm_set_cr3,
  2698. .set_cr4 = svm_set_cr4,
  2699. .set_efer = svm_set_efer,
  2700. .get_idt = svm_get_idt,
  2701. .set_idt = svm_set_idt,
  2702. .get_gdt = svm_get_gdt,
  2703. .set_gdt = svm_set_gdt,
  2704. .set_dr7 = svm_set_dr7,
  2705. .cache_reg = svm_cache_reg,
  2706. .get_rflags = svm_get_rflags,
  2707. .set_rflags = svm_set_rflags,
  2708. .fpu_activate = svm_fpu_activate,
  2709. .fpu_deactivate = svm_fpu_deactivate,
  2710. .tlb_flush = svm_flush_tlb,
  2711. .run = svm_vcpu_run,
  2712. .handle_exit = handle_exit,
  2713. .skip_emulated_instruction = skip_emulated_instruction,
  2714. .set_interrupt_shadow = svm_set_interrupt_shadow,
  2715. .get_interrupt_shadow = svm_get_interrupt_shadow,
  2716. .patch_hypercall = svm_patch_hypercall,
  2717. .set_irq = svm_set_irq,
  2718. .set_nmi = svm_inject_nmi,
  2719. .queue_exception = svm_queue_exception,
  2720. .interrupt_allowed = svm_interrupt_allowed,
  2721. .nmi_allowed = svm_nmi_allowed,
  2722. .get_nmi_mask = svm_get_nmi_mask,
  2723. .set_nmi_mask = svm_set_nmi_mask,
  2724. .enable_nmi_window = enable_nmi_window,
  2725. .enable_irq_window = enable_irq_window,
  2726. .update_cr8_intercept = update_cr8_intercept,
  2727. .set_tss_addr = svm_set_tss_addr,
  2728. .get_tdp_level = get_npt_level,
  2729. .get_mt_mask = svm_get_mt_mask,
  2730. .exit_reasons_str = svm_exit_reasons_str,
  2731. .get_lpage_level = svm_get_lpage_level,
  2732. .cpuid_update = svm_cpuid_update,
  2733. .rdtscp_supported = svm_rdtscp_supported,
  2734. .set_supported_cpuid = svm_set_supported_cpuid,
  2735. };
  2736. static int __init svm_init(void)
  2737. {
  2738. return kvm_init(&svm_x86_ops, sizeof(struct vcpu_svm),
  2739. __alignof__(struct vcpu_svm), THIS_MODULE);
  2740. }
  2741. static void __exit svm_exit(void)
  2742. {
  2743. kvm_exit();
  2744. }
  2745. module_init(svm_init)
  2746. module_exit(svm_exit)