kvm_main.c 47 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620621622623624625626627628629630631632633634635636637638639640641642643644645646647648649650651652653654655656657658659660661662663664665666667668669670671672673674675676677678679680681682683684685686687688689690691692693694695696697698699700701702703704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978979980981982983984985986987988989990991992993994995996997998999100010011002100310041005100610071008100910101011101210131014101510161017101810191020102110221023102410251026102710281029103010311032103310341035103610371038103910401041104210431044104510461047104810491050105110521053105410551056105710581059106010611062106310641065106610671068106910701071107210731074107510761077107810791080108110821083108410851086108710881089109010911092109310941095109610971098109911001101110211031104110511061107110811091110111111121113111411151116111711181119112011211122112311241125112611271128112911301131113211331134113511361137113811391140114111421143114411451146114711481149115011511152115311541155115611571158115911601161116211631164116511
661167116811691170117111721173117411751176117711781179118011811182118311841185118611871188118911901191119211931194119511961197119811991200120112021203120412051206120712081209121012111212121312141215121612171218121912201221122212231224122512261227122812291230123112321233123412351236123712381239124012411242124312441245124612471248124912501251125212531254125512561257125812591260126112621263126412651266126712681269127012711272127312741275127612771278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177717781779178017811782178317841785178617871788178917901791179217931794179517961797179817991800180118021803180418051806180718081809181018111812181318141815181618171818181918201821182218231824182518261827182818291830183118321833183418351836183718381839184018411842184318441845184618471848184918501851185218531854185518561857185818591860186118621863186418651866186718681869187018711872187318741875187618771878187918801881188218831884188518861887188818891890189118921893189418951896189718981899190019011902190319041905190619071908190919101911191219131914191519161917191819191920192119221923192419251926192719281929193019311932193319341935193619371938193919401941194219431944194519461947194819491950195119521953195419551956195719581959196019611962196319641965196619671968196919701971197219731974197519761977197819791980198119821983198419851986198719881989199019911992199319941995199619971998199920002001200220032004200520062007200820092010201120122013201420152016201720182019202020212022202320242025202620272028202920302031203220332034203520362037203820392040204120422043204420452046204720482049205020512052205320542
055205620572058205920602061206220632064206520662067206820692070207120722073207420752076207720782079208020812082208320842085208620872088208920902091209220932094209520962097209820992100210121022103210421052106210721082109211021112112211321142115211621172118211921202121212221232124212521262127212821292130213121322133213421352136213721382139214021412142
  1. /*
  2. * Kernel-based Virtual Machine driver for Linux
  3. *
  4. * This module enables machines with Intel VT-x extensions to run virtual
  5. * machines without emulation or binary translation.
  6. *
  7. * Copyright (C) 2006 Qumranet, Inc.
  8. *
  9. * Authors:
  10. * Avi Kivity <avi@qumranet.com>
  11. * Yaniv Kamay <yaniv@qumranet.com>
  12. *
  13. * This work is licensed under the terms of the GNU GPL, version 2. See
  14. * the COPYING file in the top-level directory.
  15. *
  16. */
  17. #include "kvm.h"
  18. #include <linux/kvm.h>
  19. #include <linux/module.h>
  20. #include <linux/errno.h>
  21. #include <asm/processor.h>
  22. #include <linux/percpu.h>
  23. #include <linux/gfp.h>
  24. #include <asm/msr.h>
  25. #include <linux/mm.h>
  26. #include <linux/miscdevice.h>
  27. #include <linux/vmalloc.h>
  28. #include <asm/uaccess.h>
  29. #include <linux/reboot.h>
  30. #include <asm/io.h>
  31. #include <linux/debugfs.h>
  32. #include <linux/highmem.h>
  33. #include <linux/file.h>
  34. #include <asm/desc.h>
  35. #include "x86_emulate.h"
  36. #include "segment_descriptor.h"
  37. MODULE_AUTHOR("Qumranet");
  38. MODULE_LICENSE("GPL");
  39. struct kvm_arch_ops *kvm_arch_ops;
  40. struct kvm_stat kvm_stat;
  41. EXPORT_SYMBOL_GPL(kvm_stat);
  42. static struct kvm_stats_debugfs_item {
  43. const char *name;
  44. u32 *data;
  45. struct dentry *dentry;
  46. } debugfs_entries[] = {
  47. { "pf_fixed", &kvm_stat.pf_fixed },
  48. { "pf_guest", &kvm_stat.pf_guest },
  49. { "tlb_flush", &kvm_stat.tlb_flush },
  50. { "invlpg", &kvm_stat.invlpg },
  51. { "exits", &kvm_stat.exits },
  52. { "io_exits", &kvm_stat.io_exits },
  53. { "mmio_exits", &kvm_stat.mmio_exits },
  54. { "signal_exits", &kvm_stat.signal_exits },
  55. { "irq_window", &kvm_stat.irq_window_exits },
  56. { "halt_exits", &kvm_stat.halt_exits },
  57. { "request_irq", &kvm_stat.request_irq_exits },
  58. { "irq_exits", &kvm_stat.irq_exits },
  59. { NULL, NULL }
  60. };
  61. static struct dentry *debugfs_dir;
  62. #define MAX_IO_MSRS 256
  63. #define CR0_RESEVED_BITS 0xffffffff1ffaffc0ULL
  64. #define LMSW_GUEST_MASK 0x0eULL
  65. #define CR4_RESEVED_BITS (~((1ULL << 11) - 1))
  66. #define CR8_RESEVED_BITS (~0x0fULL)
  67. #define EFER_RESERVED_BITS 0xfffffffffffff2fe
  68. #ifdef CONFIG_X86_64
  69. // LDT or TSS descriptor in the GDT. 16 bytes.
  70. struct segment_descriptor_64 {
  71. struct segment_descriptor s;
  72. u32 base_higher;
  73. u32 pad_zero;
  74. };
  75. #endif
  76. unsigned long segment_base(u16 selector)
  77. {
  78. struct descriptor_table gdt;
  79. struct segment_descriptor *d;
  80. unsigned long table_base;
  81. typedef unsigned long ul;
  82. unsigned long v;
  83. if (selector == 0)
  84. return 0;
  85. asm ("sgdt %0" : "=m"(gdt));
  86. table_base = gdt.base;
  87. if (selector & 4) { /* from ldt */
  88. u16 ldt_selector;
  89. asm ("sldt %0" : "=g"(ldt_selector));
  90. table_base = segment_base(ldt_selector);
  91. }
  92. d = (struct segment_descriptor *)(table_base + (selector & ~7));
  93. v = d->base_low | ((ul)d->base_mid << 16) | ((ul)d->base_high << 24);
  94. #ifdef CONFIG_X86_64
  95. if (d->system == 0
  96. && (d->type == 2 || d->type == 9 || d->type == 11))
  97. v |= ((ul)((struct segment_descriptor_64 *)d)->base_higher) << 32;
  98. #endif
  99. return v;
  100. }
  101. EXPORT_SYMBOL_GPL(segment_base);
  102. static inline int valid_vcpu(int n)
  103. {
  104. return likely(n >= 0 && n < KVM_MAX_VCPUS);
  105. }
  106. int kvm_read_guest(struct kvm_vcpu *vcpu,
  107. gva_t addr,
  108. unsigned long size,
  109. void *dest)
  110. {
  111. unsigned char *host_buf = dest;
  112. unsigned long req_size = size;
  113. while (size) {
  114. hpa_t paddr;
  115. unsigned now;
  116. unsigned offset;
  117. hva_t guest_buf;
  118. paddr = gva_to_hpa(vcpu, addr);
  119. if (is_error_hpa(paddr))
  120. break;
  121. guest_buf = (hva_t)kmap_atomic(
  122. pfn_to_page(paddr >> PAGE_SHIFT),
  123. KM_USER0);
  124. offset = addr & ~PAGE_MASK;
  125. guest_buf |= offset;
  126. now = min(size, PAGE_SIZE - offset);
  127. memcpy(host_buf, (void*)guest_buf, now);
  128. host_buf += now;
  129. addr += now;
  130. size -= now;
  131. kunmap_atomic((void *)(guest_buf & PAGE_MASK), KM_USER0);
  132. }
  133. return req_size - size;
  134. }
  135. EXPORT_SYMBOL_GPL(kvm_read_guest);
  136. int kvm_write_guest(struct kvm_vcpu *vcpu,
  137. gva_t addr,
  138. unsigned long size,
  139. void *data)
  140. {
  141. unsigned char *host_buf = data;
  142. unsigned long req_size = size;
  143. while (size) {
  144. hpa_t paddr;
  145. unsigned now;
  146. unsigned offset;
  147. hva_t guest_buf;
  148. paddr = gva_to_hpa(vcpu, addr);
  149. if (is_error_hpa(paddr))
  150. break;
  151. guest_buf = (hva_t)kmap_atomic(
  152. pfn_to_page(paddr >> PAGE_SHIFT), KM_USER0);
  153. offset = addr & ~PAGE_MASK;
  154. guest_buf |= offset;
  155. now = min(size, PAGE_SIZE - offset);
  156. memcpy((void*)guest_buf, host_buf, now);
  157. host_buf += now;
  158. addr += now;
  159. size -= now;
  160. kunmap_atomic((void *)(guest_buf & PAGE_MASK), KM_USER0);
  161. }
  162. return req_size - size;
  163. }
  164. EXPORT_SYMBOL_GPL(kvm_write_guest);
  165. static int vcpu_slot(struct kvm_vcpu *vcpu)
  166. {
  167. return vcpu - vcpu->kvm->vcpus;
  168. }
  169. /*
  170. * Switches to specified vcpu, until a matching vcpu_put()
  171. */
  172. static struct kvm_vcpu *vcpu_load(struct kvm *kvm, int vcpu_slot)
  173. {
  174. struct kvm_vcpu *vcpu = &kvm->vcpus[vcpu_slot];
  175. mutex_lock(&vcpu->mutex);
  176. if (unlikely(!vcpu->vmcs)) {
  177. mutex_unlock(&vcpu->mutex);
  178. return NULL;
  179. }
  180. return kvm_arch_ops->vcpu_load(vcpu);
  181. }
  182. static void vcpu_put(struct kvm_vcpu *vcpu)
  183. {
  184. kvm_arch_ops->vcpu_put(vcpu);
  185. mutex_unlock(&vcpu->mutex);
  186. }
  187. static int kvm_dev_open(struct inode *inode, struct file *filp)
  188. {
  189. struct kvm *kvm = kzalloc(sizeof(struct kvm), GFP_KERNEL);
  190. int i;
  191. if (!kvm)
  192. return -ENOMEM;
  193. spin_lock_init(&kvm->lock);
  194. INIT_LIST_HEAD(&kvm->active_mmu_pages);
  195. for (i = 0; i < KVM_MAX_VCPUS; ++i) {
  196. struct kvm_vcpu *vcpu = &kvm->vcpus[i];
  197. mutex_init(&vcpu->mutex);
  198. vcpu->kvm = kvm;
  199. vcpu->mmu.root_hpa = INVALID_PAGE;
  200. INIT_LIST_HEAD(&vcpu->free_pages);
  201. }
  202. filp->private_data = kvm;
  203. return 0;
  204. }
  205. /*
  206. * Free any memory in @free but not in @dont.
  207. */
  208. static void kvm_free_physmem_slot(struct kvm_memory_slot *free,
  209. struct kvm_memory_slot *dont)
  210. {
  211. int i;
  212. if (!dont || free->phys_mem != dont->phys_mem)
  213. if (free->phys_mem) {
  214. for (i = 0; i < free->npages; ++i)
  215. if (free->phys_mem[i])
  216. __free_page(free->phys_mem[i]);
  217. vfree(free->phys_mem);
  218. }
  219. if (!dont || free->dirty_bitmap != dont->dirty_bitmap)
  220. vfree(free->dirty_bitmap);
  221. free->phys_mem = NULL;
  222. free->npages = 0;
  223. free->dirty_bitmap = NULL;
  224. }
  225. static void kvm_free_physmem(struct kvm *kvm)
  226. {
  227. int i;
  228. for (i = 0; i < kvm->nmemslots; ++i)
  229. kvm_free_physmem_slot(&kvm->memslots[i], NULL);
  230. }
  231. static void kvm_free_vcpu(struct kvm_vcpu *vcpu)
  232. {
  233. if (!vcpu_load(vcpu->kvm, vcpu_slot(vcpu)))
  234. return;
  235. kvm_mmu_destroy(vcpu);
  236. vcpu_put(vcpu);
  237. kvm_arch_ops->vcpu_free(vcpu);
  238. }
  239. static void kvm_free_vcpus(struct kvm *kvm)
  240. {
  241. unsigned int i;
  242. for (i = 0; i < KVM_MAX_VCPUS; ++i)
  243. kvm_free_vcpu(&kvm->vcpus[i]);
  244. }
  245. static int kvm_dev_release(struct inode *inode, struct file *filp)
  246. {
  247. struct kvm *kvm = filp->private_data;
  248. kvm_free_vcpus(kvm);
  249. kvm_free_physmem(kvm);
  250. kfree(kvm);
  251. return 0;
  252. }
  253. static void inject_gp(struct kvm_vcpu *vcpu)
  254. {
  255. kvm_arch_ops->inject_gp(vcpu, 0);
  256. }
  257. /*
  258. * Load the pae pdptrs. Return true is they are all valid.
  259. */
  260. static int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3)
  261. {
  262. gfn_t pdpt_gfn = cr3 >> PAGE_SHIFT;
  263. unsigned offset = ((cr3 & (PAGE_SIZE-1)) >> 5) << 2;
  264. int i;
  265. u64 pdpte;
  266. u64 *pdpt;
  267. int ret;
  268. struct kvm_memory_slot *memslot;
  269. spin_lock(&vcpu->kvm->lock);
  270. memslot = gfn_to_memslot(vcpu->kvm, pdpt_gfn);
  271. /* FIXME: !memslot - emulate? 0xff? */
  272. pdpt = kmap_atomic(gfn_to_page(memslot, pdpt_gfn), KM_USER0);
  273. ret = 1;
  274. for (i = 0; i < 4; ++i) {
  275. pdpte = pdpt[offset + i];
  276. if ((pdpte & 1) && (pdpte & 0xfffffff0000001e6ull)) {
  277. ret = 0;
  278. goto out;
  279. }
  280. }
  281. for (i = 0; i < 4; ++i)
  282. vcpu->pdptrs[i] = pdpt[offset + i];
  283. out:
  284. kunmap_atomic(pdpt, KM_USER0);
  285. spin_unlock(&vcpu->kvm->lock);
  286. return ret;
  287. }
  288. void set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
  289. {
  290. if (cr0 & CR0_RESEVED_BITS) {
  291. printk(KERN_DEBUG "set_cr0: 0x%lx #GP, reserved bits 0x%lx\n",
  292. cr0, vcpu->cr0);
  293. inject_gp(vcpu);
  294. return;
  295. }
  296. if ((cr0 & CR0_NW_MASK) && !(cr0 & CR0_CD_MASK)) {
  297. printk(KERN_DEBUG "set_cr0: #GP, CD == 0 && NW == 1\n");
  298. inject_gp(vcpu);
  299. return;
  300. }
  301. if ((cr0 & CR0_PG_MASK) && !(cr0 & CR0_PE_MASK)) {
  302. printk(KERN_DEBUG "set_cr0: #GP, set PG flag "
  303. "and a clear PE flag\n");
  304. inject_gp(vcpu);
  305. return;
  306. }
  307. if (!is_paging(vcpu) && (cr0 & CR0_PG_MASK)) {
  308. #ifdef CONFIG_X86_64
  309. if ((vcpu->shadow_efer & EFER_LME)) {
  310. int cs_db, cs_l;
  311. if (!is_pae(vcpu)) {
  312. printk(KERN_DEBUG "set_cr0: #GP, start paging "
  313. "in long mode while PAE is disabled\n");
  314. inject_gp(vcpu);
  315. return;
  316. }
  317. kvm_arch_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
  318. if (cs_l) {
  319. printk(KERN_DEBUG "set_cr0: #GP, start paging "
  320. "in long mode while CS.L == 1\n");
  321. inject_gp(vcpu);
  322. return;
  323. }
  324. } else
  325. #endif
  326. if (is_pae(vcpu) && !load_pdptrs(vcpu, vcpu->cr3)) {
  327. printk(KERN_DEBUG "set_cr0: #GP, pdptrs "
  328. "reserved bits\n");
  329. inject_gp(vcpu);
  330. return;
  331. }
  332. }
  333. kvm_arch_ops->set_cr0(vcpu, cr0);
  334. vcpu->cr0 = cr0;
  335. spin_lock(&vcpu->kvm->lock);
  336. kvm_mmu_reset_context(vcpu);
  337. spin_unlock(&vcpu->kvm->lock);
  338. return;
  339. }
  340. EXPORT_SYMBOL_GPL(set_cr0);
  341. void lmsw(struct kvm_vcpu *vcpu, unsigned long msw)
  342. {
  343. kvm_arch_ops->decache_cr0_cr4_guest_bits(vcpu);
  344. set_cr0(vcpu, (vcpu->cr0 & ~0x0ful) | (msw & 0x0f));
  345. }
  346. EXPORT_SYMBOL_GPL(lmsw);
  347. void set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
  348. {
  349. if (cr4 & CR4_RESEVED_BITS) {
  350. printk(KERN_DEBUG "set_cr4: #GP, reserved bits\n");
  351. inject_gp(vcpu);
  352. return;
  353. }
  354. if (is_long_mode(vcpu)) {
  355. if (!(cr4 & CR4_PAE_MASK)) {
  356. printk(KERN_DEBUG "set_cr4: #GP, clearing PAE while "
  357. "in long mode\n");
  358. inject_gp(vcpu);
  359. return;
  360. }
  361. } else if (is_paging(vcpu) && !is_pae(vcpu) && (cr4 & CR4_PAE_MASK)
  362. && !load_pdptrs(vcpu, vcpu->cr3)) {
  363. printk(KERN_DEBUG "set_cr4: #GP, pdptrs reserved bits\n");
  364. inject_gp(vcpu);
  365. }
  366. if (cr4 & CR4_VMXE_MASK) {
  367. printk(KERN_DEBUG "set_cr4: #GP, setting VMXE\n");
  368. inject_gp(vcpu);
  369. return;
  370. }
  371. kvm_arch_ops->set_cr4(vcpu, cr4);
  372. spin_lock(&vcpu->kvm->lock);
  373. kvm_mmu_reset_context(vcpu);
  374. spin_unlock(&vcpu->kvm->lock);
  375. }
  376. EXPORT_SYMBOL_GPL(set_cr4);
  377. void set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
  378. {
  379. if (is_long_mode(vcpu)) {
  380. if ( cr3 & CR3_L_MODE_RESEVED_BITS) {
  381. printk(KERN_DEBUG "set_cr3: #GP, reserved bits\n");
  382. inject_gp(vcpu);
  383. return;
  384. }
  385. } else {
  386. if (cr3 & CR3_RESEVED_BITS) {
  387. printk(KERN_DEBUG "set_cr3: #GP, reserved bits\n");
  388. inject_gp(vcpu);
  389. return;
  390. }
  391. if (is_paging(vcpu) && is_pae(vcpu) &&
  392. !load_pdptrs(vcpu, cr3)) {
  393. printk(KERN_DEBUG "set_cr3: #GP, pdptrs "
  394. "reserved bits\n");
  395. inject_gp(vcpu);
  396. return;
  397. }
  398. }
  399. vcpu->cr3 = cr3;
  400. spin_lock(&vcpu->kvm->lock);
  401. /*
  402. * Does the new cr3 value map to physical memory? (Note, we
  403. * catch an invalid cr3 even in real-mode, because it would
  404. * cause trouble later on when we turn on paging anyway.)
  405. *
  406. * A real CPU would silently accept an invalid cr3 and would
  407. * attempt to use it - with largely undefined (and often hard
  408. * to debug) behavior on the guest side.
  409. */
  410. if (unlikely(!gfn_to_memslot(vcpu->kvm, cr3 >> PAGE_SHIFT)))
  411. inject_gp(vcpu);
  412. else
  413. vcpu->mmu.new_cr3(vcpu);
  414. spin_unlock(&vcpu->kvm->lock);
  415. }
  416. EXPORT_SYMBOL_GPL(set_cr3);
  417. void set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8)
  418. {
  419. if ( cr8 & CR8_RESEVED_BITS) {
  420. printk(KERN_DEBUG "set_cr8: #GP, reserved bits 0x%lx\n", cr8);
  421. inject_gp(vcpu);
  422. return;
  423. }
  424. vcpu->cr8 = cr8;
  425. }
  426. EXPORT_SYMBOL_GPL(set_cr8);
  427. void fx_init(struct kvm_vcpu *vcpu)
  428. {
  429. struct __attribute__ ((__packed__)) fx_image_s {
  430. u16 control; //fcw
  431. u16 status; //fsw
  432. u16 tag; // ftw
  433. u16 opcode; //fop
  434. u64 ip; // fpu ip
  435. u64 operand;// fpu dp
  436. u32 mxcsr;
  437. u32 mxcsr_mask;
  438. } *fx_image;
  439. fx_save(vcpu->host_fx_image);
  440. fpu_init();
  441. fx_save(vcpu->guest_fx_image);
  442. fx_restore(vcpu->host_fx_image);
  443. fx_image = (struct fx_image_s *)vcpu->guest_fx_image;
  444. fx_image->mxcsr = 0x1f80;
  445. memset(vcpu->guest_fx_image + sizeof(struct fx_image_s),
  446. 0, FX_IMAGE_SIZE - sizeof(struct fx_image_s));
  447. }
  448. EXPORT_SYMBOL_GPL(fx_init);
  449. /*
  450. * Creates some virtual cpus. Good luck creating more than one.
  451. */
  452. static int kvm_dev_ioctl_create_vcpu(struct kvm *kvm, int n)
  453. {
  454. int r;
  455. struct kvm_vcpu *vcpu;
  456. r = -EINVAL;
  457. if (!valid_vcpu(n))
  458. goto out;
  459. vcpu = &kvm->vcpus[n];
  460. mutex_lock(&vcpu->mutex);
  461. if (vcpu->vmcs) {
  462. mutex_unlock(&vcpu->mutex);
  463. return -EEXIST;
  464. }
  465. vcpu->host_fx_image = (char*)ALIGN((hva_t)vcpu->fx_buf,
  466. FX_IMAGE_ALIGN);
  467. vcpu->guest_fx_image = vcpu->host_fx_image + FX_IMAGE_SIZE;
  468. vcpu->cpu = -1; /* First load will set up TR */
  469. r = kvm_arch_ops->vcpu_create(vcpu);
  470. if (r < 0)
  471. goto out_free_vcpus;
  472. r = kvm_mmu_create(vcpu);
  473. if (r < 0)
  474. goto out_free_vcpus;
  475. kvm_arch_ops->vcpu_load(vcpu);
  476. r = kvm_mmu_setup(vcpu);
  477. if (r >= 0)
  478. r = kvm_arch_ops->vcpu_setup(vcpu);
  479. vcpu_put(vcpu);
  480. if (r < 0)
  481. goto out_free_vcpus;
  482. return 0;
  483. out_free_vcpus:
  484. kvm_free_vcpu(vcpu);
  485. mutex_unlock(&vcpu->mutex);
  486. out:
  487. return r;
  488. }
  489. /*
  490. * Allocate some memory and give it an address in the guest physical address
  491. * space.
  492. *
  493. * Discontiguous memory is allowed, mostly for framebuffers.
  494. */
  495. static int kvm_dev_ioctl_set_memory_region(struct kvm *kvm,
  496. struct kvm_memory_region *mem)
  497. {
  498. int r;
  499. gfn_t base_gfn;
  500. unsigned long npages;
  501. unsigned long i;
  502. struct kvm_memory_slot *memslot;
  503. struct kvm_memory_slot old, new;
  504. int memory_config_version;
  505. r = -EINVAL;
  506. /* General sanity checks */
  507. if (mem->memory_size & (PAGE_SIZE - 1))
  508. goto out;
  509. if (mem->guest_phys_addr & (PAGE_SIZE - 1))
  510. goto out;
  511. if (mem->slot >= KVM_MEMORY_SLOTS)
  512. goto out;
  513. if (mem->guest_phys_addr + mem->memory_size < mem->guest_phys_addr)
  514. goto out;
  515. memslot = &kvm->memslots[mem->slot];
  516. base_gfn = mem->guest_phys_addr >> PAGE_SHIFT;
  517. npages = mem->memory_size >> PAGE_SHIFT;
  518. if (!npages)
  519. mem->flags &= ~KVM_MEM_LOG_DIRTY_PAGES;
  520. raced:
  521. spin_lock(&kvm->lock);
  522. memory_config_version = kvm->memory_config_version;
  523. new = old = *memslot;
  524. new.base_gfn = base_gfn;
  525. new.npages = npages;
  526. new.flags = mem->flags;
  527. /* Disallow changing a memory slot's size. */
  528. r = -EINVAL;
  529. if (npages && old.npages && npages != old.npages)
  530. goto out_unlock;
  531. /* Check for overlaps */
  532. r = -EEXIST;
  533. for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
  534. struct kvm_memory_slot *s = &kvm->memslots[i];
  535. if (s == memslot)
  536. continue;
  537. if (!((base_gfn + npages <= s->base_gfn) ||
  538. (base_gfn >= s->base_gfn + s->npages)))
  539. goto out_unlock;
  540. }
  541. /*
  542. * Do memory allocations outside lock. memory_config_version will
  543. * detect any races.
  544. */
  545. spin_unlock(&kvm->lock);
  546. /* Deallocate if slot is being removed */
  547. if (!npages)
  548. new.phys_mem = NULL;
  549. /* Free page dirty bitmap if unneeded */
  550. if (!(new.flags & KVM_MEM_LOG_DIRTY_PAGES))
  551. new.dirty_bitmap = NULL;
  552. r = -ENOMEM;
  553. /* Allocate if a slot is being created */
  554. if (npages && !new.phys_mem) {
  555. new.phys_mem = vmalloc(npages * sizeof(struct page *));
  556. if (!new.phys_mem)
  557. goto out_free;
  558. memset(new.phys_mem, 0, npages * sizeof(struct page *));
  559. for (i = 0; i < npages; ++i) {
  560. new.phys_mem[i] = alloc_page(GFP_HIGHUSER
  561. | __GFP_ZERO);
  562. if (!new.phys_mem[i])
  563. goto out_free;
  564. new.phys_mem[i]->private = 0;
  565. }
  566. }
  567. /* Allocate page dirty bitmap if needed */
  568. if ((new.flags & KVM_MEM_LOG_DIRTY_PAGES) && !new.dirty_bitmap) {
  569. unsigned dirty_bytes = ALIGN(npages, BITS_PER_LONG) / 8;
  570. new.dirty_bitmap = vmalloc(dirty_bytes);
  571. if (!new.dirty_bitmap)
  572. goto out_free;
  573. memset(new.dirty_bitmap, 0, dirty_bytes);
  574. }
  575. spin_lock(&kvm->lock);
  576. if (memory_config_version != kvm->memory_config_version) {
  577. spin_unlock(&kvm->lock);
  578. kvm_free_physmem_slot(&new, &old);
  579. goto raced;
  580. }
  581. r = -EAGAIN;
  582. if (kvm->busy)
  583. goto out_unlock;
  584. if (mem->slot >= kvm->nmemslots)
  585. kvm->nmemslots = mem->slot + 1;
  586. *memslot = new;
  587. ++kvm->memory_config_version;
  588. spin_unlock(&kvm->lock);
  589. for (i = 0; i < KVM_MAX_VCPUS; ++i) {
  590. struct kvm_vcpu *vcpu;
  591. vcpu = vcpu_load(kvm, i);
  592. if (!vcpu)
  593. continue;
  594. kvm_mmu_reset_context(vcpu);
  595. vcpu_put(vcpu);
  596. }
  597. kvm_free_physmem_slot(&old, &new);
  598. return 0;
  599. out_unlock:
  600. spin_unlock(&kvm->lock);
  601. out_free:
  602. kvm_free_physmem_slot(&new, &old);
  603. out:
  604. return r;
  605. }
  606. static void do_remove_write_access(struct kvm_vcpu *vcpu, int slot)
  607. {
  608. spin_lock(&vcpu->kvm->lock);
  609. kvm_mmu_slot_remove_write_access(vcpu, slot);
  610. spin_unlock(&vcpu->kvm->lock);
  611. }
  612. /*
  613. * Get (and clear) the dirty memory log for a memory slot.
  614. */
  615. static int kvm_dev_ioctl_get_dirty_log(struct kvm *kvm,
  616. struct kvm_dirty_log *log)
  617. {
  618. struct kvm_memory_slot *memslot;
  619. int r, i;
  620. int n;
  621. int cleared;
  622. unsigned long any = 0;
  623. spin_lock(&kvm->lock);
  624. /*
  625. * Prevent changes to guest memory configuration even while the lock
  626. * is not taken.
  627. */
  628. ++kvm->busy;
  629. spin_unlock(&kvm->lock);
  630. r = -EINVAL;
  631. if (log->slot >= KVM_MEMORY_SLOTS)
  632. goto out;
  633. memslot = &kvm->memslots[log->slot];
  634. r = -ENOENT;
  635. if (!memslot->dirty_bitmap)
  636. goto out;
  637. n = ALIGN(memslot->npages, 8) / 8;
  638. for (i = 0; !any && i < n; ++i)
  639. any = memslot->dirty_bitmap[i];
  640. r = -EFAULT;
  641. if (copy_to_user(log->dirty_bitmap, memslot->dirty_bitmap, n))
  642. goto out;
  643. if (any) {
  644. cleared = 0;
  645. for (i = 0; i < KVM_MAX_VCPUS; ++i) {
  646. struct kvm_vcpu *vcpu = vcpu_load(kvm, i);
  647. if (!vcpu)
  648. continue;
  649. if (!cleared) {
  650. do_remove_write_access(vcpu, log->slot);
  651. memset(memslot->dirty_bitmap, 0, n);
  652. cleared = 1;
  653. }
  654. kvm_arch_ops->tlb_flush(vcpu);
  655. vcpu_put(vcpu);
  656. }
  657. }
  658. r = 0;
  659. out:
  660. spin_lock(&kvm->lock);
  661. --kvm->busy;
  662. spin_unlock(&kvm->lock);
  663. return r;
  664. }
  665. struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn)
  666. {
  667. int i;
  668. for (i = 0; i < kvm->nmemslots; ++i) {
  669. struct kvm_memory_slot *memslot = &kvm->memslots[i];
  670. if (gfn >= memslot->base_gfn
  671. && gfn < memslot->base_gfn + memslot->npages)
  672. return memslot;
  673. }
  674. return NULL;
  675. }
  676. EXPORT_SYMBOL_GPL(gfn_to_memslot);
  677. void mark_page_dirty(struct kvm *kvm, gfn_t gfn)
  678. {
  679. int i;
  680. struct kvm_memory_slot *memslot = NULL;
  681. unsigned long rel_gfn;
  682. for (i = 0; i < kvm->nmemslots; ++i) {
  683. memslot = &kvm->memslots[i];
  684. if (gfn >= memslot->base_gfn
  685. && gfn < memslot->base_gfn + memslot->npages) {
  686. if (!memslot || !memslot->dirty_bitmap)
  687. return;
  688. rel_gfn = gfn - memslot->base_gfn;
  689. /* avoid RMW */
  690. if (!test_bit(rel_gfn, memslot->dirty_bitmap))
  691. set_bit(rel_gfn, memslot->dirty_bitmap);
  692. return;
  693. }
  694. }
  695. }
  696. static int emulator_read_std(unsigned long addr,
  697. unsigned long *val,
  698. unsigned int bytes,
  699. struct x86_emulate_ctxt *ctxt)
  700. {
  701. struct kvm_vcpu *vcpu = ctxt->vcpu;
  702. void *data = val;
  703. while (bytes) {
  704. gpa_t gpa = vcpu->mmu.gva_to_gpa(vcpu, addr);
  705. unsigned offset = addr & (PAGE_SIZE-1);
  706. unsigned tocopy = min(bytes, (unsigned)PAGE_SIZE - offset);
  707. unsigned long pfn;
  708. struct kvm_memory_slot *memslot;
  709. void *page;
  710. if (gpa == UNMAPPED_GVA)
  711. return X86EMUL_PROPAGATE_FAULT;
  712. pfn = gpa >> PAGE_SHIFT;
  713. memslot = gfn_to_memslot(vcpu->kvm, pfn);
  714. if (!memslot)
  715. return X86EMUL_UNHANDLEABLE;
  716. page = kmap_atomic(gfn_to_page(memslot, pfn), KM_USER0);
  717. memcpy(data, page + offset, tocopy);
  718. kunmap_atomic(page, KM_USER0);
  719. bytes -= tocopy;
  720. data += tocopy;
  721. addr += tocopy;
  722. }
  723. return X86EMUL_CONTINUE;
  724. }
  725. static int emulator_write_std(unsigned long addr,
  726. unsigned long val,
  727. unsigned int bytes,
  728. struct x86_emulate_ctxt *ctxt)
  729. {
  730. printk(KERN_ERR "emulator_write_std: addr %lx n %d\n",
  731. addr, bytes);
  732. return X86EMUL_UNHANDLEABLE;
  733. }
  734. static int emulator_read_emulated(unsigned long addr,
  735. unsigned long *val,
  736. unsigned int bytes,
  737. struct x86_emulate_ctxt *ctxt)
  738. {
  739. struct kvm_vcpu *vcpu = ctxt->vcpu;
  740. if (vcpu->mmio_read_completed) {
  741. memcpy(val, vcpu->mmio_data, bytes);
  742. vcpu->mmio_read_completed = 0;
  743. return X86EMUL_CONTINUE;
  744. } else if (emulator_read_std(addr, val, bytes, ctxt)
  745. == X86EMUL_CONTINUE)
  746. return X86EMUL_CONTINUE;
  747. else {
  748. gpa_t gpa = vcpu->mmu.gva_to_gpa(vcpu, addr);
  749. if (gpa == UNMAPPED_GVA)
  750. return vcpu_printf(vcpu, "not present\n"), X86EMUL_PROPAGATE_FAULT;
  751. vcpu->mmio_needed = 1;
  752. vcpu->mmio_phys_addr = gpa;
  753. vcpu->mmio_size = bytes;
  754. vcpu->mmio_is_write = 0;
  755. return X86EMUL_UNHANDLEABLE;
  756. }
  757. }
  758. static int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
  759. unsigned long val, int bytes)
  760. {
  761. struct kvm_memory_slot *m;
  762. struct page *page;
  763. void *virt;
  764. if (((gpa + bytes - 1) >> PAGE_SHIFT) != (gpa >> PAGE_SHIFT))
  765. return 0;
  766. m = gfn_to_memslot(vcpu->kvm, gpa >> PAGE_SHIFT);
  767. if (!m)
  768. return 0;
  769. page = gfn_to_page(m, gpa >> PAGE_SHIFT);
  770. kvm_mmu_pre_write(vcpu, gpa, bytes);
  771. virt = kmap_atomic(page, KM_USER0);
  772. memcpy(virt + offset_in_page(gpa), &val, bytes);
  773. kunmap_atomic(virt, KM_USER0);
  774. kvm_mmu_post_write(vcpu, gpa, bytes);
  775. return 1;
  776. }
  777. static int emulator_write_emulated(unsigned long addr,
  778. unsigned long val,
  779. unsigned int bytes,
  780. struct x86_emulate_ctxt *ctxt)
  781. {
  782. struct kvm_vcpu *vcpu = ctxt->vcpu;
  783. gpa_t gpa = vcpu->mmu.gva_to_gpa(vcpu, addr);
  784. if (gpa == UNMAPPED_GVA)
  785. return X86EMUL_PROPAGATE_FAULT;
  786. if (emulator_write_phys(vcpu, gpa, val, bytes))
  787. return X86EMUL_CONTINUE;
  788. vcpu->mmio_needed = 1;
  789. vcpu->mmio_phys_addr = gpa;
  790. vcpu->mmio_size = bytes;
  791. vcpu->mmio_is_write = 1;
  792. memcpy(vcpu->mmio_data, &val, bytes);
  793. return X86EMUL_CONTINUE;
  794. }
  795. static int emulator_cmpxchg_emulated(unsigned long addr,
  796. unsigned long old,
  797. unsigned long new,
  798. unsigned int bytes,
  799. struct x86_emulate_ctxt *ctxt)
  800. {
  801. static int reported;
  802. if (!reported) {
  803. reported = 1;
  804. printk(KERN_WARNING "kvm: emulating exchange as write\n");
  805. }
  806. return emulator_write_emulated(addr, new, bytes, ctxt);
  807. }
  808. #ifdef CONFIG_X86_32
  809. static int emulator_cmpxchg8b_emulated(unsigned long addr,
  810. unsigned long old_lo,
  811. unsigned long old_hi,
  812. unsigned long new_lo,
  813. unsigned long new_hi,
  814. struct x86_emulate_ctxt *ctxt)
  815. {
  816. static int reported;
  817. int r;
  818. if (!reported) {
  819. reported = 1;
  820. printk(KERN_WARNING "kvm: emulating exchange8b as write\n");
  821. }
  822. r = emulator_write_emulated(addr, new_lo, 4, ctxt);
  823. if (r != X86EMUL_CONTINUE)
  824. return r;
  825. return emulator_write_emulated(addr+4, new_hi, 4, ctxt);
  826. }
  827. #endif
  828. static unsigned long get_segment_base(struct kvm_vcpu *vcpu, int seg)
  829. {
  830. return kvm_arch_ops->get_segment_base(vcpu, seg);
  831. }
  832. int emulate_invlpg(struct kvm_vcpu *vcpu, gva_t address)
  833. {
  834. return X86EMUL_CONTINUE;
  835. }
  836. int emulate_clts(struct kvm_vcpu *vcpu)
  837. {
  838. unsigned long cr0;
  839. kvm_arch_ops->decache_cr0_cr4_guest_bits(vcpu);
  840. cr0 = vcpu->cr0 & ~CR0_TS_MASK;
  841. kvm_arch_ops->set_cr0(vcpu, cr0);
  842. return X86EMUL_CONTINUE;
  843. }
  844. int emulator_get_dr(struct x86_emulate_ctxt* ctxt, int dr, unsigned long *dest)
  845. {
  846. struct kvm_vcpu *vcpu = ctxt->vcpu;
  847. switch (dr) {
  848. case 0 ... 3:
  849. *dest = kvm_arch_ops->get_dr(vcpu, dr);
  850. return X86EMUL_CONTINUE;
  851. default:
  852. printk(KERN_DEBUG "%s: unexpected dr %u\n",
  853. __FUNCTION__, dr);
  854. return X86EMUL_UNHANDLEABLE;
  855. }
  856. }
  857. int emulator_set_dr(struct x86_emulate_ctxt *ctxt, int dr, unsigned long value)
  858. {
  859. unsigned long mask = (ctxt->mode == X86EMUL_MODE_PROT64) ? ~0ULL : ~0U;
  860. int exception;
  861. kvm_arch_ops->set_dr(ctxt->vcpu, dr, value & mask, &exception);
  862. if (exception) {
  863. /* FIXME: better handling */
  864. return X86EMUL_UNHANDLEABLE;
  865. }
  866. return X86EMUL_CONTINUE;
  867. }
  868. static void report_emulation_failure(struct x86_emulate_ctxt *ctxt)
  869. {
  870. static int reported;
  871. u8 opcodes[4];
  872. unsigned long rip = ctxt->vcpu->rip;
  873. unsigned long rip_linear;
  874. rip_linear = rip + get_segment_base(ctxt->vcpu, VCPU_SREG_CS);
  875. if (reported)
  876. return;
  877. emulator_read_std(rip_linear, (void *)opcodes, 4, ctxt);
  878. printk(KERN_ERR "emulation failed but !mmio_needed?"
  879. " rip %lx %02x %02x %02x %02x\n",
  880. rip, opcodes[0], opcodes[1], opcodes[2], opcodes[3]);
  881. reported = 1;
  882. }
  883. struct x86_emulate_ops emulate_ops = {
  884. .read_std = emulator_read_std,
  885. .write_std = emulator_write_std,
  886. .read_emulated = emulator_read_emulated,
  887. .write_emulated = emulator_write_emulated,
  888. .cmpxchg_emulated = emulator_cmpxchg_emulated,
  889. #ifdef CONFIG_X86_32
  890. .cmpxchg8b_emulated = emulator_cmpxchg8b_emulated,
  891. #endif
  892. };
  893. int emulate_instruction(struct kvm_vcpu *vcpu,
  894. struct kvm_run *run,
  895. unsigned long cr2,
  896. u16 error_code)
  897. {
  898. struct x86_emulate_ctxt emulate_ctxt;
  899. int r;
  900. int cs_db, cs_l;
  901. kvm_arch_ops->cache_regs(vcpu);
  902. kvm_arch_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
  903. emulate_ctxt.vcpu = vcpu;
  904. emulate_ctxt.eflags = kvm_arch_ops->get_rflags(vcpu);
  905. emulate_ctxt.cr2 = cr2;
  906. emulate_ctxt.mode = (emulate_ctxt.eflags & X86_EFLAGS_VM)
  907. ? X86EMUL_MODE_REAL : cs_l
  908. ? X86EMUL_MODE_PROT64 : cs_db
  909. ? X86EMUL_MODE_PROT32 : X86EMUL_MODE_PROT16;
  910. if (emulate_ctxt.mode == X86EMUL_MODE_PROT64) {
  911. emulate_ctxt.cs_base = 0;
  912. emulate_ctxt.ds_base = 0;
  913. emulate_ctxt.es_base = 0;
  914. emulate_ctxt.ss_base = 0;
  915. } else {
  916. emulate_ctxt.cs_base = get_segment_base(vcpu, VCPU_SREG_CS);
  917. emulate_ctxt.ds_base = get_segment_base(vcpu, VCPU_SREG_DS);
  918. emulate_ctxt.es_base = get_segment_base(vcpu, VCPU_SREG_ES);
  919. emulate_ctxt.ss_base = get_segment_base(vcpu, VCPU_SREG_SS);
  920. }
  921. emulate_ctxt.gs_base = get_segment_base(vcpu, VCPU_SREG_GS);
  922. emulate_ctxt.fs_base = get_segment_base(vcpu, VCPU_SREG_FS);
  923. vcpu->mmio_is_write = 0;
  924. r = x86_emulate_memop(&emulate_ctxt, &emulate_ops);
  925. if ((r || vcpu->mmio_is_write) && run) {
  926. run->mmio.phys_addr = vcpu->mmio_phys_addr;
  927. memcpy(run->mmio.data, vcpu->mmio_data, 8);
  928. run->mmio.len = vcpu->mmio_size;
  929. run->mmio.is_write = vcpu->mmio_is_write;
  930. }
  931. if (r) {
  932. if (kvm_mmu_unprotect_page_virt(vcpu, cr2))
  933. return EMULATE_DONE;
  934. if (!vcpu->mmio_needed) {
  935. report_emulation_failure(&emulate_ctxt);
  936. return EMULATE_FAIL;
  937. }
  938. return EMULATE_DO_MMIO;
  939. }
  940. kvm_arch_ops->decache_regs(vcpu);
  941. kvm_arch_ops->set_rflags(vcpu, emulate_ctxt.eflags);
  942. if (vcpu->mmio_is_write)
  943. return EMULATE_DO_MMIO;
  944. return EMULATE_DONE;
  945. }
  946. EXPORT_SYMBOL_GPL(emulate_instruction);
  947. static u64 mk_cr_64(u64 curr_cr, u32 new_val)
  948. {
  949. return (curr_cr & ~((1ULL << 32) - 1)) | new_val;
  950. }
  951. void realmode_lgdt(struct kvm_vcpu *vcpu, u16 limit, unsigned long base)
  952. {
  953. struct descriptor_table dt = { limit, base };
  954. kvm_arch_ops->set_gdt(vcpu, &dt);
  955. }
  956. void realmode_lidt(struct kvm_vcpu *vcpu, u16 limit, unsigned long base)
  957. {
  958. struct descriptor_table dt = { limit, base };
  959. kvm_arch_ops->set_idt(vcpu, &dt);
  960. }
  961. void realmode_lmsw(struct kvm_vcpu *vcpu, unsigned long msw,
  962. unsigned long *rflags)
  963. {
  964. lmsw(vcpu, msw);
  965. *rflags = kvm_arch_ops->get_rflags(vcpu);
  966. }
  967. unsigned long realmode_get_cr(struct kvm_vcpu *vcpu, int cr)
  968. {
  969. kvm_arch_ops->decache_cr0_cr4_guest_bits(vcpu);
  970. switch (cr) {
  971. case 0:
  972. return vcpu->cr0;
  973. case 2:
  974. return vcpu->cr2;
  975. case 3:
  976. return vcpu->cr3;
  977. case 4:
  978. return vcpu->cr4;
  979. default:
  980. vcpu_printf(vcpu, "%s: unexpected cr %u\n", __FUNCTION__, cr);
  981. return 0;
  982. }
  983. }
  984. void realmode_set_cr(struct kvm_vcpu *vcpu, int cr, unsigned long val,
  985. unsigned long *rflags)
  986. {
  987. switch (cr) {
  988. case 0:
  989. set_cr0(vcpu, mk_cr_64(vcpu->cr0, val));
  990. *rflags = kvm_arch_ops->get_rflags(vcpu);
  991. break;
  992. case 2:
  993. vcpu->cr2 = val;
  994. break;
  995. case 3:
  996. set_cr3(vcpu, val);
  997. break;
  998. case 4:
  999. set_cr4(vcpu, mk_cr_64(vcpu->cr4, val));
  1000. break;
  1001. default:
  1002. vcpu_printf(vcpu, "%s: unexpected cr %u\n", __FUNCTION__, cr);
  1003. }
  1004. }
  1005. int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
  1006. {
  1007. u64 data;
  1008. switch (msr) {
  1009. case 0xc0010010: /* SYSCFG */
  1010. case 0xc0010015: /* HWCR */
  1011. case MSR_IA32_PLATFORM_ID:
  1012. case MSR_IA32_P5_MC_ADDR:
  1013. case MSR_IA32_P5_MC_TYPE:
  1014. case MSR_IA32_MC0_CTL:
  1015. case MSR_IA32_MCG_STATUS:
  1016. case MSR_IA32_MCG_CAP:
  1017. case MSR_IA32_MC0_MISC:
  1018. case MSR_IA32_MC0_MISC+4:
  1019. case MSR_IA32_MC0_MISC+8:
  1020. case MSR_IA32_MC0_MISC+12:
  1021. case MSR_IA32_MC0_MISC+16:
  1022. case MSR_IA32_UCODE_REV:
  1023. case MSR_IA32_PERF_STATUS:
  1024. /* MTRR registers */
  1025. case 0xfe:
  1026. case 0x200 ... 0x2ff:
  1027. data = 0;
  1028. break;
  1029. case 0xcd: /* fsb frequency */
  1030. data = 3;
  1031. break;
  1032. case MSR_IA32_APICBASE:
  1033. data = vcpu->apic_base;
  1034. break;
  1035. case MSR_IA32_MISC_ENABLE:
  1036. data = vcpu->ia32_misc_enable_msr;
  1037. break;
  1038. #ifdef CONFIG_X86_64
  1039. case MSR_EFER:
  1040. data = vcpu->shadow_efer;
  1041. break;
  1042. #endif
  1043. default:
  1044. printk(KERN_ERR "kvm: unhandled rdmsr: 0x%x\n", msr);
  1045. return 1;
  1046. }
  1047. *pdata = data;
  1048. return 0;
  1049. }
  1050. EXPORT_SYMBOL_GPL(kvm_get_msr_common);
  1051. /*
  1052. * Reads an msr value (of 'msr_index') into 'pdata'.
  1053. * Returns 0 on success, non-0 otherwise.
  1054. * Assumes vcpu_load() was already called.
  1055. */
  1056. static int get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
  1057. {
  1058. return kvm_arch_ops->get_msr(vcpu, msr_index, pdata);
  1059. }
  1060. #ifdef CONFIG_X86_64
  1061. static void set_efer(struct kvm_vcpu *vcpu, u64 efer)
  1062. {
  1063. if (efer & EFER_RESERVED_BITS) {
  1064. printk(KERN_DEBUG "set_efer: 0x%llx #GP, reserved bits\n",
  1065. efer);
  1066. inject_gp(vcpu);
  1067. return;
  1068. }
  1069. if (is_paging(vcpu)
  1070. && (vcpu->shadow_efer & EFER_LME) != (efer & EFER_LME)) {
  1071. printk(KERN_DEBUG "set_efer: #GP, change LME while paging\n");
  1072. inject_gp(vcpu);
  1073. return;
  1074. }
  1075. kvm_arch_ops->set_efer(vcpu, efer);
  1076. efer &= ~EFER_LMA;
  1077. efer |= vcpu->shadow_efer & EFER_LMA;
  1078. vcpu->shadow_efer = efer;
  1079. }
  1080. #endif
  1081. int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)
  1082. {
  1083. switch (msr) {
  1084. #ifdef CONFIG_X86_64
  1085. case MSR_EFER:
  1086. set_efer(vcpu, data);
  1087. break;
  1088. #endif
  1089. case MSR_IA32_MC0_STATUS:
  1090. printk(KERN_WARNING "%s: MSR_IA32_MC0_STATUS 0x%llx, nop\n",
  1091. __FUNCTION__, data);
  1092. break;
  1093. case MSR_IA32_UCODE_REV:
  1094. case MSR_IA32_UCODE_WRITE:
  1095. case 0x200 ... 0x2ff: /* MTRRs */
  1096. break;
  1097. case MSR_IA32_APICBASE:
  1098. vcpu->apic_base = data;
  1099. break;
  1100. case MSR_IA32_MISC_ENABLE:
  1101. vcpu->ia32_misc_enable_msr = data;
  1102. break;
  1103. default:
  1104. printk(KERN_ERR "kvm: unhandled wrmsr: 0x%x\n", msr);
  1105. return 1;
  1106. }
  1107. return 0;
  1108. }
  1109. EXPORT_SYMBOL_GPL(kvm_set_msr_common);
  1110. /*
  1111. * Writes msr value into into the appropriate "register".
  1112. * Returns 0 on success, non-0 otherwise.
  1113. * Assumes vcpu_load() was already called.
  1114. */
  1115. static int set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
  1116. {
  1117. return kvm_arch_ops->set_msr(vcpu, msr_index, data);
  1118. }
  1119. void kvm_resched(struct kvm_vcpu *vcpu)
  1120. {
  1121. vcpu_put(vcpu);
  1122. cond_resched();
  1123. /* Cannot fail - no vcpu unplug yet. */
  1124. vcpu_load(vcpu->kvm, vcpu_slot(vcpu));
  1125. }
  1126. EXPORT_SYMBOL_GPL(kvm_resched);
  1127. void load_msrs(struct vmx_msr_entry *e, int n)
  1128. {
  1129. int i;
  1130. for (i = 0; i < n; ++i)
  1131. wrmsrl(e[i].index, e[i].data);
  1132. }
  1133. EXPORT_SYMBOL_GPL(load_msrs);
  1134. void save_msrs(struct vmx_msr_entry *e, int n)
  1135. {
  1136. int i;
  1137. for (i = 0; i < n; ++i)
  1138. rdmsrl(e[i].index, e[i].data);
  1139. }
  1140. EXPORT_SYMBOL_GPL(save_msrs);
  1141. static int kvm_dev_ioctl_run(struct kvm *kvm, struct kvm_run *kvm_run)
  1142. {
  1143. struct kvm_vcpu *vcpu;
  1144. int r;
  1145. if (!valid_vcpu(kvm_run->vcpu))
  1146. return -EINVAL;
  1147. vcpu = vcpu_load(kvm, kvm_run->vcpu);
  1148. if (!vcpu)
  1149. return -ENOENT;
  1150. /* re-sync apic's tpr */
  1151. vcpu->cr8 = kvm_run->cr8;
  1152. if (kvm_run->emulated) {
  1153. kvm_arch_ops->skip_emulated_instruction(vcpu);
  1154. kvm_run->emulated = 0;
  1155. }
  1156. if (kvm_run->mmio_completed) {
  1157. memcpy(vcpu->mmio_data, kvm_run->mmio.data, 8);
  1158. vcpu->mmio_read_completed = 1;
  1159. }
  1160. vcpu->mmio_needed = 0;
  1161. r = kvm_arch_ops->run(vcpu, kvm_run);
  1162. vcpu_put(vcpu);
  1163. return r;
  1164. }
  1165. static int kvm_dev_ioctl_get_regs(struct kvm *kvm, struct kvm_regs *regs)
  1166. {
  1167. struct kvm_vcpu *vcpu;
  1168. if (!valid_vcpu(regs->vcpu))
  1169. return -EINVAL;
  1170. vcpu = vcpu_load(kvm, regs->vcpu);
  1171. if (!vcpu)
  1172. return -ENOENT;
  1173. kvm_arch_ops->cache_regs(vcpu);
  1174. regs->rax = vcpu->regs[VCPU_REGS_RAX];
  1175. regs->rbx = vcpu->regs[VCPU_REGS_RBX];
  1176. regs->rcx = vcpu->regs[VCPU_REGS_RCX];
  1177. regs->rdx = vcpu->regs[VCPU_REGS_RDX];
  1178. regs->rsi = vcpu->regs[VCPU_REGS_RSI];
  1179. regs->rdi = vcpu->regs[VCPU_REGS_RDI];
  1180. regs->rsp = vcpu->regs[VCPU_REGS_RSP];
  1181. regs->rbp = vcpu->regs[VCPU_REGS_RBP];
  1182. #ifdef CONFIG_X86_64
  1183. regs->r8 = vcpu->regs[VCPU_REGS_R8];
  1184. regs->r9 = vcpu->regs[VCPU_REGS_R9];
  1185. regs->r10 = vcpu->regs[VCPU_REGS_R10];
  1186. regs->r11 = vcpu->regs[VCPU_REGS_R11];
  1187. regs->r12 = vcpu->regs[VCPU_REGS_R12];
  1188. regs->r13 = vcpu->regs[VCPU_REGS_R13];
  1189. regs->r14 = vcpu->regs[VCPU_REGS_R14];
  1190. regs->r15 = vcpu->regs[VCPU_REGS_R15];
  1191. #endif
  1192. regs->rip = vcpu->rip;
  1193. regs->rflags = kvm_arch_ops->get_rflags(vcpu);
  1194. /*
  1195. * Don't leak debug flags in case they were set for guest debugging
  1196. */
  1197. if (vcpu->guest_debug.enabled && vcpu->guest_debug.singlestep)
  1198. regs->rflags &= ~(X86_EFLAGS_TF | X86_EFLAGS_RF);
  1199. vcpu_put(vcpu);
  1200. return 0;
  1201. }
  1202. static int kvm_dev_ioctl_set_regs(struct kvm *kvm, struct kvm_regs *regs)
  1203. {
  1204. struct kvm_vcpu *vcpu;
  1205. if (!valid_vcpu(regs->vcpu))
  1206. return -EINVAL;
  1207. vcpu = vcpu_load(kvm, regs->vcpu);
  1208. if (!vcpu)
  1209. return -ENOENT;
  1210. vcpu->regs[VCPU_REGS_RAX] = regs->rax;
  1211. vcpu->regs[VCPU_REGS_RBX] = regs->rbx;
  1212. vcpu->regs[VCPU_REGS_RCX] = regs->rcx;
  1213. vcpu->regs[VCPU_REGS_RDX] = regs->rdx;
  1214. vcpu->regs[VCPU_REGS_RSI] = regs->rsi;
  1215. vcpu->regs[VCPU_REGS_RDI] = regs->rdi;
  1216. vcpu->regs[VCPU_REGS_RSP] = regs->rsp;
  1217. vcpu->regs[VCPU_REGS_RBP] = regs->rbp;
  1218. #ifdef CONFIG_X86_64
  1219. vcpu->regs[VCPU_REGS_R8] = regs->r8;
  1220. vcpu->regs[VCPU_REGS_R9] = regs->r9;
  1221. vcpu->regs[VCPU_REGS_R10] = regs->r10;
  1222. vcpu->regs[VCPU_REGS_R11] = regs->r11;
  1223. vcpu->regs[VCPU_REGS_R12] = regs->r12;
  1224. vcpu->regs[VCPU_REGS_R13] = regs->r13;
  1225. vcpu->regs[VCPU_REGS_R14] = regs->r14;
  1226. vcpu->regs[VCPU_REGS_R15] = regs->r15;
  1227. #endif
  1228. vcpu->rip = regs->rip;
  1229. kvm_arch_ops->set_rflags(vcpu, regs->rflags);
  1230. kvm_arch_ops->decache_regs(vcpu);
  1231. vcpu_put(vcpu);
  1232. return 0;
  1233. }
  1234. static void get_segment(struct kvm_vcpu *vcpu,
  1235. struct kvm_segment *var, int seg)
  1236. {
  1237. return kvm_arch_ops->get_segment(vcpu, var, seg);
  1238. }
  1239. static int kvm_dev_ioctl_get_sregs(struct kvm *kvm, struct kvm_sregs *sregs)
  1240. {
  1241. struct kvm_vcpu *vcpu;
  1242. struct descriptor_table dt;
  1243. if (!valid_vcpu(sregs->vcpu))
  1244. return -EINVAL;
  1245. vcpu = vcpu_load(kvm, sregs->vcpu);
  1246. if (!vcpu)
  1247. return -ENOENT;
  1248. get_segment(vcpu, &sregs->cs, VCPU_SREG_CS);
  1249. get_segment(vcpu, &sregs->ds, VCPU_SREG_DS);
  1250. get_segment(vcpu, &sregs->es, VCPU_SREG_ES);
  1251. get_segment(vcpu, &sregs->fs, VCPU_SREG_FS);
  1252. get_segment(vcpu, &sregs->gs, VCPU_SREG_GS);
  1253. get_segment(vcpu, &sregs->ss, VCPU_SREG_SS);
  1254. get_segment(vcpu, &sregs->tr, VCPU_SREG_TR);
  1255. get_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR);
  1256. kvm_arch_ops->get_idt(vcpu, &dt);
  1257. sregs->idt.limit = dt.limit;
  1258. sregs->idt.base = dt.base;
  1259. kvm_arch_ops->get_gdt(vcpu, &dt);
  1260. sregs->gdt.limit = dt.limit;
  1261. sregs->gdt.base = dt.base;
  1262. kvm_arch_ops->decache_cr0_cr4_guest_bits(vcpu);
  1263. sregs->cr0 = vcpu->cr0;
  1264. sregs->cr2 = vcpu->cr2;
  1265. sregs->cr3 = vcpu->cr3;
  1266. sregs->cr4 = vcpu->cr4;
  1267. sregs->cr8 = vcpu->cr8;
  1268. sregs->efer = vcpu->shadow_efer;
  1269. sregs->apic_base = vcpu->apic_base;
  1270. memcpy(sregs->interrupt_bitmap, vcpu->irq_pending,
  1271. sizeof sregs->interrupt_bitmap);
  1272. vcpu_put(vcpu);
  1273. return 0;
  1274. }
  1275. static void set_segment(struct kvm_vcpu *vcpu,
  1276. struct kvm_segment *var, int seg)
  1277. {
  1278. return kvm_arch_ops->set_segment(vcpu, var, seg);
  1279. }
  1280. static int kvm_dev_ioctl_set_sregs(struct kvm *kvm, struct kvm_sregs *sregs)
  1281. {
  1282. struct kvm_vcpu *vcpu;
  1283. int mmu_reset_needed = 0;
  1284. int i;
  1285. struct descriptor_table dt;
  1286. if (!valid_vcpu(sregs->vcpu))
  1287. return -EINVAL;
  1288. vcpu = vcpu_load(kvm, sregs->vcpu);
  1289. if (!vcpu)
  1290. return -ENOENT;
  1291. set_segment(vcpu, &sregs->cs, VCPU_SREG_CS);
  1292. set_segment(vcpu, &sregs->ds, VCPU_SREG_DS);
  1293. set_segment(vcpu, &sregs->es, VCPU_SREG_ES);
  1294. set_segment(vcpu, &sregs->fs, VCPU_SREG_FS);
  1295. set_segment(vcpu, &sregs->gs, VCPU_SREG_GS);
  1296. set_segment(vcpu, &sregs->ss, VCPU_SREG_SS);
  1297. set_segment(vcpu, &sregs->tr, VCPU_SREG_TR);
  1298. set_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR);
  1299. dt.limit = sregs->idt.limit;
  1300. dt.base = sregs->idt.base;
  1301. kvm_arch_ops->set_idt(vcpu, &dt);
  1302. dt.limit = sregs->gdt.limit;
  1303. dt.base = sregs->gdt.base;
  1304. kvm_arch_ops->set_gdt(vcpu, &dt);
  1305. vcpu->cr2 = sregs->cr2;
  1306. mmu_reset_needed |= vcpu->cr3 != sregs->cr3;
  1307. vcpu->cr3 = sregs->cr3;
  1308. vcpu->cr8 = sregs->cr8;
  1309. mmu_reset_needed |= vcpu->shadow_efer != sregs->efer;
  1310. #ifdef CONFIG_X86_64
  1311. kvm_arch_ops->set_efer(vcpu, sregs->efer);
  1312. #endif
  1313. vcpu->apic_base = sregs->apic_base;
  1314. kvm_arch_ops->decache_cr0_cr4_guest_bits(vcpu);
  1315. mmu_reset_needed |= vcpu->cr0 != sregs->cr0;
  1316. kvm_arch_ops->set_cr0_no_modeswitch(vcpu, sregs->cr0);
  1317. mmu_reset_needed |= vcpu->cr4 != sregs->cr4;
  1318. kvm_arch_ops->set_cr4(vcpu, sregs->cr4);
  1319. if (!is_long_mode(vcpu) && is_pae(vcpu))
  1320. load_pdptrs(vcpu, vcpu->cr3);
  1321. if (mmu_reset_needed)
  1322. kvm_mmu_reset_context(vcpu);
  1323. memcpy(vcpu->irq_pending, sregs->interrupt_bitmap,
  1324. sizeof vcpu->irq_pending);
  1325. vcpu->irq_summary = 0;
  1326. for (i = 0; i < NR_IRQ_WORDS; ++i)
  1327. if (vcpu->irq_pending[i])
  1328. __set_bit(i, &vcpu->irq_summary);
  1329. vcpu_put(vcpu);
  1330. return 0;
  1331. }
  1332. /*
  1333. * List of msr numbers which we expose to userspace through KVM_GET_MSRS
  1334. * and KVM_SET_MSRS, and KVM_GET_MSR_INDEX_LIST.
  1335. *
  1336. * This list is modified at module load time to reflect the
  1337. * capabilities of the host cpu.
  1338. */
  1339. static u32 msrs_to_save[] = {
  1340. MSR_IA32_SYSENTER_CS, MSR_IA32_SYSENTER_ESP, MSR_IA32_SYSENTER_EIP,
  1341. MSR_K6_STAR,
  1342. #ifdef CONFIG_X86_64
  1343. MSR_CSTAR, MSR_KERNEL_GS_BASE, MSR_SYSCALL_MASK, MSR_LSTAR,
  1344. #endif
  1345. MSR_IA32_TIME_STAMP_COUNTER,
  1346. };
  1347. static unsigned num_msrs_to_save;
  1348. static u32 emulated_msrs[] = {
  1349. MSR_IA32_MISC_ENABLE,
  1350. };
  1351. static __init void kvm_init_msr_list(void)
  1352. {
  1353. u32 dummy[2];
  1354. unsigned i, j;
  1355. for (i = j = 0; i < ARRAY_SIZE(msrs_to_save); i++) {
  1356. if (rdmsr_safe(msrs_to_save[i], &dummy[0], &dummy[1]) < 0)
  1357. continue;
  1358. if (j < i)
  1359. msrs_to_save[j] = msrs_to_save[i];
  1360. j++;
  1361. }
  1362. num_msrs_to_save = j;
  1363. }
  1364. /*
  1365. * Adapt set_msr() to msr_io()'s calling convention
  1366. */
  1367. static int do_set_msr(struct kvm_vcpu *vcpu, unsigned index, u64 *data)
  1368. {
  1369. return set_msr(vcpu, index, *data);
  1370. }
  1371. /*
  1372. * Read or write a bunch of msrs. All parameters are kernel addresses.
  1373. *
  1374. * @return number of msrs set successfully.
  1375. */
  1376. static int __msr_io(struct kvm *kvm, struct kvm_msrs *msrs,
  1377. struct kvm_msr_entry *entries,
  1378. int (*do_msr)(struct kvm_vcpu *vcpu,
  1379. unsigned index, u64 *data))
  1380. {
  1381. struct kvm_vcpu *vcpu;
  1382. int i;
  1383. if (!valid_vcpu(msrs->vcpu))
  1384. return -EINVAL;
  1385. vcpu = vcpu_load(kvm, msrs->vcpu);
  1386. if (!vcpu)
  1387. return -ENOENT;
  1388. for (i = 0; i < msrs->nmsrs; ++i)
  1389. if (do_msr(vcpu, entries[i].index, &entries[i].data))
  1390. break;
  1391. vcpu_put(vcpu);
  1392. return i;
  1393. }
  1394. /*
  1395. * Read or write a bunch of msrs. Parameters are user addresses.
  1396. *
  1397. * @return number of msrs set successfully.
  1398. */
  1399. static int msr_io(struct kvm *kvm, struct kvm_msrs __user *user_msrs,
  1400. int (*do_msr)(struct kvm_vcpu *vcpu,
  1401. unsigned index, u64 *data),
  1402. int writeback)
  1403. {
  1404. struct kvm_msrs msrs;
  1405. struct kvm_msr_entry *entries;
  1406. int r, n;
  1407. unsigned size;
  1408. r = -EFAULT;
  1409. if (copy_from_user(&msrs, user_msrs, sizeof msrs))
  1410. goto out;
  1411. r = -E2BIG;
  1412. if (msrs.nmsrs >= MAX_IO_MSRS)
  1413. goto out;
  1414. r = -ENOMEM;
  1415. size = sizeof(struct kvm_msr_entry) * msrs.nmsrs;
  1416. entries = vmalloc(size);
  1417. if (!entries)
  1418. goto out;
  1419. r = -EFAULT;
  1420. if (copy_from_user(entries, user_msrs->entries, size))
  1421. goto out_free;
  1422. r = n = __msr_io(kvm, &msrs, entries, do_msr);
  1423. if (r < 0)
  1424. goto out_free;
  1425. r = -EFAULT;
  1426. if (writeback && copy_to_user(user_msrs->entries, entries, size))
  1427. goto out_free;
  1428. r = n;
  1429. out_free:
  1430. vfree(entries);
  1431. out:
  1432. return r;
  1433. }
  1434. /*
  1435. * Translate a guest virtual address to a guest physical address.
  1436. */
  1437. static int kvm_dev_ioctl_translate(struct kvm *kvm, struct kvm_translation *tr)
  1438. {
  1439. unsigned long vaddr = tr->linear_address;
  1440. struct kvm_vcpu *vcpu;
  1441. gpa_t gpa;
  1442. vcpu = vcpu_load(kvm, tr->vcpu);
  1443. if (!vcpu)
  1444. return -ENOENT;
  1445. spin_lock(&kvm->lock);
  1446. gpa = vcpu->mmu.gva_to_gpa(vcpu, vaddr);
  1447. tr->physical_address = gpa;
  1448. tr->valid = gpa != UNMAPPED_GVA;
  1449. tr->writeable = 1;
  1450. tr->usermode = 0;
  1451. spin_unlock(&kvm->lock);
  1452. vcpu_put(vcpu);
  1453. return 0;
  1454. }
  1455. static int kvm_dev_ioctl_interrupt(struct kvm *kvm, struct kvm_interrupt *irq)
  1456. {
  1457. struct kvm_vcpu *vcpu;
  1458. if (!valid_vcpu(irq->vcpu))
  1459. return -EINVAL;
  1460. if (irq->irq < 0 || irq->irq >= 256)
  1461. return -EINVAL;
  1462. vcpu = vcpu_load(kvm, irq->vcpu);
  1463. if (!vcpu)
  1464. return -ENOENT;
  1465. set_bit(irq->irq, vcpu->irq_pending);
  1466. set_bit(irq->irq / BITS_PER_LONG, &vcpu->irq_summary);
  1467. vcpu_put(vcpu);
  1468. return 0;
  1469. }
  1470. static int kvm_dev_ioctl_debug_guest(struct kvm *kvm,
  1471. struct kvm_debug_guest *dbg)
  1472. {
  1473. struct kvm_vcpu *vcpu;
  1474. int r;
  1475. if (!valid_vcpu(dbg->vcpu))
  1476. return -EINVAL;
  1477. vcpu = vcpu_load(kvm, dbg->vcpu);
  1478. if (!vcpu)
  1479. return -ENOENT;
  1480. r = kvm_arch_ops->set_guest_debug(vcpu, dbg);
  1481. vcpu_put(vcpu);
  1482. return r;
  1483. }
static long kvm_dev_ioctl(struct file *filp,
			  unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm = filp->private_data;
	void __user *argp = (void __user *)arg;
	int r = -EINVAL;

	switch (ioctl) {
	case KVM_GET_API_VERSION:
		r = KVM_API_VERSION;
		break;
	case KVM_CREATE_VCPU: {
		r = kvm_dev_ioctl_create_vcpu(kvm, arg);
		if (r)
			goto out;
		break;
	}
	case KVM_RUN: {
		struct kvm_run kvm_run;

		r = -EFAULT;
		if (copy_from_user(&kvm_run, argp, sizeof kvm_run))
			goto out;
		r = kvm_dev_ioctl_run(kvm, &kvm_run);
		if (r < 0 && r != -EINTR)
			goto out;
		if (copy_to_user(argp, &kvm_run, sizeof kvm_run)) {
			r = -EFAULT;
			goto out;
		}
		break;
	}
	case KVM_GET_REGS: {
		struct kvm_regs kvm_regs;

		r = -EFAULT;
		if (copy_from_user(&kvm_regs, argp, sizeof kvm_regs))
			goto out;
		r = kvm_dev_ioctl_get_regs(kvm, &kvm_regs);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, &kvm_regs, sizeof kvm_regs))
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_REGS: {
		struct kvm_regs kvm_regs;

		r = -EFAULT;
		if (copy_from_user(&kvm_regs, argp, sizeof kvm_regs))
			goto out;
		r = kvm_dev_ioctl_set_regs(kvm, &kvm_regs);
		if (r)
			goto out;
		r = 0;
		break;
	}
	case KVM_GET_SREGS: {
		struct kvm_sregs kvm_sregs;

		r = -EFAULT;
		if (copy_from_user(&kvm_sregs, argp, sizeof kvm_sregs))
			goto out;
		r = kvm_dev_ioctl_get_sregs(kvm, &kvm_sregs);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, &kvm_sregs, sizeof kvm_sregs))
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_SREGS: {
		struct kvm_sregs kvm_sregs;

		r = -EFAULT;
		if (copy_from_user(&kvm_sregs, argp, sizeof kvm_sregs))
			goto out;
		r = kvm_dev_ioctl_set_sregs(kvm, &kvm_sregs);
		if (r)
			goto out;
		r = 0;
		break;
	}
	case KVM_TRANSLATE: {
		struct kvm_translation tr;

		r = -EFAULT;
		if (copy_from_user(&tr, argp, sizeof tr))
			goto out;
		r = kvm_dev_ioctl_translate(kvm, &tr);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, &tr, sizeof tr))
			goto out;
		r = 0;
		break;
	}
	case KVM_INTERRUPT: {
		struct kvm_interrupt irq;

		r = -EFAULT;
		if (copy_from_user(&irq, argp, sizeof irq))
			goto out;
		r = kvm_dev_ioctl_interrupt(kvm, &irq);
		if (r)
			goto out;
		r = 0;
		break;
	}
	case KVM_DEBUG_GUEST: {
		struct kvm_debug_guest dbg;

		r = -EFAULT;
		if (copy_from_user(&dbg, argp, sizeof dbg))
			goto out;
		r = kvm_dev_ioctl_debug_guest(kvm, &dbg);
		if (r)
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_MEMORY_REGION: {
		struct kvm_memory_region kvm_mem;

		r = -EFAULT;
		if (copy_from_user(&kvm_mem, argp, sizeof kvm_mem))
			goto out;
		r = kvm_dev_ioctl_set_memory_region(kvm, &kvm_mem);
		if (r)
			goto out;
		break;
	}
	case KVM_GET_DIRTY_LOG: {
		struct kvm_dirty_log log;

		r = -EFAULT;
		if (copy_from_user(&log, argp, sizeof log))
			goto out;
		r = kvm_dev_ioctl_get_dirty_log(kvm, &log);
		if (r)
			goto out;
		break;
	}
	case KVM_GET_MSRS:
		r = msr_io(kvm, argp, get_msr, 1);
		break;
	case KVM_SET_MSRS:
		r = msr_io(kvm, argp, do_set_msr, 0);
		break;
	case KVM_GET_MSR_INDEX_LIST: {
		struct kvm_msr_list __user *user_msr_list = argp;
		struct kvm_msr_list msr_list;
		unsigned n;

		r = -EFAULT;
		if (copy_from_user(&msr_list, user_msr_list, sizeof msr_list))
			goto out;
		n = msr_list.nmsrs;
		msr_list.nmsrs = num_msrs_to_save + ARRAY_SIZE(emulated_msrs);
		if (copy_to_user(user_msr_list, &msr_list, sizeof msr_list))
			goto out;
		r = -E2BIG;
		if (n < num_msrs_to_save)
			goto out;
		r = -EFAULT;
		if (copy_to_user(user_msr_list->indices, &msrs_to_save,
				 num_msrs_to_save * sizeof(u32)))
			goto out;
		/*
		 * indices is an array of u32, so pointer arithmetic already
		 * scales by the element size; do not multiply by sizeof(u32)
		 * again when skipping past the saved-msr entries.
		 */
		if (copy_to_user(user_msr_list->indices + num_msrs_to_save,
				 &emulated_msrs,
				 ARRAY_SIZE(emulated_msrs) * sizeof(u32)))
			goto out;
		r = 0;
		break;
	}
	default:
		;
	}
out:
	return r;
}

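/*
 * nopage handler for mmap()ed guest memory: translate the fault offset into
 * a guest frame number, look up the owning memory slot and return the
 * backing page (with an extra reference) to the fault handler.
 */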
static struct page *kvm_dev_nopage(struct vm_area_struct *vma,
				   unsigned long address,
				   int *type)
{
	struct kvm *kvm = vma->vm_file->private_data;
	unsigned long pgoff;
	struct kvm_memory_slot *slot;
	struct page *page;

	*type = VM_FAULT_MINOR;
	pgoff = ((address - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
	slot = gfn_to_memslot(kvm, pgoff);
	if (!slot)
		return NOPAGE_SIGBUS;
	page = gfn_to_page(slot, pgoff);
	if (!page)
		return NOPAGE_SIGBUS;
	get_page(page);
	return page;
}

static struct vm_operations_struct kvm_dev_vm_ops = {
	.nopage = kvm_dev_nopage,
};

static int kvm_dev_mmap(struct file *file, struct vm_area_struct *vma)
{
	vma->vm_ops = &kvm_dev_vm_ops;
	return 0;
}

static struct file_operations kvm_chardev_ops = {
	.open		= kvm_dev_open,
	.release	= kvm_dev_release,
	.unlocked_ioctl	= kvm_dev_ioctl,
	.compat_ioctl	= kvm_dev_ioctl,
	.mmap		= kvm_dev_mmap,
};

static struct miscdevice kvm_dev = {
	MISC_DYNAMIC_MINOR,
	"kvm",
	&kvm_chardev_ops,
};

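/*
 * Reboot notifier: leave hardware virtualization mode on all cpus before the
 * machine restarts, since some BIOSes hang otherwise (see the comment below).
 */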
static int kvm_reboot(struct notifier_block *notifier, unsigned long val,
		      void *v)
{
	if (val == SYS_RESTART) {
		/*
		 * Some (well, at least mine) BIOSes hang on reboot if
		 * in vmx root mode.
		 */
		printk(KERN_INFO "kvm: exiting hardware virtualization\n");
		on_each_cpu(kvm_arch_ops->hardware_disable, NULL, 0, 1);
	}
	return NOTIFY_OK;
}

static struct notifier_block kvm_reboot_notifier = {
	.notifier_call = kvm_reboot,
	.priority = 0,
};

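/*
 * Create the "kvm" debugfs directory and one u32 entry per statistics
 * counter; kvm_exit_debug() removes them again on unload.
 */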
static __init void kvm_init_debug(void)
{
	struct kvm_stats_debugfs_item *p;

	debugfs_dir = debugfs_create_dir("kvm", NULL);
	for (p = debugfs_entries; p->name; ++p)
		p->dentry = debugfs_create_u32(p->name, 0444, debugfs_dir,
					       p->data);
}

static void kvm_exit_debug(void)
{
	struct kvm_stats_debugfs_item *p;

	for (p = debugfs_entries; p->name; ++p)
		debugfs_remove(p->dentry);
	debugfs_remove(debugfs_dir);
}

hpa_t bad_page_address;

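/*
 * Called by the architecture-specific module to register its kvm_arch_ops,
 * enable hardware virtualization on all cpus and expose the /dev/kvm
 * character device.
 */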
int kvm_init_arch(struct kvm_arch_ops *ops, struct module *module)
{
	int r;

	if (kvm_arch_ops) {
		printk(KERN_ERR "kvm: already loaded the other module\n");
		return -EEXIST;
	}

	if (!ops->cpu_has_kvm_support()) {
		printk(KERN_ERR "kvm: no hardware support\n");
		return -EOPNOTSUPP;
	}
	if (ops->disabled_by_bios()) {
		printk(KERN_ERR "kvm: disabled by bios\n");
		return -EOPNOTSUPP;
	}

	kvm_arch_ops = ops;

	r = kvm_arch_ops->hardware_setup();
	if (r < 0)
		return r;

	on_each_cpu(kvm_arch_ops->hardware_enable, NULL, 0, 1);
	register_reboot_notifier(&kvm_reboot_notifier);

	kvm_chardev_ops.owner = module;

	r = misc_register(&kvm_dev);
	if (r) {
		printk(KERN_ERR "kvm: misc device register failed\n");
		goto out_free;
	}

	return r;

out_free:
	unregister_reboot_notifier(&kvm_reboot_notifier);
	on_each_cpu(kvm_arch_ops->hardware_disable, NULL, 0, 1);
	kvm_arch_ops->hardware_unsetup();
	return r;
}

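/*
 * Undo kvm_init_arch(): unregister the device and reboot notifier, disable
 * hardware virtualization on all cpus and drop the arch ops pointer.
 */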
void kvm_exit_arch(void)
{
	misc_deregister(&kvm_dev);

	unregister_reboot_notifier(&kvm_reboot_notifier);
	on_each_cpu(kvm_arch_ops->hardware_disable, NULL, 0, 1);
	kvm_arch_ops->hardware_unsetup();
	kvm_arch_ops = NULL;
}

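/*
 * Module init: set up the debugfs statistics, build the MSR save list and
 * allocate the zeroed page exported via bad_page_address.
 */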
static __init int kvm_init(void)
{
	static struct page *bad_page;
	int r = 0;

	kvm_init_debug();

	kvm_init_msr_list();

	if ((bad_page = alloc_page(GFP_KERNEL)) == NULL) {
		r = -ENOMEM;
		goto out;
	}

	bad_page_address = page_to_pfn(bad_page) << PAGE_SHIFT;
	memset(__va(bad_page_address), 0, PAGE_SIZE);

	return r;

out:
	kvm_exit_debug();
	return r;
}

static __exit void kvm_exit(void)
{
	kvm_exit_debug();
	__free_page(pfn_to_page(bad_page_address >> PAGE_SHIFT));
}

module_init(kvm_init)
module_exit(kvm_exit)

EXPORT_SYMBOL_GPL(kvm_init_arch);
EXPORT_SYMBOL_GPL(kvm_exit_arch);