kvm_main.c

  1. /*
  2. * Kernel-based Virtual Machine driver for Linux
  3. *
  4. * This module enables machines with Intel VT-x extensions to run virtual
  5. * machines without emulation or binary translation.
  6. *
  7. * Copyright (C) 2006 Qumranet, Inc.
  8. *
  9. * Authors:
  10. * Avi Kivity <avi@qumranet.com>
  11. * Yaniv Kamay <yaniv@qumranet.com>
  12. *
  13. * This work is licensed under the terms of the GNU GPL, version 2. See
  14. * the COPYING file in the top-level directory.
  15. *
  16. */
  17. #include "kvm.h"
  18. #include <linux/kvm.h>
  19. #include <linux/module.h>
  20. #include <linux/errno.h>
  21. #include <asm/processor.h>
  22. #include <linux/percpu.h>
  23. #include <linux/gfp.h>
  24. #include <asm/msr.h>
  25. #include <linux/mm.h>
  26. #include <linux/miscdevice.h>
  27. #include <linux/vmalloc.h>
  28. #include <asm/uaccess.h>
  29. #include <linux/reboot.h>
  30. #include <asm/io.h>
  31. #include <linux/debugfs.h>
  32. #include <linux/highmem.h>
  33. #include <linux/file.h>
  34. #include <asm/desc.h>
  35. #include <linux/sysdev.h>
  36. #include <linux/cpu.h>
  37. #include "x86_emulate.h"
  38. #include "segment_descriptor.h"
  39. MODULE_AUTHOR("Qumranet");
  40. MODULE_LICENSE("GPL");
  41. static DEFINE_SPINLOCK(kvm_lock);
  42. static LIST_HEAD(vm_list);
  43. struct kvm_arch_ops *kvm_arch_ops;
  44. struct kvm_stat kvm_stat;
  45. EXPORT_SYMBOL_GPL(kvm_stat);
  46. static struct kvm_stats_debugfs_item {
  47. const char *name;
  48. u32 *data;
  49. struct dentry *dentry;
  50. } debugfs_entries[] = {
  51. { "pf_fixed", &kvm_stat.pf_fixed },
  52. { "pf_guest", &kvm_stat.pf_guest },
  53. { "tlb_flush", &kvm_stat.tlb_flush },
  54. { "invlpg", &kvm_stat.invlpg },
  55. { "exits", &kvm_stat.exits },
  56. { "io_exits", &kvm_stat.io_exits },
  57. { "mmio_exits", &kvm_stat.mmio_exits },
  58. { "signal_exits", &kvm_stat.signal_exits },
  59. { "irq_window", &kvm_stat.irq_window_exits },
  60. { "halt_exits", &kvm_stat.halt_exits },
  61. { "request_irq", &kvm_stat.request_irq_exits },
  62. { "irq_exits", &kvm_stat.irq_exits },
  63. { NULL, NULL }
  64. };
  65. static struct dentry *debugfs_dir;
  66. #define MAX_IO_MSRS 256
  67. #define CR0_RESEVED_BITS 0xffffffff1ffaffc0ULL
  68. #define LMSW_GUEST_MASK 0x0eULL
  69. #define CR4_RESEVED_BITS (~((1ULL << 11) - 1))
  70. #define CR8_RESEVED_BITS (~0x0fULL)
  71. #define EFER_RESERVED_BITS 0xfffffffffffff2fe
  72. #ifdef CONFIG_X86_64
  73. // LDT or TSS descriptor in the GDT. 16 bytes.
  74. struct segment_descriptor_64 {
  75. struct segment_descriptor s;
  76. u32 base_higher;
  77. u32 pad_zero;
  78. };
  79. #endif
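/*
 * segment_base() walks the host GDT (or, for selectors with the TI bit
 * set, the LDT) and returns the linear base address encoded in the
 * descriptor.  On x86_64 a system descriptor (LDT or TSS) is 16 bytes,
 * so the high 32 bits of the base are taken from its second half.
 */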
  80. unsigned long segment_base(u16 selector)
  81. {
  82. struct descriptor_table gdt;
  83. struct segment_descriptor *d;
  84. unsigned long table_base;
  85. typedef unsigned long ul;
  86. unsigned long v;
  87. if (selector == 0)
  88. return 0;
  89. asm ("sgdt %0" : "=m"(gdt));
  90. table_base = gdt.base;
  91. if (selector & 4) { /* from ldt */
  92. u16 ldt_selector;
  93. asm ("sldt %0" : "=g"(ldt_selector));
  94. table_base = segment_base(ldt_selector);
  95. }
  96. d = (struct segment_descriptor *)(table_base + (selector & ~7));
  97. v = d->base_low | ((ul)d->base_mid << 16) | ((ul)d->base_high << 24);
  98. #ifdef CONFIG_X86_64
  99. if (d->system == 0
  100. && (d->type == 2 || d->type == 9 || d->type == 11))
  101. v |= ((ul)((struct segment_descriptor_64 *)d)->base_higher) << 32;
  102. #endif
  103. return v;
  104. }
  105. EXPORT_SYMBOL_GPL(segment_base);
  106. static inline int valid_vcpu(int n)
  107. {
  108. return likely(n >= 0 && n < KVM_MAX_VCPUS);
  109. }
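/*
 * kvm_read_guest()/kvm_write_guest() copy between a guest-virtual range
 * and a kernel buffer.  They walk the range page by page, translating
 * each page with gva_to_hpa() and mapping it with kmap_atomic().  The
 * return value is the number of bytes actually copied; it is short if a
 * translation fails part-way through.
 */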
  110. int kvm_read_guest(struct kvm_vcpu *vcpu, gva_t addr, unsigned long size,
  111. void *dest)
  112. {
  113. unsigned char *host_buf = dest;
  114. unsigned long req_size = size;
  115. while (size) {
  116. hpa_t paddr;
  117. unsigned now;
  118. unsigned offset;
  119. hva_t guest_buf;
  120. paddr = gva_to_hpa(vcpu, addr);
  121. if (is_error_hpa(paddr))
  122. break;
  123. guest_buf = (hva_t)kmap_atomic(
  124. pfn_to_page(paddr >> PAGE_SHIFT),
  125. KM_USER0);
  126. offset = addr & ~PAGE_MASK;
  127. guest_buf |= offset;
  128. now = min(size, PAGE_SIZE - offset);
  129. memcpy(host_buf, (void*)guest_buf, now);
  130. host_buf += now;
  131. addr += now;
  132. size -= now;
  133. kunmap_atomic((void *)(guest_buf & PAGE_MASK), KM_USER0);
  134. }
  135. return req_size - size;
  136. }
  137. EXPORT_SYMBOL_GPL(kvm_read_guest);
  138. int kvm_write_guest(struct kvm_vcpu *vcpu, gva_t addr, unsigned long size,
  139. void *data)
  140. {
  141. unsigned char *host_buf = data;
  142. unsigned long req_size = size;
  143. while (size) {
  144. hpa_t paddr;
  145. unsigned now;
  146. unsigned offset;
  147. hva_t guest_buf;
  148. paddr = gva_to_hpa(vcpu, addr);
  149. if (is_error_hpa(paddr))
  150. break;
  151. guest_buf = (hva_t)kmap_atomic(
  152. pfn_to_page(paddr >> PAGE_SHIFT), KM_USER0);
  153. offset = addr & ~PAGE_MASK;
  154. guest_buf |= offset;
  155. now = min(size, PAGE_SIZE - offset);
  156. memcpy((void*)guest_buf, host_buf, now);
  157. host_buf += now;
  158. addr += now;
  159. size -= now;
  160. kunmap_atomic((void *)(guest_buf & PAGE_MASK), KM_USER0);
  161. }
  162. return req_size - size;
  163. }
  164. EXPORT_SYMBOL_GPL(kvm_write_guest);
  165. static int vcpu_slot(struct kvm_vcpu *vcpu)
  166. {
  167. return vcpu - vcpu->kvm->vcpus;
  168. }
  169. /*
  170. * Switches to specified vcpu, until a matching vcpu_put()
  171. */
  172. static struct kvm_vcpu *vcpu_load(struct kvm *kvm, int vcpu_slot)
  173. {
  174. struct kvm_vcpu *vcpu = &kvm->vcpus[vcpu_slot];
  175. mutex_lock(&vcpu->mutex);
  176. if (unlikely(!vcpu->vmcs)) {
  177. mutex_unlock(&vcpu->mutex);
  178. return NULL;
  179. }
  180. return kvm_arch_ops->vcpu_load(vcpu);
  181. }
  182. static void vcpu_put(struct kvm_vcpu *vcpu)
  183. {
  184. kvm_arch_ops->vcpu_put(vcpu);
  185. mutex_unlock(&vcpu->mutex);
  186. }
  187. static int kvm_dev_open(struct inode *inode, struct file *filp)
  188. {
  189. struct kvm *kvm = kzalloc(sizeof(struct kvm), GFP_KERNEL);
  190. int i;
  191. if (!kvm)
  192. return -ENOMEM;
  193. spin_lock_init(&kvm->lock);
  194. INIT_LIST_HEAD(&kvm->active_mmu_pages);
  195. for (i = 0; i < KVM_MAX_VCPUS; ++i) {
  196. struct kvm_vcpu *vcpu = &kvm->vcpus[i];
  197. mutex_init(&vcpu->mutex);
  198. vcpu->cpu = -1;
  199. vcpu->kvm = kvm;
  200. vcpu->mmu.root_hpa = INVALID_PAGE;
  201. INIT_LIST_HEAD(&vcpu->free_pages);
  202. spin_lock(&kvm_lock);
  203. list_add(&kvm->vm_list, &vm_list);
  204. spin_unlock(&kvm_lock);
  205. }
  206. filp->private_data = kvm;
  207. return 0;
  208. }
  209. /*
  210. * Free any memory in @free but not in @dont.
  211. */
  212. static void kvm_free_physmem_slot(struct kvm_memory_slot *free,
  213. struct kvm_memory_slot *dont)
  214. {
  215. int i;
  216. if (!dont || free->phys_mem != dont->phys_mem)
  217. if (free->phys_mem) {
  218. for (i = 0; i < free->npages; ++i)
  219. if (free->phys_mem[i])
  220. __free_page(free->phys_mem[i]);
  221. vfree(free->phys_mem);
  222. }
  223. if (!dont || free->dirty_bitmap != dont->dirty_bitmap)
  224. vfree(free->dirty_bitmap);
  225. free->phys_mem = NULL;
  226. free->npages = 0;
  227. free->dirty_bitmap = NULL;
  228. }
  229. static void kvm_free_physmem(struct kvm *kvm)
  230. {
  231. int i;
  232. for (i = 0; i < kvm->nmemslots; ++i)
  233. kvm_free_physmem_slot(&kvm->memslots[i], NULL);
  234. }
  235. static void kvm_free_vcpu(struct kvm_vcpu *vcpu)
  236. {
  237. if (!vcpu_load(vcpu->kvm, vcpu_slot(vcpu)))
  238. return;
  239. kvm_mmu_destroy(vcpu);
  240. vcpu_put(vcpu);
  241. kvm_arch_ops->vcpu_free(vcpu);
  242. }
  243. static void kvm_free_vcpus(struct kvm *kvm)
  244. {
  245. unsigned int i;
  246. for (i = 0; i < KVM_MAX_VCPUS; ++i)
  247. kvm_free_vcpu(&kvm->vcpus[i]);
  248. }
  249. static int kvm_dev_release(struct inode *inode, struct file *filp)
  250. {
  251. struct kvm *kvm = filp->private_data;
  252. spin_lock(&kvm_lock);
  253. list_del(&kvm->vm_list);
  254. spin_unlock(&kvm_lock);
  255. kvm_free_vcpus(kvm);
  256. kvm_free_physmem(kvm);
  257. kfree(kvm);
  258. return 0;
  259. }
  260. static void inject_gp(struct kvm_vcpu *vcpu)
  261. {
  262. kvm_arch_ops->inject_gp(vcpu, 0);
  263. }
  264. /*
  265. * Load the pae pdptrs. Return true if they are all valid.
  266. */
  267. static int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3)
  268. {
  269. gfn_t pdpt_gfn = cr3 >> PAGE_SHIFT;
  270. unsigned offset = ((cr3 & (PAGE_SIZE-1)) >> 5) << 2;
  271. int i;
  272. u64 pdpte;
  273. u64 *pdpt;
  274. int ret;
  275. struct kvm_memory_slot *memslot;
  276. spin_lock(&vcpu->kvm->lock);
  277. memslot = gfn_to_memslot(vcpu->kvm, pdpt_gfn);
  278. /* FIXME: !memslot - emulate? 0xff? */
  279. pdpt = kmap_atomic(gfn_to_page(memslot, pdpt_gfn), KM_USER0);
  280. ret = 1;
  281. for (i = 0; i < 4; ++i) {
  282. pdpte = pdpt[offset + i];
  283. if ((pdpte & 1) && (pdpte & 0xfffffff0000001e6ull)) {
  284. ret = 0;
  285. goto out;
  286. }
  287. }
  288. for (i = 0; i < 4; ++i)
  289. vcpu->pdptrs[i] = pdpt[offset + i];
  290. out:
  291. kunmap_atomic(pdpt, KM_USER0);
  292. spin_unlock(&vcpu->kvm->lock);
  293. return ret;
  294. }
  295. void set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
  296. {
  297. if (cr0 & CR0_RESEVED_BITS) {
  298. printk(KERN_DEBUG "set_cr0: 0x%lx #GP, reserved bits 0x%lx\n",
  299. cr0, vcpu->cr0);
  300. inject_gp(vcpu);
  301. return;
  302. }
  303. if ((cr0 & CR0_NW_MASK) && !(cr0 & CR0_CD_MASK)) {
  304. printk(KERN_DEBUG "set_cr0: #GP, CD == 0 && NW == 1\n");
  305. inject_gp(vcpu);
  306. return;
  307. }
  308. if ((cr0 & CR0_PG_MASK) && !(cr0 & CR0_PE_MASK)) {
  309. printk(KERN_DEBUG "set_cr0: #GP, set PG flag "
  310. "and a clear PE flag\n");
  311. inject_gp(vcpu);
  312. return;
  313. }
  314. if (!is_paging(vcpu) && (cr0 & CR0_PG_MASK)) {
  315. #ifdef CONFIG_X86_64
  316. if ((vcpu->shadow_efer & EFER_LME)) {
  317. int cs_db, cs_l;
  318. if (!is_pae(vcpu)) {
  319. printk(KERN_DEBUG "set_cr0: #GP, start paging "
  320. "in long mode while PAE is disabled\n");
  321. inject_gp(vcpu);
  322. return;
  323. }
  324. kvm_arch_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
  325. if (cs_l) {
  326. printk(KERN_DEBUG "set_cr0: #GP, start paging "
  327. "in long mode while CS.L == 1\n");
  328. inject_gp(vcpu);
  329. return;
  330. }
  331. } else
  332. #endif
  333. if (is_pae(vcpu) && !load_pdptrs(vcpu, vcpu->cr3)) {
  334. printk(KERN_DEBUG "set_cr0: #GP, pdptrs "
  335. "reserved bits\n");
  336. inject_gp(vcpu);
  337. return;
  338. }
  339. }
  340. kvm_arch_ops->set_cr0(vcpu, cr0);
  341. vcpu->cr0 = cr0;
  342. spin_lock(&vcpu->kvm->lock);
  343. kvm_mmu_reset_context(vcpu);
  344. spin_unlock(&vcpu->kvm->lock);
  345. return;
  346. }
  347. EXPORT_SYMBOL_GPL(set_cr0);
  348. void lmsw(struct kvm_vcpu *vcpu, unsigned long msw)
  349. {
  350. kvm_arch_ops->decache_cr0_cr4_guest_bits(vcpu);
  351. set_cr0(vcpu, (vcpu->cr0 & ~0x0ful) | (msw & 0x0f));
  352. }
  353. EXPORT_SYMBOL_GPL(lmsw);
  354. void set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
  355. {
  356. if (cr4 & CR4_RESEVED_BITS) {
  357. printk(KERN_DEBUG "set_cr4: #GP, reserved bits\n");
  358. inject_gp(vcpu);
  359. return;
  360. }
  361. if (is_long_mode(vcpu)) {
  362. if (!(cr4 & CR4_PAE_MASK)) {
  363. printk(KERN_DEBUG "set_cr4: #GP, clearing PAE while "
  364. "in long mode\n");
  365. inject_gp(vcpu);
  366. return;
  367. }
  368. } else if (is_paging(vcpu) && !is_pae(vcpu) && (cr4 & CR4_PAE_MASK)
  369. && !load_pdptrs(vcpu, vcpu->cr3)) {
  370. printk(KERN_DEBUG "set_cr4: #GP, pdptrs reserved bits\n");
  371. inject_gp(vcpu);
  372. }
  373. if (cr4 & CR4_VMXE_MASK) {
  374. printk(KERN_DEBUG "set_cr4: #GP, setting VMXE\n");
  375. inject_gp(vcpu);
  376. return;
  377. }
  378. kvm_arch_ops->set_cr4(vcpu, cr4);
  379. spin_lock(&vcpu->kvm->lock);
  380. kvm_mmu_reset_context(vcpu);
  381. spin_unlock(&vcpu->kvm->lock);
  382. }
  383. EXPORT_SYMBOL_GPL(set_cr4);
  384. void set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
  385. {
  386. if (is_long_mode(vcpu)) {
  387. if (cr3 & CR3_L_MODE_RESEVED_BITS) {
  388. printk(KERN_DEBUG "set_cr3: #GP, reserved bits\n");
  389. inject_gp(vcpu);
  390. return;
  391. }
  392. } else {
  393. if (cr3 & CR3_RESEVED_BITS) {
  394. printk(KERN_DEBUG "set_cr3: #GP, reserved bits\n");
  395. inject_gp(vcpu);
  396. return;
  397. }
  398. if (is_paging(vcpu) && is_pae(vcpu) &&
  399. !load_pdptrs(vcpu, cr3)) {
  400. printk(KERN_DEBUG "set_cr3: #GP, pdptrs "
  401. "reserved bits\n");
  402. inject_gp(vcpu);
  403. return;
  404. }
  405. }
  406. vcpu->cr3 = cr3;
  407. spin_lock(&vcpu->kvm->lock);
  408. /*
  409. * Does the new cr3 value map to physical memory? (Note, we
  410. * catch an invalid cr3 even in real-mode, because it would
  411. * cause trouble later on when we turn on paging anyway.)
  412. *
  413. * A real CPU would silently accept an invalid cr3 and would
  414. * attempt to use it - with largely undefined (and often hard
  415. * to debug) behavior on the guest side.
  416. */
  417. if (unlikely(!gfn_to_memslot(vcpu->kvm, cr3 >> PAGE_SHIFT)))
  418. inject_gp(vcpu);
  419. else
  420. vcpu->mmu.new_cr3(vcpu);
  421. spin_unlock(&vcpu->kvm->lock);
  422. }
  423. EXPORT_SYMBOL_GPL(set_cr3);
  424. void set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8)
  425. {
  426. if (cr8 & CR8_RESEVED_BITS) {
  427. printk(KERN_DEBUG "set_cr8: #GP, reserved bits 0x%lx\n", cr8);
  428. inject_gp(vcpu);
  429. return;
  430. }
  431. vcpu->cr8 = cr8;
  432. }
  433. EXPORT_SYMBOL_GPL(set_cr8);
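/*
 * fx_init() seeds the guest FPU/SSE image: the host FPU state is saved,
 * fpu_init() establishes a clean state which is captured as the guest
 * image, the host state is restored, and MXCSR in the guest image is
 * set to its reset value (0x1f80).  The rest of the image is zeroed.
 */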
  434. void fx_init(struct kvm_vcpu *vcpu)
  435. {
  436. struct __attribute__ ((__packed__)) fx_image_s {
  437. u16 control; //fcw
  438. u16 status; //fsw
  439. u16 tag; // ftw
  440. u16 opcode; //fop
  441. u64 ip; // fpu ip
  442. u64 operand;// fpu dp
  443. u32 mxcsr;
  444. u32 mxcsr_mask;
  445. } *fx_image;
  446. fx_save(vcpu->host_fx_image);
  447. fpu_init();
  448. fx_save(vcpu->guest_fx_image);
  449. fx_restore(vcpu->host_fx_image);
  450. fx_image = (struct fx_image_s *)vcpu->guest_fx_image;
  451. fx_image->mxcsr = 0x1f80;
  452. memset(vcpu->guest_fx_image + sizeof(struct fx_image_s),
  453. 0, FX_IMAGE_SIZE - sizeof(struct fx_image_s));
  454. }
  455. EXPORT_SYMBOL_GPL(fx_init);
  456. /*
  457. * Creates some virtual cpus. Good luck creating more than one.
  458. */
  459. static int kvm_dev_ioctl_create_vcpu(struct kvm *kvm, int n)
  460. {
  461. int r;
  462. struct kvm_vcpu *vcpu;
  463. r = -EINVAL;
  464. if (!valid_vcpu(n))
  465. goto out;
  466. vcpu = &kvm->vcpus[n];
  467. mutex_lock(&vcpu->mutex);
  468. if (vcpu->vmcs) {
  469. mutex_unlock(&vcpu->mutex);
  470. return -EEXIST;
  471. }
  472. vcpu->host_fx_image = (char*)ALIGN((hva_t)vcpu->fx_buf,
  473. FX_IMAGE_ALIGN);
  474. vcpu->guest_fx_image = vcpu->host_fx_image + FX_IMAGE_SIZE;
  475. r = kvm_arch_ops->vcpu_create(vcpu);
  476. if (r < 0)
  477. goto out_free_vcpus;
  478. r = kvm_mmu_create(vcpu);
  479. if (r < 0)
  480. goto out_free_vcpus;
  481. kvm_arch_ops->vcpu_load(vcpu);
  482. r = kvm_mmu_setup(vcpu);
  483. if (r >= 0)
  484. r = kvm_arch_ops->vcpu_setup(vcpu);
  485. vcpu_put(vcpu);
  486. if (r < 0)
  487. goto out_free_vcpus;
  488. return 0;
  489. out_free_vcpus:
  490. kvm_free_vcpu(vcpu);
  491. mutex_unlock(&vcpu->mutex);
  492. out:
  493. return r;
  494. }
  495. /*
  496. * Allocate some memory and give it an address in the guest physical address
  497. * space.
  498. *
  499. * Discontiguous memory is allowed, mostly for framebuffers.
  500. */
  501. static int kvm_dev_ioctl_set_memory_region(struct kvm *kvm,
  502. struct kvm_memory_region *mem)
  503. {
  504. int r;
  505. gfn_t base_gfn;
  506. unsigned long npages;
  507. unsigned long i;
  508. struct kvm_memory_slot *memslot;
  509. struct kvm_memory_slot old, new;
  510. int memory_config_version;
  511. r = -EINVAL;
  512. /* General sanity checks */
  513. if (mem->memory_size & (PAGE_SIZE - 1))
  514. goto out;
  515. if (mem->guest_phys_addr & (PAGE_SIZE - 1))
  516. goto out;
  517. if (mem->slot >= KVM_MEMORY_SLOTS)
  518. goto out;
  519. if (mem->guest_phys_addr + mem->memory_size < mem->guest_phys_addr)
  520. goto out;
  521. memslot = &kvm->memslots[mem->slot];
  522. base_gfn = mem->guest_phys_addr >> PAGE_SHIFT;
  523. npages = mem->memory_size >> PAGE_SHIFT;
  524. if (!npages)
  525. mem->flags &= ~KVM_MEM_LOG_DIRTY_PAGES;
  526. raced:
  527. spin_lock(&kvm->lock);
  528. memory_config_version = kvm->memory_config_version;
  529. new = old = *memslot;
  530. new.base_gfn = base_gfn;
  531. new.npages = npages;
  532. new.flags = mem->flags;
  533. /* Disallow changing a memory slot's size. */
  534. r = -EINVAL;
  535. if (npages && old.npages && npages != old.npages)
  536. goto out_unlock;
  537. /* Check for overlaps */
  538. r = -EEXIST;
  539. for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
  540. struct kvm_memory_slot *s = &kvm->memslots[i];
  541. if (s == memslot)
  542. continue;
  543. if (!((base_gfn + npages <= s->base_gfn) ||
  544. (base_gfn >= s->base_gfn + s->npages)))
  545. goto out_unlock;
  546. }
  547. /*
  548. * Do memory allocations outside the lock. memory_config_version will
  549. * detect any races.
  550. */
  551. spin_unlock(&kvm->lock);
  552. /* Deallocate if slot is being removed */
  553. if (!npages)
  554. new.phys_mem = NULL;
  555. /* Free page dirty bitmap if unneeded */
  556. if (!(new.flags & KVM_MEM_LOG_DIRTY_PAGES))
  557. new.dirty_bitmap = NULL;
  558. r = -ENOMEM;
  559. /* Allocate if a slot is being created */
  560. if (npages && !new.phys_mem) {
  561. new.phys_mem = vmalloc(npages * sizeof(struct page *));
  562. if (!new.phys_mem)
  563. goto out_free;
  564. memset(new.phys_mem, 0, npages * sizeof(struct page *));
  565. for (i = 0; i < npages; ++i) {
  566. new.phys_mem[i] = alloc_page(GFP_HIGHUSER
  567. | __GFP_ZERO);
  568. if (!new.phys_mem[i])
  569. goto out_free;
  570. set_page_private(new.phys_mem[i],0);
  571. }
  572. }
  573. /* Allocate page dirty bitmap if needed */
  574. if ((new.flags & KVM_MEM_LOG_DIRTY_PAGES) && !new.dirty_bitmap) {
  575. unsigned dirty_bytes = ALIGN(npages, BITS_PER_LONG) / 8;
  576. new.dirty_bitmap = vmalloc(dirty_bytes);
  577. if (!new.dirty_bitmap)
  578. goto out_free;
  579. memset(new.dirty_bitmap, 0, dirty_bytes);
  580. }
  581. spin_lock(&kvm->lock);
  582. if (memory_config_version != kvm->memory_config_version) {
  583. spin_unlock(&kvm->lock);
  584. kvm_free_physmem_slot(&new, &old);
  585. goto raced;
  586. }
  587. r = -EAGAIN;
  588. if (kvm->busy)
  589. goto out_unlock;
  590. if (mem->slot >= kvm->nmemslots)
  591. kvm->nmemslots = mem->slot + 1;
  592. *memslot = new;
  593. ++kvm->memory_config_version;
  594. spin_unlock(&kvm->lock);
  595. for (i = 0; i < KVM_MAX_VCPUS; ++i) {
  596. struct kvm_vcpu *vcpu;
  597. vcpu = vcpu_load(kvm, i);
  598. if (!vcpu)
  599. continue;
  600. kvm_mmu_reset_context(vcpu);
  601. vcpu_put(vcpu);
  602. }
  603. kvm_free_physmem_slot(&old, &new);
  604. return 0;
  605. out_unlock:
  606. spin_unlock(&kvm->lock);
  607. out_free:
  608. kvm_free_physmem_slot(&new, &old);
  609. out:
  610. return r;
  611. }
  612. static void do_remove_write_access(struct kvm_vcpu *vcpu, int slot)
  613. {
  614. spin_lock(&vcpu->kvm->lock);
  615. kvm_mmu_slot_remove_write_access(vcpu, slot);
  616. spin_unlock(&vcpu->kvm->lock);
  617. }
  618. /*
  619. * Get (and clear) the dirty memory log for a memory slot.
  620. */
  621. static int kvm_dev_ioctl_get_dirty_log(struct kvm *kvm,
  622. struct kvm_dirty_log *log)
  623. {
  624. struct kvm_memory_slot *memslot;
  625. int r, i;
  626. int n;
  627. int cleared;
  628. unsigned long any = 0;
  629. spin_lock(&kvm->lock);
  630. /*
  631. * Prevent changes to guest memory configuration even while the lock
  632. * is not taken.
  633. */
  634. ++kvm->busy;
  635. spin_unlock(&kvm->lock);
  636. r = -EINVAL;
  637. if (log->slot >= KVM_MEMORY_SLOTS)
  638. goto out;
  639. memslot = &kvm->memslots[log->slot];
  640. r = -ENOENT;
  641. if (!memslot->dirty_bitmap)
  642. goto out;
  643. n = ALIGN(memslot->npages, 8) / 8;
  644. for (i = 0; !any && i < n; ++i)
  645. any = memslot->dirty_bitmap[i];
  646. r = -EFAULT;
  647. if (copy_to_user(log->dirty_bitmap, memslot->dirty_bitmap, n))
  648. goto out;
  649. if (any) {
  650. cleared = 0;
  651. for (i = 0; i < KVM_MAX_VCPUS; ++i) {
  652. struct kvm_vcpu *vcpu = vcpu_load(kvm, i);
  653. if (!vcpu)
  654. continue;
  655. if (!cleared) {
  656. do_remove_write_access(vcpu, log->slot);
  657. memset(memslot->dirty_bitmap, 0, n);
  658. cleared = 1;
  659. }
  660. kvm_arch_ops->tlb_flush(vcpu);
  661. vcpu_put(vcpu);
  662. }
  663. }
  664. r = 0;
  665. out:
  666. spin_lock(&kvm->lock);
  667. --kvm->busy;
  668. spin_unlock(&kvm->lock);
  669. return r;
  670. }
  671. struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn)
  672. {
  673. int i;
  674. for (i = 0; i < kvm->nmemslots; ++i) {
  675. struct kvm_memory_slot *memslot = &kvm->memslots[i];
  676. if (gfn >= memslot->base_gfn
  677. && gfn < memslot->base_gfn + memslot->npages)
  678. return memslot;
  679. }
  680. return NULL;
  681. }
  682. EXPORT_SYMBOL_GPL(gfn_to_memslot);
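/*
 * mark_page_dirty() records a write to @gfn in the owning slot's dirty
 * bitmap (when dirty logging is enabled for that slot), for later
 * retrieval through KVM_GET_DIRTY_LOG.
 */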
  683. void mark_page_dirty(struct kvm *kvm, gfn_t gfn)
  684. {
  685. int i;
  686. struct kvm_memory_slot *memslot = NULL;
  687. unsigned long rel_gfn;
  688. for (i = 0; i < kvm->nmemslots; ++i) {
  689. memslot = &kvm->memslots[i];
  690. if (gfn >= memslot->base_gfn
  691. && gfn < memslot->base_gfn + memslot->npages) {
  692. if (!memslot || !memslot->dirty_bitmap)
  693. return;
  694. rel_gfn = gfn - memslot->base_gfn;
  695. /* avoid RMW */
  696. if (!test_bit(rel_gfn, memslot->dirty_bitmap))
  697. set_bit(rel_gfn, memslot->dirty_bitmap);
  698. return;
  699. }
  700. }
  701. }
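/*
 * The emulator_*() callbacks below back the x86 instruction emulator.
 * emulator_read_std() translates a guest virtual address and copies from
 * guest RAM; accesses that miss RAM are turned into MMIO requests for
 * userspace by emulator_read_emulated()/emulator_write_emulated().
 */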
  702. static int emulator_read_std(unsigned long addr,
  703. unsigned long *val,
  704. unsigned int bytes,
  705. struct x86_emulate_ctxt *ctxt)
  706. {
  707. struct kvm_vcpu *vcpu = ctxt->vcpu;
  708. void *data = val;
  709. while (bytes) {
  710. gpa_t gpa = vcpu->mmu.gva_to_gpa(vcpu, addr);
  711. unsigned offset = addr & (PAGE_SIZE-1);
  712. unsigned tocopy = min(bytes, (unsigned)PAGE_SIZE - offset);
  713. unsigned long pfn;
  714. struct kvm_memory_slot *memslot;
  715. void *page;
  716. if (gpa == UNMAPPED_GVA)
  717. return X86EMUL_PROPAGATE_FAULT;
  718. pfn = gpa >> PAGE_SHIFT;
  719. memslot = gfn_to_memslot(vcpu->kvm, pfn);
  720. if (!memslot)
  721. return X86EMUL_UNHANDLEABLE;
  722. page = kmap_atomic(gfn_to_page(memslot, pfn), KM_USER0);
  723. memcpy(data, page + offset, tocopy);
  724. kunmap_atomic(page, KM_USER0);
  725. bytes -= tocopy;
  726. data += tocopy;
  727. addr += tocopy;
  728. }
  729. return X86EMUL_CONTINUE;
  730. }
  731. static int emulator_write_std(unsigned long addr,
  732. unsigned long val,
  733. unsigned int bytes,
  734. struct x86_emulate_ctxt *ctxt)
  735. {
  736. printk(KERN_ERR "emulator_write_std: addr %lx n %d\n",
  737. addr, bytes);
  738. return X86EMUL_UNHANDLEABLE;
  739. }
  740. static int emulator_read_emulated(unsigned long addr,
  741. unsigned long *val,
  742. unsigned int bytes,
  743. struct x86_emulate_ctxt *ctxt)
  744. {
  745. struct kvm_vcpu *vcpu = ctxt->vcpu;
  746. if (vcpu->mmio_read_completed) {
  747. memcpy(val, vcpu->mmio_data, bytes);
  748. vcpu->mmio_read_completed = 0;
  749. return X86EMUL_CONTINUE;
  750. } else if (emulator_read_std(addr, val, bytes, ctxt)
  751. == X86EMUL_CONTINUE)
  752. return X86EMUL_CONTINUE;
  753. else {
  754. gpa_t gpa = vcpu->mmu.gva_to_gpa(vcpu, addr);
  755. if (gpa == UNMAPPED_GVA)
  756. return X86EMUL_PROPAGATE_FAULT;
  757. vcpu->mmio_needed = 1;
  758. vcpu->mmio_phys_addr = gpa;
  759. vcpu->mmio_size = bytes;
  760. vcpu->mmio_is_write = 0;
  761. return X86EMUL_UNHANDLEABLE;
  762. }
  763. }
  764. static int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
  765. unsigned long val, int bytes)
  766. {
  767. struct kvm_memory_slot *m;
  768. struct page *page;
  769. void *virt;
  770. if (((gpa + bytes - 1) >> PAGE_SHIFT) != (gpa >> PAGE_SHIFT))
  771. return 0;
  772. m = gfn_to_memslot(vcpu->kvm, gpa >> PAGE_SHIFT);
  773. if (!m)
  774. return 0;
  775. page = gfn_to_page(m, gpa >> PAGE_SHIFT);
  776. kvm_mmu_pre_write(vcpu, gpa, bytes);
  777. virt = kmap_atomic(page, KM_USER0);
  778. memcpy(virt + offset_in_page(gpa), &val, bytes);
  779. kunmap_atomic(virt, KM_USER0);
  780. kvm_mmu_post_write(vcpu, gpa, bytes);
  781. return 1;
  782. }
  783. static int emulator_write_emulated(unsigned long addr,
  784. unsigned long val,
  785. unsigned int bytes,
  786. struct x86_emulate_ctxt *ctxt)
  787. {
  788. struct kvm_vcpu *vcpu = ctxt->vcpu;
  789. gpa_t gpa = vcpu->mmu.gva_to_gpa(vcpu, addr);
  790. if (gpa == UNMAPPED_GVA)
  791. return X86EMUL_PROPAGATE_FAULT;
  792. if (emulator_write_phys(vcpu, gpa, val, bytes))
  793. return X86EMUL_CONTINUE;
  794. vcpu->mmio_needed = 1;
  795. vcpu->mmio_phys_addr = gpa;
  796. vcpu->mmio_size = bytes;
  797. vcpu->mmio_is_write = 1;
  798. memcpy(vcpu->mmio_data, &val, bytes);
  799. return X86EMUL_CONTINUE;
  800. }
  801. static int emulator_cmpxchg_emulated(unsigned long addr,
  802. unsigned long old,
  803. unsigned long new,
  804. unsigned int bytes,
  805. struct x86_emulate_ctxt *ctxt)
  806. {
  807. static int reported;
  808. if (!reported) {
  809. reported = 1;
  810. printk(KERN_WARNING "kvm: emulating exchange as write\n");
  811. }
  812. return emulator_write_emulated(addr, new, bytes, ctxt);
  813. }
  814. #ifdef CONFIG_X86_32
  815. static int emulator_cmpxchg8b_emulated(unsigned long addr,
  816. unsigned long old_lo,
  817. unsigned long old_hi,
  818. unsigned long new_lo,
  819. unsigned long new_hi,
  820. struct x86_emulate_ctxt *ctxt)
  821. {
  822. static int reported;
  823. int r;
  824. if (!reported) {
  825. reported = 1;
  826. printk(KERN_WARNING "kvm: emulating exchange8b as write\n");
  827. }
  828. r = emulator_write_emulated(addr, new_lo, 4, ctxt);
  829. if (r != X86EMUL_CONTINUE)
  830. return r;
  831. return emulator_write_emulated(addr+4, new_hi, 4, ctxt);
  832. }
  833. #endif
  834. static unsigned long get_segment_base(struct kvm_vcpu *vcpu, int seg)
  835. {
  836. return kvm_arch_ops->get_segment_base(vcpu, seg);
  837. }
  838. int emulate_invlpg(struct kvm_vcpu *vcpu, gva_t address)
  839. {
  840. return X86EMUL_CONTINUE;
  841. }
  842. int emulate_clts(struct kvm_vcpu *vcpu)
  843. {
  844. unsigned long cr0;
  845. kvm_arch_ops->decache_cr0_cr4_guest_bits(vcpu);
  846. cr0 = vcpu->cr0 & ~CR0_TS_MASK;
  847. kvm_arch_ops->set_cr0(vcpu, cr0);
  848. return X86EMUL_CONTINUE;
  849. }
  850. int emulator_get_dr(struct x86_emulate_ctxt* ctxt, int dr, unsigned long *dest)
  851. {
  852. struct kvm_vcpu *vcpu = ctxt->vcpu;
  853. switch (dr) {
  854. case 0 ... 3:
  855. *dest = kvm_arch_ops->get_dr(vcpu, dr);
  856. return X86EMUL_CONTINUE;
  857. default:
  858. printk(KERN_DEBUG "%s: unexpected dr %u\n",
  859. __FUNCTION__, dr);
  860. return X86EMUL_UNHANDLEABLE;
  861. }
  862. }
  863. int emulator_set_dr(struct x86_emulate_ctxt *ctxt, int dr, unsigned long value)
  864. {
  865. unsigned long mask = (ctxt->mode == X86EMUL_MODE_PROT64) ? ~0ULL : ~0U;
  866. int exception;
  867. kvm_arch_ops->set_dr(ctxt->vcpu, dr, value & mask, &exception);
  868. if (exception) {
  869. /* FIXME: better handling */
  870. return X86EMUL_UNHANDLEABLE;
  871. }
  872. return X86EMUL_CONTINUE;
  873. }
  874. static void report_emulation_failure(struct x86_emulate_ctxt *ctxt)
  875. {
  876. static int reported;
  877. u8 opcodes[4];
  878. unsigned long rip = ctxt->vcpu->rip;
  879. unsigned long rip_linear;
  880. rip_linear = rip + get_segment_base(ctxt->vcpu, VCPU_SREG_CS);
  881. if (reported)
  882. return;
  883. emulator_read_std(rip_linear, (void *)opcodes, 4, ctxt);
  884. printk(KERN_ERR "emulation failed but !mmio_needed?"
  885. " rip %lx %02x %02x %02x %02x\n",
  886. rip, opcodes[0], opcodes[1], opcodes[2], opcodes[3]);
  887. reported = 1;
  888. }
  889. struct x86_emulate_ops emulate_ops = {
  890. .read_std = emulator_read_std,
  891. .write_std = emulator_write_std,
  892. .read_emulated = emulator_read_emulated,
  893. .write_emulated = emulator_write_emulated,
  894. .cmpxchg_emulated = emulator_cmpxchg_emulated,
  895. #ifdef CONFIG_X86_32
  896. .cmpxchg8b_emulated = emulator_cmpxchg8b_emulated,
  897. #endif
  898. };
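/*
 * emulate_instruction() builds an x86_emulate_ctxt from the current vcpu
 * state (mode, segment bases, eflags) and hands it to x86_emulate_memop().
 * It returns EMULATE_DONE on success, EMULATE_DO_MMIO when userspace must
 * complete an MMIO access, or EMULATE_FAIL if the instruction could not
 * be handled.
 */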
  899. int emulate_instruction(struct kvm_vcpu *vcpu,
  900. struct kvm_run *run,
  901. unsigned long cr2,
  902. u16 error_code)
  903. {
  904. struct x86_emulate_ctxt emulate_ctxt;
  905. int r;
  906. int cs_db, cs_l;
  907. kvm_arch_ops->cache_regs(vcpu);
  908. kvm_arch_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
  909. emulate_ctxt.vcpu = vcpu;
  910. emulate_ctxt.eflags = kvm_arch_ops->get_rflags(vcpu);
  911. emulate_ctxt.cr2 = cr2;
  912. emulate_ctxt.mode = (emulate_ctxt.eflags & X86_EFLAGS_VM)
  913. ? X86EMUL_MODE_REAL : cs_l
  914. ? X86EMUL_MODE_PROT64 : cs_db
  915. ? X86EMUL_MODE_PROT32 : X86EMUL_MODE_PROT16;
  916. if (emulate_ctxt.mode == X86EMUL_MODE_PROT64) {
  917. emulate_ctxt.cs_base = 0;
  918. emulate_ctxt.ds_base = 0;
  919. emulate_ctxt.es_base = 0;
  920. emulate_ctxt.ss_base = 0;
  921. } else {
  922. emulate_ctxt.cs_base = get_segment_base(vcpu, VCPU_SREG_CS);
  923. emulate_ctxt.ds_base = get_segment_base(vcpu, VCPU_SREG_DS);
  924. emulate_ctxt.es_base = get_segment_base(vcpu, VCPU_SREG_ES);
  925. emulate_ctxt.ss_base = get_segment_base(vcpu, VCPU_SREG_SS);
  926. }
  927. emulate_ctxt.gs_base = get_segment_base(vcpu, VCPU_SREG_GS);
  928. emulate_ctxt.fs_base = get_segment_base(vcpu, VCPU_SREG_FS);
  929. vcpu->mmio_is_write = 0;
  930. r = x86_emulate_memop(&emulate_ctxt, &emulate_ops);
  931. if ((r || vcpu->mmio_is_write) && run) {
  932. run->mmio.phys_addr = vcpu->mmio_phys_addr;
  933. memcpy(run->mmio.data, vcpu->mmio_data, 8);
  934. run->mmio.len = vcpu->mmio_size;
  935. run->mmio.is_write = vcpu->mmio_is_write;
  936. }
  937. if (r) {
  938. if (kvm_mmu_unprotect_page_virt(vcpu, cr2))
  939. return EMULATE_DONE;
  940. if (!vcpu->mmio_needed) {
  941. report_emulation_failure(&emulate_ctxt);
  942. return EMULATE_FAIL;
  943. }
  944. return EMULATE_DO_MMIO;
  945. }
  946. kvm_arch_ops->decache_regs(vcpu);
  947. kvm_arch_ops->set_rflags(vcpu, emulate_ctxt.eflags);
  948. if (vcpu->mmio_is_write)
  949. return EMULATE_DO_MMIO;
  950. return EMULATE_DONE;
  951. }
  952. EXPORT_SYMBOL_GPL(emulate_instruction);
  953. static u64 mk_cr_64(u64 curr_cr, u32 new_val)
  954. {
  955. return (curr_cr & ~((1ULL << 32) - 1)) | new_val;
  956. }
  957. void realmode_lgdt(struct kvm_vcpu *vcpu, u16 limit, unsigned long base)
  958. {
  959. struct descriptor_table dt = { limit, base };
  960. kvm_arch_ops->set_gdt(vcpu, &dt);
  961. }
  962. void realmode_lidt(struct kvm_vcpu *vcpu, u16 limit, unsigned long base)
  963. {
  964. struct descriptor_table dt = { limit, base };
  965. kvm_arch_ops->set_idt(vcpu, &dt);
  966. }
  967. void realmode_lmsw(struct kvm_vcpu *vcpu, unsigned long msw,
  968. unsigned long *rflags)
  969. {
  970. lmsw(vcpu, msw);
  971. *rflags = kvm_arch_ops->get_rflags(vcpu);
  972. }
  973. unsigned long realmode_get_cr(struct kvm_vcpu *vcpu, int cr)
  974. {
  975. kvm_arch_ops->decache_cr0_cr4_guest_bits(vcpu);
  976. switch (cr) {
  977. case 0:
  978. return vcpu->cr0;
  979. case 2:
  980. return vcpu->cr2;
  981. case 3:
  982. return vcpu->cr3;
  983. case 4:
  984. return vcpu->cr4;
  985. default:
  986. vcpu_printf(vcpu, "%s: unexpected cr %u\n", __FUNCTION__, cr);
  987. return 0;
  988. }
  989. }
  990. void realmode_set_cr(struct kvm_vcpu *vcpu, int cr, unsigned long val,
  991. unsigned long *rflags)
  992. {
  993. switch (cr) {
  994. case 0:
  995. set_cr0(vcpu, mk_cr_64(vcpu->cr0, val));
  996. *rflags = kvm_arch_ops->get_rflags(vcpu);
  997. break;
  998. case 2:
  999. vcpu->cr2 = val;
  1000. break;
  1001. case 3:
  1002. set_cr3(vcpu, val);
  1003. break;
  1004. case 4:
  1005. set_cr4(vcpu, mk_cr_64(vcpu->cr4, val));
  1006. break;
  1007. default:
  1008. vcpu_printf(vcpu, "%s: unexpected cr %u\n", __FUNCTION__, cr);
  1009. }
  1010. }
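/*
 * kvm_get_msr_common() handles rdmsr for MSRs that need no vendor-specific
 * treatment: most are read as zero, while a few (APIC base, MISC_ENABLE,
 * EFER on x86_64) come from shadowed vcpu state.  Unknown MSRs are
 * reported and rejected.
 */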
  1011. int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
  1012. {
  1013. u64 data;
  1014. switch (msr) {
  1015. case 0xc0010010: /* SYSCFG */
  1016. case 0xc0010015: /* HWCR */
  1017. case MSR_IA32_PLATFORM_ID:
  1018. case MSR_IA32_P5_MC_ADDR:
  1019. case MSR_IA32_P5_MC_TYPE:
  1020. case MSR_IA32_MC0_CTL:
  1021. case MSR_IA32_MCG_STATUS:
  1022. case MSR_IA32_MCG_CAP:
  1023. case MSR_IA32_MC0_MISC:
  1024. case MSR_IA32_MC0_MISC+4:
  1025. case MSR_IA32_MC0_MISC+8:
  1026. case MSR_IA32_MC0_MISC+12:
  1027. case MSR_IA32_MC0_MISC+16:
  1028. case MSR_IA32_UCODE_REV:
  1029. case MSR_IA32_PERF_STATUS:
  1030. /* MTRR registers */
  1031. case 0xfe:
  1032. case 0x200 ... 0x2ff:
  1033. data = 0;
  1034. break;
  1035. case 0xcd: /* fsb frequency */
  1036. data = 3;
  1037. break;
  1038. case MSR_IA32_APICBASE:
  1039. data = vcpu->apic_base;
  1040. break;
  1041. case MSR_IA32_MISC_ENABLE:
  1042. data = vcpu->ia32_misc_enable_msr;
  1043. break;
  1044. #ifdef CONFIG_X86_64
  1045. case MSR_EFER:
  1046. data = vcpu->shadow_efer;
  1047. break;
  1048. #endif
  1049. default:
  1050. printk(KERN_ERR "kvm: unhandled rdmsr: 0x%x\n", msr);
  1051. return 1;
  1052. }
  1053. *pdata = data;
  1054. return 0;
  1055. }
  1056. EXPORT_SYMBOL_GPL(kvm_get_msr_common);
  1057. /*
  1058. * Reads an msr value (of 'msr_index') into 'pdata'.
  1059. * Returns 0 on success, non-0 otherwise.
  1060. * Assumes vcpu_load() was already called.
  1061. */
  1062. static int get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
  1063. {
  1064. return kvm_arch_ops->get_msr(vcpu, msr_index, pdata);
  1065. }
  1066. #ifdef CONFIG_X86_64
  1067. static void set_efer(struct kvm_vcpu *vcpu, u64 efer)
  1068. {
  1069. if (efer & EFER_RESERVED_BITS) {
  1070. printk(KERN_DEBUG "set_efer: 0x%llx #GP, reserved bits\n",
  1071. efer);
  1072. inject_gp(vcpu);
  1073. return;
  1074. }
  1075. if (is_paging(vcpu)
  1076. && (vcpu->shadow_efer & EFER_LME) != (efer & EFER_LME)) {
  1077. printk(KERN_DEBUG "set_efer: #GP, change LME while paging\n");
  1078. inject_gp(vcpu);
  1079. return;
  1080. }
  1081. kvm_arch_ops->set_efer(vcpu, efer);
  1082. efer &= ~EFER_LMA;
  1083. efer |= vcpu->shadow_efer & EFER_LMA;
  1084. vcpu->shadow_efer = efer;
  1085. }
  1086. #endif
  1087. int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)
  1088. {
  1089. switch (msr) {
  1090. #ifdef CONFIG_X86_64
  1091. case MSR_EFER:
  1092. set_efer(vcpu, data);
  1093. break;
  1094. #endif
  1095. case MSR_IA32_MC0_STATUS:
  1096. printk(KERN_WARNING "%s: MSR_IA32_MC0_STATUS 0x%llx, nop\n",
  1097. __FUNCTION__, data);
  1098. break;
  1099. case MSR_IA32_UCODE_REV:
  1100. case MSR_IA32_UCODE_WRITE:
  1101. case 0x200 ... 0x2ff: /* MTRRs */
  1102. break;
  1103. case MSR_IA32_APICBASE:
  1104. vcpu->apic_base = data;
  1105. break;
  1106. case MSR_IA32_MISC_ENABLE:
  1107. vcpu->ia32_misc_enable_msr = data;
  1108. break;
  1109. default:
  1110. printk(KERN_ERR "kvm: unhandled wrmsr: 0x%x\n", msr);
  1111. return 1;
  1112. }
  1113. return 0;
  1114. }
  1115. EXPORT_SYMBOL_GPL(kvm_set_msr_common);
  1116. /*
  1117. * Writes msr value into the appropriate "register".
  1118. * Returns 0 on success, non-0 otherwise.
  1119. * Assumes vcpu_load() was already called.
  1120. */
  1121. static int set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
  1122. {
  1123. return kvm_arch_ops->set_msr(vcpu, msr_index, data);
  1124. }
  1125. void kvm_resched(struct kvm_vcpu *vcpu)
  1126. {
  1127. vcpu_put(vcpu);
  1128. cond_resched();
  1129. /* Cannot fail - no vcpu unplug yet. */
  1130. vcpu_load(vcpu->kvm, vcpu_slot(vcpu));
  1131. }
  1132. EXPORT_SYMBOL_GPL(kvm_resched);
  1133. void load_msrs(struct vmx_msr_entry *e, int n)
  1134. {
  1135. int i;
  1136. for (i = 0; i < n; ++i)
  1137. wrmsrl(e[i].index, e[i].data);
  1138. }
  1139. EXPORT_SYMBOL_GPL(load_msrs);
  1140. void save_msrs(struct vmx_msr_entry *e, int n)
  1141. {
  1142. int i;
  1143. for (i = 0; i < n; ++i)
  1144. rdmsrl(e[i].index, e[i].data);
  1145. }
  1146. EXPORT_SYMBOL_GPL(save_msrs);
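/*
 * kvm_dev_ioctl_run() is the KVM_RUN handler: it re-syncs the APIC TPR
 * from kvm_run->cr8, completes any pending MMIO read, and enters guest
 * mode via kvm_arch_ops->run() until control returns to userspace.
 */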
  1147. static int kvm_dev_ioctl_run(struct kvm *kvm, struct kvm_run *kvm_run)
  1148. {
  1149. struct kvm_vcpu *vcpu;
  1150. int r;
  1151. if (!valid_vcpu(kvm_run->vcpu))
  1152. return -EINVAL;
  1153. vcpu = vcpu_load(kvm, kvm_run->vcpu);
  1154. if (!vcpu)
  1155. return -ENOENT;
  1156. /* re-sync apic's tpr */
  1157. vcpu->cr8 = kvm_run->cr8;
  1158. if (kvm_run->emulated) {
  1159. kvm_arch_ops->skip_emulated_instruction(vcpu);
  1160. kvm_run->emulated = 0;
  1161. }
  1162. if (kvm_run->mmio_completed) {
  1163. memcpy(vcpu->mmio_data, kvm_run->mmio.data, 8);
  1164. vcpu->mmio_read_completed = 1;
  1165. }
  1166. vcpu->mmio_needed = 0;
  1167. r = kvm_arch_ops->run(vcpu, kvm_run);
  1168. vcpu_put(vcpu);
  1169. return r;
  1170. }
  1171. static int kvm_dev_ioctl_get_regs(struct kvm *kvm, struct kvm_regs *regs)
  1172. {
  1173. struct kvm_vcpu *vcpu;
  1174. if (!valid_vcpu(regs->vcpu))
  1175. return -EINVAL;
  1176. vcpu = vcpu_load(kvm, regs->vcpu);
  1177. if (!vcpu)
  1178. return -ENOENT;
  1179. kvm_arch_ops->cache_regs(vcpu);
  1180. regs->rax = vcpu->regs[VCPU_REGS_RAX];
  1181. regs->rbx = vcpu->regs[VCPU_REGS_RBX];
  1182. regs->rcx = vcpu->regs[VCPU_REGS_RCX];
  1183. regs->rdx = vcpu->regs[VCPU_REGS_RDX];
  1184. regs->rsi = vcpu->regs[VCPU_REGS_RSI];
  1185. regs->rdi = vcpu->regs[VCPU_REGS_RDI];
  1186. regs->rsp = vcpu->regs[VCPU_REGS_RSP];
  1187. regs->rbp = vcpu->regs[VCPU_REGS_RBP];
  1188. #ifdef CONFIG_X86_64
  1189. regs->r8 = vcpu->regs[VCPU_REGS_R8];
  1190. regs->r9 = vcpu->regs[VCPU_REGS_R9];
  1191. regs->r10 = vcpu->regs[VCPU_REGS_R10];
  1192. regs->r11 = vcpu->regs[VCPU_REGS_R11];
  1193. regs->r12 = vcpu->regs[VCPU_REGS_R12];
  1194. regs->r13 = vcpu->regs[VCPU_REGS_R13];
  1195. regs->r14 = vcpu->regs[VCPU_REGS_R14];
  1196. regs->r15 = vcpu->regs[VCPU_REGS_R15];
  1197. #endif
  1198. regs->rip = vcpu->rip;
  1199. regs->rflags = kvm_arch_ops->get_rflags(vcpu);
  1200. /*
  1201. * Don't leak debug flags in case they were set for guest debugging
  1202. */
  1203. if (vcpu->guest_debug.enabled && vcpu->guest_debug.singlestep)
  1204. regs->rflags &= ~(X86_EFLAGS_TF | X86_EFLAGS_RF);
  1205. vcpu_put(vcpu);
  1206. return 0;
  1207. }
  1208. static int kvm_dev_ioctl_set_regs(struct kvm *kvm, struct kvm_regs *regs)
  1209. {
  1210. struct kvm_vcpu *vcpu;
  1211. if (!valid_vcpu(regs->vcpu))
  1212. return -EINVAL;
  1213. vcpu = vcpu_load(kvm, regs->vcpu);
  1214. if (!vcpu)
  1215. return -ENOENT;
  1216. vcpu->regs[VCPU_REGS_RAX] = regs->rax;
  1217. vcpu->regs[VCPU_REGS_RBX] = regs->rbx;
  1218. vcpu->regs[VCPU_REGS_RCX] = regs->rcx;
  1219. vcpu->regs[VCPU_REGS_RDX] = regs->rdx;
  1220. vcpu->regs[VCPU_REGS_RSI] = regs->rsi;
  1221. vcpu->regs[VCPU_REGS_RDI] = regs->rdi;
  1222. vcpu->regs[VCPU_REGS_RSP] = regs->rsp;
  1223. vcpu->regs[VCPU_REGS_RBP] = regs->rbp;
  1224. #ifdef CONFIG_X86_64
  1225. vcpu->regs[VCPU_REGS_R8] = regs->r8;
  1226. vcpu->regs[VCPU_REGS_R9] = regs->r9;
  1227. vcpu->regs[VCPU_REGS_R10] = regs->r10;
  1228. vcpu->regs[VCPU_REGS_R11] = regs->r11;
  1229. vcpu->regs[VCPU_REGS_R12] = regs->r12;
  1230. vcpu->regs[VCPU_REGS_R13] = regs->r13;
  1231. vcpu->regs[VCPU_REGS_R14] = regs->r14;
  1232. vcpu->regs[VCPU_REGS_R15] = regs->r15;
  1233. #endif
  1234. vcpu->rip = regs->rip;
  1235. kvm_arch_ops->set_rflags(vcpu, regs->rflags);
  1236. kvm_arch_ops->decache_regs(vcpu);
  1237. vcpu_put(vcpu);
  1238. return 0;
  1239. }
  1240. static void get_segment(struct kvm_vcpu *vcpu,
  1241. struct kvm_segment *var, int seg)
  1242. {
  1243. return kvm_arch_ops->get_segment(vcpu, var, seg);
  1244. }
  1245. static int kvm_dev_ioctl_get_sregs(struct kvm *kvm, struct kvm_sregs *sregs)
  1246. {
  1247. struct kvm_vcpu *vcpu;
  1248. struct descriptor_table dt;
  1249. if (!valid_vcpu(sregs->vcpu))
  1250. return -EINVAL;
  1251. vcpu = vcpu_load(kvm, sregs->vcpu);
  1252. if (!vcpu)
  1253. return -ENOENT;
  1254. get_segment(vcpu, &sregs->cs, VCPU_SREG_CS);
  1255. get_segment(vcpu, &sregs->ds, VCPU_SREG_DS);
  1256. get_segment(vcpu, &sregs->es, VCPU_SREG_ES);
  1257. get_segment(vcpu, &sregs->fs, VCPU_SREG_FS);
  1258. get_segment(vcpu, &sregs->gs, VCPU_SREG_GS);
  1259. get_segment(vcpu, &sregs->ss, VCPU_SREG_SS);
  1260. get_segment(vcpu, &sregs->tr, VCPU_SREG_TR);
  1261. get_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR);
  1262. kvm_arch_ops->get_idt(vcpu, &dt);
  1263. sregs->idt.limit = dt.limit;
  1264. sregs->idt.base = dt.base;
  1265. kvm_arch_ops->get_gdt(vcpu, &dt);
  1266. sregs->gdt.limit = dt.limit;
  1267. sregs->gdt.base = dt.base;
  1268. kvm_arch_ops->decache_cr0_cr4_guest_bits(vcpu);
  1269. sregs->cr0 = vcpu->cr0;
  1270. sregs->cr2 = vcpu->cr2;
  1271. sregs->cr3 = vcpu->cr3;
  1272. sregs->cr4 = vcpu->cr4;
  1273. sregs->cr8 = vcpu->cr8;
  1274. sregs->efer = vcpu->shadow_efer;
  1275. sregs->apic_base = vcpu->apic_base;
  1276. memcpy(sregs->interrupt_bitmap, vcpu->irq_pending,
  1277. sizeof sregs->interrupt_bitmap);
  1278. vcpu_put(vcpu);
  1279. return 0;
  1280. }
  1281. static void set_segment(struct kvm_vcpu *vcpu,
  1282. struct kvm_segment *var, int seg)
  1283. {
  1284. return kvm_arch_ops->set_segment(vcpu, var, seg);
  1285. }
  1286. static int kvm_dev_ioctl_set_sregs(struct kvm *kvm, struct kvm_sregs *sregs)
  1287. {
  1288. struct kvm_vcpu *vcpu;
  1289. int mmu_reset_needed = 0;
  1290. int i;
  1291. struct descriptor_table dt;
  1292. if (!valid_vcpu(sregs->vcpu))
  1293. return -EINVAL;
  1294. vcpu = vcpu_load(kvm, sregs->vcpu);
  1295. if (!vcpu)
  1296. return -ENOENT;
  1297. set_segment(vcpu, &sregs->cs, VCPU_SREG_CS);
  1298. set_segment(vcpu, &sregs->ds, VCPU_SREG_DS);
  1299. set_segment(vcpu, &sregs->es, VCPU_SREG_ES);
  1300. set_segment(vcpu, &sregs->fs, VCPU_SREG_FS);
  1301. set_segment(vcpu, &sregs->gs, VCPU_SREG_GS);
  1302. set_segment(vcpu, &sregs->ss, VCPU_SREG_SS);
  1303. set_segment(vcpu, &sregs->tr, VCPU_SREG_TR);
  1304. set_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR);
  1305. dt.limit = sregs->idt.limit;
  1306. dt.base = sregs->idt.base;
  1307. kvm_arch_ops->set_idt(vcpu, &dt);
  1308. dt.limit = sregs->gdt.limit;
  1309. dt.base = sregs->gdt.base;
  1310. kvm_arch_ops->set_gdt(vcpu, &dt);
  1311. vcpu->cr2 = sregs->cr2;
  1312. mmu_reset_needed |= vcpu->cr3 != sregs->cr3;
  1313. vcpu->cr3 = sregs->cr3;
  1314. vcpu->cr8 = sregs->cr8;
  1315. mmu_reset_needed |= vcpu->shadow_efer != sregs->efer;
  1316. #ifdef CONFIG_X86_64
  1317. kvm_arch_ops->set_efer(vcpu, sregs->efer);
  1318. #endif
  1319. vcpu->apic_base = sregs->apic_base;
  1320. kvm_arch_ops->decache_cr0_cr4_guest_bits(vcpu);
  1321. mmu_reset_needed |= vcpu->cr0 != sregs->cr0;
  1322. kvm_arch_ops->set_cr0_no_modeswitch(vcpu, sregs->cr0);
  1323. mmu_reset_needed |= vcpu->cr4 != sregs->cr4;
  1324. kvm_arch_ops->set_cr4(vcpu, sregs->cr4);
  1325. if (!is_long_mode(vcpu) && is_pae(vcpu))
  1326. load_pdptrs(vcpu, vcpu->cr3);
  1327. if (mmu_reset_needed)
  1328. kvm_mmu_reset_context(vcpu);
  1329. memcpy(vcpu->irq_pending, sregs->interrupt_bitmap,
  1330. sizeof vcpu->irq_pending);
  1331. vcpu->irq_summary = 0;
  1332. for (i = 0; i < NR_IRQ_WORDS; ++i)
  1333. if (vcpu->irq_pending[i])
  1334. __set_bit(i, &vcpu->irq_summary);
  1335. vcpu_put(vcpu);
  1336. return 0;
  1337. }
  1338. /*
  1339. * List of msr numbers which we expose to userspace through KVM_GET_MSRS
  1340. * and KVM_SET_MSRS, and KVM_GET_MSR_INDEX_LIST.
  1341. *
  1342. * This list is modified at module load time to reflect the
  1343. * capabilities of the host cpu.
  1344. */
  1345. static u32 msrs_to_save[] = {
  1346. MSR_IA32_SYSENTER_CS, MSR_IA32_SYSENTER_ESP, MSR_IA32_SYSENTER_EIP,
  1347. MSR_K6_STAR,
  1348. #ifdef CONFIG_X86_64
  1349. MSR_CSTAR, MSR_KERNEL_GS_BASE, MSR_SYSCALL_MASK, MSR_LSTAR,
  1350. #endif
  1351. MSR_IA32_TIME_STAMP_COUNTER,
  1352. };
  1353. static unsigned num_msrs_to_save;
  1354. static u32 emulated_msrs[] = {
  1355. MSR_IA32_MISC_ENABLE,
  1356. };
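/*
 * kvm_init_msr_list() probes each MSR in msrs_to_save with rdmsr_safe()
 * and compacts the array so that only MSRs actually present on the host
 * CPU are exposed to userspace.
 */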
  1357. static __init void kvm_init_msr_list(void)
  1358. {
  1359. u32 dummy[2];
  1360. unsigned i, j;
  1361. for (i = j = 0; i < ARRAY_SIZE(msrs_to_save); i++) {
  1362. if (rdmsr_safe(msrs_to_save[i], &dummy[0], &dummy[1]) < 0)
  1363. continue;
  1364. if (j < i)
  1365. msrs_to_save[j] = msrs_to_save[i];
  1366. j++;
  1367. }
  1368. num_msrs_to_save = j;
  1369. }
  1370. /*
  1371. * Adapt set_msr() to msr_io()'s calling convention
  1372. */
  1373. static int do_set_msr(struct kvm_vcpu *vcpu, unsigned index, u64 *data)
  1374. {
  1375. return set_msr(vcpu, index, *data);
  1376. }
  1377. /*
  1378. * Read or write a bunch of msrs. All parameters are kernel addresses.
  1379. *
  1380. * @return number of msrs set successfully.
  1381. */
  1382. static int __msr_io(struct kvm *kvm, struct kvm_msrs *msrs,
  1383. struct kvm_msr_entry *entries,
  1384. int (*do_msr)(struct kvm_vcpu *vcpu,
  1385. unsigned index, u64 *data))
  1386. {
  1387. struct kvm_vcpu *vcpu;
  1388. int i;
  1389. if (!valid_vcpu(msrs->vcpu))
  1390. return -EINVAL;
  1391. vcpu = vcpu_load(kvm, msrs->vcpu);
  1392. if (!vcpu)
  1393. return -ENOENT;
  1394. for (i = 0; i < msrs->nmsrs; ++i)
  1395. if (do_msr(vcpu, entries[i].index, &entries[i].data))
  1396. break;
  1397. vcpu_put(vcpu);
  1398. return i;
  1399. }
  1400. /*
  1401. * Read or write a bunch of msrs. Parameters are user addresses.
  1402. *
  1403. * @return number of msrs set successfully.
  1404. */
  1405. static int msr_io(struct kvm *kvm, struct kvm_msrs __user *user_msrs,
  1406. int (*do_msr)(struct kvm_vcpu *vcpu,
  1407. unsigned index, u64 *data),
  1408. int writeback)
  1409. {
  1410. struct kvm_msrs msrs;
  1411. struct kvm_msr_entry *entries;
  1412. int r, n;
  1413. unsigned size;
  1414. r = -EFAULT;
  1415. if (copy_from_user(&msrs, user_msrs, sizeof msrs))
  1416. goto out;
  1417. r = -E2BIG;
  1418. if (msrs.nmsrs >= MAX_IO_MSRS)
  1419. goto out;
  1420. r = -ENOMEM;
  1421. size = sizeof(struct kvm_msr_entry) * msrs.nmsrs;
  1422. entries = vmalloc(size);
  1423. if (!entries)
  1424. goto out;
  1425. r = -EFAULT;
  1426. if (copy_from_user(entries, user_msrs->entries, size))
  1427. goto out_free;
  1428. r = n = __msr_io(kvm, &msrs, entries, do_msr);
  1429. if (r < 0)
  1430. goto out_free;
  1431. r = -EFAULT;
  1432. if (writeback && copy_to_user(user_msrs->entries, entries, size))
  1433. goto out_free;
  1434. r = n;
  1435. out_free:
  1436. vfree(entries);
  1437. out:
  1438. return r;
  1439. }
  1440. /*
  1441. * Translate a guest virtual address to a guest physical address.
  1442. */
  1443. static int kvm_dev_ioctl_translate(struct kvm *kvm, struct kvm_translation *tr)
  1444. {
  1445. unsigned long vaddr = tr->linear_address;
  1446. struct kvm_vcpu *vcpu;
  1447. gpa_t gpa;
  1448. vcpu = vcpu_load(kvm, tr->vcpu);
  1449. if (!vcpu)
  1450. return -ENOENT;
  1451. spin_lock(&kvm->lock);
  1452. gpa = vcpu->mmu.gva_to_gpa(vcpu, vaddr);
  1453. tr->physical_address = gpa;
  1454. tr->valid = gpa != UNMAPPED_GVA;
  1455. tr->writeable = 1;
  1456. tr->usermode = 0;
  1457. spin_unlock(&kvm->lock);
  1458. vcpu_put(vcpu);
  1459. return 0;
  1460. }
  1461. static int kvm_dev_ioctl_interrupt(struct kvm *kvm, struct kvm_interrupt *irq)
  1462. {
  1463. struct kvm_vcpu *vcpu;
  1464. if (!valid_vcpu(irq->vcpu))
  1465. return -EINVAL;
  1466. if (irq->irq < 0 || irq->irq >= 256)
  1467. return -EINVAL;
  1468. vcpu = vcpu_load(kvm, irq->vcpu);
  1469. if (!vcpu)
  1470. return -ENOENT;
  1471. set_bit(irq->irq, vcpu->irq_pending);
  1472. set_bit(irq->irq / BITS_PER_LONG, &vcpu->irq_summary);
  1473. vcpu_put(vcpu);
  1474. return 0;
  1475. }
  1476. static int kvm_dev_ioctl_debug_guest(struct kvm *kvm,
  1477. struct kvm_debug_guest *dbg)
  1478. {
  1479. struct kvm_vcpu *vcpu;
  1480. int r;
  1481. if (!valid_vcpu(dbg->vcpu))
  1482. return -EINVAL;
  1483. vcpu = vcpu_load(kvm, dbg->vcpu);
  1484. if (!vcpu)
  1485. return -ENOENT;
  1486. r = kvm_arch_ops->set_guest_debug(vcpu, dbg);
  1487. vcpu_put(vcpu);
  1488. return r;
  1489. }
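/*
 * kvm_dev_ioctl() dispatches the /dev/kvm ioctls: each command copies its
 * argument struct in from userspace, calls the matching handler above, and
 * copies results back.  In this early API, vcpu-scoped structs carry a
 * ->vcpu field rather than using a per-vcpu file descriptor.
 *
 * Illustrative userspace sketch (not part of this file; error handling
 * omitted, struct layouts as defined by this version of <linux/kvm.h>):
 *
 *	fd = open("/dev/kvm", O_RDWR);
 *	ioctl(fd, KVM_SET_MEMORY_REGION, &mem);	 // describe guest RAM
 *	ioctl(fd, KVM_CREATE_VCPU, 0);		 // vcpu number as argument
 *	run.vcpu = 0;
 *	ioctl(fd, KVM_RUN, &run);		 // repeat until shutdown
 */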

static long kvm_dev_ioctl(struct file *filp,
                          unsigned int ioctl, unsigned long arg)
{
        struct kvm *kvm = filp->private_data;
        void __user *argp = (void __user *)arg;
        int r = -EINVAL;

        switch (ioctl) {
        case KVM_GET_API_VERSION:
                r = KVM_API_VERSION;
                break;
        case KVM_CREATE_VCPU:
                r = kvm_dev_ioctl_create_vcpu(kvm, arg);
                if (r)
                        goto out;
                break;
        case KVM_RUN: {
                struct kvm_run kvm_run;

                r = -EFAULT;
                if (copy_from_user(&kvm_run, argp, sizeof kvm_run))
                        goto out;
                r = kvm_dev_ioctl_run(kvm, &kvm_run);
                if (r < 0 && r != -EINTR)
                        goto out;
                if (copy_to_user(argp, &kvm_run, sizeof kvm_run)) {
                        r = -EFAULT;
                        goto out;
                }
                break;
        }
        case KVM_GET_REGS: {
                struct kvm_regs kvm_regs;

                r = -EFAULT;
                if (copy_from_user(&kvm_regs, argp, sizeof kvm_regs))
                        goto out;
                r = kvm_dev_ioctl_get_regs(kvm, &kvm_regs);
                if (r)
                        goto out;
                r = -EFAULT;
                if (copy_to_user(argp, &kvm_regs, sizeof kvm_regs))
                        goto out;
                r = 0;
                break;
        }
        case KVM_SET_REGS: {
                struct kvm_regs kvm_regs;

                r = -EFAULT;
                if (copy_from_user(&kvm_regs, argp, sizeof kvm_regs))
                        goto out;
                r = kvm_dev_ioctl_set_regs(kvm, &kvm_regs);
                if (r)
                        goto out;
                r = 0;
                break;
        }
        case KVM_GET_SREGS: {
                struct kvm_sregs kvm_sregs;

                r = -EFAULT;
                if (copy_from_user(&kvm_sregs, argp, sizeof kvm_sregs))
                        goto out;
                r = kvm_dev_ioctl_get_sregs(kvm, &kvm_sregs);
                if (r)
                        goto out;
                r = -EFAULT;
                if (copy_to_user(argp, &kvm_sregs, sizeof kvm_sregs))
                        goto out;
                r = 0;
                break;
        }
        case KVM_SET_SREGS: {
                struct kvm_sregs kvm_sregs;

                r = -EFAULT;
                if (copy_from_user(&kvm_sregs, argp, sizeof kvm_sregs))
                        goto out;
                r = kvm_dev_ioctl_set_sregs(kvm, &kvm_sregs);
                if (r)
                        goto out;
                r = 0;
                break;
        }
        case KVM_TRANSLATE: {
                struct kvm_translation tr;

                r = -EFAULT;
                if (copy_from_user(&tr, argp, sizeof tr))
                        goto out;
                r = kvm_dev_ioctl_translate(kvm, &tr);
                if (r)
                        goto out;
                r = -EFAULT;
                if (copy_to_user(argp, &tr, sizeof tr))
                        goto out;
                r = 0;
                break;
        }
        case KVM_INTERRUPT: {
                struct kvm_interrupt irq;

                r = -EFAULT;
                if (copy_from_user(&irq, argp, sizeof irq))
                        goto out;
                r = kvm_dev_ioctl_interrupt(kvm, &irq);
                if (r)
                        goto out;
                r = 0;
                break;
        }
        case KVM_DEBUG_GUEST: {
                struct kvm_debug_guest dbg;

                r = -EFAULT;
                if (copy_from_user(&dbg, argp, sizeof dbg))
                        goto out;
                r = kvm_dev_ioctl_debug_guest(kvm, &dbg);
                if (r)
                        goto out;
                r = 0;
                break;
        }
        case KVM_SET_MEMORY_REGION: {
                struct kvm_memory_region kvm_mem;

                r = -EFAULT;
                if (copy_from_user(&kvm_mem, argp, sizeof kvm_mem))
                        goto out;
                r = kvm_dev_ioctl_set_memory_region(kvm, &kvm_mem);
                if (r)
                        goto out;
                break;
        }
        case KVM_GET_DIRTY_LOG: {
                struct kvm_dirty_log log;

                r = -EFAULT;
                if (copy_from_user(&log, argp, sizeof log))
                        goto out;
                r = kvm_dev_ioctl_get_dirty_log(kvm, &log);
                if (r)
                        goto out;
                break;
        }
        case KVM_GET_MSRS:
                r = msr_io(kvm, argp, get_msr, 1);
                break;
        case KVM_SET_MSRS:
                r = msr_io(kvm, argp, do_set_msr, 0);
                break;
        case KVM_GET_MSR_INDEX_LIST: {
                struct kvm_msr_list __user *user_msr_list = argp;
                struct kvm_msr_list msr_list;
                unsigned n;

                r = -EFAULT;
                if (copy_from_user(&msr_list, user_msr_list, sizeof msr_list))
                        goto out;
                n = msr_list.nmsrs;
                msr_list.nmsrs = num_msrs_to_save + ARRAY_SIZE(emulated_msrs);
                if (copy_to_user(user_msr_list, &msr_list, sizeof msr_list))
                        goto out;
                r = -E2BIG;
                if (n < num_msrs_to_save)
                        goto out;
                r = -EFAULT;
                if (copy_to_user(user_msr_list->indices, &msrs_to_save,
                                 num_msrs_to_save * sizeof(u32)))
                        goto out;
                /* the emulated msr indices go right after the saved ones */
                if (copy_to_user(user_msr_list->indices + num_msrs_to_save,
                                 &emulated_msrs,
                                 ARRAY_SIZE(emulated_msrs) * sizeof(u32)))
                        goto out;
                r = 0;
                break;
        }
        default:
                ;
        }
out:
        return r;
}
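
/*
 * Illustrative sketch only: KVM_GET_MSR_INDEX_LIST is naturally used in two
 * steps, because the handler above writes the full count back into nmsrs
 * before failing with -E2BIG when the caller's buffer is too small.  Assuming
 * an open kvm fd "fd" and error handling elided:
 *
 *      struct kvm_msr_list probe = { .nmsrs = 0 };
 *      struct kvm_msr_list *list;
 *
 *      ioctl(fd, KVM_GET_MSR_INDEX_LIST, &probe); // fails with E2BIG,
 *                                                 // but probe.nmsrs is set
 *      list = malloc(sizeof(*list) + probe.nmsrs * sizeof(__u32));
 *      list->nmsrs = probe.nmsrs;
 *      ioctl(fd, KVM_GET_MSR_INDEX_LIST, list);
 */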

static struct page *kvm_dev_nopage(struct vm_area_struct *vma,
                                   unsigned long address,
                                   int *type)
{
        struct kvm *kvm = vma->vm_file->private_data;
        unsigned long pgoff;
        struct kvm_memory_slot *slot;
        struct page *page;

        *type = VM_FAULT_MINOR;
        pgoff = ((address - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
        slot = gfn_to_memslot(kvm, pgoff);
        if (!slot)
                return NOPAGE_SIGBUS;
        page = gfn_to_page(slot, pgoff);
        if (!page)
                return NOPAGE_SIGBUS;
        get_page(page);
        return page;
}

static struct vm_operations_struct kvm_dev_vm_ops = {
        .nopage = kvm_dev_nopage,
};

static int kvm_dev_mmap(struct file *file, struct vm_area_struct *vma)
{
        vma->vm_ops = &kvm_dev_vm_ops;
        return 0;
}
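
/*
 * Illustrative sketch only: because the nopage handler above maps a file
 * offset straight to a guest frame number, user space can access guest
 * physical memory by mmap()ing the kvm fd at the page-aligned guest physical
 * address.  Assuming an open kvm fd "fd", 4 KiB pages, and a "gpa" inside a
 * region registered with KVM_SET_MEMORY_REGION:
 *
 *      void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED,
 *                     fd, gpa & ~4095UL);
 *      if (p == MAP_FAILED)
 *              perror("mmap guest memory");
 */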

static struct file_operations kvm_chardev_ops = {
        .open = kvm_dev_open,
        .release = kvm_dev_release,
        .unlocked_ioctl = kvm_dev_ioctl,
        .compat_ioctl = kvm_dev_ioctl,
        .mmap = kvm_dev_mmap,
};

static struct miscdevice kvm_dev = {
        MISC_DYNAMIC_MINOR,
        "kvm",
        &kvm_chardev_ops,
};

static int kvm_reboot(struct notifier_block *notifier, unsigned long val,
                      void *v)
{
        if (val == SYS_RESTART) {
                /*
                 * Some (well, at least mine) BIOSes hang on reboot if
                 * in vmx root mode.
                 */
                printk(KERN_INFO "kvm: exiting hardware virtualization\n");
                on_each_cpu(kvm_arch_ops->hardware_disable, NULL, 0, 1);
        }
        return NOTIFY_OK;
}

static struct notifier_block kvm_reboot_notifier = {
        .notifier_call = kvm_reboot,
        .priority = 0,
};

/*
 * Make sure that a cpu that is being hot-unplugged does not have any vcpus
 * cached on it.
 */
static void decache_vcpus_on_cpu(int cpu)
{
        struct kvm *vm;
        struct kvm_vcpu *vcpu;
        int i;

        spin_lock(&kvm_lock);
        list_for_each_entry(vm, &vm_list, vm_list)
                for (i = 0; i < KVM_MAX_VCPUS; ++i) {
                        vcpu = &vm->vcpus[i];
                        /*
                         * If the vcpu is locked, then it is running on some
                         * other cpu and therefore it is not cached on the
                         * cpu in question.
                         *
                         * If it's not locked, check the last cpu it executed
                         * on.
                         */
                        if (mutex_trylock(&vcpu->mutex)) {
                                if (vcpu->cpu == cpu) {
                                        kvm_arch_ops->vcpu_decache(vcpu);
                                        vcpu->cpu = -1;
                                }
                                mutex_unlock(&vcpu->mutex);
                        }
                }
        spin_unlock(&kvm_lock);
}

static int kvm_cpu_hotplug(struct notifier_block *notifier, unsigned long val,
                           void *v)
{
        int cpu = (long)v;

        switch (val) {
        case CPU_DOWN_PREPARE:
        case CPU_UP_CANCELED:
                printk(KERN_INFO "kvm: disabling virtualization on CPU%d\n",
                       cpu);
                decache_vcpus_on_cpu(cpu);
                smp_call_function_single(cpu, kvm_arch_ops->hardware_disable,
                                         NULL, 0, 1);
                break;
        case CPU_ONLINE:
                printk(KERN_INFO "kvm: enabling virtualization on CPU%d\n",
                       cpu);
                smp_call_function_single(cpu, kvm_arch_ops->hardware_enable,
                                         NULL, 0, 1);
                break;
        }
        return NOTIFY_OK;
}

static struct notifier_block kvm_cpu_notifier = {
        .notifier_call = kvm_cpu_hotplug,
        .priority = 20, /* must be > scheduler priority */
};

static __init void kvm_init_debug(void)
{
        struct kvm_stats_debugfs_item *p;

        debugfs_dir = debugfs_create_dir("kvm", NULL);
        for (p = debugfs_entries; p->name; ++p)
                p->dentry = debugfs_create_u32(p->name, 0444, debugfs_dir,
                                               p->data);
}

static void kvm_exit_debug(void)
{
        struct kvm_stats_debugfs_item *p;

        for (p = debugfs_entries; p->name; ++p)
                debugfs_remove(p->dentry);
        debugfs_remove(debugfs_dir);
}

static int kvm_suspend(struct sys_device *dev, pm_message_t state)
{
        decache_vcpus_on_cpu(raw_smp_processor_id());
        on_each_cpu(kvm_arch_ops->hardware_disable, NULL, 0, 1);
        return 0;
}

static int kvm_resume(struct sys_device *dev)
{
        on_each_cpu(kvm_arch_ops->hardware_enable, NULL, 0, 1);
        return 0;
}

static struct sysdev_class kvm_sysdev_class = {
        set_kset_name("kvm"),
        .suspend = kvm_suspend,
        .resume = kvm_resume,
};

static struct sys_device kvm_sysdev = {
        .id = 0,
        .cls = &kvm_sysdev_class,
};

hpa_t bad_page_address;

int kvm_init_arch(struct kvm_arch_ops *ops, struct module *module)
{
        int r;

        if (kvm_arch_ops) {
                printk(KERN_ERR "kvm: already loaded the other module\n");
                return -EEXIST;
        }

        if (!ops->cpu_has_kvm_support()) {
                printk(KERN_ERR "kvm: no hardware support\n");
                return -EOPNOTSUPP;
        }
        if (ops->disabled_by_bios()) {
                printk(KERN_ERR "kvm: disabled by bios\n");
                return -EOPNOTSUPP;
        }

        kvm_arch_ops = ops;

        r = kvm_arch_ops->hardware_setup();
        if (r < 0)
                return r;

        on_each_cpu(kvm_arch_ops->hardware_enable, NULL, 0, 1);
        r = register_cpu_notifier(&kvm_cpu_notifier);
        if (r)
                goto out_free_1;
        register_reboot_notifier(&kvm_reboot_notifier);

        r = sysdev_class_register(&kvm_sysdev_class);
        if (r)
                goto out_free_2;

        r = sysdev_register(&kvm_sysdev);
        if (r)
                goto out_free_3;

        kvm_chardev_ops.owner = module;

        r = misc_register(&kvm_dev);
        if (r) {
                printk(KERN_ERR "kvm: misc device register failed\n");
                goto out_free;
        }

        return r;

out_free:
        sysdev_unregister(&kvm_sysdev);
out_free_3:
        sysdev_class_unregister(&kvm_sysdev_class);
out_free_2:
        unregister_reboot_notifier(&kvm_reboot_notifier);
        unregister_cpu_notifier(&kvm_cpu_notifier);
out_free_1:
        on_each_cpu(kvm_arch_ops->hardware_disable, NULL, 0, 1);
        kvm_arch_ops->hardware_unsetup();
        return r;
}
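
/*
 * Illustrative sketch only: an architecture backend (the vmx or svm module)
 * is expected to fill in a struct kvm_arch_ops and register it from its own
 * module_init(), undoing the registration on exit.  "my_arch_ops" below is a
 * stand-in name, not something defined in this file:
 *
 *      static int __init my_arch_init(void)
 *      {
 *              return kvm_init_arch(&my_arch_ops, THIS_MODULE);
 *      }
 *
 *      static void __exit my_arch_exit(void)
 *      {
 *              kvm_exit_arch();
 *      }
 */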

void kvm_exit_arch(void)
{
        misc_deregister(&kvm_dev);

        sysdev_unregister(&kvm_sysdev);
        sysdev_class_unregister(&kvm_sysdev_class);
        unregister_reboot_notifier(&kvm_reboot_notifier);
        unregister_cpu_notifier(&kvm_cpu_notifier);
        on_each_cpu(kvm_arch_ops->hardware_disable, NULL, 0, 1);
        kvm_arch_ops->hardware_unsetup();
        kvm_arch_ops = NULL;
}

static __init int kvm_init(void)
{
        static struct page *bad_page;
        int r = 0;

        kvm_init_debug();

        kvm_init_msr_list();

        if ((bad_page = alloc_page(GFP_KERNEL)) == NULL) {
                r = -ENOMEM;
                goto out;
        }

        bad_page_address = page_to_pfn(bad_page) << PAGE_SHIFT;
        memset(__va(bad_page_address), 0, PAGE_SIZE);

        return r;

out:
        kvm_exit_debug();
        return r;
}

static __exit void kvm_exit(void)
{
        kvm_exit_debug();
        __free_page(pfn_to_page(bad_page_address >> PAGE_SHIFT));
}

module_init(kvm_init)
module_exit(kvm_exit)

EXPORT_SYMBOL_GPL(kvm_init_arch);
EXPORT_SYMBOL_GPL(kvm_exit_arch);