/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This module enables machines with Intel VT-x extensions to run virtual
 * machines without emulation or binary translation.
 *
 * Copyright (C) 2006 Qumranet, Inc.
 *
 * Authors:
 *   Avi Kivity   <avi@qumranet.com>
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#include "iodev.h"

#include <linux/kvm_host.h>
#include <linux/kvm.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/percpu.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/miscdevice.h>
#include <linux/vmalloc.h>
#include <linux/reboot.h>
#include <linux/debugfs.h>
#include <linux/highmem.h>
#include <linux/file.h>
#include <linux/sysdev.h>
#include <linux/cpu.h>
#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/smp.h>
#include <linux/anon_inodes.h>
#include <linux/profile.h>
#include <linux/kvm_para.h>
#include <linux/pagemap.h>
#include <linux/mman.h>
#include <linux/swap.h>

#include <asm/processor.h>
#include <asm/io.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>

#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
#include "coalesced_mmio.h"
#endif

MODULE_AUTHOR("Qumranet");
MODULE_LICENSE("GPL");

DEFINE_SPINLOCK(kvm_lock);
LIST_HEAD(vm_list);

static cpumask_t cpus_hardware_enabled;

struct kmem_cache *kvm_vcpu_cache;
EXPORT_SYMBOL_GPL(kvm_vcpu_cache);

static __read_mostly struct preempt_ops kvm_preempt_ops;

struct dentry *kvm_debugfs_dir;

static long kvm_vcpu_ioctl(struct file *file, unsigned int ioctl,
			   unsigned long arg);

bool kvm_rebooting;

static inline int valid_vcpu(int n)
{
	return likely(n >= 0 && n < KVM_MAX_VCPUS);
}

/*
 * Switches to specified vcpu, until a matching vcpu_put()
 */
void vcpu_load(struct kvm_vcpu *vcpu)
{
	int cpu;

	mutex_lock(&vcpu->mutex);
	cpu = get_cpu();
	preempt_notifier_register(&vcpu->preempt_notifier);
	kvm_arch_vcpu_load(vcpu, cpu);
	put_cpu();
}

void vcpu_put(struct kvm_vcpu *vcpu)
{
	preempt_disable();
	kvm_arch_vcpu_put(vcpu);
	preempt_notifier_unregister(&vcpu->preempt_notifier);
	preempt_enable();
	mutex_unlock(&vcpu->mutex);
}

static void ack_flush(void *_completed)
{
}

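/*
 * Request a TLB flush on every vcpu of @kvm: set KVM_REQ_TLB_FLUSH in each
 * vcpu's request bitmap, then kick the physical CPUs those vcpus are
 * currently running on with an empty IPI (ack_flush) so they notice the
 * request before resuming guest execution.
 */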
void kvm_flush_remote_tlbs(struct kvm *kvm)
{
	int i, cpu, me;
	cpumask_t cpus;
	struct kvm_vcpu *vcpu;

	me = get_cpu();
	cpus_clear(cpus);
	for (i = 0; i < KVM_MAX_VCPUS; ++i) {
		vcpu = kvm->vcpus[i];
		if (!vcpu)
			continue;
		if (test_and_set_bit(KVM_REQ_TLB_FLUSH, &vcpu->requests))
			continue;
		cpu = vcpu->cpu;
		if (cpu != -1 && cpu != me)
			cpu_set(cpu, cpus);
	}
	if (cpus_empty(cpus))
		goto out;
	++kvm->stat.remote_tlb_flush;
	smp_call_function_mask(cpus, ack_flush, NULL, 1);
out:
	put_cpu();
}

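/*
 * Same pattern as kvm_flush_remote_tlbs(), but raises KVM_REQ_MMU_RELOAD so
 * each running vcpu reloads its MMU context instead of just flushing the TLB.
 */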
void kvm_reload_remote_mmus(struct kvm *kvm)
{
	int i, cpu, me;
	cpumask_t cpus;
	struct kvm_vcpu *vcpu;

	me = get_cpu();
	cpus_clear(cpus);
	for (i = 0; i < KVM_MAX_VCPUS; ++i) {
		vcpu = kvm->vcpus[i];
		if (!vcpu)
			continue;
		if (test_and_set_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests))
			continue;
		cpu = vcpu->cpu;
		if (cpu != -1 && cpu != me)
			cpu_set(cpu, cpus);
	}
	if (cpus_empty(cpus))
		goto out;
	smp_call_function_mask(cpus, ack_flush, NULL, 1);
out:
	put_cpu();
}

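/*
 * Common vcpu initialisation: set up the mutex, wait queue and back-pointer
 * to the VM, allocate the page shared with userspace as vcpu->run, and hand
 * off to the architecture-specific kvm_arch_vcpu_init().
 */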
int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id)
{
	struct page *page;
	int r;

	mutex_init(&vcpu->mutex);
	vcpu->cpu = -1;
	vcpu->kvm = kvm;
	vcpu->vcpu_id = id;
	init_waitqueue_head(&vcpu->wq);

	page = alloc_page(GFP_KERNEL | __GFP_ZERO);
	if (!page) {
		r = -ENOMEM;
		goto fail;
	}
	vcpu->run = page_address(page);

	r = kvm_arch_vcpu_init(vcpu);
	if (r < 0)
		goto fail_free_run;
	return 0;

fail_free_run:
	free_page((unsigned long)vcpu->run);
fail:
	return r;
}
EXPORT_SYMBOL_GPL(kvm_vcpu_init);

void kvm_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	kvm_arch_vcpu_uninit(vcpu);
	free_page((unsigned long)vcpu->run);
}
EXPORT_SYMBOL_GPL(kvm_vcpu_uninit);

#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
static inline struct kvm *mmu_notifier_to_kvm(struct mmu_notifier *mn)
{
	return container_of(mn, struct kvm, mmu_notifier);
}

static void kvm_mmu_notifier_invalidate_page(struct mmu_notifier *mn,
					     struct mm_struct *mm,
					     unsigned long address)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
	int need_tlb_flush;

	/*
	 * When ->invalidate_page runs, the linux pte has been zapped
	 * already but the page is still allocated until
	 * ->invalidate_page returns. So if we increase the sequence
	 * here the kvm page fault will notice if the spte can't be
	 * established because the page is going to be freed. If
	 * instead the kvm page fault establishes the spte before
	 * ->invalidate_page runs, kvm_unmap_hva will release it
	 * before returning.
	 *
	 * The sequence increase only needs to be seen at spin_unlock
	 * time, and not at spin_lock time.
	 *
	 * Increasing the sequence after the spin_unlock would be
	 * unsafe because the kvm page fault could then establish the
	 * pte after kvm_unmap_hva returned, without noticing the page
	 * is going to be freed.
	 */
	spin_lock(&kvm->mmu_lock);
	kvm->mmu_notifier_seq++;
	need_tlb_flush = kvm_unmap_hva(kvm, address);
	spin_unlock(&kvm->mmu_lock);

	/* we have to flush the tlb before the pages can be freed */
	if (need_tlb_flush)
		kvm_flush_remote_tlbs(kvm);
}

static void kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
						    struct mm_struct *mm,
						    unsigned long start,
						    unsigned long end)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
	int need_tlb_flush = 0;

	spin_lock(&kvm->mmu_lock);
	/*
	 * The count increase must become visible at unlock time as no
	 * spte can be established without taking the mmu_lock and
	 * count is also read inside the mmu_lock critical section.
	 */
	kvm->mmu_notifier_count++;
	for (; start < end; start += PAGE_SIZE)
		need_tlb_flush |= kvm_unmap_hva(kvm, start);
	spin_unlock(&kvm->mmu_lock);

	/* we have to flush the tlb before the pages can be freed */
	if (need_tlb_flush)
		kvm_flush_remote_tlbs(kvm);
}

static void kvm_mmu_notifier_invalidate_range_end(struct mmu_notifier *mn,
						  struct mm_struct *mm,
						  unsigned long start,
						  unsigned long end)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);

	spin_lock(&kvm->mmu_lock);
	/*
	 * This sequence increase will notify the kvm page fault that
	 * the page that is going to be mapped in the spte could have
	 * been freed.
	 */
	kvm->mmu_notifier_seq++;
	/*
	 * The above sequence increase must be visible before the
	 * below count decrease but both values are read by the kvm
	 * page fault under mmu_lock spinlock so we don't need to add
	 * a smp_wmb() here in between the two.
	 */
	kvm->mmu_notifier_count--;
	spin_unlock(&kvm->mmu_lock);

	BUG_ON(kvm->mmu_notifier_count < 0);
}

static int kvm_mmu_notifier_clear_flush_young(struct mmu_notifier *mn,
					      struct mm_struct *mm,
					      unsigned long address)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
	int young;

	spin_lock(&kvm->mmu_lock);
	young = kvm_age_hva(kvm, address);
	spin_unlock(&kvm->mmu_lock);

	if (young)
		kvm_flush_remote_tlbs(kvm);

	return young;
}

static const struct mmu_notifier_ops kvm_mmu_notifier_ops = {
	.invalidate_page	= kvm_mmu_notifier_invalidate_page,
	.invalidate_range_start	= kvm_mmu_notifier_invalidate_range_start,
	.invalidate_range_end	= kvm_mmu_notifier_invalidate_range_end,
	.clear_flush_young	= kvm_mmu_notifier_clear_flush_young,
};
#endif /* CONFIG_MMU_NOTIFIER && KVM_ARCH_WANT_MMU_NOTIFIER */

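/*
 * Allocate and initialise a new VM: the arch-specific struct kvm, the
 * optional coalesced MMIO ring page, the mmu notifier hooked into the
 * current mm, the I/O buses, locks and the user reference count, and link
 * the VM onto the global vm_list.
 */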
static struct kvm *kvm_create_vm(void)
{
	struct kvm *kvm = kvm_arch_create_vm();
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
	struct page *page;
#endif

	if (IS_ERR(kvm))
		goto out;

#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
	page = alloc_page(GFP_KERNEL | __GFP_ZERO);
	if (!page) {
		kfree(kvm);
		return ERR_PTR(-ENOMEM);
	}
	kvm->coalesced_mmio_ring =
			(struct kvm_coalesced_mmio_ring *)page_address(page);
#endif

#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
	{
		int err;
		kvm->mmu_notifier.ops = &kvm_mmu_notifier_ops;
		err = mmu_notifier_register(&kvm->mmu_notifier, current->mm);
		if (err) {
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
			put_page(page);
#endif
			kfree(kvm);
			return ERR_PTR(err);
		}
	}
#endif

	kvm->mm = current->mm;
	atomic_inc(&kvm->mm->mm_count);
	spin_lock_init(&kvm->mmu_lock);
	kvm_io_bus_init(&kvm->pio_bus);
	mutex_init(&kvm->lock);
	kvm_io_bus_init(&kvm->mmio_bus);
	init_rwsem(&kvm->slots_lock);
	atomic_set(&kvm->users_count, 1);
	spin_lock(&kvm_lock);
	list_add(&kvm->vm_list, &vm_list);
	spin_unlock(&kvm_lock);
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
	kvm_coalesced_mmio_init(kvm);
#endif
out:
	return kvm;
}

/*
 * Free any memory in @free but not in @dont.
 */
static void kvm_free_physmem_slot(struct kvm_memory_slot *free,
				  struct kvm_memory_slot *dont)
{
	if (!dont || free->rmap != dont->rmap)
		vfree(free->rmap);

	if (!dont || free->dirty_bitmap != dont->dirty_bitmap)
		vfree(free->dirty_bitmap);

	if (!dont || free->lpage_info != dont->lpage_info)
		vfree(free->lpage_info);

	free->npages = 0;
	free->dirty_bitmap = NULL;
	free->rmap = NULL;
	free->lpage_info = NULL;
}

void kvm_free_physmem(struct kvm *kvm)
{
	int i;

	for (i = 0; i < kvm->nmemslots; ++i)
		kvm_free_physmem_slot(&kvm->memslots[i], NULL);
}

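/*
 * Tear down a VM once its last user reference is dropped (see kvm_put_kvm
 * below): unlink it from vm_list, destroy the I/O buses, release the
 * coalesced MMIO ring and the mmu notifier, and drop the mm reference taken
 * in kvm_create_vm().
 */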
static void kvm_destroy_vm(struct kvm *kvm)
{
	struct mm_struct *mm = kvm->mm;

	spin_lock(&kvm_lock);
	list_del(&kvm->vm_list);
	spin_unlock(&kvm_lock);
	kvm_io_bus_destroy(&kvm->pio_bus);
	kvm_io_bus_destroy(&kvm->mmio_bus);
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
	if (kvm->coalesced_mmio_ring != NULL)
		free_page((unsigned long)kvm->coalesced_mmio_ring);
#endif
#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
	mmu_notifier_unregister(&kvm->mmu_notifier, kvm->mm);
#endif
	kvm_arch_destroy_vm(kvm);
	mmdrop(mm);
}

void kvm_get_kvm(struct kvm *kvm)
{
	atomic_inc(&kvm->users_count);
}
EXPORT_SYMBOL_GPL(kvm_get_kvm);

void kvm_put_kvm(struct kvm *kvm)
{
	if (atomic_dec_and_test(&kvm->users_count))
		kvm_destroy_vm(kvm);
}
EXPORT_SYMBOL_GPL(kvm_put_kvm);

static int kvm_vm_release(struct inode *inode, struct file *filp)
{
	struct kvm *kvm = filp->private_data;

	kvm_put_kvm(kvm);
	return 0;
}

/*
 * Allocate some memory and give it an address in the guest physical address
 * space.
 *
 * Discontiguous memory is allowed, mostly for framebuffers.
 *
 * Must be called holding mmap_sem for write.
 */
int __kvm_set_memory_region(struct kvm *kvm,
			    struct kvm_userspace_memory_region *mem,
			    int user_alloc)
{
	int r;
	gfn_t base_gfn;
	unsigned long npages;
	unsigned long i;
	struct kvm_memory_slot *memslot;
	struct kvm_memory_slot old, new;

	r = -EINVAL;
	/* General sanity checks */
	if (mem->memory_size & (PAGE_SIZE - 1))
		goto out;
	if (mem->guest_phys_addr & (PAGE_SIZE - 1))
		goto out;
	if (mem->slot >= KVM_MEMORY_SLOTS + KVM_PRIVATE_MEM_SLOTS)
		goto out;
	if (mem->guest_phys_addr + mem->memory_size < mem->guest_phys_addr)
		goto out;

	memslot = &kvm->memslots[mem->slot];
	base_gfn = mem->guest_phys_addr >> PAGE_SHIFT;
	npages = mem->memory_size >> PAGE_SHIFT;

	if (!npages)
		mem->flags &= ~KVM_MEM_LOG_DIRTY_PAGES;

	new = old = *memslot;

	new.base_gfn = base_gfn;
	new.npages = npages;
	new.flags = mem->flags;

	/* Disallow changing a memory slot's size. */
	r = -EINVAL;
	if (npages && old.npages && npages != old.npages)
		goto out_free;

	/* Check for overlaps */
	r = -EEXIST;
	for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
		struct kvm_memory_slot *s = &kvm->memslots[i];

		if (s == memslot)
			continue;
		if (!((base_gfn + npages <= s->base_gfn) ||
		      (base_gfn >= s->base_gfn + s->npages)))
			goto out_free;
	}

	/* Free page dirty bitmap if unneeded */
	if (!(new.flags & KVM_MEM_LOG_DIRTY_PAGES))
		new.dirty_bitmap = NULL;

	r = -ENOMEM;

	/* Allocate if a slot is being created */
#ifndef CONFIG_S390
	if (npages && !new.rmap) {
		new.rmap = vmalloc(npages * sizeof(struct page *));

		if (!new.rmap)
			goto out_free;

		memset(new.rmap, 0, npages * sizeof(*new.rmap));

		new.user_alloc = user_alloc;
		/*
		 * hva_to_rmmap() serializes with the mmu_lock and to be
		 * safe it has to ignore memslots with !user_alloc &&
		 * !userspace_addr.
		 */
		if (user_alloc)
			new.userspace_addr = mem->userspace_addr;
		else
			new.userspace_addr = 0;
	}
	if (npages && !new.lpage_info) {
		int largepages = npages / KVM_PAGES_PER_HPAGE;
		if (npages % KVM_PAGES_PER_HPAGE)
			largepages++;
		if (base_gfn % KVM_PAGES_PER_HPAGE)
			largepages++;

		new.lpage_info = vmalloc(largepages * sizeof(*new.lpage_info));

		if (!new.lpage_info)
			goto out_free;

		memset(new.lpage_info, 0, largepages * sizeof(*new.lpage_info));

		if (base_gfn % KVM_PAGES_PER_HPAGE)
			new.lpage_info[0].write_count = 1;
		if ((base_gfn+npages) % KVM_PAGES_PER_HPAGE)
			new.lpage_info[largepages-1].write_count = 1;
	}

	/* Allocate page dirty bitmap if needed */
	if ((new.flags & KVM_MEM_LOG_DIRTY_PAGES) && !new.dirty_bitmap) {
		unsigned dirty_bytes = ALIGN(npages, BITS_PER_LONG) / 8;

		new.dirty_bitmap = vmalloc(dirty_bytes);
		if (!new.dirty_bitmap)
			goto out_free;
		memset(new.dirty_bitmap, 0, dirty_bytes);
	}
#endif /* not defined CONFIG_S390 */

	if (!npages)
		kvm_arch_flush_shadow(kvm);

	spin_lock(&kvm->mmu_lock);
	if (mem->slot >= kvm->nmemslots)
		kvm->nmemslots = mem->slot + 1;

	*memslot = new;
	spin_unlock(&kvm->mmu_lock);

	r = kvm_arch_set_memory_region(kvm, mem, old, user_alloc);
	if (r) {
		spin_lock(&kvm->mmu_lock);
		*memslot = old;
		spin_unlock(&kvm->mmu_lock);
		goto out_free;
	}

	kvm_free_physmem_slot(&old, &new);
	return 0;

out_free:
	kvm_free_physmem_slot(&new, &old);
out:
	return r;
}
EXPORT_SYMBOL_GPL(__kvm_set_memory_region);

int kvm_set_memory_region(struct kvm *kvm,
			  struct kvm_userspace_memory_region *mem,
			  int user_alloc)
{
	int r;

	down_write(&kvm->slots_lock);
	r = __kvm_set_memory_region(kvm, mem, user_alloc);
	up_write(&kvm->slots_lock);
	return r;
}
EXPORT_SYMBOL_GPL(kvm_set_memory_region);

int kvm_vm_ioctl_set_memory_region(struct kvm *kvm,
				   struct kvm_userspace_memory_region *mem,
				   int user_alloc)
{
	if (mem->slot >= KVM_MEMORY_SLOTS)
		return -EINVAL;
	return kvm_set_memory_region(kvm, mem, user_alloc);
}

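/*
 * Copy a memory slot's dirty bitmap out to userspace and report through
 * *is_dirty whether any bit in it is set.  The bitmap itself is not cleared
 * here.
 */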
int kvm_get_dirty_log(struct kvm *kvm,
			struct kvm_dirty_log *log, int *is_dirty)
{
	struct kvm_memory_slot *memslot;
	int r, i;
	int n;
	unsigned long any = 0;

	r = -EINVAL;
	if (log->slot >= KVM_MEMORY_SLOTS)
		goto out;

	memslot = &kvm->memslots[log->slot];
	r = -ENOENT;
	if (!memslot->dirty_bitmap)
		goto out;

	n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;

	for (i = 0; !any && i < n/sizeof(long); ++i)
		any = memslot->dirty_bitmap[i];

	r = -EFAULT;
	if (copy_to_user(log->dirty_bitmap, memslot->dirty_bitmap, n))
		goto out;

	if (any)
		*is_dirty = 1;

	r = 0;
out:
	return r;
}

int is_error_page(struct page *page)
{
	return page == bad_page;
}
EXPORT_SYMBOL_GPL(is_error_page);

int is_error_pfn(pfn_t pfn)
{
	return pfn == bad_pfn;
}
EXPORT_SYMBOL_GPL(is_error_pfn);

static inline unsigned long bad_hva(void)
{
	return PAGE_OFFSET;
}

int kvm_is_error_hva(unsigned long addr)
{
	return addr == bad_hva();
}
EXPORT_SYMBOL_GPL(kvm_is_error_hva);

static struct kvm_memory_slot *__gfn_to_memslot(struct kvm *kvm, gfn_t gfn)
{
	int i;

	for (i = 0; i < kvm->nmemslots; ++i) {
		struct kvm_memory_slot *memslot = &kvm->memslots[i];

		if (gfn >= memslot->base_gfn
		    && gfn < memslot->base_gfn + memslot->npages)
			return memslot;
	}
	return NULL;
}

struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn)
{
	gfn = unalias_gfn(kvm, gfn);
	return __gfn_to_memslot(kvm, gfn);
}

int kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn)
{
	int i;

	gfn = unalias_gfn(kvm, gfn);
	for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
		struct kvm_memory_slot *memslot = &kvm->memslots[i];

		if (gfn >= memslot->base_gfn
		    && gfn < memslot->base_gfn + memslot->npages)
			return 1;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_is_visible_gfn);

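/*
 * Translate a guest frame number into the userspace virtual address that
 * backs it, or bad_hva() if the gfn is not covered by any memory slot.
 */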
unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn)
{
	struct kvm_memory_slot *slot;

	gfn = unalias_gfn(kvm, gfn);
	slot = __gfn_to_memslot(kvm, gfn);
	if (!slot)
		return bad_hva();
	return (slot->userspace_addr + (gfn - slot->base_gfn) * PAGE_SIZE);
}
EXPORT_SYMBOL_GPL(gfn_to_hva);

/*
 * Requires current->mm->mmap_sem to be held
 */
pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn)
{
	struct page *page[1];
	unsigned long addr;
	int npages;
	pfn_t pfn;

	might_sleep();

	addr = gfn_to_hva(kvm, gfn);
	if (kvm_is_error_hva(addr)) {
		get_page(bad_page);
		return page_to_pfn(bad_page);
	}

	npages = get_user_pages(current, current->mm, addr, 1, 1, 1, page,
				NULL);

	if (unlikely(npages != 1)) {
		struct vm_area_struct *vma;

		vma = find_vma(current->mm, addr);
		if (vma == NULL || addr < vma->vm_start ||
		    !(vma->vm_flags & VM_PFNMAP)) {
			get_page(bad_page);
			return page_to_pfn(bad_page);
		}

		pfn = ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
		BUG_ON(pfn_valid(pfn));
	} else
		pfn = page_to_pfn(page[0]);

	return pfn;
}
EXPORT_SYMBOL_GPL(gfn_to_pfn);

struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn)
{
	pfn_t pfn;

	pfn = gfn_to_pfn(kvm, gfn);
	if (pfn_valid(pfn))
		return pfn_to_page(pfn);

	WARN_ON(!pfn_valid(pfn));

	get_page(bad_page);
	return bad_page;
}
EXPORT_SYMBOL_GPL(gfn_to_page);

void kvm_release_page_clean(struct page *page)
{
	kvm_release_pfn_clean(page_to_pfn(page));
}
EXPORT_SYMBOL_GPL(kvm_release_page_clean);

void kvm_release_pfn_clean(pfn_t pfn)
{
	if (pfn_valid(pfn))
		put_page(pfn_to_page(pfn));
}
EXPORT_SYMBOL_GPL(kvm_release_pfn_clean);

void kvm_release_page_dirty(struct page *page)
{
	kvm_release_pfn_dirty(page_to_pfn(page));
}
EXPORT_SYMBOL_GPL(kvm_release_page_dirty);

void kvm_release_pfn_dirty(pfn_t pfn)
{
	kvm_set_pfn_dirty(pfn);
	kvm_release_pfn_clean(pfn);
}
EXPORT_SYMBOL_GPL(kvm_release_pfn_dirty);

void kvm_set_page_dirty(struct page *page)
{
	kvm_set_pfn_dirty(page_to_pfn(page));
}
EXPORT_SYMBOL_GPL(kvm_set_page_dirty);

void kvm_set_pfn_dirty(pfn_t pfn)
{
	if (pfn_valid(pfn)) {
		struct page *page = pfn_to_page(pfn);
		if (!PageReserved(page))
			SetPageDirty(page);
	}
}
EXPORT_SYMBOL_GPL(kvm_set_pfn_dirty);

void kvm_set_pfn_accessed(pfn_t pfn)
{
	if (pfn_valid(pfn))
		mark_page_accessed(pfn_to_page(pfn));
}
EXPORT_SYMBOL_GPL(kvm_set_pfn_accessed);

void kvm_get_pfn(pfn_t pfn)
{
	if (pfn_valid(pfn))
		get_page(pfn_to_page(pfn));
}
EXPORT_SYMBOL_GPL(kvm_get_pfn);

static int next_segment(unsigned long len, int offset)
{
	if (len > PAGE_SIZE - offset)
		return PAGE_SIZE - offset;
	else
		return len;
}

int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
			int len)
{
	int r;
	unsigned long addr;

	addr = gfn_to_hva(kvm, gfn);
	if (kvm_is_error_hva(addr))
		return -EFAULT;
	r = copy_from_user(data, (void __user *)addr + offset, len);
	if (r)
		return -EFAULT;
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_read_guest_page);

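/*
 * Read @len bytes starting at guest physical address @gpa into @data,
 * splitting the copy at page boundaries via next_segment() so each chunk
 * goes through kvm_read_guest_page().
 */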
int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len)
{
	gfn_t gfn = gpa >> PAGE_SHIFT;
	int seg;
	int offset = offset_in_page(gpa);
	int ret;

	while ((seg = next_segment(len, offset)) != 0) {
		ret = kvm_read_guest_page(kvm, gfn, data, offset, seg);
		if (ret < 0)
			return ret;
		offset = 0;
		len -= seg;
		data += seg;
		++gfn;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_read_guest);

int kvm_read_guest_atomic(struct kvm *kvm, gpa_t gpa, void *data,
			  unsigned long len)
{
	int r;
	unsigned long addr;
	gfn_t gfn = gpa >> PAGE_SHIFT;
	int offset = offset_in_page(gpa);

	addr = gfn_to_hva(kvm, gfn);
	if (kvm_is_error_hva(addr))
		return -EFAULT;
	pagefault_disable();
	r = __copy_from_user_inatomic(data, (void __user *)addr + offset, len);
	pagefault_enable();
	if (r)
		return -EFAULT;
	return 0;
}
EXPORT_SYMBOL(kvm_read_guest_atomic);

int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, const void *data,
			 int offset, int len)
{
	int r;
	unsigned long addr;

	addr = gfn_to_hva(kvm, gfn);
	if (kvm_is_error_hva(addr))
		return -EFAULT;
	r = copy_to_user((void __user *)addr + offset, data, len);
	if (r)
		return -EFAULT;
	mark_page_dirty(kvm, gfn);
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_write_guest_page);

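/*
 * Write @len bytes from @data to guest physical address @gpa, page by page,
 * marking each touched page dirty through kvm_write_guest_page().
 */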
int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
		    unsigned long len)
{
	gfn_t gfn = gpa >> PAGE_SHIFT;
	int seg;
	int offset = offset_in_page(gpa);
	int ret;

	while ((seg = next_segment(len, offset)) != 0) {
		ret = kvm_write_guest_page(kvm, gfn, data, offset, seg);
		if (ret < 0)
			return ret;
		offset = 0;
		len -= seg;
		data += seg;
		++gfn;
	}
	return 0;
}

int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len)
{
	return kvm_write_guest_page(kvm, gfn, empty_zero_page, offset, len);
}
EXPORT_SYMBOL_GPL(kvm_clear_guest_page);

int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len)
{
	gfn_t gfn = gpa >> PAGE_SHIFT;
	int seg;
	int offset = offset_in_page(gpa);
	int ret;

	while ((seg = next_segment(len, offset)) != 0) {
		ret = kvm_clear_guest_page(kvm, gfn, offset, seg);
		if (ret < 0)
			return ret;
		offset = 0;
		len -= seg;
		++gfn;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_clear_guest);

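/*
 * Set the bit for @gfn in its slot's dirty bitmap, if dirty logging is
 * enabled for that slot.
 */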
void mark_page_dirty(struct kvm *kvm, gfn_t gfn)
{
	struct kvm_memory_slot *memslot;

	gfn = unalias_gfn(kvm, gfn);
	memslot = __gfn_to_memslot(kvm, gfn);
	if (memslot && memslot->dirty_bitmap) {
		unsigned long rel_gfn = gfn - memslot->base_gfn;

		/* avoid RMW */
		if (!test_bit(rel_gfn, memslot->dirty_bitmap))
			set_bit(rel_gfn, memslot->dirty_bitmap);
	}
}

/*
 * The vCPU has executed a HLT instruction with in-kernel mode enabled.
 */
void kvm_vcpu_block(struct kvm_vcpu *vcpu)
{
	DEFINE_WAIT(wait);

	for (;;) {
		prepare_to_wait(&vcpu->wq, &wait, TASK_INTERRUPTIBLE);

		if (kvm_cpu_has_interrupt(vcpu))
			break;
		if (kvm_cpu_has_pending_timer(vcpu))
			break;
		if (kvm_arch_vcpu_runnable(vcpu))
			break;
		if (signal_pending(current))
			break;

		vcpu_put(vcpu);
		schedule();
		vcpu_load(vcpu);
	}

	finish_wait(&vcpu->wq, &wait);
}

void kvm_resched(struct kvm_vcpu *vcpu)
{
	if (!need_resched())
		return;
	cond_resched();
}
EXPORT_SYMBOL_GPL(kvm_resched);

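/*
 * Fault handler for the vcpu file mmap: page 0 is the shared kvm_run
 * structure, followed (depending on configuration) by the x86 PIO data page
 * and the coalesced MMIO ring page.
 */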
static int kvm_vcpu_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct kvm_vcpu *vcpu = vma->vm_file->private_data;
	struct page *page;

	if (vmf->pgoff == 0)
		page = virt_to_page(vcpu->run);
#ifdef CONFIG_X86
	else if (vmf->pgoff == KVM_PIO_PAGE_OFFSET)
		page = virt_to_page(vcpu->arch.pio_data);
#endif
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
	else if (vmf->pgoff == KVM_COALESCED_MMIO_PAGE_OFFSET)
		page = virt_to_page(vcpu->kvm->coalesced_mmio_ring);
#endif
	else
		return VM_FAULT_SIGBUS;
	get_page(page);
	vmf->page = page;
	return 0;
}

static struct vm_operations_struct kvm_vcpu_vm_ops = {
	.fault = kvm_vcpu_fault,
};

static int kvm_vcpu_mmap(struct file *file, struct vm_area_struct *vma)
{
	vma->vm_ops = &kvm_vcpu_vm_ops;
	return 0;
}

static int kvm_vcpu_release(struct inode *inode, struct file *filp)
{
	struct kvm_vcpu *vcpu = filp->private_data;

	kvm_put_kvm(vcpu->kvm);
	return 0;
}

static const struct file_operations kvm_vcpu_fops = {
	.release	= kvm_vcpu_release,
	.unlocked_ioctl	= kvm_vcpu_ioctl,
	.compat_ioctl	= kvm_vcpu_ioctl,
	.mmap		= kvm_vcpu_mmap,
};

/*
 * Allocates an inode for the vcpu.
 */
static int create_vcpu_fd(struct kvm_vcpu *vcpu)
{
	int fd = anon_inode_getfd("kvm-vcpu", &kvm_vcpu_fops, vcpu, 0);

	if (fd < 0)
		kvm_put_kvm(vcpu->kvm);
	return fd;
}

/*
 * Creates some virtual cpus.  Good luck creating more than one.
 */
static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, int n)
{
	int r;
	struct kvm_vcpu *vcpu;

	if (!valid_vcpu(n))
		return -EINVAL;

	vcpu = kvm_arch_vcpu_create(kvm, n);
	if (IS_ERR(vcpu))
		return PTR_ERR(vcpu);

	preempt_notifier_init(&vcpu->preempt_notifier, &kvm_preempt_ops);

	r = kvm_arch_vcpu_setup(vcpu);
	if (r)
		goto vcpu_destroy;

	mutex_lock(&kvm->lock);
	if (kvm->vcpus[n]) {
		r = -EEXIST;
		mutex_unlock(&kvm->lock);
		goto vcpu_destroy;
	}
	kvm->vcpus[n] = vcpu;
	mutex_unlock(&kvm->lock);

	/* Now it's all set up, let userspace reach it */
	kvm_get_kvm(kvm);
	r = create_vcpu_fd(vcpu);
	if (r < 0)
		goto unlink;
	return r;

unlink:
	mutex_lock(&kvm->lock);
	kvm->vcpus[n] = NULL;
	mutex_unlock(&kvm->lock);
vcpu_destroy:
	kvm_arch_vcpu_destroy(vcpu);
	return r;
}

static int kvm_vcpu_ioctl_set_sigmask(struct kvm_vcpu *vcpu, sigset_t *sigset)
{
	if (sigset) {
		sigdelsetmask(sigset, sigmask(SIGKILL)|sigmask(SIGSTOP));
		vcpu->sigset_active = 1;
		vcpu->sigset = *sigset;
	} else
		vcpu->sigset_active = 0;
	return 0;
}

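/*
 * ioctl dispatcher for a vcpu file descriptor.  Generic commands (KVM_RUN,
 * register, sregs, mp_state, translation, debug, signal mask and FPU
 * accessors) are handled here; anything else falls through to
 * kvm_arch_vcpu_ioctl().
 */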
static long kvm_vcpu_ioctl(struct file *filp,
			   unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	int r;

	if (vcpu->kvm->mm != current->mm)
		return -EIO;
	switch (ioctl) {
	case KVM_RUN:
		r = -EINVAL;
		if (arg)
			goto out;
		r = kvm_arch_vcpu_ioctl_run(vcpu, vcpu->run);
		break;
	case KVM_GET_REGS: {
		struct kvm_regs *kvm_regs;

		r = -ENOMEM;
		kvm_regs = kzalloc(sizeof(struct kvm_regs), GFP_KERNEL);
		if (!kvm_regs)
			goto out;
		r = kvm_arch_vcpu_ioctl_get_regs(vcpu, kvm_regs);
		if (r)
			goto out_free1;
		r = -EFAULT;
		if (copy_to_user(argp, kvm_regs, sizeof(struct kvm_regs)))
			goto out_free1;
		r = 0;
out_free1:
		kfree(kvm_regs);
		break;
	}
	case KVM_SET_REGS: {
		struct kvm_regs *kvm_regs;

		r = -ENOMEM;
		kvm_regs = kzalloc(sizeof(struct kvm_regs), GFP_KERNEL);
		if (!kvm_regs)
			goto out;
		r = -EFAULT;
		if (copy_from_user(kvm_regs, argp, sizeof(struct kvm_regs)))
			goto out_free2;
		r = kvm_arch_vcpu_ioctl_set_regs(vcpu, kvm_regs);
		if (r)
			goto out_free2;
		r = 0;
out_free2:
		kfree(kvm_regs);
		break;
	}
	case KVM_GET_SREGS: {
		struct kvm_sregs kvm_sregs;

		memset(&kvm_sregs, 0, sizeof kvm_sregs);
		r = kvm_arch_vcpu_ioctl_get_sregs(vcpu, &kvm_sregs);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, &kvm_sregs, sizeof kvm_sregs))
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_SREGS: {
		struct kvm_sregs kvm_sregs;

		r = -EFAULT;
		if (copy_from_user(&kvm_sregs, argp, sizeof kvm_sregs))
			goto out;
		r = kvm_arch_vcpu_ioctl_set_sregs(vcpu, &kvm_sregs);
		if (r)
			goto out;
		r = 0;
		break;
	}
	case KVM_GET_MP_STATE: {
		struct kvm_mp_state mp_state;

		r = kvm_arch_vcpu_ioctl_get_mpstate(vcpu, &mp_state);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, &mp_state, sizeof mp_state))
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_MP_STATE: {
		struct kvm_mp_state mp_state;

		r = -EFAULT;
		if (copy_from_user(&mp_state, argp, sizeof mp_state))
			goto out;
		r = kvm_arch_vcpu_ioctl_set_mpstate(vcpu, &mp_state);
		if (r)
			goto out;
		r = 0;
		break;
	}
	case KVM_TRANSLATE: {
		struct kvm_translation tr;

		r = -EFAULT;
		if (copy_from_user(&tr, argp, sizeof tr))
			goto out;
		r = kvm_arch_vcpu_ioctl_translate(vcpu, &tr);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, &tr, sizeof tr))
			goto out;
		r = 0;
		break;
	}
	case KVM_DEBUG_GUEST: {
		struct kvm_debug_guest dbg;

		r = -EFAULT;
		if (copy_from_user(&dbg, argp, sizeof dbg))
			goto out;
		r = kvm_arch_vcpu_ioctl_debug_guest(vcpu, &dbg);
		if (r)
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_SIGNAL_MASK: {
		struct kvm_signal_mask __user *sigmask_arg = argp;
		struct kvm_signal_mask kvm_sigmask;
		sigset_t sigset, *p;

		p = NULL;
		if (argp) {
			r = -EFAULT;
			if (copy_from_user(&kvm_sigmask, argp,
					   sizeof kvm_sigmask))
				goto out;
			r = -EINVAL;
			if (kvm_sigmask.len != sizeof sigset)
				goto out;
			r = -EFAULT;
			if (copy_from_user(&sigset, sigmask_arg->sigset,
					   sizeof sigset))
				goto out;
			p = &sigset;
		}
		/* pass p, which stays NULL when argp is NULL, so a NULL arg clears the mask */
		r = kvm_vcpu_ioctl_set_sigmask(vcpu, p);
		break;
	}
	case KVM_GET_FPU: {
		struct kvm_fpu fpu;

		memset(&fpu, 0, sizeof fpu);
		r = kvm_arch_vcpu_ioctl_get_fpu(vcpu, &fpu);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, &fpu, sizeof fpu))
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_FPU: {
		struct kvm_fpu fpu;

		r = -EFAULT;
		if (copy_from_user(&fpu, argp, sizeof fpu))
			goto out;
		r = kvm_arch_vcpu_ioctl_set_fpu(vcpu, &fpu);
		if (r)
			goto out;
		r = 0;
		break;
	}
	default:
		r = kvm_arch_vcpu_ioctl(filp, ioctl, arg);
	}
out:
	return r;
}

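/*
 * ioctl dispatcher for a VM file descriptor: vcpu creation, memory slot
 * setup, dirty logging and coalesced MMIO registration, with everything
 * else delegated to kvm_arch_vm_ioctl().
 */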
static long kvm_vm_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm = filp->private_data;
	void __user *argp = (void __user *)arg;
	int r;

	if (kvm->mm != current->mm)
		return -EIO;
	switch (ioctl) {
	case KVM_CREATE_VCPU:
		r = kvm_vm_ioctl_create_vcpu(kvm, arg);
		if (r < 0)
			goto out;
		break;
	case KVM_SET_USER_MEMORY_REGION: {
		struct kvm_userspace_memory_region kvm_userspace_mem;

		r = -EFAULT;
		if (copy_from_user(&kvm_userspace_mem, argp,
				   sizeof kvm_userspace_mem))
			goto out;

		r = kvm_vm_ioctl_set_memory_region(kvm, &kvm_userspace_mem, 1);
		if (r)
			goto out;
		break;
	}
	case KVM_GET_DIRTY_LOG: {
		struct kvm_dirty_log log;

		r = -EFAULT;
		if (copy_from_user(&log, argp, sizeof log))
			goto out;
		r = kvm_vm_ioctl_get_dirty_log(kvm, &log);
		if (r)
			goto out;
		break;
	}
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
	case KVM_REGISTER_COALESCED_MMIO: {
		struct kvm_coalesced_mmio_zone zone;

		r = -EFAULT;
		if (copy_from_user(&zone, argp, sizeof zone))
			goto out;
		r = -ENXIO;
		r = kvm_vm_ioctl_register_coalesced_mmio(kvm, &zone);
		if (r)
			goto out;
		r = 0;
		break;
	}
	case KVM_UNREGISTER_COALESCED_MMIO: {
		struct kvm_coalesced_mmio_zone zone;

		r = -EFAULT;
		if (copy_from_user(&zone, argp, sizeof zone))
			goto out;
		r = -ENXIO;
		r = kvm_vm_ioctl_unregister_coalesced_mmio(kvm, &zone);
		if (r)
			goto out;
		r = 0;
		break;
	}
#endif
	default:
		r = kvm_arch_vm_ioctl(filp, ioctl, arg);
	}
out:
	return r;
}

static int kvm_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct kvm *kvm = vma->vm_file->private_data;
	struct page *page;

	if (!kvm_is_visible_gfn(kvm, vmf->pgoff))
		return VM_FAULT_SIGBUS;
	page = gfn_to_page(kvm, vmf->pgoff);
	if (is_error_page(page)) {
		kvm_release_page_clean(page);
		return VM_FAULT_SIGBUS;
	}
	vmf->page = page;
	return 0;
}

static struct vm_operations_struct kvm_vm_vm_ops = {
	.fault = kvm_vm_fault,
};

static int kvm_vm_mmap(struct file *file, struct vm_area_struct *vma)
{
	vma->vm_ops = &kvm_vm_vm_ops;
	return 0;
}

static const struct file_operations kvm_vm_fops = {
	.release	= kvm_vm_release,
	.unlocked_ioctl	= kvm_vm_ioctl,
	.compat_ioctl	= kvm_vm_ioctl,
	.mmap		= kvm_vm_mmap,
};

static int kvm_dev_ioctl_create_vm(void)
{
	int fd;
	struct kvm *kvm;

	kvm = kvm_create_vm();
	if (IS_ERR(kvm))
		return PTR_ERR(kvm);
	fd = anon_inode_getfd("kvm-vm", &kvm_vm_fops, kvm, 0);
	if (fd < 0)
		kvm_put_kvm(kvm);

	return fd;
}

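/*
 * ioctl dispatcher for /dev/kvm itself: API version, VM creation, extension
 * checks, the vcpu mmap size and the trace controls.
 */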
static long kvm_dev_ioctl(struct file *filp,
			  unsigned int ioctl, unsigned long arg)
{
	long r = -EINVAL;

	switch (ioctl) {
	case KVM_GET_API_VERSION:
		r = -EINVAL;
		if (arg)
			goto out;
		r = KVM_API_VERSION;
		break;
	case KVM_CREATE_VM:
		r = -EINVAL;
		if (arg)
			goto out;
		r = kvm_dev_ioctl_create_vm();
		break;
	case KVM_CHECK_EXTENSION:
		r = kvm_dev_ioctl_check_extension(arg);
		break;
	case KVM_GET_VCPU_MMAP_SIZE:
		r = -EINVAL;
		if (arg)
			goto out;
		r = PAGE_SIZE;    /* struct kvm_run */
#ifdef CONFIG_X86
		r += PAGE_SIZE;    /* pio data page */
#endif
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
		r += PAGE_SIZE;    /* coalesced mmio ring page */
#endif
		break;
	case KVM_TRACE_ENABLE:
	case KVM_TRACE_PAUSE:
	case KVM_TRACE_DISABLE:
		r = kvm_trace_ioctl(ioctl, arg);
		break;
	default:
		return kvm_arch_dev_ioctl(filp, ioctl, arg);
	}
out:
	return r;
}

static struct file_operations kvm_chardev_ops = {
	.unlocked_ioctl = kvm_dev_ioctl,
	.compat_ioctl   = kvm_dev_ioctl,
};

static struct miscdevice kvm_dev = {
	KVM_MINOR,
	"kvm",
	&kvm_chardev_ops,
};

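/*
 * Enable/disable hardware virtualization on the current CPU, tracking which
 * CPUs already have it enabled in cpus_hardware_enabled so the arch hooks
 * run at most once per CPU.
 */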
static void hardware_enable(void *junk)
{
	int cpu = raw_smp_processor_id();

	if (cpu_isset(cpu, cpus_hardware_enabled))
		return;
	cpu_set(cpu, cpus_hardware_enabled);
	kvm_arch_hardware_enable(NULL);
}

static void hardware_disable(void *junk)
{
	int cpu = raw_smp_processor_id();

	if (!cpu_isset(cpu, cpus_hardware_enabled))
		return;
	cpu_clear(cpu, cpus_hardware_enabled);
	kvm_arch_hardware_disable(NULL);
}

static int kvm_cpu_hotplug(struct notifier_block *notifier, unsigned long val,
			   void *v)
{
	int cpu = (long)v;

	val &= ~CPU_TASKS_FROZEN;
	switch (val) {
	case CPU_DYING:
		printk(KERN_INFO "kvm: disabling virtualization on CPU%d\n",
		       cpu);
		hardware_disable(NULL);
		break;
	case CPU_UP_CANCELED:
		printk(KERN_INFO "kvm: disabling virtualization on CPU%d\n",
		       cpu);
		smp_call_function_single(cpu, hardware_disable, NULL, 1);
		break;
	case CPU_ONLINE:
		printk(KERN_INFO "kvm: enabling virtualization on CPU%d\n",
		       cpu);
		smp_call_function_single(cpu, hardware_enable, NULL, 1);
		break;
	}
	return NOTIFY_OK;
}

asmlinkage void kvm_handle_fault_on_reboot(void)
{
	if (kvm_rebooting)
		/* spin while reset goes on */
		while (true)
			;
	/* Fault while not rebooting.  We want the trace. */
	BUG();
}
EXPORT_SYMBOL_GPL(kvm_handle_fault_on_reboot);

static int kvm_reboot(struct notifier_block *notifier, unsigned long val,
		      void *v)
{
	if (val == SYS_RESTART) {
		/*
		 * Some (well, at least mine) BIOSes hang on reboot if
		 * in vmx root mode.
		 */
		printk(KERN_INFO "kvm: exiting hardware virtualization\n");
		kvm_rebooting = true;
		on_each_cpu(hardware_disable, NULL, 1);
	}
	return NOTIFY_OK;
}

static struct notifier_block kvm_reboot_notifier = {
	.notifier_call = kvm_reboot,
	.priority = 0,
};

void kvm_io_bus_init(struct kvm_io_bus *bus)
{
	memset(bus, 0, sizeof(*bus));
}

void kvm_io_bus_destroy(struct kvm_io_bus *bus)
{
	int i;

	for (i = 0; i < bus->dev_count; i++) {
		struct kvm_io_device *pos = bus->devs[i];

		kvm_iodevice_destructor(pos);
	}
}

struct kvm_io_device *kvm_io_bus_find_dev(struct kvm_io_bus *bus,
					  gpa_t addr, int len, int is_write)
{
	int i;

	for (i = 0; i < bus->dev_count; i++) {
		struct kvm_io_device *pos = bus->devs[i];

		if (pos->in_range(pos, addr, len, is_write))
			return pos;
	}

	return NULL;
}

void kvm_io_bus_register_dev(struct kvm_io_bus *bus, struct kvm_io_device *dev)
{
	BUG_ON(bus->dev_count > (NR_IOBUS_DEVS-1));

	bus->devs[bus->dev_count++] = dev;
}

static struct notifier_block kvm_cpu_notifier = {
	.notifier_call = kvm_cpu_hotplug,
	.priority = 20, /* must be > scheduler priority */
};

static int vm_stat_get(void *_offset, u64 *val)
{
	unsigned offset = (long)_offset;
	struct kvm *kvm;

	*val = 0;
	spin_lock(&kvm_lock);
	list_for_each_entry(kvm, &vm_list, vm_list)
		*val += *(u32 *)((void *)kvm + offset);
	spin_unlock(&kvm_lock);
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(vm_stat_fops, vm_stat_get, NULL, "%llu\n");

static int vcpu_stat_get(void *_offset, u64 *val)
{
	unsigned offset = (long)_offset;
	struct kvm *kvm;
	struct kvm_vcpu *vcpu;
	int i;

	*val = 0;
	spin_lock(&kvm_lock);
	list_for_each_entry(kvm, &vm_list, vm_list)
		for (i = 0; i < KVM_MAX_VCPUS; ++i) {
			vcpu = kvm->vcpus[i];
			if (vcpu)
				*val += *(u32 *)((void *)vcpu + offset);
		}
	spin_unlock(&kvm_lock);
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(vcpu_stat_fops, vcpu_stat_get, NULL, "%llu\n");

static struct file_operations *stat_fops[] = {
	[KVM_STAT_VCPU] = &vcpu_stat_fops,
	[KVM_STAT_VM]   = &vm_stat_fops,
};

static void kvm_init_debug(void)
{
	struct kvm_stats_debugfs_item *p;

	kvm_debugfs_dir = debugfs_create_dir("kvm", NULL);
	for (p = debugfs_entries; p->name; ++p)
		p->dentry = debugfs_create_file(p->name, 0444, kvm_debugfs_dir,
						(void *)(long)p->offset,
						stat_fops[p->kind]);
}

static void kvm_exit_debug(void)
{
	struct kvm_stats_debugfs_item *p;

	for (p = debugfs_entries; p->name; ++p)
		debugfs_remove(p->dentry);
	debugfs_remove(kvm_debugfs_dir);
}

static int kvm_suspend(struct sys_device *dev, pm_message_t state)
{
	hardware_disable(NULL);
	return 0;
}

static int kvm_resume(struct sys_device *dev)
{
	hardware_enable(NULL);
	return 0;
}

static struct sysdev_class kvm_sysdev_class = {
	.name = "kvm",
	.suspend = kvm_suspend,
	.resume = kvm_resume,
};

static struct sys_device kvm_sysdev = {
	.id = 0,
	.cls = &kvm_sysdev_class,
};

struct page *bad_page;
pfn_t bad_pfn;

static inline
struct kvm_vcpu *preempt_notifier_to_vcpu(struct preempt_notifier *pn)
{
	return container_of(pn, struct kvm_vcpu, preempt_notifier);
}

static void kvm_sched_in(struct preempt_notifier *pn, int cpu)
{
	struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);

	kvm_arch_vcpu_load(vcpu, cpu);
}

static void kvm_sched_out(struct preempt_notifier *pn,
			  struct task_struct *next)
{
	struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);

	kvm_arch_vcpu_put(vcpu);
}

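/*
 * Module initialisation entry point called from the arch module (e.g.
 * kvm-intel or kvm-amd): sets up debugfs, the bad_page sentinel, hardware
 * virtualization on all online CPUs, the cpu/reboot/sysdev notifiers, the
 * vcpu slab cache and finally the /dev/kvm misc device.
 */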
int kvm_init(void *opaque, unsigned int vcpu_size,
	     struct module *module)
{
	int r;
	int cpu;

	kvm_init_debug();

	r = kvm_arch_init(opaque);
	if (r)
		goto out_fail;

	bad_page = alloc_page(GFP_KERNEL | __GFP_ZERO);

	if (bad_page == NULL) {
		r = -ENOMEM;
		goto out;
	}

	bad_pfn = page_to_pfn(bad_page);

	r = kvm_arch_hardware_setup();
	if (r < 0)
		goto out_free_0;

	for_each_online_cpu(cpu) {
		smp_call_function_single(cpu,
				kvm_arch_check_processor_compat,
				&r, 1);
		if (r < 0)
			goto out_free_1;
	}

	on_each_cpu(hardware_enable, NULL, 1);
	r = register_cpu_notifier(&kvm_cpu_notifier);
	if (r)
		goto out_free_2;
	register_reboot_notifier(&kvm_reboot_notifier);

	r = sysdev_class_register(&kvm_sysdev_class);
	if (r)
		goto out_free_3;

	r = sysdev_register(&kvm_sysdev);
	if (r)
		goto out_free_4;

	/* A kmem cache lets us meet the alignment requirements of fx_save. */
	kvm_vcpu_cache = kmem_cache_create("kvm_vcpu", vcpu_size,
					   __alignof__(struct kvm_vcpu),
					   0, NULL);
	if (!kvm_vcpu_cache) {
		r = -ENOMEM;
		goto out_free_5;
	}

	kvm_chardev_ops.owner = module;

	r = misc_register(&kvm_dev);
	if (r) {
		printk(KERN_ERR "kvm: misc device register failed\n");
		goto out_free;
	}

	kvm_preempt_ops.sched_in = kvm_sched_in;
	kvm_preempt_ops.sched_out = kvm_sched_out;

	return 0;

out_free:
	kmem_cache_destroy(kvm_vcpu_cache);
out_free_5:
	sysdev_unregister(&kvm_sysdev);
out_free_4:
	sysdev_class_unregister(&kvm_sysdev_class);
out_free_3:
	unregister_reboot_notifier(&kvm_reboot_notifier);
	unregister_cpu_notifier(&kvm_cpu_notifier);
out_free_2:
	on_each_cpu(hardware_disable, NULL, 1);
out_free_1:
	kvm_arch_hardware_unsetup();
out_free_0:
	__free_page(bad_page);
out:
	kvm_arch_exit();
	kvm_exit_debug();
out_fail:
	return r;
}
EXPORT_SYMBOL_GPL(kvm_init);

void kvm_exit(void)
{
	kvm_trace_cleanup();
	misc_deregister(&kvm_dev);
	kmem_cache_destroy(kvm_vcpu_cache);
	sysdev_unregister(&kvm_sysdev);
	sysdev_class_unregister(&kvm_sysdev_class);
	unregister_reboot_notifier(&kvm_reboot_notifier);
	unregister_cpu_notifier(&kvm_cpu_notifier);
	on_each_cpu(hardware_disable, NULL, 1);
	kvm_arch_hardware_unsetup();
	kvm_arch_exit();
	kvm_exit_debug();
	__free_page(bad_page);
}
EXPORT_SYMBOL_GPL(kvm_exit);