- /*
- * Kernel-based Virtual Machine driver for Linux
- *
- * This module enables machines with Intel VT-x extensions to run virtual
- * machines without emulation or binary translation.
- *
- * Copyright (C) 2006 Qumranet, Inc.
- *
- * Authors:
- * Avi Kivity <avi@qumranet.com>
- * Yaniv Kamay <yaniv@qumranet.com>
- *
- * This work is licensed under the terms of the GNU GPL, version 2. See
- * the COPYING file in the top-level directory.
- *
- */
- #include "iodev.h"
- #include <linux/kvm_host.h>
- #include <linux/kvm.h>
- #include <linux/module.h>
- #include <linux/errno.h>
- #include <linux/percpu.h>
- #include <linux/gfp.h>
- #include <linux/mm.h>
- #include <linux/miscdevice.h>
- #include <linux/vmalloc.h>
- #include <linux/reboot.h>
- #include <linux/debugfs.h>
- #include <linux/highmem.h>
- #include <linux/file.h>
- #include <linux/sysdev.h>
- #include <linux/cpu.h>
- #include <linux/sched.h>
- #include <linux/cpumask.h>
- #include <linux/smp.h>
- #include <linux/anon_inodes.h>
- #include <linux/profile.h>
- #include <linux/kvm_para.h>
- #include <linux/pagemap.h>
- #include <linux/mman.h>
- #include <linux/swap.h>
- #include <asm/processor.h>
- #include <asm/io.h>
- #include <asm/uaccess.h>
- #include <asm/pgtable.h>
- #ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
- #include "coalesced_mmio.h"
- #endif
- MODULE_AUTHOR("Qumranet");
- MODULE_LICENSE("GPL");
- DEFINE_SPINLOCK(kvm_lock);
- LIST_HEAD(vm_list);
- static cpumask_t cpus_hardware_enabled;
- struct kmem_cache *kvm_vcpu_cache;
- EXPORT_SYMBOL_GPL(kvm_vcpu_cache);
- static __read_mostly struct preempt_ops kvm_preempt_ops;
- struct dentry *kvm_debugfs_dir;
- static long kvm_vcpu_ioctl(struct file *file, unsigned int ioctl,
- unsigned long arg);
- bool kvm_rebooting;
- static inline int valid_vcpu(int n)
- {
- return likely(n >= 0 && n < KVM_MAX_VCPUS);
- }
- /*
- * Switches to specified vcpu, until a matching vcpu_put()
- */
- void vcpu_load(struct kvm_vcpu *vcpu)
- {
- int cpu;
- mutex_lock(&vcpu->mutex);
- cpu = get_cpu();
- preempt_notifier_register(&vcpu->preempt_notifier);
- kvm_arch_vcpu_load(vcpu, cpu);
- put_cpu();
- }
- void vcpu_put(struct kvm_vcpu *vcpu)
- {
- preempt_disable();
- kvm_arch_vcpu_put(vcpu);
- preempt_notifier_unregister(&vcpu->preempt_notifier);
- preempt_enable();
- mutex_unlock(&vcpu->mutex);
- }
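- /*
- * Usage note (illustrative): callers bracket any access to per-cpu
- * vcpu state with this pair, e.g. the arch ioctl handlers
- * conceptually do:
- *
- *	vcpu_load(vcpu);
- *	... read or write vcpu registers / arch state ...
- *	vcpu_put(vcpu);
- *
- * The mutex taken in vcpu_load() is what serializes userspace access
- * to a vcpu; the preempt notifier re-loads the state if the task is
- * scheduled out and back in between the two calls.
- */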
- static void ack_flush(void *_completed)
- {
- }
- void kvm_flush_remote_tlbs(struct kvm *kvm)
- {
- int i, cpu, me;
- cpumask_t cpus;
- struct kvm_vcpu *vcpu;
- me = get_cpu();
- cpus_clear(cpus);
- for (i = 0; i < KVM_MAX_VCPUS; ++i) {
- vcpu = kvm->vcpus[i];
- if (!vcpu)
- continue;
- if (test_and_set_bit(KVM_REQ_TLB_FLUSH, &vcpu->requests))
- continue;
- cpu = vcpu->cpu;
- if (cpu != -1 && cpu != me)
- cpu_set(cpu, cpus);
- }
- if (cpus_empty(cpus))
- goto out;
- ++kvm->stat.remote_tlb_flush;
- smp_call_function_mask(cpus, ack_flush, NULL, 1);
- out:
- put_cpu();
- }
- void kvm_reload_remote_mmus(struct kvm *kvm)
- {
- int i, cpu, me;
- cpumask_t cpus;
- struct kvm_vcpu *vcpu;
- me = get_cpu();
- cpus_clear(cpus);
- for (i = 0; i < KVM_MAX_VCPUS; ++i) {
- vcpu = kvm->vcpus[i];
- if (!vcpu)
- continue;
- if (test_and_set_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests))
- continue;
- cpu = vcpu->cpu;
- if (cpu != -1 && cpu != me)
- cpu_set(cpu, cpus);
- }
- if (cpus_empty(cpus))
- goto out;
- smp_call_function_mask(cpus, ack_flush, NULL, 1);
- out:
- put_cpu();
- }
- int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id)
- {
- struct page *page;
- int r;
- mutex_init(&vcpu->mutex);
- vcpu->cpu = -1;
- vcpu->kvm = kvm;
- vcpu->vcpu_id = id;
- init_waitqueue_head(&vcpu->wq);
- page = alloc_page(GFP_KERNEL | __GFP_ZERO);
- if (!page) {
- r = -ENOMEM;
- goto fail;
- }
- vcpu->run = page_address(page);
- r = kvm_arch_vcpu_init(vcpu);
- if (r < 0)
- goto fail_free_run;
- return 0;
- fail_free_run:
- free_page((unsigned long)vcpu->run);
- fail:
- return r;
- }
- EXPORT_SYMBOL_GPL(kvm_vcpu_init);
- void kvm_vcpu_uninit(struct kvm_vcpu *vcpu)
- {
- kvm_arch_vcpu_uninit(vcpu);
- free_page((unsigned long)vcpu->run);
- }
- EXPORT_SYMBOL_GPL(kvm_vcpu_uninit);
- #if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
- static inline struct kvm *mmu_notifier_to_kvm(struct mmu_notifier *mn)
- {
- return container_of(mn, struct kvm, mmu_notifier);
- }
- static void kvm_mmu_notifier_invalidate_page(struct mmu_notifier *mn,
- struct mm_struct *mm,
- unsigned long address)
- {
- struct kvm *kvm = mmu_notifier_to_kvm(mn);
- int need_tlb_flush;
- /*
- * When ->invalidate_page runs, the linux pte has been zapped
- * already but the page is still allocated until
- * ->invalidate_page returns. So if we increase the sequence
- * here the kvm page fault will notice if the spte can't be
- * established because the page is going to be freed. If
- * instead the kvm page fault establishes the spte before
- * ->invalidate_page runs, kvm_unmap_hva will release it
- * before returning.
- *
- * The sequence increase only needs to be seen at spin_unlock
- * time, and not at spin_lock time.
- *
- * Increasing the sequence after the spin_unlock would be
- * unsafe because the kvm page fault could then establish the
- * pte after kvm_unmap_hva returned, without noticing the page
- * is going to be freed.
- */
- spin_lock(&kvm->mmu_lock);
- kvm->mmu_notifier_seq++;
- need_tlb_flush = kvm_unmap_hva(kvm, address);
- spin_unlock(&kvm->mmu_lock);
- /* we have to flush the TLB before the pages can be freed */
- if (need_tlb_flush)
- kvm_flush_remote_tlbs(kvm);
- }
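- /*
- * Hedged sketch of the consumer side (it lives in arch code, not in
- * this file; the local names are illustrative): a kvm page fault is
- * expected to sample the sequence before resolving the translation
- * and to re-check it, together with mmu_notifier_count, under
- * mmu_lock before establishing the spte:
- *
- *	seq = kvm->mmu_notifier_seq;
- *	smp_rmb();
- *	pfn = gfn_to_pfn(kvm, gfn);
- *	spin_lock(&kvm->mmu_lock);
- *	if (kvm->mmu_notifier_count || kvm->mmu_notifier_seq != seq)
- *		goto retry;
- *	... establish the spte ...
- *	spin_unlock(&kvm->mmu_lock);
- */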
- static void kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
- struct mm_struct *mm,
- unsigned long start,
- unsigned long end)
- {
- struct kvm *kvm = mmu_notifier_to_kvm(mn);
- int need_tlb_flush = 0;
- spin_lock(&kvm->mmu_lock);
- /*
- * The count increase must become visible at unlock time as no
- * spte can be established without taking the mmu_lock and
- * count is also read inside the mmu_lock critical section.
- */
- kvm->mmu_notifier_count++;
- for (; start < end; start += PAGE_SIZE)
- need_tlb_flush |= kvm_unmap_hva(kvm, start);
- spin_unlock(&kvm->mmu_lock);
- /* we have to flush the TLB before the pages can be freed */
- if (need_tlb_flush)
- kvm_flush_remote_tlbs(kvm);
- }
- static void kvm_mmu_notifier_invalidate_range_end(struct mmu_notifier *mn,
- struct mm_struct *mm,
- unsigned long start,
- unsigned long end)
- {
- struct kvm *kvm = mmu_notifier_to_kvm(mn);
- spin_lock(&kvm->mmu_lock);
- /*
- * This sequence increase will notify the kvm page fault that
- * the page that is going to be mapped in the spte could have
- * been freed.
- */
- kvm->mmu_notifier_seq++;
- /*
- * The above sequence increase must be visible before the
- * below count decrease but both values are read by the kvm
- * page fault under mmu_lock spinlock so we don't need to add
- * a smp_wmb() here in between the two.
- */
- kvm->mmu_notifier_count--;
- spin_unlock(&kvm->mmu_lock);
- BUG_ON(kvm->mmu_notifier_count < 0);
- }
- static int kvm_mmu_notifier_clear_flush_young(struct mmu_notifier *mn,
- struct mm_struct *mm,
- unsigned long address)
- {
- struct kvm *kvm = mmu_notifier_to_kvm(mn);
- int young;
- spin_lock(&kvm->mmu_lock);
- young = kvm_age_hva(kvm, address);
- spin_unlock(&kvm->mmu_lock);
- if (young)
- kvm_flush_remote_tlbs(kvm);
- return young;
- }
- static const struct mmu_notifier_ops kvm_mmu_notifier_ops = {
- .invalidate_page = kvm_mmu_notifier_invalidate_page,
- .invalidate_range_start = kvm_mmu_notifier_invalidate_range_start,
- .invalidate_range_end = kvm_mmu_notifier_invalidate_range_end,
- .clear_flush_young = kvm_mmu_notifier_clear_flush_young,
- };
- #endif /* CONFIG_MMU_NOTIFIER && KVM_ARCH_WANT_MMU_NOTIFIER */
- static struct kvm *kvm_create_vm(void)
- {
- struct kvm *kvm = kvm_arch_create_vm();
- #ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
- struct page *page;
- #endif
- if (IS_ERR(kvm))
- goto out;
- #ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
- page = alloc_page(GFP_KERNEL | __GFP_ZERO);
- if (!page) {
- kfree(kvm);
- return ERR_PTR(-ENOMEM);
- }
- kvm->coalesced_mmio_ring =
- (struct kvm_coalesced_mmio_ring *)page_address(page);
- #endif
- #if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
- {
- int err;
- kvm->mmu_notifier.ops = &kvm_mmu_notifier_ops;
- err = mmu_notifier_register(&kvm->mmu_notifier, current->mm);
- if (err) {
- #ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
- put_page(page);
- #endif
- kfree(kvm);
- return ERR_PTR(err);
- }
- }
- #endif
- kvm->mm = current->mm;
- atomic_inc(&kvm->mm->mm_count);
- spin_lock_init(&kvm->mmu_lock);
- kvm_io_bus_init(&kvm->pio_bus);
- mutex_init(&kvm->lock);
- kvm_io_bus_init(&kvm->mmio_bus);
- init_rwsem(&kvm->slots_lock);
- atomic_set(&kvm->users_count, 1);
- spin_lock(&kvm_lock);
- list_add(&kvm->vm_list, &vm_list);
- spin_unlock(&kvm_lock);
- #ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
- kvm_coalesced_mmio_init(kvm);
- #endif
- out:
- return kvm;
- }
- /*
- * Free any memory in @free but not in @dont.
- */
- static void kvm_free_physmem_slot(struct kvm_memory_slot *free,
- struct kvm_memory_slot *dont)
- {
- if (!dont || free->rmap != dont->rmap)
- vfree(free->rmap);
- if (!dont || free->dirty_bitmap != dont->dirty_bitmap)
- vfree(free->dirty_bitmap);
- if (!dont || free->lpage_info != dont->lpage_info)
- vfree(free->lpage_info);
- free->npages = 0;
- free->dirty_bitmap = NULL;
- free->rmap = NULL;
- free->lpage_info = NULL;
- }
- void kvm_free_physmem(struct kvm *kvm)
- {
- int i;
- for (i = 0; i < kvm->nmemslots; ++i)
- kvm_free_physmem_slot(&kvm->memslots[i], NULL);
- }
- static void kvm_destroy_vm(struct kvm *kvm)
- {
- struct mm_struct *mm = kvm->mm;
- spin_lock(&kvm_lock);
- list_del(&kvm->vm_list);
- spin_unlock(&kvm_lock);
- kvm_io_bus_destroy(&kvm->pio_bus);
- kvm_io_bus_destroy(&kvm->mmio_bus);
- #ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
- if (kvm->coalesced_mmio_ring != NULL)
- free_page((unsigned long)kvm->coalesced_mmio_ring);
- #endif
- #if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
- mmu_notifier_unregister(&kvm->mmu_notifier, kvm->mm);
- #endif
- kvm_arch_destroy_vm(kvm);
- mmdrop(mm);
- }
- void kvm_get_kvm(struct kvm *kvm)
- {
- atomic_inc(&kvm->users_count);
- }
- EXPORT_SYMBOL_GPL(kvm_get_kvm);
- void kvm_put_kvm(struct kvm *kvm)
- {
- if (atomic_dec_and_test(&kvm->users_count))
- kvm_destroy_vm(kvm);
- }
- EXPORT_SYMBOL_GPL(kvm_put_kvm);
- static int kvm_vm_release(struct inode *inode, struct file *filp)
- {
- struct kvm *kvm = filp->private_data;
- kvm_put_kvm(kvm);
- return 0;
- }
- /*
- * Allocate some memory and give it an address in the guest physical address
- * space.
- *
- * Discontiguous memory is allowed, mostly for framebuffers.
- *
- * Must be called holding mmap_sem for write.
- */
- int __kvm_set_memory_region(struct kvm *kvm,
- struct kvm_userspace_memory_region *mem,
- int user_alloc)
- {
- int r;
- gfn_t base_gfn;
- unsigned long npages;
- unsigned long i;
- struct kvm_memory_slot *memslot;
- struct kvm_memory_slot old, new;
- r = -EINVAL;
- /* General sanity checks */
- if (mem->memory_size & (PAGE_SIZE - 1))
- goto out;
- if (mem->guest_phys_addr & (PAGE_SIZE - 1))
- goto out;
- if (mem->slot >= KVM_MEMORY_SLOTS + KVM_PRIVATE_MEM_SLOTS)
- goto out;
- if (mem->guest_phys_addr + mem->memory_size < mem->guest_phys_addr)
- goto out;
- memslot = &kvm->memslots[mem->slot];
- base_gfn = mem->guest_phys_addr >> PAGE_SHIFT;
- npages = mem->memory_size >> PAGE_SHIFT;
- if (!npages)
- mem->flags &= ~KVM_MEM_LOG_DIRTY_PAGES;
- new = old = *memslot;
- new.base_gfn = base_gfn;
- new.npages = npages;
- new.flags = mem->flags;
- /* Disallow changing a memory slot's size. */
- r = -EINVAL;
- if (npages && old.npages && npages != old.npages)
- goto out_free;
- /* Check for overlaps */
- r = -EEXIST;
- for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
- struct kvm_memory_slot *s = &kvm->memslots[i];
- if (s == memslot)
- continue;
- if (!((base_gfn + npages <= s->base_gfn) ||
- (base_gfn >= s->base_gfn + s->npages)))
- goto out_free;
- }
- /* Free page dirty bitmap if unneeded */
- if (!(new.flags & KVM_MEM_LOG_DIRTY_PAGES))
- new.dirty_bitmap = NULL;
- r = -ENOMEM;
- /* Allocate if a slot is being created */
- #ifndef CONFIG_S390
- if (npages && !new.rmap) {
- new.rmap = vmalloc(npages * sizeof(struct page *));
- if (!new.rmap)
- goto out_free;
- memset(new.rmap, 0, npages * sizeof(*new.rmap));
- new.user_alloc = user_alloc;
- /*
- * hva_to_rmmap() serializes with the mmu_lock and to be
- * safe it has to ignore memslots with !user_alloc &&
- * !userspace_addr.
- */
- if (user_alloc)
- new.userspace_addr = mem->userspace_addr;
- else
- new.userspace_addr = 0;
- }
- if (npages && !new.lpage_info) {
- int largepages = npages / KVM_PAGES_PER_HPAGE;
- if (npages % KVM_PAGES_PER_HPAGE)
- largepages++;
- if (base_gfn % KVM_PAGES_PER_HPAGE)
- largepages++;
- new.lpage_info = vmalloc(largepages * sizeof(*new.lpage_info));
- if (!new.lpage_info)
- goto out_free;
- memset(new.lpage_info, 0, largepages * sizeof(*new.lpage_info));
- if (base_gfn % KVM_PAGES_PER_HPAGE)
- new.lpage_info[0].write_count = 1;
- if ((base_gfn+npages) % KVM_PAGES_PER_HPAGE)
- new.lpage_info[largepages-1].write_count = 1;
- }
- /* Allocate page dirty bitmap if needed */
- if ((new.flags & KVM_MEM_LOG_DIRTY_PAGES) && !new.dirty_bitmap) {
- unsigned dirty_bytes = ALIGN(npages, BITS_PER_LONG) / 8;
- new.dirty_bitmap = vmalloc(dirty_bytes);
- if (!new.dirty_bitmap)
- goto out_free;
- memset(new.dirty_bitmap, 0, dirty_bytes);
- }
- #endif /* not defined CONFIG_S390 */
- if (!npages)
- kvm_arch_flush_shadow(kvm);
- spin_lock(&kvm->mmu_lock);
- if (mem->slot >= kvm->nmemslots)
- kvm->nmemslots = mem->slot + 1;
- *memslot = new;
- spin_unlock(&kvm->mmu_lock);
- r = kvm_arch_set_memory_region(kvm, mem, old, user_alloc);
- if (r) {
- spin_lock(&kvm->mmu_lock);
- *memslot = old;
- spin_unlock(&kvm->mmu_lock);
- goto out_free;
- }
- kvm_free_physmem_slot(&old, &new);
- return 0;
- out_free:
- kvm_free_physmem_slot(&new, &old);
- out:
- return r;
- }
- EXPORT_SYMBOL_GPL(__kvm_set_memory_region);
- int kvm_set_memory_region(struct kvm *kvm,
- struct kvm_userspace_memory_region *mem,
- int user_alloc)
- {
- int r;
- down_write(&kvm->slots_lock);
- r = __kvm_set_memory_region(kvm, mem, user_alloc);
- up_write(&kvm->slots_lock);
- return r;
- }
- EXPORT_SYMBOL_GPL(kvm_set_memory_region);
- int kvm_vm_ioctl_set_memory_region(struct kvm *kvm,
- struct kvm_userspace_memory_region *mem,
- int user_alloc)
- {
- if (mem->slot >= KVM_MEMORY_SLOTS)
- return -EINVAL;
- return kvm_set_memory_region(kvm, mem, user_alloc);
- }
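- /*
- * Illustrative userspace usage (a sketch; assumes vm_fd is a VM file
- * descriptor and ram is page-aligned memory obtained from mmap()):
- *
- *	struct kvm_userspace_memory_region mem = {
- *		.slot = 0,
- *		.guest_phys_addr = 0,
- *		.memory_size = ram_size,
- *		.userspace_addr = (__u64)ram,
- *	};
- *	ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &mem);
- *
- * memory_size and guest_phys_addr must be page-aligned, as enforced
- * by the sanity checks in __kvm_set_memory_region() above.
- */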
- int kvm_get_dirty_log(struct kvm *kvm,
- struct kvm_dirty_log *log, int *is_dirty)
- {
- struct kvm_memory_slot *memslot;
- int r, i;
- int n;
- unsigned long any = 0;
- r = -EINVAL;
- if (log->slot >= KVM_MEMORY_SLOTS)
- goto out;
- memslot = &kvm->memslots[log->slot];
- r = -ENOENT;
- if (!memslot->dirty_bitmap)
- goto out;
- n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;
- for (i = 0; !any && i < n/sizeof(long); ++i)
- any = memslot->dirty_bitmap[i];
- r = -EFAULT;
- if (copy_to_user(log->dirty_bitmap, memslot->dirty_bitmap, n))
- goto out;
- if (any)
- *is_dirty = 1;
- r = 0;
- out:
- return r;
- }
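- /*
- * Illustrative userspace usage (sketch): once a slot was created with
- * the KVM_MEM_LOG_DIRTY_PAGES flag, the accumulated bitmap is fetched
- * with:
- *
- *	struct kvm_dirty_log log = {
- *		.slot = 0,
- *		.dirty_bitmap = bitmap,
- *	};
- *	ioctl(vm_fd, KVM_GET_DIRTY_LOG, &log);
- *
- * bitmap must hold one bit per page in the slot, rounded up to a long
- * boundary (see the ALIGN above).
- */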
- int is_error_page(struct page *page)
- {
- return page == bad_page;
- }
- EXPORT_SYMBOL_GPL(is_error_page);
- int is_error_pfn(pfn_t pfn)
- {
- return pfn == bad_pfn;
- }
- EXPORT_SYMBOL_GPL(is_error_pfn);
- static inline unsigned long bad_hva(void)
- {
- return PAGE_OFFSET;
- }
- int kvm_is_error_hva(unsigned long addr)
- {
- return addr == bad_hva();
- }
- EXPORT_SYMBOL_GPL(kvm_is_error_hva);
- static struct kvm_memory_slot *__gfn_to_memslot(struct kvm *kvm, gfn_t gfn)
- {
- int i;
- for (i = 0; i < kvm->nmemslots; ++i) {
- struct kvm_memory_slot *memslot = &kvm->memslots[i];
- if (gfn >= memslot->base_gfn
- && gfn < memslot->base_gfn + memslot->npages)
- return memslot;
- }
- return NULL;
- }
- struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn)
- {
- gfn = unalias_gfn(kvm, gfn);
- return __gfn_to_memslot(kvm, gfn);
- }
- int kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn)
- {
- int i;
- gfn = unalias_gfn(kvm, gfn);
- for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
- struct kvm_memory_slot *memslot = &kvm->memslots[i];
- if (gfn >= memslot->base_gfn
- && gfn < memslot->base_gfn + memslot->npages)
- return 1;
- }
- return 0;
- }
- EXPORT_SYMBOL_GPL(kvm_is_visible_gfn);
- unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn)
- {
- struct kvm_memory_slot *slot;
- gfn = unalias_gfn(kvm, gfn);
- slot = __gfn_to_memslot(kvm, gfn);
- if (!slot)
- return bad_hva();
- return (slot->userspace_addr + (gfn - slot->base_gfn) * PAGE_SIZE);
- }
- EXPORT_SYMBOL_GPL(gfn_to_hva);
- /*
- * Requires current->mm->mmap_sem to be held
- */
- pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn)
- {
- struct page *page[1];
- unsigned long addr;
- int npages;
- pfn_t pfn;
- might_sleep();
- addr = gfn_to_hva(kvm, gfn);
- if (kvm_is_error_hva(addr)) {
- get_page(bad_page);
- return page_to_pfn(bad_page);
- }
- npages = get_user_pages(current, current->mm, addr, 1, 1, 1, page,
- NULL);
- if (unlikely(npages != 1)) {
- struct vm_area_struct *vma;
- vma = find_vma(current->mm, addr);
- if (vma == NULL || addr < vma->vm_start ||
- !(vma->vm_flags & VM_PFNMAP)) {
- get_page(bad_page);
- return page_to_pfn(bad_page);
- }
- pfn = ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
- BUG_ON(pfn_valid(pfn));
- } else
- pfn = page_to_pfn(page[0]);
- return pfn;
- }
- EXPORT_SYMBOL_GPL(gfn_to_pfn);
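- /*
- * Caller-side note (illustrative): per the comment above, a typical
- * use outside the fault path is
- *
- *	down_read(&current->mm->mmap_sem);
- *	pfn = gfn_to_pfn(kvm, gfn);
- *	up_read(&current->mm->mmap_sem);
- *
- * since both get_user_pages() and find_vma() need mmap_sem held.
- */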
- struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn)
- {
- pfn_t pfn;
- pfn = gfn_to_pfn(kvm, gfn);
- if (pfn_valid(pfn))
- return pfn_to_page(pfn);
- WARN_ON(!pfn_valid(pfn));
- get_page(bad_page);
- return bad_page;
- }
- EXPORT_SYMBOL_GPL(gfn_to_page);
- void kvm_release_page_clean(struct page *page)
- {
- kvm_release_pfn_clean(page_to_pfn(page));
- }
- EXPORT_SYMBOL_GPL(kvm_release_page_clean);
- void kvm_release_pfn_clean(pfn_t pfn)
- {
- if (pfn_valid(pfn))
- put_page(pfn_to_page(pfn));
- }
- EXPORT_SYMBOL_GPL(kvm_release_pfn_clean);
- void kvm_release_page_dirty(struct page *page)
- {
- kvm_release_pfn_dirty(page_to_pfn(page));
- }
- EXPORT_SYMBOL_GPL(kvm_release_page_dirty);
- void kvm_release_pfn_dirty(pfn_t pfn)
- {
- kvm_set_pfn_dirty(pfn);
- kvm_release_pfn_clean(pfn);
- }
- EXPORT_SYMBOL_GPL(kvm_release_pfn_dirty);
- void kvm_set_page_dirty(struct page *page)
- {
- kvm_set_pfn_dirty(page_to_pfn(page));
- }
- EXPORT_SYMBOL_GPL(kvm_set_page_dirty);
- void kvm_set_pfn_dirty(pfn_t pfn)
- {
- if (pfn_valid(pfn)) {
- struct page *page = pfn_to_page(pfn);
- if (!PageReserved(page))
- SetPageDirty(page);
- }
- }
- EXPORT_SYMBOL_GPL(kvm_set_pfn_dirty);
- void kvm_set_pfn_accessed(pfn_t pfn)
- {
- if (pfn_valid(pfn))
- mark_page_accessed(pfn_to_page(pfn));
- }
- EXPORT_SYMBOL_GPL(kvm_set_pfn_accessed);
- void kvm_get_pfn(pfn_t pfn)
- {
- if (pfn_valid(pfn))
- get_page(pfn_to_page(pfn));
- }
- EXPORT_SYMBOL_GPL(kvm_get_pfn);
- static int next_segment(unsigned long len, int offset)
- {
- if (len > PAGE_SIZE - offset)
- return PAGE_SIZE - offset;
- else
- return len;
- }
- int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
- int len)
- {
- int r;
- unsigned long addr;
- addr = gfn_to_hva(kvm, gfn);
- if (kvm_is_error_hva(addr))
- return -EFAULT;
- r = copy_from_user(data, (void __user *)addr + offset, len);
- if (r)
- return -EFAULT;
- return 0;
- }
- EXPORT_SYMBOL_GPL(kvm_read_guest_page);
- int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len)
- {
- gfn_t gfn = gpa >> PAGE_SHIFT;
- int seg;
- int offset = offset_in_page(gpa);
- int ret;
- while ((seg = next_segment(len, offset)) != 0) {
- ret = kvm_read_guest_page(kvm, gfn, data, offset, seg);
- if (ret < 0)
- return ret;
- offset = 0;
- len -= seg;
- data += seg;
- ++gfn;
- }
- return 0;
- }
- EXPORT_SYMBOL_GPL(kvm_read_guest);
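- /*
- * Worked example (illustrative): with 4K pages, a 6000-byte read from
- * gpa 0x1f00 is chunked by next_segment() into 256 bytes finishing
- * gfn 1, a full 4096-byte page from gfn 2, and the remaining 1648
- * bytes from gfn 3; offset drops to 0 after the first chunk.
- */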
- int kvm_read_guest_atomic(struct kvm *kvm, gpa_t gpa, void *data,
- unsigned long len)
- {
- int r;
- unsigned long addr;
- gfn_t gfn = gpa >> PAGE_SHIFT;
- int offset = offset_in_page(gpa);
- addr = gfn_to_hva(kvm, gfn);
- if (kvm_is_error_hva(addr))
- return -EFAULT;
- pagefault_disable();
- r = __copy_from_user_inatomic(data, (void __user *)addr + offset, len);
- pagefault_enable();
- if (r)
- return -EFAULT;
- return 0;
- }
- EXPORT_SYMBOL(kvm_read_guest_atomic);
- int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, const void *data,
- int offset, int len)
- {
- int r;
- unsigned long addr;
- addr = gfn_to_hva(kvm, gfn);
- if (kvm_is_error_hva(addr))
- return -EFAULT;
- r = copy_to_user((void __user *)addr + offset, data, len);
- if (r)
- return -EFAULT;
- mark_page_dirty(kvm, gfn);
- return 0;
- }
- EXPORT_SYMBOL_GPL(kvm_write_guest_page);
- int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
- unsigned long len)
- {
- gfn_t gfn = gpa >> PAGE_SHIFT;
- int seg;
- int offset = offset_in_page(gpa);
- int ret;
- while ((seg = next_segment(len, offset)) != 0) {
- ret = kvm_write_guest_page(kvm, gfn, data, offset, seg);
- if (ret < 0)
- return ret;
- offset = 0;
- len -= seg;
- data += seg;
- ++gfn;
- }
- return 0;
- }
- int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len)
- {
- return kvm_write_guest_page(kvm, gfn, empty_zero_page, offset, len);
- }
- EXPORT_SYMBOL_GPL(kvm_clear_guest_page);
- int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len)
- {
- gfn_t gfn = gpa >> PAGE_SHIFT;
- int seg;
- int offset = offset_in_page(gpa);
- int ret;
- while ((seg = next_segment(len, offset)) != 0) {
- ret = kvm_clear_guest_page(kvm, gfn, offset, seg);
- if (ret < 0)
- return ret;
- offset = 0;
- len -= seg;
- ++gfn;
- }
- return 0;
- }
- EXPORT_SYMBOL_GPL(kvm_clear_guest);
- void mark_page_dirty(struct kvm *kvm, gfn_t gfn)
- {
- struct kvm_memory_slot *memslot;
- gfn = unalias_gfn(kvm, gfn);
- memslot = __gfn_to_memslot(kvm, gfn);
- if (memslot && memslot->dirty_bitmap) {
- unsigned long rel_gfn = gfn - memslot->base_gfn;
- /* avoid an atomic read-modify-write if the bit is already set */
- if (!test_bit(rel_gfn, memslot->dirty_bitmap))
- set_bit(rel_gfn, memslot->dirty_bitmap);
- }
- }
- /*
- * The vCPU has executed an HLT instruction with in-kernel mode enabled.
- */
- void kvm_vcpu_block(struct kvm_vcpu *vcpu)
- {
- DEFINE_WAIT(wait);
- for (;;) {
- prepare_to_wait(&vcpu->wq, &wait, TASK_INTERRUPTIBLE);
- if (kvm_cpu_has_interrupt(vcpu))
- break;
- if (kvm_cpu_has_pending_timer(vcpu))
- break;
- if (kvm_arch_vcpu_runnable(vcpu))
- break;
- if (signal_pending(current))
- break;
- vcpu_put(vcpu);
- schedule();
- vcpu_load(vcpu);
- }
- finish_wait(&vcpu->wq, &wait);
- }
- void kvm_resched(struct kvm_vcpu *vcpu)
- {
- if (!need_resched())
- return;
- cond_resched();
- }
- EXPORT_SYMBOL_GPL(kvm_resched);
- static int kvm_vcpu_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
- {
- struct kvm_vcpu *vcpu = vma->vm_file->private_data;
- struct page *page;
- if (vmf->pgoff == 0)
- page = virt_to_page(vcpu->run);
- #ifdef CONFIG_X86
- else if (vmf->pgoff == KVM_PIO_PAGE_OFFSET)
- page = virt_to_page(vcpu->arch.pio_data);
- #endif
- #ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
- else if (vmf->pgoff == KVM_COALESCED_MMIO_PAGE_OFFSET)
- page = virt_to_page(vcpu->kvm->coalesced_mmio_ring);
- #endif
- else
- return VM_FAULT_SIGBUS;
- get_page(page);
- vmf->page = page;
- return 0;
- }
- static struct vm_operations_struct kvm_vcpu_vm_ops = {
- .fault = kvm_vcpu_fault,
- };
- static int kvm_vcpu_mmap(struct file *file, struct vm_area_struct *vma)
- {
- vma->vm_ops = &kvm_vcpu_vm_ops;
- return 0;
- }
- static int kvm_vcpu_release(struct inode *inode, struct file *filp)
- {
- struct kvm_vcpu *vcpu = filp->private_data;
- kvm_put_kvm(vcpu->kvm);
- return 0;
- }
- static const struct file_operations kvm_vcpu_fops = {
- .release = kvm_vcpu_release,
- .unlocked_ioctl = kvm_vcpu_ioctl,
- .compat_ioctl = kvm_vcpu_ioctl,
- .mmap = kvm_vcpu_mmap,
- };
- /*
- * Allocates an inode for the vcpu.
- */
- static int create_vcpu_fd(struct kvm_vcpu *vcpu)
- {
- int fd = anon_inode_getfd("kvm-vcpu", &kvm_vcpu_fops, vcpu, 0);
- if (fd < 0)
- kvm_put_kvm(vcpu->kvm);
- return fd;
- }
- /*
- * Creates some virtual cpus. Good luck creating more than one.
- */
- static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, int n)
- {
- int r;
- struct kvm_vcpu *vcpu;
- if (!valid_vcpu(n))
- return -EINVAL;
- vcpu = kvm_arch_vcpu_create(kvm, n);
- if (IS_ERR(vcpu))
- return PTR_ERR(vcpu);
- preempt_notifier_init(&vcpu->preempt_notifier, &kvm_preempt_ops);
- r = kvm_arch_vcpu_setup(vcpu);
- if (r)
- goto vcpu_destroy;
- mutex_lock(&kvm->lock);
- if (kvm->vcpus[n]) {
- r = -EEXIST;
- mutex_unlock(&kvm->lock);
- goto vcpu_destroy;
- }
- kvm->vcpus[n] = vcpu;
- mutex_unlock(&kvm->lock);
- /* Now it's all set up, let userspace reach it */
- kvm_get_kvm(kvm);
- r = create_vcpu_fd(vcpu);
- if (r < 0)
- goto unlink;
- return r;
- unlink:
- mutex_lock(&kvm->lock);
- kvm->vcpus[n] = NULL;
- mutex_unlock(&kvm->lock);
- vcpu_destroy:
- kvm_arch_vcpu_destroy(vcpu);
- return r;
- }
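- /*
- * Illustrative userspace usage (sketch): a vcpu is created through the
- * VM fd and driven through its own fd plus the mmap'ed kvm_run area:
- *
- *	vcpu_fd = ioctl(vm_fd, KVM_CREATE_VCPU, 0);
- *	mmap_size = ioctl(kvm_fd, KVM_GET_VCPU_MMAP_SIZE, 0);
- *	run = mmap(NULL, mmap_size, PROT_READ | PROT_WRITE,
- *		   MAP_SHARED, vcpu_fd, 0);
- *	ioctl(vcpu_fd, KVM_RUN, 0);
- *
- * On return from KVM_RUN, run->exit_reason says why the guest stopped.
- */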
- static int kvm_vcpu_ioctl_set_sigmask(struct kvm_vcpu *vcpu, sigset_t *sigset)
- {
- if (sigset) {
- sigdelsetmask(sigset, sigmask(SIGKILL)|sigmask(SIGSTOP));
- vcpu->sigset_active = 1;
- vcpu->sigset = *sigset;
- } else
- vcpu->sigset_active = 0;
- return 0;
- }
- static long kvm_vcpu_ioctl(struct file *filp,
- unsigned int ioctl, unsigned long arg)
- {
- struct kvm_vcpu *vcpu = filp->private_data;
- void __user *argp = (void __user *)arg;
- int r;
- if (vcpu->kvm->mm != current->mm)
- return -EIO;
- switch (ioctl) {
- case KVM_RUN:
- r = -EINVAL;
- if (arg)
- goto out;
- r = kvm_arch_vcpu_ioctl_run(vcpu, vcpu->run);
- break;
- case KVM_GET_REGS: {
- struct kvm_regs *kvm_regs;
- r = -ENOMEM;
- kvm_regs = kzalloc(sizeof(struct kvm_regs), GFP_KERNEL);
- if (!kvm_regs)
- goto out;
- r = kvm_arch_vcpu_ioctl_get_regs(vcpu, kvm_regs);
- if (r)
- goto out_free1;
- r = -EFAULT;
- if (copy_to_user(argp, kvm_regs, sizeof(struct kvm_regs)))
- goto out_free1;
- r = 0;
- out_free1:
- kfree(kvm_regs);
- break;
- }
- case KVM_SET_REGS: {
- struct kvm_regs *kvm_regs;
- r = -ENOMEM;
- kvm_regs = kzalloc(sizeof(struct kvm_regs), GFP_KERNEL);
- if (!kvm_regs)
- goto out;
- r = -EFAULT;
- if (copy_from_user(kvm_regs, argp, sizeof(struct kvm_regs)))
- goto out_free2;
- r = kvm_arch_vcpu_ioctl_set_regs(vcpu, kvm_regs);
- if (r)
- goto out_free2;
- r = 0;
- out_free2:
- kfree(kvm_regs);
- break;
- }
- case KVM_GET_SREGS: {
- struct kvm_sregs kvm_sregs;
- memset(&kvm_sregs, 0, sizeof kvm_sregs);
- r = kvm_arch_vcpu_ioctl_get_sregs(vcpu, &kvm_sregs);
- if (r)
- goto out;
- r = -EFAULT;
- if (copy_to_user(argp, &kvm_sregs, sizeof kvm_sregs))
- goto out;
- r = 0;
- break;
- }
- case KVM_SET_SREGS: {
- struct kvm_sregs kvm_sregs;
- r = -EFAULT;
- if (copy_from_user(&kvm_sregs, argp, sizeof kvm_sregs))
- goto out;
- r = kvm_arch_vcpu_ioctl_set_sregs(vcpu, &kvm_sregs);
- if (r)
- goto out;
- r = 0;
- break;
- }
- case KVM_GET_MP_STATE: {
- struct kvm_mp_state mp_state;
- r = kvm_arch_vcpu_ioctl_get_mpstate(vcpu, &mp_state);
- if (r)
- goto out;
- r = -EFAULT;
- if (copy_to_user(argp, &mp_state, sizeof mp_state))
- goto out;
- r = 0;
- break;
- }
- case KVM_SET_MP_STATE: {
- struct kvm_mp_state mp_state;
- r = -EFAULT;
- if (copy_from_user(&mp_state, argp, sizeof mp_state))
- goto out;
- r = kvm_arch_vcpu_ioctl_set_mpstate(vcpu, &mp_state);
- if (r)
- goto out;
- r = 0;
- break;
- }
- case KVM_TRANSLATE: {
- struct kvm_translation tr;
- r = -EFAULT;
- if (copy_from_user(&tr, argp, sizeof tr))
- goto out;
- r = kvm_arch_vcpu_ioctl_translate(vcpu, &tr);
- if (r)
- goto out;
- r = -EFAULT;
- if (copy_to_user(argp, &tr, sizeof tr))
- goto out;
- r = 0;
- break;
- }
- case KVM_DEBUG_GUEST: {
- struct kvm_debug_guest dbg;
- r = -EFAULT;
- if (copy_from_user(&dbg, argp, sizeof dbg))
- goto out;
- r = kvm_arch_vcpu_ioctl_debug_guest(vcpu, &dbg);
- if (r)
- goto out;
- r = 0;
- break;
- }
- case KVM_SET_SIGNAL_MASK: {
- struct kvm_signal_mask __user *sigmask_arg = argp;
- struct kvm_signal_mask kvm_sigmask;
- sigset_t sigset, *p;
- p = NULL;
- if (argp) {
- r = -EFAULT;
- if (copy_from_user(&kvm_sigmask, argp,
- sizeof kvm_sigmask))
- goto out;
- r = -EINVAL;
- if (kvm_sigmask.len != sizeof sigset)
- goto out;
- r = -EFAULT;
- if (copy_from_user(&sigset, sigmask_arg->sigset,
- sizeof sigset))
- goto out;
- p = &sigset;
- }
- r = kvm_vcpu_ioctl_set_sigmask(vcpu, p);
- break;
- }
- case KVM_GET_FPU: {
- struct kvm_fpu fpu;
- memset(&fpu, 0, sizeof fpu);
- r = kvm_arch_vcpu_ioctl_get_fpu(vcpu, &fpu);
- if (r)
- goto out;
- r = -EFAULT;
- if (copy_to_user(argp, &fpu, sizeof fpu))
- goto out;
- r = 0;
- break;
- }
- case KVM_SET_FPU: {
- struct kvm_fpu fpu;
- r = -EFAULT;
- if (copy_from_user(&fpu, argp, sizeof fpu))
- goto out;
- r = kvm_arch_vcpu_ioctl_set_fpu(vcpu, &fpu);
- if (r)
- goto out;
- r = 0;
- break;
- }
- default:
- r = kvm_arch_vcpu_ioctl(filp, ioctl, arg);
- }
- out:
- return r;
- }
- static long kvm_vm_ioctl(struct file *filp,
- unsigned int ioctl, unsigned long arg)
- {
- struct kvm *kvm = filp->private_data;
- void __user *argp = (void __user *)arg;
- int r;
- if (kvm->mm != current->mm)
- return -EIO;
- switch (ioctl) {
- case KVM_CREATE_VCPU:
- r = kvm_vm_ioctl_create_vcpu(kvm, arg);
- if (r < 0)
- goto out;
- break;
- case KVM_SET_USER_MEMORY_REGION: {
- struct kvm_userspace_memory_region kvm_userspace_mem;
- r = -EFAULT;
- if (copy_from_user(&kvm_userspace_mem, argp,
- sizeof kvm_userspace_mem))
- goto out;
- r = kvm_vm_ioctl_set_memory_region(kvm, &kvm_userspace_mem, 1);
- if (r)
- goto out;
- break;
- }
- case KVM_GET_DIRTY_LOG: {
- struct kvm_dirty_log log;
- r = -EFAULT;
- if (copy_from_user(&log, argp, sizeof log))
- goto out;
- r = kvm_vm_ioctl_get_dirty_log(kvm, &log);
- if (r)
- goto out;
- break;
- }
- #ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
- case KVM_REGISTER_COALESCED_MMIO: {
- struct kvm_coalesced_mmio_zone zone;
- r = -EFAULT;
- if (copy_from_user(&zone, argp, sizeof zone))
- goto out;
- r = kvm_vm_ioctl_register_coalesced_mmio(kvm, &zone);
- if (r)
- goto out;
- r = 0;
- break;
- }
- case KVM_UNREGISTER_COALESCED_MMIO: {
- struct kvm_coalesced_mmio_zone zone;
- r = -EFAULT;
- if (copy_from_user(&zone, argp, sizeof zone))
- goto out;
- r = kvm_vm_ioctl_unregister_coalesced_mmio(kvm, &zone);
- if (r)
- goto out;
- r = 0;
- break;
- }
- #endif
- default:
- r = kvm_arch_vm_ioctl(filp, ioctl, arg);
- }
- out:
- return r;
- }
- static int kvm_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
- {
- struct kvm *kvm = vma->vm_file->private_data;
- struct page *page;
- if (!kvm_is_visible_gfn(kvm, vmf->pgoff))
- return VM_FAULT_SIGBUS;
- page = gfn_to_page(kvm, vmf->pgoff);
- if (is_error_page(page)) {
- kvm_release_page_clean(page);
- return VM_FAULT_SIGBUS;
- }
- vmf->page = page;
- return 0;
- }
- static struct vm_operations_struct kvm_vm_vm_ops = {
- .fault = kvm_vm_fault,
- };
- static int kvm_vm_mmap(struct file *file, struct vm_area_struct *vma)
- {
- vma->vm_ops = &kvm_vm_vm_ops;
- return 0;
- }
- static const struct file_operations kvm_vm_fops = {
- .release = kvm_vm_release,
- .unlocked_ioctl = kvm_vm_ioctl,
- .compat_ioctl = kvm_vm_ioctl,
- .mmap = kvm_vm_mmap,
- };
- static int kvm_dev_ioctl_create_vm(void)
- {
- int fd;
- struct kvm *kvm;
- kvm = kvm_create_vm();
- if (IS_ERR(kvm))
- return PTR_ERR(kvm);
- fd = anon_inode_getfd("kvm-vm", &kvm_vm_fops, kvm, 0);
- if (fd < 0)
- kvm_put_kvm(kvm);
- return fd;
- }
- static long kvm_dev_ioctl(struct file *filp,
- unsigned int ioctl, unsigned long arg)
- {
- long r = -EINVAL;
- switch (ioctl) {
- case KVM_GET_API_VERSION:
- r = -EINVAL;
- if (arg)
- goto out;
- r = KVM_API_VERSION;
- break;
- case KVM_CREATE_VM:
- r = -EINVAL;
- if (arg)
- goto out;
- r = kvm_dev_ioctl_create_vm();
- break;
- case KVM_CHECK_EXTENSION:
- r = kvm_dev_ioctl_check_extension(arg);
- break;
- case KVM_GET_VCPU_MMAP_SIZE:
- r = -EINVAL;
- if (arg)
- goto out;
- r = PAGE_SIZE; /* struct kvm_run */
- #ifdef CONFIG_X86
- r += PAGE_SIZE; /* pio data page */
- #endif
- #ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
- r += PAGE_SIZE; /* coalesced mmio ring page */
- #endif
- break;
- case KVM_TRACE_ENABLE:
- case KVM_TRACE_PAUSE:
- case KVM_TRACE_DISABLE:
- r = kvm_trace_ioctl(ioctl, arg);
- break;
- default:
- return kvm_arch_dev_ioctl(filp, ioctl, arg);
- }
- out:
- return r;
- }
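- /*
- * Illustrative userspace usage (sketch): /dev/kvm is the entry point
- * for everything above:
- *
- *	kvm_fd = open("/dev/kvm", O_RDWR);
- *	if (ioctl(kvm_fd, KVM_GET_API_VERSION, 0) != KVM_API_VERSION)
- *		exit(1);
- *	vm_fd = ioctl(kvm_fd, KVM_CREATE_VM, 0);
- */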
- static struct file_operations kvm_chardev_ops = {
- .unlocked_ioctl = kvm_dev_ioctl,
- .compat_ioctl = kvm_dev_ioctl,
- };
- static struct miscdevice kvm_dev = {
- KVM_MINOR,
- "kvm",
- &kvm_chardev_ops,
- };
- static void hardware_enable(void *junk)
- {
- int cpu = raw_smp_processor_id();
- if (cpu_isset(cpu, cpus_hardware_enabled))
- return;
- cpu_set(cpu, cpus_hardware_enabled);
- kvm_arch_hardware_enable(NULL);
- }
- static void hardware_disable(void *junk)
- {
- int cpu = raw_smp_processor_id();
- if (!cpu_isset(cpu, cpus_hardware_enabled))
- return;
- cpu_clear(cpu, cpus_hardware_enabled);
- kvm_arch_hardware_disable(NULL);
- }
- static int kvm_cpu_hotplug(struct notifier_block *notifier, unsigned long val,
- void *v)
- {
- int cpu = (long)v;
- val &= ~CPU_TASKS_FROZEN;
- switch (val) {
- case CPU_DYING:
- printk(KERN_INFO "kvm: disabling virtualization on CPU%d\n",
- cpu);
- hardware_disable(NULL);
- break;
- case CPU_UP_CANCELED:
- printk(KERN_INFO "kvm: disabling virtualization on CPU%d\n",
- cpu);
- smp_call_function_single(cpu, hardware_disable, NULL, 1);
- break;
- case CPU_ONLINE:
- printk(KERN_INFO "kvm: enabling virtualization on CPU%d\n",
- cpu);
- smp_call_function_single(cpu, hardware_enable, NULL, 1);
- break;
- }
- return NOTIFY_OK;
- }
- asmlinkage void kvm_handle_fault_on_reboot(void)
- {
- if (kvm_rebooting)
- /* spin while reset goes on */
- while (true)
- ;
- /* Fault while not rebooting. We want the trace. */
- BUG();
- }
- EXPORT_SYMBOL_GPL(kvm_handle_fault_on_reboot);
- static int kvm_reboot(struct notifier_block *notifier, unsigned long val,
- void *v)
- {
- if (val == SYS_RESTART) {
- /*
- * Some (well, at least mine) BIOSes hang on reboot if
- * in vmx root mode.
- */
- printk(KERN_INFO "kvm: exiting hardware virtualization\n");
- kvm_rebooting = true;
- on_each_cpu(hardware_disable, NULL, 1);
- }
- return NOTIFY_OK;
- }
- static struct notifier_block kvm_reboot_notifier = {
- .notifier_call = kvm_reboot,
- .priority = 0,
- };
- void kvm_io_bus_init(struct kvm_io_bus *bus)
- {
- memset(bus, 0, sizeof(*bus));
- }
- void kvm_io_bus_destroy(struct kvm_io_bus *bus)
- {
- int i;
- for (i = 0; i < bus->dev_count; i++) {
- struct kvm_io_device *pos = bus->devs[i];
- kvm_iodevice_destructor(pos);
- }
- }
- struct kvm_io_device *kvm_io_bus_find_dev(struct kvm_io_bus *bus,
- gpa_t addr, int len, int is_write)
- {
- int i;
- for (i = 0; i < bus->dev_count; i++) {
- struct kvm_io_device *pos = bus->devs[i];
- if (pos->in_range(pos, addr, len, is_write))
- return pos;
- }
- return NULL;
- }
- void kvm_io_bus_register_dev(struct kvm_io_bus *bus, struct kvm_io_device *dev)
- {
- BUG_ON(bus->dev_count > (NR_IOBUS_DEVS-1));
- bus->devs[bus->dev_count++] = dev;
- }
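- /*
- * Hedged sketch of a bus client (the foo_* names are hypothetical):
- * a device fills in the kvm_io_device callbacks and registers itself,
- * after which kvm_io_bus_find_dev() can route accesses to it:
- *
- *	static int foo_in_range(struct kvm_io_device *dev, gpa_t addr,
- *				int len, int is_write)
- *	{
- *		return addr >= FOO_BASE && addr + len <= FOO_BASE + FOO_SIZE;
- *	}
- *	...
- *	foo->dev.in_range = foo_in_range;
- *	kvm_io_bus_register_dev(&kvm->mmio_bus, &foo->dev);
- */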
- static struct notifier_block kvm_cpu_notifier = {
- .notifier_call = kvm_cpu_hotplug,
- .priority = 20, /* must be > scheduler priority */
- };
- static int vm_stat_get(void *_offset, u64 *val)
- {
- unsigned offset = (long)_offset;
- struct kvm *kvm;
- *val = 0;
- spin_lock(&kvm_lock);
- list_for_each_entry(kvm, &vm_list, vm_list)
- *val += *(u32 *)((void *)kvm + offset);
- spin_unlock(&kvm_lock);
- return 0;
- }
- DEFINE_SIMPLE_ATTRIBUTE(vm_stat_fops, vm_stat_get, NULL, "%llu\n");
- static int vcpu_stat_get(void *_offset, u64 *val)
- {
- unsigned offset = (long)_offset;
- struct kvm *kvm;
- struct kvm_vcpu *vcpu;
- int i;
- *val = 0;
- spin_lock(&kvm_lock);
- list_for_each_entry(kvm, &vm_list, vm_list)
- for (i = 0; i < KVM_MAX_VCPUS; ++i) {
- vcpu = kvm->vcpus[i];
- if (vcpu)
- *val += *(u32 *)((void *)vcpu + offset);
- }
- spin_unlock(&kvm_lock);
- return 0;
- }
- DEFINE_SIMPLE_ATTRIBUTE(vcpu_stat_fops, vcpu_stat_get, NULL, "%llu\n");
- static struct file_operations *stat_fops[] = {
- [KVM_STAT_VCPU] = &vcpu_stat_fops,
- [KVM_STAT_VM] = &vm_stat_fops,
- };
- static void kvm_init_debug(void)
- {
- struct kvm_stats_debugfs_item *p;
- kvm_debugfs_dir = debugfs_create_dir("kvm", NULL);
- for (p = debugfs_entries; p->name; ++p)
- p->dentry = debugfs_create_file(p->name, 0444, kvm_debugfs_dir,
- (void *)(long)p->offset,
- stat_fops[p->kind]);
- }
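- /*
- * With debugfs mounted, the files created above surface as
- * /sys/kernel/debug/kvm/<name>; reading one sums the per-VM or
- * per-vcpu counter at the given offset across all VMs via
- * vm_stat_get()/vcpu_stat_get() above.
- */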
- static void kvm_exit_debug(void)
- {
- struct kvm_stats_debugfs_item *p;
- for (p = debugfs_entries; p->name; ++p)
- debugfs_remove(p->dentry);
- debugfs_remove(kvm_debugfs_dir);
- }
- static int kvm_suspend(struct sys_device *dev, pm_message_t state)
- {
- hardware_disable(NULL);
- return 0;
- }
- static int kvm_resume(struct sys_device *dev)
- {
- hardware_enable(NULL);
- return 0;
- }
- static struct sysdev_class kvm_sysdev_class = {
- .name = "kvm",
- .suspend = kvm_suspend,
- .resume = kvm_resume,
- };
- static struct sys_device kvm_sysdev = {
- .id = 0,
- .cls = &kvm_sysdev_class,
- };
- struct page *bad_page;
- pfn_t bad_pfn;
- static inline
- struct kvm_vcpu *preempt_notifier_to_vcpu(struct preempt_notifier *pn)
- {
- return container_of(pn, struct kvm_vcpu, preempt_notifier);
- }
- static void kvm_sched_in(struct preempt_notifier *pn, int cpu)
- {
- struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);
- kvm_arch_vcpu_load(vcpu, cpu);
- }
- static void kvm_sched_out(struct preempt_notifier *pn,
- struct task_struct *next)
- {
- struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);
- kvm_arch_vcpu_put(vcpu);
- }
- int kvm_init(void *opaque, unsigned int vcpu_size,
- struct module *module)
- {
- int r;
- int cpu;
- kvm_init_debug();
- r = kvm_arch_init(opaque);
- if (r)
- goto out_fail;
- bad_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
- if (bad_page == NULL) {
- r = -ENOMEM;
- goto out;
- }
- bad_pfn = page_to_pfn(bad_page);
- r = kvm_arch_hardware_setup();
- if (r < 0)
- goto out_free_0;
- for_each_online_cpu(cpu) {
- smp_call_function_single(cpu,
- kvm_arch_check_processor_compat,
- &r, 1);
- if (r < 0)
- goto out_free_1;
- }
- on_each_cpu(hardware_enable, NULL, 1);
- r = register_cpu_notifier(&kvm_cpu_notifier);
- if (r)
- goto out_free_2;
- register_reboot_notifier(&kvm_reboot_notifier);
- r = sysdev_class_register(&kvm_sysdev_class);
- if (r)
- goto out_free_3;
- r = sysdev_register(&kvm_sysdev);
- if (r)
- goto out_free_4;
- /* A kmem cache lets us meet the alignment requirements of fx_save. */
- kvm_vcpu_cache = kmem_cache_create("kvm_vcpu", vcpu_size,
- __alignof__(struct kvm_vcpu),
- 0, NULL);
- if (!kvm_vcpu_cache) {
- r = -ENOMEM;
- goto out_free_5;
- }
- kvm_chardev_ops.owner = module;
- r = misc_register(&kvm_dev);
- if (r) {
- printk(KERN_ERR "kvm: misc device register failed\n");
- goto out_free;
- }
- kvm_preempt_ops.sched_in = kvm_sched_in;
- kvm_preempt_ops.sched_out = kvm_sched_out;
- return 0;
- out_free:
- kmem_cache_destroy(kvm_vcpu_cache);
- out_free_5:
- sysdev_unregister(&kvm_sysdev);
- out_free_4:
- sysdev_class_unregister(&kvm_sysdev_class);
- out_free_3:
- unregister_reboot_notifier(&kvm_reboot_notifier);
- unregister_cpu_notifier(&kvm_cpu_notifier);
- out_free_2:
- on_each_cpu(hardware_disable, NULL, 1);
- out_free_1:
- kvm_arch_hardware_unsetup();
- out_free_0:
- __free_page(bad_page);
- out:
- kvm_arch_exit();
- kvm_exit_debug();
- out_fail:
- return r;
- }
- EXPORT_SYMBOL_GPL(kvm_init);
- void kvm_exit(void)
- {
- kvm_trace_cleanup();
- misc_deregister(&kvm_dev);
- kmem_cache_destroy(kvm_vcpu_cache);
- sysdev_unregister(&kvm_sysdev);
- sysdev_class_unregister(&kvm_sysdev_class);
- unregister_reboot_notifier(&kvm_reboot_notifier);
- unregister_cpu_notifier(&kvm_cpu_notifier);
- on_each_cpu(hardware_disable, NULL, 1);
- kvm_arch_hardware_unsetup();
- kvm_arch_exit();
- kvm_exit_debug();
- __free_page(bad_page);
- }
- EXPORT_SYMBOL_GPL(kvm_exit);