kvm_main.c
/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This module enables machines with Intel VT-x extensions to run virtual
 * machines without emulation or binary translation.
 *
 * Copyright (C) 2006 Qumranet, Inc.
 *
 * Authors:
 *   Avi Kivity   <avi@qumranet.com>
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#include "iodev.h"

#include <linux/kvm_host.h>
#include <linux/kvm.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/percpu.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/miscdevice.h>
#include <linux/vmalloc.h>
#include <linux/reboot.h>
#include <linux/debugfs.h>
#include <linux/highmem.h>
#include <linux/file.h>
#include <linux/sysdev.h>
#include <linux/cpu.h>
#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/smp.h>
#include <linux/anon_inodes.h>
#include <linux/profile.h>
#include <linux/kvm_para.h>
#include <linux/pagemap.h>
#include <linux/mman.h>
#include <linux/swap.h>
#include <linux/bitops.h>
#include <linux/spinlock.h>
#include <linux/compat.h>

#include <asm/processor.h>
#include <asm/io.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm-generic/bitops/le.h>

#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
#include "coalesced_mmio.h"
#endif

#define CREATE_TRACE_POINTS
#include <trace/events/kvm.h>

MODULE_AUTHOR("Qumranet");
MODULE_LICENSE("GPL");

/*
 * Ordering of locks:
 *
 *	kvm->slots_lock --> kvm->lock --> kvm->irq_lock
 */

DEFINE_SPINLOCK(kvm_lock);
LIST_HEAD(vm_list);

static cpumask_var_t cpus_hardware_enabled;
static int kvm_usage_count = 0;
static atomic_t hardware_enable_failed;

struct kmem_cache *kvm_vcpu_cache;
EXPORT_SYMBOL_GPL(kvm_vcpu_cache);

static __read_mostly struct preempt_ops kvm_preempt_ops;

struct dentry *kvm_debugfs_dir;

static long kvm_vcpu_ioctl(struct file *file, unsigned int ioctl,
			   unsigned long arg);
static int hardware_enable_all(void);
static void hardware_disable_all(void);

static bool kvm_rebooting;

static bool largepages_enabled = true;

inline int kvm_is_mmio_pfn(pfn_t pfn)
{
	if (pfn_valid(pfn)) {
		struct page *page = compound_head(pfn_to_page(pfn));
		return PageReserved(page);
	}

	return true;
}

/*
 * Switches to specified vcpu, until a matching vcpu_put()
 */
void vcpu_load(struct kvm_vcpu *vcpu)
{
	int cpu;

	mutex_lock(&vcpu->mutex);
	cpu = get_cpu();
	preempt_notifier_register(&vcpu->preempt_notifier);
	kvm_arch_vcpu_load(vcpu, cpu);
	put_cpu();
}

void vcpu_put(struct kvm_vcpu *vcpu)
{
	preempt_disable();
	kvm_arch_vcpu_put(vcpu);
	preempt_notifier_unregister(&vcpu->preempt_notifier);
	preempt_enable();
	mutex_unlock(&vcpu->mutex);
}
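/*
 * Usage sketch (added commentary, illustrative rather than taken from this
 * file): callers that touch per-vcpu hardware state bracket their work with
 * the pair above, e.g.
 *
 *	vcpu_load(vcpu);
 *	... access vcpu registers, run the guest, etc. ...
 *	vcpu_put(vcpu);
 *
 * vcpu_load() takes vcpu->mutex, so only one task drives a given vcpu at a
 * time, and the preempt notifier reloads the hardware state if the task
 * migrates between CPUs in between.
 */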
static void ack_flush(void *_completed)
{
}

static bool make_all_cpus_request(struct kvm *kvm, unsigned int req)
{
	int i, cpu, me;
	cpumask_var_t cpus;
	bool called = true;
	struct kvm_vcpu *vcpu;

	zalloc_cpumask_var(&cpus, GFP_ATOMIC);

	spin_lock(&kvm->requests_lock);
	me = smp_processor_id();
	kvm_for_each_vcpu(i, vcpu, kvm) {
		if (test_and_set_bit(req, &vcpu->requests))
			continue;
		cpu = vcpu->cpu;
		if (cpus != NULL && cpu != -1 && cpu != me)
			cpumask_set_cpu(cpu, cpus);
	}
	if (unlikely(cpus == NULL))
		smp_call_function_many(cpu_online_mask, ack_flush, NULL, 1);
	else if (!cpumask_empty(cpus))
		smp_call_function_many(cpus, ack_flush, NULL, 1);
	else
		called = false;
	spin_unlock(&kvm->requests_lock);
	free_cpumask_var(cpus);
	return called;
}
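/*
 * Added commentary: make_all_cpus_request() only sets the request bit and
 * IPIs the CPUs currently running a vcpu; the IPI handler (ack_flush) does
 * nothing by itself, it merely forces a guest exit.  The request is consumed
 * on the vcpu side (on x86, vcpu_enter_guest() checks vcpu->requests before
 * reentering the guest).
 */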
void kvm_flush_remote_tlbs(struct kvm *kvm)
{
	if (make_all_cpus_request(kvm, KVM_REQ_TLB_FLUSH))
		++kvm->stat.remote_tlb_flush;
}

void kvm_reload_remote_mmus(struct kvm *kvm)
{
	make_all_cpus_request(kvm, KVM_REQ_MMU_RELOAD);
}

int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id)
{
	struct page *page;
	int r;

	mutex_init(&vcpu->mutex);
	vcpu->cpu = -1;
	vcpu->kvm = kvm;
	vcpu->vcpu_id = id;
	init_waitqueue_head(&vcpu->wq);

	page = alloc_page(GFP_KERNEL | __GFP_ZERO);
	if (!page) {
		r = -ENOMEM;
		goto fail;
	}
	vcpu->run = page_address(page);

	r = kvm_arch_vcpu_init(vcpu);
	if (r < 0)
		goto fail_free_run;
	return 0;

fail_free_run:
	free_page((unsigned long)vcpu->run);
fail:
	return r;
}
EXPORT_SYMBOL_GPL(kvm_vcpu_init);

void kvm_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	kvm_arch_vcpu_uninit(vcpu);
	free_page((unsigned long)vcpu->run);
}
EXPORT_SYMBOL_GPL(kvm_vcpu_uninit);

#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
static inline struct kvm *mmu_notifier_to_kvm(struct mmu_notifier *mn)
{
	return container_of(mn, struct kvm, mmu_notifier);
}

static void kvm_mmu_notifier_invalidate_page(struct mmu_notifier *mn,
					     struct mm_struct *mm,
					     unsigned long address)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
	int need_tlb_flush;

	/*
	 * When ->invalidate_page runs, the linux pte has been zapped
	 * already but the page is still allocated until
	 * ->invalidate_page returns.  So if we increase the sequence
	 * here the kvm page fault will notice if the spte can't be
	 * established because the page is going to be freed.  If
	 * instead the kvm page fault establishes the spte before
	 * ->invalidate_page runs, kvm_unmap_hva will release it
	 * before returning.
	 *
	 * The sequence increase only needs to be seen at spin_unlock
	 * time, and not at spin_lock time.
	 *
	 * Increasing the sequence after the spin_unlock would be
	 * unsafe because the kvm page fault could then establish the
	 * pte after kvm_unmap_hva returned, without noticing the page
	 * is going to be freed.
	 */
	spin_lock(&kvm->mmu_lock);
	kvm->mmu_notifier_seq++;
	need_tlb_flush = kvm_unmap_hva(kvm, address);
	spin_unlock(&kvm->mmu_lock);

	/* we have to flush the tlb before the pages can be freed */
	if (need_tlb_flush)
		kvm_flush_remote_tlbs(kvm);
}

static void kvm_mmu_notifier_change_pte(struct mmu_notifier *mn,
					struct mm_struct *mm,
					unsigned long address,
					pte_t pte)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);

	spin_lock(&kvm->mmu_lock);
	kvm->mmu_notifier_seq++;
	kvm_set_spte_hva(kvm, address, pte);
	spin_unlock(&kvm->mmu_lock);
}

static void kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
						    struct mm_struct *mm,
						    unsigned long start,
						    unsigned long end)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
	int need_tlb_flush = 0;

	spin_lock(&kvm->mmu_lock);
	/*
	 * The count increase must become visible at unlock time as no
	 * spte can be established without taking the mmu_lock and
	 * count is also read inside the mmu_lock critical section.
	 */
	kvm->mmu_notifier_count++;
	for (; start < end; start += PAGE_SIZE)
		need_tlb_flush |= kvm_unmap_hva(kvm, start);
	spin_unlock(&kvm->mmu_lock);

	/* we have to flush the tlb before the pages can be freed */
	if (need_tlb_flush)
		kvm_flush_remote_tlbs(kvm);
}

static void kvm_mmu_notifier_invalidate_range_end(struct mmu_notifier *mn,
						  struct mm_struct *mm,
						  unsigned long start,
						  unsigned long end)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);

	spin_lock(&kvm->mmu_lock);
	/*
	 * This sequence increase will notify the kvm page fault that
	 * the page that is going to be mapped in the spte could have
	 * been freed.
	 */
	kvm->mmu_notifier_seq++;
	/*
	 * The above sequence increase must be visible before the
	 * below count decrease but both values are read by the kvm
	 * page fault under mmu_lock spinlock so we don't need to add
	 * a smp_wmb() here in between the two.
	 */
	kvm->mmu_notifier_count--;
	spin_unlock(&kvm->mmu_lock);

	BUG_ON(kvm->mmu_notifier_count < 0);
}

static int kvm_mmu_notifier_clear_flush_young(struct mmu_notifier *mn,
					      struct mm_struct *mm,
					      unsigned long address)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
	int young;

	spin_lock(&kvm->mmu_lock);
	young = kvm_age_hva(kvm, address);
	spin_unlock(&kvm->mmu_lock);

	if (young)
		kvm_flush_remote_tlbs(kvm);

	return young;
}

static void kvm_mmu_notifier_release(struct mmu_notifier *mn,
				     struct mm_struct *mm)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
	kvm_arch_flush_shadow(kvm);
}

static const struct mmu_notifier_ops kvm_mmu_notifier_ops = {
	.invalidate_page	= kvm_mmu_notifier_invalidate_page,
	.invalidate_range_start	= kvm_mmu_notifier_invalidate_range_start,
	.invalidate_range_end	= kvm_mmu_notifier_invalidate_range_end,
	.clear_flush_young	= kvm_mmu_notifier_clear_flush_young,
	.change_pte		= kvm_mmu_notifier_change_pte,
	.release		= kvm_mmu_notifier_release,
};
#endif /* CONFIG_MMU_NOTIFIER && KVM_ARCH_WANT_MMU_NOTIFIER */
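/*
 * Added commentary: the consumer of mmu_notifier_seq/mmu_notifier_count is
 * the arch page fault path.  A sketch of the pairing (see the real check,
 * mmu_notifier_retry(), in <linux/kvm_host.h>):
 *
 *	seq = kvm->mmu_notifier_seq;
 *	smp_rmb();
 *	pfn = gfn_to_pfn(kvm, gfn);		// may sleep, no mmu_lock
 *	spin_lock(&kvm->mmu_lock);
 *	if (kvm->mmu_notifier_count || kvm->mmu_notifier_seq != seq)
 *		goto retry;			// an invalidation ran meanwhile
 *	... install the spte ...
 *	spin_unlock(&kvm->mmu_lock);
 */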
static struct kvm *kvm_create_vm(void)
{
	int r = 0;
	struct kvm *kvm = kvm_arch_create_vm();
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
	struct page *page;
#endif

	if (IS_ERR(kvm))
		goto out;

	r = hardware_enable_all();
	if (r)
		goto out_err_nodisable;

#ifdef CONFIG_HAVE_KVM_IRQCHIP
	INIT_HLIST_HEAD(&kvm->mask_notifier_list);
	INIT_HLIST_HEAD(&kvm->irq_ack_notifier_list);
#endif

#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
	page = alloc_page(GFP_KERNEL | __GFP_ZERO);
	if (!page) {
		r = -ENOMEM;
		goto out_err;
	}
	kvm->coalesced_mmio_ring =
			(struct kvm_coalesced_mmio_ring *)page_address(page);
#endif

#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
	{
		kvm->mmu_notifier.ops = &kvm_mmu_notifier_ops;
		r = mmu_notifier_register(&kvm->mmu_notifier, current->mm);
		if (r) {
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
			put_page(page);
#endif
			goto out_err;
		}
	}
#endif

	kvm->mm = current->mm;
	atomic_inc(&kvm->mm->mm_count);
	spin_lock_init(&kvm->mmu_lock);
	spin_lock_init(&kvm->requests_lock);
	kvm_io_bus_init(&kvm->pio_bus);
	kvm_eventfd_init(kvm);
	mutex_init(&kvm->lock);
	mutex_init(&kvm->irq_lock);
	kvm_io_bus_init(&kvm->mmio_bus);
	init_rwsem(&kvm->slots_lock);
	atomic_set(&kvm->users_count, 1);
	spin_lock(&kvm_lock);
	list_add(&kvm->vm_list, &vm_list);
	spin_unlock(&kvm_lock);
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
	kvm_coalesced_mmio_init(kvm);
#endif
out:
	return kvm;

out_err:
	hardware_disable_all();
out_err_nodisable:
	kfree(kvm);
	return ERR_PTR(r);
}

/*
 * Free any memory in @free but not in @dont.
 */
static void kvm_free_physmem_slot(struct kvm_memory_slot *free,
				  struct kvm_memory_slot *dont)
{
	int i;

	if (!dont || free->rmap != dont->rmap)
		vfree(free->rmap);

	if (!dont || free->dirty_bitmap != dont->dirty_bitmap)
		vfree(free->dirty_bitmap);

	for (i = 0; i < KVM_NR_PAGE_SIZES - 1; ++i) {
		if (!dont || free->lpage_info[i] != dont->lpage_info[i]) {
			vfree(free->lpage_info[i]);
			free->lpage_info[i] = NULL;
		}
	}

	free->npages = 0;
	free->dirty_bitmap = NULL;
	free->rmap = NULL;
}

void kvm_free_physmem(struct kvm *kvm)
{
	int i;

	for (i = 0; i < kvm->nmemslots; ++i)
		kvm_free_physmem_slot(&kvm->memslots[i], NULL);
}

static void kvm_destroy_vm(struct kvm *kvm)
{
	struct mm_struct *mm = kvm->mm;

	kvm_arch_sync_events(kvm);
	spin_lock(&kvm_lock);
	list_del(&kvm->vm_list);
	spin_unlock(&kvm_lock);
	kvm_free_irq_routing(kvm);
	kvm_io_bus_destroy(&kvm->pio_bus);
	kvm_io_bus_destroy(&kvm->mmio_bus);
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
	if (kvm->coalesced_mmio_ring != NULL)
		free_page((unsigned long)kvm->coalesced_mmio_ring);
#endif
#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
	mmu_notifier_unregister(&kvm->mmu_notifier, kvm->mm);
#else
	kvm_arch_flush_shadow(kvm);
#endif
	kvm_arch_destroy_vm(kvm);
	hardware_disable_all();
	mmdrop(mm);
}

void kvm_get_kvm(struct kvm *kvm)
{
	atomic_inc(&kvm->users_count);
}
EXPORT_SYMBOL_GPL(kvm_get_kvm);

void kvm_put_kvm(struct kvm *kvm)
{
	if (atomic_dec_and_test(&kvm->users_count))
		kvm_destroy_vm(kvm);
}
EXPORT_SYMBOL_GPL(kvm_put_kvm);

static int kvm_vm_release(struct inode *inode, struct file *filp)
{
	struct kvm *kvm = filp->private_data;

	kvm_irqfd_release(kvm);

	kvm_put_kvm(kvm);
	return 0;
}

/*
 * Allocate some memory and give it an address in the guest physical address
 * space.
 *
 * Discontiguous memory is allowed, mostly for framebuffers.
 *
 * Must be called holding mmap_sem for write.
 */
int __kvm_set_memory_region(struct kvm *kvm,
			    struct kvm_userspace_memory_region *mem,
			    int user_alloc)
{
	int r;
	gfn_t base_gfn;
	unsigned long npages;
	unsigned long i;
	struct kvm_memory_slot *memslot;
	struct kvm_memory_slot old, new;

	r = -EINVAL;
	/* General sanity checks */
	if (mem->memory_size & (PAGE_SIZE - 1))
		goto out;
	if (mem->guest_phys_addr & (PAGE_SIZE - 1))
		goto out;
	if (user_alloc && (mem->userspace_addr & (PAGE_SIZE - 1)))
		goto out;
	if (mem->slot >= KVM_MEMORY_SLOTS + KVM_PRIVATE_MEM_SLOTS)
		goto out;
	if (mem->guest_phys_addr + mem->memory_size < mem->guest_phys_addr)
		goto out;

	memslot = &kvm->memslots[mem->slot];
	base_gfn = mem->guest_phys_addr >> PAGE_SHIFT;
	npages = mem->memory_size >> PAGE_SHIFT;

	if (!npages)
		mem->flags &= ~KVM_MEM_LOG_DIRTY_PAGES;

	new = old = *memslot;

	new.base_gfn = base_gfn;
	new.npages = npages;
	new.flags = mem->flags;

	/* Disallow changing a memory slot's size. */
	r = -EINVAL;
	if (npages && old.npages && npages != old.npages)
		goto out_free;

	/* Check for overlaps */
	r = -EEXIST;
	for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
		struct kvm_memory_slot *s = &kvm->memslots[i];

		if (s == memslot || !s->npages)
			continue;
		if (!((base_gfn + npages <= s->base_gfn) ||
		      (base_gfn >= s->base_gfn + s->npages)))
			goto out_free;
	}

	/* Free page dirty bitmap if unneeded */
	if (!(new.flags & KVM_MEM_LOG_DIRTY_PAGES))
		new.dirty_bitmap = NULL;

	r = -ENOMEM;

	/* Allocate if a slot is being created */
#ifndef CONFIG_S390
	if (npages && !new.rmap) {
		new.rmap = vmalloc(npages * sizeof(struct page *));

		if (!new.rmap)
			goto out_free;

		memset(new.rmap, 0, npages * sizeof(*new.rmap));

		new.user_alloc = user_alloc;
		/*
		 * hva_to_rmmap() serializes with the mmu_lock and to be
		 * safe it has to ignore memslots with !user_alloc &&
		 * !userspace_addr.
		 */
		if (user_alloc)
			new.userspace_addr = mem->userspace_addr;
		else
			new.userspace_addr = 0;
	}
	if (!npages)
		goto skip_lpage;

	for (i = 0; i < KVM_NR_PAGE_SIZES - 1; ++i) {
		unsigned long ugfn;
		unsigned long j;
		int lpages;
		int level = i + 2;

		/* Avoid unused variable warning if no large pages */
		(void)level;

		if (new.lpage_info[i])
			continue;

		lpages = 1 + (base_gfn + npages - 1) /
			     KVM_PAGES_PER_HPAGE(level);
		lpages -= base_gfn / KVM_PAGES_PER_HPAGE(level);

		new.lpage_info[i] = vmalloc(lpages * sizeof(*new.lpage_info[i]));

		if (!new.lpage_info[i])
			goto out_free;

		memset(new.lpage_info[i], 0,
		       lpages * sizeof(*new.lpage_info[i]));

		if (base_gfn % KVM_PAGES_PER_HPAGE(level))
			new.lpage_info[i][0].write_count = 1;
		if ((base_gfn + npages) % KVM_PAGES_PER_HPAGE(level))
			new.lpage_info[i][lpages - 1].write_count = 1;
		ugfn = new.userspace_addr >> PAGE_SHIFT;
		/*
		 * If the gfn and userspace address are not aligned wrt each
		 * other, or if explicitly asked to, disable large page
		 * support for this slot
		 */
		if ((base_gfn ^ ugfn) & (KVM_PAGES_PER_HPAGE(level) - 1) ||
		    !largepages_enabled)
			for (j = 0; j < lpages; ++j)
				new.lpage_info[i][j].write_count = 1;
	}

skip_lpage:

	/* Allocate page dirty bitmap if needed */
	if ((new.flags & KVM_MEM_LOG_DIRTY_PAGES) && !new.dirty_bitmap) {
		unsigned dirty_bytes = ALIGN(npages, BITS_PER_LONG) / 8;

		new.dirty_bitmap = vmalloc(dirty_bytes);
		if (!new.dirty_bitmap)
			goto out_free;
		memset(new.dirty_bitmap, 0, dirty_bytes);
		if (old.npages)
			kvm_arch_flush_shadow(kvm);
	}
#else  /* not defined CONFIG_S390 */
	new.user_alloc = user_alloc;
	if (user_alloc)
		new.userspace_addr = mem->userspace_addr;
#endif /* not defined CONFIG_S390 */

	if (!npages)
		kvm_arch_flush_shadow(kvm);

	spin_lock(&kvm->mmu_lock);
	if (mem->slot >= kvm->nmemslots)
		kvm->nmemslots = mem->slot + 1;

	*memslot = new;
	spin_unlock(&kvm->mmu_lock);

	r = kvm_arch_set_memory_region(kvm, mem, old, user_alloc);
	if (r) {
		spin_lock(&kvm->mmu_lock);
		*memslot = old;
		spin_unlock(&kvm->mmu_lock);
		goto out_free;
	}

	kvm_free_physmem_slot(&old, npages ? &new : NULL);
	/* Slot deletion case: we have to update the current slot */
	spin_lock(&kvm->mmu_lock);
	if (!npages)
		*memslot = old;
	spin_unlock(&kvm->mmu_lock);
#ifdef CONFIG_DMAR
	/* map the pages in iommu page table */
	r = kvm_iommu_map_pages(kvm, base_gfn, npages);
	if (r)
		goto out;
#endif
	return 0;

out_free:
	kvm_free_physmem_slot(&new, &old);
out:
	return r;
}
EXPORT_SYMBOL_GPL(__kvm_set_memory_region);

int kvm_set_memory_region(struct kvm *kvm,
			  struct kvm_userspace_memory_region *mem,
			  int user_alloc)
{
	int r;

	down_write(&kvm->slots_lock);
	r = __kvm_set_memory_region(kvm, mem, user_alloc);
	up_write(&kvm->slots_lock);
	return r;
}
EXPORT_SYMBOL_GPL(kvm_set_memory_region);
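/*
 * Usage sketch (added commentary, illustrative): a caller registering guest
 * memory backed by a userspace mapping fills in something like
 *
 *	struct kvm_userspace_memory_region mem = {
 *		.slot            = 0,
 *		.flags           = 0,
 *		.guest_phys_addr = 0,
 *		.memory_size     = size,	// page aligned, caller's value
 *		.userspace_addr  = hva,		// page aligned, caller's value
 *	};
 *	r = kvm_set_memory_region(kvm, &mem, 1);
 *
 * which is also the shape of the KVM_SET_USER_MEMORY_REGION ioctl payload
 * handled in kvm_vm_ioctl() below.
 */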
int kvm_vm_ioctl_set_memory_region(struct kvm *kvm,
				   struct kvm_userspace_memory_region *mem,
				   int user_alloc)
{
	if (mem->slot >= KVM_MEMORY_SLOTS)
		return -EINVAL;
	return kvm_set_memory_region(kvm, mem, user_alloc);
}

int kvm_get_dirty_log(struct kvm *kvm,
		      struct kvm_dirty_log *log, int *is_dirty)
{
	struct kvm_memory_slot *memslot;
	int r, i;
	int n;
	unsigned long any = 0;

	r = -EINVAL;
	if (log->slot >= KVM_MEMORY_SLOTS)
		goto out;

	memslot = &kvm->memslots[log->slot];
	r = -ENOENT;
	if (!memslot->dirty_bitmap)
		goto out;

	n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;

	for (i = 0; !any && i < n/sizeof(long); ++i)
		any = memslot->dirty_bitmap[i];

	r = -EFAULT;
	if (copy_to_user(log->dirty_bitmap, memslot->dirty_bitmap, n))
		goto out;

	if (any)
		*is_dirty = 1;

	r = 0;
out:
	return r;
}

void kvm_disable_largepages(void)
{
	largepages_enabled = false;
}
EXPORT_SYMBOL_GPL(kvm_disable_largepages);

int is_error_page(struct page *page)
{
	return page == bad_page;
}
EXPORT_SYMBOL_GPL(is_error_page);

int is_error_pfn(pfn_t pfn)
{
	return pfn == bad_pfn;
}
EXPORT_SYMBOL_GPL(is_error_pfn);

static inline unsigned long bad_hva(void)
{
	return PAGE_OFFSET;
}

int kvm_is_error_hva(unsigned long addr)
{
	return addr == bad_hva();
}
EXPORT_SYMBOL_GPL(kvm_is_error_hva);

struct kvm_memory_slot *gfn_to_memslot_unaliased(struct kvm *kvm, gfn_t gfn)
{
	int i;

	for (i = 0; i < kvm->nmemslots; ++i) {
		struct kvm_memory_slot *memslot = &kvm->memslots[i];

		if (gfn >= memslot->base_gfn
		    && gfn < memslot->base_gfn + memslot->npages)
			return memslot;
	}
	return NULL;
}
EXPORT_SYMBOL_GPL(gfn_to_memslot_unaliased);

struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn)
{
	gfn = unalias_gfn(kvm, gfn);
	return gfn_to_memslot_unaliased(kvm, gfn);
}

int kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn)
{
	int i;

	gfn = unalias_gfn(kvm, gfn);
	for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
		struct kvm_memory_slot *memslot = &kvm->memslots[i];

		if (gfn >= memslot->base_gfn
		    && gfn < memslot->base_gfn + memslot->npages)
			return 1;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_is_visible_gfn);

unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn)
{
	struct kvm_memory_slot *slot;

	gfn = unalias_gfn(kvm, gfn);
	slot = gfn_to_memslot_unaliased(kvm, gfn);
	if (!slot)
		return bad_hva();
	return (slot->userspace_addr + (gfn - slot->base_gfn) * PAGE_SIZE);
}
EXPORT_SYMBOL_GPL(gfn_to_hva);
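/*
 * Added commentary: once the slot is found, the translation above is pure
 * arithmetic.  For example, a slot with base_gfn 0x100 and userspace_addr
 * 0x7f0000000000 maps gfn 0x105 to hva 0x7f0000000000 + 5 * PAGE_SIZE.
 */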
pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn)
{
	struct page *page[1];
	unsigned long addr;
	int npages;
	pfn_t pfn;

	might_sleep();

	addr = gfn_to_hva(kvm, gfn);
	if (kvm_is_error_hva(addr)) {
		get_page(bad_page);
		return page_to_pfn(bad_page);
	}

	npages = get_user_pages_fast(addr, 1, 1, page);

	if (unlikely(npages != 1)) {
		struct vm_area_struct *vma;

		down_read(&current->mm->mmap_sem);
		vma = find_vma(current->mm, addr);

		if (vma == NULL || addr < vma->vm_start ||
		    !(vma->vm_flags & VM_PFNMAP)) {
			up_read(&current->mm->mmap_sem);
			get_page(bad_page);
			return page_to_pfn(bad_page);
		}

		pfn = ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
		up_read(&current->mm->mmap_sem);
		BUG_ON(!kvm_is_mmio_pfn(pfn));
	} else
		pfn = page_to_pfn(page[0]);

	return pfn;
}
EXPORT_SYMBOL_GPL(gfn_to_pfn);

struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn)
{
	pfn_t pfn;

	pfn = gfn_to_pfn(kvm, gfn);
	if (!kvm_is_mmio_pfn(pfn))
		return pfn_to_page(pfn);

	WARN_ON(kvm_is_mmio_pfn(pfn));

	get_page(bad_page);
	return bad_page;
}
EXPORT_SYMBOL_GPL(gfn_to_page);

void kvm_release_page_clean(struct page *page)
{
	kvm_release_pfn_clean(page_to_pfn(page));
}
EXPORT_SYMBOL_GPL(kvm_release_page_clean);

void kvm_release_pfn_clean(pfn_t pfn)
{
	if (!kvm_is_mmio_pfn(pfn))
		put_page(pfn_to_page(pfn));
}
EXPORT_SYMBOL_GPL(kvm_release_pfn_clean);

void kvm_release_page_dirty(struct page *page)
{
	kvm_release_pfn_dirty(page_to_pfn(page));
}
EXPORT_SYMBOL_GPL(kvm_release_page_dirty);

void kvm_release_pfn_dirty(pfn_t pfn)
{
	kvm_set_pfn_dirty(pfn);
	kvm_release_pfn_clean(pfn);
}
EXPORT_SYMBOL_GPL(kvm_release_pfn_dirty);

void kvm_set_page_dirty(struct page *page)
{
	kvm_set_pfn_dirty(page_to_pfn(page));
}
EXPORT_SYMBOL_GPL(kvm_set_page_dirty);

void kvm_set_pfn_dirty(pfn_t pfn)
{
	if (!kvm_is_mmio_pfn(pfn)) {
		struct page *page = pfn_to_page(pfn);
		if (!PageReserved(page))
			SetPageDirty(page);
	}
}
EXPORT_SYMBOL_GPL(kvm_set_pfn_dirty);

void kvm_set_pfn_accessed(pfn_t pfn)
{
	if (!kvm_is_mmio_pfn(pfn))
		mark_page_accessed(pfn_to_page(pfn));
}
EXPORT_SYMBOL_GPL(kvm_set_pfn_accessed);

void kvm_get_pfn(pfn_t pfn)
{
	if (!kvm_is_mmio_pfn(pfn))
		get_page(pfn_to_page(pfn));
}
EXPORT_SYMBOL_GPL(kvm_get_pfn);

static int next_segment(unsigned long len, int offset)
{
	if (len > PAGE_SIZE - offset)
		return PAGE_SIZE - offset;
	else
		return len;
}
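/*
 * Added commentary: next_segment() caps a copy at the end of the current
 * page.  With 4K pages, len = 5000 and offset = 3000 yields 1096 (the
 * remainder of the first page); the callers below then advance gfn and
 * reset offset to 0 for the following pages.
 */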
int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
			int len)
{
	int r;
	unsigned long addr;

	addr = gfn_to_hva(kvm, gfn);
	if (kvm_is_error_hva(addr))
		return -EFAULT;
	r = copy_from_user(data, (void __user *)addr + offset, len);
	if (r)
		return -EFAULT;
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_read_guest_page);

int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len)
{
	gfn_t gfn = gpa >> PAGE_SHIFT;
	int seg;
	int offset = offset_in_page(gpa);
	int ret;

	while ((seg = next_segment(len, offset)) != 0) {
		ret = kvm_read_guest_page(kvm, gfn, data, offset, seg);
		if (ret < 0)
			return ret;
		offset = 0;
		len -= seg;
		data += seg;
		++gfn;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_read_guest);
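/*
 * Usage sketch (added commentary, illustrative): reading a guest-physical
 * structure that may straddle a page boundary:
 *
 *	struct some_guest_struct buf;	// placeholder type, not from this file
 *
 *	if (kvm_read_guest(kvm, gpa, &buf, sizeof(buf)))
 *		return -EFAULT;
 *
 * Any gpa-addressed blob works the same way; kvm_write_guest() below is the
 * mirror image and additionally marks the touched pages dirty.
 */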
int kvm_read_guest_atomic(struct kvm *kvm, gpa_t gpa, void *data,
			  unsigned long len)
{
	int r;
	unsigned long addr;
	gfn_t gfn = gpa >> PAGE_SHIFT;
	int offset = offset_in_page(gpa);

	addr = gfn_to_hva(kvm, gfn);
	if (kvm_is_error_hva(addr))
		return -EFAULT;
	pagefault_disable();
	r = __copy_from_user_inatomic(data, (void __user *)addr + offset, len);
	pagefault_enable();
	if (r)
		return -EFAULT;
	return 0;
}
EXPORT_SYMBOL(kvm_read_guest_atomic);

int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, const void *data,
			 int offset, int len)
{
	int r;
	unsigned long addr;

	addr = gfn_to_hva(kvm, gfn);
	if (kvm_is_error_hva(addr))
		return -EFAULT;
	r = copy_to_user((void __user *)addr + offset, data, len);
	if (r)
		return -EFAULT;
	mark_page_dirty(kvm, gfn);
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_write_guest_page);

int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
		    unsigned long len)
{
	gfn_t gfn = gpa >> PAGE_SHIFT;
	int seg;
	int offset = offset_in_page(gpa);
	int ret;

	while ((seg = next_segment(len, offset)) != 0) {
		ret = kvm_write_guest_page(kvm, gfn, data, offset, seg);
		if (ret < 0)
			return ret;
		offset = 0;
		len -= seg;
		data += seg;
		++gfn;
	}
	return 0;
}

int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len)
{
	return kvm_write_guest_page(kvm, gfn, empty_zero_page, offset, len);
}
EXPORT_SYMBOL_GPL(kvm_clear_guest_page);

int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len)
{
	gfn_t gfn = gpa >> PAGE_SHIFT;
	int seg;
	int offset = offset_in_page(gpa);
	int ret;

	while ((seg = next_segment(len, offset)) != 0) {
		ret = kvm_clear_guest_page(kvm, gfn, offset, seg);
		if (ret < 0)
			return ret;
		offset = 0;
		len -= seg;
		++gfn;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_clear_guest);

void mark_page_dirty(struct kvm *kvm, gfn_t gfn)
{
	struct kvm_memory_slot *memslot;

	gfn = unalias_gfn(kvm, gfn);
	memslot = gfn_to_memslot_unaliased(kvm, gfn);
	if (memslot && memslot->dirty_bitmap) {
		unsigned long rel_gfn = gfn - memslot->base_gfn;

		/* avoid RMW */
		if (!generic_test_le_bit(rel_gfn, memslot->dirty_bitmap))
			generic___set_le_bit(rel_gfn, memslot->dirty_bitmap);
	}
}

/*
 * The vCPU has executed a HLT instruction with in-kernel mode enabled.
 */
void kvm_vcpu_block(struct kvm_vcpu *vcpu)
{
	DEFINE_WAIT(wait);

	for (;;) {
		prepare_to_wait(&vcpu->wq, &wait, TASK_INTERRUPTIBLE);

		if (kvm_arch_vcpu_runnable(vcpu)) {
			set_bit(KVM_REQ_UNHALT, &vcpu->requests);
			break;
		}
		if (kvm_cpu_has_pending_timer(vcpu))
			break;
		if (signal_pending(current))
			break;

		schedule();
	}

	finish_wait(&vcpu->wq, &wait);
}
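/*
 * Added commentary: the wait above is interruptible; the vcpu is woken by a
 * signal, by a pending timer, or when another context makes it runnable
 * again (interrupt injection paths wake vcpu->wq, e.g. through
 * kvm_vcpu_kick() on x86).
 */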
void kvm_resched(struct kvm_vcpu *vcpu)
{
	if (!need_resched())
		return;
	cond_resched();
}
EXPORT_SYMBOL_GPL(kvm_resched);

void kvm_vcpu_on_spin(struct kvm_vcpu *vcpu)
{
	ktime_t expires;
	DEFINE_WAIT(wait);

	prepare_to_wait(&vcpu->wq, &wait, TASK_INTERRUPTIBLE);

	/* Sleep for 100 us, and hope lock-holder got scheduled */
	expires = ktime_add_ns(ktime_get(), 100000UL);
	schedule_hrtimeout(&expires, HRTIMER_MODE_ABS);

	finish_wait(&vcpu->wq, &wait);
}
EXPORT_SYMBOL_GPL(kvm_vcpu_on_spin);

static int kvm_vcpu_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct kvm_vcpu *vcpu = vma->vm_file->private_data;
	struct page *page;

	if (vmf->pgoff == 0)
		page = virt_to_page(vcpu->run);
#ifdef CONFIG_X86
	else if (vmf->pgoff == KVM_PIO_PAGE_OFFSET)
		page = virt_to_page(vcpu->arch.pio_data);
#endif
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
	else if (vmf->pgoff == KVM_COALESCED_MMIO_PAGE_OFFSET)
		page = virt_to_page(vcpu->kvm->coalesced_mmio_ring);
#endif
	else
		return VM_FAULT_SIGBUS;
	get_page(page);
	vmf->page = page;
	return 0;
}

static const struct vm_operations_struct kvm_vcpu_vm_ops = {
	.fault = kvm_vcpu_fault,
};

static int kvm_vcpu_mmap(struct file *file, struct vm_area_struct *vma)
{
	vma->vm_ops = &kvm_vcpu_vm_ops;
	return 0;
}

static int kvm_vcpu_release(struct inode *inode, struct file *filp)
{
	struct kvm_vcpu *vcpu = filp->private_data;

	kvm_put_kvm(vcpu->kvm);
	return 0;
}

static struct file_operations kvm_vcpu_fops = {
	.release        = kvm_vcpu_release,
	.unlocked_ioctl = kvm_vcpu_ioctl,
	.compat_ioctl   = kvm_vcpu_ioctl,
	.mmap           = kvm_vcpu_mmap,
};

/*
 * Allocates an inode for the vcpu.
 */
static int create_vcpu_fd(struct kvm_vcpu *vcpu)
{
	return anon_inode_getfd("kvm-vcpu", &kvm_vcpu_fops, vcpu, O_RDWR);
}

/*
 * Creates some virtual cpus.  Good luck creating more than one.
 */
static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, u32 id)
{
	int r;
	struct kvm_vcpu *vcpu, *v;

	vcpu = kvm_arch_vcpu_create(kvm, id);
	if (IS_ERR(vcpu))
		return PTR_ERR(vcpu);

	preempt_notifier_init(&vcpu->preempt_notifier, &kvm_preempt_ops);

	r = kvm_arch_vcpu_setup(vcpu);
	if (r)
		return r;

	mutex_lock(&kvm->lock);
	if (atomic_read(&kvm->online_vcpus) == KVM_MAX_VCPUS) {
		r = -EINVAL;
		goto vcpu_destroy;
	}

	kvm_for_each_vcpu(r, v, kvm)
		if (v->vcpu_id == id) {
			r = -EEXIST;
			goto vcpu_destroy;
		}

	BUG_ON(kvm->vcpus[atomic_read(&kvm->online_vcpus)]);

	/* Now it's all set up, let userspace reach it */
	kvm_get_kvm(kvm);
	r = create_vcpu_fd(vcpu);
	if (r < 0) {
		kvm_put_kvm(kvm);
		goto vcpu_destroy;
	}

	kvm->vcpus[atomic_read(&kvm->online_vcpus)] = vcpu;
	smp_wmb();
	atomic_inc(&kvm->online_vcpus);

#ifdef CONFIG_KVM_APIC_ARCHITECTURE
	if (kvm->bsp_vcpu_id == id)
		kvm->bsp_vcpu = vcpu;
#endif
	mutex_unlock(&kvm->lock);
	return r;

vcpu_destroy:
	mutex_unlock(&kvm->lock);
	kvm_arch_vcpu_destroy(vcpu);
	return r;
}
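/*
 * Added commentary: the fd returned here is what userspace mmap()s
 * (KVM_GET_VCPU_MMAP_SIZE bytes, served by kvm_vcpu_fault() above) to reach
 * struct kvm_run, and what it issues KVM_RUN and the other per-vcpu ioctls
 * against.
 */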
static int kvm_vcpu_ioctl_set_sigmask(struct kvm_vcpu *vcpu, sigset_t *sigset)
{
	if (sigset) {
		sigdelsetmask(sigset, sigmask(SIGKILL)|sigmask(SIGSTOP));
		vcpu->sigset_active = 1;
		vcpu->sigset = *sigset;
	} else
		vcpu->sigset_active = 0;
	return 0;
}

static long kvm_vcpu_ioctl(struct file *filp,
			   unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	int r;
	struct kvm_fpu *fpu = NULL;
	struct kvm_sregs *kvm_sregs = NULL;

	if (vcpu->kvm->mm != current->mm)
		return -EIO;
	switch (ioctl) {
	case KVM_RUN:
		r = -EINVAL;
		if (arg)
			goto out;
		r = kvm_arch_vcpu_ioctl_run(vcpu, vcpu->run);
		break;
	case KVM_GET_REGS: {
		struct kvm_regs *kvm_regs;

		r = -ENOMEM;
		kvm_regs = kzalloc(sizeof(struct kvm_regs), GFP_KERNEL);
		if (!kvm_regs)
			goto out;
		r = kvm_arch_vcpu_ioctl_get_regs(vcpu, kvm_regs);
		if (r)
			goto out_free1;
		r = -EFAULT;
		if (copy_to_user(argp, kvm_regs, sizeof(struct kvm_regs)))
			goto out_free1;
		r = 0;
out_free1:
		kfree(kvm_regs);
		break;
	}
	case KVM_SET_REGS: {
		struct kvm_regs *kvm_regs;

		r = -ENOMEM;
		kvm_regs = kzalloc(sizeof(struct kvm_regs), GFP_KERNEL);
		if (!kvm_regs)
			goto out;
		r = -EFAULT;
		if (copy_from_user(kvm_regs, argp, sizeof(struct kvm_regs)))
			goto out_free2;
		r = kvm_arch_vcpu_ioctl_set_regs(vcpu, kvm_regs);
		if (r)
			goto out_free2;
		r = 0;
out_free2:
		kfree(kvm_regs);
		break;
	}
	case KVM_GET_SREGS: {
		kvm_sregs = kzalloc(sizeof(struct kvm_sregs), GFP_KERNEL);
		r = -ENOMEM;
		if (!kvm_sregs)
			goto out;
		r = kvm_arch_vcpu_ioctl_get_sregs(vcpu, kvm_sregs);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, kvm_sregs, sizeof(struct kvm_sregs)))
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_SREGS: {
		kvm_sregs = kmalloc(sizeof(struct kvm_sregs), GFP_KERNEL);
		r = -ENOMEM;
		if (!kvm_sregs)
			goto out;
		r = -EFAULT;
		if (copy_from_user(kvm_sregs, argp, sizeof(struct kvm_sregs)))
			goto out;
		r = kvm_arch_vcpu_ioctl_set_sregs(vcpu, kvm_sregs);
		if (r)
			goto out;
		r = 0;
		break;
	}
	case KVM_GET_MP_STATE: {
		struct kvm_mp_state mp_state;

		r = kvm_arch_vcpu_ioctl_get_mpstate(vcpu, &mp_state);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, &mp_state, sizeof mp_state))
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_MP_STATE: {
		struct kvm_mp_state mp_state;

		r = -EFAULT;
		if (copy_from_user(&mp_state, argp, sizeof mp_state))
			goto out;
		r = kvm_arch_vcpu_ioctl_set_mpstate(vcpu, &mp_state);
		if (r)
			goto out;
		r = 0;
		break;
	}
	case KVM_TRANSLATE: {
		struct kvm_translation tr;

		r = -EFAULT;
		if (copy_from_user(&tr, argp, sizeof tr))
			goto out;
		r = kvm_arch_vcpu_ioctl_translate(vcpu, &tr);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, &tr, sizeof tr))
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_GUEST_DEBUG: {
		struct kvm_guest_debug dbg;

		r = -EFAULT;
		if (copy_from_user(&dbg, argp, sizeof dbg))
			goto out;
		r = kvm_arch_vcpu_ioctl_set_guest_debug(vcpu, &dbg);
		if (r)
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_SIGNAL_MASK: {
		struct kvm_signal_mask __user *sigmask_arg = argp;
		struct kvm_signal_mask kvm_sigmask;
		sigset_t sigset, *p;

		p = NULL;
		if (argp) {
			r = -EFAULT;
			if (copy_from_user(&kvm_sigmask, argp,
					   sizeof kvm_sigmask))
				goto out;
			r = -EINVAL;
			if (kvm_sigmask.len != sizeof sigset)
				goto out;
			r = -EFAULT;
			if (copy_from_user(&sigset, sigmask_arg->sigset,
					   sizeof sigset))
				goto out;
			p = &sigset;
		}
		/* p is NULL when argp is NULL, which clears the mask */
		r = kvm_vcpu_ioctl_set_sigmask(vcpu, p);
		break;
	}
	case KVM_GET_FPU: {
		fpu = kzalloc(sizeof(struct kvm_fpu), GFP_KERNEL);
		r = -ENOMEM;
		if (!fpu)
			goto out;
		r = kvm_arch_vcpu_ioctl_get_fpu(vcpu, fpu);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, fpu, sizeof(struct kvm_fpu)))
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_FPU: {
		fpu = kmalloc(sizeof(struct kvm_fpu), GFP_KERNEL);
		r = -ENOMEM;
		if (!fpu)
			goto out;
		r = -EFAULT;
		if (copy_from_user(fpu, argp, sizeof(struct kvm_fpu)))
			goto out;
		r = kvm_arch_vcpu_ioctl_set_fpu(vcpu, fpu);
		if (r)
			goto out;
		r = 0;
		break;
	}
	default:
		r = kvm_arch_vcpu_ioctl(filp, ioctl, arg);
	}
out:
	kfree(fpu);
	kfree(kvm_sregs);
	return r;
}
static long kvm_vm_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm = filp->private_data;
	void __user *argp = (void __user *)arg;
	int r;

	if (kvm->mm != current->mm)
		return -EIO;
	switch (ioctl) {
	case KVM_CREATE_VCPU:
		r = kvm_vm_ioctl_create_vcpu(kvm, arg);
		if (r < 0)
			goto out;
		break;
	case KVM_SET_USER_MEMORY_REGION: {
		struct kvm_userspace_memory_region kvm_userspace_mem;

		r = -EFAULT;
		if (copy_from_user(&kvm_userspace_mem, argp,
				   sizeof kvm_userspace_mem))
			goto out;

		r = kvm_vm_ioctl_set_memory_region(kvm, &kvm_userspace_mem, 1);
		if (r)
			goto out;
		break;
	}
	case KVM_GET_DIRTY_LOG: {
		struct kvm_dirty_log log;

		r = -EFAULT;
		if (copy_from_user(&log, argp, sizeof log))
			goto out;
		r = kvm_vm_ioctl_get_dirty_log(kvm, &log);
		if (r)
			goto out;
		break;
	}
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
	case KVM_REGISTER_COALESCED_MMIO: {
		struct kvm_coalesced_mmio_zone zone;

		r = -EFAULT;
		if (copy_from_user(&zone, argp, sizeof zone))
			goto out;
		r = -ENXIO;
		r = kvm_vm_ioctl_register_coalesced_mmio(kvm, &zone);
		if (r)
			goto out;
		r = 0;
		break;
	}
	case KVM_UNREGISTER_COALESCED_MMIO: {
		struct kvm_coalesced_mmio_zone zone;

		r = -EFAULT;
		if (copy_from_user(&zone, argp, sizeof zone))
			goto out;
		r = -ENXIO;
		r = kvm_vm_ioctl_unregister_coalesced_mmio(kvm, &zone);
		if (r)
			goto out;
		r = 0;
		break;
	}
#endif
	case KVM_IRQFD: {
		struct kvm_irqfd data;

		r = -EFAULT;
		if (copy_from_user(&data, argp, sizeof data))
			goto out;
		r = kvm_irqfd(kvm, data.fd, data.gsi, data.flags);
		break;
	}
	case KVM_IOEVENTFD: {
		struct kvm_ioeventfd data;

		r = -EFAULT;
		if (copy_from_user(&data, argp, sizeof data))
			goto out;
		r = kvm_ioeventfd(kvm, &data);
		break;
	}
#ifdef CONFIG_KVM_APIC_ARCHITECTURE
	case KVM_SET_BOOT_CPU_ID:
		r = 0;
		mutex_lock(&kvm->lock);
		if (atomic_read(&kvm->online_vcpus) != 0)
			r = -EBUSY;
		else
			kvm->bsp_vcpu_id = arg;
		mutex_unlock(&kvm->lock);
		break;
#endif
	default:
		r = kvm_arch_vm_ioctl(filp, ioctl, arg);
		if (r == -ENOTTY)
			r = kvm_vm_ioctl_assigned_device(kvm, ioctl, arg);
	}
out:
	return r;
}

#ifdef CONFIG_COMPAT
struct compat_kvm_dirty_log {
	__u32 slot;
	__u32 padding1;
	union {
		compat_uptr_t dirty_bitmap; /* one bit per page */
		__u64 padding2;
	};
};

static long kvm_vm_compat_ioctl(struct file *filp,
				unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm = filp->private_data;
	int r;

	if (kvm->mm != current->mm)
		return -EIO;
	switch (ioctl) {
	case KVM_GET_DIRTY_LOG: {
		struct compat_kvm_dirty_log compat_log;
		struct kvm_dirty_log log;

		r = -EFAULT;
		if (copy_from_user(&compat_log, (void __user *)arg,
				   sizeof(compat_log)))
			goto out;
		log.slot	 = compat_log.slot;
		log.padding1	 = compat_log.padding1;
		log.padding2	 = compat_log.padding2;
		log.dirty_bitmap = compat_ptr(compat_log.dirty_bitmap);

		r = kvm_vm_ioctl_get_dirty_log(kvm, &log);
		if (r)
			goto out;
		break;
	}
	default:
		r = kvm_vm_ioctl(filp, ioctl, arg);
	}

out:
	return r;
}
#endif

static int kvm_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct page *page[1];
	unsigned long addr;
	int npages;
	gfn_t gfn = vmf->pgoff;
	struct kvm *kvm = vma->vm_file->private_data;

	addr = gfn_to_hva(kvm, gfn);
	if (kvm_is_error_hva(addr))
		return VM_FAULT_SIGBUS;

	npages = get_user_pages(current, current->mm, addr, 1, 1, 0, page,
				NULL);
	if (unlikely(npages != 1))
		return VM_FAULT_SIGBUS;

	vmf->page = page[0];
	return 0;
}

static const struct vm_operations_struct kvm_vm_vm_ops = {
	.fault = kvm_vm_fault,
};

static int kvm_vm_mmap(struct file *file, struct vm_area_struct *vma)
{
	vma->vm_ops = &kvm_vm_vm_ops;
	return 0;
}

static struct file_operations kvm_vm_fops = {
	.release        = kvm_vm_release,
	.unlocked_ioctl = kvm_vm_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl   = kvm_vm_compat_ioctl,
#endif
	.mmap           = kvm_vm_mmap,
};

static int kvm_dev_ioctl_create_vm(void)
{
	int fd;
	struct kvm *kvm;

	kvm = kvm_create_vm();
	if (IS_ERR(kvm))
		return PTR_ERR(kvm);
	fd = anon_inode_getfd("kvm-vm", &kvm_vm_fops, kvm, O_RDWR);
	if (fd < 0)
		kvm_put_kvm(kvm);

	return fd;
}
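/*
 * Usage sketch (added commentary, userspace view, illustrative): the overall
 * control flow these file descriptors support is
 *
 *	kvm_fd  = open("/dev/kvm", O_RDWR);
 *	vm_fd   = ioctl(kvm_fd, KVM_CREATE_VM, 0);
 *	ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &mem);
 *	vcpu_fd = ioctl(vm_fd, KVM_CREATE_VCPU, 0);
 *	run     = mmap(NULL, mmap_size, PROT_READ|PROT_WRITE, MAP_SHARED,
 *		       vcpu_fd, 0);
 *	ioctl(vcpu_fd, KVM_RUN, 0);
 *
 * where mmap_size comes from ioctl(kvm_fd, KVM_GET_VCPU_MMAP_SIZE, 0).
 */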
static long kvm_dev_ioctl_check_extension_generic(long arg)
{
	switch (arg) {
	case KVM_CAP_USER_MEMORY:
	case KVM_CAP_DESTROY_MEMORY_REGION_WORKS:
	case KVM_CAP_JOIN_MEMORY_REGIONS_WORKS:
#ifdef CONFIG_KVM_APIC_ARCHITECTURE
	case KVM_CAP_SET_BOOT_CPU_ID:
#endif
	case KVM_CAP_INTERNAL_ERROR_DATA:
		return 1;
#ifdef CONFIG_HAVE_KVM_IRQCHIP
	case KVM_CAP_IRQ_ROUTING:
		return KVM_MAX_IRQ_ROUTES;
#endif
	default:
		break;
	}
	return kvm_dev_ioctl_check_extension(arg);
}

static long kvm_dev_ioctl(struct file *filp,
			  unsigned int ioctl, unsigned long arg)
{
	long r = -EINVAL;

	switch (ioctl) {
	case KVM_GET_API_VERSION:
		r = -EINVAL;
		if (arg)
			goto out;
		r = KVM_API_VERSION;
		break;
	case KVM_CREATE_VM:
		r = -EINVAL;
		if (arg)
			goto out;
		r = kvm_dev_ioctl_create_vm();
		break;
	case KVM_CHECK_EXTENSION:
		r = kvm_dev_ioctl_check_extension_generic(arg);
		break;
	case KVM_GET_VCPU_MMAP_SIZE:
		r = -EINVAL;
		if (arg)
			goto out;
		r = PAGE_SIZE;     /* struct kvm_run */
#ifdef CONFIG_X86
		r += PAGE_SIZE;    /* pio data page */
#endif
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
		r += PAGE_SIZE;    /* coalesced mmio ring page */
#endif
		break;
	case KVM_TRACE_ENABLE:
	case KVM_TRACE_PAUSE:
	case KVM_TRACE_DISABLE:
		r = -EOPNOTSUPP;
		break;
	default:
		return kvm_arch_dev_ioctl(filp, ioctl, arg);
	}
out:
	return r;
}

static struct file_operations kvm_chardev_ops = {
	.unlocked_ioctl = kvm_dev_ioctl,
	.compat_ioctl   = kvm_dev_ioctl,
};

static struct miscdevice kvm_dev = {
	KVM_MINOR,
	"kvm",
	&kvm_chardev_ops,
};

static void hardware_enable(void *junk)
{
	int cpu = raw_smp_processor_id();
	int r;

	if (cpumask_test_cpu(cpu, cpus_hardware_enabled))
		return;

	cpumask_set_cpu(cpu, cpus_hardware_enabled);

	r = kvm_arch_hardware_enable(NULL);

	if (r) {
		cpumask_clear_cpu(cpu, cpus_hardware_enabled);
		atomic_inc(&hardware_enable_failed);
		printk(KERN_INFO "kvm: enabling virtualization on "
				 "CPU%d failed\n", cpu);
	}
}

static void hardware_disable(void *junk)
{
	int cpu = raw_smp_processor_id();

	if (!cpumask_test_cpu(cpu, cpus_hardware_enabled))
		return;
	cpumask_clear_cpu(cpu, cpus_hardware_enabled);
	kvm_arch_hardware_disable(NULL);
}

static void hardware_disable_all_nolock(void)
{
	BUG_ON(!kvm_usage_count);

	kvm_usage_count--;
	if (!kvm_usage_count)
		on_each_cpu(hardware_disable, NULL, 1);
}

static void hardware_disable_all(void)
{
	spin_lock(&kvm_lock);
	hardware_disable_all_nolock();
	spin_unlock(&kvm_lock);
}

static int hardware_enable_all(void)
{
	int r = 0;

	spin_lock(&kvm_lock);

	kvm_usage_count++;
	if (kvm_usage_count == 1) {
		atomic_set(&hardware_enable_failed, 0);
		on_each_cpu(hardware_enable, NULL, 1);

		if (atomic_read(&hardware_enable_failed)) {
			hardware_disable_all_nolock();
			r = -EBUSY;
		}
	}

	spin_unlock(&kvm_lock);

	return r;
}
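/*
 * Added commentary: kvm_usage_count counts VMs, not vcpus.  The first
 * kvm_create_vm() enables virtualization (VMX/SVM) on every online CPU via
 * the helpers above, and the last kvm_destroy_vm() disables it again, so the
 * extensions are only held while at least one VM exists.
 */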
  1536. static int kvm_cpu_hotplug(struct notifier_block *notifier, unsigned long val,
  1537. void *v)
  1538. {
  1539. int cpu = (long)v;
  1540. if (!kvm_usage_count)
  1541. return NOTIFY_OK;
  1542. val &= ~CPU_TASKS_FROZEN;
  1543. switch (val) {
  1544. case CPU_DYING:
  1545. printk(KERN_INFO "kvm: disabling virtualization on CPU%d\n",
  1546. cpu);
  1547. hardware_disable(NULL);
  1548. break;
  1549. case CPU_UP_CANCELED:
  1550. printk(KERN_INFO "kvm: disabling virtualization on CPU%d\n",
  1551. cpu);
  1552. smp_call_function_single(cpu, hardware_disable, NULL, 1);
  1553. break;
  1554. case CPU_ONLINE:
  1555. printk(KERN_INFO "kvm: enabling virtualization on CPU%d\n",
  1556. cpu);
  1557. smp_call_function_single(cpu, hardware_enable, NULL, 1);
  1558. break;
  1559. }
  1560. return NOTIFY_OK;
  1561. }
asmlinkage void kvm_handle_fault_on_reboot(void)
{
	if (kvm_rebooting)
		/* spin while reset goes on */
		while (true)
			;
	/* Fault while not rebooting.  We want the trace. */
	BUG();
}
EXPORT_SYMBOL_GPL(kvm_handle_fault_on_reboot);

static int kvm_reboot(struct notifier_block *notifier, unsigned long val,
		      void *v)
{
	/*
	 * Some (well, at least mine) BIOSes hang on reboot if
	 * in vmx root mode.
	 *
	 * And Intel TXT requires VMX to be off on all CPUs when the system
	 * shuts down.
	 */
	printk(KERN_INFO "kvm: exiting hardware virtualization\n");
	kvm_rebooting = true;
	on_each_cpu(hardware_disable, NULL, 1);
	return NOTIFY_OK;
}

static struct notifier_block kvm_reboot_notifier = {
	.notifier_call = kvm_reboot,
	.priority = 0,
};
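
/*
 * I/O bus helpers: a fixed-size array of kvm_io_device pointers, protected
 * by kvm->slots_lock and searched linearly on each access; a device that
 * claims the address returns 0 and the search stops.
 */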
void kvm_io_bus_init(struct kvm_io_bus *bus)
{
	memset(bus, 0, sizeof(*bus));
}

void kvm_io_bus_destroy(struct kvm_io_bus *bus)
{
	int i;

	for (i = 0; i < bus->dev_count; i++) {
		struct kvm_io_device *pos = bus->devs[i];

		kvm_iodevice_destructor(pos);
	}
}

/* kvm_io_bus_write - called under kvm->slots_lock */
int kvm_io_bus_write(struct kvm_io_bus *bus, gpa_t addr,
		     int len, const void *val)
{
	int i;

	for (i = 0; i < bus->dev_count; i++)
		if (!kvm_iodevice_write(bus->devs[i], addr, len, val))
			return 0;
	return -EOPNOTSUPP;
}

/* kvm_io_bus_read - called under kvm->slots_lock */
int kvm_io_bus_read(struct kvm_io_bus *bus, gpa_t addr, int len, void *val)
{
	int i;

	for (i = 0; i < bus->dev_count; i++)
		if (!kvm_iodevice_read(bus->devs[i], addr, len, val))
			return 0;
	return -EOPNOTSUPP;
}

int kvm_io_bus_register_dev(struct kvm *kvm, struct kvm_io_bus *bus,
			    struct kvm_io_device *dev)
{
	int ret;

	down_write(&kvm->slots_lock);
	ret = __kvm_io_bus_register_dev(bus, dev);
	up_write(&kvm->slots_lock);

	return ret;
}

/* An unlocked version. Caller must have write lock on slots_lock. */
int __kvm_io_bus_register_dev(struct kvm_io_bus *bus,
			      struct kvm_io_device *dev)
{
	if (bus->dev_count > NR_IOBUS_DEVS - 1)
		return -ENOSPC;

	bus->devs[bus->dev_count++] = dev;

	return 0;
}

void kvm_io_bus_unregister_dev(struct kvm *kvm,
			       struct kvm_io_bus *bus,
			       struct kvm_io_device *dev)
{
	down_write(&kvm->slots_lock);
	__kvm_io_bus_unregister_dev(bus, dev);
	up_write(&kvm->slots_lock);
}

/* An unlocked version. Caller must have write lock on slots_lock. */
void __kvm_io_bus_unregister_dev(struct kvm_io_bus *bus,
				 struct kvm_io_device *dev)
{
	int i;

	for (i = 0; i < bus->dev_count; i++)
		if (bus->devs[i] == dev) {
			bus->devs[i] = bus->devs[--bus->dev_count];
			break;
		}
}

static struct notifier_block kvm_cpu_notifier = {
	.notifier_call = kvm_cpu_hotplug,
	.priority = 20, /* must be > scheduler priority */
};
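
/*
 * debugfs statistics: each counter lives at a fixed byte offset inside
 * struct kvm or struct kvm_vcpu; the getters walk vm_list under kvm_lock
 * and sum the per-VM (or per-vCPU) values.
 */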
static int vm_stat_get(void *_offset, u64 *val)
{
	unsigned offset = (long)_offset;
	struct kvm *kvm;

	*val = 0;
	spin_lock(&kvm_lock);
	list_for_each_entry(kvm, &vm_list, vm_list)
		*val += *(u32 *)((void *)kvm + offset);
	spin_unlock(&kvm_lock);
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(vm_stat_fops, vm_stat_get, NULL, "%llu\n");

static int vcpu_stat_get(void *_offset, u64 *val)
{
	unsigned offset = (long)_offset;
	struct kvm *kvm;
	struct kvm_vcpu *vcpu;
	int i;

	*val = 0;
	spin_lock(&kvm_lock);
	list_for_each_entry(kvm, &vm_list, vm_list)
		kvm_for_each_vcpu(i, vcpu, kvm)
			*val += *(u32 *)((void *)vcpu + offset);

	spin_unlock(&kvm_lock);
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(vcpu_stat_fops, vcpu_stat_get, NULL, "%llu\n");

static const struct file_operations *stat_fops[] = {
	[KVM_STAT_VCPU] = &vcpu_stat_fops,
	[KVM_STAT_VM]   = &vm_stat_fops,
};

static void kvm_init_debug(void)
{
	struct kvm_stats_debugfs_item *p;

	kvm_debugfs_dir = debugfs_create_dir("kvm", NULL);
	for (p = debugfs_entries; p->name; ++p)
		p->dentry = debugfs_create_file(p->name, 0444, kvm_debugfs_dir,
						(void *)(long)p->offset,
						stat_fops[p->kind]);
}

static void kvm_exit_debug(void)
{
	struct kvm_stats_debugfs_item *p;

	for (p = debugfs_entries; p->name; ++p)
		debugfs_remove(p->dentry);
	debugfs_remove(kvm_debugfs_dir);
}
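
/*
 * System suspend/resume hooks: turn hardware virtualization off before
 * suspend and back on after resume, but only while at least one VM exists.
 */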
static int kvm_suspend(struct sys_device *dev, pm_message_t state)
{
	if (kvm_usage_count)
		hardware_disable(NULL);
	return 0;
}

static int kvm_resume(struct sys_device *dev)
{
	if (kvm_usage_count)
		hardware_enable(NULL);
	return 0;
}

static struct sysdev_class kvm_sysdev_class = {
	.name = "kvm",
	.suspend = kvm_suspend,
	.resume = kvm_resume,
};

static struct sys_device kvm_sysdev = {
	.id = 0,
	.cls = &kvm_sysdev_class,
};
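
/* Fallback page and pfn, handed out when a guest frame cannot be resolved. */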
struct page *bad_page;
pfn_t bad_pfn;
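
/*
 * Preempt notifier glue: when the task running a vCPU is scheduled out or
 * back in, forward the event to the arch vcpu_put/vcpu_load hooks so
 * per-CPU state follows the vCPU.
 */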
static inline
struct kvm_vcpu *preempt_notifier_to_vcpu(struct preempt_notifier *pn)
{
	return container_of(pn, struct kvm_vcpu, preempt_notifier);
}

static void kvm_sched_in(struct preempt_notifier *pn, int cpu)
{
	struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);

	kvm_arch_vcpu_load(vcpu, cpu);
}

static void kvm_sched_out(struct preempt_notifier *pn,
			  struct task_struct *next)
{
	struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);

	kvm_arch_vcpu_put(vcpu);
}
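
/*
 * Module-wide initialization, called from the arch module's init routine:
 * arch setup, per-CPU compatibility checks, hotplug/reboot/sysdev
 * registration, the vcpu slab cache and the /dev/kvm misc device.  Errors
 * unwind in reverse order through the out_free_* labels.
 */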
int kvm_init(void *opaque, unsigned int vcpu_size,
	     struct module *module)
{
	int r;
	int cpu;

	r = kvm_arch_init(opaque);
	if (r)
		goto out_fail;

	bad_page = alloc_page(GFP_KERNEL | __GFP_ZERO);

	if (bad_page == NULL) {
		r = -ENOMEM;
		goto out;
	}

	bad_pfn = page_to_pfn(bad_page);

	if (!zalloc_cpumask_var(&cpus_hardware_enabled, GFP_KERNEL)) {
		r = -ENOMEM;
		goto out_free_0;
	}

	r = kvm_arch_hardware_setup();
	if (r < 0)
		goto out_free_0a;

	for_each_online_cpu(cpu) {
		smp_call_function_single(cpu,
				kvm_arch_check_processor_compat,
				&r, 1);
		if (r < 0)
			goto out_free_1;
	}

	r = register_cpu_notifier(&kvm_cpu_notifier);
	if (r)
		goto out_free_2;
	register_reboot_notifier(&kvm_reboot_notifier);

	r = sysdev_class_register(&kvm_sysdev_class);
	if (r)
		goto out_free_3;

	r = sysdev_register(&kvm_sysdev);
	if (r)
		goto out_free_4;

	/* A kmem cache lets us meet the alignment requirements of fx_save. */
	kvm_vcpu_cache = kmem_cache_create("kvm_vcpu", vcpu_size,
					   __alignof__(struct kvm_vcpu),
					   0, NULL);
	if (!kvm_vcpu_cache) {
		r = -ENOMEM;
		goto out_free_5;
	}

	kvm_chardev_ops.owner = module;
	kvm_vm_fops.owner = module;
	kvm_vcpu_fops.owner = module;

	r = misc_register(&kvm_dev);
	if (r) {
		printk(KERN_ERR "kvm: misc device register failed\n");
		goto out_free;
	}

	kvm_preempt_ops.sched_in = kvm_sched_in;
	kvm_preempt_ops.sched_out = kvm_sched_out;

	kvm_init_debug();

	return 0;

out_free:
	kmem_cache_destroy(kvm_vcpu_cache);
out_free_5:
	sysdev_unregister(&kvm_sysdev);
out_free_4:
	sysdev_class_unregister(&kvm_sysdev_class);
out_free_3:
	unregister_reboot_notifier(&kvm_reboot_notifier);
	unregister_cpu_notifier(&kvm_cpu_notifier);
out_free_2:
out_free_1:
	kvm_arch_hardware_unsetup();
out_free_0a:
	free_cpumask_var(cpus_hardware_enabled);
out_free_0:
	__free_page(bad_page);
out:
	kvm_arch_exit();
out_fail:
	return r;
}
EXPORT_SYMBOL_GPL(kvm_init);
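
/*
 * Tear everything down in roughly the reverse order of kvm_init(),
 * disabling hardware virtualization on all CPUs before arch cleanup.
 */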
void kvm_exit(void)
{
	tracepoint_synchronize_unregister();
	kvm_exit_debug();
	misc_deregister(&kvm_dev);
	kmem_cache_destroy(kvm_vcpu_cache);
	sysdev_unregister(&kvm_sysdev);
	sysdev_class_unregister(&kvm_sysdev_class);
	unregister_reboot_notifier(&kvm_reboot_notifier);
	unregister_cpu_notifier(&kvm_cpu_notifier);
	on_each_cpu(hardware_disable, NULL, 1);
	kvm_arch_hardware_unsetup();
	kvm_arch_exit();
	free_cpumask_var(cpus_hardware_enabled);
	__free_page(bad_page);
}
EXPORT_SYMBOL_GPL(kvm_exit);