kvm_main.c
/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This module enables machines with Intel VT-x extensions to run virtual
 * machines without emulation or binary translation.
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Avi Kivity   <avi@qumranet.com>
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#include "iodev.h"

#include <linux/kvm_host.h>
#include <linux/kvm.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/percpu.h>
#include <linux/mm.h>
#include <linux/miscdevice.h>
#include <linux/vmalloc.h>
#include <linux/reboot.h>
#include <linux/debugfs.h>
#include <linux/highmem.h>
#include <linux/file.h>
#include <linux/sysdev.h>
#include <linux/cpu.h>
#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/smp.h>
#include <linux/anon_inodes.h>
#include <linux/profile.h>
#include <linux/kvm_para.h>
#include <linux/pagemap.h>
#include <linux/mman.h>
#include <linux/swap.h>
#include <linux/bitops.h>
#include <linux/spinlock.h>
#include <linux/compat.h>
#include <linux/srcu.h>
#include <linux/hugetlb.h>
#include <linux/slab.h>

#include <asm/processor.h>
#include <asm/io.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm-generic/bitops/le.h>

#include "coalesced_mmio.h"

#define CREATE_TRACE_POINTS
#include <trace/events/kvm.h>

MODULE_AUTHOR("Qumranet");
MODULE_LICENSE("GPL");

/*
 * Ordering of locks:
 *
 *	kvm->lock --> kvm->slots_lock --> kvm->irq_lock
 */
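/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * code that needs more than one of the VM-wide mutexes must nest them in
 * the order documented above; taking them in any other order risks
 * deadlock.
 *
 *	mutex_lock(&kvm->lock);
 *	mutex_lock(&kvm->slots_lock);
 *	mutex_lock(&kvm->irq_lock);
 *	...
 *	mutex_unlock(&kvm->irq_lock);
 *	mutex_unlock(&kvm->slots_lock);
 *	mutex_unlock(&kvm->lock);
 */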
DEFINE_SPINLOCK(kvm_lock);
LIST_HEAD(vm_list);

static cpumask_var_t cpus_hardware_enabled;
static int kvm_usage_count = 0;
static atomic_t hardware_enable_failed;

struct kmem_cache *kvm_vcpu_cache;
EXPORT_SYMBOL_GPL(kvm_vcpu_cache);

static __read_mostly struct preempt_ops kvm_preempt_ops;

struct dentry *kvm_debugfs_dir;

static long kvm_vcpu_ioctl(struct file *file, unsigned int ioctl,
			   unsigned long arg);
static int hardware_enable_all(void);
static void hardware_disable_all(void);

static void kvm_io_bus_destroy(struct kvm_io_bus *bus);

static bool kvm_rebooting;

static bool largepages_enabled = true;

struct page *hwpoison_page;
pfn_t hwpoison_pfn;

inline int kvm_is_mmio_pfn(pfn_t pfn)
{
	if (pfn_valid(pfn)) {
		struct page *page = compound_head(pfn_to_page(pfn));
		return PageReserved(page);
	}

	return true;
}

/*
 * Switches to specified vcpu, until a matching vcpu_put()
 */
void vcpu_load(struct kvm_vcpu *vcpu)
{
	int cpu;

	mutex_lock(&vcpu->mutex);
	cpu = get_cpu();
	preempt_notifier_register(&vcpu->preempt_notifier);
	kvm_arch_vcpu_load(vcpu, cpu);
	put_cpu();
}

void vcpu_put(struct kvm_vcpu *vcpu)
{
	preempt_disable();
	kvm_arch_vcpu_put(vcpu);
	preempt_notifier_unregister(&vcpu->preempt_notifier);
	preempt_enable();
	mutex_unlock(&vcpu->mutex);
}
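/*
 * Usage sketch (editor's addition): every vcpu_load() must be paired with
 * a vcpu_put(), bracketing the region where the vcpu's state is resident
 * on the current physical CPU, as kvm_vcpu_ioctl() below does:
 *
 *	vcpu_load(vcpu);
 *	r = kvm_arch_vcpu_ioctl_run(vcpu, vcpu->run);
 *	vcpu_put(vcpu);
 */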
static void ack_flush(void *_completed)
{
}

static bool make_all_cpus_request(struct kvm *kvm, unsigned int req)
{
	int i, cpu, me;
	cpumask_var_t cpus;
	bool called = true;
	struct kvm_vcpu *vcpu;

	zalloc_cpumask_var(&cpus, GFP_ATOMIC);

	raw_spin_lock(&kvm->requests_lock);
	me = smp_processor_id();
	kvm_for_each_vcpu(i, vcpu, kvm) {
		if (test_and_set_bit(req, &vcpu->requests))
			continue;
		cpu = vcpu->cpu;
		if (cpus != NULL && cpu != -1 && cpu != me)
			cpumask_set_cpu(cpu, cpus);
	}
	if (unlikely(cpus == NULL))
		smp_call_function_many(cpu_online_mask, ack_flush, NULL, 1);
	else if (!cpumask_empty(cpus))
		smp_call_function_many(cpus, ack_flush, NULL, 1);
	else
		called = false;
	raw_spin_unlock(&kvm->requests_lock);
	free_cpumask_var(cpus);
	return called;
}

void kvm_flush_remote_tlbs(struct kvm *kvm)
{
	if (make_all_cpus_request(kvm, KVM_REQ_TLB_FLUSH))
		++kvm->stat.remote_tlb_flush;
}

void kvm_reload_remote_mmus(struct kvm *kvm)
{
	make_all_cpus_request(kvm, KVM_REQ_MMU_RELOAD);
}
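/*
 * Consumer-side sketch (editor's addition; the real check lives in the
 * arch vcpu entry path, not in this file): a request bit raised by
 * make_all_cpus_request() is acted on and cleared before the vcpu
 * reenters the guest, roughly:
 *
 *	if (test_and_clear_bit(KVM_REQ_TLB_FLUSH, &vcpu->requests))
 *		arch_flush_guest_tlb(vcpu);	// hypothetical arch hook
 */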
int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id)
{
	struct page *page;
	int r;

	mutex_init(&vcpu->mutex);
	vcpu->cpu = -1;
	vcpu->kvm = kvm;
	vcpu->vcpu_id = id;
	init_waitqueue_head(&vcpu->wq);

	page = alloc_page(GFP_KERNEL | __GFP_ZERO);
	if (!page) {
		r = -ENOMEM;
		goto fail;
	}
	vcpu->run = page_address(page);

	r = kvm_arch_vcpu_init(vcpu);
	if (r < 0)
		goto fail_free_run;
	return 0;

fail_free_run:
	free_page((unsigned long)vcpu->run);
fail:
	return r;
}
EXPORT_SYMBOL_GPL(kvm_vcpu_init);

void kvm_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	kvm_arch_vcpu_uninit(vcpu);
	free_page((unsigned long)vcpu->run);
}
EXPORT_SYMBOL_GPL(kvm_vcpu_uninit);

#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
static inline struct kvm *mmu_notifier_to_kvm(struct mmu_notifier *mn)
{
	return container_of(mn, struct kvm, mmu_notifier);
}

static void kvm_mmu_notifier_invalidate_page(struct mmu_notifier *mn,
					     struct mm_struct *mm,
					     unsigned long address)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
	int need_tlb_flush, idx;

	/*
	 * When ->invalidate_page runs, the linux pte has been zapped
	 * already but the page is still allocated until
	 * ->invalidate_page returns. So if we increase the sequence
	 * here the kvm page fault will notice if the spte can't be
	 * established because the page is going to be freed. If
	 * instead the kvm page fault establishes the spte before
	 * ->invalidate_page runs, kvm_unmap_hva will release it
	 * before returning.
	 *
	 * The sequence increase only needs to be seen at spin_unlock
	 * time, and not at spin_lock time.
	 *
	 * Increasing the sequence after the spin_unlock would be
	 * unsafe because the kvm page fault could then establish the
	 * pte after kvm_unmap_hva returned, without noticing the page
	 * is going to be freed.
	 */
	idx = srcu_read_lock(&kvm->srcu);
	spin_lock(&kvm->mmu_lock);
	kvm->mmu_notifier_seq++;
	need_tlb_flush = kvm_unmap_hva(kvm, address);
	spin_unlock(&kvm->mmu_lock);
	srcu_read_unlock(&kvm->srcu, idx);

	/* we have to flush the tlb before the pages can be freed */
	if (need_tlb_flush)
		kvm_flush_remote_tlbs(kvm);
}

static void kvm_mmu_notifier_change_pte(struct mmu_notifier *mn,
					struct mm_struct *mm,
					unsigned long address,
					pte_t pte)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
	int idx;

	idx = srcu_read_lock(&kvm->srcu);
	spin_lock(&kvm->mmu_lock);
	kvm->mmu_notifier_seq++;
	kvm_set_spte_hva(kvm, address, pte);
	spin_unlock(&kvm->mmu_lock);
	srcu_read_unlock(&kvm->srcu, idx);
}

static void kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
						    struct mm_struct *mm,
						    unsigned long start,
						    unsigned long end)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
	int need_tlb_flush = 0, idx;

	idx = srcu_read_lock(&kvm->srcu);
	spin_lock(&kvm->mmu_lock);
	/*
	 * The count increase must become visible at unlock time as no
	 * spte can be established without taking the mmu_lock and
	 * count is also read inside the mmu_lock critical section.
	 */
	kvm->mmu_notifier_count++;
	for (; start < end; start += PAGE_SIZE)
		need_tlb_flush |= kvm_unmap_hva(kvm, start);
	spin_unlock(&kvm->mmu_lock);
	srcu_read_unlock(&kvm->srcu, idx);

	/* we have to flush the tlb before the pages can be freed */
	if (need_tlb_flush)
		kvm_flush_remote_tlbs(kvm);
}

static void kvm_mmu_notifier_invalidate_range_end(struct mmu_notifier *mn,
						  struct mm_struct *mm,
						  unsigned long start,
						  unsigned long end)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);

	spin_lock(&kvm->mmu_lock);
	/*
	 * This sequence increase will notify the kvm page fault that
	 * the page that is going to be mapped in the spte could have
	 * been freed.
	 */
	kvm->mmu_notifier_seq++;
	/*
	 * The above sequence increase must be visible before the
	 * below count decrease but both values are read by the kvm
	 * page fault under mmu_lock spinlock so we don't need to add
	 * a smp_wmb() here in between the two.
	 */
	kvm->mmu_notifier_count--;
	spin_unlock(&kvm->mmu_lock);

	BUG_ON(kvm->mmu_notifier_count < 0);
}

static int kvm_mmu_notifier_clear_flush_young(struct mmu_notifier *mn,
					      struct mm_struct *mm,
					      unsigned long address)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
	int young, idx;

	idx = srcu_read_lock(&kvm->srcu);
	spin_lock(&kvm->mmu_lock);
	young = kvm_age_hva(kvm, address);
	spin_unlock(&kvm->mmu_lock);
	srcu_read_unlock(&kvm->srcu, idx);

	if (young)
		kvm_flush_remote_tlbs(kvm);

	return young;
}

static void kvm_mmu_notifier_release(struct mmu_notifier *mn,
				     struct mm_struct *mm)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
	int idx;

	idx = srcu_read_lock(&kvm->srcu);
	kvm_arch_flush_shadow(kvm);
	srcu_read_unlock(&kvm->srcu, idx);
}

static const struct mmu_notifier_ops kvm_mmu_notifier_ops = {
	.invalidate_page	= kvm_mmu_notifier_invalidate_page,
	.invalidate_range_start	= kvm_mmu_notifier_invalidate_range_start,
	.invalidate_range_end	= kvm_mmu_notifier_invalidate_range_end,
	.clear_flush_young	= kvm_mmu_notifier_clear_flush_young,
	.change_pte		= kvm_mmu_notifier_change_pte,
	.release		= kvm_mmu_notifier_release,
};

static int kvm_init_mmu_notifier(struct kvm *kvm)
{
	kvm->mmu_notifier.ops = &kvm_mmu_notifier_ops;
	return mmu_notifier_register(&kvm->mmu_notifier, current->mm);
}

#else  /* !(CONFIG_MMU_NOTIFIER && KVM_ARCH_WANT_MMU_NOTIFIER) */

static int kvm_init_mmu_notifier(struct kvm *kvm)
{
	return 0;
}

#endif /* CONFIG_MMU_NOTIFIER && KVM_ARCH_WANT_MMU_NOTIFIER */
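/*
 * Reader-side sketch of the mmu_notifier_seq/mmu_notifier_count protocol
 * above (editor's addition; the real check is the arch page-fault path's
 * mmu_notifier retry logic): sample the sequence before translating
 * hva->pfn, then retry under mmu_lock if an invalidation ran, or is
 * still running, in between.
 *
 *	seq = kvm->mmu_notifier_seq;
 *	smp_rmb();
 *	pfn = gfn_to_pfn(kvm, gfn);
 *	spin_lock(&kvm->mmu_lock);
 *	if (kvm->mmu_notifier_count || kvm->mmu_notifier_seq != seq)
 *		goto retry;	// raced with invalidate; don't map pfn
 *	... establish the spte ...
 *	spin_unlock(&kvm->mmu_lock);
 */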
static struct kvm *kvm_create_vm(void)
{
	int r = 0, i;
	struct kvm *kvm = kvm_arch_create_vm();

	if (IS_ERR(kvm))
		goto out;

	r = hardware_enable_all();
	if (r)
		goto out_err_nodisable;

#ifdef CONFIG_HAVE_KVM_IRQCHIP
	INIT_HLIST_HEAD(&kvm->mask_notifier_list);
	INIT_HLIST_HEAD(&kvm->irq_ack_notifier_list);
#endif

	r = -ENOMEM;
	kvm->memslots = kzalloc(sizeof(struct kvm_memslots), GFP_KERNEL);
	if (!kvm->memslots)
		goto out_err;
	if (init_srcu_struct(&kvm->srcu))
		goto out_err;
	for (i = 0; i < KVM_NR_BUSES; i++) {
		kvm->buses[i] = kzalloc(sizeof(struct kvm_io_bus),
					GFP_KERNEL);
		if (!kvm->buses[i]) {
			cleanup_srcu_struct(&kvm->srcu);
			goto out_err;
		}
	}

	r = kvm_init_mmu_notifier(kvm);
	if (r) {
		cleanup_srcu_struct(&kvm->srcu);
		goto out_err;
	}

	kvm->mm = current->mm;
	atomic_inc(&kvm->mm->mm_count);
	spin_lock_init(&kvm->mmu_lock);
	raw_spin_lock_init(&kvm->requests_lock);
	kvm_eventfd_init(kvm);
	mutex_init(&kvm->lock);
	mutex_init(&kvm->irq_lock);
	mutex_init(&kvm->slots_lock);
	atomic_set(&kvm->users_count, 1);
	spin_lock(&kvm_lock);
	list_add(&kvm->vm_list, &vm_list);
	spin_unlock(&kvm_lock);
out:
	return kvm;

out_err:
	hardware_disable_all();
out_err_nodisable:
	for (i = 0; i < KVM_NR_BUSES; i++)
		kfree(kvm->buses[i]);
	kfree(kvm->memslots);
	kfree(kvm);
	return ERR_PTR(r);
}

/*
 * Free any memory in @free but not in @dont.
 */
static void kvm_free_physmem_slot(struct kvm_memory_slot *free,
				  struct kvm_memory_slot *dont)
{
	int i;

	if (!dont || free->rmap != dont->rmap)
		vfree(free->rmap);

	if (!dont || free->dirty_bitmap != dont->dirty_bitmap)
		vfree(free->dirty_bitmap);

	for (i = 0; i < KVM_NR_PAGE_SIZES - 1; ++i) {
		if (!dont || free->lpage_info[i] != dont->lpage_info[i]) {
			vfree(free->lpage_info[i]);
			free->lpage_info[i] = NULL;
		}
	}

	free->npages = 0;
	free->dirty_bitmap = NULL;
	free->rmap = NULL;
}

void kvm_free_physmem(struct kvm *kvm)
{
	int i;
	struct kvm_memslots *slots = kvm->memslots;

	for (i = 0; i < slots->nmemslots; ++i)
		kvm_free_physmem_slot(&slots->memslots[i], NULL);

	kfree(kvm->memslots);
}

static void kvm_destroy_vm(struct kvm *kvm)
{
	int i;
	struct mm_struct *mm = kvm->mm;

	kvm_arch_sync_events(kvm);
	spin_lock(&kvm_lock);
	list_del(&kvm->vm_list);
	spin_unlock(&kvm_lock);
	kvm_free_irq_routing(kvm);
	for (i = 0; i < KVM_NR_BUSES; i++)
		kvm_io_bus_destroy(kvm->buses[i]);
	kvm_coalesced_mmio_free(kvm);
#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
	mmu_notifier_unregister(&kvm->mmu_notifier, kvm->mm);
#else
	kvm_arch_flush_shadow(kvm);
#endif
	kvm_arch_destroy_vm(kvm);
	hardware_disable_all();
	mmdrop(mm);
}

void kvm_get_kvm(struct kvm *kvm)
{
	atomic_inc(&kvm->users_count);
}
EXPORT_SYMBOL_GPL(kvm_get_kvm);

void kvm_put_kvm(struct kvm *kvm)
{
	if (atomic_dec_and_test(&kvm->users_count))
		kvm_destroy_vm(kvm);
}
EXPORT_SYMBOL_GPL(kvm_put_kvm);
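/*
 * Refcounting sketch (editor's addition): code that hands a struct kvm
 * reference to another context takes a reference first and drops it on
 * failure, as kvm_vm_ioctl_create_vcpu() below does around
 * create_vcpu_fd():
 *
 *	kvm_get_kvm(kvm);
 *	r = hand_reference_to_fd(kvm);	// hypothetical helper
 *	if (r < 0)
 *		kvm_put_kvm(kvm);	// undo on failure
 */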
static int kvm_vm_release(struct inode *inode, struct file *filp)
{
	struct kvm *kvm = filp->private_data;

	kvm_irqfd_release(kvm);

	kvm_put_kvm(kvm);
	return 0;
}

/*
 * Allocate some memory and give it an address in the guest physical address
 * space.
 *
 * Discontiguous memory is allowed, mostly for framebuffers.
 *
 * Must be called holding mmap_sem for write.
 */
int __kvm_set_memory_region(struct kvm *kvm,
			    struct kvm_userspace_memory_region *mem,
			    int user_alloc)
{
	int r, flush_shadow = 0;
	gfn_t base_gfn;
	unsigned long npages;
	unsigned long i;
	struct kvm_memory_slot *memslot;
	struct kvm_memory_slot old, new;
	struct kvm_memslots *slots, *old_memslots;

	r = -EINVAL;
	/* General sanity checks */
	if (mem->memory_size & (PAGE_SIZE - 1))
		goto out;
	if (mem->guest_phys_addr & (PAGE_SIZE - 1))
		goto out;
	if (user_alloc && (mem->userspace_addr & (PAGE_SIZE - 1)))
		goto out;
	if (mem->slot >= KVM_MEMORY_SLOTS + KVM_PRIVATE_MEM_SLOTS)
		goto out;
	if (mem->guest_phys_addr + mem->memory_size < mem->guest_phys_addr)
		goto out;

	memslot = &kvm->memslots->memslots[mem->slot];
	base_gfn = mem->guest_phys_addr >> PAGE_SHIFT;
	npages = mem->memory_size >> PAGE_SHIFT;

	r = -EINVAL;
	if (npages > KVM_MEM_MAX_NR_PAGES)
		goto out;

	if (!npages)
		mem->flags &= ~KVM_MEM_LOG_DIRTY_PAGES;

	new = old = *memslot;

	new.base_gfn = base_gfn;
	new.npages = npages;
	new.flags = mem->flags;

	/* Disallow changing a memory slot's size. */
	r = -EINVAL;
	if (npages && old.npages && npages != old.npages)
		goto out_free;

	/* Check for overlaps */
	r = -EEXIST;
	for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
		struct kvm_memory_slot *s = &kvm->memslots->memslots[i];

		if (s == memslot || !s->npages)
			continue;
		if (!((base_gfn + npages <= s->base_gfn) ||
		      (base_gfn >= s->base_gfn + s->npages)))
			goto out_free;
	}

	/* Free page dirty bitmap if unneeded */
	if (!(new.flags & KVM_MEM_LOG_DIRTY_PAGES))
		new.dirty_bitmap = NULL;

	r = -ENOMEM;

	/* Allocate if a slot is being created */
#ifndef CONFIG_S390
	if (npages && !new.rmap) {
		new.rmap = vmalloc(npages * sizeof(struct page *));

		if (!new.rmap)
			goto out_free;

		memset(new.rmap, 0, npages * sizeof(*new.rmap));

		new.user_alloc = user_alloc;
		new.userspace_addr = mem->userspace_addr;
	}
	if (!npages)
		goto skip_lpage;

	for (i = 0; i < KVM_NR_PAGE_SIZES - 1; ++i) {
		unsigned long ugfn;
		unsigned long j;
		int lpages;
		int level = i + 2;

		/* Avoid unused variable warning if no large pages */
		(void)level;

		if (new.lpage_info[i])
			continue;

		lpages = 1 + (base_gfn + npages - 1) /
			     KVM_PAGES_PER_HPAGE(level);
		lpages -= base_gfn / KVM_PAGES_PER_HPAGE(level);

		new.lpage_info[i] = vmalloc(lpages * sizeof(*new.lpage_info[i]));

		if (!new.lpage_info[i])
			goto out_free;

		memset(new.lpage_info[i], 0,
		       lpages * sizeof(*new.lpage_info[i]));

		if (base_gfn % KVM_PAGES_PER_HPAGE(level))
			new.lpage_info[i][0].write_count = 1;
		if ((base_gfn + npages) % KVM_PAGES_PER_HPAGE(level))
			new.lpage_info[i][lpages - 1].write_count = 1;
		ugfn = new.userspace_addr >> PAGE_SHIFT;
		/*
		 * If the gfn and userspace address are not aligned wrt each
		 * other, or if explicitly asked to, disable large page
		 * support for this slot
		 */
		if ((base_gfn ^ ugfn) & (KVM_PAGES_PER_HPAGE(level) - 1) ||
		    !largepages_enabled)
			for (j = 0; j < lpages; ++j)
				new.lpage_info[i][j].write_count = 1;
	}

skip_lpage:

	/* Allocate page dirty bitmap if needed */
	if ((new.flags & KVM_MEM_LOG_DIRTY_PAGES) && !new.dirty_bitmap) {
		unsigned long dirty_bytes = kvm_dirty_bitmap_bytes(&new);

		new.dirty_bitmap = vmalloc(dirty_bytes);
		if (!new.dirty_bitmap)
			goto out_free;
		memset(new.dirty_bitmap, 0, dirty_bytes);
		/* destroy any largepage mappings for dirty tracking */
		if (old.npages)
			flush_shadow = 1;
	}
#else  /* not defined CONFIG_S390 */
	new.user_alloc = user_alloc;
	if (user_alloc)
		new.userspace_addr = mem->userspace_addr;
#endif /* not defined CONFIG_S390 */

	if (!npages) {
		r = -ENOMEM;
		slots = kzalloc(sizeof(struct kvm_memslots), GFP_KERNEL);
		if (!slots)
			goto out_free;
		memcpy(slots, kvm->memslots, sizeof(struct kvm_memslots));
		if (mem->slot >= slots->nmemslots)
			slots->nmemslots = mem->slot + 1;
		slots->memslots[mem->slot].flags |= KVM_MEMSLOT_INVALID;

		old_memslots = kvm->memslots;
		rcu_assign_pointer(kvm->memslots, slots);
		synchronize_srcu_expedited(&kvm->srcu);
		/* From this point no new shadow pages pointing to a deleted
		 * memslot will be created.
		 *
		 * validation of sp->gfn happens in:
		 *	- gfn_to_hva (kvm_read_guest, gfn_to_pfn)
		 *	- kvm_is_visible_gfn (mmu_check_roots)
		 */
		kvm_arch_flush_shadow(kvm);
		kfree(old_memslots);
	}

	r = kvm_arch_prepare_memory_region(kvm, &new, old, mem, user_alloc);
	if (r)
		goto out_free;

#ifdef CONFIG_DMAR
	/* map the pages in iommu page table */
	if (npages) {
		r = kvm_iommu_map_pages(kvm, &new);
		if (r)
			goto out_free;
	}
#endif

	r = -ENOMEM;
	slots = kzalloc(sizeof(struct kvm_memslots), GFP_KERNEL);
	if (!slots)
		goto out_free;
	memcpy(slots, kvm->memslots, sizeof(struct kvm_memslots));
	if (mem->slot >= slots->nmemslots)
		slots->nmemslots = mem->slot + 1;

	/* actual memory is freed via old in kvm_free_physmem_slot below */
	if (!npages) {
		new.rmap = NULL;
		new.dirty_bitmap = NULL;
		for (i = 0; i < KVM_NR_PAGE_SIZES - 1; ++i)
			new.lpage_info[i] = NULL;
	}

	slots->memslots[mem->slot] = new;
	old_memslots = kvm->memslots;
	rcu_assign_pointer(kvm->memslots, slots);
	synchronize_srcu_expedited(&kvm->srcu);

	kvm_arch_commit_memory_region(kvm, mem, old, user_alloc);

	kvm_free_physmem_slot(&old, &new);
	kfree(old_memslots);

	if (flush_shadow)
		kvm_arch_flush_shadow(kvm);

	return 0;

out_free:
	kvm_free_physmem_slot(&new, &old);
out:
	return r;
}
EXPORT_SYMBOL_GPL(__kvm_set_memory_region);

int kvm_set_memory_region(struct kvm *kvm,
			  struct kvm_userspace_memory_region *mem,
			  int user_alloc)
{
	int r;

	mutex_lock(&kvm->slots_lock);
	r = __kvm_set_memory_region(kvm, mem, user_alloc);
	mutex_unlock(&kvm->slots_lock);
	return r;
}
EXPORT_SYMBOL_GPL(kvm_set_memory_region);
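/*
 * Caller sketch (editor's addition; field values are examples only):
 * registering 1 MiB of page-aligned user memory at guest physical
 * address 0 in slot 0, with user_alloc = 1 as the ioctl path passes it.
 *
 *	struct kvm_userspace_memory_region mem = {
 *		.slot            = 0,
 *		.flags           = 0,
 *		.guest_phys_addr = 0,
 *		.memory_size     = 1 << 20,
 *		.userspace_addr  = (unsigned long)hva,
 *	};
 *	r = kvm_set_memory_region(kvm, &mem, 1);
 */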
int kvm_vm_ioctl_set_memory_region(struct kvm *kvm,
				   struct kvm_userspace_memory_region *mem,
				   int user_alloc)
{
	if (mem->slot >= KVM_MEMORY_SLOTS)
		return -EINVAL;
	return kvm_set_memory_region(kvm, mem, user_alloc);
}

int kvm_get_dirty_log(struct kvm *kvm,
		      struct kvm_dirty_log *log, int *is_dirty)
{
	struct kvm_memory_slot *memslot;
	int r, i;
	unsigned long n;
	unsigned long any = 0;

	r = -EINVAL;
	if (log->slot >= KVM_MEMORY_SLOTS)
		goto out;

	memslot = &kvm->memslots->memslots[log->slot];
	r = -ENOENT;
	if (!memslot->dirty_bitmap)
		goto out;

	n = kvm_dirty_bitmap_bytes(memslot);

	for (i = 0; !any && i < n/sizeof(long); ++i)
		any = memslot->dirty_bitmap[i];

	r = -EFAULT;
	if (copy_to_user(log->dirty_bitmap, memslot->dirty_bitmap, n))
		goto out;

	if (any)
		*is_dirty = 1;

	r = 0;
out:
	return r;
}
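/*
 * Consumer sketch (editor's addition; helper name assumed from the le
 * bitops included above): each bit in the copied-out bitmap covers one
 * page of the slot, so a caller can walk it as:
 *
 *	for (i = 0; i < memslot->npages; ++i)
 *		if (generic_test_le_bit(i, dirty_bitmap))
 *			handle_dirty_gfn(memslot->base_gfn + i);
 */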
void kvm_disable_largepages(void)
{
	largepages_enabled = false;
}
EXPORT_SYMBOL_GPL(kvm_disable_largepages);

int is_error_page(struct page *page)
{
	return page == bad_page || page == hwpoison_page;
}
EXPORT_SYMBOL_GPL(is_error_page);

int is_error_pfn(pfn_t pfn)
{
	return pfn == bad_pfn || pfn == hwpoison_pfn;
}
EXPORT_SYMBOL_GPL(is_error_pfn);

int is_hwpoison_pfn(pfn_t pfn)
{
	return pfn == hwpoison_pfn;
}
EXPORT_SYMBOL_GPL(is_hwpoison_pfn);

static inline unsigned long bad_hva(void)
{
	return PAGE_OFFSET;
}

int kvm_is_error_hva(unsigned long addr)
{
	return addr == bad_hva();
}
EXPORT_SYMBOL_GPL(kvm_is_error_hva);

struct kvm_memory_slot *gfn_to_memslot_unaliased(struct kvm *kvm, gfn_t gfn)
{
	int i;
	struct kvm_memslots *slots = kvm_memslots(kvm);

	for (i = 0; i < slots->nmemslots; ++i) {
		struct kvm_memory_slot *memslot = &slots->memslots[i];

		if (gfn >= memslot->base_gfn
		    && gfn < memslot->base_gfn + memslot->npages)
			return memslot;
	}
	return NULL;
}
EXPORT_SYMBOL_GPL(gfn_to_memslot_unaliased);

struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn)
{
	gfn = unalias_gfn(kvm, gfn);
	return gfn_to_memslot_unaliased(kvm, gfn);
}

int kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn)
{
	int i;
	struct kvm_memslots *slots = kvm_memslots(kvm);

	gfn = unalias_gfn_instantiation(kvm, gfn);
	for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
		struct kvm_memory_slot *memslot = &slots->memslots[i];

		if (memslot->flags & KVM_MEMSLOT_INVALID)
			continue;

		if (gfn >= memslot->base_gfn
		    && gfn < memslot->base_gfn + memslot->npages)
			return 1;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_is_visible_gfn);

unsigned long kvm_host_page_size(struct kvm *kvm, gfn_t gfn)
{
	struct vm_area_struct *vma;
	unsigned long addr, size;

	size = PAGE_SIZE;

	addr = gfn_to_hva(kvm, gfn);
	if (kvm_is_error_hva(addr))
		return PAGE_SIZE;

	down_read(&current->mm->mmap_sem);
	vma = find_vma(current->mm, addr);
	if (!vma)
		goto out;

	size = vma_kernel_pagesize(vma);

out:
	up_read(&current->mm->mmap_sem);

	return size;
}

int memslot_id(struct kvm *kvm, gfn_t gfn)
{
	int i;
	struct kvm_memslots *slots = kvm_memslots(kvm);
	struct kvm_memory_slot *memslot = NULL;

	gfn = unalias_gfn(kvm, gfn);
	for (i = 0; i < slots->nmemslots; ++i) {
		memslot = &slots->memslots[i];

		if (gfn >= memslot->base_gfn
		    && gfn < memslot->base_gfn + memslot->npages)
			break;
	}

	return memslot - slots->memslots;
}

static unsigned long gfn_to_hva_memslot(struct kvm_memory_slot *slot, gfn_t gfn)
{
	return slot->userspace_addr + (gfn - slot->base_gfn) * PAGE_SIZE;
}

unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn)
{
	struct kvm_memory_slot *slot;

	gfn = unalias_gfn_instantiation(kvm, gfn);
	slot = gfn_to_memslot_unaliased(kvm, gfn);
	if (!slot || slot->flags & KVM_MEMSLOT_INVALID)
		return bad_hva();
	return gfn_to_hva_memslot(slot, gfn);
}
EXPORT_SYMBOL_GPL(gfn_to_hva);

static pfn_t hva_to_pfn(struct kvm *kvm, unsigned long addr)
{
	struct page *page[1];
	int npages;
	pfn_t pfn;

	might_sleep();

	npages = get_user_pages_fast(addr, 1, 1, page);

	if (unlikely(npages != 1)) {
		struct vm_area_struct *vma;

		if (is_hwpoison_address(addr)) {
			get_page(hwpoison_page);
			return page_to_pfn(hwpoison_page);
		}

		down_read(&current->mm->mmap_sem);
		vma = find_vma(current->mm, addr);

		if (vma == NULL || addr < vma->vm_start ||
		    !(vma->vm_flags & VM_PFNMAP)) {
			up_read(&current->mm->mmap_sem);
			get_page(bad_page);
			return page_to_pfn(bad_page);
		}

		pfn = ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
		up_read(&current->mm->mmap_sem);
		BUG_ON(!kvm_is_mmio_pfn(pfn));
	} else
		pfn = page_to_pfn(page[0]);

	return pfn;
}

pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn)
{
	unsigned long addr;

	addr = gfn_to_hva(kvm, gfn);
	if (kvm_is_error_hva(addr)) {
		get_page(bad_page);
		return page_to_pfn(bad_page);
	}

	return hva_to_pfn(kvm, addr);
}
EXPORT_SYMBOL_GPL(gfn_to_pfn);

pfn_t gfn_to_pfn_memslot(struct kvm *kvm,
			 struct kvm_memory_slot *slot, gfn_t gfn)
{
	unsigned long addr = gfn_to_hva_memslot(slot, gfn);
	return hva_to_pfn(kvm, addr);
}

struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn)
{
	pfn_t pfn;

	pfn = gfn_to_pfn(kvm, gfn);
	if (!kvm_is_mmio_pfn(pfn))
		return pfn_to_page(pfn);

	WARN_ON(kvm_is_mmio_pfn(pfn));

	get_page(bad_page);
	return bad_page;
}
EXPORT_SYMBOL_GPL(gfn_to_page);

void kvm_release_page_clean(struct page *page)
{
	kvm_release_pfn_clean(page_to_pfn(page));
}
EXPORT_SYMBOL_GPL(kvm_release_page_clean);

void kvm_release_pfn_clean(pfn_t pfn)
{
	if (!kvm_is_mmio_pfn(pfn))
		put_page(pfn_to_page(pfn));
}
EXPORT_SYMBOL_GPL(kvm_release_pfn_clean);

void kvm_release_page_dirty(struct page *page)
{
	kvm_release_pfn_dirty(page_to_pfn(page));
}
EXPORT_SYMBOL_GPL(kvm_release_page_dirty);

void kvm_release_pfn_dirty(pfn_t pfn)
{
	kvm_set_pfn_dirty(pfn);
	kvm_release_pfn_clean(pfn);
}
EXPORT_SYMBOL_GPL(kvm_release_pfn_dirty);

void kvm_set_page_dirty(struct page *page)
{
	kvm_set_pfn_dirty(page_to_pfn(page));
}
EXPORT_SYMBOL_GPL(kvm_set_page_dirty);

void kvm_set_pfn_dirty(pfn_t pfn)
{
	if (!kvm_is_mmio_pfn(pfn)) {
		struct page *page = pfn_to_page(pfn);
		if (!PageReserved(page))
			SetPageDirty(page);
	}
}
EXPORT_SYMBOL_GPL(kvm_set_pfn_dirty);

void kvm_set_pfn_accessed(pfn_t pfn)
{
	if (!kvm_is_mmio_pfn(pfn))
		mark_page_accessed(pfn_to_page(pfn));
}
EXPORT_SYMBOL_GPL(kvm_set_pfn_accessed);

void kvm_get_pfn(pfn_t pfn)
{
	if (!kvm_is_mmio_pfn(pfn))
		get_page(pfn_to_page(pfn));
}
EXPORT_SYMBOL_GPL(kvm_get_pfn);
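/*
 * Lookup/release sketch (editor's addition): gfn_to_page() returns a
 * referenced page even on failure (bad_page), so every lookup is paired
 * with one of the release helpers above:
 *
 *	page = gfn_to_page(kvm, gfn);
 *	if (is_error_page(page)) {
 *		kvm_release_page_clean(page);
 *		return -EFAULT;
 *	}
 *	... use the page ...
 *	kvm_release_page_dirty(page);	// or _clean if not written
 */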
static int next_segment(unsigned long len, int offset)
{
	if (len > PAGE_SIZE - offset)
		return PAGE_SIZE - offset;
	else
		return len;
}

int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
			int len)
{
	int r;
	unsigned long addr;

	addr = gfn_to_hva(kvm, gfn);
	if (kvm_is_error_hva(addr))
		return -EFAULT;
	r = copy_from_user(data, (void __user *)addr + offset, len);
	if (r)
		return -EFAULT;
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_read_guest_page);

int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len)
{
	gfn_t gfn = gpa >> PAGE_SHIFT;
	int seg;
	int offset = offset_in_page(gpa);
	int ret;

	while ((seg = next_segment(len, offset)) != 0) {
		ret = kvm_read_guest_page(kvm, gfn, data, offset, seg);
		if (ret < 0)
			return ret;
		offset = 0;
		len -= seg;
		data += seg;
		++gfn;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_read_guest);
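/*
 * Usage sketch (editor's addition): kvm_read_guest() hides the per-page
 * splitting above, so a caller can pull in a structure that straddles a
 * page boundary with a single call:
 *
 *	struct guest_desc desc;		// hypothetical guest-side layout
 *
 *	if (kvm_read_guest(kvm, gpa, &desc, sizeof(desc)))
 *		return -EFAULT;
 */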
int kvm_read_guest_atomic(struct kvm *kvm, gpa_t gpa, void *data,
			  unsigned long len)
{
	int r;
	unsigned long addr;
	gfn_t gfn = gpa >> PAGE_SHIFT;
	int offset = offset_in_page(gpa);

	addr = gfn_to_hva(kvm, gfn);
	if (kvm_is_error_hva(addr))
		return -EFAULT;
	pagefault_disable();
	r = __copy_from_user_inatomic(data, (void __user *)addr + offset, len);
	pagefault_enable();
	if (r)
		return -EFAULT;
	return 0;
}
EXPORT_SYMBOL(kvm_read_guest_atomic);

int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, const void *data,
			 int offset, int len)
{
	int r;
	unsigned long addr;

	addr = gfn_to_hva(kvm, gfn);
	if (kvm_is_error_hva(addr))
		return -EFAULT;
	r = copy_to_user((void __user *)addr + offset, data, len);
	if (r)
		return -EFAULT;
	mark_page_dirty(kvm, gfn);
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_write_guest_page);

int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
		    unsigned long len)
{
	gfn_t gfn = gpa >> PAGE_SHIFT;
	int seg;
	int offset = offset_in_page(gpa);
	int ret;

	while ((seg = next_segment(len, offset)) != 0) {
		ret = kvm_write_guest_page(kvm, gfn, data, offset, seg);
		if (ret < 0)
			return ret;
		offset = 0;
		len -= seg;
		data += seg;
		++gfn;
	}
	return 0;
}

int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len)
{
	return kvm_write_guest_page(kvm, gfn, empty_zero_page, offset, len);
}
EXPORT_SYMBOL_GPL(kvm_clear_guest_page);

int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len)
{
	gfn_t gfn = gpa >> PAGE_SHIFT;
	int seg;
	int offset = offset_in_page(gpa);
	int ret;

	while ((seg = next_segment(len, offset)) != 0) {
		ret = kvm_clear_guest_page(kvm, gfn, offset, seg);
		if (ret < 0)
			return ret;
		offset = 0;
		len -= seg;
		++gfn;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_clear_guest);

void mark_page_dirty(struct kvm *kvm, gfn_t gfn)
{
	struct kvm_memory_slot *memslot;

	gfn = unalias_gfn(kvm, gfn);
	memslot = gfn_to_memslot_unaliased(kvm, gfn);
	if (memslot && memslot->dirty_bitmap) {
		unsigned long rel_gfn = gfn - memslot->base_gfn;

		generic___set_le_bit(rel_gfn, memslot->dirty_bitmap);
	}
}

/*
 * The vCPU has executed a HLT instruction with in-kernel mode enabled.
 */
void kvm_vcpu_block(struct kvm_vcpu *vcpu)
{
	DEFINE_WAIT(wait);

	for (;;) {
		prepare_to_wait(&vcpu->wq, &wait, TASK_INTERRUPTIBLE);

		if (kvm_arch_vcpu_runnable(vcpu)) {
			set_bit(KVM_REQ_UNHALT, &vcpu->requests);
			break;
		}
		if (kvm_cpu_has_pending_timer(vcpu))
			break;
		if (signal_pending(current))
			break;

		schedule();
	}

	finish_wait(&vcpu->wq, &wait);
}
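/*
 * Wakeup sketch (editor's addition; the real wakeups live in the arch
 * interrupt and timer code): anything that makes a halted vcpu runnable
 * again wakes the waitqueue kvm_vcpu_block() sleeps on:
 *
 *	if (waitqueue_active(&vcpu->wq))
 *		wake_up_interruptible(&vcpu->wq);
 */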
void kvm_resched(struct kvm_vcpu *vcpu)
{
	if (!need_resched())
		return;
	cond_resched();
}
EXPORT_SYMBOL_GPL(kvm_resched);

void kvm_vcpu_on_spin(struct kvm_vcpu *vcpu)
{
	ktime_t expires;
	DEFINE_WAIT(wait);

	prepare_to_wait(&vcpu->wq, &wait, TASK_INTERRUPTIBLE);

	/* Sleep for 100 us, and hope lock-holder got scheduled */
	expires = ktime_add_ns(ktime_get(), 100000UL);
	schedule_hrtimeout(&expires, HRTIMER_MODE_ABS);

	finish_wait(&vcpu->wq, &wait);
}
EXPORT_SYMBOL_GPL(kvm_vcpu_on_spin);

static int kvm_vcpu_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct kvm_vcpu *vcpu = vma->vm_file->private_data;
	struct page *page;

	if (vmf->pgoff == 0)
		page = virt_to_page(vcpu->run);
#ifdef CONFIG_X86
	else if (vmf->pgoff == KVM_PIO_PAGE_OFFSET)
		page = virt_to_page(vcpu->arch.pio_data);
#endif
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
	else if (vmf->pgoff == KVM_COALESCED_MMIO_PAGE_OFFSET)
		page = virt_to_page(vcpu->kvm->coalesced_mmio_ring);
#endif
	else
		return VM_FAULT_SIGBUS;
	get_page(page);
	vmf->page = page;
	return 0;
}

static const struct vm_operations_struct kvm_vcpu_vm_ops = {
	.fault = kvm_vcpu_fault,
};

static int kvm_vcpu_mmap(struct file *file, struct vm_area_struct *vma)
{
	vma->vm_ops = &kvm_vcpu_vm_ops;
	return 0;
}

static int kvm_vcpu_release(struct inode *inode, struct file *filp)
{
	struct kvm_vcpu *vcpu = filp->private_data;

	kvm_put_kvm(vcpu->kvm);
	return 0;
}

static struct file_operations kvm_vcpu_fops = {
	.release	= kvm_vcpu_release,
	.unlocked_ioctl	= kvm_vcpu_ioctl,
	.compat_ioctl	= kvm_vcpu_ioctl,
	.mmap		= kvm_vcpu_mmap,
};

/*
 * Allocates an inode for the vcpu.
 */
static int create_vcpu_fd(struct kvm_vcpu *vcpu)
{
	return anon_inode_getfd("kvm-vcpu", &kvm_vcpu_fops, vcpu, O_RDWR);
}

/*
 * Creates some virtual cpus.  Good luck creating more than one.
 */
static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, u32 id)
{
	int r;
	struct kvm_vcpu *vcpu, *v;

	vcpu = kvm_arch_vcpu_create(kvm, id);
	if (IS_ERR(vcpu))
		return PTR_ERR(vcpu);

	preempt_notifier_init(&vcpu->preempt_notifier, &kvm_preempt_ops);

	r = kvm_arch_vcpu_setup(vcpu);
	if (r)
		return r;

	mutex_lock(&kvm->lock);
	if (atomic_read(&kvm->online_vcpus) == KVM_MAX_VCPUS) {
		r = -EINVAL;
		goto vcpu_destroy;
	}

	kvm_for_each_vcpu(r, v, kvm)
		if (v->vcpu_id == id) {
			r = -EEXIST;
			goto vcpu_destroy;
		}

	BUG_ON(kvm->vcpus[atomic_read(&kvm->online_vcpus)]);

	/* Now it's all set up, let userspace reach it */
	kvm_get_kvm(kvm);
	r = create_vcpu_fd(vcpu);
	if (r < 0) {
		kvm_put_kvm(kvm);
		goto vcpu_destroy;
	}

	kvm->vcpus[atomic_read(&kvm->online_vcpus)] = vcpu;
	smp_wmb();
	atomic_inc(&kvm->online_vcpus);

#ifdef CONFIG_KVM_APIC_ARCHITECTURE
	if (kvm->bsp_vcpu_id == id)
		kvm->bsp_vcpu = vcpu;
#endif
	mutex_unlock(&kvm->lock);
	return r;

vcpu_destroy:
	mutex_unlock(&kvm->lock);
	kvm_arch_vcpu_destroy(vcpu);
	return r;
}
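/*
 * Userspace-side sketch (editor's addition; error handling elided): the
 * fd returned by KVM_CREATE_VCPU is mmap()ed to reach the kvm_run page
 * allocated in kvm_vcpu_init() and served by kvm_vcpu_fault() above.
 *
 *	vcpufd = ioctl(vmfd, KVM_CREATE_VCPU, 0);
 *	size = ioctl(kvmfd, KVM_GET_VCPU_MMAP_SIZE, 0);
 *	run = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		   vcpufd, 0);
 *	ioctl(vcpufd, KVM_RUN, 0);
 */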
static int kvm_vcpu_ioctl_set_sigmask(struct kvm_vcpu *vcpu, sigset_t *sigset)
{
	if (sigset) {
		sigdelsetmask(sigset, sigmask(SIGKILL)|sigmask(SIGSTOP));
		vcpu->sigset_active = 1;
		vcpu->sigset = *sigset;
	} else
		vcpu->sigset_active = 0;
	return 0;
}

static long kvm_vcpu_ioctl(struct file *filp,
			   unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	int r;
	struct kvm_fpu *fpu = NULL;
	struct kvm_sregs *kvm_sregs = NULL;

	if (vcpu->kvm->mm != current->mm)
		return -EIO;

#if defined(CONFIG_S390) || defined(CONFIG_PPC)
	/*
	 * Special cases: vcpu ioctls that are asynchronous to vcpu execution,
	 * so vcpu_load() would break it.
	 */
	if (ioctl == KVM_S390_INTERRUPT || ioctl == KVM_INTERRUPT)
		return kvm_arch_vcpu_ioctl(filp, ioctl, arg);
#endif

	vcpu_load(vcpu);
	switch (ioctl) {
	case KVM_RUN:
		r = -EINVAL;
		if (arg)
			goto out;
		r = kvm_arch_vcpu_ioctl_run(vcpu, vcpu->run);
		break;
	case KVM_GET_REGS: {
		struct kvm_regs *kvm_regs;

		r = -ENOMEM;
		kvm_regs = kzalloc(sizeof(struct kvm_regs), GFP_KERNEL);
		if (!kvm_regs)
			goto out;
		r = kvm_arch_vcpu_ioctl_get_regs(vcpu, kvm_regs);
		if (r)
			goto out_free1;
		r = -EFAULT;
		if (copy_to_user(argp, kvm_regs, sizeof(struct kvm_regs)))
			goto out_free1;
		r = 0;
out_free1:
		kfree(kvm_regs);
		break;
	}
	case KVM_SET_REGS: {
		struct kvm_regs *kvm_regs;

		r = -ENOMEM;
		kvm_regs = kzalloc(sizeof(struct kvm_regs), GFP_KERNEL);
		if (!kvm_regs)
			goto out;
		r = -EFAULT;
		if (copy_from_user(kvm_regs, argp, sizeof(struct kvm_regs)))
			goto out_free2;
		r = kvm_arch_vcpu_ioctl_set_regs(vcpu, kvm_regs);
		if (r)
			goto out_free2;
		r = 0;
out_free2:
		kfree(kvm_regs);
		break;
	}
	case KVM_GET_SREGS: {
		kvm_sregs = kzalloc(sizeof(struct kvm_sregs), GFP_KERNEL);
		r = -ENOMEM;
		if (!kvm_sregs)
			goto out;
		r = kvm_arch_vcpu_ioctl_get_sregs(vcpu, kvm_sregs);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, kvm_sregs, sizeof(struct kvm_sregs)))
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_SREGS: {
		kvm_sregs = kmalloc(sizeof(struct kvm_sregs), GFP_KERNEL);
		r = -ENOMEM;
		if (!kvm_sregs)
			goto out;
		r = -EFAULT;
		if (copy_from_user(kvm_sregs, argp, sizeof(struct kvm_sregs)))
			goto out;
		r = kvm_arch_vcpu_ioctl_set_sregs(vcpu, kvm_sregs);
		if (r)
			goto out;
		r = 0;
		break;
	}
	case KVM_GET_MP_STATE: {
		struct kvm_mp_state mp_state;

		r = kvm_arch_vcpu_ioctl_get_mpstate(vcpu, &mp_state);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, &mp_state, sizeof mp_state))
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_MP_STATE: {
		struct kvm_mp_state mp_state;

		r = -EFAULT;
		if (copy_from_user(&mp_state, argp, sizeof mp_state))
			goto out;
		r = kvm_arch_vcpu_ioctl_set_mpstate(vcpu, &mp_state);
		if (r)
			goto out;
		r = 0;
		break;
	}
	case KVM_TRANSLATE: {
		struct kvm_translation tr;

		r = -EFAULT;
		if (copy_from_user(&tr, argp, sizeof tr))
			goto out;
		r = kvm_arch_vcpu_ioctl_translate(vcpu, &tr);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, &tr, sizeof tr))
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_GUEST_DEBUG: {
		struct kvm_guest_debug dbg;

		r = -EFAULT;
		if (copy_from_user(&dbg, argp, sizeof dbg))
			goto out;
		r = kvm_arch_vcpu_ioctl_set_guest_debug(vcpu, &dbg);
		if (r)
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_SIGNAL_MASK: {
		struct kvm_signal_mask __user *sigmask_arg = argp;
		struct kvm_signal_mask kvm_sigmask;
		sigset_t sigset, *p;

		p = NULL;
		if (argp) {
			r = -EFAULT;
			if (copy_from_user(&kvm_sigmask, argp,
					   sizeof kvm_sigmask))
				goto out;
			r = -EINVAL;
			if (kvm_sigmask.len != sizeof sigset)
				goto out;
			r = -EFAULT;
			if (copy_from_user(&sigset, sigmask_arg->sigset,
					   sizeof sigset))
				goto out;
			p = &sigset;
		}
		/* pass p, not &sigset: p is NULL (clear the mask) when no
		 * argument was supplied, and &sigset would be uninitialized */
		r = kvm_vcpu_ioctl_set_sigmask(vcpu, p);
		break;
	}
	case KVM_GET_FPU: {
		fpu = kzalloc(sizeof(struct kvm_fpu), GFP_KERNEL);
		r = -ENOMEM;
		if (!fpu)
			goto out;
		r = kvm_arch_vcpu_ioctl_get_fpu(vcpu, fpu);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, fpu, sizeof(struct kvm_fpu)))
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_FPU: {
		fpu = kmalloc(sizeof(struct kvm_fpu), GFP_KERNEL);
		r = -ENOMEM;
		if (!fpu)
			goto out;
		r = -EFAULT;
		if (copy_from_user(fpu, argp, sizeof(struct kvm_fpu)))
			goto out;
		r = kvm_arch_vcpu_ioctl_set_fpu(vcpu, fpu);
		if (r)
			goto out;
		r = 0;
		break;
	}
	default:
		r = kvm_arch_vcpu_ioctl(filp, ioctl, arg);
	}
out:
	vcpu_put(vcpu);
	kfree(fpu);
	kfree(kvm_sregs);
	return r;
}
static long kvm_vm_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm = filp->private_data;
	void __user *argp = (void __user *)arg;
	int r;

	if (kvm->mm != current->mm)
		return -EIO;
	switch (ioctl) {
	case KVM_CREATE_VCPU:
		r = kvm_vm_ioctl_create_vcpu(kvm, arg);
		if (r < 0)
			goto out;
		break;
	case KVM_SET_USER_MEMORY_REGION: {
		struct kvm_userspace_memory_region kvm_userspace_mem;

		r = -EFAULT;
		if (copy_from_user(&kvm_userspace_mem, argp,
				   sizeof kvm_userspace_mem))
			goto out;

		r = kvm_vm_ioctl_set_memory_region(kvm, &kvm_userspace_mem, 1);
		if (r)
			goto out;
		break;
	}
	case KVM_GET_DIRTY_LOG: {
		struct kvm_dirty_log log;

		r = -EFAULT;
		if (copy_from_user(&log, argp, sizeof log))
			goto out;
		r = kvm_vm_ioctl_get_dirty_log(kvm, &log);
		if (r)
			goto out;
		break;
	}
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
	case KVM_REGISTER_COALESCED_MMIO: {
		struct kvm_coalesced_mmio_zone zone;

		r = -EFAULT;
		if (copy_from_user(&zone, argp, sizeof zone))
			goto out;
		r = kvm_vm_ioctl_register_coalesced_mmio(kvm, &zone);
		if (r)
			goto out;
		r = 0;
		break;
	}
	case KVM_UNREGISTER_COALESCED_MMIO: {
		struct kvm_coalesced_mmio_zone zone;

		r = -EFAULT;
		if (copy_from_user(&zone, argp, sizeof zone))
			goto out;
		r = kvm_vm_ioctl_unregister_coalesced_mmio(kvm, &zone);
		if (r)
			goto out;
		r = 0;
		break;
	}
#endif
	case KVM_IRQFD: {
		struct kvm_irqfd data;

		r = -EFAULT;
		if (copy_from_user(&data, argp, sizeof data))
			goto out;
		r = kvm_irqfd(kvm, data.fd, data.gsi, data.flags);
		break;
	}
	case KVM_IOEVENTFD: {
		struct kvm_ioeventfd data;

		r = -EFAULT;
		if (copy_from_user(&data, argp, sizeof data))
			goto out;
		r = kvm_ioeventfd(kvm, &data);
		break;
	}
#ifdef CONFIG_KVM_APIC_ARCHITECTURE
	case KVM_SET_BOOT_CPU_ID:
		r = 0;
		mutex_lock(&kvm->lock);
		if (atomic_read(&kvm->online_vcpus) != 0)
			r = -EBUSY;
		else
			kvm->bsp_vcpu_id = arg;
		mutex_unlock(&kvm->lock);
		break;
#endif
	default:
		r = kvm_arch_vm_ioctl(filp, ioctl, arg);
		if (r == -ENOTTY)
			r = kvm_vm_ioctl_assigned_device(kvm, ioctl, arg);
	}
out:
	return r;
}

#ifdef CONFIG_COMPAT
struct compat_kvm_dirty_log {
	__u32 slot;
	__u32 padding1;
	union {
		compat_uptr_t dirty_bitmap; /* one bit per page */
		__u64 padding2;
	};
};

static long kvm_vm_compat_ioctl(struct file *filp,
				unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm = filp->private_data;
	int r;

	if (kvm->mm != current->mm)
		return -EIO;
	switch (ioctl) {
	case KVM_GET_DIRTY_LOG: {
		struct compat_kvm_dirty_log compat_log;
		struct kvm_dirty_log log;

		r = -EFAULT;
		if (copy_from_user(&compat_log, (void __user *)arg,
				   sizeof(compat_log)))
			goto out;
		log.slot	 = compat_log.slot;
		log.padding1	 = compat_log.padding1;
		log.padding2	 = compat_log.padding2;
		log.dirty_bitmap = compat_ptr(compat_log.dirty_bitmap);

		r = kvm_vm_ioctl_get_dirty_log(kvm, &log);
		if (r)
			goto out;
		break;
	}
	default:
		r = kvm_vm_ioctl(filp, ioctl, arg);
	}

out:
	return r;
}
#endif

static int kvm_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct page *page[1];
	unsigned long addr;
	int npages;
	gfn_t gfn = vmf->pgoff;
	struct kvm *kvm = vma->vm_file->private_data;

	addr = gfn_to_hva(kvm, gfn);
	if (kvm_is_error_hva(addr))
		return VM_FAULT_SIGBUS;

	npages = get_user_pages(current, current->mm, addr, 1, 1, 0, page,
				NULL);
	if (unlikely(npages != 1))
		return VM_FAULT_SIGBUS;

	vmf->page = page[0];
	return 0;
}

static const struct vm_operations_struct kvm_vm_vm_ops = {
	.fault = kvm_vm_fault,
};

static int kvm_vm_mmap(struct file *file, struct vm_area_struct *vma)
{
	vma->vm_ops = &kvm_vm_vm_ops;
	return 0;
}

static struct file_operations kvm_vm_fops = {
	.release	= kvm_vm_release,
	.unlocked_ioctl	= kvm_vm_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= kvm_vm_compat_ioctl,
#endif
	.mmap		= kvm_vm_mmap,
};

static int kvm_dev_ioctl_create_vm(void)
{
	int fd, r;
	struct kvm *kvm;

	kvm = kvm_create_vm();
	if (IS_ERR(kvm))
		return PTR_ERR(kvm);
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
	r = kvm_coalesced_mmio_init(kvm);
	if (r < 0) {
		kvm_put_kvm(kvm);
		return r;
	}
#endif
	fd = anon_inode_getfd("kvm-vm", &kvm_vm_fops, kvm, O_RDWR);
	if (fd < 0)
		kvm_put_kvm(kvm);

	return fd;
}
static long kvm_dev_ioctl_check_extension_generic(long arg)
{
	switch (arg) {
	case KVM_CAP_USER_MEMORY:
	case KVM_CAP_DESTROY_MEMORY_REGION_WORKS:
	case KVM_CAP_JOIN_MEMORY_REGIONS_WORKS:
#ifdef CONFIG_KVM_APIC_ARCHITECTURE
	case KVM_CAP_SET_BOOT_CPU_ID:
#endif
	case KVM_CAP_INTERNAL_ERROR_DATA:
		return 1;
#ifdef CONFIG_HAVE_KVM_IRQCHIP
	case KVM_CAP_IRQ_ROUTING:
		return KVM_MAX_IRQ_ROUTES;
#endif
	default:
		break;
	}
	return kvm_dev_ioctl_check_extension(arg);
}
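
/*
 * System ioctls on /dev/kvm itself.  Note that KVM_GET_VCPU_MMAP_SIZE
 * reports how much of a vcpu fd userspace should mmap: one page for
 * struct kvm_run, plus (on x86) a pio data page and, when configured,
 * a coalesced-MMIO ring page.
 */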
static long kvm_dev_ioctl(struct file *filp,
			  unsigned int ioctl, unsigned long arg)
{
	long r = -EINVAL;

	switch (ioctl) {
	case KVM_GET_API_VERSION:
		r = -EINVAL;
		if (arg)
			goto out;
		r = KVM_API_VERSION;
		break;
	case KVM_CREATE_VM:
		r = -EINVAL;
		if (arg)
			goto out;
		r = kvm_dev_ioctl_create_vm();
		break;
	case KVM_CHECK_EXTENSION:
		r = kvm_dev_ioctl_check_extension_generic(arg);
		break;
	case KVM_GET_VCPU_MMAP_SIZE:
		r = -EINVAL;
		if (arg)
			goto out;
		r = PAGE_SIZE;     /* struct kvm_run */
#ifdef CONFIG_X86
		r += PAGE_SIZE;    /* pio data page */
#endif
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
		r += PAGE_SIZE;    /* coalesced mmio ring page */
#endif
		break;
	case KVM_TRACE_ENABLE:
	case KVM_TRACE_PAUSE:
	case KVM_TRACE_DISABLE:
		r = -EOPNOTSUPP;
		break;
	default:
		return kvm_arch_dev_ioctl(filp, ioctl, arg);
	}
out:
	return r;
}

static struct file_operations kvm_chardev_ops = {
	.unlocked_ioctl = kvm_dev_ioctl,
	.compat_ioctl   = kvm_dev_ioctl,
};

static struct miscdevice kvm_dev = {
	KVM_MINOR,
	"kvm",
	&kvm_chardev_ops,
};
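
/*
 * Hardware virtualization support (e.g. VMX/SVM) is switched on and
 * off per CPU; cpus_hardware_enabled records which CPUs currently
 * have it enabled, keeping enable/disable balanced across CPU
 * hotplug, suspend/resume and reboot.
 */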
static void hardware_enable(void *junk)
{
	int cpu = raw_smp_processor_id();
	int r;

	if (cpumask_test_cpu(cpu, cpus_hardware_enabled))
		return;

	cpumask_set_cpu(cpu, cpus_hardware_enabled);

	r = kvm_arch_hardware_enable(NULL);

	if (r) {
		cpumask_clear_cpu(cpu, cpus_hardware_enabled);
		atomic_inc(&hardware_enable_failed);
		printk(KERN_INFO "kvm: enabling virtualization on "
				 "CPU%d failed\n", cpu);
	}
}

static void hardware_disable(void *junk)
{
	int cpu = raw_smp_processor_id();

	if (!cpumask_test_cpu(cpu, cpus_hardware_enabled))
		return;
	cpumask_clear_cpu(cpu, cpus_hardware_enabled);
	kvm_arch_hardware_disable(NULL);
}
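
/*
 * kvm_usage_count counts VM instances; virtualization is enabled on
 * all CPUs when the count goes 0 -> 1 and disabled again on 1 -> 0,
 * with kvm_lock serializing the transitions.
 */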
static void hardware_disable_all_nolock(void)
{
	BUG_ON(!kvm_usage_count);

	kvm_usage_count--;
	if (!kvm_usage_count)
		on_each_cpu(hardware_disable, NULL, 1);
}

static void hardware_disable_all(void)
{
	spin_lock(&kvm_lock);
	hardware_disable_all_nolock();
	spin_unlock(&kvm_lock);
}

static int hardware_enable_all(void)
{
	int r = 0;

	spin_lock(&kvm_lock);

	kvm_usage_count++;
	if (kvm_usage_count == 1) {
		atomic_set(&hardware_enable_failed, 0);
		on_each_cpu(hardware_enable, NULL, 1);

		if (atomic_read(&hardware_enable_failed)) {
			hardware_disable_all_nolock();
			r = -EBUSY;
		}
	}

	spin_unlock(&kvm_lock);

	return r;
}
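
/*
 * CPU hotplug: a dying CPU must leave virtualization mode before it
 * goes offline, and a CPU coming online must have it re-enabled, but
 * only while VMs exist (kvm_usage_count != 0).
 */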
static int kvm_cpu_hotplug(struct notifier_block *notifier, unsigned long val,
			   void *v)
{
	int cpu = (long)v;

	if (!kvm_usage_count)
		return NOTIFY_OK;

	val &= ~CPU_TASKS_FROZEN;
	switch (val) {
	case CPU_DYING:
		printk(KERN_INFO "kvm: disabling virtualization on CPU%d\n",
		       cpu);
		hardware_disable(NULL);
		break;
	case CPU_ONLINE:
		printk(KERN_INFO "kvm: enabling virtualization on CPU%d\n",
		       cpu);
		smp_call_function_single(cpu, hardware_enable, NULL, 1);
		break;
	}
	return NOTIFY_OK;
}
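
/*
 * Once kvm_rebooting is set, the arch exception fixups route faulting
 * virtualization instructions here (hardware virtualization has
 * already been torn down under us), so spin quietly until reset
 * rather than oopsing mid-reboot.
 */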
asmlinkage void kvm_handle_fault_on_reboot(void)
{
	if (kvm_rebooting)
		/* spin while reset goes on */
		while (true)
			;
	/* Fault while not rebooting.  We want the trace. */
	BUG();
}
EXPORT_SYMBOL_GPL(kvm_handle_fault_on_reboot);

static int kvm_reboot(struct notifier_block *notifier, unsigned long val,
		      void *v)
{
	/*
	 * Some (well, at least mine) BIOSes hang on reboot if
	 * in vmx root mode.
	 *
	 * And Intel TXT requires VMX to be off on all CPUs when the
	 * system shuts down.
	 */
	printk(KERN_INFO "kvm: exiting hardware virtualization\n");
	kvm_rebooting = true;
	on_each_cpu(hardware_disable, NULL, 1);
	return NOTIFY_OK;
}

static struct notifier_block kvm_reboot_notifier = {
	.notifier_call = kvm_reboot,
	.priority = 0,
};

static void kvm_io_bus_destroy(struct kvm_io_bus *bus)
{
	int i;

	for (i = 0; i < bus->dev_count; i++) {
		struct kvm_io_device *pos = bus->devs[i];

		kvm_iodevice_destructor(pos);
	}
	kfree(bus);
}

/* kvm_io_bus_write - called under the srcu read lock (kvm->srcu) */
int kvm_io_bus_write(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
		     int len, const void *val)
{
	int i;
	struct kvm_io_bus *bus;

	bus = srcu_dereference(kvm->buses[bus_idx], &kvm->srcu);
	for (i = 0; i < bus->dev_count; i++)
		if (!kvm_iodevice_write(bus->devs[i], addr, len, val))
			return 0;
	return -EOPNOTSUPP;
}

/* kvm_io_bus_read - called under the srcu read lock (kvm->srcu) */
int kvm_io_bus_read(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
		    int len, void *val)
{
	int i;
	struct kvm_io_bus *bus;

	bus = srcu_dereference(kvm->buses[bus_idx], &kvm->srcu);
	for (i = 0; i < bus->dev_count; i++)
		if (!kvm_iodevice_read(bus->devs[i], addr, len, val))
			return 0;
	return -EOPNOTSUPP;
}
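
/*
 * Writers update a bus RCU-style: copy the kvm_io_bus, modify the
 * copy, publish it with rcu_assign_pointer(), wait out readers with
 * synchronize_srcu_expedited(), then free the old bus.  slots_lock
 * serializes the writers.
 */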
/* Caller must hold slots_lock. */
int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx,
			    struct kvm_io_device *dev)
{
	struct kvm_io_bus *new_bus, *bus;

	bus = kvm->buses[bus_idx];
	if (bus->dev_count > NR_IOBUS_DEVS - 1)
		return -ENOSPC;

	new_bus = kzalloc(sizeof(struct kvm_io_bus), GFP_KERNEL);
	if (!new_bus)
		return -ENOMEM;
	memcpy(new_bus, bus, sizeof(struct kvm_io_bus));
	new_bus->devs[new_bus->dev_count++] = dev;
	rcu_assign_pointer(kvm->buses[bus_idx], new_bus);
	synchronize_srcu_expedited(&kvm->srcu);
	kfree(bus);

	return 0;
}

/* Caller must hold slots_lock. */
int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
			      struct kvm_io_device *dev)
{
	int i, r;
	struct kvm_io_bus *new_bus, *bus;

	new_bus = kzalloc(sizeof(struct kvm_io_bus), GFP_KERNEL);
	if (!new_bus)
		return -ENOMEM;

	bus = kvm->buses[bus_idx];
	memcpy(new_bus, bus, sizeof(struct kvm_io_bus));

	r = -ENOENT;
	for (i = 0; i < new_bus->dev_count; i++)
		if (new_bus->devs[i] == dev) {
			r = 0;
			new_bus->devs[i] = new_bus->devs[--new_bus->dev_count];
			break;
		}

	if (r) {
		kfree(new_bus);
		return r;
	}

	rcu_assign_pointer(kvm->buses[bus_idx], new_bus);
	synchronize_srcu_expedited(&kvm->srcu);
	kfree(bus);
	return r;
}

static struct notifier_block kvm_cpu_notifier = {
	.notifier_call = kvm_cpu_hotplug,
	.priority = 20, /* must be > scheduler priority */
};
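
/*
 * debugfs statistics: each debugfs_entries item names a u32 counter
 * at a fixed offset inside struct kvm or struct kvm_vcpu; reading a
 * stat file sums that counter across every VM on vm_list.
 */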
static int vm_stat_get(void *_offset, u64 *val)
{
	unsigned offset = (long)_offset;
	struct kvm *kvm;

	*val = 0;
	spin_lock(&kvm_lock);
	list_for_each_entry(kvm, &vm_list, vm_list)
		*val += *(u32 *)((void *)kvm + offset);
	spin_unlock(&kvm_lock);
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(vm_stat_fops, vm_stat_get, NULL, "%llu\n");

static int vcpu_stat_get(void *_offset, u64 *val)
{
	unsigned offset = (long)_offset;
	struct kvm *kvm;
	struct kvm_vcpu *vcpu;
	int i;

	*val = 0;
	spin_lock(&kvm_lock);
	list_for_each_entry(kvm, &vm_list, vm_list)
		kvm_for_each_vcpu(i, vcpu, kvm)
			*val += *(u32 *)((void *)vcpu + offset);
	spin_unlock(&kvm_lock);
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(vcpu_stat_fops, vcpu_stat_get, NULL, "%llu\n");

static const struct file_operations *stat_fops[] = {
	[KVM_STAT_VCPU] = &vcpu_stat_fops,
	[KVM_STAT_VM]   = &vm_stat_fops,
};

static void kvm_init_debug(void)
{
	struct kvm_stats_debugfs_item *p;

	kvm_debugfs_dir = debugfs_create_dir("kvm", NULL);
	for (p = debugfs_entries; p->name; ++p)
		p->dentry = debugfs_create_file(p->name, 0444, kvm_debugfs_dir,
						(void *)(long)p->offset,
						stat_fops[p->kind]);
}

static void kvm_exit_debug(void)
{
	struct kvm_stats_debugfs_item *p;

	for (p = debugfs_entries; p->name; ++p)
		debugfs_remove(p->dentry);
	debugfs_remove(kvm_debugfs_dir);
}
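
/*
 * Suspend/resume mirrors CPU hotplug: virtualization is turned off on
 * the way down and back on on the way up, but only while VMs exist.
 */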
static int kvm_suspend(struct sys_device *dev, pm_message_t state)
{
	if (kvm_usage_count)
		hardware_disable(NULL);
	return 0;
}

static int kvm_resume(struct sys_device *dev)
{
	if (kvm_usage_count)
		hardware_enable(NULL);
	return 0;
}

static struct sysdev_class kvm_sysdev_class = {
	.name = "kvm",
	.suspend = kvm_suspend,
	.resume = kvm_resume,
};

static struct sys_device kvm_sysdev = {
	.id = 0,
	.cls = &kvm_sysdev_class,
};
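
/*
 * Fallback pages, allocated in kvm_init() below: bad_page/bad_pfn are
 * handed out for guest frames with no valid backing, and hwpoison_page
 * stands in for frames whose host memory has been poisoned.
 */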
struct page *bad_page;
pfn_t bad_pfn;
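
/*
 * Preempt notifiers keep a vcpu's hardware state glued to its task:
 * arch state is loaded when the vcpu thread is scheduled onto a CPU
 * and saved again when it is scheduled out.
 */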
static inline
struct kvm_vcpu *preempt_notifier_to_vcpu(struct preempt_notifier *pn)
{
	return container_of(pn, struct kvm_vcpu, preempt_notifier);
}

static void kvm_sched_in(struct preempt_notifier *pn, int cpu)
{
	struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);

	kvm_arch_vcpu_load(vcpu, cpu);
}

static void kvm_sched_out(struct preempt_notifier *pn,
			  struct task_struct *next)
{
	struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);

	kvm_arch_vcpu_put(vcpu);
}
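
/*
 * Module init/exit.  kvm_init() is invoked by the arch-specific
 * module (presumably kvm-intel or kvm-amd on x86), which passes its
 * own ops as 'opaque' plus the vcpu size/alignment so the vcpu kmem
 * cache can satisfy fx_save alignment.  The out_free_* labels unwind
 * a partial init strictly in reverse; kvm_exit() undoes everything.
 */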
int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
	     struct module *module)
{
	int r;
	int cpu;

	r = kvm_arch_init(opaque);
	if (r)
		goto out_fail;

	bad_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
	if (bad_page == NULL) {
		r = -ENOMEM;
		goto out;
	}
	bad_pfn = page_to_pfn(bad_page);

	hwpoison_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
	if (hwpoison_page == NULL) {
		r = -ENOMEM;
		goto out_free_0;
	}
	hwpoison_pfn = page_to_pfn(hwpoison_page);

	if (!zalloc_cpumask_var(&cpus_hardware_enabled, GFP_KERNEL)) {
		r = -ENOMEM;
		goto out_free_0;
	}

	r = kvm_arch_hardware_setup();
	if (r < 0)
		goto out_free_0a;

	for_each_online_cpu(cpu) {
		smp_call_function_single(cpu,
				kvm_arch_check_processor_compat,
				&r, 1);
		if (r < 0)
			goto out_free_1;
	}

	r = register_cpu_notifier(&kvm_cpu_notifier);
	if (r)
		goto out_free_2;
	register_reboot_notifier(&kvm_reboot_notifier);

	r = sysdev_class_register(&kvm_sysdev_class);
	if (r)
		goto out_free_3;

	r = sysdev_register(&kvm_sysdev);
	if (r)
		goto out_free_4;

	/* A kmem cache lets us meet the alignment requirements of fx_save. */
	if (!vcpu_align)
		vcpu_align = __alignof__(struct kvm_vcpu);
	kvm_vcpu_cache = kmem_cache_create("kvm_vcpu", vcpu_size, vcpu_align,
					   0, NULL);
	if (!kvm_vcpu_cache) {
		r = -ENOMEM;
		goto out_free_5;
	}

	kvm_chardev_ops.owner = module;
	kvm_vm_fops.owner = module;
	kvm_vcpu_fops.owner = module;

	r = misc_register(&kvm_dev);
	if (r) {
		printk(KERN_ERR "kvm: misc device register failed\n");
		goto out_free;
	}

	kvm_preempt_ops.sched_in = kvm_sched_in;
	kvm_preempt_ops.sched_out = kvm_sched_out;

	kvm_init_debug();

	return 0;

out_free:
	kmem_cache_destroy(kvm_vcpu_cache);
out_free_5:
	sysdev_unregister(&kvm_sysdev);
out_free_4:
	sysdev_class_unregister(&kvm_sysdev_class);
out_free_3:
	unregister_reboot_notifier(&kvm_reboot_notifier);
	unregister_cpu_notifier(&kvm_cpu_notifier);
out_free_2:
out_free_1:
	kvm_arch_hardware_unsetup();
out_free_0a:
	free_cpumask_var(cpus_hardware_enabled);
out_free_0:
	if (hwpoison_page)
		__free_page(hwpoison_page);
	__free_page(bad_page);
out:
	kvm_arch_exit();
out_fail:
	return r;
}
EXPORT_SYMBOL_GPL(kvm_init);

void kvm_exit(void)
{
	kvm_exit_debug();
	misc_deregister(&kvm_dev);
	kmem_cache_destroy(kvm_vcpu_cache);
	sysdev_unregister(&kvm_sysdev);
	sysdev_class_unregister(&kvm_sysdev_class);
	unregister_reboot_notifier(&kvm_reboot_notifier);
	unregister_cpu_notifier(&kvm_cpu_notifier);
	on_each_cpu(hardware_disable, NULL, 1);
	kvm_arch_hardware_unsetup();
	kvm_arch_exit();
	free_cpumask_var(cpus_hardware_enabled);
	__free_page(hwpoison_page);
	__free_page(bad_page);
}
EXPORT_SYMBOL_GPL(kvm_exit);