kvm_main.c

  1. /*
  2. * Kernel-based Virtual Machine driver for Linux
  3. *
  4. * This module enables machines with Intel VT-x extensions to run virtual
  5. * machines without emulation or binary translation.
  6. *
  7. * Copyright (C) 2006 Qumranet, Inc.
  8. * Copyright 2010 Red Hat, Inc. and/or its affiliates.
  9. *
  10. * Authors:
  11. * Avi Kivity <avi@qumranet.com>
  12. * Yaniv Kamay <yaniv@qumranet.com>
  13. *
  14. * This work is licensed under the terms of the GNU GPL, version 2. See
  15. * the COPYING file in the top-level directory.
  16. *
  17. */
  18. #include "iodev.h"
  19. #include <linux/kvm_host.h>
  20. #include <linux/kvm.h>
  21. #include <linux/module.h>
  22. #include <linux/errno.h>
  23. #include <linux/percpu.h>
  24. #include <linux/mm.h>
  25. #include <linux/miscdevice.h>
  26. #include <linux/vmalloc.h>
  27. #include <linux/reboot.h>
  28. #include <linux/debugfs.h>
  29. #include <linux/highmem.h>
  30. #include <linux/file.h>
  31. #include <linux/syscore_ops.h>
  32. #include <linux/cpu.h>
  33. #include <linux/sched.h>
  34. #include <linux/cpumask.h>
  35. #include <linux/smp.h>
  36. #include <linux/anon_inodes.h>
  37. #include <linux/profile.h>
  38. #include <linux/kvm_para.h>
  39. #include <linux/pagemap.h>
  40. #include <linux/mman.h>
  41. #include <linux/swap.h>
  42. #include <linux/bitops.h>
  43. #include <linux/spinlock.h>
  44. #include <linux/compat.h>
  45. #include <linux/srcu.h>
  46. #include <linux/hugetlb.h>
  47. #include <linux/slab.h>
  48. #include <linux/sort.h>
  49. #include <linux/bsearch.h>
  50. #include <asm/processor.h>
  51. #include <asm/io.h>
  52. #include <asm/uaccess.h>
  53. #include <asm/pgtable.h>
  54. #include "coalesced_mmio.h"
  55. #include "async_pf.h"
  56. #define CREATE_TRACE_POINTS
  57. #include <trace/events/kvm.h>
  58. MODULE_AUTHOR("Qumranet");
  59. MODULE_LICENSE("GPL");
  60. /*
  61. * Ordering of locks:
  62. *
  63. * kvm->lock --> kvm->slots_lock --> kvm->irq_lock
  64. */
  65. DEFINE_RAW_SPINLOCK(kvm_lock);
  66. LIST_HEAD(vm_list);
  67. static cpumask_var_t cpus_hardware_enabled;
  68. static int kvm_usage_count = 0;
  69. static atomic_t hardware_enable_failed;
  70. struct kmem_cache *kvm_vcpu_cache;
  71. EXPORT_SYMBOL_GPL(kvm_vcpu_cache);
  72. static __read_mostly struct preempt_ops kvm_preempt_ops;
  73. struct dentry *kvm_debugfs_dir;
  74. static long kvm_vcpu_ioctl(struct file *file, unsigned int ioctl,
  75. unsigned long arg);
  76. #ifdef CONFIG_COMPAT
  77. static long kvm_vcpu_compat_ioctl(struct file *file, unsigned int ioctl,
  78. unsigned long arg);
  79. #endif
  80. static int hardware_enable_all(void);
  81. static void hardware_disable_all(void);
  82. static void kvm_io_bus_destroy(struct kvm_io_bus *bus);
  83. bool kvm_rebooting;
  84. EXPORT_SYMBOL_GPL(kvm_rebooting);
  85. static bool largepages_enabled = true;
  86. static struct page *hwpoison_page;
  87. static pfn_t hwpoison_pfn;
  88. struct page *fault_page;
  89. pfn_t fault_pfn;
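/*
 * Returns true if @pfn does not map normal, refcounted RAM, i.e. it is either
 * not a valid pfn or it is backed by a reserved page (MMIO, firmware, etc.).
 */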
  90. inline int kvm_is_mmio_pfn(pfn_t pfn)
  91. {
  92. if (pfn_valid(pfn)) {
  93. int reserved;
  94. struct page *tail = pfn_to_page(pfn);
  95. struct page *head = compound_trans_head(tail);
  96. reserved = PageReserved(head);
  97. if (head != tail) {
  98. /*
  99. * "head" is not a dangling pointer
  100. * (compound_trans_head takes care of that)
  101. * but the hugepage may have been split
  102. * from under us (and we may not hold a
  103. * reference count on the head page so it can
  104. * be reused before we run PageReferenced), so
  105. * we have to check PageTail before returning
  106. * what we just read.
  107. */
  108. smp_rmb();
  109. if (PageTail(tail))
  110. return reserved;
  111. }
  112. return PageReserved(tail);
  113. }
  114. return true;
  115. }
  116. /*
  117. * Switches to specified vcpu, until a matching vcpu_put()
  118. */
  119. void vcpu_load(struct kvm_vcpu *vcpu)
  120. {
  121. int cpu;
  122. mutex_lock(&vcpu->mutex);
  123. if (unlikely(vcpu->pid != current->pids[PIDTYPE_PID].pid)) {
  124. /* The thread running this VCPU changed. */
  125. struct pid *oldpid = vcpu->pid;
  126. struct pid *newpid = get_task_pid(current, PIDTYPE_PID);
  127. rcu_assign_pointer(vcpu->pid, newpid);
  128. synchronize_rcu();
  129. put_pid(oldpid);
  130. }
  131. cpu = get_cpu();
  132. preempt_notifier_register(&vcpu->preempt_notifier);
  133. kvm_arch_vcpu_load(vcpu, cpu);
  134. put_cpu();
  135. }
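/* Releases the vcpu previously acquired by vcpu_load(). */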
  136. void vcpu_put(struct kvm_vcpu *vcpu)
  137. {
  138. preempt_disable();
  139. kvm_arch_vcpu_put(vcpu);
  140. preempt_notifier_unregister(&vcpu->preempt_notifier);
  141. preempt_enable();
  142. mutex_unlock(&vcpu->mutex);
  143. }
  144. static void ack_flush(void *_completed)
  145. {
  146. }
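/*
 * Post @req to every vcpu of @kvm and IPI the CPUs still running guest code
 * so they notice it.  Returns true if at least one CPU was kicked (all online
 * CPUs are kicked when the temporary cpumask cannot be allocated).
 */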
  147. static bool make_all_cpus_request(struct kvm *kvm, unsigned int req)
  148. {
  149. int i, cpu, me;
  150. cpumask_var_t cpus;
  151. bool called = true;
  152. struct kvm_vcpu *vcpu;
  153. zalloc_cpumask_var(&cpus, GFP_ATOMIC);
  154. me = get_cpu();
  155. kvm_for_each_vcpu(i, vcpu, kvm) {
  156. kvm_make_request(req, vcpu);
  157. cpu = vcpu->cpu;
  158. /* Set ->requests bit before we read ->mode */
  159. smp_mb();
  160. if (cpus != NULL && cpu != -1 && cpu != me &&
  161. kvm_vcpu_exiting_guest_mode(vcpu) != OUTSIDE_GUEST_MODE)
  162. cpumask_set_cpu(cpu, cpus);
  163. }
  164. if (unlikely(cpus == NULL))
  165. smp_call_function_many(cpu_online_mask, ack_flush, NULL, 1);
  166. else if (!cpumask_empty(cpus))
  167. smp_call_function_many(cpus, ack_flush, NULL, 1);
  168. else
  169. called = false;
  170. put_cpu();
  171. free_cpumask_var(cpus);
  172. return called;
  173. }
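/*
 * Request a TLB flush on every vcpu; tlbs_dirty is cleared only if it did not
 * change while the request was being delivered.
 */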
  174. void kvm_flush_remote_tlbs(struct kvm *kvm)
  175. {
  176. int dirty_count = kvm->tlbs_dirty;
  177. smp_mb();
  178. if (make_all_cpus_request(kvm, KVM_REQ_TLB_FLUSH))
  179. ++kvm->stat.remote_tlb_flush;
  180. cmpxchg(&kvm->tlbs_dirty, dirty_count, 0);
  181. }
  182. void kvm_reload_remote_mmus(struct kvm *kvm)
  183. {
  184. make_all_cpus_request(kvm, KVM_REQ_MMU_RELOAD);
  185. }
  186. int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id)
  187. {
  188. struct page *page;
  189. int r;
  190. mutex_init(&vcpu->mutex);
  191. vcpu->cpu = -1;
  192. vcpu->kvm = kvm;
  193. vcpu->vcpu_id = id;
  194. vcpu->pid = NULL;
  195. init_waitqueue_head(&vcpu->wq);
  196. kvm_async_pf_vcpu_init(vcpu);
  197. page = alloc_page(GFP_KERNEL | __GFP_ZERO);
  198. if (!page) {
  199. r = -ENOMEM;
  200. goto fail;
  201. }
  202. vcpu->run = page_address(page);
  203. r = kvm_arch_vcpu_init(vcpu);
  204. if (r < 0)
  205. goto fail_free_run;
  206. return 0;
  207. fail_free_run:
  208. free_page((unsigned long)vcpu->run);
  209. fail:
  210. return r;
  211. }
  212. EXPORT_SYMBOL_GPL(kvm_vcpu_init);
  213. void kvm_vcpu_uninit(struct kvm_vcpu *vcpu)
  214. {
  215. put_pid(vcpu->pid);
  216. kvm_arch_vcpu_uninit(vcpu);
  217. free_page((unsigned long)vcpu->run);
  218. }
  219. EXPORT_SYMBOL_GPL(kvm_vcpu_uninit);
  220. #if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
  221. static inline struct kvm *mmu_notifier_to_kvm(struct mmu_notifier *mn)
  222. {
  223. return container_of(mn, struct kvm, mmu_notifier);
  224. }
  225. static void kvm_mmu_notifier_invalidate_page(struct mmu_notifier *mn,
  226. struct mm_struct *mm,
  227. unsigned long address)
  228. {
  229. struct kvm *kvm = mmu_notifier_to_kvm(mn);
  230. int need_tlb_flush, idx;
  231. /*
  232. * When ->invalidate_page runs, the linux pte has been zapped
  233. * already but the page is still allocated until
  234. * ->invalidate_page returns. So if we increase the sequence
  235. * here the kvm page fault will notice if the spte can't be
  236. * established because the page is going to be freed. If
  237. * instead the kvm page fault establishes the spte before
  238. * ->invalidate_page runs, kvm_unmap_hva will release it
  239. * before returning.
  240. *
  241. * The sequence increase only needs to be seen at spin_unlock
  242. * time, and not at spin_lock time.
  243. *
  244. * Increasing the sequence after the spin_unlock would be
  245. * unsafe because the kvm page fault could then establish the
  246. * pte after kvm_unmap_hva returned, without noticing the page
  247. * is going to be freed.
  248. */
  249. idx = srcu_read_lock(&kvm->srcu);
  250. spin_lock(&kvm->mmu_lock);
  251. kvm->mmu_notifier_seq++;
  252. need_tlb_flush = kvm_unmap_hva(kvm, address) | kvm->tlbs_dirty;
  253. spin_unlock(&kvm->mmu_lock);
  254. srcu_read_unlock(&kvm->srcu, idx);
  255. /* we have to flush the tlb before the pages can be freed */
  256. if (need_tlb_flush)
  257. kvm_flush_remote_tlbs(kvm);
  258. }
  259. static void kvm_mmu_notifier_change_pte(struct mmu_notifier *mn,
  260. struct mm_struct *mm,
  261. unsigned long address,
  262. pte_t pte)
  263. {
  264. struct kvm *kvm = mmu_notifier_to_kvm(mn);
  265. int idx;
  266. idx = srcu_read_lock(&kvm->srcu);
  267. spin_lock(&kvm->mmu_lock);
  268. kvm->mmu_notifier_seq++;
  269. kvm_set_spte_hva(kvm, address, pte);
  270. spin_unlock(&kvm->mmu_lock);
  271. srcu_read_unlock(&kvm->srcu, idx);
  272. }
  273. static void kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
  274. struct mm_struct *mm,
  275. unsigned long start,
  276. unsigned long end)
  277. {
  278. struct kvm *kvm = mmu_notifier_to_kvm(mn);
  279. int need_tlb_flush = 0, idx;
  280. idx = srcu_read_lock(&kvm->srcu);
  281. spin_lock(&kvm->mmu_lock);
  282. /*
  283. * The count increase must become visible at unlock time as no
  284. * spte can be established without taking the mmu_lock and
  285. * count is also read inside the mmu_lock critical section.
  286. */
  287. kvm->mmu_notifier_count++;
  288. for (; start < end; start += PAGE_SIZE)
  289. need_tlb_flush |= kvm_unmap_hva(kvm, start);
  290. need_tlb_flush |= kvm->tlbs_dirty;
  291. spin_unlock(&kvm->mmu_lock);
  292. srcu_read_unlock(&kvm->srcu, idx);
  293. /* we have to flush the tlb before the pages can be freed */
  294. if (need_tlb_flush)
  295. kvm_flush_remote_tlbs(kvm);
  296. }
  297. static void kvm_mmu_notifier_invalidate_range_end(struct mmu_notifier *mn,
  298. struct mm_struct *mm,
  299. unsigned long start,
  300. unsigned long end)
  301. {
  302. struct kvm *kvm = mmu_notifier_to_kvm(mn);
  303. spin_lock(&kvm->mmu_lock);
  304. /*
  305. * This sequence increase will notify the kvm page fault that
  306. * the page that is going to be mapped in the spte could have
  307. * been freed.
  308. */
  309. kvm->mmu_notifier_seq++;
  310. smp_wmb();
  311. /*
  312. * The above sequence increase must be visible before the
  313. * below count decrease, which is ensured by the smp_wmb above
  314. * in conjunction with the smp_rmb in mmu_notifier_retry().
  315. */
  316. kvm->mmu_notifier_count--;
  317. spin_unlock(&kvm->mmu_lock);
  318. BUG_ON(kvm->mmu_notifier_count < 0);
  319. }
  320. static int kvm_mmu_notifier_clear_flush_young(struct mmu_notifier *mn,
  321. struct mm_struct *mm,
  322. unsigned long address)
  323. {
  324. struct kvm *kvm = mmu_notifier_to_kvm(mn);
  325. int young, idx;
  326. idx = srcu_read_lock(&kvm->srcu);
  327. spin_lock(&kvm->mmu_lock);
  328. young = kvm_age_hva(kvm, address);
  329. spin_unlock(&kvm->mmu_lock);
  330. srcu_read_unlock(&kvm->srcu, idx);
  331. if (young)
  332. kvm_flush_remote_tlbs(kvm);
  333. return young;
  334. }
  335. static int kvm_mmu_notifier_test_young(struct mmu_notifier *mn,
  336. struct mm_struct *mm,
  337. unsigned long address)
  338. {
  339. struct kvm *kvm = mmu_notifier_to_kvm(mn);
  340. int young, idx;
  341. idx = srcu_read_lock(&kvm->srcu);
  342. spin_lock(&kvm->mmu_lock);
  343. young = kvm_test_age_hva(kvm, address);
  344. spin_unlock(&kvm->mmu_lock);
  345. srcu_read_unlock(&kvm->srcu, idx);
  346. return young;
  347. }
  348. static void kvm_mmu_notifier_release(struct mmu_notifier *mn,
  349. struct mm_struct *mm)
  350. {
  351. struct kvm *kvm = mmu_notifier_to_kvm(mn);
  352. int idx;
  353. idx = srcu_read_lock(&kvm->srcu);
  354. kvm_arch_flush_shadow(kvm);
  355. srcu_read_unlock(&kvm->srcu, idx);
  356. }
  357. static const struct mmu_notifier_ops kvm_mmu_notifier_ops = {
  358. .invalidate_page = kvm_mmu_notifier_invalidate_page,
  359. .invalidate_range_start = kvm_mmu_notifier_invalidate_range_start,
  360. .invalidate_range_end = kvm_mmu_notifier_invalidate_range_end,
  361. .clear_flush_young = kvm_mmu_notifier_clear_flush_young,
  362. .test_young = kvm_mmu_notifier_test_young,
  363. .change_pte = kvm_mmu_notifier_change_pte,
  364. .release = kvm_mmu_notifier_release,
  365. };
  366. static int kvm_init_mmu_notifier(struct kvm *kvm)
  367. {
  368. kvm->mmu_notifier.ops = &kvm_mmu_notifier_ops;
  369. return mmu_notifier_register(&kvm->mmu_notifier, current->mm);
  370. }
  371. #else /* !(CONFIG_MMU_NOTIFIER && KVM_ARCH_WANT_MMU_NOTIFIER) */
  372. static int kvm_init_mmu_notifier(struct kvm *kvm)
  373. {
  374. return 0;
  375. }
  376. #endif /* CONFIG_MMU_NOTIFIER && KVM_ARCH_WANT_MMU_NOTIFIER */
  377. static void kvm_init_memslots_id(struct kvm *kvm)
  378. {
  379. int i;
  380. struct kvm_memslots *slots = kvm->memslots;
  381. for (i = 0; i < KVM_MEM_SLOTS_NUM; i++)
  382. slots->id_to_index[i] = slots->memslots[i].id = i;
  383. }
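/*
 * Allocate and initialise a new VM: arch state, hardware virtualisation
 * (enabled on all CPUs when the first VM is created), memslots, I/O buses,
 * locks and the MMU notifier; the VM is then added to vm_list.
 */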
  384. static struct kvm *kvm_create_vm(unsigned long type)
  385. {
  386. int r, i;
  387. struct kvm *kvm = kvm_arch_alloc_vm();
  388. if (!kvm)
  389. return ERR_PTR(-ENOMEM);
  390. r = kvm_arch_init_vm(kvm, type);
  391. if (r)
  392. goto out_err_nodisable;
  393. r = hardware_enable_all();
  394. if (r)
  395. goto out_err_nodisable;
  396. #ifdef CONFIG_HAVE_KVM_IRQCHIP
  397. INIT_HLIST_HEAD(&kvm->mask_notifier_list);
  398. INIT_HLIST_HEAD(&kvm->irq_ack_notifier_list);
  399. #endif
  400. r = -ENOMEM;
  401. kvm->memslots = kzalloc(sizeof(struct kvm_memslots), GFP_KERNEL);
  402. if (!kvm->memslots)
  403. goto out_err_nosrcu;
  404. kvm_init_memslots_id(kvm);
  405. if (init_srcu_struct(&kvm->srcu))
  406. goto out_err_nosrcu;
  407. for (i = 0; i < KVM_NR_BUSES; i++) {
  408. kvm->buses[i] = kzalloc(sizeof(struct kvm_io_bus),
  409. GFP_KERNEL);
  410. if (!kvm->buses[i])
  411. goto out_err;
  412. }
  413. spin_lock_init(&kvm->mmu_lock);
  414. kvm->mm = current->mm;
  415. atomic_inc(&kvm->mm->mm_count);
  416. kvm_eventfd_init(kvm);
  417. mutex_init(&kvm->lock);
  418. mutex_init(&kvm->irq_lock);
  419. mutex_init(&kvm->slots_lock);
  420. atomic_set(&kvm->users_count, 1);
  421. r = kvm_init_mmu_notifier(kvm);
  422. if (r)
  423. goto out_err;
  424. raw_spin_lock(&kvm_lock);
  425. list_add(&kvm->vm_list, &vm_list);
  426. raw_spin_unlock(&kvm_lock);
  427. return kvm;
  428. out_err:
  429. cleanup_srcu_struct(&kvm->srcu);
  430. out_err_nosrcu:
  431. hardware_disable_all();
  432. out_err_nodisable:
  433. for (i = 0; i < KVM_NR_BUSES; i++)
  434. kfree(kvm->buses[i]);
  435. kfree(kvm->memslots);
  436. kvm_arch_free_vm(kvm);
  437. return ERR_PTR(r);
  438. }
  439. static void kvm_destroy_dirty_bitmap(struct kvm_memory_slot *memslot)
  440. {
  441. if (!memslot->dirty_bitmap)
  442. return;
  443. if (2 * kvm_dirty_bitmap_bytes(memslot) > PAGE_SIZE)
  444. vfree(memslot->dirty_bitmap_head);
  445. else
  446. kfree(memslot->dirty_bitmap_head);
  447. memslot->dirty_bitmap = NULL;
  448. memslot->dirty_bitmap_head = NULL;
  449. }
  450. /*
  451. * Free any memory in @free but not in @dont.
  452. */
  453. static void kvm_free_physmem_slot(struct kvm_memory_slot *free,
  454. struct kvm_memory_slot *dont)
  455. {
  456. if (!dont || free->rmap != dont->rmap)
  457. vfree(free->rmap);
  458. if (!dont || free->dirty_bitmap != dont->dirty_bitmap)
  459. kvm_destroy_dirty_bitmap(free);
  460. kvm_arch_free_memslot(free, dont);
  461. free->npages = 0;
  462. free->rmap = NULL;
  463. }
  464. void kvm_free_physmem(struct kvm *kvm)
  465. {
  466. struct kvm_memslots *slots = kvm->memslots;
  467. struct kvm_memory_slot *memslot;
  468. kvm_for_each_memslot(memslot, slots)
  469. kvm_free_physmem_slot(memslot, NULL);
  470. kfree(kvm->memslots);
  471. }
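/*
 * Tear down a VM once its last reference is dropped: remove it from vm_list,
 * free irq routing, I/O buses, memslots and arch state, then release the mm.
 */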
  472. static void kvm_destroy_vm(struct kvm *kvm)
  473. {
  474. int i;
  475. struct mm_struct *mm = kvm->mm;
  476. kvm_arch_sync_events(kvm);
  477. raw_spin_lock(&kvm_lock);
  478. list_del(&kvm->vm_list);
  479. raw_spin_unlock(&kvm_lock);
  480. kvm_free_irq_routing(kvm);
  481. for (i = 0; i < KVM_NR_BUSES; i++)
  482. kvm_io_bus_destroy(kvm->buses[i]);
  483. kvm_coalesced_mmio_free(kvm);
  484. #if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
  485. mmu_notifier_unregister(&kvm->mmu_notifier, kvm->mm);
  486. #else
  487. kvm_arch_flush_shadow(kvm);
  488. #endif
  489. kvm_arch_destroy_vm(kvm);
  490. kvm_free_physmem(kvm);
  491. cleanup_srcu_struct(&kvm->srcu);
  492. kvm_arch_free_vm(kvm);
  493. hardware_disable_all();
  494. mmdrop(mm);
  495. }
  496. void kvm_get_kvm(struct kvm *kvm)
  497. {
  498. atomic_inc(&kvm->users_count);
  499. }
  500. EXPORT_SYMBOL_GPL(kvm_get_kvm);
  501. void kvm_put_kvm(struct kvm *kvm)
  502. {
  503. if (atomic_dec_and_test(&kvm->users_count))
  504. kvm_destroy_vm(kvm);
  505. }
  506. EXPORT_SYMBOL_GPL(kvm_put_kvm);
  507. static int kvm_vm_release(struct inode *inode, struct file *filp)
  508. {
  509. struct kvm *kvm = filp->private_data;
  510. kvm_irqfd_release(kvm);
  511. kvm_put_kvm(kvm);
  512. return 0;
  513. }
  514. /*
  515. * Allocation size is twice as large as the actual dirty bitmap size.
  516. * This makes it possible to do double buffering: see x86's
  517. * kvm_vm_ioctl_get_dirty_log().
  518. */
  519. static int kvm_create_dirty_bitmap(struct kvm_memory_slot *memslot)
  520. {
  521. #ifndef CONFIG_S390
  522. unsigned long dirty_bytes = 2 * kvm_dirty_bitmap_bytes(memslot);
  523. if (dirty_bytes > PAGE_SIZE)
  524. memslot->dirty_bitmap = vzalloc(dirty_bytes);
  525. else
  526. memslot->dirty_bitmap = kzalloc(dirty_bytes, GFP_KERNEL);
  527. if (!memslot->dirty_bitmap)
  528. return -ENOMEM;
  529. memslot->dirty_bitmap_head = memslot->dirty_bitmap;
  530. memslot->nr_dirty_pages = 0;
  531. #endif /* !CONFIG_S390 */
  532. return 0;
  533. }
  534. static int cmp_memslot(const void *slot1, const void *slot2)
  535. {
  536. struct kvm_memory_slot *s1, *s2;
  537. s1 = (struct kvm_memory_slot *)slot1;
  538. s2 = (struct kvm_memory_slot *)slot2;
  539. if (s1->npages < s2->npages)
  540. return 1;
  541. if (s1->npages > s2->npages)
  542. return -1;
  543. return 0;
  544. }
  545. /*
  546. * Sort the memslots based on their size, so the larger slots
  547. * will get a better fit.
  548. */
  549. static void sort_memslots(struct kvm_memslots *slots)
  550. {
  551. int i;
  552. sort(slots->memslots, KVM_MEM_SLOTS_NUM,
  553. sizeof(struct kvm_memory_slot), cmp_memslot, NULL);
  554. for (i = 0; i < KVM_MEM_SLOTS_NUM; i++)
  555. slots->id_to_index[slots->memslots[i].id] = i;
  556. }
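/*
 * Install @new over the existing slot with the same id, re-sorting the array
 * if the slot's size changed, and bump the generation so stale cached
 * lookups can be detected.
 */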
  557. void update_memslots(struct kvm_memslots *slots, struct kvm_memory_slot *new)
  558. {
  559. if (new) {
  560. int id = new->id;
  561. struct kvm_memory_slot *old = id_to_memslot(slots, id);
  562. unsigned long npages = old->npages;
  563. *old = *new;
  564. if (new->npages != npages)
  565. sort_memslots(slots);
  566. }
  567. slots->generation++;
  568. }
  569. /*
  570. * Allocate some memory and give it an address in the guest physical address
  571. * space.
  572. *
  573. * Discontiguous memory is allowed, mostly for framebuffers.
  574. *
  575. * Must be called holding mmap_sem for write.
  576. */
  577. int __kvm_set_memory_region(struct kvm *kvm,
  578. struct kvm_userspace_memory_region *mem,
  579. int user_alloc)
  580. {
  581. int r;
  582. gfn_t base_gfn;
  583. unsigned long npages;
  584. unsigned long i;
  585. struct kvm_memory_slot *memslot;
  586. struct kvm_memory_slot old, new;
  587. struct kvm_memslots *slots, *old_memslots;
  588. r = -EINVAL;
  589. /* General sanity checks */
  590. if (mem->memory_size & (PAGE_SIZE - 1))
  591. goto out;
  592. if (mem->guest_phys_addr & (PAGE_SIZE - 1))
  593. goto out;
  594. /* We can read the guest memory with __xxx_user() later on. */
  595. if (user_alloc &&
  596. ((mem->userspace_addr & (PAGE_SIZE - 1)) ||
  597. !access_ok(VERIFY_WRITE,
  598. (void __user *)(unsigned long)mem->userspace_addr,
  599. mem->memory_size)))
  600. goto out;
  601. if (mem->slot >= KVM_MEM_SLOTS_NUM)
  602. goto out;
  603. if (mem->guest_phys_addr + mem->memory_size < mem->guest_phys_addr)
  604. goto out;
  605. memslot = id_to_memslot(kvm->memslots, mem->slot);
  606. base_gfn = mem->guest_phys_addr >> PAGE_SHIFT;
  607. npages = mem->memory_size >> PAGE_SHIFT;
  608. r = -EINVAL;
  609. if (npages > KVM_MEM_MAX_NR_PAGES)
  610. goto out;
  611. if (!npages)
  612. mem->flags &= ~KVM_MEM_LOG_DIRTY_PAGES;
  613. new = old = *memslot;
  614. new.id = mem->slot;
  615. new.base_gfn = base_gfn;
  616. new.npages = npages;
  617. new.flags = mem->flags;
  618. /* Disallow changing a memory slot's size. */
  619. r = -EINVAL;
  620. if (npages && old.npages && npages != old.npages)
  621. goto out_free;
  622. /* Check for overlaps */
  623. r = -EEXIST;
  624. for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
  625. struct kvm_memory_slot *s = &kvm->memslots->memslots[i];
  626. if (s == memslot || !s->npages)
  627. continue;
  628. if (!((base_gfn + npages <= s->base_gfn) ||
  629. (base_gfn >= s->base_gfn + s->npages)))
  630. goto out_free;
  631. }
  632. /* Free page dirty bitmap if unneeded */
  633. if (!(new.flags & KVM_MEM_LOG_DIRTY_PAGES))
  634. new.dirty_bitmap = NULL;
  635. r = -ENOMEM;
  636. /* Allocate if a slot is being created */
  637. if (npages && !old.npages) {
  638. new.user_alloc = user_alloc;
  639. new.userspace_addr = mem->userspace_addr;
  640. #ifndef CONFIG_S390
  641. new.rmap = vzalloc(npages * sizeof(*new.rmap));
  642. if (!new.rmap)
  643. goto out_free;
  644. #endif /* not defined CONFIG_S390 */
  645. if (kvm_arch_create_memslot(&new, npages))
  646. goto out_free;
  647. }
  648. /* Allocate page dirty bitmap if needed */
  649. if ((new.flags & KVM_MEM_LOG_DIRTY_PAGES) && !new.dirty_bitmap) {
  650. if (kvm_create_dirty_bitmap(&new) < 0)
  651. goto out_free;
  652. /* destroy any largepage mappings for dirty tracking */
  653. }
  654. if (!npages) {
  655. struct kvm_memory_slot *slot;
  656. r = -ENOMEM;
  657. slots = kmemdup(kvm->memslots, sizeof(struct kvm_memslots),
  658. GFP_KERNEL);
  659. if (!slots)
  660. goto out_free;
  661. slot = id_to_memslot(slots, mem->slot);
  662. slot->flags |= KVM_MEMSLOT_INVALID;
  663. update_memslots(slots, NULL);
  664. old_memslots = kvm->memslots;
  665. rcu_assign_pointer(kvm->memslots, slots);
  666. synchronize_srcu_expedited(&kvm->srcu);
  667. /* From this point no new shadow pages pointing to a deleted
  668. * memslot will be created.
  669. *
  670. * validation of sp->gfn happens in:
  671. * - gfn_to_hva (kvm_read_guest, gfn_to_pfn)
  672. * - kvm_is_visible_gfn (mmu_check_roots)
  673. */
  674. kvm_arch_flush_shadow(kvm);
  675. kfree(old_memslots);
  676. }
  677. r = kvm_arch_prepare_memory_region(kvm, &new, old, mem, user_alloc);
  678. if (r)
  679. goto out_free;
  680. /* map the pages in iommu page table */
  681. if (npages) {
  682. r = kvm_iommu_map_pages(kvm, &new);
  683. if (r)
  684. goto out_free;
  685. }
  686. r = -ENOMEM;
  687. slots = kmemdup(kvm->memslots, sizeof(struct kvm_memslots),
  688. GFP_KERNEL);
  689. if (!slots)
  690. goto out_free;
  691. /* actual memory is freed via old in kvm_free_physmem_slot below */
  692. if (!npages) {
  693. new.rmap = NULL;
  694. new.dirty_bitmap = NULL;
  695. memset(&new.arch, 0, sizeof(new.arch));
  696. }
  697. update_memslots(slots, &new);
  698. old_memslots = kvm->memslots;
  699. rcu_assign_pointer(kvm->memslots, slots);
  700. synchronize_srcu_expedited(&kvm->srcu);
  701. kvm_arch_commit_memory_region(kvm, mem, old, user_alloc);
  702. /*
  703. * If the new memory slot is created, we need to clear all
  704. * mmio sptes.
  705. */
  706. if (npages && old.base_gfn != mem->guest_phys_addr >> PAGE_SHIFT)
  707. kvm_arch_flush_shadow(kvm);
  708. kvm_free_physmem_slot(&old, &new);
  709. kfree(old_memslots);
  710. return 0;
  711. out_free:
  712. kvm_free_physmem_slot(&new, &old);
  713. out:
  714. return r;
  715. }
  716. EXPORT_SYMBOL_GPL(__kvm_set_memory_region);
  717. int kvm_set_memory_region(struct kvm *kvm,
  718. struct kvm_userspace_memory_region *mem,
  719. int user_alloc)
  720. {
  721. int r;
  722. mutex_lock(&kvm->slots_lock);
  723. r = __kvm_set_memory_region(kvm, mem, user_alloc);
  724. mutex_unlock(&kvm->slots_lock);
  725. return r;
  726. }
  727. EXPORT_SYMBOL_GPL(kvm_set_memory_region);
  728. int kvm_vm_ioctl_set_memory_region(struct kvm *kvm,
  729. struct kvm_userspace_memory_region *mem,
  731. int user_alloc)
  732. {
  733. if (mem->slot >= KVM_MEMORY_SLOTS)
  734. return -EINVAL;
  735. return kvm_set_memory_region(kvm, mem, user_alloc);
  736. }
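/*
 * Copy the slot's dirty bitmap to userspace and report via @is_dirty whether
 * any bit in it is set.
 */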
  737. int kvm_get_dirty_log(struct kvm *kvm,
  738. struct kvm_dirty_log *log, int *is_dirty)
  739. {
  740. struct kvm_memory_slot *memslot;
  741. int r, i;
  742. unsigned long n;
  743. unsigned long any = 0;
  744. r = -EINVAL;
  745. if (log->slot >= KVM_MEMORY_SLOTS)
  746. goto out;
  747. memslot = id_to_memslot(kvm->memslots, log->slot);
  748. r = -ENOENT;
  749. if (!memslot->dirty_bitmap)
  750. goto out;
  751. n = kvm_dirty_bitmap_bytes(memslot);
  752. for (i = 0; !any && i < n/sizeof(long); ++i)
  753. any = memslot->dirty_bitmap[i];
  754. r = -EFAULT;
  755. if (copy_to_user(log->dirty_bitmap, memslot->dirty_bitmap, n))
  756. goto out;
  757. if (any)
  758. *is_dirty = 1;
  759. r = 0;
  760. out:
  761. return r;
  762. }
  763. bool kvm_largepages_enabled(void)
  764. {
  765. return largepages_enabled;
  766. }
  767. void kvm_disable_largepages(void)
  768. {
  769. largepages_enabled = false;
  770. }
  771. EXPORT_SYMBOL_GPL(kvm_disable_largepages);
  772. int is_error_page(struct page *page)
  773. {
  774. return page == bad_page || page == hwpoison_page || page == fault_page;
  775. }
  776. EXPORT_SYMBOL_GPL(is_error_page);
  777. int is_error_pfn(pfn_t pfn)
  778. {
  779. return pfn == bad_pfn || pfn == hwpoison_pfn || pfn == fault_pfn;
  780. }
  781. EXPORT_SYMBOL_GPL(is_error_pfn);
  782. int is_hwpoison_pfn(pfn_t pfn)
  783. {
  784. return pfn == hwpoison_pfn;
  785. }
  786. EXPORT_SYMBOL_GPL(is_hwpoison_pfn);
  787. int is_fault_pfn(pfn_t pfn)
  788. {
  789. return pfn == fault_pfn;
  790. }
  791. EXPORT_SYMBOL_GPL(is_fault_pfn);
  792. int is_noslot_pfn(pfn_t pfn)
  793. {
  794. return pfn == bad_pfn;
  795. }
  796. EXPORT_SYMBOL_GPL(is_noslot_pfn);
  797. int is_invalid_pfn(pfn_t pfn)
  798. {
  799. return pfn == hwpoison_pfn || pfn == fault_pfn;
  800. }
  801. EXPORT_SYMBOL_GPL(is_invalid_pfn);
  802. static inline unsigned long bad_hva(void)
  803. {
  804. return PAGE_OFFSET;
  805. }
  806. int kvm_is_error_hva(unsigned long addr)
  807. {
  808. return addr == bad_hva();
  809. }
  810. EXPORT_SYMBOL_GPL(kvm_is_error_hva);
  811. struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn)
  812. {
  813. return __gfn_to_memslot(kvm_memslots(kvm), gfn);
  814. }
  815. EXPORT_SYMBOL_GPL(gfn_to_memslot);
  816. int kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn)
  817. {
  818. struct kvm_memory_slot *memslot = gfn_to_memslot(kvm, gfn);
  819. if (!memslot || memslot->id >= KVM_MEMORY_SLOTS ||
  820. memslot->flags & KVM_MEMSLOT_INVALID)
  821. return 0;
  822. return 1;
  823. }
  824. EXPORT_SYMBOL_GPL(kvm_is_visible_gfn);
  825. unsigned long kvm_host_page_size(struct kvm *kvm, gfn_t gfn)
  826. {
  827. struct vm_area_struct *vma;
  828. unsigned long addr, size;
  829. size = PAGE_SIZE;
  830. addr = gfn_to_hva(kvm, gfn);
  831. if (kvm_is_error_hva(addr))
  832. return PAGE_SIZE;
  833. down_read(&current->mm->mmap_sem);
  834. vma = find_vma(current->mm, addr);
  835. if (!vma)
  836. goto out;
  837. size = vma_kernel_pagesize(vma);
  838. out:
  839. up_read(&current->mm->mmap_sem);
  840. return size;
  841. }
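/*
 * Return the userspace address of @gfn within @slot, or bad_hva() for an
 * invalid slot; optionally report how many pages remain in the slot from @gfn.
 */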
  842. static unsigned long gfn_to_hva_many(struct kvm_memory_slot *slot, gfn_t gfn,
  843. gfn_t *nr_pages)
  844. {
  845. if (!slot || slot->flags & KVM_MEMSLOT_INVALID)
  846. return bad_hva();
  847. if (nr_pages)
  848. *nr_pages = slot->npages - (gfn - slot->base_gfn);
  849. return gfn_to_hva_memslot(slot, gfn);
  850. }
  851. unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn)
  852. {
  853. return gfn_to_hva_many(gfn_to_memslot(kvm, gfn), gfn, NULL);
  854. }
  855. EXPORT_SYMBOL_GPL(gfn_to_hva);
  856. static pfn_t get_fault_pfn(void)
  857. {
  858. get_page(fault_page);
  859. return fault_pfn;
  860. }
  861. int get_user_page_nowait(struct task_struct *tsk, struct mm_struct *mm,
  862. unsigned long start, int write, struct page **page)
  863. {
  864. int flags = FOLL_TOUCH | FOLL_NOWAIT | FOLL_HWPOISON | FOLL_GET;
  865. if (write)
  866. flags |= FOLL_WRITE;
  867. return __get_user_pages(tsk, mm, start, 1, flags, page, NULL, NULL);
  868. }
  869. static inline int check_user_page_hwpoison(unsigned long addr)
  870. {
  871. int rc, flags = FOLL_TOUCH | FOLL_HWPOISON | FOLL_WRITE;
  872. rc = __get_user_pages(current, current->mm, addr, 1,
  873. flags, NULL, NULL, NULL);
  874. return rc == -EHWPOISON;
  875. }
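/*
 * Translate a host virtual address to a pfn.  In atomic context only the
 * fast gup path is tried; otherwise the page is faulted in, and when an
 * asynchronous fault is possible *async is set instead of waiting.
 * Hwpoisoned addresses return the hwpoison pfn, VM_PFNMAP areas are
 * translated directly, and anything else that cannot be resolved returns
 * the fault pfn.
 */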
  876. static pfn_t hva_to_pfn(struct kvm *kvm, unsigned long addr, bool atomic,
  877. bool *async, bool write_fault, bool *writable)
  878. {
  879. struct page *page[1];
  880. int npages = 0;
  881. pfn_t pfn;
  882. /* we can do it either atomically or asynchronously, not both */
  883. BUG_ON(atomic && async);
  884. BUG_ON(!write_fault && !writable);
  885. if (writable)
  886. *writable = true;
  887. if (atomic || async)
  888. npages = __get_user_pages_fast(addr, 1, 1, page);
  889. if (unlikely(npages != 1) && !atomic) {
  890. might_sleep();
  891. if (writable)
  892. *writable = write_fault;
  893. if (async) {
  894. down_read(&current->mm->mmap_sem);
  895. npages = get_user_page_nowait(current, current->mm,
  896. addr, write_fault, page);
  897. up_read(&current->mm->mmap_sem);
  898. } else
  899. npages = get_user_pages_fast(addr, 1, write_fault,
  900. page);
  901. /* map read fault as writable if possible */
  902. if (unlikely(!write_fault) && npages == 1) {
  903. struct page *wpage[1];
  904. npages = __get_user_pages_fast(addr, 1, 1, wpage);
  905. if (npages == 1) {
  906. *writable = true;
  907. put_page(page[0]);
  908. page[0] = wpage[0];
  909. }
  910. npages = 1;
  911. }
  912. }
  913. if (unlikely(npages != 1)) {
  914. struct vm_area_struct *vma;
  915. if (atomic)
  916. return get_fault_pfn();
  917. down_read(&current->mm->mmap_sem);
  918. if (npages == -EHWPOISON ||
  919. (!async && check_user_page_hwpoison(addr))) {
  920. up_read(&current->mm->mmap_sem);
  921. get_page(hwpoison_page);
  922. return page_to_pfn(hwpoison_page);
  923. }
  924. vma = find_vma_intersection(current->mm, addr, addr+1);
  925. if (vma == NULL)
  926. pfn = get_fault_pfn();
  927. else if ((vma->vm_flags & VM_PFNMAP)) {
  928. pfn = ((addr - vma->vm_start) >> PAGE_SHIFT) +
  929. vma->vm_pgoff;
  930. BUG_ON(!kvm_is_mmio_pfn(pfn));
  931. } else {
  932. if (async && (vma->vm_flags & VM_WRITE))
  933. *async = true;
  934. pfn = get_fault_pfn();
  935. }
  936. up_read(&current->mm->mmap_sem);
  937. } else
  938. pfn = page_to_pfn(page[0]);
  939. return pfn;
  940. }
  941. pfn_t hva_to_pfn_atomic(struct kvm *kvm, unsigned long addr)
  942. {
  943. return hva_to_pfn(kvm, addr, true, NULL, true, NULL);
  944. }
  945. EXPORT_SYMBOL_GPL(hva_to_pfn_atomic);
  946. static pfn_t __gfn_to_pfn(struct kvm *kvm, gfn_t gfn, bool atomic, bool *async,
  947. bool write_fault, bool *writable)
  948. {
  949. unsigned long addr;
  950. if (async)
  951. *async = false;
  952. addr = gfn_to_hva(kvm, gfn);
  953. if (kvm_is_error_hva(addr)) {
  954. get_page(bad_page);
  955. return page_to_pfn(bad_page);
  956. }
  957. return hva_to_pfn(kvm, addr, atomic, async, write_fault, writable);
  958. }
  959. pfn_t gfn_to_pfn_atomic(struct kvm *kvm, gfn_t gfn)
  960. {
  961. return __gfn_to_pfn(kvm, gfn, true, NULL, true, NULL);
  962. }
  963. EXPORT_SYMBOL_GPL(gfn_to_pfn_atomic);
  964. pfn_t gfn_to_pfn_async(struct kvm *kvm, gfn_t gfn, bool *async,
  965. bool write_fault, bool *writable)
  966. {
  967. return __gfn_to_pfn(kvm, gfn, false, async, write_fault, writable);
  968. }
  969. EXPORT_SYMBOL_GPL(gfn_to_pfn_async);
  970. pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn)
  971. {
  972. return __gfn_to_pfn(kvm, gfn, false, NULL, true, NULL);
  973. }
  974. EXPORT_SYMBOL_GPL(gfn_to_pfn);
  975. pfn_t gfn_to_pfn_prot(struct kvm *kvm, gfn_t gfn, bool write_fault,
  976. bool *writable)
  977. {
  978. return __gfn_to_pfn(kvm, gfn, false, NULL, write_fault, writable);
  979. }
  980. EXPORT_SYMBOL_GPL(gfn_to_pfn_prot);
  981. pfn_t gfn_to_pfn_memslot(struct kvm *kvm,
  982. struct kvm_memory_slot *slot, gfn_t gfn)
  983. {
  984. unsigned long addr = gfn_to_hva_memslot(slot, gfn);
  985. return hva_to_pfn(kvm, addr, false, NULL, true, NULL);
  986. }
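/*
 * Pin up to @nr_pages consecutive guest pages starting at @gfn without
 * sleeping.  Returns the number of pages pinned, 0 if the memslot does not
 * cover the whole range, or -1 for a bad address.
 */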
  987. int gfn_to_page_many_atomic(struct kvm *kvm, gfn_t gfn, struct page **pages,
  988. int nr_pages)
  989. {
  990. unsigned long addr;
  991. gfn_t entry;
  992. addr = gfn_to_hva_many(gfn_to_memslot(kvm, gfn), gfn, &entry);
  993. if (kvm_is_error_hva(addr))
  994. return -1;
  995. if (entry < nr_pages)
  996. return 0;
  997. return __get_user_pages_fast(addr, nr_pages, 1, pages);
  998. }
  999. EXPORT_SYMBOL_GPL(gfn_to_page_many_atomic);
  1000. struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn)
  1001. {
  1002. pfn_t pfn;
  1003. pfn = gfn_to_pfn(kvm, gfn);
  1004. if (!kvm_is_mmio_pfn(pfn))
  1005. return pfn_to_page(pfn);
  1006. WARN_ON(kvm_is_mmio_pfn(pfn));
  1007. get_page(bad_page);
  1008. return bad_page;
  1009. }
  1010. EXPORT_SYMBOL_GPL(gfn_to_page);
  1011. void kvm_release_page_clean(struct page *page)
  1012. {
  1013. kvm_release_pfn_clean(page_to_pfn(page));
  1014. }
  1015. EXPORT_SYMBOL_GPL(kvm_release_page_clean);
  1016. void kvm_release_pfn_clean(pfn_t pfn)
  1017. {
  1018. if (!kvm_is_mmio_pfn(pfn))
  1019. put_page(pfn_to_page(pfn));
  1020. }
  1021. EXPORT_SYMBOL_GPL(kvm_release_pfn_clean);
  1022. void kvm_release_page_dirty(struct page *page)
  1023. {
  1024. kvm_release_pfn_dirty(page_to_pfn(page));
  1025. }
  1026. EXPORT_SYMBOL_GPL(kvm_release_page_dirty);
  1027. void kvm_release_pfn_dirty(pfn_t pfn)
  1028. {
  1029. kvm_set_pfn_dirty(pfn);
  1030. kvm_release_pfn_clean(pfn);
  1031. }
  1032. EXPORT_SYMBOL_GPL(kvm_release_pfn_dirty);
  1033. void kvm_set_page_dirty(struct page *page)
  1034. {
  1035. kvm_set_pfn_dirty(page_to_pfn(page));
  1036. }
  1037. EXPORT_SYMBOL_GPL(kvm_set_page_dirty);
  1038. void kvm_set_pfn_dirty(pfn_t pfn)
  1039. {
  1040. if (!kvm_is_mmio_pfn(pfn)) {
  1041. struct page *page = pfn_to_page(pfn);
  1042. if (!PageReserved(page))
  1043. SetPageDirty(page);
  1044. }
  1045. }
  1046. EXPORT_SYMBOL_GPL(kvm_set_pfn_dirty);
  1047. void kvm_set_pfn_accessed(pfn_t pfn)
  1048. {
  1049. if (!kvm_is_mmio_pfn(pfn))
  1050. mark_page_accessed(pfn_to_page(pfn));
  1051. }
  1052. EXPORT_SYMBOL_GPL(kvm_set_pfn_accessed);
  1053. void kvm_get_pfn(pfn_t pfn)
  1054. {
  1055. if (!kvm_is_mmio_pfn(pfn))
  1056. get_page(pfn_to_page(pfn));
  1057. }
  1058. EXPORT_SYMBOL_GPL(kvm_get_pfn);
  1059. static int next_segment(unsigned long len, int offset)
  1060. {
  1061. if (len > PAGE_SIZE - offset)
  1062. return PAGE_SIZE - offset;
  1063. else
  1064. return len;
  1065. }
  1066. int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
  1067. int len)
  1068. {
  1069. int r;
  1070. unsigned long addr;
  1071. addr = gfn_to_hva(kvm, gfn);
  1072. if (kvm_is_error_hva(addr))
  1073. return -EFAULT;
  1074. r = __copy_from_user(data, (void __user *)addr + offset, len);
  1075. if (r)
  1076. return -EFAULT;
  1077. return 0;
  1078. }
  1079. EXPORT_SYMBOL_GPL(kvm_read_guest_page);
  1080. int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len)
  1081. {
  1082. gfn_t gfn = gpa >> PAGE_SHIFT;
  1083. int seg;
  1084. int offset = offset_in_page(gpa);
  1085. int ret;
  1086. while ((seg = next_segment(len, offset)) != 0) {
  1087. ret = kvm_read_guest_page(kvm, gfn, data, offset, seg);
  1088. if (ret < 0)
  1089. return ret;
  1090. offset = 0;
  1091. len -= seg;
  1092. data += seg;
  1093. ++gfn;
  1094. }
  1095. return 0;
  1096. }
  1097. EXPORT_SYMBOL_GPL(kvm_read_guest);
  1098. int kvm_read_guest_atomic(struct kvm *kvm, gpa_t gpa, void *data,
  1099. unsigned long len)
  1100. {
  1101. int r;
  1102. unsigned long addr;
  1103. gfn_t gfn = gpa >> PAGE_SHIFT;
  1104. int offset = offset_in_page(gpa);
  1105. addr = gfn_to_hva(kvm, gfn);
  1106. if (kvm_is_error_hva(addr))
  1107. return -EFAULT;
  1108. pagefault_disable();
  1109. r = __copy_from_user_inatomic(data, (void __user *)addr + offset, len);
  1110. pagefault_enable();
  1111. if (r)
  1112. return -EFAULT;
  1113. return 0;
  1114. }
  1115. EXPORT_SYMBOL(kvm_read_guest_atomic);
  1116. int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, const void *data,
  1117. int offset, int len)
  1118. {
  1119. int r;
  1120. unsigned long addr;
  1121. addr = gfn_to_hva(kvm, gfn);
  1122. if (kvm_is_error_hva(addr))
  1123. return -EFAULT;
  1124. r = __copy_to_user((void __user *)addr + offset, data, len);
  1125. if (r)
  1126. return -EFAULT;
  1127. mark_page_dirty(kvm, gfn);
  1128. return 0;
  1129. }
  1130. EXPORT_SYMBOL_GPL(kvm_write_guest_page);
  1131. int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
  1132. unsigned long len)
  1133. {
  1134. gfn_t gfn = gpa >> PAGE_SHIFT;
  1135. int seg;
  1136. int offset = offset_in_page(gpa);
  1137. int ret;
  1138. while ((seg = next_segment(len, offset)) != 0) {
  1139. ret = kvm_write_guest_page(kvm, gfn, data, offset, seg);
  1140. if (ret < 0)
  1141. return ret;
  1142. offset = 0;
  1143. len -= seg;
  1144. data += seg;
  1145. ++gfn;
  1146. }
  1147. return 0;
  1148. }
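/*
 * Initialise a gfn->hva cache for @gpa so later cached reads and writes can
 * skip the memslot lookup until the slots generation changes.
 */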
  1149. int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
  1150. gpa_t gpa)
  1151. {
  1152. struct kvm_memslots *slots = kvm_memslots(kvm);
  1153. int offset = offset_in_page(gpa);
  1154. gfn_t gfn = gpa >> PAGE_SHIFT;
  1155. ghc->gpa = gpa;
  1156. ghc->generation = slots->generation;
  1157. ghc->memslot = gfn_to_memslot(kvm, gfn);
  1158. ghc->hva = gfn_to_hva_many(ghc->memslot, gfn, NULL);
  1159. if (!kvm_is_error_hva(ghc->hva))
  1160. ghc->hva += offset;
  1161. else
  1162. return -EFAULT;
  1163. return 0;
  1164. }
  1165. EXPORT_SYMBOL_GPL(kvm_gfn_to_hva_cache_init);
  1166. int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
  1167. void *data, unsigned long len)
  1168. {
  1169. struct kvm_memslots *slots = kvm_memslots(kvm);
  1170. int r;
  1171. if (slots->generation != ghc->generation)
  1172. kvm_gfn_to_hva_cache_init(kvm, ghc, ghc->gpa);
  1173. if (kvm_is_error_hva(ghc->hva))
  1174. return -EFAULT;
  1175. r = __copy_to_user((void __user *)ghc->hva, data, len);
  1176. if (r)
  1177. return -EFAULT;
  1178. mark_page_dirty_in_slot(kvm, ghc->memslot, ghc->gpa >> PAGE_SHIFT);
  1179. return 0;
  1180. }
  1181. EXPORT_SYMBOL_GPL(kvm_write_guest_cached);
  1182. int kvm_read_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
  1183. void *data, unsigned long len)
  1184. {
  1185. struct kvm_memslots *slots = kvm_memslots(kvm);
  1186. int r;
  1187. if (slots->generation != ghc->generation)
  1188. kvm_gfn_to_hva_cache_init(kvm, ghc, ghc->gpa);
  1189. if (kvm_is_error_hva(ghc->hva))
  1190. return -EFAULT;
  1191. r = __copy_from_user(data, (void __user *)ghc->hva, len);
  1192. if (r)
  1193. return -EFAULT;
  1194. return 0;
  1195. }
  1196. EXPORT_SYMBOL_GPL(kvm_read_guest_cached);
  1197. int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len)
  1198. {
  1199. return kvm_write_guest_page(kvm, gfn, (const void *) empty_zero_page,
  1200. offset, len);
  1201. }
  1202. EXPORT_SYMBOL_GPL(kvm_clear_guest_page);
  1203. int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len)
  1204. {
  1205. gfn_t gfn = gpa >> PAGE_SHIFT;
  1206. int seg;
  1207. int offset = offset_in_page(gpa);
  1208. int ret;
  1209. while ((seg = next_segment(len, offset)) != 0) {
  1210. ret = kvm_clear_guest_page(kvm, gfn, offset, seg);
  1211. if (ret < 0)
  1212. return ret;
  1213. offset = 0;
  1214. len -= seg;
  1215. ++gfn;
  1216. }
  1217. return 0;
  1218. }
  1219. EXPORT_SYMBOL_GPL(kvm_clear_guest);
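/*
 * Set the dirty bit for @gfn in its slot's bitmap (when dirty logging is
 * enabled) and count newly dirtied pages.
 */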
  1220. void mark_page_dirty_in_slot(struct kvm *kvm, struct kvm_memory_slot *memslot,
  1221. gfn_t gfn)
  1222. {
  1223. if (memslot && memslot->dirty_bitmap) {
  1224. unsigned long rel_gfn = gfn - memslot->base_gfn;
  1225. if (!test_and_set_bit_le(rel_gfn, memslot->dirty_bitmap))
  1226. memslot->nr_dirty_pages++;
  1227. }
  1228. }
  1229. void mark_page_dirty(struct kvm *kvm, gfn_t gfn)
  1230. {
  1231. struct kvm_memory_slot *memslot;
  1232. memslot = gfn_to_memslot(kvm, gfn);
  1233. mark_page_dirty_in_slot(kvm, memslot, gfn);
  1234. }
  1235. /*
  1236. * The vCPU has executed a HLT instruction with in-kernel mode enabled.
  1237. */
  1238. void kvm_vcpu_block(struct kvm_vcpu *vcpu)
  1239. {
  1240. DEFINE_WAIT(wait);
  1241. for (;;) {
  1242. prepare_to_wait(&vcpu->wq, &wait, TASK_INTERRUPTIBLE);
  1243. if (kvm_arch_vcpu_runnable(vcpu)) {
  1244. kvm_make_request(KVM_REQ_UNHALT, vcpu);
  1245. break;
  1246. }
  1247. if (kvm_cpu_has_pending_timer(vcpu))
  1248. break;
  1249. if (signal_pending(current))
  1250. break;
  1251. schedule();
  1252. }
  1253. finish_wait(&vcpu->wq, &wait);
  1254. }
  1255. void kvm_resched(struct kvm_vcpu *vcpu)
  1256. {
  1257. if (!need_resched())
  1258. return;
  1259. cond_resched();
  1260. }
  1261. EXPORT_SYMBOL_GPL(kvm_resched);
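/*
 * Called when a vcpu spins on a contended lock: yield this CPU to another
 * vcpu of the same VM that is runnable but not currently running.
 */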
  1262. void kvm_vcpu_on_spin(struct kvm_vcpu *me)
  1263. {
  1264. struct kvm *kvm = me->kvm;
  1265. struct kvm_vcpu *vcpu;
  1266. int last_boosted_vcpu = me->kvm->last_boosted_vcpu;
  1267. int yielded = 0;
  1268. int pass;
  1269. int i;
  1270. /*
  1271. * We boost the priority of a VCPU that is runnable but not
  1272. * currently running, because it got preempted by something
  1273. * else and called schedule in __vcpu_run. Hopefully that
  1274. * VCPU is holding the lock that we need and will release it.
  1275. * We approximate round-robin by starting at the last boosted VCPU.
  1276. */
  1277. for (pass = 0; pass < 2 && !yielded; pass++) {
  1278. kvm_for_each_vcpu(i, vcpu, kvm) {
  1279. struct task_struct *task = NULL;
  1280. struct pid *pid;
  1281. if (!pass && i < last_boosted_vcpu) {
  1282. i = last_boosted_vcpu;
  1283. continue;
  1284. } else if (pass && i > last_boosted_vcpu)
  1285. break;
  1286. if (vcpu == me)
  1287. continue;
  1288. if (waitqueue_active(&vcpu->wq))
  1289. continue;
  1290. rcu_read_lock();
  1291. pid = rcu_dereference(vcpu->pid);
  1292. if (pid)
  1293. task = get_pid_task(pid, PIDTYPE_PID);
  1294. rcu_read_unlock();
  1295. if (!task)
  1296. continue;
  1297. if (task->flags & PF_VCPU) {
  1298. put_task_struct(task);
  1299. continue;
  1300. }
  1301. if (yield_to(task, 1)) {
  1302. put_task_struct(task);
  1303. kvm->last_boosted_vcpu = i;
  1304. yielded = 1;
  1305. break;
  1306. }
  1307. put_task_struct(task);
  1308. }
  1309. }
  1310. }
  1311. EXPORT_SYMBOL_GPL(kvm_vcpu_on_spin);
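/*
 * mmap fault handler for the vcpu fd: page 0 is the kvm_run structure,
 * followed by the x86 PIO data page and the coalesced MMIO ring when present.
 */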
  1312. static int kvm_vcpu_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
  1313. {
  1314. struct kvm_vcpu *vcpu = vma->vm_file->private_data;
  1315. struct page *page;
  1316. if (vmf->pgoff == 0)
  1317. page = virt_to_page(vcpu->run);
  1318. #ifdef CONFIG_X86
  1319. else if (vmf->pgoff == KVM_PIO_PAGE_OFFSET)
  1320. page = virt_to_page(vcpu->arch.pio_data);
  1321. #endif
  1322. #ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
  1323. else if (vmf->pgoff == KVM_COALESCED_MMIO_PAGE_OFFSET)
  1324. page = virt_to_page(vcpu->kvm->coalesced_mmio_ring);
  1325. #endif
  1326. else
  1327. return kvm_arch_vcpu_fault(vcpu, vmf);
  1328. get_page(page);
  1329. vmf->page = page;
  1330. return 0;
  1331. }
  1332. static const struct vm_operations_struct kvm_vcpu_vm_ops = {
  1333. .fault = kvm_vcpu_fault,
  1334. };
  1335. static int kvm_vcpu_mmap(struct file *file, struct vm_area_struct *vma)
  1336. {
  1337. vma->vm_ops = &kvm_vcpu_vm_ops;
  1338. return 0;
  1339. }
  1340. static int kvm_vcpu_release(struct inode *inode, struct file *filp)
  1341. {
  1342. struct kvm_vcpu *vcpu = filp->private_data;
  1343. kvm_put_kvm(vcpu->kvm);
  1344. return 0;
  1345. }
  1346. static struct file_operations kvm_vcpu_fops = {
  1347. .release = kvm_vcpu_release,
  1348. .unlocked_ioctl = kvm_vcpu_ioctl,
  1349. #ifdef CONFIG_COMPAT
  1350. .compat_ioctl = kvm_vcpu_compat_ioctl,
  1351. #endif
  1352. .mmap = kvm_vcpu_mmap,
  1353. .llseek = noop_llseek,
  1354. };
  1355. /*
  1356. * Allocates an inode for the vcpu.
  1357. */
  1358. static int create_vcpu_fd(struct kvm_vcpu *vcpu)
  1359. {
  1360. return anon_inode_getfd("kvm-vcpu", &kvm_vcpu_fops, vcpu, O_RDWR);
  1361. }

/*
 * Creates some virtual cpus.  Good luck creating more than one.
 */
static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, u32 id)
{
	int r;
	struct kvm_vcpu *vcpu, *v;

	vcpu = kvm_arch_vcpu_create(kvm, id);
	if (IS_ERR(vcpu))
		return PTR_ERR(vcpu);

	preempt_notifier_init(&vcpu->preempt_notifier, &kvm_preempt_ops);

	r = kvm_arch_vcpu_setup(vcpu);
	if (r)
		goto vcpu_destroy;

	mutex_lock(&kvm->lock);
	if (atomic_read(&kvm->online_vcpus) == KVM_MAX_VCPUS) {
		r = -EINVAL;
		goto unlock_vcpu_destroy;
	}

	kvm_for_each_vcpu(r, v, kvm)
		if (v->vcpu_id == id) {
			r = -EEXIST;
			goto unlock_vcpu_destroy;
		}

	BUG_ON(kvm->vcpus[atomic_read(&kvm->online_vcpus)]);

	/* Now it's all set up, let userspace reach it */
	kvm_get_kvm(kvm);
	r = create_vcpu_fd(vcpu);
	if (r < 0) {
		kvm_put_kvm(kvm);
		goto unlock_vcpu_destroy;
	}

	kvm->vcpus[atomic_read(&kvm->online_vcpus)] = vcpu;
	smp_wmb();
	atomic_inc(&kvm->online_vcpus);

	mutex_unlock(&kvm->lock);
	return r;

unlock_vcpu_destroy:
	mutex_unlock(&kvm->lock);
vcpu_destroy:
	kvm_arch_vcpu_destroy(vcpu);
	return r;
}
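
/*
 * Install (or clear) the signal mask used while the vcpu runs.  SIGKILL and
 * SIGSTOP can never be blocked, so they are always stripped from the mask.
 */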
static int kvm_vcpu_ioctl_set_sigmask(struct kvm_vcpu *vcpu, sigset_t *sigset)
{
	if (sigset) {
		sigdelsetmask(sigset, sigmask(SIGKILL)|sigmask(SIGSTOP));
		vcpu->sigset_active = 1;
		vcpu->sigset = *sigset;
	} else
		vcpu->sigset_active = 0;
	return 0;
}
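
/*
 * Dispatcher for ioctls issued on a vcpu fd; commands not handled here are
 * passed on to kvm_arch_vcpu_ioctl().
 */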
static long kvm_vcpu_ioctl(struct file *filp,
			   unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	int r;
	struct kvm_fpu *fpu = NULL;
	struct kvm_sregs *kvm_sregs = NULL;

	if (vcpu->kvm->mm != current->mm)
		return -EIO;

#if defined(CONFIG_S390) || defined(CONFIG_PPC)
	/*
	 * Special cases: vcpu ioctls that are asynchronous to vcpu execution,
	 * so vcpu_load() would break it.
	 */
	if (ioctl == KVM_S390_INTERRUPT || ioctl == KVM_INTERRUPT)
		return kvm_arch_vcpu_ioctl(filp, ioctl, arg);
#endif

	vcpu_load(vcpu);
	switch (ioctl) {
	case KVM_RUN:
		r = -EINVAL;
		if (arg)
			goto out;
		r = kvm_arch_vcpu_ioctl_run(vcpu, vcpu->run);
		trace_kvm_userspace_exit(vcpu->run->exit_reason, r);
		break;
	case KVM_GET_REGS: {
		struct kvm_regs *kvm_regs;

		r = -ENOMEM;
		kvm_regs = kzalloc(sizeof(struct kvm_regs), GFP_KERNEL);
		if (!kvm_regs)
			goto out;
		r = kvm_arch_vcpu_ioctl_get_regs(vcpu, kvm_regs);
		if (r)
			goto out_free1;
		r = -EFAULT;
		if (copy_to_user(argp, kvm_regs, sizeof(struct kvm_regs)))
			goto out_free1;
		r = 0;
out_free1:
		kfree(kvm_regs);
		break;
	}
	case KVM_SET_REGS: {
		struct kvm_regs *kvm_regs;

		r = -ENOMEM;
		kvm_regs = memdup_user(argp, sizeof(*kvm_regs));
		if (IS_ERR(kvm_regs)) {
			r = PTR_ERR(kvm_regs);
			goto out;
		}
		r = kvm_arch_vcpu_ioctl_set_regs(vcpu, kvm_regs);
		if (r)
			goto out_free2;
		r = 0;
out_free2:
		kfree(kvm_regs);
		break;
	}
	case KVM_GET_SREGS: {
		kvm_sregs = kzalloc(sizeof(struct kvm_sregs), GFP_KERNEL);
		r = -ENOMEM;
		if (!kvm_sregs)
			goto out;
		r = kvm_arch_vcpu_ioctl_get_sregs(vcpu, kvm_sregs);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, kvm_sregs, sizeof(struct kvm_sregs)))
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_SREGS: {
		kvm_sregs = memdup_user(argp, sizeof(*kvm_sregs));
		if (IS_ERR(kvm_sregs)) {
			r = PTR_ERR(kvm_sregs);
			goto out;
		}
		r = kvm_arch_vcpu_ioctl_set_sregs(vcpu, kvm_sregs);
		if (r)
			goto out;
		r = 0;
		break;
	}
	case KVM_GET_MP_STATE: {
		struct kvm_mp_state mp_state;

		r = kvm_arch_vcpu_ioctl_get_mpstate(vcpu, &mp_state);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, &mp_state, sizeof mp_state))
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_MP_STATE: {
		struct kvm_mp_state mp_state;

		r = -EFAULT;
		if (copy_from_user(&mp_state, argp, sizeof mp_state))
			goto out;
		r = kvm_arch_vcpu_ioctl_set_mpstate(vcpu, &mp_state);
		if (r)
			goto out;
		r = 0;
		break;
	}
	case KVM_TRANSLATE: {
		struct kvm_translation tr;

		r = -EFAULT;
		if (copy_from_user(&tr, argp, sizeof tr))
			goto out;
		r = kvm_arch_vcpu_ioctl_translate(vcpu, &tr);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, &tr, sizeof tr))
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_GUEST_DEBUG: {
		struct kvm_guest_debug dbg;

		r = -EFAULT;
		if (copy_from_user(&dbg, argp, sizeof dbg))
			goto out;
		r = kvm_arch_vcpu_ioctl_set_guest_debug(vcpu, &dbg);
		if (r)
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_SIGNAL_MASK: {
		struct kvm_signal_mask __user *sigmask_arg = argp;
		struct kvm_signal_mask kvm_sigmask;
		sigset_t sigset, *p;

		p = NULL;
		if (argp) {
			r = -EFAULT;
			if (copy_from_user(&kvm_sigmask, argp,
					   sizeof kvm_sigmask))
				goto out;
			r = -EINVAL;
			if (kvm_sigmask.len != sizeof sigset)
				goto out;
			r = -EFAULT;
			if (copy_from_user(&sigset, sigmask_arg->sigset,
					   sizeof sigset))
				goto out;
			p = &sigset;
		}
		r = kvm_vcpu_ioctl_set_sigmask(vcpu, p);
		break;
	}
	case KVM_GET_FPU: {
		fpu = kzalloc(sizeof(struct kvm_fpu), GFP_KERNEL);
		r = -ENOMEM;
		if (!fpu)
			goto out;
		r = kvm_arch_vcpu_ioctl_get_fpu(vcpu, fpu);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, fpu, sizeof(struct kvm_fpu)))
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_FPU: {
		fpu = memdup_user(argp, sizeof(*fpu));
		if (IS_ERR(fpu)) {
			r = PTR_ERR(fpu);
			goto out;
		}
		r = kvm_arch_vcpu_ioctl_set_fpu(vcpu, fpu);
		if (r)
			goto out;
		r = 0;
		break;
	}
	default:
		r = kvm_arch_vcpu_ioctl(filp, ioctl, arg);
	}
out:
	vcpu_put(vcpu);
	kfree(fpu);
	kfree(kvm_sregs);
	return r;
}
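
/*
 * 32-bit compat entry point: only KVM_SET_SIGNAL_MASK needs translation,
 * since compat_sigset_t differs from sigset_t; everything else is forwarded
 * to the native vcpu ioctl handler.
 */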
#ifdef CONFIG_COMPAT
static long kvm_vcpu_compat_ioctl(struct file *filp,
				  unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = compat_ptr(arg);
	int r;

	if (vcpu->kvm->mm != current->mm)
		return -EIO;

	switch (ioctl) {
	case KVM_SET_SIGNAL_MASK: {
		struct kvm_signal_mask __user *sigmask_arg = argp;
		struct kvm_signal_mask kvm_sigmask;
		compat_sigset_t csigset;
		sigset_t sigset;

		if (argp) {
			r = -EFAULT;
			if (copy_from_user(&kvm_sigmask, argp,
					   sizeof kvm_sigmask))
				goto out;
			r = -EINVAL;
			if (kvm_sigmask.len != sizeof csigset)
				goto out;
			r = -EFAULT;
			if (copy_from_user(&csigset, sigmask_arg->sigset,
					   sizeof csigset))
				goto out;
			sigset_from_compat(&sigset, &csigset);
			r = kvm_vcpu_ioctl_set_sigmask(vcpu, &sigset);
		} else
			r = kvm_vcpu_ioctl_set_sigmask(vcpu, NULL);
		break;
	}
	default:
		r = kvm_vcpu_ioctl(filp, ioctl, arg);
	}

out:
	return r;
}
#endif
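
/*
 * Dispatcher for ioctls issued on a VM fd (vcpu creation, memory slots,
 * dirty logging, irqfd/ioeventfd wiring).  Unknown commands go to
 * kvm_arch_vm_ioctl() and, if that returns -ENOTTY, to the assigned-device
 * handler.
 */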
static long kvm_vm_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm = filp->private_data;
	void __user *argp = (void __user *)arg;
	int r;

	if (kvm->mm != current->mm)
		return -EIO;
	switch (ioctl) {
	case KVM_CREATE_VCPU:
		r = kvm_vm_ioctl_create_vcpu(kvm, arg);
		if (r < 0)
			goto out;
		break;
	case KVM_SET_USER_MEMORY_REGION: {
		struct kvm_userspace_memory_region kvm_userspace_mem;

		r = -EFAULT;
		if (copy_from_user(&kvm_userspace_mem, argp,
				   sizeof kvm_userspace_mem))
			goto out;

		r = kvm_vm_ioctl_set_memory_region(kvm, &kvm_userspace_mem, 1);
		if (r)
			goto out;
		break;
	}
	case KVM_GET_DIRTY_LOG: {
		struct kvm_dirty_log log;

		r = -EFAULT;
		if (copy_from_user(&log, argp, sizeof log))
			goto out;
		r = kvm_vm_ioctl_get_dirty_log(kvm, &log);
		if (r)
			goto out;
		break;
	}
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
	case KVM_REGISTER_COALESCED_MMIO: {
		struct kvm_coalesced_mmio_zone zone;

		r = -EFAULT;
		if (copy_from_user(&zone, argp, sizeof zone))
			goto out;
		r = kvm_vm_ioctl_register_coalesced_mmio(kvm, &zone);
		if (r)
			goto out;
		r = 0;
		break;
	}
	case KVM_UNREGISTER_COALESCED_MMIO: {
		struct kvm_coalesced_mmio_zone zone;

		r = -EFAULT;
		if (copy_from_user(&zone, argp, sizeof zone))
			goto out;
		r = kvm_vm_ioctl_unregister_coalesced_mmio(kvm, &zone);
		if (r)
			goto out;
		r = 0;
		break;
	}
#endif
	case KVM_IRQFD: {
		struct kvm_irqfd data;

		r = -EFAULT;
		if (copy_from_user(&data, argp, sizeof data))
			goto out;
		r = kvm_irqfd(kvm, data.fd, data.gsi, data.flags);
		break;
	}
	case KVM_IOEVENTFD: {
		struct kvm_ioeventfd data;

		r = -EFAULT;
		if (copy_from_user(&data, argp, sizeof data))
			goto out;
		r = kvm_ioeventfd(kvm, &data);
		break;
	}
#ifdef CONFIG_KVM_APIC_ARCHITECTURE
	case KVM_SET_BOOT_CPU_ID:
		r = 0;
		mutex_lock(&kvm->lock);
		if (atomic_read(&kvm->online_vcpus) != 0)
			r = -EBUSY;
		else
			kvm->bsp_vcpu_id = arg;
		mutex_unlock(&kvm->lock);
		break;
#endif
	default:
		r = kvm_arch_vm_ioctl(filp, ioctl, arg);
		if (r == -ENOTTY)
			r = kvm_vm_ioctl_assigned_device(kvm, ioctl, arg);
	}
out:
	return r;
}
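
/*
 * 32-bit compat path for KVM_GET_DIRTY_LOG: the dirty_bitmap pointer is a
 * compat_uptr_t and must be widened before calling the native handler.
 */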
#ifdef CONFIG_COMPAT
struct compat_kvm_dirty_log {
	__u32 slot;
	__u32 padding1;
	union {
		compat_uptr_t dirty_bitmap; /* one bit per page */
		__u64 padding2;
	};
};

static long kvm_vm_compat_ioctl(struct file *filp,
				unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm = filp->private_data;
	int r;

	if (kvm->mm != current->mm)
		return -EIO;
	switch (ioctl) {
	case KVM_GET_DIRTY_LOG: {
		struct compat_kvm_dirty_log compat_log;
		struct kvm_dirty_log log;

		r = -EFAULT;
		if (copy_from_user(&compat_log, (void __user *)arg,
				   sizeof(compat_log)))
			goto out;
		log.slot = compat_log.slot;
		log.padding1 = compat_log.padding1;
		log.padding2 = compat_log.padding2;
		log.dirty_bitmap = compat_ptr(compat_log.dirty_bitmap);

		r = kvm_vm_ioctl_get_dirty_log(kvm, &log);
		if (r)
			goto out;
		break;
	}
	default:
		r = kvm_vm_ioctl(filp, ioctl, arg);
	}

out:
	return r;
}
#endif
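
/*
 * Fault handler for mmap of a VM fd: pins and returns the page backing the
 * guest frame number given by the page offset.
 */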
static int kvm_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct page *page[1];
	unsigned long addr;
	int npages;
	gfn_t gfn = vmf->pgoff;
	struct kvm *kvm = vma->vm_file->private_data;

	addr = gfn_to_hva(kvm, gfn);
	if (kvm_is_error_hva(addr))
		return VM_FAULT_SIGBUS;

	npages = get_user_pages(current, current->mm, addr, 1, 1, 0, page,
				NULL);
	if (unlikely(npages != 1))
		return VM_FAULT_SIGBUS;

	vmf->page = page[0];
	return 0;
}

static const struct vm_operations_struct kvm_vm_vm_ops = {
	.fault = kvm_vm_fault,
};

static int kvm_vm_mmap(struct file *file, struct vm_area_struct *vma)
{
	vma->vm_ops = &kvm_vm_vm_ops;
	return 0;
}

static struct file_operations kvm_vm_fops = {
	.release = kvm_vm_release,
	.unlocked_ioctl = kvm_vm_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = kvm_vm_compat_ioctl,
#endif
	.mmap = kvm_vm_mmap,
	.llseek = noop_llseek,
};

static int kvm_dev_ioctl_create_vm(unsigned long type)
{
	int r;
	struct kvm *kvm;

	kvm = kvm_create_vm(type);
	if (IS_ERR(kvm))
		return PTR_ERR(kvm);
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
	r = kvm_coalesced_mmio_init(kvm);
	if (r < 0) {
		kvm_put_kvm(kvm);
		return r;
	}
#endif
	r = anon_inode_getfd("kvm-vm", &kvm_vm_fops, kvm, O_RDWR);
	if (r < 0)
		kvm_put_kvm(kvm);

	return r;
}
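
/*
 * Capabilities answered generically; everything else is delegated to the
 * architecture-specific KVM_CHECK_EXTENSION handler.
 */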
static long kvm_dev_ioctl_check_extension_generic(long arg)
{
	switch (arg) {
	case KVM_CAP_USER_MEMORY:
	case KVM_CAP_DESTROY_MEMORY_REGION_WORKS:
	case KVM_CAP_JOIN_MEMORY_REGIONS_WORKS:
#ifdef CONFIG_KVM_APIC_ARCHITECTURE
	case KVM_CAP_SET_BOOT_CPU_ID:
#endif
	case KVM_CAP_INTERNAL_ERROR_DATA:
		return 1;
#ifdef CONFIG_HAVE_KVM_IRQCHIP
	case KVM_CAP_IRQ_ROUTING:
		return KVM_MAX_IRQ_ROUTES;
#endif
	default:
		break;
	}
	return kvm_dev_ioctl_check_extension(arg);
}

static long kvm_dev_ioctl(struct file *filp,
			  unsigned int ioctl, unsigned long arg)
{
	long r = -EINVAL;

	switch (ioctl) {
	case KVM_GET_API_VERSION:
		r = -EINVAL;
		if (arg)
			goto out;
		r = KVM_API_VERSION;
		break;
	case KVM_CREATE_VM:
		r = kvm_dev_ioctl_create_vm(arg);
		break;
	case KVM_CHECK_EXTENSION:
		r = kvm_dev_ioctl_check_extension_generic(arg);
		break;
	case KVM_GET_VCPU_MMAP_SIZE:
		r = -EINVAL;
		if (arg)
			goto out;
		r = PAGE_SIZE;     /* struct kvm_run */
#ifdef CONFIG_X86
		r += PAGE_SIZE;    /* pio data page */
#endif
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
		r += PAGE_SIZE;    /* coalesced mmio ring page */
#endif
		break;
	case KVM_TRACE_ENABLE:
	case KVM_TRACE_PAUSE:
	case KVM_TRACE_DISABLE:
		r = -EOPNOTSUPP;
		break;
	default:
		return kvm_arch_dev_ioctl(filp, ioctl, arg);
	}
out:
	return r;
}

static struct file_operations kvm_chardev_ops = {
	.unlocked_ioctl = kvm_dev_ioctl,
	.compat_ioctl = kvm_dev_ioctl,
	.llseek = noop_llseek,
};

static struct miscdevice kvm_dev = {
	KVM_MINOR,
	"kvm",
	&kvm_chardev_ops,
};
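
/*
 * Per-CPU enabling/disabling of hardware virtualization.  The
 * cpus_hardware_enabled mask tracks which CPUs currently have it switched
 * on; the _nolock variants rely on the caller to hold kvm_lock or to
 * otherwise exclude concurrent updates.
 */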
static void hardware_enable_nolock(void *junk)
{
	int cpu = raw_smp_processor_id();
	int r;

	if (cpumask_test_cpu(cpu, cpus_hardware_enabled))
		return;

	cpumask_set_cpu(cpu, cpus_hardware_enabled);

	r = kvm_arch_hardware_enable(NULL);

	if (r) {
		cpumask_clear_cpu(cpu, cpus_hardware_enabled);
		atomic_inc(&hardware_enable_failed);
		printk(KERN_INFO "kvm: enabling virtualization on "
				 "CPU%d failed\n", cpu);
	}
}

static void hardware_enable(void *junk)
{
	raw_spin_lock(&kvm_lock);
	hardware_enable_nolock(junk);
	raw_spin_unlock(&kvm_lock);
}

static void hardware_disable_nolock(void *junk)
{
	int cpu = raw_smp_processor_id();

	if (!cpumask_test_cpu(cpu, cpus_hardware_enabled))
		return;
	cpumask_clear_cpu(cpu, cpus_hardware_enabled);
	kvm_arch_hardware_disable(NULL);
}

static void hardware_disable(void *junk)
{
	raw_spin_lock(&kvm_lock);
	hardware_disable_nolock(junk);
	raw_spin_unlock(&kvm_lock);
}

static void hardware_disable_all_nolock(void)
{
	BUG_ON(!kvm_usage_count);

	kvm_usage_count--;
	if (!kvm_usage_count)
		on_each_cpu(hardware_disable_nolock, NULL, 1);
}

static void hardware_disable_all(void)
{
	raw_spin_lock(&kvm_lock);
	hardware_disable_all_nolock();
	raw_spin_unlock(&kvm_lock);
}

static int hardware_enable_all(void)
{
	int r = 0;

	raw_spin_lock(&kvm_lock);

	kvm_usage_count++;
	if (kvm_usage_count == 1) {
		atomic_set(&hardware_enable_failed, 0);
		on_each_cpu(hardware_enable_nolock, NULL, 1);

		if (atomic_read(&hardware_enable_failed)) {
			hardware_disable_all_nolock();
			r = -EBUSY;
		}
	}

	raw_spin_unlock(&kvm_lock);

	return r;
}
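
/*
 * CPU hotplug callback: virtualization is switched off on a CPU that is
 * going down and switched back on when a CPU comes up, but only while at
 * least one VM exists (kvm_usage_count != 0).
 */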
static int kvm_cpu_hotplug(struct notifier_block *notifier, unsigned long val,
			   void *v)
{
	int cpu = (long)v;

	if (!kvm_usage_count)
		return NOTIFY_OK;

	val &= ~CPU_TASKS_FROZEN;
	switch (val) {
	case CPU_DYING:
		printk(KERN_INFO "kvm: disabling virtualization on CPU%d\n",
		       cpu);
		hardware_disable(NULL);
		break;
	case CPU_STARTING:
		printk(KERN_INFO "kvm: enabling virtualization on CPU%d\n",
		       cpu);
		hardware_enable(NULL);
		break;
	}
	return NOTIFY_OK;
}

asmlinkage void kvm_spurious_fault(void)
{
	/* Fault while not rebooting.  We want the trace. */
	BUG();
}
EXPORT_SYMBOL_GPL(kvm_spurious_fault);

static int kvm_reboot(struct notifier_block *notifier, unsigned long val,
		      void *v)
{
	/*
	 * Some (well, at least mine) BIOSes hang on reboot if
	 * in vmx root mode.
	 *
	 * And Intel TXT requires VMX to be off on all CPUs when the system
	 * shuts down.
	 */
	printk(KERN_INFO "kvm: exiting hardware virtualization\n");
	kvm_rebooting = true;
	on_each_cpu(hardware_disable_nolock, NULL, 1);
	return NOTIFY_OK;
}

static struct notifier_block kvm_reboot_notifier = {
	.notifier_call = kvm_reboot,
	.priority = 0,
};
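
/*
 * MMIO/PIO bus handling.  Each bus keeps its device ranges sorted by
 * address so lookups can use a binary search.
 */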
static void kvm_io_bus_destroy(struct kvm_io_bus *bus)
{
	int i;

	for (i = 0; i < bus->dev_count; i++) {
		struct kvm_io_device *pos = bus->range[i].dev;

		kvm_iodevice_destructor(pos);
	}
	kfree(bus);
}

int kvm_io_bus_sort_cmp(const void *p1, const void *p2)
{
	const struct kvm_io_range *r1 = p1;
	const struct kvm_io_range *r2 = p2;

	if (r1->addr < r2->addr)
		return -1;
	if (r1->addr + r1->len > r2->addr + r2->len)
		return 1;
	return 0;
}

int kvm_io_bus_insert_dev(struct kvm_io_bus *bus, struct kvm_io_device *dev,
			  gpa_t addr, int len)
{
	if (bus->dev_count == NR_IOBUS_DEVS)
		return -ENOSPC;

	bus->range[bus->dev_count++] = (struct kvm_io_range) {
		.addr = addr,
		.len = len,
		.dev = dev,
	};

	sort(bus->range, bus->dev_count, sizeof(struct kvm_io_range),
	     kvm_io_bus_sort_cmp, NULL);

	return 0;
}

int kvm_io_bus_get_first_dev(struct kvm_io_bus *bus,
			     gpa_t addr, int len)
{
	struct kvm_io_range *range, key;
	int off;

	key = (struct kvm_io_range) {
		.addr = addr,
		.len = len,
	};

	range = bsearch(&key, bus->range, bus->dev_count,
			sizeof(struct kvm_io_range), kvm_io_bus_sort_cmp);
	if (range == NULL)
		return -ENOENT;

	off = range - bus->range;

	while (off > 0 && kvm_io_bus_sort_cmp(&key, &bus->range[off-1]) == 0)
		off--;

	return off;
}

/* kvm_io_bus_write - called under kvm->slots_lock */
int kvm_io_bus_write(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
		     int len, const void *val)
{
	int idx;
	struct kvm_io_bus *bus;
	struct kvm_io_range range;

	range = (struct kvm_io_range) {
		.addr = addr,
		.len = len,
	};

	bus = srcu_dereference(kvm->buses[bus_idx], &kvm->srcu);
	idx = kvm_io_bus_get_first_dev(bus, addr, len);
	if (idx < 0)
		return -EOPNOTSUPP;

	while (idx < bus->dev_count &&
		kvm_io_bus_sort_cmp(&range, &bus->range[idx]) == 0) {
		if (!kvm_iodevice_write(bus->range[idx].dev, addr, len, val))
			return 0;
		idx++;
	}

	return -EOPNOTSUPP;
}

/* kvm_io_bus_read - called under kvm->slots_lock */
int kvm_io_bus_read(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
		    int len, void *val)
{
	int idx;
	struct kvm_io_bus *bus;
	struct kvm_io_range range;

	range = (struct kvm_io_range) {
		.addr = addr,
		.len = len,
	};

	bus = srcu_dereference(kvm->buses[bus_idx], &kvm->srcu);
	idx = kvm_io_bus_get_first_dev(bus, addr, len);
	if (idx < 0)
		return -EOPNOTSUPP;

	while (idx < bus->dev_count &&
		kvm_io_bus_sort_cmp(&range, &bus->range[idx]) == 0) {
		if (!kvm_iodevice_read(bus->range[idx].dev, addr, len, val))
			return 0;
		idx++;
	}

	return -EOPNOTSUPP;
}
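
/*
 * Bus updates are copy-on-write: a new kvm_io_bus is built, published with
 * rcu_assign_pointer(), and the old copy is freed only after
 * synchronize_srcu_expedited() guarantees that no reader still sees it.
 */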
/* Caller must hold slots_lock. */
int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
			    int len, struct kvm_io_device *dev)
{
	struct kvm_io_bus *new_bus, *bus;

	bus = kvm->buses[bus_idx];
	if (bus->dev_count > NR_IOBUS_DEVS - 1)
		return -ENOSPC;

	new_bus = kmemdup(bus, sizeof(struct kvm_io_bus), GFP_KERNEL);
	if (!new_bus)
		return -ENOMEM;
	kvm_io_bus_insert_dev(new_bus, dev, addr, len);
	rcu_assign_pointer(kvm->buses[bus_idx], new_bus);
	synchronize_srcu_expedited(&kvm->srcu);
	kfree(bus);

	return 0;
}

/* Caller must hold slots_lock. */
int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
			      struct kvm_io_device *dev)
{
	int i, r;
	struct kvm_io_bus *new_bus, *bus;

	bus = kvm->buses[bus_idx];

	new_bus = kmemdup(bus, sizeof(*bus), GFP_KERNEL);
	if (!new_bus)
		return -ENOMEM;

	r = -ENOENT;
	for (i = 0; i < new_bus->dev_count; i++)
		if (new_bus->range[i].dev == dev) {
			r = 0;
			new_bus->dev_count--;
			new_bus->range[i] = new_bus->range[new_bus->dev_count];
			sort(new_bus->range, new_bus->dev_count,
			     sizeof(struct kvm_io_range),
			     kvm_io_bus_sort_cmp, NULL);
			break;
		}

	if (r) {
		kfree(new_bus);
		return r;
	}

	rcu_assign_pointer(kvm->buses[bus_idx], new_bus);
	synchronize_srcu_expedited(&kvm->srcu);
	kfree(bus);
	return r;
}

static struct notifier_block kvm_cpu_notifier = {
	.notifier_call = kvm_cpu_hotplug,
};
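
/*
 * debugfs statistics: each debugfs_entries item names the offset of a u32
 * counter inside struct kvm or struct kvm_vcpu; the getters below sum that
 * counter over all VMs (and, for vcpu stats, over all vcpus).
 */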
static int vm_stat_get(void *_offset, u64 *val)
{
	unsigned offset = (long)_offset;
	struct kvm *kvm;

	*val = 0;
	raw_spin_lock(&kvm_lock);
	list_for_each_entry(kvm, &vm_list, vm_list)
		*val += *(u32 *)((void *)kvm + offset);
	raw_spin_unlock(&kvm_lock);
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(vm_stat_fops, vm_stat_get, NULL, "%llu\n");

static int vcpu_stat_get(void *_offset, u64 *val)
{
	unsigned offset = (long)_offset;
	struct kvm *kvm;
	struct kvm_vcpu *vcpu;
	int i;

	*val = 0;
	raw_spin_lock(&kvm_lock);
	list_for_each_entry(kvm, &vm_list, vm_list)
		kvm_for_each_vcpu(i, vcpu, kvm)
			*val += *(u32 *)((void *)vcpu + offset);

	raw_spin_unlock(&kvm_lock);
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(vcpu_stat_fops, vcpu_stat_get, NULL, "%llu\n");

static const struct file_operations *stat_fops[] = {
	[KVM_STAT_VCPU] = &vcpu_stat_fops,
	[KVM_STAT_VM] = &vm_stat_fops,
};

static int kvm_init_debug(void)
{
	int r = -EFAULT;
	struct kvm_stats_debugfs_item *p;

	kvm_debugfs_dir = debugfs_create_dir("kvm", NULL);
	if (kvm_debugfs_dir == NULL)
		goto out;

	for (p = debugfs_entries; p->name; ++p) {
		p->dentry = debugfs_create_file(p->name, 0444, kvm_debugfs_dir,
						(void *)(long)p->offset,
						stat_fops[p->kind]);
		if (p->dentry == NULL)
			goto out_dir;
	}

	return 0;

out_dir:
	debugfs_remove_recursive(kvm_debugfs_dir);
out:
	return r;
}

static void kvm_exit_debug(void)
{
	struct kvm_stats_debugfs_item *p;

	for (p = debugfs_entries; p->name; ++p)
		debugfs_remove(p->dentry);
	debugfs_remove(kvm_debugfs_dir);
}

static int kvm_suspend(void)
{
	if (kvm_usage_count)
		hardware_disable_nolock(NULL);
	return 0;
}

static void kvm_resume(void)
{
	if (kvm_usage_count) {
		WARN_ON(raw_spin_is_locked(&kvm_lock));
		hardware_enable_nolock(NULL);
	}
}

static struct syscore_ops kvm_syscore_ops = {
	.suspend = kvm_suspend,
	.resume = kvm_resume,
};

struct page *bad_page;
pfn_t bad_pfn;
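
/*
 * Preempt notifiers: save vcpu state when the vcpu thread is scheduled out
 * and reload it on the (possibly different) CPU it is scheduled back onto.
 */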
static inline
struct kvm_vcpu *preempt_notifier_to_vcpu(struct preempt_notifier *pn)
{
	return container_of(pn, struct kvm_vcpu, preempt_notifier);
}

static void kvm_sched_in(struct preempt_notifier *pn, int cpu)
{
	struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);

	kvm_arch_vcpu_load(vcpu, cpu);
}

static void kvm_sched_out(struct preempt_notifier *pn,
			  struct task_struct *next)
{
	struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);

	kvm_arch_vcpu_put(vcpu);
}
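
/*
 * Module initialisation: arch setup, the reserved error pages, the
 * hardware-enable cpumask, hotplug/reboot notifiers, the vcpu slab cache,
 * async page faults, the /dev/kvm misc device and the debugfs entries.
 */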
int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
		  struct module *module)
{
	int r;
	int cpu;

	r = kvm_arch_init(opaque);
	if (r)
		goto out_fail;

	bad_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
	if (bad_page == NULL) {
		r = -ENOMEM;
		goto out;
	}

	bad_pfn = page_to_pfn(bad_page);

	hwpoison_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
	if (hwpoison_page == NULL) {
		r = -ENOMEM;
		goto out_free_0;
	}

	hwpoison_pfn = page_to_pfn(hwpoison_page);

	fault_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
	if (fault_page == NULL) {
		r = -ENOMEM;
		goto out_free_0;
	}

	fault_pfn = page_to_pfn(fault_page);

	if (!zalloc_cpumask_var(&cpus_hardware_enabled, GFP_KERNEL)) {
		r = -ENOMEM;
		goto out_free_0;
	}

	r = kvm_arch_hardware_setup();
	if (r < 0)
		goto out_free_0a;

	for_each_online_cpu(cpu) {
		smp_call_function_single(cpu,
				kvm_arch_check_processor_compat,
				&r, 1);
		if (r < 0)
			goto out_free_1;
	}

	r = register_cpu_notifier(&kvm_cpu_notifier);
	if (r)
		goto out_free_2;
	register_reboot_notifier(&kvm_reboot_notifier);

	/* A kmem cache lets us meet the alignment requirements of fx_save. */
	if (!vcpu_align)
		vcpu_align = __alignof__(struct kvm_vcpu);
	kvm_vcpu_cache = kmem_cache_create("kvm_vcpu", vcpu_size, vcpu_align,
					   0, NULL);
	if (!kvm_vcpu_cache) {
		r = -ENOMEM;
		goto out_free_3;
	}

	r = kvm_async_pf_init();
	if (r)
		goto out_free;

	kvm_chardev_ops.owner = module;
	kvm_vm_fops.owner = module;
	kvm_vcpu_fops.owner = module;

	r = misc_register(&kvm_dev);
	if (r) {
		printk(KERN_ERR "kvm: misc device register failed\n");
		goto out_unreg;
	}

	register_syscore_ops(&kvm_syscore_ops);

	kvm_preempt_ops.sched_in = kvm_sched_in;
	kvm_preempt_ops.sched_out = kvm_sched_out;

	r = kvm_init_debug();
	if (r) {
		printk(KERN_ERR "kvm: create debugfs files failed\n");
		goto out_undebugfs;
	}

	return 0;

out_undebugfs:
	unregister_syscore_ops(&kvm_syscore_ops);
out_unreg:
	kvm_async_pf_deinit();
out_free:
	kmem_cache_destroy(kvm_vcpu_cache);
out_free_3:
	unregister_reboot_notifier(&kvm_reboot_notifier);
	unregister_cpu_notifier(&kvm_cpu_notifier);
out_free_2:
out_free_1:
	kvm_arch_hardware_unsetup();
out_free_0a:
	free_cpumask_var(cpus_hardware_enabled);
out_free_0:
	if (fault_page)
		__free_page(fault_page);
	if (hwpoison_page)
		__free_page(hwpoison_page);
	__free_page(bad_page);
out:
	kvm_arch_exit();
out_fail:
	return r;
}
EXPORT_SYMBOL_GPL(kvm_init);

void kvm_exit(void)
{
	kvm_exit_debug();
	misc_deregister(&kvm_dev);
	kmem_cache_destroy(kvm_vcpu_cache);
	kvm_async_pf_deinit();
	unregister_syscore_ops(&kvm_syscore_ops);
	unregister_reboot_notifier(&kvm_reboot_notifier);
	unregister_cpu_notifier(&kvm_cpu_notifier);
	on_each_cpu(hardware_disable_nolock, NULL, 1);
	kvm_arch_hardware_unsetup();
	kvm_arch_exit();
	free_cpumask_var(cpus_hardware_enabled);
	__free_page(hwpoison_page);
	__free_page(bad_page);
}
EXPORT_SYMBOL_GPL(kvm_exit);