/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This module enables machines with Intel VT-x extensions to run virtual
 * machines without emulation or binary translation.
 *
 * Copyright (C) 2006 Qumranet, Inc.
 *
 * Authors:
 *   Avi Kivity   <avi@qumranet.com>
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 */

#include "iodev.h"

#include <linux/kvm_host.h>
#include <linux/kvm.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/percpu.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/miscdevice.h>
#include <linux/vmalloc.h>
#include <linux/reboot.h>
#include <linux/debugfs.h>
#include <linux/highmem.h>
#include <linux/file.h>
#include <linux/sysdev.h>
#include <linux/cpu.h>
#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/smp.h>
#include <linux/anon_inodes.h>
#include <linux/profile.h>
#include <linux/kvm_para.h>
#include <linux/pagemap.h>
#include <linux/mman.h>
#include <linux/swap.h>
#include <linux/bitops.h>
#include <linux/spinlock.h>
#include <linux/compat.h>
#include <linux/srcu.h>
#include <linux/hugetlb.h>

#include <asm/processor.h>
#include <asm/io.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm-generic/bitops/le.h>

#include "coalesced_mmio.h"

#define CREATE_TRACE_POINTS
#include <trace/events/kvm.h>

MODULE_AUTHOR("Qumranet");
MODULE_LICENSE("GPL");
/*
 * Ordering of locks:
 *
 *	kvm->lock --> kvm->slots_lock --> kvm->irq_lock
 */

DEFINE_SPINLOCK(kvm_lock);
LIST_HEAD(vm_list);

static cpumask_var_t cpus_hardware_enabled;
static int kvm_usage_count = 0;
static atomic_t hardware_enable_failed;

struct kmem_cache *kvm_vcpu_cache;
EXPORT_SYMBOL_GPL(kvm_vcpu_cache);

static __read_mostly struct preempt_ops kvm_preempt_ops;

struct dentry *kvm_debugfs_dir;

static long kvm_vcpu_ioctl(struct file *file, unsigned int ioctl,
			   unsigned long arg);
static int hardware_enable_all(void);
static void hardware_disable_all(void);

static void kvm_io_bus_destroy(struct kvm_io_bus *bus);

static bool kvm_rebooting;

static bool largepages_enabled = true;

inline int kvm_is_mmio_pfn(pfn_t pfn)
{
	if (pfn_valid(pfn)) {
		struct page *page = compound_head(pfn_to_page(pfn));
		return PageReserved(page);
	}

	return true;
}

/*
 * Switches to specified vcpu, until a matching vcpu_put()
 */
void vcpu_load(struct kvm_vcpu *vcpu)
{
	int cpu;

	mutex_lock(&vcpu->mutex);
	cpu = get_cpu();
	preempt_notifier_register(&vcpu->preempt_notifier);
	kvm_arch_vcpu_load(vcpu, cpu);
	put_cpu();
}

void vcpu_put(struct kvm_vcpu *vcpu)
{
	preempt_disable();
	kvm_arch_vcpu_put(vcpu);
	preempt_notifier_unregister(&vcpu->preempt_notifier);
	preempt_enable();
	mutex_unlock(&vcpu->mutex);
}

static void ack_flush(void *_completed)
{
}
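
/*
 * Set @req on every vcpu of @kvm and kick any CPU currently running one
 * so it notices the request.  ack_flush() above is an empty IPI handler;
 * smp_call_function_many() with wait=1 only returns once every targeted
 * CPU has run it.  Returns true if at least one CPU was kicked.
 */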
static bool make_all_cpus_request(struct kvm *kvm, unsigned int req)
{
	int i, cpu, me;
	cpumask_var_t cpus;
	bool called = true;
	struct kvm_vcpu *vcpu;

	zalloc_cpumask_var(&cpus, GFP_ATOMIC);

	raw_spin_lock(&kvm->requests_lock);
	me = smp_processor_id();
	kvm_for_each_vcpu(i, vcpu, kvm) {
		if (test_and_set_bit(req, &vcpu->requests))
			continue;
		cpu = vcpu->cpu;
		if (cpus != NULL && cpu != -1 && cpu != me)
			cpumask_set_cpu(cpu, cpus);
	}
	if (unlikely(cpus == NULL))
		smp_call_function_many(cpu_online_mask, ack_flush, NULL, 1);
	else if (!cpumask_empty(cpus))
		smp_call_function_many(cpus, ack_flush, NULL, 1);
	else
		called = false;
	raw_spin_unlock(&kvm->requests_lock);
	free_cpumask_var(cpus);
	return called;
}

void kvm_flush_remote_tlbs(struct kvm *kvm)
{
	if (make_all_cpus_request(kvm, KVM_REQ_TLB_FLUSH))
		++kvm->stat.remote_tlb_flush;
}

void kvm_reload_remote_mmus(struct kvm *kvm)
{
	make_all_cpus_request(kvm, KVM_REQ_MMU_RELOAD);
}
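
/*
 * Initialize the parts of a vcpu that are common to all architectures:
 * the mutex, the wait queue and the page backing vcpu->run, which is
 * later mapped into userspace via kvm_vcpu_mmap()/kvm_vcpu_fault().
 * Architecture-specific state is set up by kvm_arch_vcpu_init().
 */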
int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id)
{
	struct page *page;
	int r;

	mutex_init(&vcpu->mutex);
	vcpu->cpu = -1;
	vcpu->kvm = kvm;
	vcpu->vcpu_id = id;
	init_waitqueue_head(&vcpu->wq);

	page = alloc_page(GFP_KERNEL | __GFP_ZERO);
	if (!page) {
		r = -ENOMEM;
		goto fail;
	}
	vcpu->run = page_address(page);

	r = kvm_arch_vcpu_init(vcpu);
	if (r < 0)
		goto fail_free_run;
	return 0;

fail_free_run:
	free_page((unsigned long)vcpu->run);
fail:
	return r;
}
EXPORT_SYMBOL_GPL(kvm_vcpu_init);

void kvm_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	kvm_arch_vcpu_uninit(vcpu);
	free_page((unsigned long)vcpu->run);
}
EXPORT_SYMBOL_GPL(kvm_vcpu_uninit);
#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
static inline struct kvm *mmu_notifier_to_kvm(struct mmu_notifier *mn)
{
	return container_of(mn, struct kvm, mmu_notifier);
}

static void kvm_mmu_notifier_invalidate_page(struct mmu_notifier *mn,
					     struct mm_struct *mm,
					     unsigned long address)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
	int need_tlb_flush, idx;

	/*
	 * When ->invalidate_page runs, the linux pte has been zapped
	 * already but the page is still allocated until
	 * ->invalidate_page returns.  So if we increase the sequence
	 * here the kvm page fault will notice if the spte can't be
	 * established because the page is going to be freed.  If
	 * instead the kvm page fault establishes the spte before
	 * ->invalidate_page runs, kvm_unmap_hva will release it
	 * before returning.
	 *
	 * The sequence increase only needs to be seen at spin_unlock
	 * time, and not at spin_lock time.
	 *
	 * Increasing the sequence after the spin_unlock would be
	 * unsafe because the kvm page fault could then establish the
	 * pte after kvm_unmap_hva returned, without noticing the page
	 * is going to be freed.
	 */
	idx = srcu_read_lock(&kvm->srcu);
	spin_lock(&kvm->mmu_lock);
	kvm->mmu_notifier_seq++;
	need_tlb_flush = kvm_unmap_hva(kvm, address);
	spin_unlock(&kvm->mmu_lock);
	srcu_read_unlock(&kvm->srcu, idx);

	/* we have to flush the tlb before the pages can be freed */
	if (need_tlb_flush)
		kvm_flush_remote_tlbs(kvm);
}

static void kvm_mmu_notifier_change_pte(struct mmu_notifier *mn,
					struct mm_struct *mm,
					unsigned long address,
					pte_t pte)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
	int idx;

	idx = srcu_read_lock(&kvm->srcu);
	spin_lock(&kvm->mmu_lock);
	kvm->mmu_notifier_seq++;
	kvm_set_spte_hva(kvm, address, pte);
	spin_unlock(&kvm->mmu_lock);
	srcu_read_unlock(&kvm->srcu, idx);
}

static void kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
						    struct mm_struct *mm,
						    unsigned long start,
						    unsigned long end)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
	int need_tlb_flush = 0, idx;

	idx = srcu_read_lock(&kvm->srcu);
	spin_lock(&kvm->mmu_lock);
	/*
	 * The count increase must become visible at unlock time as no
	 * spte can be established without taking the mmu_lock and
	 * count is also read inside the mmu_lock critical section.
	 */
	kvm->mmu_notifier_count++;
	for (; start < end; start += PAGE_SIZE)
		need_tlb_flush |= kvm_unmap_hva(kvm, start);
	spin_unlock(&kvm->mmu_lock);
	srcu_read_unlock(&kvm->srcu, idx);

	/* we have to flush the tlb before the pages can be freed */
	if (need_tlb_flush)
		kvm_flush_remote_tlbs(kvm);
}
static void kvm_mmu_notifier_invalidate_range_end(struct mmu_notifier *mn,
						  struct mm_struct *mm,
						  unsigned long start,
						  unsigned long end)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);

	spin_lock(&kvm->mmu_lock);
	/*
	 * This sequence increase will notify the kvm page fault that
	 * the page that is going to be mapped in the spte could have
	 * been freed.
	 */
	kvm->mmu_notifier_seq++;
	/*
	 * The above sequence increase must be visible before the
	 * below count decrease, but both values are read by the kvm
	 * page fault under the mmu_lock spinlock, so we don't need to
	 * add an smp_wmb() here in between the two.
	 */
	kvm->mmu_notifier_count--;
	spin_unlock(&kvm->mmu_lock);

	BUG_ON(kvm->mmu_notifier_count < 0);
}
static int kvm_mmu_notifier_clear_flush_young(struct mmu_notifier *mn,
					      struct mm_struct *mm,
					      unsigned long address)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
	int young, idx;

	idx = srcu_read_lock(&kvm->srcu);
	spin_lock(&kvm->mmu_lock);
	young = kvm_age_hva(kvm, address);
	spin_unlock(&kvm->mmu_lock);
	srcu_read_unlock(&kvm->srcu, idx);

	if (young)
		kvm_flush_remote_tlbs(kvm);

	return young;
}

static void kvm_mmu_notifier_release(struct mmu_notifier *mn,
				     struct mm_struct *mm)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
	kvm_arch_flush_shadow(kvm);
}

static const struct mmu_notifier_ops kvm_mmu_notifier_ops = {
	.invalidate_page	= kvm_mmu_notifier_invalidate_page,
	.invalidate_range_start	= kvm_mmu_notifier_invalidate_range_start,
	.invalidate_range_end	= kvm_mmu_notifier_invalidate_range_end,
	.clear_flush_young	= kvm_mmu_notifier_clear_flush_young,
	.change_pte		= kvm_mmu_notifier_change_pte,
	.release		= kvm_mmu_notifier_release,
};

static int kvm_init_mmu_notifier(struct kvm *kvm)
{
	kvm->mmu_notifier.ops = &kvm_mmu_notifier_ops;
	return mmu_notifier_register(&kvm->mmu_notifier, current->mm);
}

#else  /* !(CONFIG_MMU_NOTIFIER && KVM_ARCH_WANT_MMU_NOTIFIER) */

static int kvm_init_mmu_notifier(struct kvm *kvm)
{
	return 0;
}

#endif /* CONFIG_MMU_NOTIFIER && KVM_ARCH_WANT_MMU_NOTIFIER */
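
/*
 * Create and minimally initialize a new VM: enable hardware
 * virtualization on first use, allocate the memslot array and the
 * in-kernel I/O buses, register the mmu notifier, pin the creating
 * process's mm and put the VM on the global vm_list.
 */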
static struct kvm *kvm_create_vm(void)
{
	int r = 0, i;
	struct kvm *kvm = kvm_arch_create_vm();

	if (IS_ERR(kvm))
		goto out;

	r = hardware_enable_all();
	if (r)
		goto out_err_nodisable;

#ifdef CONFIG_HAVE_KVM_IRQCHIP
	INIT_HLIST_HEAD(&kvm->mask_notifier_list);
	INIT_HLIST_HEAD(&kvm->irq_ack_notifier_list);
#endif

	r = -ENOMEM;
	kvm->memslots = kzalloc(sizeof(struct kvm_memslots), GFP_KERNEL);
	if (!kvm->memslots)
		goto out_err;
	if (init_srcu_struct(&kvm->srcu))
		goto out_err;
	for (i = 0; i < KVM_NR_BUSES; i++) {
		kvm->buses[i] = kzalloc(sizeof(struct kvm_io_bus),
					GFP_KERNEL);
		if (!kvm->buses[i]) {
			cleanup_srcu_struct(&kvm->srcu);
			goto out_err;
		}
	}

	r = kvm_init_mmu_notifier(kvm);
	if (r) {
		cleanup_srcu_struct(&kvm->srcu);
		goto out_err;
	}

	kvm->mm = current->mm;
	atomic_inc(&kvm->mm->mm_count);
	spin_lock_init(&kvm->mmu_lock);
	raw_spin_lock_init(&kvm->requests_lock);
	kvm_eventfd_init(kvm);
	mutex_init(&kvm->lock);
	mutex_init(&kvm->irq_lock);
	mutex_init(&kvm->slots_lock);
	atomic_set(&kvm->users_count, 1);
	spin_lock(&kvm_lock);
	list_add(&kvm->vm_list, &vm_list);
	spin_unlock(&kvm_lock);
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
	kvm_coalesced_mmio_init(kvm);
#endif
out:
	return kvm;

out_err:
	hardware_disable_all();
out_err_nodisable:
	for (i = 0; i < KVM_NR_BUSES; i++)
		kfree(kvm->buses[i]);
	kfree(kvm->memslots);
	kfree(kvm);
	return ERR_PTR(r);
}
/*
 * Free any memory in @free but not in @dont.
 */
static void kvm_free_physmem_slot(struct kvm_memory_slot *free,
				  struct kvm_memory_slot *dont)
{
	int i;

	if (!dont || free->rmap != dont->rmap)
		vfree(free->rmap);

	if (!dont || free->dirty_bitmap != dont->dirty_bitmap)
		vfree(free->dirty_bitmap);

	for (i = 0; i < KVM_NR_PAGE_SIZES - 1; ++i) {
		if (!dont || free->lpage_info[i] != dont->lpage_info[i]) {
			vfree(free->lpage_info[i]);
			free->lpage_info[i] = NULL;
		}
	}

	free->npages = 0;
	free->dirty_bitmap = NULL;
	free->rmap = NULL;
}

void kvm_free_physmem(struct kvm *kvm)
{
	int i;
	struct kvm_memslots *slots = kvm->memslots;

	for (i = 0; i < slots->nmemslots; ++i)
		kvm_free_physmem_slot(&slots->memslots[i], NULL);

	kfree(kvm->memslots);
}

static void kvm_destroy_vm(struct kvm *kvm)
{
	int i;
	struct mm_struct *mm = kvm->mm;

	kvm_arch_sync_events(kvm);
	spin_lock(&kvm_lock);
	list_del(&kvm->vm_list);
	spin_unlock(&kvm_lock);
	kvm_free_irq_routing(kvm);
	for (i = 0; i < KVM_NR_BUSES; i++)
		kvm_io_bus_destroy(kvm->buses[i]);
	kvm_coalesced_mmio_free(kvm);
#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
	mmu_notifier_unregister(&kvm->mmu_notifier, kvm->mm);
#else
	kvm_arch_flush_shadow(kvm);
#endif
	kvm_arch_destroy_vm(kvm);
	hardware_disable_all();
	mmdrop(mm);
}

void kvm_get_kvm(struct kvm *kvm)
{
	atomic_inc(&kvm->users_count);
}
EXPORT_SYMBOL_GPL(kvm_get_kvm);

void kvm_put_kvm(struct kvm *kvm)
{
	if (atomic_dec_and_test(&kvm->users_count))
		kvm_destroy_vm(kvm);
}
EXPORT_SYMBOL_GPL(kvm_put_kvm);

static int kvm_vm_release(struct inode *inode, struct file *filp)
{
	struct kvm *kvm = filp->private_data;

	kvm_irqfd_release(kvm);

	kvm_put_kvm(kvm);
	return 0;
}
/*
 * Allocate some memory and give it an address in the guest physical address
 * space.
 *
 * Discontiguous memory is allowed, mostly for framebuffers.
 *
 * Must be called holding kvm->slots_lock for write; see
 * kvm_set_memory_region() below.
 */
int __kvm_set_memory_region(struct kvm *kvm,
			    struct kvm_userspace_memory_region *mem,
			    int user_alloc)
{
	int r, flush_shadow = 0;
	gfn_t base_gfn;
	unsigned long npages;
	unsigned long i;
	struct kvm_memory_slot *memslot;
	struct kvm_memory_slot old, new;
	struct kvm_memslots *slots, *old_memslots;

	r = -EINVAL;
	/* General sanity checks */
	if (mem->memory_size & (PAGE_SIZE - 1))
		goto out;
	if (mem->guest_phys_addr & (PAGE_SIZE - 1))
		goto out;
	if (user_alloc && (mem->userspace_addr & (PAGE_SIZE - 1)))
		goto out;
	if (mem->slot >= KVM_MEMORY_SLOTS + KVM_PRIVATE_MEM_SLOTS)
		goto out;
	if (mem->guest_phys_addr + mem->memory_size < mem->guest_phys_addr)
		goto out;

	memslot = &kvm->memslots->memslots[mem->slot];
	base_gfn = mem->guest_phys_addr >> PAGE_SHIFT;
	npages = mem->memory_size >> PAGE_SHIFT;

	if (!npages)
		mem->flags &= ~KVM_MEM_LOG_DIRTY_PAGES;

	new = old = *memslot;

	new.base_gfn = base_gfn;
	new.npages = npages;
	new.flags = mem->flags;

	/* Disallow changing a memory slot's size. */
	r = -EINVAL;
	if (npages && old.npages && npages != old.npages)
		goto out_free;

	/* Check for overlaps */
	r = -EEXIST;
	for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
		struct kvm_memory_slot *s = &kvm->memslots->memslots[i];

		if (s == memslot || !s->npages)
			continue;
		if (!((base_gfn + npages <= s->base_gfn) ||
		      (base_gfn >= s->base_gfn + s->npages)))
			goto out_free;
	}

	/* Free page dirty bitmap if unneeded */
	if (!(new.flags & KVM_MEM_LOG_DIRTY_PAGES))
		new.dirty_bitmap = NULL;

	r = -ENOMEM;

	/* Allocate if a slot is being created */
#ifndef CONFIG_S390
	if (npages && !new.rmap) {
		new.rmap = vmalloc(npages * sizeof(struct page *));

		if (!new.rmap)
			goto out_free;

		memset(new.rmap, 0, npages * sizeof(*new.rmap));

		new.user_alloc = user_alloc;
		new.userspace_addr = mem->userspace_addr;
	}
	if (!npages)
		goto skip_lpage;

	for (i = 0; i < KVM_NR_PAGE_SIZES - 1; ++i) {
		unsigned long ugfn;
		unsigned long j;
		int lpages;
		int level = i + 2;

		/* Avoid unused variable warning if no large pages */
		(void)level;

		if (new.lpage_info[i])
			continue;

		lpages = 1 + (base_gfn + npages - 1) /
			     KVM_PAGES_PER_HPAGE(level);
		lpages -= base_gfn / KVM_PAGES_PER_HPAGE(level);

		new.lpage_info[i] = vmalloc(lpages * sizeof(*new.lpage_info[i]));

		if (!new.lpage_info[i])
			goto out_free;

		memset(new.lpage_info[i], 0,
		       lpages * sizeof(*new.lpage_info[i]));

		if (base_gfn % KVM_PAGES_PER_HPAGE(level))
			new.lpage_info[i][0].write_count = 1;
		if ((base_gfn + npages) % KVM_PAGES_PER_HPAGE(level))
			new.lpage_info[i][lpages - 1].write_count = 1;
		ugfn = new.userspace_addr >> PAGE_SHIFT;
		/*
		 * If the gfn and userspace address are not aligned wrt each
		 * other, or if explicitly asked to, disable large page
		 * support for this slot
		 */
		if ((base_gfn ^ ugfn) & (KVM_PAGES_PER_HPAGE(level) - 1) ||
		    !largepages_enabled)
			for (j = 0; j < lpages; ++j)
				new.lpage_info[i][j].write_count = 1;
	}

skip_lpage:

	/* Allocate page dirty bitmap if needed */
	if ((new.flags & KVM_MEM_LOG_DIRTY_PAGES) && !new.dirty_bitmap) {
		unsigned dirty_bytes = ALIGN(npages, BITS_PER_LONG) / 8;

		new.dirty_bitmap = vmalloc(dirty_bytes);
		if (!new.dirty_bitmap)
			goto out_free;
		memset(new.dirty_bitmap, 0, dirty_bytes);
		/* destroy any largepage mappings for dirty tracking */
		if (old.npages)
			flush_shadow = 1;
	}
#else  /* not defined CONFIG_S390 */
	new.user_alloc = user_alloc;
	if (user_alloc)
		new.userspace_addr = mem->userspace_addr;
#endif /* not defined CONFIG_S390 */

	if (!npages) {
		r = -ENOMEM;
		slots = kzalloc(sizeof(struct kvm_memslots), GFP_KERNEL);
		if (!slots)
			goto out_free;
		memcpy(slots, kvm->memslots, sizeof(struct kvm_memslots));
		if (mem->slot >= slots->nmemslots)
			slots->nmemslots = mem->slot + 1;
		slots->memslots[mem->slot].flags |= KVM_MEMSLOT_INVALID;

		old_memslots = kvm->memslots;
		rcu_assign_pointer(kvm->memslots, slots);
		synchronize_srcu_expedited(&kvm->srcu);
		/*
		 * From this point no new shadow pages pointing to a deleted
		 * memslot will be created.
		 *
		 * validation of sp->gfn happens in:
		 *	- gfn_to_hva (kvm_read_guest, gfn_to_pfn)
		 *	- kvm_is_visible_gfn (mmu_check_roots)
		 */
		kvm_arch_flush_shadow(kvm);
		kfree(old_memslots);
	}

	r = kvm_arch_prepare_memory_region(kvm, &new, old, mem, user_alloc);
	if (r)
		goto out_free;

#ifdef CONFIG_DMAR
	/* map the pages in iommu page table */
	if (npages) {
		r = kvm_iommu_map_pages(kvm, &new);
		if (r)
			goto out_free;
	}
#endif

	r = -ENOMEM;
	slots = kzalloc(sizeof(struct kvm_memslots), GFP_KERNEL);
	if (!slots)
		goto out_free;
	memcpy(slots, kvm->memslots, sizeof(struct kvm_memslots));
	if (mem->slot >= slots->nmemslots)
		slots->nmemslots = mem->slot + 1;

	/* actual memory is freed via old in kvm_free_physmem_slot below */
	if (!npages) {
		new.rmap = NULL;
		new.dirty_bitmap = NULL;
		for (i = 0; i < KVM_NR_PAGE_SIZES - 1; ++i)
			new.lpage_info[i] = NULL;
	}

	slots->memslots[mem->slot] = new;
	old_memslots = kvm->memslots;
	rcu_assign_pointer(kvm->memslots, slots);
	synchronize_srcu_expedited(&kvm->srcu);

	kvm_arch_commit_memory_region(kvm, mem, old, user_alloc);

	kvm_free_physmem_slot(&old, &new);
	kfree(old_memslots);

	if (flush_shadow)
		kvm_arch_flush_shadow(kvm);

	return 0;

out_free:
	kvm_free_physmem_slot(&new, &old);
out:
	return r;
}
EXPORT_SYMBOL_GPL(__kvm_set_memory_region);

int kvm_set_memory_region(struct kvm *kvm,
			  struct kvm_userspace_memory_region *mem,
			  int user_alloc)
{
	int r;

	mutex_lock(&kvm->slots_lock);
	r = __kvm_set_memory_region(kvm, mem, user_alloc);
	mutex_unlock(&kvm->slots_lock);
	return r;
}
EXPORT_SYMBOL_GPL(kvm_set_memory_region);

int kvm_vm_ioctl_set_memory_region(struct kvm *kvm,
				   struct kvm_userspace_memory_region *mem,
				   int user_alloc)
{
	if (mem->slot >= KVM_MEMORY_SLOTS)
		return -EINVAL;
	return kvm_set_memory_region(kvm, mem, user_alloc);
}

int kvm_get_dirty_log(struct kvm *kvm,
		      struct kvm_dirty_log *log, int *is_dirty)
{
	struct kvm_memory_slot *memslot;
	int r, i;
	int n;
	unsigned long any = 0;

	r = -EINVAL;
	if (log->slot >= KVM_MEMORY_SLOTS)
		goto out;

	memslot = &kvm->memslots->memslots[log->slot];
	r = -ENOENT;
	if (!memslot->dirty_bitmap)
		goto out;

	n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;

	for (i = 0; !any && i < n/sizeof(long); ++i)
		any = memslot->dirty_bitmap[i];

	r = -EFAULT;
	if (copy_to_user(log->dirty_bitmap, memslot->dirty_bitmap, n))
		goto out;

	if (any)
		*is_dirty = 1;

	r = 0;
out:
	return r;
}

void kvm_disable_largepages(void)
{
	largepages_enabled = false;
}
EXPORT_SYMBOL_GPL(kvm_disable_largepages);

int is_error_page(struct page *page)
{
	return page == bad_page;
}
EXPORT_SYMBOL_GPL(is_error_page);

int is_error_pfn(pfn_t pfn)
{
	return pfn == bad_pfn;
}
EXPORT_SYMBOL_GPL(is_error_pfn);

static inline unsigned long bad_hva(void)
{
	return PAGE_OFFSET;
}

int kvm_is_error_hva(unsigned long addr)
{
	return addr == bad_hva();
}
EXPORT_SYMBOL_GPL(kvm_is_error_hva);
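
/*
 * Memslot lookup walks the array published through kvm->memslots.
 * Readers run under SRCU (hence the rcu_dereference()); writers swap in
 * a copy and call synchronize_srcu_expedited(), see
 * __kvm_set_memory_region() above.
 */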
struct kvm_memory_slot *gfn_to_memslot_unaliased(struct kvm *kvm, gfn_t gfn)
{
	int i;
	struct kvm_memslots *slots = rcu_dereference(kvm->memslots);

	for (i = 0; i < slots->nmemslots; ++i) {
		struct kvm_memory_slot *memslot = &slots->memslots[i];

		if (gfn >= memslot->base_gfn
		    && gfn < memslot->base_gfn + memslot->npages)
			return memslot;
	}
	return NULL;
}
EXPORT_SYMBOL_GPL(gfn_to_memslot_unaliased);

struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn)
{
	gfn = unalias_gfn(kvm, gfn);
	return gfn_to_memslot_unaliased(kvm, gfn);
}

int kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn)
{
	int i;
	struct kvm_memslots *slots = rcu_dereference(kvm->memslots);

	gfn = unalias_gfn_instantiation(kvm, gfn);
	for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
		struct kvm_memory_slot *memslot = &slots->memslots[i];

		if (memslot->flags & KVM_MEMSLOT_INVALID)
			continue;

		if (gfn >= memslot->base_gfn
		    && gfn < memslot->base_gfn + memslot->npages)
			return 1;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_is_visible_gfn);

unsigned long kvm_host_page_size(struct kvm *kvm, gfn_t gfn)
{
	struct vm_area_struct *vma;
	unsigned long addr, size;

	size = PAGE_SIZE;

	addr = gfn_to_hva(kvm, gfn);
	if (kvm_is_error_hva(addr))
		return PAGE_SIZE;

	down_read(&current->mm->mmap_sem);
	vma = find_vma(current->mm, addr);
	if (!vma)
		goto out;

	size = vma_kernel_pagesize(vma);

out:
	up_read(&current->mm->mmap_sem);

	return size;
}

int memslot_id(struct kvm *kvm, gfn_t gfn)
{
	int i;
	struct kvm_memslots *slots = rcu_dereference(kvm->memslots);
	struct kvm_memory_slot *memslot = NULL;

	gfn = unalias_gfn(kvm, gfn);
	for (i = 0; i < slots->nmemslots; ++i) {
		memslot = &slots->memslots[i];

		if (gfn >= memslot->base_gfn
		    && gfn < memslot->base_gfn + memslot->npages)
			break;
	}

	return memslot - slots->memslots;
}

unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn)
{
	struct kvm_memory_slot *slot;

	gfn = unalias_gfn_instantiation(kvm, gfn);
	slot = gfn_to_memslot_unaliased(kvm, gfn);
	if (!slot || slot->flags & KVM_MEMSLOT_INVALID)
		return bad_hva();
	return (slot->userspace_addr + (gfn - slot->base_gfn) * PAGE_SIZE);
}
EXPORT_SYMBOL_GPL(gfn_to_hva);
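
/*
 * Translate a host virtual address into a host page frame number.  The
 * fast path pins the page with get_user_pages_fast(); if that fails,
 * the address may belong to a VM_PFNMAP mapping (e.g. device memory),
 * for which the pfn is computed directly from the vma.  Anything else
 * yields bad_page.
 */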
static pfn_t hva_to_pfn(struct kvm *kvm, unsigned long addr)
{
	struct page *page[1];
	int npages;
	pfn_t pfn;

	might_sleep();

	npages = get_user_pages_fast(addr, 1, 1, page);

	if (unlikely(npages != 1)) {
		struct vm_area_struct *vma;

		down_read(&current->mm->mmap_sem);
		vma = find_vma(current->mm, addr);

		if (vma == NULL || addr < vma->vm_start ||
		    !(vma->vm_flags & VM_PFNMAP)) {
			up_read(&current->mm->mmap_sem);
			get_page(bad_page);
			return page_to_pfn(bad_page);
		}

		pfn = ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
		up_read(&current->mm->mmap_sem);
		BUG_ON(!kvm_is_mmio_pfn(pfn));
	} else
		pfn = page_to_pfn(page[0]);

	return pfn;
}

pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn)
{
	unsigned long addr;

	addr = gfn_to_hva(kvm, gfn);
	if (kvm_is_error_hva(addr)) {
		get_page(bad_page);
		return page_to_pfn(bad_page);
	}

	return hva_to_pfn(kvm, addr);
}
EXPORT_SYMBOL_GPL(gfn_to_pfn);

static unsigned long gfn_to_hva_memslot(struct kvm_memory_slot *slot, gfn_t gfn)
{
	return (slot->userspace_addr + (gfn - slot->base_gfn) * PAGE_SIZE);
}

pfn_t gfn_to_pfn_memslot(struct kvm *kvm,
			 struct kvm_memory_slot *slot, gfn_t gfn)
{
	unsigned long addr = gfn_to_hva_memslot(slot, gfn);
	return hva_to_pfn(kvm, addr);
}

struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn)
{
	pfn_t pfn;

	pfn = gfn_to_pfn(kvm, gfn);
	if (!kvm_is_mmio_pfn(pfn))
		return pfn_to_page(pfn);

	WARN_ON(kvm_is_mmio_pfn(pfn));

	get_page(bad_page);
	return bad_page;
}
EXPORT_SYMBOL_GPL(gfn_to_page);

void kvm_release_page_clean(struct page *page)
{
	kvm_release_pfn_clean(page_to_pfn(page));
}
EXPORT_SYMBOL_GPL(kvm_release_page_clean);

void kvm_release_pfn_clean(pfn_t pfn)
{
	if (!kvm_is_mmio_pfn(pfn))
		put_page(pfn_to_page(pfn));
}
EXPORT_SYMBOL_GPL(kvm_release_pfn_clean);

void kvm_release_page_dirty(struct page *page)
{
	kvm_release_pfn_dirty(page_to_pfn(page));
}
EXPORT_SYMBOL_GPL(kvm_release_page_dirty);

void kvm_release_pfn_dirty(pfn_t pfn)
{
	kvm_set_pfn_dirty(pfn);
	kvm_release_pfn_clean(pfn);
}
EXPORT_SYMBOL_GPL(kvm_release_pfn_dirty);

void kvm_set_page_dirty(struct page *page)
{
	kvm_set_pfn_dirty(page_to_pfn(page));
}
EXPORT_SYMBOL_GPL(kvm_set_page_dirty);

void kvm_set_pfn_dirty(pfn_t pfn)
{
	if (!kvm_is_mmio_pfn(pfn)) {
		struct page *page = pfn_to_page(pfn);
		if (!PageReserved(page))
			SetPageDirty(page);
	}
}
EXPORT_SYMBOL_GPL(kvm_set_pfn_dirty);

void kvm_set_pfn_accessed(pfn_t pfn)
{
	if (!kvm_is_mmio_pfn(pfn))
		mark_page_accessed(pfn_to_page(pfn));
}
EXPORT_SYMBOL_GPL(kvm_set_pfn_accessed);

void kvm_get_pfn(pfn_t pfn)
{
	if (!kvm_is_mmio_pfn(pfn))
		get_page(pfn_to_page(pfn));
}
EXPORT_SYMBOL_GPL(kvm_get_pfn);
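
/*
 * Guest memory accessors.  next_segment() clips a length so that each
 * copy stays within one page; kvm_read_guest()/kvm_write_guest() then
 * loop gfn by gfn, translating each through gfn_to_hva().
 */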
static int next_segment(unsigned long len, int offset)
{
	if (len > PAGE_SIZE - offset)
		return PAGE_SIZE - offset;
	else
		return len;
}

int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
			int len)
{
	int r;
	unsigned long addr;

	addr = gfn_to_hva(kvm, gfn);
	if (kvm_is_error_hva(addr))
		return -EFAULT;
	r = copy_from_user(data, (void __user *)addr + offset, len);
	if (r)
		return -EFAULT;
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_read_guest_page);

int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len)
{
	gfn_t gfn = gpa >> PAGE_SHIFT;
	int seg;
	int offset = offset_in_page(gpa);
	int ret;

	while ((seg = next_segment(len, offset)) != 0) {
		ret = kvm_read_guest_page(kvm, gfn, data, offset, seg);
		if (ret < 0)
			return ret;
		offset = 0;
		len -= seg;
		data += seg;
		++gfn;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_read_guest);

int kvm_read_guest_atomic(struct kvm *kvm, gpa_t gpa, void *data,
			  unsigned long len)
{
	int r;
	unsigned long addr;
	gfn_t gfn = gpa >> PAGE_SHIFT;
	int offset = offset_in_page(gpa);

	addr = gfn_to_hva(kvm, gfn);
	if (kvm_is_error_hva(addr))
		return -EFAULT;
	pagefault_disable();
	r = __copy_from_user_inatomic(data, (void __user *)addr + offset, len);
	pagefault_enable();
	if (r)
		return -EFAULT;
	return 0;
}
EXPORT_SYMBOL(kvm_read_guest_atomic);

int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, const void *data,
			 int offset, int len)
{
	int r;
	unsigned long addr;

	addr = gfn_to_hva(kvm, gfn);
	if (kvm_is_error_hva(addr))
		return -EFAULT;
	r = copy_to_user((void __user *)addr + offset, data, len);
	if (r)
		return -EFAULT;
	mark_page_dirty(kvm, gfn);
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_write_guest_page);

int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
		    unsigned long len)
{
	gfn_t gfn = gpa >> PAGE_SHIFT;
	int seg;
	int offset = offset_in_page(gpa);
	int ret;

	while ((seg = next_segment(len, offset)) != 0) {
		ret = kvm_write_guest_page(kvm, gfn, data, offset, seg);
		if (ret < 0)
			return ret;
		offset = 0;
		len -= seg;
		data += seg;
		++gfn;
	}
	return 0;
}

int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len)
{
	return kvm_write_guest_page(kvm, gfn, empty_zero_page, offset, len);
}
EXPORT_SYMBOL_GPL(kvm_clear_guest_page);

int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len)
{
	gfn_t gfn = gpa >> PAGE_SHIFT;
	int seg;
	int offset = offset_in_page(gpa);
	int ret;

	while ((seg = next_segment(len, offset)) != 0) {
		ret = kvm_clear_guest_page(kvm, gfn, offset, seg);
		if (ret < 0)
			return ret;
		offset = 0;
		len -= seg;
		++gfn;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_clear_guest);
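
/*
 * Record that @gfn was written, for dirty logging.  The bitmap is only
 * allocated for slots with KVM_MEM_LOG_DIRTY_PAGES set and is copied
 * to userspace by kvm_get_dirty_log().
 */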
void mark_page_dirty(struct kvm *kvm, gfn_t gfn)
{
	struct kvm_memory_slot *memslot;

	gfn = unalias_gfn(kvm, gfn);
	memslot = gfn_to_memslot_unaliased(kvm, gfn);
	if (memslot && memslot->dirty_bitmap) {
		unsigned long rel_gfn = gfn - memslot->base_gfn;

		/* avoid RMW */
		if (!generic_test_le_bit(rel_gfn, memslot->dirty_bitmap))
			generic___set_le_bit(rel_gfn, memslot->dirty_bitmap);
	}
}

/*
 * The vCPU has executed a HLT instruction with in-kernel mode enabled.
 */
void kvm_vcpu_block(struct kvm_vcpu *vcpu)
{
	DEFINE_WAIT(wait);

	for (;;) {
		prepare_to_wait(&vcpu->wq, &wait, TASK_INTERRUPTIBLE);

		if (kvm_arch_vcpu_runnable(vcpu)) {
			set_bit(KVM_REQ_UNHALT, &vcpu->requests);
			break;
		}
		if (kvm_cpu_has_pending_timer(vcpu))
			break;
		if (signal_pending(current))
			break;

		schedule();
	}

	finish_wait(&vcpu->wq, &wait);
}
void kvm_resched(struct kvm_vcpu *vcpu)
{
	if (!need_resched())
		return;
	cond_resched();
}
EXPORT_SYMBOL_GPL(kvm_resched);

void kvm_vcpu_on_spin(struct kvm_vcpu *vcpu)
{
	ktime_t expires;
	DEFINE_WAIT(wait);

	prepare_to_wait(&vcpu->wq, &wait, TASK_INTERRUPTIBLE);

	/* Sleep for 100 us, and hope lock-holder got scheduled */
	expires = ktime_add_ns(ktime_get(), 100000UL);
	schedule_hrtimeout(&expires, HRTIMER_MODE_ABS);

	finish_wait(&vcpu->wq, &wait);
}
EXPORT_SYMBOL_GPL(kvm_vcpu_on_spin);
static int kvm_vcpu_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct kvm_vcpu *vcpu = vma->vm_file->private_data;
	struct page *page;

	if (vmf->pgoff == 0)
		page = virt_to_page(vcpu->run);
#ifdef CONFIG_X86
	else if (vmf->pgoff == KVM_PIO_PAGE_OFFSET)
		page = virt_to_page(vcpu->arch.pio_data);
#endif
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
	else if (vmf->pgoff == KVM_COALESCED_MMIO_PAGE_OFFSET)
		page = virt_to_page(vcpu->kvm->coalesced_mmio_ring);
#endif
	else
		return VM_FAULT_SIGBUS;
	get_page(page);
	vmf->page = page;
	return 0;
}

static const struct vm_operations_struct kvm_vcpu_vm_ops = {
	.fault = kvm_vcpu_fault,
};

static int kvm_vcpu_mmap(struct file *file, struct vm_area_struct *vma)
{
	vma->vm_ops = &kvm_vcpu_vm_ops;
	return 0;
}

static int kvm_vcpu_release(struct inode *inode, struct file *filp)
{
	struct kvm_vcpu *vcpu = filp->private_data;

	kvm_put_kvm(vcpu->kvm);
	return 0;
}

static struct file_operations kvm_vcpu_fops = {
	.release	= kvm_vcpu_release,
	.unlocked_ioctl = kvm_vcpu_ioctl,
	.compat_ioctl	= kvm_vcpu_ioctl,
	.mmap		= kvm_vcpu_mmap,
};

/*
 * Allocates an inode for the vcpu.
 */
static int create_vcpu_fd(struct kvm_vcpu *vcpu)
{
	return anon_inode_getfd("kvm-vcpu", &kvm_vcpu_fops, vcpu, O_RDWR);
}
/*
 * Creates some virtual cpus.  Good luck creating more than one.
 */
static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, u32 id)
{
	int r;
	struct kvm_vcpu *vcpu, *v;

	vcpu = kvm_arch_vcpu_create(kvm, id);
	if (IS_ERR(vcpu))
		return PTR_ERR(vcpu);

	preempt_notifier_init(&vcpu->preempt_notifier, &kvm_preempt_ops);

	r = kvm_arch_vcpu_setup(vcpu);
	if (r)
		return r;

	mutex_lock(&kvm->lock);
	if (atomic_read(&kvm->online_vcpus) == KVM_MAX_VCPUS) {
		r = -EINVAL;
		goto vcpu_destroy;
	}

	kvm_for_each_vcpu(r, v, kvm)
		if (v->vcpu_id == id) {
			r = -EEXIST;
			goto vcpu_destroy;
		}

	BUG_ON(kvm->vcpus[atomic_read(&kvm->online_vcpus)]);

	/* Now it's all set up, let userspace reach it */
	kvm_get_kvm(kvm);
	r = create_vcpu_fd(vcpu);
	if (r < 0) {
		kvm_put_kvm(kvm);
		goto vcpu_destroy;
	}

	kvm->vcpus[atomic_read(&kvm->online_vcpus)] = vcpu;
	smp_wmb();
	atomic_inc(&kvm->online_vcpus);

#ifdef CONFIG_KVM_APIC_ARCHITECTURE
	if (kvm->bsp_vcpu_id == id)
		kvm->bsp_vcpu = vcpu;
#endif
	mutex_unlock(&kvm->lock);
	return r;

vcpu_destroy:
	mutex_unlock(&kvm->lock);
	kvm_arch_vcpu_destroy(vcpu);
	return r;
}
static int kvm_vcpu_ioctl_set_sigmask(struct kvm_vcpu *vcpu, sigset_t *sigset)
{
	if (sigset) {
		sigdelsetmask(sigset, sigmask(SIGKILL)|sigmask(SIGSTOP));
		vcpu->sigset_active = 1;
		vcpu->sigset = *sigset;
	} else
		vcpu->sigset_active = 0;
	return 0;
}
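
/*
 * Dispatcher for the vcpu file descriptor ioctls.  Generic commands are
 * handled here; anything unrecognized falls through to
 * kvm_arch_vcpu_ioctl().  The mm check rejects callers that are not the
 * process which created the VM.
 */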
static long kvm_vcpu_ioctl(struct file *filp,
			   unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	int r;
	struct kvm_fpu *fpu = NULL;
	struct kvm_sregs *kvm_sregs = NULL;

	if (vcpu->kvm->mm != current->mm)
		return -EIO;
	switch (ioctl) {
	case KVM_RUN:
		r = -EINVAL;
		if (arg)
			goto out;
		r = kvm_arch_vcpu_ioctl_run(vcpu, vcpu->run);
		break;
	case KVM_GET_REGS: {
		struct kvm_regs *kvm_regs;

		r = -ENOMEM;
		kvm_regs = kzalloc(sizeof(struct kvm_regs), GFP_KERNEL);
		if (!kvm_regs)
			goto out;
		r = kvm_arch_vcpu_ioctl_get_regs(vcpu, kvm_regs);
		if (r)
			goto out_free1;
		r = -EFAULT;
		if (copy_to_user(argp, kvm_regs, sizeof(struct kvm_regs)))
			goto out_free1;
		r = 0;
out_free1:
		kfree(kvm_regs);
		break;
	}
	case KVM_SET_REGS: {
		struct kvm_regs *kvm_regs;

		r = -ENOMEM;
		kvm_regs = kzalloc(sizeof(struct kvm_regs), GFP_KERNEL);
		if (!kvm_regs)
			goto out;
		r = -EFAULT;
		if (copy_from_user(kvm_regs, argp, sizeof(struct kvm_regs)))
			goto out_free2;
		r = kvm_arch_vcpu_ioctl_set_regs(vcpu, kvm_regs);
		if (r)
			goto out_free2;
		r = 0;
out_free2:
		kfree(kvm_regs);
		break;
	}
	case KVM_GET_SREGS: {
		kvm_sregs = kzalloc(sizeof(struct kvm_sregs), GFP_KERNEL);
		r = -ENOMEM;
		if (!kvm_sregs)
			goto out;
		r = kvm_arch_vcpu_ioctl_get_sregs(vcpu, kvm_sregs);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, kvm_sregs, sizeof(struct kvm_sregs)))
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_SREGS: {
		kvm_sregs = kmalloc(sizeof(struct kvm_sregs), GFP_KERNEL);
		r = -ENOMEM;
		if (!kvm_sregs)
			goto out;
		r = -EFAULT;
		if (copy_from_user(kvm_sregs, argp, sizeof(struct kvm_sregs)))
			goto out;
		r = kvm_arch_vcpu_ioctl_set_sregs(vcpu, kvm_sregs);
		if (r)
			goto out;
		r = 0;
		break;
	}
	case KVM_GET_MP_STATE: {
		struct kvm_mp_state mp_state;

		r = kvm_arch_vcpu_ioctl_get_mpstate(vcpu, &mp_state);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, &mp_state, sizeof mp_state))
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_MP_STATE: {
		struct kvm_mp_state mp_state;

		r = -EFAULT;
		if (copy_from_user(&mp_state, argp, sizeof mp_state))
			goto out;
		r = kvm_arch_vcpu_ioctl_set_mpstate(vcpu, &mp_state);
		if (r)
			goto out;
		r = 0;
		break;
	}
	case KVM_TRANSLATE: {
		struct kvm_translation tr;

		r = -EFAULT;
		if (copy_from_user(&tr, argp, sizeof tr))
			goto out;
		r = kvm_arch_vcpu_ioctl_translate(vcpu, &tr);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, &tr, sizeof tr))
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_GUEST_DEBUG: {
		struct kvm_guest_debug dbg;

		r = -EFAULT;
		if (copy_from_user(&dbg, argp, sizeof dbg))
			goto out;
		r = kvm_arch_vcpu_ioctl_set_guest_debug(vcpu, &dbg);
		if (r)
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_SIGNAL_MASK: {
		struct kvm_signal_mask __user *sigmask_arg = argp;
		struct kvm_signal_mask kvm_sigmask;
		sigset_t sigset, *p;

		p = NULL;
		if (argp) {
			r = -EFAULT;
			if (copy_from_user(&kvm_sigmask, argp,
					   sizeof kvm_sigmask))
				goto out;
			r = -EINVAL;
			if (kvm_sigmask.len != sizeof sigset)
				goto out;
			r = -EFAULT;
			if (copy_from_user(&sigset, sigmask_arg->sigset,
					   sizeof sigset))
				goto out;
			p = &sigset;
		}
		/* p stays NULL when no mask was supplied */
		r = kvm_vcpu_ioctl_set_sigmask(vcpu, p);
		break;
	}
	case KVM_GET_FPU: {
		fpu = kzalloc(sizeof(struct kvm_fpu), GFP_KERNEL);
		r = -ENOMEM;
		if (!fpu)
			goto out;
		r = kvm_arch_vcpu_ioctl_get_fpu(vcpu, fpu);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, fpu, sizeof(struct kvm_fpu)))
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_FPU: {
		fpu = kmalloc(sizeof(struct kvm_fpu), GFP_KERNEL);
		r = -ENOMEM;
		if (!fpu)
			goto out;
		r = -EFAULT;
		if (copy_from_user(fpu, argp, sizeof(struct kvm_fpu)))
			goto out;
		r = kvm_arch_vcpu_ioctl_set_fpu(vcpu, fpu);
		if (r)
			goto out;
		r = 0;
		break;
	}
	default:
		r = kvm_arch_vcpu_ioctl(filp, ioctl, arg);
	}
out:
	kfree(fpu);
	kfree(kvm_sregs);
	return r;
}
static long kvm_vm_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm = filp->private_data;
	void __user *argp = (void __user *)arg;
	int r;

	if (kvm->mm != current->mm)
		return -EIO;
	switch (ioctl) {
	case KVM_CREATE_VCPU:
		r = kvm_vm_ioctl_create_vcpu(kvm, arg);
		if (r < 0)
			goto out;
		break;
	case KVM_SET_USER_MEMORY_REGION: {
		struct kvm_userspace_memory_region kvm_userspace_mem;

		r = -EFAULT;
		if (copy_from_user(&kvm_userspace_mem, argp,
				   sizeof kvm_userspace_mem))
			goto out;

		r = kvm_vm_ioctl_set_memory_region(kvm, &kvm_userspace_mem, 1);
		if (r)
			goto out;
		break;
	}
	case KVM_GET_DIRTY_LOG: {
		struct kvm_dirty_log log;

		r = -EFAULT;
		if (copy_from_user(&log, argp, sizeof log))
			goto out;
		r = kvm_vm_ioctl_get_dirty_log(kvm, &log);
		if (r)
			goto out;
		break;
	}
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
	case KVM_REGISTER_COALESCED_MMIO: {
		struct kvm_coalesced_mmio_zone zone;

		r = -EFAULT;
		if (copy_from_user(&zone, argp, sizeof zone))
			goto out;
		r = -ENXIO;
		r = kvm_vm_ioctl_register_coalesced_mmio(kvm, &zone);
		if (r)
			goto out;
		r = 0;
		break;
	}
	case KVM_UNREGISTER_COALESCED_MMIO: {
		struct kvm_coalesced_mmio_zone zone;

		r = -EFAULT;
		if (copy_from_user(&zone, argp, sizeof zone))
			goto out;
		r = -ENXIO;
		r = kvm_vm_ioctl_unregister_coalesced_mmio(kvm, &zone);
		if (r)
			goto out;
		r = 0;
		break;
	}
#endif
	case KVM_IRQFD: {
		struct kvm_irqfd data;

		r = -EFAULT;
		if (copy_from_user(&data, argp, sizeof data))
			goto out;
		r = kvm_irqfd(kvm, data.fd, data.gsi, data.flags);
		break;
	}
	case KVM_IOEVENTFD: {
		struct kvm_ioeventfd data;

		r = -EFAULT;
		if (copy_from_user(&data, argp, sizeof data))
			goto out;
		r = kvm_ioeventfd(kvm, &data);
		break;
	}
#ifdef CONFIG_KVM_APIC_ARCHITECTURE
	case KVM_SET_BOOT_CPU_ID:
		r = 0;
		mutex_lock(&kvm->lock);
		if (atomic_read(&kvm->online_vcpus) != 0)
			r = -EBUSY;
		else
			kvm->bsp_vcpu_id = arg;
		mutex_unlock(&kvm->lock);
		break;
#endif
	default:
		r = kvm_arch_vm_ioctl(filp, ioctl, arg);
		if (r == -ENOTTY)
			r = kvm_vm_ioctl_assigned_device(kvm, ioctl, arg);
	}
out:
	return r;
}
#ifdef CONFIG_COMPAT
struct compat_kvm_dirty_log {
	__u32 slot;
	__u32 padding1;
	union {
		compat_uptr_t dirty_bitmap; /* one bit per page */
		__u64 padding2;
	};
};

static long kvm_vm_compat_ioctl(struct file *filp,
				unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm = filp->private_data;
	int r;

	if (kvm->mm != current->mm)
		return -EIO;
	switch (ioctl) {
	case KVM_GET_DIRTY_LOG: {
		struct compat_kvm_dirty_log compat_log;
		struct kvm_dirty_log log;

		r = -EFAULT;
		if (copy_from_user(&compat_log, (void __user *)arg,
				   sizeof(compat_log)))
			goto out;
		log.slot	 = compat_log.slot;
		log.padding1	 = compat_log.padding1;
		log.padding2	 = compat_log.padding2;
		log.dirty_bitmap = compat_ptr(compat_log.dirty_bitmap);

		r = kvm_vm_ioctl_get_dirty_log(kvm, &log);
		if (r)
			goto out;
		break;
	}
	default:
		r = kvm_vm_ioctl(filp, ioctl, arg);
	}

out:
	return r;
}
#endif
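
/*
 * Fault handler for mmap() on the VM file descriptor: the file offset
 * is interpreted as a gfn and the backing host page is pinned and
 * inserted into the mapping.
 */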
static int kvm_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct page *page[1];
	unsigned long addr;
	int npages;
	gfn_t gfn = vmf->pgoff;
	struct kvm *kvm = vma->vm_file->private_data;

	addr = gfn_to_hva(kvm, gfn);
	if (kvm_is_error_hva(addr))
		return VM_FAULT_SIGBUS;

	npages = get_user_pages(current, current->mm, addr, 1, 1, 0, page,
				NULL);
	if (unlikely(npages != 1))
		return VM_FAULT_SIGBUS;

	vmf->page = page[0];
	return 0;
}

static const struct vm_operations_struct kvm_vm_vm_ops = {
	.fault = kvm_vm_fault,
};

static int kvm_vm_mmap(struct file *file, struct vm_area_struct *vma)
{
	vma->vm_ops = &kvm_vm_vm_ops;
	return 0;
}

static struct file_operations kvm_vm_fops = {
	.release	= kvm_vm_release,
	.unlocked_ioctl = kvm_vm_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= kvm_vm_compat_ioctl,
#endif
	.mmap		= kvm_vm_mmap,
};

static int kvm_dev_ioctl_create_vm(void)
{
	int fd;
	struct kvm *kvm;

	kvm = kvm_create_vm();
	if (IS_ERR(kvm))
		return PTR_ERR(kvm);
	fd = anon_inode_getfd("kvm-vm", &kvm_vm_fops, kvm, O_RDWR);
	if (fd < 0)
		kvm_put_kvm(kvm);

	return fd;
}
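
/*
 * Capabilities implemented in generic code are answered here; anything
 * else is forwarded to the architecture's
 * kvm_dev_ioctl_check_extension().
 */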
  1501. static long kvm_dev_ioctl_check_extension_generic(long arg)
  1502. {
  1503. switch (arg) {
  1504. case KVM_CAP_USER_MEMORY:
  1505. case KVM_CAP_DESTROY_MEMORY_REGION_WORKS:
  1506. case KVM_CAP_JOIN_MEMORY_REGIONS_WORKS:
  1507. #ifdef CONFIG_KVM_APIC_ARCHITECTURE
  1508. case KVM_CAP_SET_BOOT_CPU_ID:
  1509. #endif
  1510. case KVM_CAP_INTERNAL_ERROR_DATA:
  1511. return 1;
  1512. #ifdef CONFIG_HAVE_KVM_IRQCHIP
  1513. case KVM_CAP_IRQ_ROUTING:
  1514. return KVM_MAX_IRQ_ROUTES;
  1515. #endif
  1516. default:
  1517. break;
  1518. }
  1519. return kvm_dev_ioctl_check_extension(arg);
  1520. }

static long kvm_dev_ioctl(struct file *filp,
			  unsigned int ioctl, unsigned long arg)
{
	long r = -EINVAL;

	switch (ioctl) {
	case KVM_GET_API_VERSION:
		r = -EINVAL;
		if (arg)
			goto out;
		r = KVM_API_VERSION;
		break;
	case KVM_CREATE_VM:
		r = -EINVAL;
		if (arg)
			goto out;
		r = kvm_dev_ioctl_create_vm();
		break;
	case KVM_CHECK_EXTENSION:
		r = kvm_dev_ioctl_check_extension_generic(arg);
		break;
	case KVM_GET_VCPU_MMAP_SIZE:
		r = -EINVAL;
		if (arg)
			goto out;
		r = PAGE_SIZE;     /* struct kvm_run */
#ifdef CONFIG_X86
		r += PAGE_SIZE;    /* pio data page */
#endif
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
		r += PAGE_SIZE;    /* coalesced mmio ring page */
#endif
		break;
	case KVM_TRACE_ENABLE:
	case KVM_TRACE_PAUSE:
	case KVM_TRACE_DISABLE:
		r = -EOPNOTSUPP;
		break;
	default:
		return kvm_arch_dev_ioctl(filp, ioctl, arg);
	}
out:
	return r;
}
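
/*
 * Illustrative userspace sketch (not part of kvm_main.c, kept out of
 * the build with #if 0): a minimal client of the /dev/kvm ioctls
 * dispatched by kvm_dev_ioctl() above.  The helper name kvm_smoke_test
 * is invented for the example, and error/fd cleanup is abbreviated.
 */
#if 0
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int kvm_smoke_test(void)
{
	int kvm_fd, vm_fd;

	kvm_fd = open("/dev/kvm", O_RDWR);
	if (kvm_fd < 0)
		return -1;

	/* arg must be zero; the kernel replies with KVM_API_VERSION. */
	if (ioctl(kvm_fd, KVM_GET_API_VERSION, 0) != KVM_API_VERSION)
		return -1;

	/* Generic capabilities report 1 when present, 0 otherwise. */
	if (!ioctl(kvm_fd, KVM_CHECK_EXTENSION, KVM_CAP_USER_MEMORY))
		return -1;

	/* Returns a new anon-inode fd backed by kvm_vm_fops. */
	vm_fd = ioctl(kvm_fd, KVM_CREATE_VM, 0);
	if (vm_fd < 0)
		return -1;

	printf("vcpu mmap size: %d\n",
	       ioctl(kvm_fd, KVM_GET_VCPU_MMAP_SIZE, 0));
	return vm_fd;
}
#endif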

static struct file_operations kvm_chardev_ops = {
	.unlocked_ioctl = kvm_dev_ioctl,
	.compat_ioctl = kvm_dev_ioctl,
};

static struct miscdevice kvm_dev = {
	KVM_MINOR,
	"kvm",
	&kvm_chardev_ops,
};

static void hardware_enable(void *junk)
{
	int cpu = raw_smp_processor_id();
	int r;

	if (cpumask_test_cpu(cpu, cpus_hardware_enabled))
		return;

	cpumask_set_cpu(cpu, cpus_hardware_enabled);

	r = kvm_arch_hardware_enable(NULL);

	if (r) {
		cpumask_clear_cpu(cpu, cpus_hardware_enabled);
		atomic_inc(&hardware_enable_failed);
		printk(KERN_INFO "kvm: enabling virtualization on "
				 "CPU%d failed\n", cpu);
	}
}

static void hardware_disable(void *junk)
{
	int cpu = raw_smp_processor_id();

	if (!cpumask_test_cpu(cpu, cpus_hardware_enabled))
		return;
	cpumask_clear_cpu(cpu, cpus_hardware_enabled);
	kvm_arch_hardware_disable(NULL);
}

static void hardware_disable_all_nolock(void)
{
	BUG_ON(!kvm_usage_count);

	kvm_usage_count--;
	if (!kvm_usage_count)
		on_each_cpu(hardware_disable, NULL, 1);
}

static void hardware_disable_all(void)
{
	spin_lock(&kvm_lock);
	hardware_disable_all_nolock();
	spin_unlock(&kvm_lock);
}

static int hardware_enable_all(void)
{
	int r = 0;

	spin_lock(&kvm_lock);

	kvm_usage_count++;
	if (kvm_usage_count == 1) {
		atomic_set(&hardware_enable_failed, 0);
		on_each_cpu(hardware_enable, NULL, 1);

		if (atomic_read(&hardware_enable_failed)) {
			hardware_disable_all_nolock();
			r = -EBUSY;
		}
	}

	spin_unlock(&kvm_lock);

	return r;
}
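
/*
 * kvm_usage_count counts live VMs under kvm_lock: creating the first
 * VM enables the virtualization extensions on every online CPU, and
 * destroying the last one disables them again, so the extensions are
 * only held while guests actually exist.
 */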

static int kvm_cpu_hotplug(struct notifier_block *notifier, unsigned long val,
			   void *v)
{
	int cpu = (long)v;

	if (!kvm_usage_count)
		return NOTIFY_OK;

	val &= ~CPU_TASKS_FROZEN;
	switch (val) {
	case CPU_DYING:
		printk(KERN_INFO "kvm: disabling virtualization on CPU%d\n",
		       cpu);
		hardware_disable(NULL);
		break;
	case CPU_UP_CANCELED:
		printk(KERN_INFO "kvm: disabling virtualization on CPU%d\n",
		       cpu);
		smp_call_function_single(cpu, hardware_disable, NULL, 1);
		break;
	case CPU_ONLINE:
		printk(KERN_INFO "kvm: enabling virtualization on CPU%d\n",
		       cpu);
		smp_call_function_single(cpu, hardware_enable, NULL, 1);
		break;
	}
	return NOTIFY_OK;
}
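
/*
 * CPU_DYING is delivered on the dying CPU itself, so hardware_disable()
 * can run directly; CPU_UP_CANCELED and CPU_ONLINE arrive on another
 * CPU and must reach the affected one via smp_call_function_single().
 */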

asmlinkage void kvm_handle_fault_on_reboot(void)
{
	if (kvm_rebooting)
		/* spin while reset goes on */
		while (true)
			;
	/* Fault while not rebooting.  We want the trace. */
	BUG();
}
EXPORT_SYMBOL_GPL(kvm_handle_fault_on_reboot);

static int kvm_reboot(struct notifier_block *notifier, unsigned long val,
		      void *v)
{
	/*
	 * Some (well, at least mine) BIOSes hang on reboot if
	 * in vmx root mode.
	 *
	 * Intel TXT also requires VMX to be off on all CPUs when the
	 * system shuts down.
	 */
	printk(KERN_INFO "kvm: exiting hardware virtualization\n");
	kvm_rebooting = true;
	on_each_cpu(hardware_disable, NULL, 1);
	return NOTIFY_OK;
}

static struct notifier_block kvm_reboot_notifier = {
	.notifier_call = kvm_reboot,
	.priority = 0,
};

static void kvm_io_bus_destroy(struct kvm_io_bus *bus)
{
	int i;

	for (i = 0; i < bus->dev_count; i++) {
		struct kvm_io_device *pos = bus->devs[i];

		kvm_iodevice_destructor(pos);
	}
	kfree(bus);
}

/* kvm_io_bus_write - called under kvm->slots_lock */
int kvm_io_bus_write(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
		     int len, const void *val)
{
	int i;
	struct kvm_io_bus *bus = rcu_dereference(kvm->buses[bus_idx]);

	for (i = 0; i < bus->dev_count; i++)
		if (!kvm_iodevice_write(bus->devs[i], addr, len, val))
			return 0;
	return -EOPNOTSUPP;
}

/* kvm_io_bus_read - called under kvm->slots_lock */
int kvm_io_bus_read(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
		    int len, void *val)
{
	int i;
	struct kvm_io_bus *bus = rcu_dereference(kvm->buses[bus_idx]);

	for (i = 0; i < bus->dev_count; i++)
		if (!kvm_iodevice_read(bus->devs[i], addr, len, val))
			return 0;
	return -EOPNOTSUPP;
}
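
/*
 * The bus pointer is read with rcu_dereference() above and republished
 * with rcu_assign_pointer() below: writers (holding slots_lock) build
 * a modified copy, swap it in, and wait out readers with
 * synchronize_srcu_expedited() before freeing the old bus.
 */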

/* Caller must hold slots_lock. */
int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx,
			    struct kvm_io_device *dev)
{
	struct kvm_io_bus *new_bus, *bus;

	bus = kvm->buses[bus_idx];
	if (bus->dev_count > NR_IOBUS_DEVS-1)
		return -ENOSPC;

	new_bus = kzalloc(sizeof(struct kvm_io_bus), GFP_KERNEL);
	if (!new_bus)
		return -ENOMEM;
	memcpy(new_bus, bus, sizeof(struct kvm_io_bus));
	new_bus->devs[new_bus->dev_count++] = dev;
	rcu_assign_pointer(kvm->buses[bus_idx], new_bus);
	synchronize_srcu_expedited(&kvm->srcu);
	kfree(bus);

	return 0;
}

/* Caller must hold slots_lock. */
int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
			      struct kvm_io_device *dev)
{
	int i, r;
	struct kvm_io_bus *new_bus, *bus;

	new_bus = kzalloc(sizeof(struct kvm_io_bus), GFP_KERNEL);
	if (!new_bus)
		return -ENOMEM;

	bus = kvm->buses[bus_idx];
	memcpy(new_bus, bus, sizeof(struct kvm_io_bus));

	r = -ENOENT;
	for (i = 0; i < new_bus->dev_count; i++)
		if (new_bus->devs[i] == dev) {
			r = 0;
			new_bus->devs[i] = new_bus->devs[--new_bus->dev_count];
			break;
		}

	if (r) {
		kfree(new_bus);
		return r;
	}

	rcu_assign_pointer(kvm->buses[bus_idx], new_bus);
	synchronize_srcu_expedited(&kvm->srcu);
	kfree(bus);
	return r;
}
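
/*
 * Note the O(1) removal above: the matching slot is overwritten with
 * the last entry instead of shifting the array.  Ordering on a bus is
 * irrelevant since reads and writes scan all dev_count entries anyway.
 */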

static struct notifier_block kvm_cpu_notifier = {
	.notifier_call = kvm_cpu_hotplug,
	.priority = 20, /* must be > scheduler priority */
};

static int vm_stat_get(void *_offset, u64 *val)
{
	unsigned offset = (long)_offset;
	struct kvm *kvm;

	*val = 0;
	spin_lock(&kvm_lock);
	list_for_each_entry(kvm, &vm_list, vm_list)
		*val += *(u32 *)((void *)kvm + offset);
	spin_unlock(&kvm_lock);
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(vm_stat_fops, vm_stat_get, NULL, "%llu\n");

static int vcpu_stat_get(void *_offset, u64 *val)
{
	unsigned offset = (long)_offset;
	struct kvm *kvm;
	struct kvm_vcpu *vcpu;
	int i;

	*val = 0;
	spin_lock(&kvm_lock);
	list_for_each_entry(kvm, &vm_list, vm_list)
		kvm_for_each_vcpu(i, vcpu, kvm)
			*val += *(u32 *)((void *)vcpu + offset);
	spin_unlock(&kvm_lock);
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(vcpu_stat_fops, vcpu_stat_get, NULL, "%llu\n");

static const struct file_operations *stat_fops[] = {
	[KVM_STAT_VCPU] = &vcpu_stat_fops,
	[KVM_STAT_VM]   = &vm_stat_fops,
};
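
/*
 * Each debugfs stat file stores the byte offset of its counter within
 * struct kvm or struct kvm_vcpu as private data; the getters above
 * walk vm_list (and each VM's vcpus) and sum the u32 found at that
 * offset across all instances.
 */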

static void kvm_init_debug(void)
{
	struct kvm_stats_debugfs_item *p;

	kvm_debugfs_dir = debugfs_create_dir("kvm", NULL);
	for (p = debugfs_entries; p->name; ++p)
		p->dentry = debugfs_create_file(p->name, 0444, kvm_debugfs_dir,
						(void *)(long)p->offset,
						stat_fops[p->kind]);
}

static void kvm_exit_debug(void)
{
	struct kvm_stats_debugfs_item *p;

	for (p = debugfs_entries; p->name; ++p)
		debugfs_remove(p->dentry);
	debugfs_remove(kvm_debugfs_dir);
}

static int kvm_suspend(struct sys_device *dev, pm_message_t state)
{
	if (kvm_usage_count)
		hardware_disable(NULL);
	return 0;
}

static int kvm_resume(struct sys_device *dev)
{
	if (kvm_usage_count)
		hardware_enable(NULL);
	return 0;
}

static struct sysdev_class kvm_sysdev_class = {
	.name = "kvm",
	.suspend = kvm_suspend,
	.resume = kvm_resume,
};

static struct sys_device kvm_sysdev = {
	.id = 0,
	.cls = &kvm_sysdev_class,
};

struct page *bad_page;
pfn_t bad_pfn;

static inline
struct kvm_vcpu *preempt_notifier_to_vcpu(struct preempt_notifier *pn)
{
	return container_of(pn, struct kvm_vcpu, preempt_notifier);
}

static void kvm_sched_in(struct preempt_notifier *pn, int cpu)
{
	struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);

	kvm_arch_vcpu_load(vcpu, cpu);
}

static void kvm_sched_out(struct preempt_notifier *pn,
			  struct task_struct *next)
{
	struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);

	kvm_arch_vcpu_put(vcpu);
}
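
/*
 * Preempt notifiers hook the vcpu thread's context switches:
 * kvm_sched_in() reloads per-CPU guest state when the thread is
 * scheduled back in, and kvm_sched_out() saves it when the thread is
 * descheduled.
 */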

int kvm_init(void *opaque, unsigned int vcpu_size,
	     struct module *module)
{
	int r;
	int cpu;

	r = kvm_arch_init(opaque);
	if (r)
		goto out_fail;

	bad_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
	if (bad_page == NULL) {
		r = -ENOMEM;
		goto out;
	}
	bad_pfn = page_to_pfn(bad_page);

	if (!zalloc_cpumask_var(&cpus_hardware_enabled, GFP_KERNEL)) {
		r = -ENOMEM;
		goto out_free_0;
	}

	r = kvm_arch_hardware_setup();
	if (r < 0)
		goto out_free_0a;

	for_each_online_cpu(cpu) {
		smp_call_function_single(cpu,
				kvm_arch_check_processor_compat,
				&r, 1);
		if (r < 0)
			goto out_free_1;
	}

	r = register_cpu_notifier(&kvm_cpu_notifier);
	if (r)
		goto out_free_2;
	register_reboot_notifier(&kvm_reboot_notifier);

	r = sysdev_class_register(&kvm_sysdev_class);
	if (r)
		goto out_free_3;

	r = sysdev_register(&kvm_sysdev);
	if (r)
		goto out_free_4;

	/* A kmem cache lets us meet the alignment requirements of fx_save. */
	kvm_vcpu_cache = kmem_cache_create("kvm_vcpu", vcpu_size,
					   __alignof__(struct kvm_vcpu),
					   0, NULL);
	if (!kvm_vcpu_cache) {
		r = -ENOMEM;
		goto out_free_5;
	}

	kvm_chardev_ops.owner = module;
	kvm_vm_fops.owner = module;
	kvm_vcpu_fops.owner = module;

	r = misc_register(&kvm_dev);
	if (r) {
		printk(KERN_ERR "kvm: misc device register failed\n");
		goto out_free;
	}

	kvm_preempt_ops.sched_in = kvm_sched_in;
	kvm_preempt_ops.sched_out = kvm_sched_out;

	kvm_init_debug();

	return 0;

out_free:
	kmem_cache_destroy(kvm_vcpu_cache);
out_free_5:
	sysdev_unregister(&kvm_sysdev);
out_free_4:
	sysdev_class_unregister(&kvm_sysdev_class);
out_free_3:
	unregister_reboot_notifier(&kvm_reboot_notifier);
	unregister_cpu_notifier(&kvm_cpu_notifier);
out_free_2:
out_free_1:
	kvm_arch_hardware_unsetup();
out_free_0a:
	free_cpumask_var(cpus_hardware_enabled);
out_free_0:
	__free_page(bad_page);
out:
	kvm_arch_exit();
out_fail:
	return r;
}
EXPORT_SYMBOL_GPL(kvm_init);
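
/*
 * Illustrative sketch (hypothetical, excluded from the build): how an
 * architecture module is expected to drive kvm_init()/kvm_exit().  The
 * names my_arch_ops and my_vcpu_size are invented for the example;
 * real callers pass their vendor ops as @opaque (forwarded to
 * kvm_arch_init()) and the size of their vcpu container struct.
 */
#if 0
static int __init my_kvm_module_init(void)
{
	return kvm_init(&my_arch_ops, my_vcpu_size, THIS_MODULE);
}

static void __exit my_kvm_module_exit(void)
{
	kvm_exit();
}

module_init(my_kvm_module_init);
module_exit(my_kvm_module_exit);
#endif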

void kvm_exit(void)
{
	tracepoint_synchronize_unregister();
	kvm_exit_debug();
	misc_deregister(&kvm_dev);
	kmem_cache_destroy(kvm_vcpu_cache);
	sysdev_unregister(&kvm_sysdev);
	sysdev_class_unregister(&kvm_sysdev_class);
	unregister_reboot_notifier(&kvm_reboot_notifier);
	unregister_cpu_notifier(&kvm_cpu_notifier);
	on_each_cpu(hardware_disable, NULL, 1);
	kvm_arch_hardware_unsetup();
	kvm_arch_exit();
	free_cpumask_var(cpus_hardware_enabled);
	__free_page(bad_page);
}
EXPORT_SYMBOL_GPL(kvm_exit);