kvm_main.c

  1. /*
  2. * Kernel-based Virtual Machine driver for Linux
  3. *
  4. * This module enables machines with Intel VT-x extensions to run virtual
  5. * machines without emulation or binary translation.
  6. *
  7. * Copyright (C) 2006 Qumranet, Inc.
  8. *
  9. * Authors:
  10. * Avi Kivity <avi@qumranet.com>
  11. * Yaniv Kamay <yaniv@qumranet.com>
  12. *
  13. * This work is licensed under the terms of the GNU GPL, version 2. See
  14. * the COPYING file in the top-level directory.
  15. *
  16. */
  17. #include "iodev.h"
  18. #include <linux/kvm_host.h>
  19. #include <linux/kvm.h>
  20. #include <linux/module.h>
  21. #include <linux/errno.h>
  22. #include <linux/percpu.h>
  23. #include <linux/gfp.h>
  24. #include <linux/mm.h>
  25. #include <linux/miscdevice.h>
  26. #include <linux/vmalloc.h>
  27. #include <linux/reboot.h>
  28. #include <linux/debugfs.h>
  29. #include <linux/highmem.h>
  30. #include <linux/file.h>
  31. #include <linux/sysdev.h>
  32. #include <linux/cpu.h>
  33. #include <linux/sched.h>
  34. #include <linux/cpumask.h>
  35. #include <linux/smp.h>
  36. #include <linux/anon_inodes.h>
  37. #include <linux/profile.h>
  38. #include <linux/kvm_para.h>
  39. #include <linux/pagemap.h>
  40. #include <linux/mman.h>
  41. #include <linux/swap.h>
  42. #include <linux/bitops.h>
  43. #include <linux/spinlock.h>
  44. #include <linux/compat.h>
  45. #include <asm/processor.h>
  46. #include <asm/io.h>
  47. #include <asm/uaccess.h>
  48. #include <asm/pgtable.h>
  49. #include <asm-generic/bitops/le.h>
  50. #include "coalesced_mmio.h"
  51. #define CREATE_TRACE_POINTS
  52. #include <trace/events/kvm.h>
  53. MODULE_AUTHOR("Qumranet");
  54. MODULE_LICENSE("GPL");
  55. /*
  56. * Ordering of locks:
  57. *
  58. * kvm->lock --> kvm->slots_lock --> kvm->irq_lock
  59. */
  60. DEFINE_SPINLOCK(kvm_lock);
  61. LIST_HEAD(vm_list);
  62. static cpumask_var_t cpus_hardware_enabled;
  63. static int kvm_usage_count = 0;
  64. static atomic_t hardware_enable_failed;
  65. struct kmem_cache *kvm_vcpu_cache;
  66. EXPORT_SYMBOL_GPL(kvm_vcpu_cache);
  67. static __read_mostly struct preempt_ops kvm_preempt_ops;
  68. struct dentry *kvm_debugfs_dir;
  69. static long kvm_vcpu_ioctl(struct file *file, unsigned int ioctl,
  70. unsigned long arg);
  71. static int hardware_enable_all(void);
  72. static void hardware_disable_all(void);
  73. static bool kvm_rebooting;
  74. static bool largepages_enabled = true;
  75. inline int kvm_is_mmio_pfn(pfn_t pfn)
  76. {
  77. if (pfn_valid(pfn)) {
  78. struct page *page = compound_head(pfn_to_page(pfn));
  79. return PageReserved(page);
  80. }
  81. return true;
  82. }
  83. /*
  84. * Switches to specified vcpu, until a matching vcpu_put()
  85. */
  86. void vcpu_load(struct kvm_vcpu *vcpu)
  87. {
  88. int cpu;
  89. mutex_lock(&vcpu->mutex);
  90. cpu = get_cpu();
  91. preempt_notifier_register(&vcpu->preempt_notifier);
  92. kvm_arch_vcpu_load(vcpu, cpu);
  93. put_cpu();
  94. }
  95. void vcpu_put(struct kvm_vcpu *vcpu)
  96. {
  97. preempt_disable();
  98. kvm_arch_vcpu_put(vcpu);
  99. preempt_notifier_unregister(&vcpu->preempt_notifier);
  100. preempt_enable();
  101. mutex_unlock(&vcpu->mutex);
  102. }
  103. static void ack_flush(void *_completed)
  104. {
  105. }
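/*
 * make_all_cpus_request() sets @req on every vcpu and kicks the cpus that
 * are currently running a vcpu with an empty IPI (ack_flush), so each one
 * notices the pending request before re-entering the guest.  It returns
 * true if at least one remote cpu was signalled.
 */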
  106. static bool make_all_cpus_request(struct kvm *kvm, unsigned int req)
  107. {
  108. int i, cpu, me;
  109. cpumask_var_t cpus;
  110. bool called = true;
  111. struct kvm_vcpu *vcpu;
  112. zalloc_cpumask_var(&cpus, GFP_ATOMIC);
  113. spin_lock(&kvm->requests_lock);
  114. me = smp_processor_id();
  115. kvm_for_each_vcpu(i, vcpu, kvm) {
  116. if (test_and_set_bit(req, &vcpu->requests))
  117. continue;
  118. cpu = vcpu->cpu;
  119. if (cpus != NULL && cpu != -1 && cpu != me)
  120. cpumask_set_cpu(cpu, cpus);
  121. }
  122. if (unlikely(cpus == NULL))
  123. smp_call_function_many(cpu_online_mask, ack_flush, NULL, 1);
  124. else if (!cpumask_empty(cpus))
  125. smp_call_function_many(cpus, ack_flush, NULL, 1);
  126. else
  127. called = false;
  128. spin_unlock(&kvm->requests_lock);
  129. free_cpumask_var(cpus);
  130. return called;
  131. }
  132. void kvm_flush_remote_tlbs(struct kvm *kvm)
  133. {
  134. if (make_all_cpus_request(kvm, KVM_REQ_TLB_FLUSH))
  135. ++kvm->stat.remote_tlb_flush;
  136. }
  137. void kvm_reload_remote_mmus(struct kvm *kvm)
  138. {
  139. make_all_cpus_request(kvm, KVM_REQ_MMU_RELOAD);
  140. }
  141. int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id)
  142. {
  143. struct page *page;
  144. int r;
  145. mutex_init(&vcpu->mutex);
  146. vcpu->cpu = -1;
  147. vcpu->kvm = kvm;
  148. vcpu->vcpu_id = id;
  149. init_waitqueue_head(&vcpu->wq);
  150. page = alloc_page(GFP_KERNEL | __GFP_ZERO);
  151. if (!page) {
  152. r = -ENOMEM;
  153. goto fail;
  154. }
  155. vcpu->run = page_address(page);
  156. r = kvm_arch_vcpu_init(vcpu);
  157. if (r < 0)
  158. goto fail_free_run;
  159. return 0;
  160. fail_free_run:
  161. free_page((unsigned long)vcpu->run);
  162. fail:
  163. return r;
  164. }
  165. EXPORT_SYMBOL_GPL(kvm_vcpu_init);
  166. void kvm_vcpu_uninit(struct kvm_vcpu *vcpu)
  167. {
  168. kvm_arch_vcpu_uninit(vcpu);
  169. free_page((unsigned long)vcpu->run);
  170. }
  171. EXPORT_SYMBOL_GPL(kvm_vcpu_uninit);
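/*
 * MMU notifier hooks: when the host mm invalidates or changes a page that
 * may be mapped into a guest, these callbacks zap the affected sptes
 * (kvm_unmap_hva/kvm_set_spte_hva) and flush remote TLBs where needed.
 * mmu_notifier_seq and mmu_notifier_count let the kvm page fault path
 * detect that a translation it is about to install was invalidated
 * concurrently.
 */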
  172. #if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
  173. static inline struct kvm *mmu_notifier_to_kvm(struct mmu_notifier *mn)
  174. {
  175. return container_of(mn, struct kvm, mmu_notifier);
  176. }
  177. static void kvm_mmu_notifier_invalidate_page(struct mmu_notifier *mn,
  178. struct mm_struct *mm,
  179. unsigned long address)
  180. {
  181. struct kvm *kvm = mmu_notifier_to_kvm(mn);
  182. int need_tlb_flush;
  183. /*
  184. * When ->invalidate_page runs, the linux pte has been zapped
  185. * already but the page is still allocated until
  186. * ->invalidate_page returns. So if we increase the sequence
  187. * here the kvm page fault will notice if the spte can't be
  188. * established because the page is going to be freed. If
  189. * instead the kvm page fault establishes the spte before
  190. * ->invalidate_page runs, kvm_unmap_hva will release it
  191. * before returning.
  192. *
  193. * The sequence increase only needs to be seen at spin_unlock
  194. * time, and not at spin_lock time.
  195. *
  196. * Increasing the sequence after the spin_unlock would be
  197. * unsafe because the kvm page fault could then establish the
  198. * pte after kvm_unmap_hva returned, without noticing the page
  199. * is going to be freed.
  200. */
  201. spin_lock(&kvm->mmu_lock);
  202. kvm->mmu_notifier_seq++;
  203. need_tlb_flush = kvm_unmap_hva(kvm, address);
  204. spin_unlock(&kvm->mmu_lock);
  205. /* we have to flush the tlb before the pages can be freed */
  206. if (need_tlb_flush)
  207. kvm_flush_remote_tlbs(kvm);
  208. }
  209. static void kvm_mmu_notifier_change_pte(struct mmu_notifier *mn,
  210. struct mm_struct *mm,
  211. unsigned long address,
  212. pte_t pte)
  213. {
  214. struct kvm *kvm = mmu_notifier_to_kvm(mn);
  215. spin_lock(&kvm->mmu_lock);
  216. kvm->mmu_notifier_seq++;
  217. kvm_set_spte_hva(kvm, address, pte);
  218. spin_unlock(&kvm->mmu_lock);
  219. }
  220. static void kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
  221. struct mm_struct *mm,
  222. unsigned long start,
  223. unsigned long end)
  224. {
  225. struct kvm *kvm = mmu_notifier_to_kvm(mn);
  226. int need_tlb_flush = 0;
  227. spin_lock(&kvm->mmu_lock);
  228. /*
  229. * The count increase must become visible at unlock time as no
  230. * spte can be established without taking the mmu_lock and
  231. * count is also read inside the mmu_lock critical section.
  232. */
  233. kvm->mmu_notifier_count++;
  234. for (; start < end; start += PAGE_SIZE)
  235. need_tlb_flush |= kvm_unmap_hva(kvm, start);
  236. spin_unlock(&kvm->mmu_lock);
  237. /* we have to flush the tlb before the pages can be freed */
  238. if (need_tlb_flush)
  239. kvm_flush_remote_tlbs(kvm);
  240. }
  241. static void kvm_mmu_notifier_invalidate_range_end(struct mmu_notifier *mn,
  242. struct mm_struct *mm,
  243. unsigned long start,
  244. unsigned long end)
  245. {
  246. struct kvm *kvm = mmu_notifier_to_kvm(mn);
  247. spin_lock(&kvm->mmu_lock);
  248. /*
  249. * This sequence increase will notify the kvm page fault that
  250. * the page that is going to be mapped in the spte could have
  251. * been freed.
  252. */
  253. kvm->mmu_notifier_seq++;
  254. /*
  255. * The above sequence increase must be visible before the
  256. * below count decrease but both values are read by the kvm
  257. * page fault under mmu_lock spinlock so we don't need to add
  258. * a smp_wmb() here in between the two.
  259. */
  260. kvm->mmu_notifier_count--;
  261. spin_unlock(&kvm->mmu_lock);
  262. BUG_ON(kvm->mmu_notifier_count < 0);
  263. }
  264. static int kvm_mmu_notifier_clear_flush_young(struct mmu_notifier *mn,
  265. struct mm_struct *mm,
  266. unsigned long address)
  267. {
  268. struct kvm *kvm = mmu_notifier_to_kvm(mn);
  269. int young;
  270. spin_lock(&kvm->mmu_lock);
  271. young = kvm_age_hva(kvm, address);
  272. spin_unlock(&kvm->mmu_lock);
  273. if (young)
  274. kvm_flush_remote_tlbs(kvm);
  275. return young;
  276. }
  277. static void kvm_mmu_notifier_release(struct mmu_notifier *mn,
  278. struct mm_struct *mm)
  279. {
  280. struct kvm *kvm = mmu_notifier_to_kvm(mn);
  281. kvm_arch_flush_shadow(kvm);
  282. }
  283. static const struct mmu_notifier_ops kvm_mmu_notifier_ops = {
  284. .invalidate_page = kvm_mmu_notifier_invalidate_page,
  285. .invalidate_range_start = kvm_mmu_notifier_invalidate_range_start,
  286. .invalidate_range_end = kvm_mmu_notifier_invalidate_range_end,
  287. .clear_flush_young = kvm_mmu_notifier_clear_flush_young,
  288. .change_pte = kvm_mmu_notifier_change_pte,
  289. .release = kvm_mmu_notifier_release,
  290. };
  291. static int kvm_init_mmu_notifier(struct kvm *kvm)
  292. {
  293. kvm->mmu_notifier.ops = &kvm_mmu_notifier_ops;
  294. return mmu_notifier_register(&kvm->mmu_notifier, current->mm);
  295. }
  296. #else /* !(CONFIG_MMU_NOTIFIER && KVM_ARCH_WANT_MMU_NOTIFIER) */
  297. static int kvm_init_mmu_notifier(struct kvm *kvm)
  298. {
  299. return 0;
  300. }
  301. #endif /* CONFIG_MMU_NOTIFIER && KVM_ARCH_WANT_MMU_NOTIFIER */
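/*
 * kvm_create_vm() allocates the arch-specific VM structure, enables
 * hardware virtualization (for the first VM), sets up the memslot array,
 * the PIO/MMIO buses and the MMU notifier, takes a reference on the
 * creator's mm and links the new VM into the global vm_list.
 */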
  302. static struct kvm *kvm_create_vm(void)
  303. {
  304. int r = 0;
  305. struct kvm *kvm = kvm_arch_create_vm();
  306. #ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
  307. struct page *page;
  308. #endif
  309. if (IS_ERR(kvm))
  310. goto out;
  311. r = hardware_enable_all();
  312. if (r)
  313. goto out_err_nodisable;
  314. #ifdef CONFIG_HAVE_KVM_IRQCHIP
  315. INIT_HLIST_HEAD(&kvm->mask_notifier_list);
  316. INIT_HLIST_HEAD(&kvm->irq_ack_notifier_list);
  317. #endif
  318. r = -ENOMEM;
  319. kvm->memslots = kzalloc(sizeof(struct kvm_memslots), GFP_KERNEL);
  320. if (!kvm->memslots)
  321. goto out_err;
  322. #ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
  323. page = alloc_page(GFP_KERNEL | __GFP_ZERO);
  324. if (!page)
  325. goto out_err;
  326. kvm->coalesced_mmio_ring =
  327. (struct kvm_coalesced_mmio_ring *)page_address(page);
  328. #endif
  329. r = kvm_init_mmu_notifier(kvm);
  330. if (r) {
  331. #ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
  332. put_page(page);
  333. #endif
  334. goto out_err;
  335. }
  336. kvm->mm = current->mm;
  337. atomic_inc(&kvm->mm->mm_count);
  338. spin_lock_init(&kvm->mmu_lock);
  339. spin_lock_init(&kvm->requests_lock);
  340. kvm_io_bus_init(&kvm->pio_bus);
  341. kvm_eventfd_init(kvm);
  342. mutex_init(&kvm->lock);
  343. mutex_init(&kvm->irq_lock);
  344. kvm_io_bus_init(&kvm->mmio_bus);
  345. init_rwsem(&kvm->slots_lock);
  346. atomic_set(&kvm->users_count, 1);
  347. spin_lock(&kvm_lock);
  348. list_add(&kvm->vm_list, &vm_list);
  349. spin_unlock(&kvm_lock);
  350. #ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
  351. kvm_coalesced_mmio_init(kvm);
  352. #endif
  353. out:
  354. return kvm;
  355. out_err:
  356. hardware_disable_all();
  357. out_err_nodisable:
  358. kfree(kvm->memslots);
  359. kfree(kvm);
  360. return ERR_PTR(r);
  361. }
  362. /*
  363. * Free any memory in @free but not in @dont.
  364. */
  365. static void kvm_free_physmem_slot(struct kvm_memory_slot *free,
  366. struct kvm_memory_slot *dont)
  367. {
  368. int i;
  369. if (!dont || free->rmap != dont->rmap)
  370. vfree(free->rmap);
  371. if (!dont || free->dirty_bitmap != dont->dirty_bitmap)
  372. vfree(free->dirty_bitmap);
  373. for (i = 0; i < KVM_NR_PAGE_SIZES - 1; ++i) {
  374. if (!dont || free->lpage_info[i] != dont->lpage_info[i]) {
  375. vfree(free->lpage_info[i]);
  376. free->lpage_info[i] = NULL;
  377. }
  378. }
  379. free->npages = 0;
  380. free->dirty_bitmap = NULL;
  381. free->rmap = NULL;
  382. }
  383. void kvm_free_physmem(struct kvm *kvm)
  384. {
  385. int i;
  386. struct kvm_memslots *slots = kvm->memslots;
  387. for (i = 0; i < slots->nmemslots; ++i)
  388. kvm_free_physmem_slot(&slots->memslots[i], NULL);
  389. kfree(kvm->memslots);
  390. }
  391. static void kvm_destroy_vm(struct kvm *kvm)
  392. {
  393. struct mm_struct *mm = kvm->mm;
  394. kvm_arch_sync_events(kvm);
  395. spin_lock(&kvm_lock);
  396. list_del(&kvm->vm_list);
  397. spin_unlock(&kvm_lock);
  398. kvm_free_irq_routing(kvm);
  399. kvm_io_bus_destroy(&kvm->pio_bus);
  400. kvm_io_bus_destroy(&kvm->mmio_bus);
  401. kvm_coalesced_mmio_free(kvm);
  402. #if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
  403. mmu_notifier_unregister(&kvm->mmu_notifier, kvm->mm);
  404. #else
  405. kvm_arch_flush_shadow(kvm);
  406. #endif
  407. kvm_arch_destroy_vm(kvm);
  408. hardware_disable_all();
  409. mmdrop(mm);
  410. }
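/*
 * VM lifetime is reference counted: users_count starts at 1 for the VM
 * file descriptor, each vcpu fd takes an additional reference, and
 * kvm_destroy_vm() runs when the last reference is dropped.
 */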
  411. void kvm_get_kvm(struct kvm *kvm)
  412. {
  413. atomic_inc(&kvm->users_count);
  414. }
  415. EXPORT_SYMBOL_GPL(kvm_get_kvm);
  416. void kvm_put_kvm(struct kvm *kvm)
  417. {
  418. if (atomic_dec_and_test(&kvm->users_count))
  419. kvm_destroy_vm(kvm);
  420. }
  421. EXPORT_SYMBOL_GPL(kvm_put_kvm);
  422. static int kvm_vm_release(struct inode *inode, struct file *filp)
  423. {
  424. struct kvm *kvm = filp->private_data;
  425. kvm_irqfd_release(kvm);
  426. kvm_put_kvm(kvm);
  427. return 0;
  428. }
  429. /*
  430. * Allocate some memory and give it an address in the guest physical address
  431. * space.
  432. *
  433. * Discontiguous memory is allowed, mostly for framebuffers.
  434. *
  435. * Must be called holding kvm->slots_lock for write.
  436. */
  437. int __kvm_set_memory_region(struct kvm *kvm,
  438. struct kvm_userspace_memory_region *mem,
  439. int user_alloc)
  440. {
  441. int r;
  442. gfn_t base_gfn;
  443. unsigned long npages;
  444. unsigned long i;
  445. struct kvm_memory_slot *memslot;
  446. struct kvm_memory_slot old, new;
  447. r = -EINVAL;
  448. /* General sanity checks */
  449. if (mem->memory_size & (PAGE_SIZE - 1))
  450. goto out;
  451. if (mem->guest_phys_addr & (PAGE_SIZE - 1))
  452. goto out;
  453. if (user_alloc && (mem->userspace_addr & (PAGE_SIZE - 1)))
  454. goto out;
  455. if (mem->slot >= KVM_MEMORY_SLOTS + KVM_PRIVATE_MEM_SLOTS)
  456. goto out;
  457. if (mem->guest_phys_addr + mem->memory_size < mem->guest_phys_addr)
  458. goto out;
  459. memslot = &kvm->memslots->memslots[mem->slot];
  460. base_gfn = mem->guest_phys_addr >> PAGE_SHIFT;
  461. npages = mem->memory_size >> PAGE_SHIFT;
  462. if (!npages)
  463. mem->flags &= ~KVM_MEM_LOG_DIRTY_PAGES;
  464. new = old = *memslot;
  465. new.base_gfn = base_gfn;
  466. new.npages = npages;
  467. new.flags = mem->flags;
  468. /* Disallow changing a memory slot's size. */
  469. r = -EINVAL;
  470. if (npages && old.npages && npages != old.npages)
  471. goto out_free;
  472. /* Check for overlaps */
  473. r = -EEXIST;
  474. for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
  475. struct kvm_memory_slot *s = &kvm->memslots->memslots[i];
  476. if (s == memslot || !s->npages)
  477. continue;
  478. if (!((base_gfn + npages <= s->base_gfn) ||
  479. (base_gfn >= s->base_gfn + s->npages)))
  480. goto out_free;
  481. }
  482. /* Free page dirty bitmap if unneeded */
  483. if (!(new.flags & KVM_MEM_LOG_DIRTY_PAGES))
  484. new.dirty_bitmap = NULL;
  485. r = -ENOMEM;
  486. /* Allocate if a slot is being created */
  487. #ifndef CONFIG_S390
  488. if (npages && !new.rmap) {
  489. new.rmap = vmalloc(npages * sizeof(struct page *));
  490. if (!new.rmap)
  491. goto out_free;
  492. memset(new.rmap, 0, npages * sizeof(*new.rmap));
  493. new.user_alloc = user_alloc;
  494. /*
  495. * hva_to_rmmap() serializes with the mmu_lock and to be
  496. * safe it has to ignore memslots with !user_alloc &&
  497. * !userspace_addr.
  498. */
  499. if (user_alloc)
  500. new.userspace_addr = mem->userspace_addr;
  501. else
  502. new.userspace_addr = 0;
  503. }
  504. if (!npages)
  505. goto skip_lpage;
  506. for (i = 0; i < KVM_NR_PAGE_SIZES - 1; ++i) {
  507. unsigned long ugfn;
  508. unsigned long j;
  509. int lpages;
  510. int level = i + 2;
  511. /* Avoid unused variable warning if no large pages */
  512. (void)level;
  513. if (new.lpage_info[i])
  514. continue;
  515. lpages = 1 + (base_gfn + npages - 1) /
  516. KVM_PAGES_PER_HPAGE(level);
  517. lpages -= base_gfn / KVM_PAGES_PER_HPAGE(level);
  518. new.lpage_info[i] = vmalloc(lpages * sizeof(*new.lpage_info[i]));
  519. if (!new.lpage_info[i])
  520. goto out_free;
  521. memset(new.lpage_info[i], 0,
  522. lpages * sizeof(*new.lpage_info[i]));
  523. if (base_gfn % KVM_PAGES_PER_HPAGE(level))
  524. new.lpage_info[i][0].write_count = 1;
  525. if ((base_gfn+npages) % KVM_PAGES_PER_HPAGE(level))
  526. new.lpage_info[i][lpages - 1].write_count = 1;
  527. ugfn = new.userspace_addr >> PAGE_SHIFT;
  528. /*
  529. * If the gfn and userspace address are not aligned wrt each
  530. * other, or if explicitly asked to, disable large page
  531. * support for this slot
  532. */
  533. if ((base_gfn ^ ugfn) & (KVM_PAGES_PER_HPAGE(level) - 1) ||
  534. !largepages_enabled)
  535. for (j = 0; j < lpages; ++j)
  536. new.lpage_info[i][j].write_count = 1;
  537. }
  538. skip_lpage:
  539. /* Allocate page dirty bitmap if needed */
  540. if ((new.flags & KVM_MEM_LOG_DIRTY_PAGES) && !new.dirty_bitmap) {
  541. unsigned dirty_bytes = ALIGN(npages, BITS_PER_LONG) / 8;
  542. new.dirty_bitmap = vmalloc(dirty_bytes);
  543. if (!new.dirty_bitmap)
  544. goto out_free;
  545. memset(new.dirty_bitmap, 0, dirty_bytes);
  546. if (old.npages)
  547. kvm_arch_flush_shadow(kvm);
  548. }
  549. #else /* not defined CONFIG_S390 */
  550. new.user_alloc = user_alloc;
  551. if (user_alloc)
  552. new.userspace_addr = mem->userspace_addr;
  553. #endif /* not defined CONFIG_S390 */
  554. if (!npages)
  555. kvm_arch_flush_shadow(kvm);
  556. spin_lock(&kvm->mmu_lock);
  557. if (mem->slot >= kvm->memslots->nmemslots)
  558. kvm->memslots->nmemslots = mem->slot + 1;
  559. *memslot = new;
  560. spin_unlock(&kvm->mmu_lock);
  561. r = kvm_arch_set_memory_region(kvm, mem, old, user_alloc);
  562. if (r) {
  563. spin_lock(&kvm->mmu_lock);
  564. *memslot = old;
  565. spin_unlock(&kvm->mmu_lock);
  566. goto out_free;
  567. }
  568. kvm_free_physmem_slot(&old, npages ? &new : NULL);
  569. /* Slot deletion case: we have to update the current slot */
  570. spin_lock(&kvm->mmu_lock);
  571. if (!npages)
  572. *memslot = old;
  573. spin_unlock(&kvm->mmu_lock);
  574. #ifdef CONFIG_DMAR
  575. /* map the pages in iommu page table */
  576. r = kvm_iommu_map_pages(kvm, base_gfn, npages);
  577. if (r)
  578. goto out;
  579. #endif
  580. return 0;
  581. out_free:
  582. kvm_free_physmem_slot(&new, &old);
  583. out:
  584. return r;
  585. }
  586. EXPORT_SYMBOL_GPL(__kvm_set_memory_region);
  587. int kvm_set_memory_region(struct kvm *kvm,
  588. struct kvm_userspace_memory_region *mem,
  589. int user_alloc)
  590. {
  591. int r;
  592. down_write(&kvm->slots_lock);
  593. r = __kvm_set_memory_region(kvm, mem, user_alloc);
  594. up_write(&kvm->slots_lock);
  595. return r;
  596. }
  597. EXPORT_SYMBOL_GPL(kvm_set_memory_region);
  598. int kvm_vm_ioctl_set_memory_region(struct kvm *kvm,
  599. struct
  600. kvm_userspace_memory_region *mem,
  601. int user_alloc)
  602. {
  603. if (mem->slot >= KVM_MEMORY_SLOTS)
  604. return -EINVAL;
  605. return kvm_set_memory_region(kvm, mem, user_alloc);
  606. }
  607. int kvm_get_dirty_log(struct kvm *kvm,
  608. struct kvm_dirty_log *log, int *is_dirty)
  609. {
  610. struct kvm_memory_slot *memslot;
  611. int r, i;
  612. int n;
  613. unsigned long any = 0;
  614. r = -EINVAL;
  615. if (log->slot >= KVM_MEMORY_SLOTS)
  616. goto out;
  617. memslot = &kvm->memslots->memslots[log->slot];
  618. r = -ENOENT;
  619. if (!memslot->dirty_bitmap)
  620. goto out;
  621. n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;
  622. for (i = 0; !any && i < n/sizeof(long); ++i)
  623. any = memslot->dirty_bitmap[i];
  624. r = -EFAULT;
  625. if (copy_to_user(log->dirty_bitmap, memslot->dirty_bitmap, n))
  626. goto out;
  627. if (any)
  628. *is_dirty = 1;
  629. r = 0;
  630. out:
  631. return r;
  632. }
  633. void kvm_disable_largepages(void)
  634. {
  635. largepages_enabled = false;
  636. }
  637. EXPORT_SYMBOL_GPL(kvm_disable_largepages);
  638. int is_error_page(struct page *page)
  639. {
  640. return page == bad_page;
  641. }
  642. EXPORT_SYMBOL_GPL(is_error_page);
  643. int is_error_pfn(pfn_t pfn)
  644. {
  645. return pfn == bad_pfn;
  646. }
  647. EXPORT_SYMBOL_GPL(is_error_pfn);
  648. static inline unsigned long bad_hva(void)
  649. {
  650. return PAGE_OFFSET;
  651. }
  652. int kvm_is_error_hva(unsigned long addr)
  653. {
  654. return addr == bad_hva();
  655. }
  656. EXPORT_SYMBOL_GPL(kvm_is_error_hva);
  657. struct kvm_memory_slot *gfn_to_memslot_unaliased(struct kvm *kvm, gfn_t gfn)
  658. {
  659. int i;
  660. struct kvm_memslots *slots = kvm->memslots;
  661. for (i = 0; i < slots->nmemslots; ++i) {
  662. struct kvm_memory_slot *memslot = &slots->memslots[i];
  663. if (gfn >= memslot->base_gfn
  664. && gfn < memslot->base_gfn + memslot->npages)
  665. return memslot;
  666. }
  667. return NULL;
  668. }
  669. EXPORT_SYMBOL_GPL(gfn_to_memslot_unaliased);
  670. struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn)
  671. {
  672. gfn = unalias_gfn(kvm, gfn);
  673. return gfn_to_memslot_unaliased(kvm, gfn);
  674. }
  675. int kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn)
  676. {
  677. int i;
  678. struct kvm_memslots *slots = kvm->memslots;
  679. gfn = unalias_gfn(kvm, gfn);
  680. for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
  681. struct kvm_memory_slot *memslot = &slots->memslots[i];
  682. if (gfn >= memslot->base_gfn
  683. && gfn < memslot->base_gfn + memslot->npages)
  684. return 1;
  685. }
  686. return 0;
  687. }
  688. EXPORT_SYMBOL_GPL(kvm_is_visible_gfn);
  689. unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn)
  690. {
  691. struct kvm_memory_slot *slot;
  692. gfn = unalias_gfn(kvm, gfn);
  693. slot = gfn_to_memslot_unaliased(kvm, gfn);
  694. if (!slot)
  695. return bad_hva();
  696. return (slot->userspace_addr + (gfn - slot->base_gfn) * PAGE_SIZE);
  697. }
  698. EXPORT_SYMBOL_GPL(gfn_to_hva);
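/*
 * gfn_to_pfn() translates a guest frame number into a host pfn.  The fast
 * path uses get_user_pages_fast(); if that fails, the slow path accepts a
 * VM_PFNMAP vma (e.g. device memory mapped into the guest) and derives the
 * pfn from the vma offset.  On error the pfn of bad_page is returned.
 */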
  699. pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn)
  700. {
  701. struct page *page[1];
  702. unsigned long addr;
  703. int npages;
  704. pfn_t pfn;
  705. might_sleep();
  706. addr = gfn_to_hva(kvm, gfn);
  707. if (kvm_is_error_hva(addr)) {
  708. get_page(bad_page);
  709. return page_to_pfn(bad_page);
  710. }
  711. npages = get_user_pages_fast(addr, 1, 1, page);
  712. if (unlikely(npages != 1)) {
  713. struct vm_area_struct *vma;
  714. down_read(&current->mm->mmap_sem);
  715. vma = find_vma(current->mm, addr);
  716. if (vma == NULL || addr < vma->vm_start ||
  717. !(vma->vm_flags & VM_PFNMAP)) {
  718. up_read(&current->mm->mmap_sem);
  719. get_page(bad_page);
  720. return page_to_pfn(bad_page);
  721. }
  722. pfn = ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
  723. up_read(&current->mm->mmap_sem);
  724. BUG_ON(!kvm_is_mmio_pfn(pfn));
  725. } else
  726. pfn = page_to_pfn(page[0]);
  727. return pfn;
  728. }
  729. EXPORT_SYMBOL_GPL(gfn_to_pfn);
  730. struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn)
  731. {
  732. pfn_t pfn;
  733. pfn = gfn_to_pfn(kvm, gfn);
  734. if (!kvm_is_mmio_pfn(pfn))
  735. return pfn_to_page(pfn);
  736. WARN_ON(kvm_is_mmio_pfn(pfn));
  737. get_page(bad_page);
  738. return bad_page;
  739. }
  740. EXPORT_SYMBOL_GPL(gfn_to_page);
  741. void kvm_release_page_clean(struct page *page)
  742. {
  743. kvm_release_pfn_clean(page_to_pfn(page));
  744. }
  745. EXPORT_SYMBOL_GPL(kvm_release_page_clean);
  746. void kvm_release_pfn_clean(pfn_t pfn)
  747. {
  748. if (!kvm_is_mmio_pfn(pfn))
  749. put_page(pfn_to_page(pfn));
  750. }
  751. EXPORT_SYMBOL_GPL(kvm_release_pfn_clean);
  752. void kvm_release_page_dirty(struct page *page)
  753. {
  754. kvm_release_pfn_dirty(page_to_pfn(page));
  755. }
  756. EXPORT_SYMBOL_GPL(kvm_release_page_dirty);
  757. void kvm_release_pfn_dirty(pfn_t pfn)
  758. {
  759. kvm_set_pfn_dirty(pfn);
  760. kvm_release_pfn_clean(pfn);
  761. }
  762. EXPORT_SYMBOL_GPL(kvm_release_pfn_dirty);
  763. void kvm_set_page_dirty(struct page *page)
  764. {
  765. kvm_set_pfn_dirty(page_to_pfn(page));
  766. }
  767. EXPORT_SYMBOL_GPL(kvm_set_page_dirty);
  768. void kvm_set_pfn_dirty(pfn_t pfn)
  769. {
  770. if (!kvm_is_mmio_pfn(pfn)) {
  771. struct page *page = pfn_to_page(pfn);
  772. if (!PageReserved(page))
  773. SetPageDirty(page);
  774. }
  775. }
  776. EXPORT_SYMBOL_GPL(kvm_set_pfn_dirty);
  777. void kvm_set_pfn_accessed(pfn_t pfn)
  778. {
  779. if (!kvm_is_mmio_pfn(pfn))
  780. mark_page_accessed(pfn_to_page(pfn));
  781. }
  782. EXPORT_SYMBOL_GPL(kvm_set_pfn_accessed);
  783. void kvm_get_pfn(pfn_t pfn)
  784. {
  785. if (!kvm_is_mmio_pfn(pfn))
  786. get_page(pfn_to_page(pfn));
  787. }
  788. EXPORT_SYMBOL_GPL(kvm_get_pfn);
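/*
 * next_segment() returns how many of @len bytes fit in the current page
 * starting at @offset.  The guest read/write/clear helpers below use it to
 * split a guest-physical range into per-page copies.
 */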
  789. static int next_segment(unsigned long len, int offset)
  790. {
  791. if (len > PAGE_SIZE - offset)
  792. return PAGE_SIZE - offset;
  793. else
  794. return len;
  795. }
  796. int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
  797. int len)
  798. {
  799. int r;
  800. unsigned long addr;
  801. addr = gfn_to_hva(kvm, gfn);
  802. if (kvm_is_error_hva(addr))
  803. return -EFAULT;
  804. r = copy_from_user(data, (void __user *)addr + offset, len);
  805. if (r)
  806. return -EFAULT;
  807. return 0;
  808. }
  809. EXPORT_SYMBOL_GPL(kvm_read_guest_page);
  810. int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len)
  811. {
  812. gfn_t gfn = gpa >> PAGE_SHIFT;
  813. int seg;
  814. int offset = offset_in_page(gpa);
  815. int ret;
  816. while ((seg = next_segment(len, offset)) != 0) {
  817. ret = kvm_read_guest_page(kvm, gfn, data, offset, seg);
  818. if (ret < 0)
  819. return ret;
  820. offset = 0;
  821. len -= seg;
  822. data += seg;
  823. ++gfn;
  824. }
  825. return 0;
  826. }
  827. EXPORT_SYMBOL_GPL(kvm_read_guest);
  828. int kvm_read_guest_atomic(struct kvm *kvm, gpa_t gpa, void *data,
  829. unsigned long len)
  830. {
  831. int r;
  832. unsigned long addr;
  833. gfn_t gfn = gpa >> PAGE_SHIFT;
  834. int offset = offset_in_page(gpa);
  835. addr = gfn_to_hva(kvm, gfn);
  836. if (kvm_is_error_hva(addr))
  837. return -EFAULT;
  838. pagefault_disable();
  839. r = __copy_from_user_inatomic(data, (void __user *)addr + offset, len);
  840. pagefault_enable();
  841. if (r)
  842. return -EFAULT;
  843. return 0;
  844. }
  845. EXPORT_SYMBOL(kvm_read_guest_atomic);
  846. int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, const void *data,
  847. int offset, int len)
  848. {
  849. int r;
  850. unsigned long addr;
  851. addr = gfn_to_hva(kvm, gfn);
  852. if (kvm_is_error_hva(addr))
  853. return -EFAULT;
  854. r = copy_to_user((void __user *)addr + offset, data, len);
  855. if (r)
  856. return -EFAULT;
  857. mark_page_dirty(kvm, gfn);
  858. return 0;
  859. }
  860. EXPORT_SYMBOL_GPL(kvm_write_guest_page);
  861. int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
  862. unsigned long len)
  863. {
  864. gfn_t gfn = gpa >> PAGE_SHIFT;
  865. int seg;
  866. int offset = offset_in_page(gpa);
  867. int ret;
  868. while ((seg = next_segment(len, offset)) != 0) {
  869. ret = kvm_write_guest_page(kvm, gfn, data, offset, seg);
  870. if (ret < 0)
  871. return ret;
  872. offset = 0;
  873. len -= seg;
  874. data += seg;
  875. ++gfn;
  876. }
  877. return 0;
  878. }
  879. int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len)
  880. {
  881. return kvm_write_guest_page(kvm, gfn, empty_zero_page, offset, len);
  882. }
  883. EXPORT_SYMBOL_GPL(kvm_clear_guest_page);
  884. int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len)
  885. {
  886. gfn_t gfn = gpa >> PAGE_SHIFT;
  887. int seg;
  888. int offset = offset_in_page(gpa);
  889. int ret;
  890. while ((seg = next_segment(len, offset)) != 0) {
  891. ret = kvm_clear_guest_page(kvm, gfn, offset, seg);
  892. if (ret < 0)
  893. return ret;
  894. offset = 0;
  895. len -= seg;
  896. ++gfn;
  897. }
  898. return 0;
  899. }
  900. EXPORT_SYMBOL_GPL(kvm_clear_guest);
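/*
 * mark_page_dirty() records a guest write to @gfn in the memslot's
 * dirty_bitmap (one bit per page); kvm_get_dirty_log() copies that bitmap
 * to userspace for the KVM_GET_DIRTY_LOG ioctl.
 */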
  901. void mark_page_dirty(struct kvm *kvm, gfn_t gfn)
  902. {
  903. struct kvm_memory_slot *memslot;
  904. gfn = unalias_gfn(kvm, gfn);
  905. memslot = gfn_to_memslot_unaliased(kvm, gfn);
  906. if (memslot && memslot->dirty_bitmap) {
  907. unsigned long rel_gfn = gfn - memslot->base_gfn;
  908. /* avoid RMW */
  909. if (!generic_test_le_bit(rel_gfn, memslot->dirty_bitmap))
  910. generic___set_le_bit(rel_gfn, memslot->dirty_bitmap);
  911. }
  912. }
  913. /*
  914. * The vCPU has executed a HLT instruction with in-kernel mode enabled.
  915. */
  916. void kvm_vcpu_block(struct kvm_vcpu *vcpu)
  917. {
  918. DEFINE_WAIT(wait);
  919. for (;;) {
  920. prepare_to_wait(&vcpu->wq, &wait, TASK_INTERRUPTIBLE);
  921. if (kvm_arch_vcpu_runnable(vcpu)) {
  922. set_bit(KVM_REQ_UNHALT, &vcpu->requests);
  923. break;
  924. }
  925. if (kvm_cpu_has_pending_timer(vcpu))
  926. break;
  927. if (signal_pending(current))
  928. break;
  929. schedule();
  930. }
  931. finish_wait(&vcpu->wq, &wait);
  932. }
  933. void kvm_resched(struct kvm_vcpu *vcpu)
  934. {
  935. if (!need_resched())
  936. return;
  937. cond_resched();
  938. }
  939. EXPORT_SYMBOL_GPL(kvm_resched);
  940. void kvm_vcpu_on_spin(struct kvm_vcpu *vcpu)
  941. {
  942. ktime_t expires;
  943. DEFINE_WAIT(wait);
  944. prepare_to_wait(&vcpu->wq, &wait, TASK_INTERRUPTIBLE);
  945. /* Sleep for 100 us, and hope lock-holder got scheduled */
  946. expires = ktime_add_ns(ktime_get(), 100000UL);
  947. schedule_hrtimeout(&expires, HRTIMER_MODE_ABS);
  948. finish_wait(&vcpu->wq, &wait);
  949. }
  950. EXPORT_SYMBOL_GPL(kvm_vcpu_on_spin);
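/*
 * Userspace reaches the shared kvm_run structure by mmap()ing the vcpu fd:
 * page 0 maps vcpu->run, on x86 a second page carries the PIO data, and an
 * optional page maps the coalesced MMIO ring.  KVM_GET_VCPU_MMAP_SIZE
 * reports the matching size.
 */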
  951. static int kvm_vcpu_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
  952. {
  953. struct kvm_vcpu *vcpu = vma->vm_file->private_data;
  954. struct page *page;
  955. if (vmf->pgoff == 0)
  956. page = virt_to_page(vcpu->run);
  957. #ifdef CONFIG_X86
  958. else if (vmf->pgoff == KVM_PIO_PAGE_OFFSET)
  959. page = virt_to_page(vcpu->arch.pio_data);
  960. #endif
  961. #ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
  962. else if (vmf->pgoff == KVM_COALESCED_MMIO_PAGE_OFFSET)
  963. page = virt_to_page(vcpu->kvm->coalesced_mmio_ring);
  964. #endif
  965. else
  966. return VM_FAULT_SIGBUS;
  967. get_page(page);
  968. vmf->page = page;
  969. return 0;
  970. }
  971. static const struct vm_operations_struct kvm_vcpu_vm_ops = {
  972. .fault = kvm_vcpu_fault,
  973. };
  974. static int kvm_vcpu_mmap(struct file *file, struct vm_area_struct *vma)
  975. {
  976. vma->vm_ops = &kvm_vcpu_vm_ops;
  977. return 0;
  978. }
  979. static int kvm_vcpu_release(struct inode *inode, struct file *filp)
  980. {
  981. struct kvm_vcpu *vcpu = filp->private_data;
  982. kvm_put_kvm(vcpu->kvm);
  983. return 0;
  984. }
  985. static struct file_operations kvm_vcpu_fops = {
  986. .release = kvm_vcpu_release,
  987. .unlocked_ioctl = kvm_vcpu_ioctl,
  988. .compat_ioctl = kvm_vcpu_ioctl,
  989. .mmap = kvm_vcpu_mmap,
  990. };
  991. /*
  992. * Allocates an inode for the vcpu.
  993. */
  994. static int create_vcpu_fd(struct kvm_vcpu *vcpu)
  995. {
  996. return anon_inode_getfd("kvm-vcpu", &kvm_vcpu_fops, vcpu, O_RDWR);
  997. }
  998. /*
  999. * Creates some virtual cpus. Good luck creating more than one.
  1000. */
  1001. static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, u32 id)
  1002. {
  1003. int r;
  1004. struct kvm_vcpu *vcpu, *v;
  1005. vcpu = kvm_arch_vcpu_create(kvm, id);
  1006. if (IS_ERR(vcpu))
  1007. return PTR_ERR(vcpu);
  1008. preempt_notifier_init(&vcpu->preempt_notifier, &kvm_preempt_ops);
  1009. r = kvm_arch_vcpu_setup(vcpu);
  1010. if (r)
  1011. return r;
  1012. mutex_lock(&kvm->lock);
  1013. if (atomic_read(&kvm->online_vcpus) == KVM_MAX_VCPUS) {
  1014. r = -EINVAL;
  1015. goto vcpu_destroy;
  1016. }
  1017. kvm_for_each_vcpu(r, v, kvm)
  1018. if (v->vcpu_id == id) {
  1019. r = -EEXIST;
  1020. goto vcpu_destroy;
  1021. }
  1022. BUG_ON(kvm->vcpus[atomic_read(&kvm->online_vcpus)]);
  1023. /* Now it's all set up, let userspace reach it */
  1024. kvm_get_kvm(kvm);
  1025. r = create_vcpu_fd(vcpu);
  1026. if (r < 0) {
  1027. kvm_put_kvm(kvm);
  1028. goto vcpu_destroy;
  1029. }
  1030. kvm->vcpus[atomic_read(&kvm->online_vcpus)] = vcpu;
  1031. smp_wmb();
  1032. atomic_inc(&kvm->online_vcpus);
  1033. #ifdef CONFIG_KVM_APIC_ARCHITECTURE
  1034. if (kvm->bsp_vcpu_id == id)
  1035. kvm->bsp_vcpu = vcpu;
  1036. #endif
  1037. mutex_unlock(&kvm->lock);
  1038. return r;
  1039. vcpu_destroy:
  1040. mutex_unlock(&kvm->lock);
  1041. kvm_arch_vcpu_destroy(vcpu);
  1042. return r;
  1043. }
  1044. static int kvm_vcpu_ioctl_set_sigmask(struct kvm_vcpu *vcpu, sigset_t *sigset)
  1045. {
  1046. if (sigset) {
  1047. sigdelsetmask(sigset, sigmask(SIGKILL)|sigmask(SIGSTOP));
  1048. vcpu->sigset_active = 1;
  1049. vcpu->sigset = *sigset;
  1050. } else
  1051. vcpu->sigset_active = 0;
  1052. return 0;
  1053. }
  1054. static long kvm_vcpu_ioctl(struct file *filp,
  1055. unsigned int ioctl, unsigned long arg)
  1056. {
  1057. struct kvm_vcpu *vcpu = filp->private_data;
  1058. void __user *argp = (void __user *)arg;
  1059. int r;
  1060. struct kvm_fpu *fpu = NULL;
  1061. struct kvm_sregs *kvm_sregs = NULL;
  1062. if (vcpu->kvm->mm != current->mm)
  1063. return -EIO;
  1064. switch (ioctl) {
  1065. case KVM_RUN:
  1066. r = -EINVAL;
  1067. if (arg)
  1068. goto out;
  1069. r = kvm_arch_vcpu_ioctl_run(vcpu, vcpu->run);
  1070. break;
  1071. case KVM_GET_REGS: {
  1072. struct kvm_regs *kvm_regs;
  1073. r = -ENOMEM;
  1074. kvm_regs = kzalloc(sizeof(struct kvm_regs), GFP_KERNEL);
  1075. if (!kvm_regs)
  1076. goto out;
  1077. r = kvm_arch_vcpu_ioctl_get_regs(vcpu, kvm_regs);
  1078. if (r)
  1079. goto out_free1;
  1080. r = -EFAULT;
  1081. if (copy_to_user(argp, kvm_regs, sizeof(struct kvm_regs)))
  1082. goto out_free1;
  1083. r = 0;
  1084. out_free1:
  1085. kfree(kvm_regs);
  1086. break;
  1087. }
  1088. case KVM_SET_REGS: {
  1089. struct kvm_regs *kvm_regs;
  1090. r = -ENOMEM;
  1091. kvm_regs = kzalloc(sizeof(struct kvm_regs), GFP_KERNEL);
  1092. if (!kvm_regs)
  1093. goto out;
  1094. r = -EFAULT;
  1095. if (copy_from_user(kvm_regs, argp, sizeof(struct kvm_regs)))
  1096. goto out_free2;
  1097. r = kvm_arch_vcpu_ioctl_set_regs(vcpu, kvm_regs);
  1098. if (r)
  1099. goto out_free2;
  1100. r = 0;
  1101. out_free2:
  1102. kfree(kvm_regs);
  1103. break;
  1104. }
  1105. case KVM_GET_SREGS: {
  1106. kvm_sregs = kzalloc(sizeof(struct kvm_sregs), GFP_KERNEL);
  1107. r = -ENOMEM;
  1108. if (!kvm_sregs)
  1109. goto out;
  1110. r = kvm_arch_vcpu_ioctl_get_sregs(vcpu, kvm_sregs);
  1111. if (r)
  1112. goto out;
  1113. r = -EFAULT;
  1114. if (copy_to_user(argp, kvm_sregs, sizeof(struct kvm_sregs)))
  1115. goto out;
  1116. r = 0;
  1117. break;
  1118. }
  1119. case KVM_SET_SREGS: {
  1120. kvm_sregs = kmalloc(sizeof(struct kvm_sregs), GFP_KERNEL);
  1121. r = -ENOMEM;
  1122. if (!kvm_sregs)
  1123. goto out;
  1124. r = -EFAULT;
  1125. if (copy_from_user(kvm_sregs, argp, sizeof(struct kvm_sregs)))
  1126. goto out;
  1127. r = kvm_arch_vcpu_ioctl_set_sregs(vcpu, kvm_sregs);
  1128. if (r)
  1129. goto out;
  1130. r = 0;
  1131. break;
  1132. }
  1133. case KVM_GET_MP_STATE: {
  1134. struct kvm_mp_state mp_state;
  1135. r = kvm_arch_vcpu_ioctl_get_mpstate(vcpu, &mp_state);
  1136. if (r)
  1137. goto out;
  1138. r = -EFAULT;
  1139. if (copy_to_user(argp, &mp_state, sizeof mp_state))
  1140. goto out;
  1141. r = 0;
  1142. break;
  1143. }
  1144. case KVM_SET_MP_STATE: {
  1145. struct kvm_mp_state mp_state;
  1146. r = -EFAULT;
  1147. if (copy_from_user(&mp_state, argp, sizeof mp_state))
  1148. goto out;
  1149. r = kvm_arch_vcpu_ioctl_set_mpstate(vcpu, &mp_state);
  1150. if (r)
  1151. goto out;
  1152. r = 0;
  1153. break;
  1154. }
  1155. case KVM_TRANSLATE: {
  1156. struct kvm_translation tr;
  1157. r = -EFAULT;
  1158. if (copy_from_user(&tr, argp, sizeof tr))
  1159. goto out;
  1160. r = kvm_arch_vcpu_ioctl_translate(vcpu, &tr);
  1161. if (r)
  1162. goto out;
  1163. r = -EFAULT;
  1164. if (copy_to_user(argp, &tr, sizeof tr))
  1165. goto out;
  1166. r = 0;
  1167. break;
  1168. }
  1169. case KVM_SET_GUEST_DEBUG: {
  1170. struct kvm_guest_debug dbg;
  1171. r = -EFAULT;
  1172. if (copy_from_user(&dbg, argp, sizeof dbg))
  1173. goto out;
  1174. r = kvm_arch_vcpu_ioctl_set_guest_debug(vcpu, &dbg);
  1175. if (r)
  1176. goto out;
  1177. r = 0;
  1178. break;
  1179. }
  1180. case KVM_SET_SIGNAL_MASK: {
  1181. struct kvm_signal_mask __user *sigmask_arg = argp;
  1182. struct kvm_signal_mask kvm_sigmask;
  1183. sigset_t sigset, *p;
  1184. p = NULL;
  1185. if (argp) {
  1186. r = -EFAULT;
  1187. if (copy_from_user(&kvm_sigmask, argp,
  1188. sizeof kvm_sigmask))
  1189. goto out;
  1190. r = -EINVAL;
  1191. if (kvm_sigmask.len != sizeof sigset)
  1192. goto out;
  1193. r = -EFAULT;
  1194. if (copy_from_user(&sigset, sigmask_arg->sigset,
  1195. sizeof sigset))
  1196. goto out;
  1197. p = &sigset;
  1198. }
  1199. r = kvm_vcpu_ioctl_set_sigmask(vcpu, p);
  1200. break;
  1201. }
  1202. case KVM_GET_FPU: {
  1203. fpu = kzalloc(sizeof(struct kvm_fpu), GFP_KERNEL);
  1204. r = -ENOMEM;
  1205. if (!fpu)
  1206. goto out;
  1207. r = kvm_arch_vcpu_ioctl_get_fpu(vcpu, fpu);
  1208. if (r)
  1209. goto out;
  1210. r = -EFAULT;
  1211. if (copy_to_user(argp, fpu, sizeof(struct kvm_fpu)))
  1212. goto out;
  1213. r = 0;
  1214. break;
  1215. }
  1216. case KVM_SET_FPU: {
  1217. fpu = kmalloc(sizeof(struct kvm_fpu), GFP_KERNEL);
  1218. r = -ENOMEM;
  1219. if (!fpu)
  1220. goto out;
  1221. r = -EFAULT;
  1222. if (copy_from_user(fpu, argp, sizeof(struct kvm_fpu)))
  1223. goto out;
  1224. r = kvm_arch_vcpu_ioctl_set_fpu(vcpu, fpu);
  1225. if (r)
  1226. goto out;
  1227. r = 0;
  1228. break;
  1229. }
  1230. default:
  1231. r = kvm_arch_vcpu_ioctl(filp, ioctl, arg);
  1232. }
  1233. out:
  1234. kfree(fpu);
  1235. kfree(kvm_sregs);
  1236. return r;
  1237. }
  1238. static long kvm_vm_ioctl(struct file *filp,
  1239. unsigned int ioctl, unsigned long arg)
  1240. {
  1241. struct kvm *kvm = filp->private_data;
  1242. void __user *argp = (void __user *)arg;
  1243. int r;
  1244. if (kvm->mm != current->mm)
  1245. return -EIO;
  1246. switch (ioctl) {
  1247. case KVM_CREATE_VCPU:
  1248. r = kvm_vm_ioctl_create_vcpu(kvm, arg);
  1249. if (r < 0)
  1250. goto out;
  1251. break;
  1252. case KVM_SET_USER_MEMORY_REGION: {
  1253. struct kvm_userspace_memory_region kvm_userspace_mem;
  1254. r = -EFAULT;
  1255. if (copy_from_user(&kvm_userspace_mem, argp,
  1256. sizeof kvm_userspace_mem))
  1257. goto out;
  1258. r = kvm_vm_ioctl_set_memory_region(kvm, &kvm_userspace_mem, 1);
  1259. if (r)
  1260. goto out;
  1261. break;
  1262. }
  1263. case KVM_GET_DIRTY_LOG: {
  1264. struct kvm_dirty_log log;
  1265. r = -EFAULT;
  1266. if (copy_from_user(&log, argp, sizeof log))
  1267. goto out;
  1268. r = kvm_vm_ioctl_get_dirty_log(kvm, &log);
  1269. if (r)
  1270. goto out;
  1271. break;
  1272. }
  1273. #ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
  1274. case KVM_REGISTER_COALESCED_MMIO: {
  1275. struct kvm_coalesced_mmio_zone zone;
  1276. r = -EFAULT;
  1277. if (copy_from_user(&zone, argp, sizeof zone))
  1278. goto out;
  1279. r = -ENXIO;
  1280. r = kvm_vm_ioctl_register_coalesced_mmio(kvm, &zone);
  1281. if (r)
  1282. goto out;
  1283. r = 0;
  1284. break;
  1285. }
  1286. case KVM_UNREGISTER_COALESCED_MMIO: {
  1287. struct kvm_coalesced_mmio_zone zone;
  1288. r = -EFAULT;
  1289. if (copy_from_user(&zone, argp, sizeof zone))
  1290. goto out;
  1291. r = -ENXIO;
  1292. r = kvm_vm_ioctl_unregister_coalesced_mmio(kvm, &zone);
  1293. if (r)
  1294. goto out;
  1295. r = 0;
  1296. break;
  1297. }
  1298. #endif
  1299. case KVM_IRQFD: {
  1300. struct kvm_irqfd data;
  1301. r = -EFAULT;
  1302. if (copy_from_user(&data, argp, sizeof data))
  1303. goto out;
  1304. r = kvm_irqfd(kvm, data.fd, data.gsi, data.flags);
  1305. break;
  1306. }
  1307. case KVM_IOEVENTFD: {
  1308. struct kvm_ioeventfd data;
  1309. r = -EFAULT;
  1310. if (copy_from_user(&data, argp, sizeof data))
  1311. goto out;
  1312. r = kvm_ioeventfd(kvm, &data);
  1313. break;
  1314. }
  1315. #ifdef CONFIG_KVM_APIC_ARCHITECTURE
  1316. case KVM_SET_BOOT_CPU_ID:
  1317. r = 0;
  1318. mutex_lock(&kvm->lock);
  1319. if (atomic_read(&kvm->online_vcpus) != 0)
  1320. r = -EBUSY;
  1321. else
  1322. kvm->bsp_vcpu_id = arg;
  1323. mutex_unlock(&kvm->lock);
  1324. break;
  1325. #endif
  1326. default:
  1327. r = kvm_arch_vm_ioctl(filp, ioctl, arg);
  1328. if (r == -ENOTTY)
  1329. r = kvm_vm_ioctl_assigned_device(kvm, ioctl, arg);
  1330. }
  1331. out:
  1332. return r;
  1333. }
  1334. #ifdef CONFIG_COMPAT
  1335. struct compat_kvm_dirty_log {
  1336. __u32 slot;
  1337. __u32 padding1;
  1338. union {
  1339. compat_uptr_t dirty_bitmap; /* one bit per page */
  1340. __u64 padding2;
  1341. };
  1342. };
  1343. static long kvm_vm_compat_ioctl(struct file *filp,
  1344. unsigned int ioctl, unsigned long arg)
  1345. {
  1346. struct kvm *kvm = filp->private_data;
  1347. int r;
  1348. if (kvm->mm != current->mm)
  1349. return -EIO;
  1350. switch (ioctl) {
  1351. case KVM_GET_DIRTY_LOG: {
  1352. struct compat_kvm_dirty_log compat_log;
  1353. struct kvm_dirty_log log;
  1354. r = -EFAULT;
  1355. if (copy_from_user(&compat_log, (void __user *)arg,
  1356. sizeof(compat_log)))
  1357. goto out;
  1358. log.slot = compat_log.slot;
  1359. log.padding1 = compat_log.padding1;
  1360. log.padding2 = compat_log.padding2;
  1361. log.dirty_bitmap = compat_ptr(compat_log.dirty_bitmap);
  1362. r = kvm_vm_ioctl_get_dirty_log(kvm, &log);
  1363. if (r)
  1364. goto out;
  1365. break;
  1366. }
  1367. default:
  1368. r = kvm_vm_ioctl(filp, ioctl, arg);
  1369. }
  1370. out:
  1371. return r;
  1372. }
  1373. #endif
  1374. static int kvm_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
  1375. {
  1376. struct page *page[1];
  1377. unsigned long addr;
  1378. int npages;
  1379. gfn_t gfn = vmf->pgoff;
  1380. struct kvm *kvm = vma->vm_file->private_data;
  1381. addr = gfn_to_hva(kvm, gfn);
  1382. if (kvm_is_error_hva(addr))
  1383. return VM_FAULT_SIGBUS;
  1384. npages = get_user_pages(current, current->mm, addr, 1, 1, 0, page,
  1385. NULL);
  1386. if (unlikely(npages != 1))
  1387. return VM_FAULT_SIGBUS;
  1388. vmf->page = page[0];
  1389. return 0;
  1390. }
  1391. static const struct vm_operations_struct kvm_vm_vm_ops = {
  1392. .fault = kvm_vm_fault,
  1393. };
  1394. static int kvm_vm_mmap(struct file *file, struct vm_area_struct *vma)
  1395. {
  1396. vma->vm_ops = &kvm_vm_vm_ops;
  1397. return 0;
  1398. }
  1399. static struct file_operations kvm_vm_fops = {
  1400. .release = kvm_vm_release,
  1401. .unlocked_ioctl = kvm_vm_ioctl,
  1402. #ifdef CONFIG_COMPAT
  1403. .compat_ioctl = kvm_vm_compat_ioctl,
  1404. #endif
  1405. .mmap = kvm_vm_mmap,
  1406. };
  1407. static int kvm_dev_ioctl_create_vm(void)
  1408. {
  1409. int fd;
  1410. struct kvm *kvm;
  1411. kvm = kvm_create_vm();
  1412. if (IS_ERR(kvm))
  1413. return PTR_ERR(kvm);
  1414. fd = anon_inode_getfd("kvm-vm", &kvm_vm_fops, kvm, O_RDWR);
  1415. if (fd < 0)
  1416. kvm_put_kvm(kvm);
  1417. return fd;
  1418. }
  1419. static long kvm_dev_ioctl_check_extension_generic(long arg)
  1420. {
  1421. switch (arg) {
  1422. case KVM_CAP_USER_MEMORY:
  1423. case KVM_CAP_DESTROY_MEMORY_REGION_WORKS:
  1424. case KVM_CAP_JOIN_MEMORY_REGIONS_WORKS:
  1425. #ifdef CONFIG_KVM_APIC_ARCHITECTURE
  1426. case KVM_CAP_SET_BOOT_CPU_ID:
  1427. #endif
  1428. case KVM_CAP_INTERNAL_ERROR_DATA:
  1429. return 1;
  1430. #ifdef CONFIG_HAVE_KVM_IRQCHIP
  1431. case KVM_CAP_IRQ_ROUTING:
  1432. return KVM_MAX_IRQ_ROUTES;
  1433. #endif
  1434. default:
  1435. break;
  1436. }
  1437. return kvm_dev_ioctl_check_extension(arg);
  1438. }
  1439. static long kvm_dev_ioctl(struct file *filp,
  1440. unsigned int ioctl, unsigned long arg)
  1441. {
  1442. long r = -EINVAL;
  1443. switch (ioctl) {
  1444. case KVM_GET_API_VERSION:
  1445. r = -EINVAL;
  1446. if (arg)
  1447. goto out;
  1448. r = KVM_API_VERSION;
  1449. break;
  1450. case KVM_CREATE_VM:
  1451. r = -EINVAL;
  1452. if (arg)
  1453. goto out;
  1454. r = kvm_dev_ioctl_create_vm();
  1455. break;
  1456. case KVM_CHECK_EXTENSION:
  1457. r = kvm_dev_ioctl_check_extension_generic(arg);
  1458. break;
  1459. case KVM_GET_VCPU_MMAP_SIZE:
  1460. r = -EINVAL;
  1461. if (arg)
  1462. goto out;
  1463. r = PAGE_SIZE; /* struct kvm_run */
  1464. #ifdef CONFIG_X86
  1465. r += PAGE_SIZE; /* pio data page */
  1466. #endif
  1467. #ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
  1468. r += PAGE_SIZE; /* coalesced mmio ring page */
  1469. #endif
  1470. break;
  1471. case KVM_TRACE_ENABLE:
  1472. case KVM_TRACE_PAUSE:
  1473. case KVM_TRACE_DISABLE:
  1474. r = -EOPNOTSUPP;
  1475. break;
  1476. default:
  1477. return kvm_arch_dev_ioctl(filp, ioctl, arg);
  1478. }
  1479. out:
  1480. return r;
  1481. }
  1482. static struct file_operations kvm_chardev_ops = {
  1483. .unlocked_ioctl = kvm_dev_ioctl,
  1484. .compat_ioctl = kvm_dev_ioctl,
  1485. };
  1486. static struct miscdevice kvm_dev = {
  1487. KVM_MINOR,
  1488. "kvm",
  1489. &kvm_chardev_ops,
  1490. };
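/*
 * Hardware virtualization is enabled lazily: hardware_enable_all() bumps
 * kvm_usage_count when a VM is created and enables VMX/SVM on every online
 * cpu the first time, and hardware_disable_all() undoes it when the last
 * VM goes away.  The cpus_hardware_enabled mask keeps both paths
 * idempotent per cpu.
 */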
  1491. static void hardware_enable(void *junk)
  1492. {
  1493. int cpu = raw_smp_processor_id();
  1494. int r;
  1495. if (cpumask_test_cpu(cpu, cpus_hardware_enabled))
  1496. return;
  1497. cpumask_set_cpu(cpu, cpus_hardware_enabled);
  1498. r = kvm_arch_hardware_enable(NULL);
  1499. if (r) {
  1500. cpumask_clear_cpu(cpu, cpus_hardware_enabled);
  1501. atomic_inc(&hardware_enable_failed);
  1502. printk(KERN_INFO "kvm: enabling virtualization on "
  1503. "CPU%d failed\n", cpu);
  1504. }
  1505. }
  1506. static void hardware_disable(void *junk)
  1507. {
  1508. int cpu = raw_smp_processor_id();
  1509. if (!cpumask_test_cpu(cpu, cpus_hardware_enabled))
  1510. return;
  1511. cpumask_clear_cpu(cpu, cpus_hardware_enabled);
  1512. kvm_arch_hardware_disable(NULL);
  1513. }
  1514. static void hardware_disable_all_nolock(void)
  1515. {
  1516. BUG_ON(!kvm_usage_count);
  1517. kvm_usage_count--;
  1518. if (!kvm_usage_count)
  1519. on_each_cpu(hardware_disable, NULL, 1);
  1520. }
  1521. static void hardware_disable_all(void)
  1522. {
  1523. spin_lock(&kvm_lock);
  1524. hardware_disable_all_nolock();
  1525. spin_unlock(&kvm_lock);
  1526. }
  1527. static int hardware_enable_all(void)
  1528. {
  1529. int r = 0;
  1530. spin_lock(&kvm_lock);
  1531. kvm_usage_count++;
  1532. if (kvm_usage_count == 1) {
  1533. atomic_set(&hardware_enable_failed, 0);
  1534. on_each_cpu(hardware_enable, NULL, 1);
  1535. if (atomic_read(&hardware_enable_failed)) {
  1536. hardware_disable_all_nolock();
  1537. r = -EBUSY;
  1538. }
  1539. }
  1540. spin_unlock(&kvm_lock);
  1541. return r;
  1542. }
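/*
 * The cpu hotplug notifier keeps cpus_hardware_enabled consistent while
 * VMs exist: newly onlined cpus get virtualization enabled, dying or
 * up-cancelled cpus get it disabled.
 */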
static int kvm_cpu_hotplug(struct notifier_block *notifier, unsigned long val,
			   void *v)
{
	int cpu = (long)v;

	if (!kvm_usage_count)
		return NOTIFY_OK;

	val &= ~CPU_TASKS_FROZEN;
	switch (val) {
	case CPU_DYING:
		printk(KERN_INFO "kvm: disabling virtualization on CPU%d\n",
		       cpu);
		hardware_disable(NULL);
		break;
	case CPU_UP_CANCELED:
		printk(KERN_INFO "kvm: disabling virtualization on CPU%d\n",
		       cpu);
		smp_call_function_single(cpu, hardware_disable, NULL, 1);
		break;
	case CPU_ONLINE:
		printk(KERN_INFO "kvm: enabling virtualization on CPU%d\n",
		       cpu);
		smp_call_function_single(cpu, hardware_enable, NULL, 1);
		break;
	}
	return NOTIFY_OK;
}
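/*
 * Fixup handler for virtualization instructions that fault after the reboot
 * notifier has disabled hardware virtualization: spin until the machine
 * resets.  A fault at any other time is a genuine bug.
 */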
asmlinkage void kvm_handle_fault_on_reboot(void)
{
	if (kvm_rebooting)
		/* spin while reset goes on */
		while (true)
			;
	/* Fault while not rebooting.  We want the trace. */
	BUG();
}
EXPORT_SYMBOL_GPL(kvm_handle_fault_on_reboot);
static int kvm_reboot(struct notifier_block *notifier, unsigned long val,
		      void *v)
{
	/*
	 * Some BIOSes hang on reboot if the CPU is left in VMX root mode.
	 *
	 * Intel TXT also requires VMX to be off on every CPU when the
	 * system shuts down.
	 */
	printk(KERN_INFO "kvm: exiting hardware virtualization\n");
	kvm_rebooting = true;
	on_each_cpu(hardware_disable, NULL, 1);
	return NOTIFY_OK;
}

static struct notifier_block kvm_reboot_notifier = {
	.notifier_call = kvm_reboot,
	.priority = 0,
};
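/*
 * An I/O bus holds the in-kernel devices that back MMIO or PIO ranges.
 * Reads and writes are offered to each registered device in turn until one
 * of them claims the access.
 */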
void kvm_io_bus_init(struct kvm_io_bus *bus)
{
	memset(bus, 0, sizeof(*bus));
}

void kvm_io_bus_destroy(struct kvm_io_bus *bus)
{
	int i;

	for (i = 0; i < bus->dev_count; i++) {
		struct kvm_io_device *pos = bus->devs[i];

		kvm_iodevice_destructor(pos);
	}
}

/* kvm_io_bus_write - called under kvm->slots_lock */
int kvm_io_bus_write(struct kvm_io_bus *bus, gpa_t addr,
		     int len, const void *val)
{
	int i;

	for (i = 0; i < bus->dev_count; i++)
		if (!kvm_iodevice_write(bus->devs[i], addr, len, val))
			return 0;
	return -EOPNOTSUPP;
}

/* kvm_io_bus_read - called under kvm->slots_lock */
int kvm_io_bus_read(struct kvm_io_bus *bus, gpa_t addr, int len, void *val)
{
	int i;

	for (i = 0; i < bus->dev_count; i++)
		if (!kvm_iodevice_read(bus->devs[i], addr, len, val))
			return 0;
	return -EOPNOTSUPP;
}
int kvm_io_bus_register_dev(struct kvm *kvm, struct kvm_io_bus *bus,
			    struct kvm_io_device *dev)
{
	int ret;

	down_write(&kvm->slots_lock);
	ret = __kvm_io_bus_register_dev(bus, dev);
	up_write(&kvm->slots_lock);

	return ret;
}

/* An unlocked version. Caller must have write lock on slots_lock. */
int __kvm_io_bus_register_dev(struct kvm_io_bus *bus,
			      struct kvm_io_device *dev)
{
	if (bus->dev_count > NR_IOBUS_DEVS-1)
		return -ENOSPC;

	bus->devs[bus->dev_count++] = dev;

	return 0;
}

void kvm_io_bus_unregister_dev(struct kvm *kvm,
			       struct kvm_io_bus *bus,
			       struct kvm_io_device *dev)
{
	down_write(&kvm->slots_lock);
	__kvm_io_bus_unregister_dev(bus, dev);
	up_write(&kvm->slots_lock);
}

/* An unlocked version. Caller must have write lock on slots_lock. */
void __kvm_io_bus_unregister_dev(struct kvm_io_bus *bus,
				 struct kvm_io_device *dev)
{
	int i;

	for (i = 0; i < bus->dev_count; i++)
		if (bus->devs[i] == dev) {
			bus->devs[i] = bus->devs[--bus->dev_count];
			break;
		}
}
static struct notifier_block kvm_cpu_notifier = {
	.notifier_call = kvm_cpu_hotplug,
	.priority = 20, /* must be > scheduler priority */
};
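/*
 * Each debugfs statistic is a per-VM or per-vCPU counter at a fixed offset,
 * summed over all VMs (and their vCPUs) under kvm_lock.
 */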
static int vm_stat_get(void *_offset, u64 *val)
{
	unsigned offset = (long)_offset;
	struct kvm *kvm;

	*val = 0;
	spin_lock(&kvm_lock);
	list_for_each_entry(kvm, &vm_list, vm_list)
		*val += *(u32 *)((void *)kvm + offset);
	spin_unlock(&kvm_lock);
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(vm_stat_fops, vm_stat_get, NULL, "%llu\n");

static int vcpu_stat_get(void *_offset, u64 *val)
{
	unsigned offset = (long)_offset;
	struct kvm *kvm;
	struct kvm_vcpu *vcpu;
	int i;

	*val = 0;
	spin_lock(&kvm_lock);
	list_for_each_entry(kvm, &vm_list, vm_list)
		kvm_for_each_vcpu(i, vcpu, kvm)
			*val += *(u32 *)((void *)vcpu + offset);
	spin_unlock(&kvm_lock);
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(vcpu_stat_fops, vcpu_stat_get, NULL, "%llu\n");

static const struct file_operations *stat_fops[] = {
	[KVM_STAT_VCPU] = &vcpu_stat_fops,
	[KVM_STAT_VM]   = &vm_stat_fops,
};
static void kvm_init_debug(void)
{
	struct kvm_stats_debugfs_item *p;

	kvm_debugfs_dir = debugfs_create_dir("kvm", NULL);
	for (p = debugfs_entries; p->name; ++p)
		p->dentry = debugfs_create_file(p->name, 0444, kvm_debugfs_dir,
						(void *)(long)p->offset,
						stat_fops[p->kind]);
}

static void kvm_exit_debug(void)
{
	struct kvm_stats_debugfs_item *p;

	for (p = debugfs_entries; p->name; ++p)
		debugfs_remove(p->dentry);
	debugfs_remove(kvm_debugfs_dir);
}
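/*
 * System suspend/resume hooks: turn hardware virtualization off before the
 * machine sleeps and back on when it wakes, but only while KVM is in use.
 */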
static int kvm_suspend(struct sys_device *dev, pm_message_t state)
{
	if (kvm_usage_count)
		hardware_disable(NULL);
	return 0;
}

static int kvm_resume(struct sys_device *dev)
{
	if (kvm_usage_count)
		hardware_enable(NULL);
	return 0;
}

static struct sysdev_class kvm_sysdev_class = {
	.name = "kvm",
	.suspend = kvm_suspend,
	.resume = kvm_resume,
};

static struct sys_device kvm_sysdev = {
	.id = 0,
	.cls = &kvm_sysdev_class,
};
struct page *bad_page;
pfn_t bad_pfn;

static inline
struct kvm_vcpu *preempt_notifier_to_vcpu(struct preempt_notifier *pn)
{
	return container_of(pn, struct kvm_vcpu, preempt_notifier);
}
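/*
 * Preempt notifier hooks: reload the vCPU's architecture state when its task
 * is scheduled back in and save it away when the task is scheduled out.
 */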
static void kvm_sched_in(struct preempt_notifier *pn, int cpu)
{
	struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);

	kvm_arch_vcpu_load(vcpu, cpu);
}

static void kvm_sched_out(struct preempt_notifier *pn,
			  struct task_struct *next)
{
	struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);

	kvm_arch_vcpu_put(vcpu);
}
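/*
 * Module-wide initialization, called from the architecture module's own init
 * routine: set up the architecture code, the reserved bad page, the per-CPU
 * enable mask, the hotplug/reboot/sysdev hooks, the vcpu slab cache and the
 * /dev/kvm misc device.  Every step is unwound in reverse order on failure.
 */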
int kvm_init(void *opaque, unsigned int vcpu_size,
	     struct module *module)
{
	int r;
	int cpu;

	r = kvm_arch_init(opaque);
	if (r)
		goto out_fail;

	bad_page = alloc_page(GFP_KERNEL | __GFP_ZERO);

	if (bad_page == NULL) {
		r = -ENOMEM;
		goto out;
	}

	bad_pfn = page_to_pfn(bad_page);

	if (!zalloc_cpumask_var(&cpus_hardware_enabled, GFP_KERNEL)) {
		r = -ENOMEM;
		goto out_free_0;
	}

	r = kvm_arch_hardware_setup();
	if (r < 0)
		goto out_free_0a;

	for_each_online_cpu(cpu) {
		smp_call_function_single(cpu,
					 kvm_arch_check_processor_compat,
					 &r, 1);
		if (r < 0)
			goto out_free_1;
	}

	r = register_cpu_notifier(&kvm_cpu_notifier);
	if (r)
		goto out_free_2;
	register_reboot_notifier(&kvm_reboot_notifier);

	r = sysdev_class_register(&kvm_sysdev_class);
	if (r)
		goto out_free_3;

	r = sysdev_register(&kvm_sysdev);
	if (r)
		goto out_free_4;

	/* A kmem cache lets us meet the alignment requirements of fx_save. */
	kvm_vcpu_cache = kmem_cache_create("kvm_vcpu", vcpu_size,
					   __alignof__(struct kvm_vcpu),
					   0, NULL);
	if (!kvm_vcpu_cache) {
		r = -ENOMEM;
		goto out_free_5;
	}

	kvm_chardev_ops.owner = module;
	kvm_vm_fops.owner = module;
	kvm_vcpu_fops.owner = module;

	r = misc_register(&kvm_dev);
	if (r) {
		printk(KERN_ERR "kvm: misc device register failed\n");
		goto out_free;
	}

	kvm_preempt_ops.sched_in = kvm_sched_in;
	kvm_preempt_ops.sched_out = kvm_sched_out;

	kvm_init_debug();

	return 0;

out_free:
	kmem_cache_destroy(kvm_vcpu_cache);
out_free_5:
	sysdev_unregister(&kvm_sysdev);
out_free_4:
	sysdev_class_unregister(&kvm_sysdev_class);
out_free_3:
	unregister_reboot_notifier(&kvm_reboot_notifier);
	unregister_cpu_notifier(&kvm_cpu_notifier);
out_free_2:
out_free_1:
	kvm_arch_hardware_unsetup();
out_free_0a:
	free_cpumask_var(cpus_hardware_enabled);
out_free_0:
	__free_page(bad_page);
out:
	kvm_arch_exit();
out_fail:
	return r;
}
EXPORT_SYMBOL_GPL(kvm_init);
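/*
 * Module-wide teardown: undo everything kvm_init() set up, in roughly the
 * reverse order, and make sure hardware virtualization is disabled on all
 * CPUs before the module goes away.
 */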
void kvm_exit(void)
{
	tracepoint_synchronize_unregister();
	kvm_exit_debug();
	misc_deregister(&kvm_dev);
	kmem_cache_destroy(kvm_vcpu_cache);
	sysdev_unregister(&kvm_sysdev);
	sysdev_class_unregister(&kvm_sysdev_class);
	unregister_reboot_notifier(&kvm_reboot_notifier);
	unregister_cpu_notifier(&kvm_cpu_notifier);
	on_each_cpu(hardware_disable, NULL, 1);
	kvm_arch_hardware_unsetup();
	kvm_arch_exit();
	free_cpumask_var(cpus_hardware_enabled);
	__free_page(bad_page);
}
EXPORT_SYMBOL_GPL(kvm_exit);