/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This module enables machines with Intel VT-x extensions to run virtual
 * machines without emulation or binary translation.
 *
 * Copyright (C) 2006 Qumranet, Inc.
 *
 * Authors:
 *   Avi Kivity   <avi@qumranet.com>
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#include "iodev.h"

#include <linux/kvm_host.h>
#include <linux/kvm.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/percpu.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/miscdevice.h>
#include <linux/vmalloc.h>
#include <linux/reboot.h>
#include <linux/debugfs.h>
#include <linux/highmem.h>
#include <linux/file.h>
#include <linux/sysdev.h>
#include <linux/cpu.h>
#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/smp.h>
#include <linux/anon_inodes.h>
#include <linux/profile.h>
#include <linux/kvm_para.h>
#include <linux/pagemap.h>
#include <linux/mman.h>
#include <linux/swap.h>

#include <asm/processor.h>
#include <asm/io.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>

MODULE_AUTHOR("Qumranet");
MODULE_LICENSE("GPL");

DEFINE_SPINLOCK(kvm_lock);
LIST_HEAD(vm_list);

static cpumask_t cpus_hardware_enabled;

struct kmem_cache *kvm_vcpu_cache;
EXPORT_SYMBOL_GPL(kvm_vcpu_cache);

static __read_mostly struct preempt_ops kvm_preempt_ops;

struct dentry *kvm_debugfs_dir;

static long kvm_vcpu_ioctl(struct file *file, unsigned int ioctl,
			   unsigned long arg);

bool kvm_rebooting;

static inline int valid_vcpu(int n)
{
	return likely(n >= 0 && n < KVM_MAX_VCPUS);
}

/*
 * Switches to specified vcpu, until a matching vcpu_put()
 */
void vcpu_load(struct kvm_vcpu *vcpu)
{
	int cpu;

	mutex_lock(&vcpu->mutex);
	cpu = get_cpu();
	preempt_notifier_register(&vcpu->preempt_notifier);
	kvm_arch_vcpu_load(vcpu, cpu);
	put_cpu();
}

void vcpu_put(struct kvm_vcpu *vcpu)
{
	preempt_disable();
	kvm_arch_vcpu_put(vcpu);
	preempt_notifier_unregister(&vcpu->preempt_notifier);
	preempt_enable();
	mutex_unlock(&vcpu->mutex);
}

static void ack_flush(void *_completed)
{
}
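
/*
 * Ask every vcpu to flush its TLB: the request bit is set on each vcpu,
 * and cpus currently running a vcpu are kicked with an IPI.  ack_flush
 * is deliberately empty; it exists only so smp_call_function_mask()
 * waits for the target cpus to acknowledge.
 */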
void kvm_flush_remote_tlbs(struct kvm *kvm)
{
	int i, cpu;
	cpumask_t cpus;
	struct kvm_vcpu *vcpu;

	cpus_clear(cpus);
	for (i = 0; i < KVM_MAX_VCPUS; ++i) {
		vcpu = kvm->vcpus[i];
		if (!vcpu)
			continue;
		if (test_and_set_bit(KVM_REQ_TLB_FLUSH, &vcpu->requests))
			continue;
		cpu = vcpu->cpu;
		if (cpu != -1 && cpu != raw_smp_processor_id())
			cpu_set(cpu, cpus);
	}
	if (cpus_empty(cpus))
		return;
	++kvm->stat.remote_tlb_flush;
	smp_call_function_mask(cpus, ack_flush, NULL, 1);
}

void kvm_reload_remote_mmus(struct kvm *kvm)
{
	int i, cpu;
	cpumask_t cpus;
	struct kvm_vcpu *vcpu;

	cpus_clear(cpus);
	for (i = 0; i < KVM_MAX_VCPUS; ++i) {
		vcpu = kvm->vcpus[i];
		if (!vcpu)
			continue;
		if (test_and_set_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests))
			continue;
		cpu = vcpu->cpu;
		if (cpu != -1 && cpu != raw_smp_processor_id())
			cpu_set(cpu, cpus);
	}
	if (cpus_empty(cpus))
		return;
	smp_call_function_mask(cpus, ack_flush, NULL, 1);
}
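
/*
 * Architecture-independent half of vcpu construction: initializes the
 * mutex and wait queue, allocates the zeroed kvm_run page that userspace
 * later mmaps, then hands off to the arch-specific init.
 */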
int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id)
{
	struct page *page;
	int r;

	mutex_init(&vcpu->mutex);
	vcpu->cpu = -1;
	vcpu->kvm = kvm;
	vcpu->vcpu_id = id;
	init_waitqueue_head(&vcpu->wq);

	page = alloc_page(GFP_KERNEL | __GFP_ZERO);
	if (!page) {
		r = -ENOMEM;
		goto fail;
	}
	vcpu->run = page_address(page);

	r = kvm_arch_vcpu_init(vcpu);
	if (r < 0)
		goto fail_free_run;
	return 0;

fail_free_run:
	free_page((unsigned long)vcpu->run);
fail:
	return r;
}
EXPORT_SYMBOL_GPL(kvm_vcpu_init);

void kvm_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	kvm_arch_vcpu_uninit(vcpu);
	free_page((unsigned long)vcpu->run);
}
EXPORT_SYMBOL_GPL(kvm_vcpu_uninit);

static struct kvm *kvm_create_vm(void)
{
	struct kvm *kvm = kvm_arch_create_vm();

	if (IS_ERR(kvm))
		goto out;

	kvm->mm = current->mm;
	atomic_inc(&kvm->mm->mm_count);
	spin_lock_init(&kvm->mmu_lock);
	kvm_io_bus_init(&kvm->pio_bus);
	mutex_init(&kvm->lock);
	kvm_io_bus_init(&kvm->mmio_bus);
	init_rwsem(&kvm->slots_lock);
	atomic_set(&kvm->users_count, 1);
	spin_lock(&kvm_lock);
	list_add(&kvm->vm_list, &vm_list);
	spin_unlock(&kvm_lock);
out:
	return kvm;
}

/*
 * Free any memory in @free but not in @dont.
 */
static void kvm_free_physmem_slot(struct kvm_memory_slot *free,
				  struct kvm_memory_slot *dont)
{
	if (!dont || free->rmap != dont->rmap)
		vfree(free->rmap);

	if (!dont || free->dirty_bitmap != dont->dirty_bitmap)
		vfree(free->dirty_bitmap);

	if (!dont || free->lpage_info != dont->lpage_info)
		vfree(free->lpage_info);

	free->npages = 0;
	free->dirty_bitmap = NULL;
	free->rmap = NULL;
	free->lpage_info = NULL;
}

void kvm_free_physmem(struct kvm *kvm)
{
	int i;

	for (i = 0; i < kvm->nmemslots; ++i)
		kvm_free_physmem_slot(&kvm->memslots[i], NULL);
}

static void kvm_destroy_vm(struct kvm *kvm)
{
	struct mm_struct *mm = kvm->mm;

	spin_lock(&kvm_lock);
	list_del(&kvm->vm_list);
	spin_unlock(&kvm_lock);
	kvm_io_bus_destroy(&kvm->pio_bus);
	kvm_io_bus_destroy(&kvm->mmio_bus);
	kvm_arch_destroy_vm(kvm);
	mmdrop(mm);
}

void kvm_get_kvm(struct kvm *kvm)
{
	atomic_inc(&kvm->users_count);
}
EXPORT_SYMBOL_GPL(kvm_get_kvm);

void kvm_put_kvm(struct kvm *kvm)
{
	if (atomic_dec_and_test(&kvm->users_count))
		kvm_destroy_vm(kvm);
}
EXPORT_SYMBOL_GPL(kvm_put_kvm);

static int kvm_vm_release(struct inode *inode, struct file *filp)
{
	struct kvm *kvm = filp->private_data;

	kvm_put_kvm(kvm);
	return 0;
}

/*
 * Allocate some memory and give it an address in the guest physical address
 * space.
 *
 * Discontiguous memory is allowed, mostly for framebuffers.
 *
 * Must be called holding mmap_sem for write.
 */
int __kvm_set_memory_region(struct kvm *kvm,
			    struct kvm_userspace_memory_region *mem,
			    int user_alloc)
{
	int r;
	gfn_t base_gfn;
	unsigned long npages;
	unsigned long i;
	struct kvm_memory_slot *memslot;
	struct kvm_memory_slot old, new;

	r = -EINVAL;
	/* General sanity checks */
	if (mem->memory_size & (PAGE_SIZE - 1))
		goto out;
	if (mem->guest_phys_addr & (PAGE_SIZE - 1))
		goto out;
	if (mem->slot >= KVM_MEMORY_SLOTS + KVM_PRIVATE_MEM_SLOTS)
		goto out;
	if (mem->guest_phys_addr + mem->memory_size < mem->guest_phys_addr)
		goto out;

	memslot = &kvm->memslots[mem->slot];
	base_gfn = mem->guest_phys_addr >> PAGE_SHIFT;
	npages = mem->memory_size >> PAGE_SHIFT;

	if (!npages)
		mem->flags &= ~KVM_MEM_LOG_DIRTY_PAGES;

	new = old = *memslot;

	new.base_gfn = base_gfn;
	new.npages = npages;
	new.flags = mem->flags;

	/* Disallow changing a memory slot's size. */
	r = -EINVAL;
	if (npages && old.npages && npages != old.npages)
		goto out_free;

	/* Check for overlaps */
	r = -EEXIST;
	for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
		struct kvm_memory_slot *s = &kvm->memslots[i];

		if (s == memslot)
			continue;
		if (!((base_gfn + npages <= s->base_gfn) ||
		      (base_gfn >= s->base_gfn + s->npages)))
			goto out_free;
	}

	/* Free page dirty bitmap if unneeded */
	if (!(new.flags & KVM_MEM_LOG_DIRTY_PAGES))
		new.dirty_bitmap = NULL;

	r = -ENOMEM;

	/* Allocate if a slot is being created */
	if (npages && !new.rmap) {
		new.rmap = vmalloc(npages * sizeof(struct page *));

		if (!new.rmap)
			goto out_free;

		memset(new.rmap, 0, npages * sizeof(*new.rmap));

		new.user_alloc = user_alloc;
		new.userspace_addr = mem->userspace_addr;
	}

	if (npages && !new.lpage_info) {
		int largepages = npages / KVM_PAGES_PER_HPAGE;

		if (npages % KVM_PAGES_PER_HPAGE)
			largepages++;
		if (base_gfn % KVM_PAGES_PER_HPAGE)
			largepages++;
		new.lpage_info = vmalloc(largepages * sizeof(*new.lpage_info));

		if (!new.lpage_info)
			goto out_free;

		memset(new.lpage_info, 0, largepages * sizeof(*new.lpage_info));

		if (base_gfn % KVM_PAGES_PER_HPAGE)
			new.lpage_info[0].write_count = 1;
		if ((base_gfn + npages) % KVM_PAGES_PER_HPAGE)
			new.lpage_info[largepages - 1].write_count = 1;
	}

	/* Allocate page dirty bitmap if needed */
	if ((new.flags & KVM_MEM_LOG_DIRTY_PAGES) && !new.dirty_bitmap) {
		unsigned dirty_bytes = ALIGN(npages, BITS_PER_LONG) / 8;

		new.dirty_bitmap = vmalloc(dirty_bytes);
		if (!new.dirty_bitmap)
			goto out_free;
		memset(new.dirty_bitmap, 0, dirty_bytes);
	}

	if (mem->slot >= kvm->nmemslots)
		kvm->nmemslots = mem->slot + 1;

	*memslot = new;

	r = kvm_arch_set_memory_region(kvm, mem, old, user_alloc);
	if (r) {
		*memslot = old;
		goto out_free;
	}

	kvm_free_physmem_slot(&old, &new);
	return 0;

out_free:
	kvm_free_physmem_slot(&new, &old);
out:
	return r;
}
EXPORT_SYMBOL_GPL(__kvm_set_memory_region);

int kvm_set_memory_region(struct kvm *kvm,
			  struct kvm_userspace_memory_region *mem,
			  int user_alloc)
{
	int r;

	down_write(&kvm->slots_lock);
	r = __kvm_set_memory_region(kvm, mem, user_alloc);
	up_write(&kvm->slots_lock);
	return r;
}
EXPORT_SYMBOL_GPL(kvm_set_memory_region);

int kvm_vm_ioctl_set_memory_region(struct kvm *kvm,
				   struct kvm_userspace_memory_region *mem,
				   int user_alloc)
{
	if (mem->slot >= KVM_MEMORY_SLOTS)
		return -EINVAL;
	return kvm_set_memory_region(kvm, mem, user_alloc);
}
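
/*
 * Copy a memory slot's dirty bitmap out to userspace and report in
 * *is_dirty whether any bit in it was set.
 */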
int kvm_get_dirty_log(struct kvm *kvm,
		      struct kvm_dirty_log *log, int *is_dirty)
{
	struct kvm_memory_slot *memslot;
	int r, i;
	int n;
	unsigned long any = 0;

	r = -EINVAL;
	if (log->slot >= KVM_MEMORY_SLOTS)
		goto out;

	memslot = &kvm->memslots[log->slot];
	r = -ENOENT;
	if (!memslot->dirty_bitmap)
		goto out;

	n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;

	for (i = 0; !any && i < n/sizeof(long); ++i)
		any = memslot->dirty_bitmap[i];

	r = -EFAULT;
	if (copy_to_user(log->dirty_bitmap, memslot->dirty_bitmap, n))
		goto out;

	if (any)
		*is_dirty = 1;

	r = 0;
out:
	return r;
}

int is_error_page(struct page *page)
{
	return page == bad_page;
}
EXPORT_SYMBOL_GPL(is_error_page);

int is_error_pfn(pfn_t pfn)
{
	return pfn == bad_pfn;
}
EXPORT_SYMBOL_GPL(is_error_pfn);

static inline unsigned long bad_hva(void)
{
	return PAGE_OFFSET;
}

int kvm_is_error_hva(unsigned long addr)
{
	return addr == bad_hva();
}
EXPORT_SYMBOL_GPL(kvm_is_error_hva);
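
/*
 * Linear scan of the (small) memslot array; returns the slot containing
 * @gfn, or NULL if the gfn is not backed by any slot.
 */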
static struct kvm_memory_slot *__gfn_to_memslot(struct kvm *kvm, gfn_t gfn)
{
	int i;

	for (i = 0; i < kvm->nmemslots; ++i) {
		struct kvm_memory_slot *memslot = &kvm->memslots[i];

		if (gfn >= memslot->base_gfn
		    && gfn < memslot->base_gfn + memslot->npages)
			return memslot;
	}
	return NULL;
}

struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn)
{
	gfn = unalias_gfn(kvm, gfn);
	return __gfn_to_memslot(kvm, gfn);
}

int kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn)
{
	int i;

	gfn = unalias_gfn(kvm, gfn);
	for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
		struct kvm_memory_slot *memslot = &kvm->memslots[i];

		if (gfn >= memslot->base_gfn
		    && gfn < memslot->base_gfn + memslot->npages)
			return 1;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_is_visible_gfn);

unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn)
{
	struct kvm_memory_slot *slot;

	gfn = unalias_gfn(kvm, gfn);
	slot = __gfn_to_memslot(kvm, gfn);
	if (!slot)
		return bad_hva();
	return (slot->userspace_addr + (gfn - slot->base_gfn) * PAGE_SIZE);
}
EXPORT_SYMBOL_GPL(gfn_to_hva);

/*
 * Requires current->mm->mmap_sem to be held
 */
pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn)
{
	struct page *page[1];
	unsigned long addr;
	int npages;
	pfn_t pfn;

	might_sleep();

	addr = gfn_to_hva(kvm, gfn);
	if (kvm_is_error_hva(addr)) {
		get_page(bad_page);
		return page_to_pfn(bad_page);
	}

	npages = get_user_pages(current, current->mm, addr, 1, 1, 1, page,
				NULL);

	if (unlikely(npages != 1)) {
		struct vm_area_struct *vma;

		vma = find_vma(current->mm, addr);
		if (vma == NULL || addr < vma->vm_start ||
		    !(vma->vm_flags & VM_PFNMAP)) {
			get_page(bad_page);
			return page_to_pfn(bad_page);
		}

		pfn = ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
		BUG_ON(pfn_valid(pfn));
	} else
		pfn = page_to_pfn(page[0]);

	return pfn;
}
EXPORT_SYMBOL_GPL(gfn_to_pfn);

struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn)
{
	pfn_t pfn;

	pfn = gfn_to_pfn(kvm, gfn);
	if (pfn_valid(pfn))
		return pfn_to_page(pfn);

	WARN_ON(!pfn_valid(pfn));

	get_page(bad_page);
	return bad_page;
}
EXPORT_SYMBOL_GPL(gfn_to_page);

void kvm_release_page_clean(struct page *page)
{
	kvm_release_pfn_clean(page_to_pfn(page));
}
EXPORT_SYMBOL_GPL(kvm_release_page_clean);

void kvm_release_pfn_clean(pfn_t pfn)
{
	if (pfn_valid(pfn))
		put_page(pfn_to_page(pfn));
}
EXPORT_SYMBOL_GPL(kvm_release_pfn_clean);

void kvm_release_page_dirty(struct page *page)
{
	kvm_release_pfn_dirty(page_to_pfn(page));
}
EXPORT_SYMBOL_GPL(kvm_release_page_dirty);

void kvm_release_pfn_dirty(pfn_t pfn)
{
	kvm_set_pfn_dirty(pfn);
	kvm_release_pfn_clean(pfn);
}
EXPORT_SYMBOL_GPL(kvm_release_pfn_dirty);

void kvm_set_page_dirty(struct page *page)
{
	kvm_set_pfn_dirty(page_to_pfn(page));
}
EXPORT_SYMBOL_GPL(kvm_set_page_dirty);

void kvm_set_pfn_dirty(pfn_t pfn)
{
	if (pfn_valid(pfn)) {
		struct page *page = pfn_to_page(pfn);

		if (!PageReserved(page))
			SetPageDirty(page);
	}
}
EXPORT_SYMBOL_GPL(kvm_set_pfn_dirty);

void kvm_set_pfn_accessed(pfn_t pfn)
{
	if (pfn_valid(pfn))
		mark_page_accessed(pfn_to_page(pfn));
}
EXPORT_SYMBOL_GPL(kvm_set_pfn_accessed);

void kvm_get_pfn(pfn_t pfn)
{
	if (pfn_valid(pfn))
		get_page(pfn_to_page(pfn));
}
EXPORT_SYMBOL_GPL(kvm_get_pfn);
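
/*
 * Helper for the guest read/write/clear loops below: returns the number
 * of bytes that can be copied without crossing the current page boundary.
 */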
static int next_segment(unsigned long len, int offset)
{
	if (len > PAGE_SIZE - offset)
		return PAGE_SIZE - offset;
	else
		return len;
}

int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
			int len)
{
	int r;
	unsigned long addr;

	addr = gfn_to_hva(kvm, gfn);
	if (kvm_is_error_hva(addr))
		return -EFAULT;
	r = copy_from_user(data, (void __user *)addr + offset, len);
	if (r)
		return -EFAULT;
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_read_guest_page);

int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len)
{
	gfn_t gfn = gpa >> PAGE_SHIFT;
	int seg;
	int offset = offset_in_page(gpa);
	int ret;

	while ((seg = next_segment(len, offset)) != 0) {
		ret = kvm_read_guest_page(kvm, gfn, data, offset, seg);
		if (ret < 0)
			return ret;
		offset = 0;
		len -= seg;
		data += seg;
		++gfn;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_read_guest);

int kvm_read_guest_atomic(struct kvm *kvm, gpa_t gpa, void *data,
			  unsigned long len)
{
	int r;
	unsigned long addr;
	gfn_t gfn = gpa >> PAGE_SHIFT;
	int offset = offset_in_page(gpa);

	addr = gfn_to_hva(kvm, gfn);
	if (kvm_is_error_hva(addr))
		return -EFAULT;
	pagefault_disable();
	r = __copy_from_user_inatomic(data, (void __user *)addr + offset, len);
	pagefault_enable();
	if (r)
		return -EFAULT;
	return 0;
}
EXPORT_SYMBOL(kvm_read_guest_atomic);

int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, const void *data,
			 int offset, int len)
{
	int r;
	unsigned long addr;

	addr = gfn_to_hva(kvm, gfn);
	if (kvm_is_error_hva(addr))
		return -EFAULT;
	r = copy_to_user((void __user *)addr + offset, data, len);
	if (r)
		return -EFAULT;
	mark_page_dirty(kvm, gfn);
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_write_guest_page);

int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
		    unsigned long len)
{
	gfn_t gfn = gpa >> PAGE_SHIFT;
	int seg;
	int offset = offset_in_page(gpa);
	int ret;

	while ((seg = next_segment(len, offset)) != 0) {
		ret = kvm_write_guest_page(kvm, gfn, data, offset, seg);
		if (ret < 0)
			return ret;
		offset = 0;
		len -= seg;
		data += seg;
		++gfn;
	}
	return 0;
}

int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len)
{
	return kvm_write_guest_page(kvm, gfn, empty_zero_page, offset, len);
}
EXPORT_SYMBOL_GPL(kvm_clear_guest_page);

int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len)
{
	gfn_t gfn = gpa >> PAGE_SHIFT;
	int seg;
	int offset = offset_in_page(gpa);
	int ret;

	while ((seg = next_segment(len, offset)) != 0) {
		ret = kvm_clear_guest_page(kvm, gfn, offset, seg);
		if (ret < 0)
			return ret;
		offset = 0;
		len -= seg;
		++gfn;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_clear_guest);

void mark_page_dirty(struct kvm *kvm, gfn_t gfn)
{
	struct kvm_memory_slot *memslot;

	gfn = unalias_gfn(kvm, gfn);
	memslot = __gfn_to_memslot(kvm, gfn);
	if (memslot && memslot->dirty_bitmap) {
		unsigned long rel_gfn = gfn - memslot->base_gfn;

		/* avoid RMW */
		if (!test_bit(rel_gfn, memslot->dirty_bitmap))
			set_bit(rel_gfn, memslot->dirty_bitmap);
	}
}

/*
 * The vCPU has executed a HLT instruction with in-kernel mode enabled.
 */
void kvm_vcpu_block(struct kvm_vcpu *vcpu)
{
	DEFINE_WAIT(wait);

	for (;;) {
		prepare_to_wait(&vcpu->wq, &wait, TASK_INTERRUPTIBLE);

		if (kvm_cpu_has_interrupt(vcpu))
			break;
		if (kvm_cpu_has_pending_timer(vcpu))
			break;
		if (kvm_arch_vcpu_runnable(vcpu))
			break;
		if (signal_pending(current))
			break;

		vcpu_put(vcpu);
		schedule();
		vcpu_load(vcpu);
	}

	finish_wait(&vcpu->wq, &wait);
}

void kvm_resched(struct kvm_vcpu *vcpu)
{
	if (!need_resched())
		return;
	cond_resched();
}
EXPORT_SYMBOL_GPL(kvm_resched);
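
/*
 * Fault handler for the vcpu file mapping: page 0 is the shared kvm_run
 * structure; on x86 the following page holds the PIO data.
 */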
static int kvm_vcpu_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct kvm_vcpu *vcpu = vma->vm_file->private_data;
	struct page *page;

	if (vmf->pgoff == 0)
		page = virt_to_page(vcpu->run);
#ifdef CONFIG_X86
	else if (vmf->pgoff == KVM_PIO_PAGE_OFFSET)
		page = virt_to_page(vcpu->arch.pio_data);
#endif
	else
		return VM_FAULT_SIGBUS;
	get_page(page);
	vmf->page = page;
	return 0;
}

static struct vm_operations_struct kvm_vcpu_vm_ops = {
	.fault = kvm_vcpu_fault,
};

static int kvm_vcpu_mmap(struct file *file, struct vm_area_struct *vma)
{
	vma->vm_ops = &kvm_vcpu_vm_ops;
	return 0;
}

static int kvm_vcpu_release(struct inode *inode, struct file *filp)
{
	struct kvm_vcpu *vcpu = filp->private_data;

	kvm_put_kvm(vcpu->kvm);
	return 0;
}

static const struct file_operations kvm_vcpu_fops = {
	.release        = kvm_vcpu_release,
	.unlocked_ioctl = kvm_vcpu_ioctl,
	.compat_ioctl   = kvm_vcpu_ioctl,
	.mmap           = kvm_vcpu_mmap,
};

/*
 * Allocates an inode for the vcpu.
 */
static int create_vcpu_fd(struct kvm_vcpu *vcpu)
{
	int fd = anon_inode_getfd("kvm-vcpu", &kvm_vcpu_fops, vcpu);

	if (fd < 0)
		kvm_put_kvm(vcpu->kvm);
	return fd;
}

/*
 * Creates some virtual cpus.  Good luck creating more than one.
 */
static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, int n)
{
	int r;
	struct kvm_vcpu *vcpu;

	if (!valid_vcpu(n))
		return -EINVAL;

	vcpu = kvm_arch_vcpu_create(kvm, n);
	if (IS_ERR(vcpu))
		return PTR_ERR(vcpu);

	preempt_notifier_init(&vcpu->preempt_notifier, &kvm_preempt_ops);

	r = kvm_arch_vcpu_setup(vcpu);
	if (r)
		goto vcpu_destroy;

	mutex_lock(&kvm->lock);
	if (kvm->vcpus[n]) {
		r = -EEXIST;
		mutex_unlock(&kvm->lock);
		goto vcpu_destroy;
	}
	kvm->vcpus[n] = vcpu;
	mutex_unlock(&kvm->lock);

	/* Now it's all set up, let userspace reach it */
	kvm_get_kvm(kvm);
	r = create_vcpu_fd(vcpu);
	if (r < 0)
		goto unlink;
	return r;

unlink:
	mutex_lock(&kvm->lock);
	kvm->vcpus[n] = NULL;
	mutex_unlock(&kvm->lock);
vcpu_destroy:
	kvm_arch_vcpu_destroy(vcpu);
	return r;
}

static int kvm_vcpu_ioctl_set_sigmask(struct kvm_vcpu *vcpu, sigset_t *sigset)
{
	if (sigset) {
		sigdelsetmask(sigset, sigmask(SIGKILL)|sigmask(SIGSTOP));
		vcpu->sigset_active = 1;
		vcpu->sigset = *sigset;
	} else
		vcpu->sigset_active = 0;
	return 0;
}
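
/*
 * ioctl dispatcher for vcpu file descriptors.  Generic commands are
 * handled here; anything unrecognized falls through to the architecture
 * code via kvm_arch_vcpu_ioctl().
 */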
static long kvm_vcpu_ioctl(struct file *filp,
			   unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	int r;

	if (vcpu->kvm->mm != current->mm)
		return -EIO;
	switch (ioctl) {
	case KVM_RUN:
		r = -EINVAL;
		if (arg)
			goto out;
		r = kvm_arch_vcpu_ioctl_run(vcpu, vcpu->run);
		break;
	case KVM_GET_REGS: {
		struct kvm_regs *kvm_regs;

		r = -ENOMEM;
		kvm_regs = kzalloc(sizeof(struct kvm_regs), GFP_KERNEL);
		if (!kvm_regs)
			goto out;
		r = kvm_arch_vcpu_ioctl_get_regs(vcpu, kvm_regs);
		if (r)
			goto out_free1;
		r = -EFAULT;
		if (copy_to_user(argp, kvm_regs, sizeof(struct kvm_regs)))
			goto out_free1;
		r = 0;
out_free1:
		kfree(kvm_regs);
		break;
	}
	case KVM_SET_REGS: {
		struct kvm_regs *kvm_regs;

		r = -ENOMEM;
		kvm_regs = kzalloc(sizeof(struct kvm_regs), GFP_KERNEL);
		if (!kvm_regs)
			goto out;
		r = -EFAULT;
		if (copy_from_user(kvm_regs, argp, sizeof(struct kvm_regs)))
			goto out_free2;
		r = kvm_arch_vcpu_ioctl_set_regs(vcpu, kvm_regs);
		if (r)
			goto out_free2;
		r = 0;
out_free2:
		kfree(kvm_regs);
		break;
	}
	case KVM_GET_SREGS: {
		struct kvm_sregs kvm_sregs;

		memset(&kvm_sregs, 0, sizeof kvm_sregs);
		r = kvm_arch_vcpu_ioctl_get_sregs(vcpu, &kvm_sregs);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, &kvm_sregs, sizeof kvm_sregs))
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_SREGS: {
		struct kvm_sregs kvm_sregs;

		r = -EFAULT;
		if (copy_from_user(&kvm_sregs, argp, sizeof kvm_sregs))
			goto out;
		r = kvm_arch_vcpu_ioctl_set_sregs(vcpu, &kvm_sregs);
		if (r)
			goto out;
		r = 0;
		break;
	}
	case KVM_GET_MP_STATE: {
		struct kvm_mp_state mp_state;

		r = kvm_arch_vcpu_ioctl_get_mpstate(vcpu, &mp_state);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, &mp_state, sizeof mp_state))
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_MP_STATE: {
		struct kvm_mp_state mp_state;

		r = -EFAULT;
		if (copy_from_user(&mp_state, argp, sizeof mp_state))
			goto out;
		r = kvm_arch_vcpu_ioctl_set_mpstate(vcpu, &mp_state);
		if (r)
			goto out;
		r = 0;
		break;
	}
	case KVM_TRANSLATE: {
		struct kvm_translation tr;

		r = -EFAULT;
		if (copy_from_user(&tr, argp, sizeof tr))
			goto out;
		r = kvm_arch_vcpu_ioctl_translate(vcpu, &tr);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, &tr, sizeof tr))
			goto out;
		r = 0;
		break;
	}
	case KVM_DEBUG_GUEST: {
		struct kvm_debug_guest dbg;

		r = -EFAULT;
		if (copy_from_user(&dbg, argp, sizeof dbg))
			goto out;
		r = kvm_arch_vcpu_ioctl_debug_guest(vcpu, &dbg);
		if (r)
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_SIGNAL_MASK: {
		struct kvm_signal_mask __user *sigmask_arg = argp;
		struct kvm_signal_mask kvm_sigmask;
		sigset_t sigset, *p;

		p = NULL;
		if (argp) {
			r = -EFAULT;
			if (copy_from_user(&kvm_sigmask, argp,
					   sizeof kvm_sigmask))
				goto out;
			r = -EINVAL;
			if (kvm_sigmask.len != sizeof sigset)
				goto out;
			r = -EFAULT;
			if (copy_from_user(&sigset, sigmask_arg->sigset,
					   sizeof sigset))
				goto out;
			p = &sigset;
		}
		/* p is NULL when userspace passed no mask: clear it */
		r = kvm_vcpu_ioctl_set_sigmask(vcpu, p);
		break;
	}
	case KVM_GET_FPU: {
		struct kvm_fpu fpu;

		memset(&fpu, 0, sizeof fpu);
		r = kvm_arch_vcpu_ioctl_get_fpu(vcpu, &fpu);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, &fpu, sizeof fpu))
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_FPU: {
		struct kvm_fpu fpu;

		r = -EFAULT;
		if (copy_from_user(&fpu, argp, sizeof fpu))
			goto out;
		r = kvm_arch_vcpu_ioctl_set_fpu(vcpu, &fpu);
		if (r)
			goto out;
		r = 0;
		break;
	}
	default:
		r = kvm_arch_vcpu_ioctl(filp, ioctl, arg);
	}
out:
	return r;
}
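
/*
 * ioctl dispatcher for VM file descriptors: vcpu creation, memory slot
 * setup and dirty logging live here; the rest is forwarded to
 * kvm_arch_vm_ioctl().
 */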
static long kvm_vm_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm = filp->private_data;
	void __user *argp = (void __user *)arg;
	int r;

	if (kvm->mm != current->mm)
		return -EIO;
	switch (ioctl) {
	case KVM_CREATE_VCPU:
		r = kvm_vm_ioctl_create_vcpu(kvm, arg);
		if (r < 0)
			goto out;
		break;
	case KVM_SET_USER_MEMORY_REGION: {
		struct kvm_userspace_memory_region kvm_userspace_mem;

		r = -EFAULT;
		if (copy_from_user(&kvm_userspace_mem, argp,
				   sizeof kvm_userspace_mem))
			goto out;

		r = kvm_vm_ioctl_set_memory_region(kvm, &kvm_userspace_mem, 1);
		if (r)
			goto out;
		break;
	}
	case KVM_GET_DIRTY_LOG: {
		struct kvm_dirty_log log;

		r = -EFAULT;
		if (copy_from_user(&log, argp, sizeof log))
			goto out;
		r = kvm_vm_ioctl_get_dirty_log(kvm, &log);
		if (r)
			goto out;
		break;
	}
	default:
		r = kvm_arch_vm_ioctl(filp, ioctl, arg);
	}
out:
	return r;
}

static int kvm_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct kvm *kvm = vma->vm_file->private_data;
	struct page *page;

	if (!kvm_is_visible_gfn(kvm, vmf->pgoff))
		return VM_FAULT_SIGBUS;
	page = gfn_to_page(kvm, vmf->pgoff);
	if (is_error_page(page)) {
		kvm_release_page_clean(page);
		return VM_FAULT_SIGBUS;
	}
	vmf->page = page;
	return 0;
}

static struct vm_operations_struct kvm_vm_vm_ops = {
	.fault = kvm_vm_fault,
};

static int kvm_vm_mmap(struct file *file, struct vm_area_struct *vma)
{
	vma->vm_ops = &kvm_vm_vm_ops;
	return 0;
}

static const struct file_operations kvm_vm_fops = {
	.release        = kvm_vm_release,
	.unlocked_ioctl = kvm_vm_ioctl,
	.compat_ioctl   = kvm_vm_ioctl,
	.mmap           = kvm_vm_mmap,
};

static int kvm_dev_ioctl_create_vm(void)
{
	int fd;
	struct kvm *kvm;

	kvm = kvm_create_vm();
	if (IS_ERR(kvm))
		return PTR_ERR(kvm);
	fd = anon_inode_getfd("kvm-vm", &kvm_vm_fops, kvm);
	if (fd < 0)
		kvm_put_kvm(kvm);

	return fd;
}
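
/*
 * A typical userspace sequence against this interface (illustration
 * only, error handling omitted) would be:
 *
 *	fd    = open("/dev/kvm", O_RDWR);
 *	vmfd  = ioctl(fd, KVM_CREATE_VM, 0);
 *	ioctl(vmfd, KVM_SET_USER_MEMORY_REGION, &region);
 *	cpufd = ioctl(vmfd, KVM_CREATE_VCPU, 0);
 *	run   = mmap(NULL, ioctl(fd, KVM_GET_VCPU_MMAP_SIZE, 0),
 *		     PROT_READ | PROT_WRITE, MAP_SHARED, cpufd, 0);
 *	ioctl(cpufd, KVM_RUN, 0);
 */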
static long kvm_dev_ioctl(struct file *filp,
			  unsigned int ioctl, unsigned long arg)
{
	long r = -EINVAL;

	switch (ioctl) {
	case KVM_GET_API_VERSION:
		r = -EINVAL;
		if (arg)
			goto out;
		r = KVM_API_VERSION;
		break;
	case KVM_CREATE_VM:
		r = -EINVAL;
		if (arg)
			goto out;
		r = kvm_dev_ioctl_create_vm();
		break;
	case KVM_CHECK_EXTENSION:
		r = kvm_dev_ioctl_check_extension(arg);
		break;
	case KVM_GET_VCPU_MMAP_SIZE:
		r = -EINVAL;
		if (arg)
			goto out;
		r = PAGE_SIZE;    /* struct kvm_run */
#ifdef CONFIG_X86
		r += PAGE_SIZE;   /* pio data page */
#endif
		break;
	case KVM_TRACE_ENABLE:
	case KVM_TRACE_PAUSE:
	case KVM_TRACE_DISABLE:
		r = kvm_trace_ioctl(ioctl, arg);
		break;
	default:
		return kvm_arch_dev_ioctl(filp, ioctl, arg);
	}
out:
	return r;
}

static struct file_operations kvm_chardev_ops = {
	.unlocked_ioctl = kvm_dev_ioctl,
	.compat_ioctl   = kvm_dev_ioctl,
};

static struct miscdevice kvm_dev = {
	KVM_MINOR,
	"kvm",
	&kvm_chardev_ops,
};
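
/*
 * Hardware virtualization is enabled and disabled per cpu; the
 * cpus_hardware_enabled mask keeps both operations idempotent so the
 * hotplug, suspend and reboot paths can call them freely.
 */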
static void hardware_enable(void *junk)
{
	int cpu = raw_smp_processor_id();

	if (cpu_isset(cpu, cpus_hardware_enabled))
		return;
	cpu_set(cpu, cpus_hardware_enabled);
	kvm_arch_hardware_enable(NULL);
}

static void hardware_disable(void *junk)
{
	int cpu = raw_smp_processor_id();

	if (!cpu_isset(cpu, cpus_hardware_enabled))
		return;
	cpu_clear(cpu, cpus_hardware_enabled);
	decache_vcpus_on_cpu(cpu);
	kvm_arch_hardware_disable(NULL);
}

static int kvm_cpu_hotplug(struct notifier_block *notifier, unsigned long val,
			   void *v)
{
	int cpu = (long)v;

	val &= ~CPU_TASKS_FROZEN;
	switch (val) {
	case CPU_DYING:
		printk(KERN_INFO "kvm: disabling virtualization on CPU%d\n",
		       cpu);
		hardware_disable(NULL);
		break;
	case CPU_UP_CANCELED:
		printk(KERN_INFO "kvm: disabling virtualization on CPU%d\n",
		       cpu);
		smp_call_function_single(cpu, hardware_disable, NULL, 1);
		break;
	case CPU_ONLINE:
		printk(KERN_INFO "kvm: enabling virtualization on CPU%d\n",
		       cpu);
		smp_call_function_single(cpu, hardware_enable, NULL, 1);
		break;
	}
	return NOTIFY_OK;
}

asmlinkage void kvm_handle_fault_on_reboot(void)
{
	if (kvm_rebooting)
		/* spin while reset goes on */
		while (true)
			;
	/* Fault while not rebooting.  We want the trace. */
	BUG();
}
EXPORT_SYMBOL_GPL(kvm_handle_fault_on_reboot);

static int kvm_reboot(struct notifier_block *notifier, unsigned long val,
		      void *v)
{
	if (val == SYS_RESTART) {
		/*
		 * Some (well, at least mine) BIOSes hang on reboot if
		 * in vmx root mode.
		 */
		printk(KERN_INFO "kvm: exiting hardware virtualization\n");
		kvm_rebooting = true;
		on_each_cpu(hardware_disable, NULL, 1);
	}
	return NOTIFY_OK;
}

static struct notifier_block kvm_reboot_notifier = {
	.notifier_call = kvm_reboot,
	.priority = 0,
};
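
/*
 * The kvm_io_bus is a flat array of in-kernel emulated devices (PIO and
 * MMIO); lookup is a linear scan using each device's in_range callback.
 */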
void kvm_io_bus_init(struct kvm_io_bus *bus)
{
	memset(bus, 0, sizeof(*bus));
}

void kvm_io_bus_destroy(struct kvm_io_bus *bus)
{
	int i;

	for (i = 0; i < bus->dev_count; i++) {
		struct kvm_io_device *pos = bus->devs[i];

		kvm_iodevice_destructor(pos);
	}
}

struct kvm_io_device *kvm_io_bus_find_dev(struct kvm_io_bus *bus, gpa_t addr)
{
	int i;

	for (i = 0; i < bus->dev_count; i++) {
		struct kvm_io_device *pos = bus->devs[i];

		if (pos->in_range(pos, addr))
			return pos;
	}

	return NULL;
}

void kvm_io_bus_register_dev(struct kvm_io_bus *bus, struct kvm_io_device *dev)
{
	BUG_ON(bus->dev_count > (NR_IOBUS_DEVS-1));

	bus->devs[bus->dev_count++] = dev;
}

static struct notifier_block kvm_cpu_notifier = {
	.notifier_call = kvm_cpu_hotplug,
	.priority = 20, /* must be > scheduler priority */
};
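
/*
 * debugfs statistics: each entry stores an offset into struct kvm or
 * struct kvm_vcpu; the getters below sum that counter over all VMs (and,
 * for vcpu stats, over all vcpus of each VM).
 */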
static int vm_stat_get(void *_offset, u64 *val)
{
	unsigned offset = (long)_offset;
	struct kvm *kvm;

	*val = 0;
	spin_lock(&kvm_lock);
	list_for_each_entry(kvm, &vm_list, vm_list)
		*val += *(u32 *)((void *)kvm + offset);
	spin_unlock(&kvm_lock);
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(vm_stat_fops, vm_stat_get, NULL, "%llu\n");

static int vcpu_stat_get(void *_offset, u64 *val)
{
	unsigned offset = (long)_offset;
	struct kvm *kvm;
	struct kvm_vcpu *vcpu;
	int i;

	*val = 0;
	spin_lock(&kvm_lock);
	list_for_each_entry(kvm, &vm_list, vm_list)
		for (i = 0; i < KVM_MAX_VCPUS; ++i) {
			vcpu = kvm->vcpus[i];
			if (vcpu)
				*val += *(u32 *)((void *)vcpu + offset);
		}
	spin_unlock(&kvm_lock);
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(vcpu_stat_fops, vcpu_stat_get, NULL, "%llu\n");

static struct file_operations *stat_fops[] = {
	[KVM_STAT_VCPU] = &vcpu_stat_fops,
	[KVM_STAT_VM]   = &vm_stat_fops,
};

static void kvm_init_debug(void)
{
	struct kvm_stats_debugfs_item *p;

	kvm_debugfs_dir = debugfs_create_dir("kvm", NULL);
	for (p = debugfs_entries; p->name; ++p)
		p->dentry = debugfs_create_file(p->name, 0444, kvm_debugfs_dir,
						(void *)(long)p->offset,
						stat_fops[p->kind]);
}

static void kvm_exit_debug(void)
{
	struct kvm_stats_debugfs_item *p;

	for (p = debugfs_entries; p->name; ++p)
		debugfs_remove(p->dentry);
	debugfs_remove(kvm_debugfs_dir);
}

static int kvm_suspend(struct sys_device *dev, pm_message_t state)
{
	hardware_disable(NULL);
	return 0;
}

static int kvm_resume(struct sys_device *dev)
{
	hardware_enable(NULL);
	return 0;
}

static struct sysdev_class kvm_sysdev_class = {
	.name = "kvm",
	.suspend = kvm_suspend,
	.resume = kvm_resume,
};

static struct sys_device kvm_sysdev = {
	.id = 0,
	.cls = &kvm_sysdev_class,
};

struct page *bad_page;
pfn_t bad_pfn;

static inline
struct kvm_vcpu *preempt_notifier_to_vcpu(struct preempt_notifier *pn)
{
	return container_of(pn, struct kvm_vcpu, preempt_notifier);
}

static void kvm_sched_in(struct preempt_notifier *pn, int cpu)
{
	struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);

	kvm_arch_vcpu_load(vcpu, cpu);
}

static void kvm_sched_out(struct preempt_notifier *pn,
			  struct task_struct *next)
{
	struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);

	kvm_arch_vcpu_put(vcpu);
}
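
/*
 * Module-wide setup, called from the arch module (kvm-intel/kvm-amd).
 * Order matters: debugfs and arch init, the bad_page sentinel, hardware
 * setup plus a compatibility check on every online cpu, then the hotplug,
 * reboot and sysdev hooks, the vcpu slab cache, and finally the /dev/kvm
 * misc device.  The error path unwinds in reverse.
 */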
int kvm_init(void *opaque, unsigned int vcpu_size,
	     struct module *module)
{
	int r;
	int cpu;

	kvm_init_debug();

	r = kvm_arch_init(opaque);
	if (r)
		goto out_fail;

	bad_page = alloc_page(GFP_KERNEL | __GFP_ZERO);

	if (bad_page == NULL) {
		r = -ENOMEM;
		goto out;
	}

	bad_pfn = page_to_pfn(bad_page);

	r = kvm_arch_hardware_setup();
	if (r < 0)
		goto out_free_0;

	for_each_online_cpu(cpu) {
		smp_call_function_single(cpu,
				kvm_arch_check_processor_compat,
				&r, 1);
		if (r < 0)
			goto out_free_1;
	}

	on_each_cpu(hardware_enable, NULL, 1);
	r = register_cpu_notifier(&kvm_cpu_notifier);
	if (r)
		goto out_free_2;
	register_reboot_notifier(&kvm_reboot_notifier);

	r = sysdev_class_register(&kvm_sysdev_class);
	if (r)
		goto out_free_3;

	r = sysdev_register(&kvm_sysdev);
	if (r)
		goto out_free_4;

	/* A kmem cache lets us meet the alignment requirements of fx_save. */
	kvm_vcpu_cache = kmem_cache_create("kvm_vcpu", vcpu_size,
					   __alignof__(struct kvm_vcpu),
					   0, NULL);
	if (!kvm_vcpu_cache) {
		r = -ENOMEM;
		goto out_free_5;
	}

	kvm_chardev_ops.owner = module;

	r = misc_register(&kvm_dev);
	if (r) {
		printk(KERN_ERR "kvm: misc device register failed\n");
		goto out_free;
	}

	kvm_preempt_ops.sched_in = kvm_sched_in;
	kvm_preempt_ops.sched_out = kvm_sched_out;

	return 0;

out_free:
	kmem_cache_destroy(kvm_vcpu_cache);
out_free_5:
	sysdev_unregister(&kvm_sysdev);
out_free_4:
	sysdev_class_unregister(&kvm_sysdev_class);
out_free_3:
	unregister_reboot_notifier(&kvm_reboot_notifier);
	unregister_cpu_notifier(&kvm_cpu_notifier);
out_free_2:
	on_each_cpu(hardware_disable, NULL, 1);
out_free_1:
	kvm_arch_hardware_unsetup();
out_free_0:
	__free_page(bad_page);
out:
	kvm_arch_exit();
	kvm_exit_debug();
out_fail:
	return r;
}
EXPORT_SYMBOL_GPL(kvm_init);

void kvm_exit(void)
{
	kvm_trace_cleanup();
	misc_deregister(&kvm_dev);
	kmem_cache_destroy(kvm_vcpu_cache);
	sysdev_unregister(&kvm_sysdev);
	sysdev_class_unregister(&kvm_sysdev_class);
	unregister_reboot_notifier(&kvm_reboot_notifier);
	unregister_cpu_notifier(&kvm_cpu_notifier);
	on_each_cpu(hardware_disable, NULL, 1);
	kvm_arch_hardware_unsetup();
	kvm_arch_exit();
	kvm_exit_debug();
	__free_page(bad_page);
}
EXPORT_SYMBOL_GPL(kvm_exit);