kvm_main.c

/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This module enables machines with Intel VT-x extensions to run virtual
 * machines without emulation or binary translation.
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Avi Kivity   <avi@qumranet.com>
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#include "iodev.h"

#include <linux/kvm_host.h>
#include <linux/kvm.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/percpu.h>
#include <linux/mm.h>
#include <linux/miscdevice.h>
#include <linux/vmalloc.h>
#include <linux/reboot.h>
#include <linux/debugfs.h>
#include <linux/highmem.h>
#include <linux/file.h>
#include <linux/syscore_ops.h>
#include <linux/cpu.h>
#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/smp.h>
#include <linux/anon_inodes.h>
#include <linux/profile.h>
#include <linux/kvm_para.h>
#include <linux/pagemap.h>
#include <linux/mman.h>
#include <linux/swap.h>
#include <linux/bitops.h>
#include <linux/spinlock.h>
#include <linux/compat.h>
#include <linux/srcu.h>
#include <linux/hugetlb.h>
#include <linux/slab.h>
#include <linux/sort.h>
#include <linux/bsearch.h>

#include <asm/processor.h>
#include <asm/io.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>

#include "coalesced_mmio.h"
#include "async_pf.h"

#define CREATE_TRACE_POINTS
#include <trace/events/kvm.h>

MODULE_AUTHOR("Qumranet");
MODULE_LICENSE("GPL");
/*
 * Ordering of locks:
 *
 *	kvm->lock --> kvm->slots_lock --> kvm->irq_lock
 */

DEFINE_RAW_SPINLOCK(kvm_lock);
LIST_HEAD(vm_list);

static cpumask_var_t cpus_hardware_enabled;
static int kvm_usage_count = 0;
static atomic_t hardware_enable_failed;

struct kmem_cache *kvm_vcpu_cache;
EXPORT_SYMBOL_GPL(kvm_vcpu_cache);

static __read_mostly struct preempt_ops kvm_preempt_ops;

struct dentry *kvm_debugfs_dir;

static long kvm_vcpu_ioctl(struct file *file, unsigned int ioctl,
			   unsigned long arg);
#ifdef CONFIG_COMPAT
static long kvm_vcpu_compat_ioctl(struct file *file, unsigned int ioctl,
				  unsigned long arg);
#endif
static int hardware_enable_all(void);
static void hardware_disable_all(void);

static void kvm_io_bus_destroy(struct kvm_io_bus *bus);

bool kvm_rebooting;
EXPORT_SYMBOL_GPL(kvm_rebooting);

static bool largepages_enabled = true;

bool kvm_is_mmio_pfn(pfn_t pfn)
{
	if (is_error_pfn(pfn))
		return false;

	if (pfn_valid(pfn)) {
		int reserved;
		struct page *tail = pfn_to_page(pfn);
		struct page *head = compound_trans_head(tail);
		reserved = PageReserved(head);
		if (head != tail) {
			/*
			 * "head" is not a dangling pointer
			 * (compound_trans_head takes care of that)
			 * but the hugepage may have been split
			 * from under us (and we may not hold a
			 * reference count on the head page so it can
			 * be reused before we run PageReferenced), so
			 * we have to check PageTail before returning
			 * what we just read.
			 */
			smp_rmb();
			if (PageTail(tail))
				return reserved;
		}
		return PageReserved(tail);
	}

	return true;
}
/*
 * Switches to specified vcpu, until a matching vcpu_put()
 */
void vcpu_load(struct kvm_vcpu *vcpu)
{
	int cpu;

	mutex_lock(&vcpu->mutex);
	if (unlikely(vcpu->pid != current->pids[PIDTYPE_PID].pid)) {
		/* The thread running this VCPU changed. */
		struct pid *oldpid = vcpu->pid;
		struct pid *newpid = get_task_pid(current, PIDTYPE_PID);
		rcu_assign_pointer(vcpu->pid, newpid);
		synchronize_rcu();
		put_pid(oldpid);
	}
	cpu = get_cpu();
	preempt_notifier_register(&vcpu->preempt_notifier);
	kvm_arch_vcpu_load(vcpu, cpu);
	put_cpu();
}

void vcpu_put(struct kvm_vcpu *vcpu)
{
	preempt_disable();
	kvm_arch_vcpu_put(vcpu);
	preempt_notifier_unregister(&vcpu->preempt_notifier);
	preempt_enable();
	mutex_unlock(&vcpu->mutex);
}

static void ack_flush(void *_completed)
{
}
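
/*
 * Raise @req on every vcpu of @kvm and IPI the physical CPUs that are
 * currently running one of those vcpus in guest mode, so the request is
 * noticed promptly.  Returns true if any remote CPUs were interrupted.
 */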
static bool make_all_cpus_request(struct kvm *kvm, unsigned int req)
{
	int i, cpu, me;
	cpumask_var_t cpus;
	bool called = true;
	struct kvm_vcpu *vcpu;

	zalloc_cpumask_var(&cpus, GFP_ATOMIC);

	me = get_cpu();
	kvm_for_each_vcpu(i, vcpu, kvm) {
		kvm_make_request(req, vcpu);
		cpu = vcpu->cpu;

		/* Set ->requests bit before we read ->mode */
		smp_mb();

		if (cpus != NULL && cpu != -1 && cpu != me &&
		    kvm_vcpu_exiting_guest_mode(vcpu) != OUTSIDE_GUEST_MODE)
			cpumask_set_cpu(cpu, cpus);
	}
	if (unlikely(cpus == NULL))
		smp_call_function_many(cpu_online_mask, ack_flush, NULL, 1);
	else if (!cpumask_empty(cpus))
		smp_call_function_many(cpus, ack_flush, NULL, 1);
	else
		called = false;
	put_cpu();
	free_cpumask_var(cpus);
	return called;
}
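
/*
 * Request a TLB flush on every vcpu.  kvm->tlbs_dirty is cleared only if no
 * new entries were dirtied while the flush request was being serviced.
 */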
void kvm_flush_remote_tlbs(struct kvm *kvm)
{
	long dirty_count = kvm->tlbs_dirty;

	smp_mb();
	if (make_all_cpus_request(kvm, KVM_REQ_TLB_FLUSH))
		++kvm->stat.remote_tlb_flush;
	cmpxchg(&kvm->tlbs_dirty, dirty_count, 0);
}

void kvm_reload_remote_mmus(struct kvm *kvm)
{
	make_all_cpus_request(kvm, KVM_REQ_MMU_RELOAD);
}
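
/*
 * Initialize a freshly allocated vcpu: set up its locks and wait queue,
 * allocate the shared kvm_run page, and let the architecture code perform
 * its own per-vcpu setup.
 */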
int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id)
{
	struct page *page;
	int r;

	mutex_init(&vcpu->mutex);
	vcpu->cpu = -1;
	vcpu->kvm = kvm;
	vcpu->vcpu_id = id;
	vcpu->pid = NULL;
	init_waitqueue_head(&vcpu->wq);
	kvm_async_pf_vcpu_init(vcpu);

	page = alloc_page(GFP_KERNEL | __GFP_ZERO);
	if (!page) {
		r = -ENOMEM;
		goto fail;
	}
	vcpu->run = page_address(page);

	kvm_vcpu_set_in_spin_loop(vcpu, false);
	kvm_vcpu_set_dy_eligible(vcpu, false);

	r = kvm_arch_vcpu_init(vcpu);
	if (r < 0)
		goto fail_free_run;
	return 0;

fail_free_run:
	free_page((unsigned long)vcpu->run);
fail:
	return r;
}
EXPORT_SYMBOL_GPL(kvm_vcpu_init);

void kvm_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	put_pid(vcpu->pid);
	kvm_arch_vcpu_uninit(vcpu);
	free_page((unsigned long)vcpu->run);
}
EXPORT_SYMBOL_GPL(kvm_vcpu_uninit);
#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
static inline struct kvm *mmu_notifier_to_kvm(struct mmu_notifier *mn)
{
	return container_of(mn, struct kvm, mmu_notifier);
}

static void kvm_mmu_notifier_invalidate_page(struct mmu_notifier *mn,
					     struct mm_struct *mm,
					     unsigned long address)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
	int need_tlb_flush, idx;

	/*
	 * When ->invalidate_page runs, the linux pte has been zapped
	 * already but the page is still allocated until
	 * ->invalidate_page returns.  So if we increase the sequence
	 * here the kvm page fault will notice if the spte can't be
	 * established because the page is going to be freed.  If
	 * instead the kvm page fault establishes the spte before
	 * ->invalidate_page runs, kvm_unmap_hva will release it
	 * before returning.
	 *
	 * The sequence increase only needs to be seen at spin_unlock
	 * time, and not at spin_lock time.
	 *
	 * Increasing the sequence after the spin_unlock would be
	 * unsafe because the kvm page fault could then establish the
	 * pte after kvm_unmap_hva returned, without noticing the page
	 * is going to be freed.
	 */
	idx = srcu_read_lock(&kvm->srcu);
	spin_lock(&kvm->mmu_lock);

	kvm->mmu_notifier_seq++;
	need_tlb_flush = kvm_unmap_hva(kvm, address) | kvm->tlbs_dirty;
	/* we have to flush the tlb before the pages can be freed */
	if (need_tlb_flush)
		kvm_flush_remote_tlbs(kvm);

	spin_unlock(&kvm->mmu_lock);
	srcu_read_unlock(&kvm->srcu, idx);
}

static void kvm_mmu_notifier_change_pte(struct mmu_notifier *mn,
					struct mm_struct *mm,
					unsigned long address,
					pte_t pte)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
	int idx;

	idx = srcu_read_lock(&kvm->srcu);
	spin_lock(&kvm->mmu_lock);
	kvm->mmu_notifier_seq++;
	kvm_set_spte_hva(kvm, address, pte);
	spin_unlock(&kvm->mmu_lock);
	srcu_read_unlock(&kvm->srcu, idx);
}

static void kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
						    struct mm_struct *mm,
						    unsigned long start,
						    unsigned long end)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
	int need_tlb_flush = 0, idx;

	idx = srcu_read_lock(&kvm->srcu);
	spin_lock(&kvm->mmu_lock);
	/*
	 * The count increase must become visible at unlock time as no
	 * spte can be established without taking the mmu_lock and
	 * count is also read inside the mmu_lock critical section.
	 */
	kvm->mmu_notifier_count++;
	need_tlb_flush = kvm_unmap_hva_range(kvm, start, end);
	need_tlb_flush |= kvm->tlbs_dirty;
	/* we have to flush the tlb before the pages can be freed */
	if (need_tlb_flush)
		kvm_flush_remote_tlbs(kvm);

	spin_unlock(&kvm->mmu_lock);
	srcu_read_unlock(&kvm->srcu, idx);
}

static void kvm_mmu_notifier_invalidate_range_end(struct mmu_notifier *mn,
						  struct mm_struct *mm,
						  unsigned long start,
						  unsigned long end)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);

	spin_lock(&kvm->mmu_lock);
	/*
	 * This sequence increase will notify the kvm page fault that
	 * the page that is going to be mapped in the spte could have
	 * been freed.
	 */
	kvm->mmu_notifier_seq++;
	smp_wmb();
	/*
	 * The above sequence increase must be visible before the
	 * below count decrease, which is ensured by the smp_wmb above
	 * in conjunction with the smp_rmb in mmu_notifier_retry().
	 */
	kvm->mmu_notifier_count--;
	spin_unlock(&kvm->mmu_lock);

	BUG_ON(kvm->mmu_notifier_count < 0);
}

static int kvm_mmu_notifier_clear_flush_young(struct mmu_notifier *mn,
					      struct mm_struct *mm,
					      unsigned long address)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
	int young, idx;

	idx = srcu_read_lock(&kvm->srcu);
	spin_lock(&kvm->mmu_lock);

	young = kvm_age_hva(kvm, address);
	if (young)
		kvm_flush_remote_tlbs(kvm);

	spin_unlock(&kvm->mmu_lock);
	srcu_read_unlock(&kvm->srcu, idx);

	return young;
}

static int kvm_mmu_notifier_test_young(struct mmu_notifier *mn,
				       struct mm_struct *mm,
				       unsigned long address)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
	int young, idx;

	idx = srcu_read_lock(&kvm->srcu);
	spin_lock(&kvm->mmu_lock);
	young = kvm_test_age_hva(kvm, address);
	spin_unlock(&kvm->mmu_lock);
	srcu_read_unlock(&kvm->srcu, idx);

	return young;
}

static void kvm_mmu_notifier_release(struct mmu_notifier *mn,
				     struct mm_struct *mm)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
	int idx;

	idx = srcu_read_lock(&kvm->srcu);
	kvm_arch_flush_shadow(kvm);
	srcu_read_unlock(&kvm->srcu, idx);
}

static const struct mmu_notifier_ops kvm_mmu_notifier_ops = {
	.invalidate_page	= kvm_mmu_notifier_invalidate_page,
	.invalidate_range_start	= kvm_mmu_notifier_invalidate_range_start,
	.invalidate_range_end	= kvm_mmu_notifier_invalidate_range_end,
	.clear_flush_young	= kvm_mmu_notifier_clear_flush_young,
	.test_young		= kvm_mmu_notifier_test_young,
	.change_pte		= kvm_mmu_notifier_change_pte,
	.release		= kvm_mmu_notifier_release,
};

static int kvm_init_mmu_notifier(struct kvm *kvm)
{
	kvm->mmu_notifier.ops = &kvm_mmu_notifier_ops;
	return mmu_notifier_register(&kvm->mmu_notifier, current->mm);
}

#else  /* !(CONFIG_MMU_NOTIFIER && KVM_ARCH_WANT_MMU_NOTIFIER) */

static int kvm_init_mmu_notifier(struct kvm *kvm)
{
	return 0;
}

#endif /* CONFIG_MMU_NOTIFIER && KVM_ARCH_WANT_MMU_NOTIFIER */

static void kvm_init_memslots_id(struct kvm *kvm)
{
	int i;
	struct kvm_memslots *slots = kvm->memslots;

	for (i = 0; i < KVM_MEM_SLOTS_NUM; i++)
		slots->id_to_index[i] = slots->memslots[i].id = i;
}
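
/*
 * Allocate and initialize a new VM: architecture state, hardware
 * virtualization enablement, memslots, I/O buses, locks and the MMU
 * notifier, then link it into the global vm_list.
 */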
static struct kvm *kvm_create_vm(unsigned long type)
{
	int r, i;
	struct kvm *kvm = kvm_arch_alloc_vm();

	if (!kvm)
		return ERR_PTR(-ENOMEM);

	r = kvm_arch_init_vm(kvm, type);
	if (r)
		goto out_err_nodisable;

	r = hardware_enable_all();
	if (r)
		goto out_err_nodisable;

#ifdef CONFIG_HAVE_KVM_IRQCHIP
	INIT_HLIST_HEAD(&kvm->mask_notifier_list);
	INIT_HLIST_HEAD(&kvm->irq_ack_notifier_list);
#endif

	r = -ENOMEM;
	kvm->memslots = kzalloc(sizeof(struct kvm_memslots), GFP_KERNEL);
	if (!kvm->memslots)
		goto out_err_nosrcu;
	kvm_init_memslots_id(kvm);
	if (init_srcu_struct(&kvm->srcu))
		goto out_err_nosrcu;
	for (i = 0; i < KVM_NR_BUSES; i++) {
		kvm->buses[i] = kzalloc(sizeof(struct kvm_io_bus),
					GFP_KERNEL);
		if (!kvm->buses[i])
			goto out_err;
	}

	spin_lock_init(&kvm->mmu_lock);
	kvm->mm = current->mm;
	atomic_inc(&kvm->mm->mm_count);
	kvm_eventfd_init(kvm);
	mutex_init(&kvm->lock);
	mutex_init(&kvm->irq_lock);
	mutex_init(&kvm->slots_lock);
	atomic_set(&kvm->users_count, 1);

	r = kvm_init_mmu_notifier(kvm);
	if (r)
		goto out_err;

	raw_spin_lock(&kvm_lock);
	list_add(&kvm->vm_list, &vm_list);
	raw_spin_unlock(&kvm_lock);

	return kvm;

out_err:
	cleanup_srcu_struct(&kvm->srcu);
out_err_nosrcu:
	hardware_disable_all();
out_err_nodisable:
	for (i = 0; i < KVM_NR_BUSES; i++)
		kfree(kvm->buses[i]);
	kfree(kvm->memslots);
	kvm_arch_free_vm(kvm);
	return ERR_PTR(r);
}
/*
 * Avoid using vmalloc for a small buffer.
 * Should not be used when the size is statically known.
 */
void *kvm_kvzalloc(unsigned long size)
{
	if (size > PAGE_SIZE)
		return vzalloc(size);
	else
		return kzalloc(size, GFP_KERNEL);
}

void kvm_kvfree(const void *addr)
{
	if (is_vmalloc_addr(addr))
		vfree(addr);
	else
		kfree(addr);
}

static void kvm_destroy_dirty_bitmap(struct kvm_memory_slot *memslot)
{
	if (!memslot->dirty_bitmap)
		return;

	kvm_kvfree(memslot->dirty_bitmap);
	memslot->dirty_bitmap = NULL;
}

/*
 * Free any memory in @free but not in @dont.
 */
static void kvm_free_physmem_slot(struct kvm_memory_slot *free,
				  struct kvm_memory_slot *dont)
{
	if (!dont || free->dirty_bitmap != dont->dirty_bitmap)
		kvm_destroy_dirty_bitmap(free);

	kvm_arch_free_memslot(free, dont);

	free->npages = 0;
}

void kvm_free_physmem(struct kvm *kvm)
{
	struct kvm_memslots *slots = kvm->memslots;
	struct kvm_memory_slot *memslot;

	kvm_for_each_memslot(memslot, slots)
		kvm_free_physmem_slot(memslot, NULL);

	kfree(kvm->memslots);
}
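
/*
 * Tear down a VM once its last reference is dropped: unlink it from
 * vm_list, release irq routing, I/O buses, the MMU notifier, guest memory
 * and architecture state, and finally drop the mm reference taken at
 * creation time.
 */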
static void kvm_destroy_vm(struct kvm *kvm)
{
	int i;
	struct mm_struct *mm = kvm->mm;

	kvm_arch_sync_events(kvm);
	raw_spin_lock(&kvm_lock);
	list_del(&kvm->vm_list);
	raw_spin_unlock(&kvm_lock);
	kvm_free_irq_routing(kvm);
	for (i = 0; i < KVM_NR_BUSES; i++)
		kvm_io_bus_destroy(kvm->buses[i]);
	kvm_coalesced_mmio_free(kvm);
#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
	mmu_notifier_unregister(&kvm->mmu_notifier, kvm->mm);
#else
	kvm_arch_flush_shadow(kvm);
#endif
	kvm_arch_destroy_vm(kvm);
	kvm_free_physmem(kvm);
	cleanup_srcu_struct(&kvm->srcu);
	kvm_arch_free_vm(kvm);
	hardware_disable_all();
	mmdrop(mm);
}

void kvm_get_kvm(struct kvm *kvm)
{
	atomic_inc(&kvm->users_count);
}
EXPORT_SYMBOL_GPL(kvm_get_kvm);

void kvm_put_kvm(struct kvm *kvm)
{
	if (atomic_dec_and_test(&kvm->users_count))
		kvm_destroy_vm(kvm);
}
EXPORT_SYMBOL_GPL(kvm_put_kvm);

static int kvm_vm_release(struct inode *inode, struct file *filp)
{
	struct kvm *kvm = filp->private_data;

	kvm_irqfd_release(kvm);

	kvm_put_kvm(kvm);
	return 0;
}

/*
 * Allocation size is twice as large as the actual dirty bitmap size.
 * See x86's kvm_vm_ioctl_get_dirty_log() for why this is needed.
 */
static int kvm_create_dirty_bitmap(struct kvm_memory_slot *memslot)
{
#ifndef CONFIG_S390
	unsigned long dirty_bytes = 2 * kvm_dirty_bitmap_bytes(memslot);

	memslot->dirty_bitmap = kvm_kvzalloc(dirty_bytes);
	if (!memslot->dirty_bitmap)
		return -ENOMEM;

#endif /* !CONFIG_S390 */
	return 0;
}

static int cmp_memslot(const void *slot1, const void *slot2)
{
	struct kvm_memory_slot *s1, *s2;

	s1 = (struct kvm_memory_slot *)slot1;
	s2 = (struct kvm_memory_slot *)slot2;

	if (s1->npages < s2->npages)
		return 1;
	if (s1->npages > s2->npages)
		return -1;

	return 0;
}

/*
 * Sort the memslots by size, descending, so that the larger slots
 * get a better fit.
 */
static void sort_memslots(struct kvm_memslots *slots)
{
	int i;

	sort(slots->memslots, KVM_MEM_SLOTS_NUM,
	     sizeof(struct kvm_memory_slot), cmp_memslot, NULL);

	for (i = 0; i < KVM_MEM_SLOTS_NUM; i++)
		slots->id_to_index[slots->memslots[i].id] = i;
}
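
/*
 * Install @new into @slots (replacing the slot with the same id), re-sort
 * the array if the slot's size changed, and bump the slots generation so
 * that cached gfn->hva translations can detect the change.
 */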
void update_memslots(struct kvm_memslots *slots, struct kvm_memory_slot *new)
{
	if (new) {
		int id = new->id;
		struct kvm_memory_slot *old = id_to_memslot(slots, id);
		unsigned long npages = old->npages;

		*old = *new;
		if (new->npages != npages)
			sort_memslots(slots);
	}

	slots->generation++;
}

/*
 * Allocate some memory and give it an address in the guest physical address
 * space.
 *
 * Discontiguous memory is allowed, mostly for framebuffers.
 *
 * Must be called holding mmap_sem for write.
 */
int __kvm_set_memory_region(struct kvm *kvm,
			    struct kvm_userspace_memory_region *mem,
			    int user_alloc)
{
	int r;
	gfn_t base_gfn;
	unsigned long npages;
	unsigned long i;
	struct kvm_memory_slot *memslot;
	struct kvm_memory_slot old, new;
	struct kvm_memslots *slots, *old_memslots;

	r = -EINVAL;
	/* General sanity checks */
	if (mem->memory_size & (PAGE_SIZE - 1))
		goto out;
	if (mem->guest_phys_addr & (PAGE_SIZE - 1))
		goto out;
	/* We can read the guest memory with __xxx_user() later on. */
	if (user_alloc &&
	    ((mem->userspace_addr & (PAGE_SIZE - 1)) ||
	     !access_ok(VERIFY_WRITE,
			(void __user *)(unsigned long)mem->userspace_addr,
			mem->memory_size)))
		goto out;
	if (mem->slot >= KVM_MEM_SLOTS_NUM)
		goto out;
	if (mem->guest_phys_addr + mem->memory_size < mem->guest_phys_addr)
		goto out;

	memslot = id_to_memslot(kvm->memslots, mem->slot);
	base_gfn = mem->guest_phys_addr >> PAGE_SHIFT;
	npages = mem->memory_size >> PAGE_SHIFT;

	r = -EINVAL;
	if (npages > KVM_MEM_MAX_NR_PAGES)
		goto out;

	if (!npages)
		mem->flags &= ~KVM_MEM_LOG_DIRTY_PAGES;

	new = old = *memslot;

	new.id = mem->slot;
	new.base_gfn = base_gfn;
	new.npages = npages;
	new.flags = mem->flags;

	/* Disallow changing a memory slot's size. */
	r = -EINVAL;
	if (npages && old.npages && npages != old.npages)
		goto out_free;

	/* Check for overlaps */
	r = -EEXIST;
	for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
		struct kvm_memory_slot *s = &kvm->memslots->memslots[i];

		if (s == memslot || !s->npages)
			continue;
		if (!((base_gfn + npages <= s->base_gfn) ||
		      (base_gfn >= s->base_gfn + s->npages)))
			goto out_free;
	}

	/* Free page dirty bitmap if unneeded */
	if (!(new.flags & KVM_MEM_LOG_DIRTY_PAGES))
		new.dirty_bitmap = NULL;

	r = -ENOMEM;

	/* Allocate if a slot is being created */
	if (npages && !old.npages) {
		new.user_alloc = user_alloc;
		new.userspace_addr = mem->userspace_addr;

		if (kvm_arch_create_memslot(&new, npages))
			goto out_free;
	}

	/* Allocate page dirty bitmap if needed */
	if ((new.flags & KVM_MEM_LOG_DIRTY_PAGES) && !new.dirty_bitmap) {
		if (kvm_create_dirty_bitmap(&new) < 0)
			goto out_free;
		/* destroy any largepage mappings for dirty tracking */
	}

	if (!npages) {
		struct kvm_memory_slot *slot;

		r = -ENOMEM;
		slots = kmemdup(kvm->memslots, sizeof(struct kvm_memslots),
				GFP_KERNEL);
		if (!slots)
			goto out_free;
		slot = id_to_memslot(slots, mem->slot);
		slot->flags |= KVM_MEMSLOT_INVALID;

		update_memslots(slots, NULL);

		old_memslots = kvm->memslots;
		rcu_assign_pointer(kvm->memslots, slots);
		synchronize_srcu_expedited(&kvm->srcu);
		/* From this point no new shadow pages pointing to a deleted
		 * memslot will be created.
		 *
		 * validation of sp->gfn happens in:
		 *	- gfn_to_hva (kvm_read_guest, gfn_to_pfn)
		 *	- kvm_is_visible_gfn (mmu_check_roots)
		 */
		kvm_arch_flush_shadow(kvm);
		kfree(old_memslots);
	}

	r = kvm_arch_prepare_memory_region(kvm, &new, old, mem, user_alloc);
	if (r)
		goto out_free;

	/* map/unmap the pages in iommu page table */
	if (npages) {
		r = kvm_iommu_map_pages(kvm, &new);
		if (r)
			goto out_free;
	} else
		kvm_iommu_unmap_pages(kvm, &old);

	r = -ENOMEM;
	slots = kmemdup(kvm->memslots, sizeof(struct kvm_memslots),
			GFP_KERNEL);
	if (!slots)
		goto out_free;

	/* actual memory is freed via old in kvm_free_physmem_slot below */
	if (!npages) {
		new.dirty_bitmap = NULL;
		memset(&new.arch, 0, sizeof(new.arch));
	}

	update_memslots(slots, &new);
	old_memslots = kvm->memslots;
	rcu_assign_pointer(kvm->memslots, slots);
	synchronize_srcu_expedited(&kvm->srcu);

	kvm_arch_commit_memory_region(kvm, mem, old, user_alloc);

	/*
	 * If the new memory slot is created, we need to clear all
	 * mmio sptes.
	 */
	if (npages && old.base_gfn != mem->guest_phys_addr >> PAGE_SHIFT)
		kvm_arch_flush_shadow(kvm);

	kvm_free_physmem_slot(&old, &new);
	kfree(old_memslots);

	return 0;

out_free:
	kvm_free_physmem_slot(&new, &old);
out:
	return r;
}
EXPORT_SYMBOL_GPL(__kvm_set_memory_region);

int kvm_set_memory_region(struct kvm *kvm,
			  struct kvm_userspace_memory_region *mem,
			  int user_alloc)
{
	int r;

	mutex_lock(&kvm->slots_lock);
	r = __kvm_set_memory_region(kvm, mem, user_alloc);
	mutex_unlock(&kvm->slots_lock);
	return r;
}
EXPORT_SYMBOL_GPL(kvm_set_memory_region);

int kvm_vm_ioctl_set_memory_region(struct kvm *kvm,
				   struct kvm_userspace_memory_region *mem,
				   int user_alloc)
{
	if (mem->slot >= KVM_MEMORY_SLOTS)
		return -EINVAL;
	return kvm_set_memory_region(kvm, mem, user_alloc);
}
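
/*
 * Copy a memslot's dirty bitmap out to userspace and report, via @is_dirty,
 * whether any bit in it is set.
 */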
int kvm_get_dirty_log(struct kvm *kvm,
		      struct kvm_dirty_log *log, int *is_dirty)
{
	struct kvm_memory_slot *memslot;
	int r, i;
	unsigned long n;
	unsigned long any = 0;

	r = -EINVAL;
	if (log->slot >= KVM_MEMORY_SLOTS)
		goto out;

	memslot = id_to_memslot(kvm->memslots, log->slot);
	r = -ENOENT;
	if (!memslot->dirty_bitmap)
		goto out;

	n = kvm_dirty_bitmap_bytes(memslot);

	for (i = 0; !any && i < n/sizeof(long); ++i)
		any = memslot->dirty_bitmap[i];

	r = -EFAULT;
	if (copy_to_user(log->dirty_bitmap, memslot->dirty_bitmap, n))
		goto out;

	if (any)
		*is_dirty = 1;

	r = 0;
out:
	return r;
}

bool kvm_largepages_enabled(void)
{
	return largepages_enabled;
}

void kvm_disable_largepages(void)
{
	largepages_enabled = false;
}
EXPORT_SYMBOL_GPL(kvm_disable_largepages);

int is_error_page(struct page *page)
{
	return IS_ERR(page);
}
EXPORT_SYMBOL_GPL(is_error_page);

struct page *get_bad_page(void)
{
	return ERR_PTR(-ENOENT);
}

static inline unsigned long bad_hva(void)
{
	return PAGE_OFFSET;
}

int kvm_is_error_hva(unsigned long addr)
{
	return addr == bad_hva();
}
EXPORT_SYMBOL_GPL(kvm_is_error_hva);

struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn)
{
	return __gfn_to_memslot(kvm_memslots(kvm), gfn);
}
EXPORT_SYMBOL_GPL(gfn_to_memslot);

int kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn)
{
	struct kvm_memory_slot *memslot = gfn_to_memslot(kvm, gfn);

	if (!memslot || memslot->id >= KVM_MEMORY_SLOTS ||
	    memslot->flags & KVM_MEMSLOT_INVALID)
		return 0;

	return 1;
}
EXPORT_SYMBOL_GPL(kvm_is_visible_gfn);

unsigned long kvm_host_page_size(struct kvm *kvm, gfn_t gfn)
{
	struct vm_area_struct *vma;
	unsigned long addr, size;

	size = PAGE_SIZE;

	addr = gfn_to_hva(kvm, gfn);
	if (kvm_is_error_hva(addr))
		return PAGE_SIZE;

	down_read(&current->mm->mmap_sem);
	vma = find_vma(current->mm, addr);
	if (!vma)
		goto out;

	size = vma_kernel_pagesize(vma);

out:
	up_read(&current->mm->mmap_sem);

	return size;
}

static unsigned long gfn_to_hva_many(struct kvm_memory_slot *slot, gfn_t gfn,
				     gfn_t *nr_pages)
{
	if (!slot || slot->flags & KVM_MEMSLOT_INVALID)
		return bad_hva();

	if (nr_pages)
		*nr_pages = slot->npages - (gfn - slot->base_gfn);

	return gfn_to_hva_memslot(slot, gfn);
}

unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn)
{
	return gfn_to_hva_many(gfn_to_memslot(kvm, gfn), gfn, NULL);
}
EXPORT_SYMBOL_GPL(gfn_to_hva);

int get_user_page_nowait(struct task_struct *tsk, struct mm_struct *mm,
			 unsigned long start, int write, struct page **page)
{
	int flags = FOLL_TOUCH | FOLL_NOWAIT | FOLL_HWPOISON | FOLL_GET;

	if (write)
		flags |= FOLL_WRITE;

	return __get_user_pages(tsk, mm, start, 1, flags, page, NULL, NULL);
}

static inline int check_user_page_hwpoison(unsigned long addr)
{
	int rc, flags = FOLL_TOUCH | FOLL_HWPOISON | FOLL_WRITE;

	rc = __get_user_pages(current, current->mm, addr, 1,
			      flags, NULL, NULL, NULL);
	return rc == -EHWPOISON;
}
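
/*
 * Translate a host virtual address into a host pfn.  @atomic requests the
 * non-sleeping fast path only, @async (when non-NULL) lets the fault be
 * completed asynchronously, and @writable reports whether the resulting
 * mapping is writable.  VM_PFNMAP areas yield the raw pfn; otherwise an
 * error pfn is returned on failure.
 */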
static pfn_t hva_to_pfn(unsigned long addr, bool atomic, bool *async,
			bool write_fault, bool *writable)
{
	struct page *page[1];
	int npages = 0;
	pfn_t pfn;

	/* we can do it either atomically or asynchronously, not both */
	BUG_ON(atomic && async);

	BUG_ON(!write_fault && !writable);

	if (writable)
		*writable = true;

	if (atomic || async)
		npages = __get_user_pages_fast(addr, 1, 1, page);

	if (unlikely(npages != 1) && !atomic) {
		might_sleep();

		if (writable)
			*writable = write_fault;

		if (async) {
			down_read(&current->mm->mmap_sem);
			npages = get_user_page_nowait(current, current->mm,
						      addr, write_fault, page);
			up_read(&current->mm->mmap_sem);
		} else
			npages = get_user_pages_fast(addr, 1, write_fault,
						     page);

		/* map read fault as writable if possible */
		if (unlikely(!write_fault) && npages == 1) {
			struct page *wpage[1];

			npages = __get_user_pages_fast(addr, 1, 1, wpage);
			if (npages == 1) {
				*writable = true;
				put_page(page[0]);
				page[0] = wpage[0];
			}
			npages = 1;
		}
	}

	if (unlikely(npages != 1)) {
		struct vm_area_struct *vma;

		if (atomic)
			return KVM_PFN_ERR_FAULT;

		down_read(&current->mm->mmap_sem);
		if (npages == -EHWPOISON ||
		    (!async && check_user_page_hwpoison(addr))) {
			up_read(&current->mm->mmap_sem);
			return KVM_PFN_ERR_HWPOISON;
		}

		vma = find_vma_intersection(current->mm, addr, addr+1);

		if (vma == NULL)
			pfn = KVM_PFN_ERR_FAULT;
		else if ((vma->vm_flags & VM_PFNMAP)) {
			pfn = ((addr - vma->vm_start) >> PAGE_SHIFT) +
				vma->vm_pgoff;
			BUG_ON(!kvm_is_mmio_pfn(pfn));
		} else {
			if (async && (vma->vm_flags & VM_WRITE))
				*async = true;
			pfn = KVM_PFN_ERR_FAULT;
		}
		up_read(&current->mm->mmap_sem);
	} else
		pfn = page_to_pfn(page[0]);

	return pfn;
}

pfn_t hva_to_pfn_atomic(unsigned long addr)
{
	return hva_to_pfn(addr, true, NULL, true, NULL);
}
EXPORT_SYMBOL_GPL(hva_to_pfn_atomic);

static pfn_t __gfn_to_pfn(struct kvm *kvm, gfn_t gfn, bool atomic, bool *async,
			  bool write_fault, bool *writable)
{
	unsigned long addr;

	if (async)
		*async = false;

	addr = gfn_to_hva(kvm, gfn);
	if (kvm_is_error_hva(addr))
		return KVM_PFN_ERR_BAD;

	return hva_to_pfn(addr, atomic, async, write_fault, writable);
}

pfn_t gfn_to_pfn_atomic(struct kvm *kvm, gfn_t gfn)
{
	return __gfn_to_pfn(kvm, gfn, true, NULL, true, NULL);
}
EXPORT_SYMBOL_GPL(gfn_to_pfn_atomic);

pfn_t gfn_to_pfn_async(struct kvm *kvm, gfn_t gfn, bool *async,
		       bool write_fault, bool *writable)
{
	return __gfn_to_pfn(kvm, gfn, false, async, write_fault, writable);
}
EXPORT_SYMBOL_GPL(gfn_to_pfn_async);

pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn)
{
	return __gfn_to_pfn(kvm, gfn, false, NULL, true, NULL);
}
EXPORT_SYMBOL_GPL(gfn_to_pfn);

pfn_t gfn_to_pfn_prot(struct kvm *kvm, gfn_t gfn, bool write_fault,
		      bool *writable)
{
	return __gfn_to_pfn(kvm, gfn, false, NULL, write_fault, writable);
}
EXPORT_SYMBOL_GPL(gfn_to_pfn_prot);

pfn_t gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn)
{
	unsigned long addr = gfn_to_hva_memslot(slot, gfn);
	return hva_to_pfn(addr, false, NULL, true, NULL);
}

int gfn_to_page_many_atomic(struct kvm *kvm, gfn_t gfn, struct page **pages,
			    int nr_pages)
{
	unsigned long addr;
	gfn_t entry;

	addr = gfn_to_hva_many(gfn_to_memslot(kvm, gfn), gfn, &entry);
	if (kvm_is_error_hva(addr))
		return -1;

	if (entry < nr_pages)
		return 0;

	return __get_user_pages_fast(addr, nr_pages, 1, pages);
}
EXPORT_SYMBOL_GPL(gfn_to_page_many_atomic);

static struct page *kvm_pfn_to_page(pfn_t pfn)
{
	WARN_ON(kvm_is_mmio_pfn(pfn));

	if (is_error_pfn(pfn) || kvm_is_mmio_pfn(pfn))
		return get_bad_page();

	return pfn_to_page(pfn);
}

struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn)
{
	pfn_t pfn;

	pfn = gfn_to_pfn(kvm, gfn);

	return kvm_pfn_to_page(pfn);
}
EXPORT_SYMBOL_GPL(gfn_to_page);

void kvm_release_page_clean(struct page *page)
{
	if (!is_error_page(page))
		kvm_release_pfn_clean(page_to_pfn(page));
}
EXPORT_SYMBOL_GPL(kvm_release_page_clean);

void kvm_release_pfn_clean(pfn_t pfn)
{
	if (!is_error_pfn(pfn) && !kvm_is_mmio_pfn(pfn))
		put_page(pfn_to_page(pfn));
}
EXPORT_SYMBOL_GPL(kvm_release_pfn_clean);

void kvm_release_page_dirty(struct page *page)
{
	WARN_ON(is_error_page(page));

	kvm_release_pfn_dirty(page_to_pfn(page));
}
EXPORT_SYMBOL_GPL(kvm_release_page_dirty);

void kvm_release_pfn_dirty(pfn_t pfn)
{
	kvm_set_pfn_dirty(pfn);
	kvm_release_pfn_clean(pfn);
}
EXPORT_SYMBOL_GPL(kvm_release_pfn_dirty);

void kvm_set_page_dirty(struct page *page)
{
	kvm_set_pfn_dirty(page_to_pfn(page));
}
EXPORT_SYMBOL_GPL(kvm_set_page_dirty);

void kvm_set_pfn_dirty(pfn_t pfn)
{
	if (!kvm_is_mmio_pfn(pfn)) {
		struct page *page = pfn_to_page(pfn);
		if (!PageReserved(page))
			SetPageDirty(page);
	}
}
EXPORT_SYMBOL_GPL(kvm_set_pfn_dirty);

void kvm_set_pfn_accessed(pfn_t pfn)
{
	if (!kvm_is_mmio_pfn(pfn))
		mark_page_accessed(pfn_to_page(pfn));
}
EXPORT_SYMBOL_GPL(kvm_set_pfn_accessed);

void kvm_get_pfn(pfn_t pfn)
{
	if (!kvm_is_mmio_pfn(pfn))
		get_page(pfn_to_page(pfn));
}
EXPORT_SYMBOL_GPL(kvm_get_pfn);

static int next_segment(unsigned long len, int offset)
{
	if (len > PAGE_SIZE - offset)
		return PAGE_SIZE - offset;
	else
		return len;
}

int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
			int len)
{
	int r;
	unsigned long addr;

	addr = gfn_to_hva(kvm, gfn);
	if (kvm_is_error_hva(addr))
		return -EFAULT;
	r = __copy_from_user(data, (void __user *)addr + offset, len);
	if (r)
		return -EFAULT;
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_read_guest_page);
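
/*
 * Read @len bytes of guest memory starting at guest physical address @gpa,
 * crossing page boundaries as needed by splitting the copy into per-page
 * segments.
 */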
int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len)
{
	gfn_t gfn = gpa >> PAGE_SHIFT;
	int seg;
	int offset = offset_in_page(gpa);
	int ret;

	while ((seg = next_segment(len, offset)) != 0) {
		ret = kvm_read_guest_page(kvm, gfn, data, offset, seg);
		if (ret < 0)
			return ret;
		offset = 0;
		len -= seg;
		data += seg;
		++gfn;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_read_guest);

int kvm_read_guest_atomic(struct kvm *kvm, gpa_t gpa, void *data,
			  unsigned long len)
{
	int r;
	unsigned long addr;
	gfn_t gfn = gpa >> PAGE_SHIFT;
	int offset = offset_in_page(gpa);

	addr = gfn_to_hva(kvm, gfn);
	if (kvm_is_error_hva(addr))
		return -EFAULT;
	pagefault_disable();
	r = __copy_from_user_inatomic(data, (void __user *)addr + offset, len);
	pagefault_enable();
	if (r)
		return -EFAULT;
	return 0;
}
EXPORT_SYMBOL(kvm_read_guest_atomic);

int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, const void *data,
			 int offset, int len)
{
	int r;
	unsigned long addr;

	addr = gfn_to_hva(kvm, gfn);
	if (kvm_is_error_hva(addr))
		return -EFAULT;
	r = __copy_to_user((void __user *)addr + offset, data, len);
	if (r)
		return -EFAULT;
	mark_page_dirty(kvm, gfn);
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_write_guest_page);

int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
		    unsigned long len)
{
	gfn_t gfn = gpa >> PAGE_SHIFT;
	int seg;
	int offset = offset_in_page(gpa);
	int ret;

	while ((seg = next_segment(len, offset)) != 0) {
		ret = kvm_write_guest_page(kvm, gfn, data, offset, seg);
		if (ret < 0)
			return ret;
		offset = 0;
		len -= seg;
		data += seg;
		++gfn;
	}
	return 0;
}
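
/*
 * Prime a gfn_to_hva_cache for @gpa.  The cached translation is stamped
 * with the current memslots generation, so the cached read/write helpers
 * below can detect stale entries and re-initialize themselves.
 */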
int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
			      gpa_t gpa)
{
	struct kvm_memslots *slots = kvm_memslots(kvm);
	int offset = offset_in_page(gpa);
	gfn_t gfn = gpa >> PAGE_SHIFT;

	ghc->gpa = gpa;
	ghc->generation = slots->generation;
	ghc->memslot = gfn_to_memslot(kvm, gfn);
	ghc->hva = gfn_to_hva_many(ghc->memslot, gfn, NULL);
	if (!kvm_is_error_hva(ghc->hva))
		ghc->hva += offset;
	else
		return -EFAULT;

	return 0;
}
EXPORT_SYMBOL_GPL(kvm_gfn_to_hva_cache_init);

int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
			   void *data, unsigned long len)
{
	struct kvm_memslots *slots = kvm_memslots(kvm);
	int r;

	if (slots->generation != ghc->generation)
		kvm_gfn_to_hva_cache_init(kvm, ghc, ghc->gpa);

	if (kvm_is_error_hva(ghc->hva))
		return -EFAULT;

	r = __copy_to_user((void __user *)ghc->hva, data, len);
	if (r)
		return -EFAULT;
	mark_page_dirty_in_slot(kvm, ghc->memslot, ghc->gpa >> PAGE_SHIFT);

	return 0;
}
EXPORT_SYMBOL_GPL(kvm_write_guest_cached);

int kvm_read_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
			  void *data, unsigned long len)
{
	struct kvm_memslots *slots = kvm_memslots(kvm);
	int r;

	if (slots->generation != ghc->generation)
		kvm_gfn_to_hva_cache_init(kvm, ghc, ghc->gpa);

	if (kvm_is_error_hva(ghc->hva))
		return -EFAULT;

	r = __copy_from_user(data, (void __user *)ghc->hva, len);
	if (r)
		return -EFAULT;

	return 0;
}
EXPORT_SYMBOL_GPL(kvm_read_guest_cached);

int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len)
{
	return kvm_write_guest_page(kvm, gfn, (const void *) empty_zero_page,
				    offset, len);
}
EXPORT_SYMBOL_GPL(kvm_clear_guest_page);

int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len)
{
	gfn_t gfn = gpa >> PAGE_SHIFT;
	int seg;
	int offset = offset_in_page(gpa);
	int ret;

	while ((seg = next_segment(len, offset)) != 0) {
		ret = kvm_clear_guest_page(kvm, gfn, offset, seg);
		if (ret < 0)
			return ret;
		offset = 0;
		len -= seg;
		++gfn;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_clear_guest);

void mark_page_dirty_in_slot(struct kvm *kvm, struct kvm_memory_slot *memslot,
			     gfn_t gfn)
{
	if (memslot && memslot->dirty_bitmap) {
		unsigned long rel_gfn = gfn - memslot->base_gfn;

		/* TODO: introduce set_bit_le() and use it */
		test_and_set_bit_le(rel_gfn, memslot->dirty_bitmap);
	}
}

void mark_page_dirty(struct kvm *kvm, gfn_t gfn)
{
	struct kvm_memory_slot *memslot;

	memslot = gfn_to_memslot(kvm, gfn);
	mark_page_dirty_in_slot(kvm, memslot, gfn);
}

/*
 * The vCPU has executed a HLT instruction with in-kernel mode enabled.
 */
void kvm_vcpu_block(struct kvm_vcpu *vcpu)
{
	DEFINE_WAIT(wait);

	for (;;) {
		prepare_to_wait(&vcpu->wq, &wait, TASK_INTERRUPTIBLE);

		if (kvm_arch_vcpu_runnable(vcpu)) {
			kvm_make_request(KVM_REQ_UNHALT, vcpu);
			break;
		}
		if (kvm_cpu_has_pending_timer(vcpu))
			break;
		if (signal_pending(current))
			break;

		schedule();
	}

	finish_wait(&vcpu->wq, &wait);
}

#ifndef CONFIG_S390
/*
 * Kick a sleeping VCPU, or a guest VCPU in guest mode, into host kernel mode.
 */
void kvm_vcpu_kick(struct kvm_vcpu *vcpu)
{
	int me;
	int cpu = vcpu->cpu;
	wait_queue_head_t *wqp;

	wqp = kvm_arch_vcpu_wq(vcpu);
	if (waitqueue_active(wqp)) {
		wake_up_interruptible(wqp);
		++vcpu->stat.halt_wakeup;
	}

	me = get_cpu();
	if (cpu != me && (unsigned)cpu < nr_cpu_ids && cpu_online(cpu))
		if (kvm_arch_vcpu_should_kick(vcpu))
			smp_send_reschedule(cpu);
	put_cpu();
}
#endif /* !CONFIG_S390 */

void kvm_resched(struct kvm_vcpu *vcpu)
{
	if (!need_resched())
		return;
	cond_resched();
}
EXPORT_SYMBOL_GPL(kvm_resched);
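
/*
 * Try to yield the current physical CPU to the task backing @target.
 * Returns true if the yield actually took place; false if the target has no
 * task, is already running in guest context (PF_VCPU), or the scheduler
 * declined the yield.
 */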
bool kvm_vcpu_yield_to(struct kvm_vcpu *target)
{
	struct pid *pid;
	struct task_struct *task = NULL;

	rcu_read_lock();
	pid = rcu_dereference(target->pid);
	if (pid)
		task = get_pid_task(target->pid, PIDTYPE_PID);
	rcu_read_unlock();
	if (!task)
		return false;
	if (task->flags & PF_VCPU) {
		put_task_struct(task);
		return false;
	}
	if (yield_to(task, 1)) {
		put_task_struct(task);
		return true;
	}
	put_task_struct(task);
	return false;
}
EXPORT_SYMBOL_GPL(kvm_vcpu_yield_to);

#ifdef CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT
/*
 * Helper that checks whether a VCPU is eligible for directed yield.
 * The most eligible candidate to yield to is decided by the following
 * heuristics:
 *
 * (a) a VCPU which has not done a PLE exit or had cpu relax intercepted
 * recently (a likely preempted lock holder), indicated by @in_spin_loop.
 * Set at the beginning and cleared at the end of the interception/PLE
 * handler.
 *
 * (b) a VCPU which has done a PLE exit/had cpu relax intercepted but did
 * not get a chance last time (it has probably become eligible now since we
 * yielded to the lock holder in the last iteration).  This is done by
 * toggling @dy_eligible each time a VCPU is checked for eligibility.
 *
 * Yielding to a recently PLE-exited/cpu relax intercepted VCPU before
 * yielding to the preempted lock holder could result in wrong VCPU
 * selection and CPU burning.  Giving priority to a potential lock holder
 * increases lock progress.
 *
 * Since the algorithm is based on heuristics, accessing another VCPU's data
 * without locking does not harm.  It may result in trying to yield to the
 * same VCPU, failing, and continuing with the next VCPU, and so on.
 */
bool kvm_vcpu_eligible_for_directed_yield(struct kvm_vcpu *vcpu)
{
	bool eligible;

	eligible = !vcpu->spin_loop.in_spin_loop ||
			(vcpu->spin_loop.in_spin_loop &&
			 vcpu->spin_loop.dy_eligible);

	if (vcpu->spin_loop.in_spin_loop)
		kvm_vcpu_set_dy_eligible(vcpu, !vcpu->spin_loop.dy_eligible);

	return eligible;
}
#endif

void kvm_vcpu_on_spin(struct kvm_vcpu *me)
{
	struct kvm *kvm = me->kvm;
	struct kvm_vcpu *vcpu;
	int last_boosted_vcpu = me->kvm->last_boosted_vcpu;
	int yielded = 0;
	int pass;
	int i;

	kvm_vcpu_set_in_spin_loop(me, true);
	/*
	 * We boost the priority of a VCPU that is runnable but not
	 * currently running, because it got preempted by something
	 * else and called schedule in __vcpu_run.  Hopefully that
	 * VCPU is holding the lock that we need and will release it.
	 * We approximate round-robin by starting at the last boosted VCPU.
	 */
	for (pass = 0; pass < 2 && !yielded; pass++) {
		kvm_for_each_vcpu(i, vcpu, kvm) {
			if (!pass && i <= last_boosted_vcpu) {
				i = last_boosted_vcpu;
				continue;
			} else if (pass && i > last_boosted_vcpu)
				break;
			if (vcpu == me)
				continue;
			if (waitqueue_active(&vcpu->wq))
				continue;
			if (!kvm_vcpu_eligible_for_directed_yield(vcpu))
				continue;
			if (kvm_vcpu_yield_to(vcpu)) {
				kvm->last_boosted_vcpu = i;
				yielded = 1;
				break;
			}
		}
	}
	kvm_vcpu_set_in_spin_loop(me, false);

	/* Ensure vcpu is not eligible during next spinloop */
	kvm_vcpu_set_dy_eligible(me, false);
}
EXPORT_SYMBOL_GPL(kvm_vcpu_on_spin);
  1355. static int kvm_vcpu_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
  1356. {
  1357. struct kvm_vcpu *vcpu = vma->vm_file->private_data;
  1358. struct page *page;
  1359. if (vmf->pgoff == 0)
  1360. page = virt_to_page(vcpu->run);
  1361. #ifdef CONFIG_X86
  1362. else if (vmf->pgoff == KVM_PIO_PAGE_OFFSET)
  1363. page = virt_to_page(vcpu->arch.pio_data);
  1364. #endif
  1365. #ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
  1366. else if (vmf->pgoff == KVM_COALESCED_MMIO_PAGE_OFFSET)
  1367. page = virt_to_page(vcpu->kvm->coalesced_mmio_ring);
  1368. #endif
  1369. else
  1370. return kvm_arch_vcpu_fault(vcpu, vmf);
  1371. get_page(page);
  1372. vmf->page = page;
  1373. return 0;
  1374. }
  1375. static const struct vm_operations_struct kvm_vcpu_vm_ops = {
  1376. .fault = kvm_vcpu_fault,
  1377. };
  1378. static int kvm_vcpu_mmap(struct file *file, struct vm_area_struct *vma)
  1379. {
  1380. vma->vm_ops = &kvm_vcpu_vm_ops;
  1381. return 0;
  1382. }
  1383. static int kvm_vcpu_release(struct inode *inode, struct file *filp)
  1384. {
  1385. struct kvm_vcpu *vcpu = filp->private_data;
  1386. kvm_put_kvm(vcpu->kvm);
  1387. return 0;
  1388. }
  1389. static struct file_operations kvm_vcpu_fops = {
  1390. .release = kvm_vcpu_release,
  1391. .unlocked_ioctl = kvm_vcpu_ioctl,
  1392. #ifdef CONFIG_COMPAT
  1393. .compat_ioctl = kvm_vcpu_compat_ioctl,
  1394. #endif
  1395. .mmap = kvm_vcpu_mmap,
  1396. .llseek = noop_llseek,
  1397. };
  1398. /*
  1399. * Allocates an anon-inode backed file descriptor for the vcpu.
  1400. */
  1401. static int create_vcpu_fd(struct kvm_vcpu *vcpu)
  1402. {
  1403. return anon_inode_getfd("kvm-vcpu", &kvm_vcpu_fops, vcpu, O_RDWR);
  1404. }
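/*
 * Illustrative userspace sketch (not part of this file; names are made up):
 * the fd returned above is what a VMM gets back from KVM_CREATE_VCPU on a VM
 * fd, e.g.
 *
 *	vcpu_fd = ioctl(vm_fd, KVM_CREATE_VCPU, 0);
 *	ioctl(vcpu_fd, KVM_RUN, 0);
 */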
  1405. /*
  1406. * Creates some virtual cpus. Good luck creating more than one.
  1407. */
  1408. static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, u32 id)
  1409. {
  1410. int r;
  1411. struct kvm_vcpu *vcpu, *v;
  1412. vcpu = kvm_arch_vcpu_create(kvm, id);
  1413. if (IS_ERR(vcpu))
  1414. return PTR_ERR(vcpu);
  1415. preempt_notifier_init(&vcpu->preempt_notifier, &kvm_preempt_ops);
  1416. r = kvm_arch_vcpu_setup(vcpu);
  1417. if (r)
  1418. goto vcpu_destroy;
  1419. mutex_lock(&kvm->lock);
  1420. if (!kvm_vcpu_compatible(vcpu)) {
  1421. r = -EINVAL;
  1422. goto unlock_vcpu_destroy;
  1423. }
  1424. if (atomic_read(&kvm->online_vcpus) == KVM_MAX_VCPUS) {
  1425. r = -EINVAL;
  1426. goto unlock_vcpu_destroy;
  1427. }
  1428. kvm_for_each_vcpu(r, v, kvm)
  1429. if (v->vcpu_id == id) {
  1430. r = -EEXIST;
  1431. goto unlock_vcpu_destroy;
  1432. }
  1433. BUG_ON(kvm->vcpus[atomic_read(&kvm->online_vcpus)]);
  1434. /* Now it's all set up, let userspace reach it */
  1435. kvm_get_kvm(kvm);
  1436. r = create_vcpu_fd(vcpu);
  1437. if (r < 0) {
  1438. kvm_put_kvm(kvm);
  1439. goto unlock_vcpu_destroy;
  1440. }
  1441. kvm->vcpus[atomic_read(&kvm->online_vcpus)] = vcpu;
  1442. smp_wmb();
  1443. atomic_inc(&kvm->online_vcpus);
  1444. mutex_unlock(&kvm->lock);
  1445. return r;
  1446. unlock_vcpu_destroy:
  1447. mutex_unlock(&kvm->lock);
  1448. vcpu_destroy:
  1449. kvm_arch_vcpu_destroy(vcpu);
  1450. return r;
  1451. }
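/*
 * Install the signal mask applied while the vcpu runs (KVM_SET_SIGNAL_MASK),
 * or clear it again when @sigset is NULL; SIGKILL and SIGSTOP can never be
 * masked.
 */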
  1452. static int kvm_vcpu_ioctl_set_sigmask(struct kvm_vcpu *vcpu, sigset_t *sigset)
  1453. {
  1454. if (sigset) {
  1455. sigdelsetmask(sigset, sigmask(SIGKILL)|sigmask(SIGSTOP));
  1456. vcpu->sigset_active = 1;
  1457. vcpu->sigset = *sigset;
  1458. } else
  1459. vcpu->sigset_active = 0;
  1460. return 0;
  1461. }
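/*
 * Dispatcher for per-vcpu ioctls: generic requests (KVM_RUN, register and FPU
 * access, signal mask, translation, guest debug, ...) are handled here with
 * the vcpu loaded; anything unknown falls through to kvm_arch_vcpu_ioctl().
 */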
  1462. static long kvm_vcpu_ioctl(struct file *filp,
  1463. unsigned int ioctl, unsigned long arg)
  1464. {
  1465. struct kvm_vcpu *vcpu = filp->private_data;
  1466. void __user *argp = (void __user *)arg;
  1467. int r;
  1468. struct kvm_fpu *fpu = NULL;
  1469. struct kvm_sregs *kvm_sregs = NULL;
  1470. if (vcpu->kvm->mm != current->mm)
  1471. return -EIO;
  1472. #if defined(CONFIG_S390) || defined(CONFIG_PPC)
  1473. /*
  1474. * Special cases: vcpu ioctls that are asynchronous to vcpu execution,
  1475. * so vcpu_load() would break them.
  1476. */
  1477. if (ioctl == KVM_S390_INTERRUPT || ioctl == KVM_INTERRUPT)
  1478. return kvm_arch_vcpu_ioctl(filp, ioctl, arg);
  1479. #endif
  1480. vcpu_load(vcpu);
  1481. switch (ioctl) {
  1482. case KVM_RUN:
  1483. r = -EINVAL;
  1484. if (arg)
  1485. goto out;
  1486. r = kvm_arch_vcpu_ioctl_run(vcpu, vcpu->run);
  1487. trace_kvm_userspace_exit(vcpu->run->exit_reason, r);
  1488. break;
  1489. case KVM_GET_REGS: {
  1490. struct kvm_regs *kvm_regs;
  1491. r = -ENOMEM;
  1492. kvm_regs = kzalloc(sizeof(struct kvm_regs), GFP_KERNEL);
  1493. if (!kvm_regs)
  1494. goto out;
  1495. r = kvm_arch_vcpu_ioctl_get_regs(vcpu, kvm_regs);
  1496. if (r)
  1497. goto out_free1;
  1498. r = -EFAULT;
  1499. if (copy_to_user(argp, kvm_regs, sizeof(struct kvm_regs)))
  1500. goto out_free1;
  1501. r = 0;
  1502. out_free1:
  1503. kfree(kvm_regs);
  1504. break;
  1505. }
  1506. case KVM_SET_REGS: {
  1507. struct kvm_regs *kvm_regs;
  1508. r = -ENOMEM;
  1509. kvm_regs = memdup_user(argp, sizeof(*kvm_regs));
  1510. if (IS_ERR(kvm_regs)) {
  1511. r = PTR_ERR(kvm_regs);
  1512. goto out;
  1513. }
  1514. r = kvm_arch_vcpu_ioctl_set_regs(vcpu, kvm_regs);
  1515. if (r)
  1516. goto out_free2;
  1517. r = 0;
  1518. out_free2:
  1519. kfree(kvm_regs);
  1520. break;
  1521. }
  1522. case KVM_GET_SREGS: {
  1523. kvm_sregs = kzalloc(sizeof(struct kvm_sregs), GFP_KERNEL);
  1524. r = -ENOMEM;
  1525. if (!kvm_sregs)
  1526. goto out;
  1527. r = kvm_arch_vcpu_ioctl_get_sregs(vcpu, kvm_sregs);
  1528. if (r)
  1529. goto out;
  1530. r = -EFAULT;
  1531. if (copy_to_user(argp, kvm_sregs, sizeof(struct kvm_sregs)))
  1532. goto out;
  1533. r = 0;
  1534. break;
  1535. }
  1536. case KVM_SET_SREGS: {
  1537. kvm_sregs = memdup_user(argp, sizeof(*kvm_sregs));
  1538. if (IS_ERR(kvm_sregs)) {
  1539. r = PTR_ERR(kvm_sregs);
  1540. goto out;
  1541. }
  1542. r = kvm_arch_vcpu_ioctl_set_sregs(vcpu, kvm_sregs);
  1543. if (r)
  1544. goto out;
  1545. r = 0;
  1546. break;
  1547. }
  1548. case KVM_GET_MP_STATE: {
  1549. struct kvm_mp_state mp_state;
  1550. r = kvm_arch_vcpu_ioctl_get_mpstate(vcpu, &mp_state);
  1551. if (r)
  1552. goto out;
  1553. r = -EFAULT;
  1554. if (copy_to_user(argp, &mp_state, sizeof mp_state))
  1555. goto out;
  1556. r = 0;
  1557. break;
  1558. }
  1559. case KVM_SET_MP_STATE: {
  1560. struct kvm_mp_state mp_state;
  1561. r = -EFAULT;
  1562. if (copy_from_user(&mp_state, argp, sizeof mp_state))
  1563. goto out;
  1564. r = kvm_arch_vcpu_ioctl_set_mpstate(vcpu, &mp_state);
  1565. if (r)
  1566. goto out;
  1567. r = 0;
  1568. break;
  1569. }
  1570. case KVM_TRANSLATE: {
  1571. struct kvm_translation tr;
  1572. r = -EFAULT;
  1573. if (copy_from_user(&tr, argp, sizeof tr))
  1574. goto out;
  1575. r = kvm_arch_vcpu_ioctl_translate(vcpu, &tr);
  1576. if (r)
  1577. goto out;
  1578. r = -EFAULT;
  1579. if (copy_to_user(argp, &tr, sizeof tr))
  1580. goto out;
  1581. r = 0;
  1582. break;
  1583. }
  1584. case KVM_SET_GUEST_DEBUG: {
  1585. struct kvm_guest_debug dbg;
  1586. r = -EFAULT;
  1587. if (copy_from_user(&dbg, argp, sizeof dbg))
  1588. goto out;
  1589. r = kvm_arch_vcpu_ioctl_set_guest_debug(vcpu, &dbg);
  1590. if (r)
  1591. goto out;
  1592. r = 0;
  1593. break;
  1594. }
  1595. case KVM_SET_SIGNAL_MASK: {
  1596. struct kvm_signal_mask __user *sigmask_arg = argp;
  1597. struct kvm_signal_mask kvm_sigmask;
  1598. sigset_t sigset, *p;
  1599. p = NULL;
  1600. if (argp) {
  1601. r = -EFAULT;
  1602. if (copy_from_user(&kvm_sigmask, argp,
  1603. sizeof kvm_sigmask))
  1604. goto out;
  1605. r = -EINVAL;
  1606. if (kvm_sigmask.len != sizeof sigset)
  1607. goto out;
  1608. r = -EFAULT;
  1609. if (copy_from_user(&sigset, sigmask_arg->sigset,
  1610. sizeof sigset))
  1611. goto out;
  1612. p = &sigset;
  1613. }
  1614. r = kvm_vcpu_ioctl_set_sigmask(vcpu, p);
  1615. break;
  1616. }
  1617. case KVM_GET_FPU: {
  1618. fpu = kzalloc(sizeof(struct kvm_fpu), GFP_KERNEL);
  1619. r = -ENOMEM;
  1620. if (!fpu)
  1621. goto out;
  1622. r = kvm_arch_vcpu_ioctl_get_fpu(vcpu, fpu);
  1623. if (r)
  1624. goto out;
  1625. r = -EFAULT;
  1626. if (copy_to_user(argp, fpu, sizeof(struct kvm_fpu)))
  1627. goto out;
  1628. r = 0;
  1629. break;
  1630. }
  1631. case KVM_SET_FPU: {
  1632. fpu = memdup_user(argp, sizeof(*fpu));
  1633. if (IS_ERR(fpu)) {
  1634. r = PTR_ERR(fpu);
  1635. goto out;
  1636. }
  1637. r = kvm_arch_vcpu_ioctl_set_fpu(vcpu, fpu);
  1638. if (r)
  1639. goto out;
  1640. r = 0;
  1641. break;
  1642. }
  1643. default:
  1644. r = kvm_arch_vcpu_ioctl(filp, ioctl, arg);
  1645. }
  1646. out:
  1647. vcpu_put(vcpu);
  1648. kfree(fpu);
  1649. kfree(kvm_sregs);
  1650. return r;
  1651. }
  1652. #ifdef CONFIG_COMPAT
  1653. static long kvm_vcpu_compat_ioctl(struct file *filp,
  1654. unsigned int ioctl, unsigned long arg)
  1655. {
  1656. struct kvm_vcpu *vcpu = filp->private_data;
  1657. void __user *argp = compat_ptr(arg);
  1658. int r;
  1659. if (vcpu->kvm->mm != current->mm)
  1660. return -EIO;
  1661. switch (ioctl) {
  1662. case KVM_SET_SIGNAL_MASK: {
  1663. struct kvm_signal_mask __user *sigmask_arg = argp;
  1664. struct kvm_signal_mask kvm_sigmask;
  1665. compat_sigset_t csigset;
  1666. sigset_t sigset;
  1667. if (argp) {
  1668. r = -EFAULT;
  1669. if (copy_from_user(&kvm_sigmask, argp,
  1670. sizeof kvm_sigmask))
  1671. goto out;
  1672. r = -EINVAL;
  1673. if (kvm_sigmask.len != sizeof csigset)
  1674. goto out;
  1675. r = -EFAULT;
  1676. if (copy_from_user(&csigset, sigmask_arg->sigset,
  1677. sizeof csigset))
  1678. goto out;
  1679. }
  1680. sigset_from_compat(&sigset, &csigset);
  1681. r = kvm_vcpu_ioctl_set_sigmask(vcpu, &sigset);
  1682. break;
  1683. }
  1684. default:
  1685. r = kvm_vcpu_ioctl(filp, ioctl, arg);
  1686. }
  1687. out:
  1688. return r;
  1689. }
  1690. #endif
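/*
 * Dispatcher for per-VM ioctls: vcpu creation, memory slots, dirty logging,
 * coalesced MMIO, irqfd/ioeventfd and friends are handled here; the rest
 * falls through to kvm_arch_vm_ioctl() and, failing that, to the assigned
 * device ioctls.
 */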
  1691. static long kvm_vm_ioctl(struct file *filp,
  1692. unsigned int ioctl, unsigned long arg)
  1693. {
  1694. struct kvm *kvm = filp->private_data;
  1695. void __user *argp = (void __user *)arg;
  1696. int r;
  1697. if (kvm->mm != current->mm)
  1698. return -EIO;
  1699. switch (ioctl) {
  1700. case KVM_CREATE_VCPU:
  1701. r = kvm_vm_ioctl_create_vcpu(kvm, arg);
  1702. if (r < 0)
  1703. goto out;
  1704. break;
  1705. case KVM_SET_USER_MEMORY_REGION: {
  1706. struct kvm_userspace_memory_region kvm_userspace_mem;
  1707. r = -EFAULT;
  1708. if (copy_from_user(&kvm_userspace_mem, argp,
  1709. sizeof kvm_userspace_mem))
  1710. goto out;
  1711. r = kvm_vm_ioctl_set_memory_region(kvm, &kvm_userspace_mem, 1);
  1712. if (r)
  1713. goto out;
  1714. break;
  1715. }
  1716. case KVM_GET_DIRTY_LOG: {
  1717. struct kvm_dirty_log log;
  1718. r = -EFAULT;
  1719. if (copy_from_user(&log, argp, sizeof log))
  1720. goto out;
  1721. r = kvm_vm_ioctl_get_dirty_log(kvm, &log);
  1722. if (r)
  1723. goto out;
  1724. break;
  1725. }
  1726. #ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
  1727. case KVM_REGISTER_COALESCED_MMIO: {
  1728. struct kvm_coalesced_mmio_zone zone;
  1729. r = -EFAULT;
  1730. if (copy_from_user(&zone, argp, sizeof zone))
  1731. goto out;
  1732. r = kvm_vm_ioctl_register_coalesced_mmio(kvm, &zone);
  1733. if (r)
  1734. goto out;
  1735. r = 0;
  1736. break;
  1737. }
  1738. case KVM_UNREGISTER_COALESCED_MMIO: {
  1739. struct kvm_coalesced_mmio_zone zone;
  1740. r = -EFAULT;
  1741. if (copy_from_user(&zone, argp, sizeof zone))
  1742. goto out;
  1743. r = kvm_vm_ioctl_unregister_coalesced_mmio(kvm, &zone);
  1744. if (r)
  1745. goto out;
  1746. r = 0;
  1747. break;
  1748. }
  1749. #endif
  1750. case KVM_IRQFD: {
  1751. struct kvm_irqfd data;
  1752. r = -EFAULT;
  1753. if (copy_from_user(&data, argp, sizeof data))
  1754. goto out;
  1755. r = kvm_irqfd(kvm, &data);
  1756. break;
  1757. }
  1758. case KVM_IOEVENTFD: {
  1759. struct kvm_ioeventfd data;
  1760. r = -EFAULT;
  1761. if (copy_from_user(&data, argp, sizeof data))
  1762. goto out;
  1763. r = kvm_ioeventfd(kvm, &data);
  1764. break;
  1765. }
  1766. #ifdef CONFIG_KVM_APIC_ARCHITECTURE
  1767. case KVM_SET_BOOT_CPU_ID:
  1768. r = 0;
  1769. mutex_lock(&kvm->lock);
  1770. if (atomic_read(&kvm->online_vcpus) != 0)
  1771. r = -EBUSY;
  1772. else
  1773. kvm->bsp_vcpu_id = arg;
  1774. mutex_unlock(&kvm->lock);
  1775. break;
  1776. #endif
  1777. #ifdef CONFIG_HAVE_KVM_MSI
  1778. case KVM_SIGNAL_MSI: {
  1779. struct kvm_msi msi;
  1780. r = -EFAULT;
  1781. if (copy_from_user(&msi, argp, sizeof msi))
  1782. goto out;
  1783. r = kvm_send_userspace_msi(kvm, &msi);
  1784. break;
  1785. }
  1786. #endif
  1787. #ifdef __KVM_HAVE_IRQ_LINE
  1788. case KVM_IRQ_LINE_STATUS:
  1789. case KVM_IRQ_LINE: {
  1790. struct kvm_irq_level irq_event;
  1791. r = -EFAULT;
  1792. if (copy_from_user(&irq_event, argp, sizeof irq_event))
  1793. goto out;
  1794. r = kvm_vm_ioctl_irq_line(kvm, &irq_event);
  1795. if (r)
  1796. goto out;
  1797. r = -EFAULT;
  1798. if (ioctl == KVM_IRQ_LINE_STATUS) {
  1799. if (copy_to_user(argp, &irq_event, sizeof irq_event))
  1800. goto out;
  1801. }
  1802. r = 0;
  1803. break;
  1804. }
  1805. #endif
  1806. default:
  1807. r = kvm_arch_vm_ioctl(filp, ioctl, arg);
  1808. if (r == -ENOTTY)
  1809. r = kvm_vm_ioctl_assigned_device(kvm, ioctl, arg);
  1810. }
  1811. out:
  1812. return r;
  1813. }
  1814. #ifdef CONFIG_COMPAT
  1815. struct compat_kvm_dirty_log {
  1816. __u32 slot;
  1817. __u32 padding1;
  1818. union {
  1819. compat_uptr_t dirty_bitmap; /* one bit per page */
  1820. __u64 padding2;
  1821. };
  1822. };
  1823. static long kvm_vm_compat_ioctl(struct file *filp,
  1824. unsigned int ioctl, unsigned long arg)
  1825. {
  1826. struct kvm *kvm = filp->private_data;
  1827. int r;
  1828. if (kvm->mm != current->mm)
  1829. return -EIO;
  1830. switch (ioctl) {
  1831. case KVM_GET_DIRTY_LOG: {
  1832. struct compat_kvm_dirty_log compat_log;
  1833. struct kvm_dirty_log log;
  1834. r = -EFAULT;
  1835. if (copy_from_user(&compat_log, (void __user *)arg,
  1836. sizeof(compat_log)))
  1837. goto out;
  1838. log.slot = compat_log.slot;
  1839. log.padding1 = compat_log.padding1;
  1840. log.padding2 = compat_log.padding2;
  1841. log.dirty_bitmap = compat_ptr(compat_log.dirty_bitmap);
  1842. r = kvm_vm_ioctl_get_dirty_log(kvm, &log);
  1843. if (r)
  1844. goto out;
  1845. break;
  1846. }
  1847. default:
  1848. r = kvm_vm_ioctl(filp, ioctl, arg);
  1849. }
  1850. out:
  1851. return r;
  1852. }
  1853. #endif
  1854. static int kvm_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
  1855. {
  1856. struct page *page[1];
  1857. unsigned long addr;
  1858. int npages;
  1859. gfn_t gfn = vmf->pgoff;
  1860. struct kvm *kvm = vma->vm_file->private_data;
  1861. addr = gfn_to_hva(kvm, gfn);
  1862. if (kvm_is_error_hva(addr))
  1863. return VM_FAULT_SIGBUS;
  1864. npages = get_user_pages(current, current->mm, addr, 1, 1, 0, page,
  1865. NULL);
  1866. if (unlikely(npages != 1))
  1867. return VM_FAULT_SIGBUS;
  1868. vmf->page = page[0];
  1869. return 0;
  1870. }
  1871. static const struct vm_operations_struct kvm_vm_vm_ops = {
  1872. .fault = kvm_vm_fault,
  1873. };
  1874. static int kvm_vm_mmap(struct file *file, struct vm_area_struct *vma)
  1875. {
  1876. vma->vm_ops = &kvm_vm_vm_ops;
  1877. return 0;
  1878. }
  1879. static struct file_operations kvm_vm_fops = {
  1880. .release = kvm_vm_release,
  1881. .unlocked_ioctl = kvm_vm_ioctl,
  1882. #ifdef CONFIG_COMPAT
  1883. .compat_ioctl = kvm_vm_compat_ioctl,
  1884. #endif
  1885. .mmap = kvm_vm_mmap,
  1886. .llseek = noop_llseek,
  1887. };
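/*
 * KVM_CREATE_VM: build the VM, set up the coalesced MMIO ring when
 * configured, and hand userspace an anonymous-inode fd; on any failure the
 * reference taken by kvm_create_vm() is dropped again.
 */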
  1888. static int kvm_dev_ioctl_create_vm(unsigned long type)
  1889. {
  1890. int r;
  1891. struct kvm *kvm;
  1892. kvm = kvm_create_vm(type);
  1893. if (IS_ERR(kvm))
  1894. return PTR_ERR(kvm);
  1895. #ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
  1896. r = kvm_coalesced_mmio_init(kvm);
  1897. if (r < 0) {
  1898. kvm_put_kvm(kvm);
  1899. return r;
  1900. }
  1901. #endif
  1902. r = anon_inode_getfd("kvm-vm", &kvm_vm_fops, kvm, O_RDWR);
  1903. if (r < 0)
  1904. kvm_put_kvm(kvm);
  1905. return r;
  1906. }
  1907. static long kvm_dev_ioctl_check_extension_generic(long arg)
  1908. {
  1909. switch (arg) {
  1910. case KVM_CAP_USER_MEMORY:
  1911. case KVM_CAP_DESTROY_MEMORY_REGION_WORKS:
  1912. case KVM_CAP_JOIN_MEMORY_REGIONS_WORKS:
  1913. #ifdef CONFIG_KVM_APIC_ARCHITECTURE
  1914. case KVM_CAP_SET_BOOT_CPU_ID:
  1915. #endif
  1916. case KVM_CAP_INTERNAL_ERROR_DATA:
  1917. #ifdef CONFIG_HAVE_KVM_MSI
  1918. case KVM_CAP_SIGNAL_MSI:
  1919. #endif
  1920. return 1;
  1921. #ifdef KVM_CAP_IRQ_ROUTING
  1922. case KVM_CAP_IRQ_ROUTING:
  1923. return KVM_MAX_IRQ_ROUTES;
  1924. #endif
  1925. default:
  1926. break;
  1927. }
  1928. return kvm_dev_ioctl_check_extension(arg);
  1929. }
  1930. static long kvm_dev_ioctl(struct file *filp,
  1931. unsigned int ioctl, unsigned long arg)
  1932. {
  1933. long r = -EINVAL;
  1934. switch (ioctl) {
  1935. case KVM_GET_API_VERSION:
  1936. r = -EINVAL;
  1937. if (arg)
  1938. goto out;
  1939. r = KVM_API_VERSION;
  1940. break;
  1941. case KVM_CREATE_VM:
  1942. r = kvm_dev_ioctl_create_vm(arg);
  1943. break;
  1944. case KVM_CHECK_EXTENSION:
  1945. r = kvm_dev_ioctl_check_extension_generic(arg);
  1946. break;
  1947. case KVM_GET_VCPU_MMAP_SIZE:
  1948. r = -EINVAL;
  1949. if (arg)
  1950. goto out;
  1951. r = PAGE_SIZE; /* struct kvm_run */
  1952. #ifdef CONFIG_X86
  1953. r += PAGE_SIZE; /* pio data page */
  1954. #endif
  1955. #ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
  1956. r += PAGE_SIZE; /* coalesced mmio ring page */
  1957. #endif
  1958. break;
  1959. case KVM_TRACE_ENABLE:
  1960. case KVM_TRACE_PAUSE:
  1961. case KVM_TRACE_DISABLE:
  1962. r = -EOPNOTSUPP;
  1963. break;
  1964. default:
  1965. return kvm_arch_dev_ioctl(filp, ioctl, arg);
  1966. }
  1967. out:
  1968. return r;
  1969. }
  1970. static struct file_operations kvm_chardev_ops = {
  1971. .unlocked_ioctl = kvm_dev_ioctl,
  1972. .compat_ioctl = kvm_dev_ioctl,
  1973. .llseek = noop_llseek,
  1974. };
  1975. static struct miscdevice kvm_dev = {
  1976. KVM_MINOR,
  1977. "kvm",
  1978. &kvm_chardev_ops,
  1979. };
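/*
 * Per-CPU enabling of hardware virtualization. @cpus_hardware_enabled tracks
 * which CPUs currently have it on, @hardware_enable_failed counts failures
 * reported by the architecture, and @kvm_usage_count (the number of live VMs)
 * decides whether it needs to be on at all.
 */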
  1980. static void hardware_enable_nolock(void *junk)
  1981. {
  1982. int cpu = raw_smp_processor_id();
  1983. int r;
  1984. if (cpumask_test_cpu(cpu, cpus_hardware_enabled))
  1985. return;
  1986. cpumask_set_cpu(cpu, cpus_hardware_enabled);
  1987. r = kvm_arch_hardware_enable(NULL);
  1988. if (r) {
  1989. cpumask_clear_cpu(cpu, cpus_hardware_enabled);
  1990. atomic_inc(&hardware_enable_failed);
  1991. printk(KERN_INFO "kvm: enabling virtualization on "
  1992. "CPU%d failed\n", cpu);
  1993. }
  1994. }
  1995. static void hardware_enable(void *junk)
  1996. {
  1997. raw_spin_lock(&kvm_lock);
  1998. hardware_enable_nolock(junk);
  1999. raw_spin_unlock(&kvm_lock);
  2000. }
  2001. static void hardware_disable_nolock(void *junk)
  2002. {
  2003. int cpu = raw_smp_processor_id();
  2004. if (!cpumask_test_cpu(cpu, cpus_hardware_enabled))
  2005. return;
  2006. cpumask_clear_cpu(cpu, cpus_hardware_enabled);
  2007. kvm_arch_hardware_disable(NULL);
  2008. }
  2009. static void hardware_disable(void *junk)
  2010. {
  2011. raw_spin_lock(&kvm_lock);
  2012. hardware_disable_nolock(junk);
  2013. raw_spin_unlock(&kvm_lock);
  2014. }
  2015. static void hardware_disable_all_nolock(void)
  2016. {
  2017. BUG_ON(!kvm_usage_count);
  2018. kvm_usage_count--;
  2019. if (!kvm_usage_count)
  2020. on_each_cpu(hardware_disable_nolock, NULL, 1);
  2021. }
  2022. static void hardware_disable_all(void)
  2023. {
  2024. raw_spin_lock(&kvm_lock);
  2025. hardware_disable_all_nolock();
  2026. raw_spin_unlock(&kvm_lock);
  2027. }
  2028. static int hardware_enable_all(void)
  2029. {
  2030. int r = 0;
  2031. raw_spin_lock(&kvm_lock);
  2032. kvm_usage_count++;
  2033. if (kvm_usage_count == 1) {
  2034. atomic_set(&hardware_enable_failed, 0);
  2035. on_each_cpu(hardware_enable_nolock, NULL, 1);
  2036. if (atomic_read(&hardware_enable_failed)) {
  2037. hardware_disable_all_nolock();
  2038. r = -EBUSY;
  2039. }
  2040. }
  2041. raw_spin_unlock(&kvm_lock);
  2042. return r;
  2043. }
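/*
 * CPU hotplug callback: turn virtualization off on a dying CPU and back on
 * for a starting one, but only while at least one VM exists
 * (kvm_usage_count != 0).
 */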
  2044. static int kvm_cpu_hotplug(struct notifier_block *notifier, unsigned long val,
  2045. void *v)
  2046. {
  2047. int cpu = (long)v;
  2048. if (!kvm_usage_count)
  2049. return NOTIFY_OK;
  2050. val &= ~CPU_TASKS_FROZEN;
  2051. switch (val) {
  2052. case CPU_DYING:
  2053. printk(KERN_INFO "kvm: disabling virtualization on CPU%d\n",
  2054. cpu);
  2055. hardware_disable(NULL);
  2056. break;
  2057. case CPU_STARTING:
  2058. printk(KERN_INFO "kvm: enabling virtualization on CPU%d\n",
  2059. cpu);
  2060. hardware_enable(NULL);
  2061. break;
  2062. }
  2063. return NOTIFY_OK;
  2064. }
  2065. asmlinkage void kvm_spurious_fault(void)
  2066. {
  2067. /* Fault while not rebooting. We want the trace. */
  2068. BUG();
  2069. }
  2070. EXPORT_SYMBOL_GPL(kvm_spurious_fault);
  2071. static int kvm_reboot(struct notifier_block *notifier, unsigned long val,
  2072. void *v)
  2073. {
  2074. /*
  2075. * Some (well, at least mine) BIOSes hang on reboot if
  2076. * in vmx root mode.
  2077. *
  2078. * And Intel TXT requires VMX to be off on all CPUs when the system shuts down.
  2079. */
  2080. printk(KERN_INFO "kvm: exiting hardware virtualization\n");
  2081. kvm_rebooting = true;
  2082. on_each_cpu(hardware_disable_nolock, NULL, 1);
  2083. return NOTIFY_OK;
  2084. }
  2085. static struct notifier_block kvm_reboot_notifier = {
  2086. .notifier_call = kvm_reboot,
  2087. .priority = 0,
  2088. };
  2089. static void kvm_io_bus_destroy(struct kvm_io_bus *bus)
  2090. {
  2091. int i;
  2092. for (i = 0; i < bus->dev_count; i++) {
  2093. struct kvm_io_device *pos = bus->range[i].dev;
  2094. kvm_iodevice_destructor(pos);
  2095. }
  2096. kfree(bus);
  2097. }
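/*
 * Comparator used to keep the bus ranges sorted by guest physical address.
 * A lookup key compares equal to a registered range that fully contains it,
 * which is what kvm_io_bus_get_first_dev() relies on below.
 */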
  2098. int kvm_io_bus_sort_cmp(const void *p1, const void *p2)
  2099. {
  2100. const struct kvm_io_range *r1 = p1;
  2101. const struct kvm_io_range *r2 = p2;
  2102. if (r1->addr < r2->addr)
  2103. return -1;
  2104. if (r1->addr + r1->len > r2->addr + r2->len)
  2105. return 1;
  2106. return 0;
  2107. }
  2108. int kvm_io_bus_insert_dev(struct kvm_io_bus *bus, struct kvm_io_device *dev,
  2109. gpa_t addr, int len)
  2110. {
  2111. bus->range[bus->dev_count++] = (struct kvm_io_range) {
  2112. .addr = addr,
  2113. .len = len,
  2114. .dev = dev,
  2115. };
  2116. sort(bus->range, bus->dev_count, sizeof(struct kvm_io_range),
  2117. kvm_io_bus_sort_cmp, NULL);
  2118. return 0;
  2119. }
  2120. int kvm_io_bus_get_first_dev(struct kvm_io_bus *bus,
  2121. gpa_t addr, int len)
  2122. {
  2123. struct kvm_io_range *range, key;
  2124. int off;
  2125. key = (struct kvm_io_range) {
  2126. .addr = addr,
  2127. .len = len,
  2128. };
  2129. range = bsearch(&key, bus->range, bus->dev_count,
  2130. sizeof(struct kvm_io_range), kvm_io_bus_sort_cmp);
  2131. if (range == NULL)
  2132. return -ENOENT;
  2133. off = range - bus->range;
  2134. while (off > 0 && kvm_io_bus_sort_cmp(&key, &bus->range[off-1]) == 0)
  2135. off--;
  2136. return off;
  2137. }
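/*
 * Illustration (not from the original source): bsearch() may land on any
 * range that contains the key when several registered ranges overlap, so the
 * loop above walks back to the first such range; the write/read helpers below
 * then try each matching device in order until one accepts the access.
 */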
  2138. /* kvm_io_bus_write - called under kvm->slots_lock */
  2139. int kvm_io_bus_write(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
  2140. int len, const void *val)
  2141. {
  2142. int idx;
  2143. struct kvm_io_bus *bus;
  2144. struct kvm_io_range range;
  2145. range = (struct kvm_io_range) {
  2146. .addr = addr,
  2147. .len = len,
  2148. };
  2149. bus = srcu_dereference(kvm->buses[bus_idx], &kvm->srcu);
  2150. idx = kvm_io_bus_get_first_dev(bus, addr, len);
  2151. if (idx < 0)
  2152. return -EOPNOTSUPP;
  2153. while (idx < bus->dev_count &&
  2154. kvm_io_bus_sort_cmp(&range, &bus->range[idx]) == 0) {
  2155. if (!kvm_iodevice_write(bus->range[idx].dev, addr, len, val))
  2156. return 0;
  2157. idx++;
  2158. }
  2159. return -EOPNOTSUPP;
  2160. }
  2161. /* kvm_io_bus_read - called under kvm->slots_lock */
  2162. int kvm_io_bus_read(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
  2163. int len, void *val)
  2164. {
  2165. int idx;
  2166. struct kvm_io_bus *bus;
  2167. struct kvm_io_range range;
  2168. range = (struct kvm_io_range) {
  2169. .addr = addr,
  2170. .len = len,
  2171. };
  2172. bus = srcu_dereference(kvm->buses[bus_idx], &kvm->srcu);
  2173. idx = kvm_io_bus_get_first_dev(bus, addr, len);
  2174. if (idx < 0)
  2175. return -EOPNOTSUPP;
  2176. while (idx < bus->dev_count &&
  2177. kvm_io_bus_sort_cmp(&range, &bus->range[idx]) == 0) {
  2178. if (!kvm_iodevice_read(bus->range[idx].dev, addr, len, val))
  2179. return 0;
  2180. idx++;
  2181. }
  2182. return -EOPNOTSUPP;
  2183. }
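/*
 * The two helpers below never modify a bus in place: they build a copy with
 * the device added or removed, publish it with rcu_assign_pointer(), wait for
 * readers with synchronize_srcu_expedited(), and only then free the old bus.
 */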
  2184. /* Caller must hold slots_lock. */
  2185. int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
  2186. int len, struct kvm_io_device *dev)
  2187. {
  2188. struct kvm_io_bus *new_bus, *bus;
  2189. bus = kvm->buses[bus_idx];
  2190. if (bus->dev_count > NR_IOBUS_DEVS - 1)
  2191. return -ENOSPC;
  2192. new_bus = kzalloc(sizeof(*bus) + ((bus->dev_count + 1) *
  2193. sizeof(struct kvm_io_range)), GFP_KERNEL);
  2194. if (!new_bus)
  2195. return -ENOMEM;
  2196. memcpy(new_bus, bus, sizeof(*bus) + (bus->dev_count *
  2197. sizeof(struct kvm_io_range)));
  2198. kvm_io_bus_insert_dev(new_bus, dev, addr, len);
  2199. rcu_assign_pointer(kvm->buses[bus_idx], new_bus);
  2200. synchronize_srcu_expedited(&kvm->srcu);
  2201. kfree(bus);
  2202. return 0;
  2203. }
  2204. /* Caller must hold slots_lock. */
  2205. int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
  2206. struct kvm_io_device *dev)
  2207. {
  2208. int i, r;
  2209. struct kvm_io_bus *new_bus, *bus;
  2210. bus = kvm->buses[bus_idx];
  2211. r = -ENOENT;
  2212. for (i = 0; i < bus->dev_count; i++)
  2213. if (bus->range[i].dev == dev) {
  2214. r = 0;
  2215. break;
  2216. }
  2217. if (r)
  2218. return r;
  2219. new_bus = kzalloc(sizeof(*bus) + ((bus->dev_count - 1) *
  2220. sizeof(struct kvm_io_range)), GFP_KERNEL);
  2221. if (!new_bus)
  2222. return -ENOMEM;
  2223. memcpy(new_bus, bus, sizeof(*bus) + i * sizeof(struct kvm_io_range));
  2224. new_bus->dev_count--;
  2225. memcpy(new_bus->range + i, bus->range + i + 1,
  2226. (new_bus->dev_count - i) * sizeof(struct kvm_io_range));
  2227. rcu_assign_pointer(kvm->buses[bus_idx], new_bus);
  2228. synchronize_srcu_expedited(&kvm->srcu);
  2229. kfree(bus);
  2230. return r;
  2231. }
  2232. static struct notifier_block kvm_cpu_notifier = {
  2233. .notifier_call = kvm_cpu_hotplug,
  2234. };
  2235. static int vm_stat_get(void *_offset, u64 *val)
  2236. {
  2237. unsigned offset = (long)_offset;
  2238. struct kvm *kvm;
  2239. *val = 0;
  2240. raw_spin_lock(&kvm_lock);
  2241. list_for_each_entry(kvm, &vm_list, vm_list)
  2242. *val += *(u32 *)((void *)kvm + offset);
  2243. raw_spin_unlock(&kvm_lock);
  2244. return 0;
  2245. }
  2246. DEFINE_SIMPLE_ATTRIBUTE(vm_stat_fops, vm_stat_get, NULL, "%llu\n");
  2247. static int vcpu_stat_get(void *_offset, u64 *val)
  2248. {
  2249. unsigned offset = (long)_offset;
  2250. struct kvm *kvm;
  2251. struct kvm_vcpu *vcpu;
  2252. int i;
  2253. *val = 0;
  2254. raw_spin_lock(&kvm_lock);
  2255. list_for_each_entry(kvm, &vm_list, vm_list)
  2256. kvm_for_each_vcpu(i, vcpu, kvm)
  2257. *val += *(u32 *)((void *)vcpu + offset);
  2258. raw_spin_unlock(&kvm_lock);
  2259. return 0;
  2260. }
  2261. DEFINE_SIMPLE_ATTRIBUTE(vcpu_stat_fops, vcpu_stat_get, NULL, "%llu\n");
  2262. static const struct file_operations *stat_fops[] = {
  2263. [KVM_STAT_VCPU] = &vcpu_stat_fops,
  2264. [KVM_STAT_VM] = &vm_stat_fops,
  2265. };
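/*
 * Create the debugfs files under /sys/kernel/debug/kvm; each file reports the
 * corresponding counter summed over all VMs (or over all vcpus of all VMs)
 * via vm_stat_get()/vcpu_stat_get() above.
 */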
  2266. static int kvm_init_debug(void)
  2267. {
  2268. int r = -EFAULT;
  2269. struct kvm_stats_debugfs_item *p;
  2270. kvm_debugfs_dir = debugfs_create_dir("kvm", NULL);
  2271. if (kvm_debugfs_dir == NULL)
  2272. goto out;
  2273. for (p = debugfs_entries; p->name; ++p) {
  2274. p->dentry = debugfs_create_file(p->name, 0444, kvm_debugfs_dir,
  2275. (void *)(long)p->offset,
  2276. stat_fops[p->kind]);
  2277. if (p->dentry == NULL)
  2278. goto out_dir;
  2279. }
  2280. return 0;
  2281. out_dir:
  2282. debugfs_remove_recursive(kvm_debugfs_dir);
  2283. out:
  2284. return r;
  2285. }
  2286. static void kvm_exit_debug(void)
  2287. {
  2288. struct kvm_stats_debugfs_item *p;
  2289. for (p = debugfs_entries; p->name; ++p)
  2290. debugfs_remove(p->dentry);
  2291. debugfs_remove(kvm_debugfs_dir);
  2292. }
  2293. static int kvm_suspend(void)
  2294. {
  2295. if (kvm_usage_count)
  2296. hardware_disable_nolock(NULL);
  2297. return 0;
  2298. }
  2299. static void kvm_resume(void)
  2300. {
  2301. if (kvm_usage_count) {
  2302. WARN_ON(raw_spin_is_locked(&kvm_lock));
  2303. hardware_enable_nolock(NULL);
  2304. }
  2305. }
  2306. static struct syscore_ops kvm_syscore_ops = {
  2307. .suspend = kvm_suspend,
  2308. .resume = kvm_resume,
  2309. };
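/*
 * Preempt notifiers: whenever a task that has a vcpu loaded is scheduled out
 * or back in, the architecture state is put and reloaded so the vcpu can move
 * between physical CPUs transparently.
 */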
  2310. static inline
  2311. struct kvm_vcpu *preempt_notifier_to_vcpu(struct preempt_notifier *pn)
  2312. {
  2313. return container_of(pn, struct kvm_vcpu, preempt_notifier);
  2314. }
  2315. static void kvm_sched_in(struct preempt_notifier *pn, int cpu)
  2316. {
  2317. struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);
  2318. kvm_arch_vcpu_load(vcpu, cpu);
  2319. }
  2320. static void kvm_sched_out(struct preempt_notifier *pn,
  2321. struct task_struct *next)
  2322. {
  2323. struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);
  2324. kvm_arch_vcpu_put(vcpu);
  2325. }
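/*
 * Module entry point, called by each architecture module with its opaque init
 * data and vcpu allocation parameters: set up the arch, check every online
 * CPU for compatibility, register CPU-hotplug/reboot/syscore hooks, create
 * the vcpu cache and async-PF machinery, and finally expose /dev/kvm and the
 * debugfs statistics.
 */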
  2326. int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
  2327. struct module *module)
  2328. {
  2329. int r;
  2330. int cpu;
  2331. r = kvm_arch_init(opaque);
  2332. if (r)
  2333. goto out_fail;
  2334. if (!zalloc_cpumask_var(&cpus_hardware_enabled, GFP_KERNEL)) {
  2335. r = -ENOMEM;
  2336. goto out_free_0;
  2337. }
  2338. r = kvm_arch_hardware_setup();
  2339. if (r < 0)
  2340. goto out_free_0a;
  2341. for_each_online_cpu(cpu) {
  2342. smp_call_function_single(cpu,
  2343. kvm_arch_check_processor_compat,
  2344. &r, 1);
  2345. if (r < 0)
  2346. goto out_free_1;
  2347. }
  2348. r = register_cpu_notifier(&kvm_cpu_notifier);
  2349. if (r)
  2350. goto out_free_2;
  2351. register_reboot_notifier(&kvm_reboot_notifier);
  2352. /* A kmem cache lets us meet the alignment requirements of fx_save. */
  2353. if (!vcpu_align)
  2354. vcpu_align = __alignof__(struct kvm_vcpu);
  2355. kvm_vcpu_cache = kmem_cache_create("kvm_vcpu", vcpu_size, vcpu_align,
  2356. 0, NULL);
  2357. if (!kvm_vcpu_cache) {
  2358. r = -ENOMEM;
  2359. goto out_free_3;
  2360. }
  2361. r = kvm_async_pf_init();
  2362. if (r)
  2363. goto out_free;
  2364. kvm_chardev_ops.owner = module;
  2365. kvm_vm_fops.owner = module;
  2366. kvm_vcpu_fops.owner = module;
  2367. r = misc_register(&kvm_dev);
  2368. if (r) {
  2369. printk(KERN_ERR "kvm: misc device register failed\n");
  2370. goto out_unreg;
  2371. }
  2372. register_syscore_ops(&kvm_syscore_ops);
  2373. kvm_preempt_ops.sched_in = kvm_sched_in;
  2374. kvm_preempt_ops.sched_out = kvm_sched_out;
  2375. r = kvm_init_debug();
  2376. if (r) {
  2377. printk(KERN_ERR "kvm: create debugfs files failed\n");
  2378. goto out_undebugfs;
  2379. }
  2380. return 0;
  2381. out_undebugfs:
  2382. unregister_syscore_ops(&kvm_syscore_ops);
  2383. out_unreg:
  2384. kvm_async_pf_deinit();
  2385. out_free:
  2386. kmem_cache_destroy(kvm_vcpu_cache);
  2387. out_free_3:
  2388. unregister_reboot_notifier(&kvm_reboot_notifier);
  2389. unregister_cpu_notifier(&kvm_cpu_notifier);
  2390. out_free_2:
  2391. out_free_1:
  2392. kvm_arch_hardware_unsetup();
  2393. out_free_0a:
  2394. free_cpumask_var(cpus_hardware_enabled);
  2395. out_free_0:
  2396. kvm_arch_exit();
  2397. out_fail:
  2398. return r;
  2399. }
  2400. EXPORT_SYMBOL_GPL(kvm_init);
  2401. void kvm_exit(void)
  2402. {
  2403. kvm_exit_debug();
  2404. misc_deregister(&kvm_dev);
  2405. kmem_cache_destroy(kvm_vcpu_cache);
  2406. kvm_async_pf_deinit();
  2407. unregister_syscore_ops(&kvm_syscore_ops);
  2408. unregister_reboot_notifier(&kvm_reboot_notifier);
  2409. unregister_cpu_notifier(&kvm_cpu_notifier);
  2410. on_each_cpu(hardware_disable_nolock, NULL, 1);
  2411. kvm_arch_hardware_unsetup();
  2412. kvm_arch_exit();
  2413. free_cpumask_var(cpus_hardware_enabled);
  2414. }
  2415. EXPORT_SYMBOL_GPL(kvm_exit);