kvm_main.c

/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This module enables machines with Intel VT-x extensions to run virtual
 * machines without emulation or binary translation.
 *
 * Copyright (C) 2006 Qumranet, Inc.
 *
 * Authors:
 *   Avi Kivity   <avi@qumranet.com>
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 *
 */

#include "iodev.h"

#include <linux/kvm_host.h>
#include <linux/kvm.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/percpu.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/miscdevice.h>
#include <linux/vmalloc.h>
#include <linux/reboot.h>
#include <linux/debugfs.h>
#include <linux/highmem.h>
#include <linux/file.h>
#include <linux/sysdev.h>
#include <linux/cpu.h>
#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/smp.h>
#include <linux/anon_inodes.h>
#include <linux/profile.h>
#include <linux/kvm_para.h>
#include <linux/pagemap.h>
#include <linux/mman.h>
#include <linux/swap.h>

#include <asm/processor.h>
#include <asm/io.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>

#ifdef CONFIG_X86
#include <asm/msidef.h>
#endif

#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
#include "coalesced_mmio.h"
#endif

#ifdef KVM_CAP_DEVICE_ASSIGNMENT
#include <linux/pci.h>
#include <linux/interrupt.h>
#include "irq.h"
#endif

MODULE_AUTHOR("Qumranet");
MODULE_LICENSE("GPL");

static int msi2intx = 1;
module_param(msi2intx, bool, 0);

DEFINE_SPINLOCK(kvm_lock);
LIST_HEAD(vm_list);

static cpumask_var_t cpus_hardware_enabled;

struct kmem_cache *kvm_vcpu_cache;
EXPORT_SYMBOL_GPL(kvm_vcpu_cache);

static __read_mostly struct preempt_ops kvm_preempt_ops;

struct dentry *kvm_debugfs_dir;

static long kvm_vcpu_ioctl(struct file *file, unsigned int ioctl,
                           unsigned long arg);

static bool kvm_rebooting;

#ifdef KVM_CAP_DEVICE_ASSIGNMENT

#ifdef CONFIG_X86
static void assigned_device_msi_dispatch(struct kvm_assigned_dev_kernel *dev)
{
        int vcpu_id;
        struct kvm_vcpu *vcpu;
        struct kvm_ioapic *ioapic = ioapic_irqchip(dev->kvm);
        int dest_id = (dev->guest_msi.address_lo & MSI_ADDR_DEST_ID_MASK)
                        >> MSI_ADDR_DEST_ID_SHIFT;
        int vector = (dev->guest_msi.data & MSI_DATA_VECTOR_MASK)
                        >> MSI_DATA_VECTOR_SHIFT;
        int dest_mode = test_bit(MSI_ADDR_DEST_MODE_SHIFT,
                                (unsigned long *)&dev->guest_msi.address_lo);
        int trig_mode = test_bit(MSI_DATA_TRIGGER_SHIFT,
                                (unsigned long *)&dev->guest_msi.data);
        int delivery_mode = test_bit(MSI_DATA_DELIVERY_MODE_SHIFT,
                                (unsigned long *)&dev->guest_msi.data);
        u32 deliver_bitmask;

        BUG_ON(!ioapic);

        deliver_bitmask = kvm_ioapic_get_delivery_bitmask(ioapic,
                                dest_id, dest_mode);
        /* IOAPIC delivery mode value is the same as MSI here */
        switch (delivery_mode) {
        case IOAPIC_LOWEST_PRIORITY:
                vcpu = kvm_get_lowest_prio_vcpu(ioapic->kvm, vector,
                                deliver_bitmask);
                if (vcpu != NULL)
                        kvm_apic_set_irq(vcpu, vector, trig_mode);
                else
                        printk(KERN_INFO "kvm: null lowest priority vcpu!\n");
                break;
        case IOAPIC_FIXED:
                for (vcpu_id = 0; deliver_bitmask != 0; vcpu_id++) {
                        if (!(deliver_bitmask & (1 << vcpu_id)))
                                continue;
                        deliver_bitmask &= ~(1 << vcpu_id);
                        vcpu = ioapic->kvm->vcpus[vcpu_id];
                        if (vcpu)
                                kvm_apic_set_irq(vcpu, vector, trig_mode);
                }
                break;
        default:
                printk(KERN_INFO "kvm: unsupported MSI delivery mode\n");
        }
}
#else
static void assigned_device_msi_dispatch(struct kvm_assigned_dev_kernel *dev) {}
#endif
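
/*
 * Illustrative note (not part of the original file): the dispatch above
 * decodes the guest-programmed MSI message. On x86 the destination APIC
 * ID sits in bits 19:12 of the message address and the vector in bits
 * 7:0 of the message data, so for example a guest MSI with
 * address_lo = 0xfee01000 and data = 0x0031 targets APIC ID 1 with
 * vector 0x31 in fixed delivery mode.
 */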
static struct kvm_assigned_dev_kernel *kvm_find_assigned_dev(struct list_head *head,
                                                      int assigned_dev_id)
{
        struct list_head *ptr;
        struct kvm_assigned_dev_kernel *match;

        list_for_each(ptr, head) {
                match = list_entry(ptr, struct kvm_assigned_dev_kernel, list);
                if (match->assigned_dev_id == assigned_dev_id)
                        return match;
        }
        return NULL;
}

static void kvm_assigned_dev_interrupt_work_handler(struct work_struct *work)
{
        struct kvm_assigned_dev_kernel *assigned_dev;

        assigned_dev = container_of(work, struct kvm_assigned_dev_kernel,
                                    interrupt_work);

        /* This is taken to safely inject irq inside the guest. When
         * the interrupt injection (or the ioapic code) uses a
         * finer-grained lock, update this
         */
        mutex_lock(&assigned_dev->kvm->lock);
        if (assigned_dev->irq_requested_type & KVM_ASSIGNED_DEV_GUEST_INTX)
                kvm_set_irq(assigned_dev->kvm,
                            assigned_dev->irq_source_id,
                            assigned_dev->guest_irq, 1);
        else if (assigned_dev->irq_requested_type &
                                KVM_ASSIGNED_DEV_GUEST_MSI) {
                assigned_device_msi_dispatch(assigned_dev);
                enable_irq(assigned_dev->host_irq);
                assigned_dev->host_irq_disabled = false;
        }
        mutex_unlock(&assigned_dev->kvm->lock);
        kvm_put_kvm(assigned_dev->kvm);
}

static irqreturn_t kvm_assigned_dev_intr(int irq, void *dev_id)
{
        struct kvm_assigned_dev_kernel *assigned_dev =
                (struct kvm_assigned_dev_kernel *) dev_id;

        kvm_get_kvm(assigned_dev->kvm);
        schedule_work(&assigned_dev->interrupt_work);
        disable_irq_nosync(irq);
        assigned_dev->host_irq_disabled = true;
        return IRQ_HANDLED;
}
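
/*
 * Illustrative summary (not part of the original file): a host interrupt
 * from an assigned device is forwarded to the guest in two halves. The
 * hard-irq handler above only pins the VM, queues interrupt_work and
 * masks the host line; the work handler then injects the IRQ into the
 * guest under kvm->lock, and the line is unmasked either there (MSI) or
 * from the guest's EOI via kvm_assigned_dev_ack_irq() below (INTx).
 */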
/* Ack the irq line for an assigned device */
static void kvm_assigned_dev_ack_irq(struct kvm_irq_ack_notifier *kian)
{
        struct kvm_assigned_dev_kernel *dev;

        if (kian->gsi == -1)
                return;

        dev = container_of(kian, struct kvm_assigned_dev_kernel,
                           ack_notifier);
        kvm_set_irq(dev->kvm, dev->irq_source_id, dev->guest_irq, 0);

        /* The guest irq may be shared so this ack may be
         * from another device.
         */
        if (dev->host_irq_disabled) {
                enable_irq(dev->host_irq);
                dev->host_irq_disabled = false;
        }
}

static void kvm_free_assigned_irq(struct kvm *kvm,
                                  struct kvm_assigned_dev_kernel *assigned_dev)
{
        if (!irqchip_in_kernel(kvm))
                return;

        kvm_unregister_irq_ack_notifier(&assigned_dev->ack_notifier);

        if (assigned_dev->irq_source_id != -1)
                kvm_free_irq_source_id(kvm, assigned_dev->irq_source_id);
        assigned_dev->irq_source_id = -1;

        if (!assigned_dev->irq_requested_type)
                return;

        if (cancel_work_sync(&assigned_dev->interrupt_work))
                /* We had pending work. That means we will have to take
                 * care of kvm_put_kvm.
                 */
                kvm_put_kvm(kvm);

        free_irq(assigned_dev->host_irq, (void *)assigned_dev);

        if (assigned_dev->irq_requested_type & KVM_ASSIGNED_DEV_HOST_MSI)
                pci_disable_msi(assigned_dev->dev);

        assigned_dev->irq_requested_type = 0;
}

static void kvm_free_assigned_device(struct kvm *kvm,
                                     struct kvm_assigned_dev_kernel
                                     *assigned_dev)
{
        kvm_free_assigned_irq(kvm, assigned_dev);

        pci_reset_function(assigned_dev->dev);

        pci_release_regions(assigned_dev->dev);
        pci_disable_device(assigned_dev->dev);
        pci_dev_put(assigned_dev->dev);

        list_del(&assigned_dev->list);
        kfree(assigned_dev);
}

void kvm_free_all_assigned_devices(struct kvm *kvm)
{
        struct list_head *ptr, *ptr2;
        struct kvm_assigned_dev_kernel *assigned_dev;

        list_for_each_safe(ptr, ptr2, &kvm->arch.assigned_dev_head) {
                assigned_dev = list_entry(ptr,
                                          struct kvm_assigned_dev_kernel,
                                          list);

                kvm_free_assigned_device(kvm, assigned_dev);
        }
}

static int assigned_device_update_intx(struct kvm *kvm,
                        struct kvm_assigned_dev_kernel *adev,
                        struct kvm_assigned_irq *airq)
{
        adev->guest_irq = airq->guest_irq;
        adev->ack_notifier.gsi = airq->guest_irq;

        if (adev->irq_requested_type & KVM_ASSIGNED_DEV_HOST_INTX)
                return 0;

        if (irqchip_in_kernel(kvm)) {
                if (!msi2intx &&
                    adev->irq_requested_type & KVM_ASSIGNED_DEV_HOST_MSI) {
                        free_irq(adev->host_irq, (void *)kvm);
                        pci_disable_msi(adev->dev);
                }

                if (!capable(CAP_SYS_RAWIO))
                        return -EPERM;

                if (airq->host_irq)
                        adev->host_irq = airq->host_irq;
                else
                        adev->host_irq = adev->dev->irq;

                /* Even though this is PCI, we don't want to use shared
                 * interrupts. Sharing host devices with guest-assigned devices
                 * on the same interrupt line is not a happy situation: there
                 * are going to be long delays in accepting, acking, etc.
                 */
                if (request_irq(adev->host_irq, kvm_assigned_dev_intr,
                                0, "kvm_assigned_intx_device", (void *)adev))
                        return -EIO;
        }

        adev->irq_requested_type = KVM_ASSIGNED_DEV_GUEST_INTX |
                                   KVM_ASSIGNED_DEV_HOST_INTX;
        return 0;
}
#ifdef CONFIG_X86
static int assigned_device_update_msi(struct kvm *kvm,
                        struct kvm_assigned_dev_kernel *adev,
                        struct kvm_assigned_irq *airq)
{
        int r;

        if (airq->flags & KVM_DEV_IRQ_ASSIGN_ENABLE_MSI) {
                /* x86 doesn't care about the upper address of the guest
                 * msi message addr */
                adev->irq_requested_type |= KVM_ASSIGNED_DEV_GUEST_MSI;
                adev->irq_requested_type &= ~KVM_ASSIGNED_DEV_GUEST_INTX;
                adev->guest_msi.address_lo = airq->guest_msi.addr_lo;
                adev->guest_msi.data = airq->guest_msi.data;
                adev->ack_notifier.gsi = -1;
        } else if (msi2intx) {
                adev->irq_requested_type |= KVM_ASSIGNED_DEV_GUEST_INTX;
                adev->irq_requested_type &= ~KVM_ASSIGNED_DEV_GUEST_MSI;
                adev->guest_irq = airq->guest_irq;
                adev->ack_notifier.gsi = airq->guest_irq;
        }

        if (adev->irq_requested_type & KVM_ASSIGNED_DEV_HOST_MSI)
                return 0;

        if (irqchip_in_kernel(kvm)) {
                if (!msi2intx) {
                        if (adev->irq_requested_type &
                                        KVM_ASSIGNED_DEV_HOST_INTX)
                                free_irq(adev->host_irq, (void *)adev);

                        r = pci_enable_msi(adev->dev);
                        if (r)
                                return r;
                }

                adev->host_irq = adev->dev->irq;
                if (request_irq(adev->host_irq, kvm_assigned_dev_intr, 0,
                                "kvm_assigned_msi_device", (void *)adev))
                        return -EIO;
        }

        if (!msi2intx)
                adev->irq_requested_type = KVM_ASSIGNED_DEV_GUEST_MSI;

        adev->irq_requested_type |= KVM_ASSIGNED_DEV_HOST_MSI;
        return 0;
}
#endif
static int kvm_vm_ioctl_assign_irq(struct kvm *kvm,
                                   struct kvm_assigned_irq
                                   *assigned_irq)
{
        int r = 0;
        struct kvm_assigned_dev_kernel *match;

        mutex_lock(&kvm->lock);

        match = kvm_find_assigned_dev(&kvm->arch.assigned_dev_head,
                                      assigned_irq->assigned_dev_id);
        if (!match) {
                mutex_unlock(&kvm->lock);
                return -EINVAL;
        }

        if (!match->irq_requested_type) {
                INIT_WORK(&match->interrupt_work,
                                kvm_assigned_dev_interrupt_work_handler);
                if (irqchip_in_kernel(kvm)) {
                        /* Register ack notifier */
                        match->ack_notifier.gsi = -1;
                        match->ack_notifier.irq_acked =
                                        kvm_assigned_dev_ack_irq;
                        kvm_register_irq_ack_notifier(kvm,
                                        &match->ack_notifier);

                        /* Request IRQ source ID */
                        r = kvm_request_irq_source_id(kvm);
                        if (r < 0)
                                goto out_release;
                        else
                                match->irq_source_id = r;

#ifdef CONFIG_X86
                        /* Determine host device irq type, we can know the
                         * result from dev->msi_enabled */
                        if (msi2intx)
                                pci_enable_msi(match->dev);
#endif
                }
        }

        if ((!msi2intx &&
             (assigned_irq->flags & KVM_DEV_IRQ_ASSIGN_ENABLE_MSI)) ||
            (msi2intx && match->dev->msi_enabled)) {
#ifdef CONFIG_X86
                r = assigned_device_update_msi(kvm, match, assigned_irq);
                if (r) {
                        printk(KERN_WARNING "kvm: failed to enable "
                                        "MSI device!\n");
                        goto out_release;
                }
#else
                r = -ENOTTY;
#endif
        } else if (assigned_irq->host_irq == 0 && match->dev->irq == 0) {
                /* Host device IRQ 0 means don't support INTx */
                if (!msi2intx) {
                        printk(KERN_WARNING
                               "kvm: waiting for device to enable MSI!\n");
                        r = 0;
                } else {
                        printk(KERN_WARNING
                               "kvm: failed to enable MSI device!\n");
                        r = -ENOTTY;
                        goto out_release;
                }
        } else {
                /* Non-sharing INTx mode */
                r = assigned_device_update_intx(kvm, match, assigned_irq);
                if (r) {
                        printk(KERN_WARNING "kvm: failed to enable "
                                        "INTx device!\n");
                        goto out_release;
                }
        }

        mutex_unlock(&kvm->lock);
        return r;
out_release:
        mutex_unlock(&kvm->lock);
        kvm_free_assigned_device(kvm, match);
        return r;
}
static int kvm_vm_ioctl_assign_device(struct kvm *kvm,
                                      struct kvm_assigned_pci_dev *assigned_dev)
{
        int r = 0;
        struct kvm_assigned_dev_kernel *match;
        struct pci_dev *dev;

        mutex_lock(&kvm->lock);

        match = kvm_find_assigned_dev(&kvm->arch.assigned_dev_head,
                                      assigned_dev->assigned_dev_id);
        if (match) {
                /* device already assigned */
                r = -EINVAL;
                goto out;
        }

        match = kzalloc(sizeof(struct kvm_assigned_dev_kernel), GFP_KERNEL);
        if (match == NULL) {
                printk(KERN_INFO "%s: Couldn't allocate memory\n",
                       __func__);
                r = -ENOMEM;
                goto out;
        }
        dev = pci_get_bus_and_slot(assigned_dev->busnr,
                                   assigned_dev->devfn);
        if (!dev) {
                printk(KERN_INFO "%s: host device not found\n", __func__);
                r = -EINVAL;
                goto out_free;
        }
        if (pci_enable_device(dev)) {
                printk(KERN_INFO "%s: Could not enable PCI device\n", __func__);
                r = -EBUSY;
                goto out_put;
        }
        r = pci_request_regions(dev, "kvm_assigned_device");
        if (r) {
                printk(KERN_INFO "%s: Could not get access to device regions\n",
                       __func__);
                goto out_disable;
        }

        pci_reset_function(dev);

        match->assigned_dev_id = assigned_dev->assigned_dev_id;
        match->host_busnr = assigned_dev->busnr;
        match->host_devfn = assigned_dev->devfn;
        match->dev = dev;
        match->irq_source_id = -1;
        match->kvm = kvm;

        list_add(&match->list, &kvm->arch.assigned_dev_head);

        if (assigned_dev->flags & KVM_DEV_ASSIGN_ENABLE_IOMMU) {
                if (!kvm->arch.intel_iommu_domain) {
                        r = kvm_iommu_map_guest(kvm);
                        if (r)
                                goto out_list_del;
                }
                r = kvm_assign_device(kvm, match);
                if (r)
                        goto out_list_del;
        }

out:
        mutex_unlock(&kvm->lock);
        return r;
out_list_del:
        list_del(&match->list);
        pci_release_regions(dev);
out_disable:
        pci_disable_device(dev);
out_put:
        pci_dev_put(dev);
out_free:
        kfree(match);
        mutex_unlock(&kvm->lock);
        return r;
}
#endif
static inline int valid_vcpu(int n)
{
        return likely(n >= 0 && n < KVM_MAX_VCPUS);
}

inline int kvm_is_mmio_pfn(pfn_t pfn)
{
        if (pfn_valid(pfn))
                return PageReserved(pfn_to_page(pfn));

        return true;
}

/*
 * Switches to specified vcpu, until a matching vcpu_put()
 */
void vcpu_load(struct kvm_vcpu *vcpu)
{
        int cpu;

        mutex_lock(&vcpu->mutex);
        cpu = get_cpu();
        preempt_notifier_register(&vcpu->preempt_notifier);
        kvm_arch_vcpu_load(vcpu, cpu);
        put_cpu();
}

void vcpu_put(struct kvm_vcpu *vcpu)
{
        preempt_disable();
        kvm_arch_vcpu_put(vcpu);
        preempt_notifier_unregister(&vcpu->preempt_notifier);
        preempt_enable();
        mutex_unlock(&vcpu->mutex);
}
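
/*
 * Illustrative usage (not part of the original file): callers bracket
 * access to per-vcpu hardware state with this pair, e.g.
 *
 *      vcpu_load(vcpu);
 *      r = kvm_arch_vcpu_ioctl_get_regs(vcpu, kvm_regs);
 *      vcpu_put(vcpu);
 *
 * The preempt notifier registered in vcpu_load() lets the arch code
 * save and restore guest state if the task is scheduled out in between.
 */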
static void ack_flush(void *_completed)
{
}

static bool make_all_cpus_request(struct kvm *kvm, unsigned int req)
{
        int i, cpu, me;
        cpumask_var_t cpus;
        bool called = true;
        struct kvm_vcpu *vcpu;

        if (alloc_cpumask_var(&cpus, GFP_ATOMIC))
                cpumask_clear(cpus);

        me = get_cpu();
        for (i = 0; i < KVM_MAX_VCPUS; ++i) {
                vcpu = kvm->vcpus[i];
                if (!vcpu)
                        continue;
                if (test_and_set_bit(req, &vcpu->requests))
                        continue;
                cpu = vcpu->cpu;
                if (cpus != NULL && cpu != -1 && cpu != me)
                        cpumask_set_cpu(cpu, cpus);
        }
        if (unlikely(cpus == NULL))
                smp_call_function_many(cpu_online_mask, ack_flush, NULL, 1);
        else if (!cpumask_empty(cpus))
                smp_call_function_many(cpus, ack_flush, NULL, 1);
        else
                called = false;
        put_cpu();
        free_cpumask_var(cpus);
        return called;
}
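
/*
 * Illustrative note (not part of the original file): the request bit set
 * above is consumed on the vcpu side. The arch run loop typically polls
 * it before entering the guest, along the lines of
 *
 *      if (test_and_clear_bit(KVM_REQ_TLB_FLUSH, &vcpu->requests))
 *              kvm_x86_ops->tlb_flush(vcpu);
 *
 * and the empty ack_flush() IPI only exists to kick running vcpus out of
 * guest mode so they notice the new request.
 */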
void kvm_flush_remote_tlbs(struct kvm *kvm)
{
        if (make_all_cpus_request(kvm, KVM_REQ_TLB_FLUSH))
                ++kvm->stat.remote_tlb_flush;
}

void kvm_reload_remote_mmus(struct kvm *kvm)
{
        make_all_cpus_request(kvm, KVM_REQ_MMU_RELOAD);
}

int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id)
{
        struct page *page;
        int r;

        mutex_init(&vcpu->mutex);
        vcpu->cpu = -1;
        vcpu->kvm = kvm;
        vcpu->vcpu_id = id;
        init_waitqueue_head(&vcpu->wq);

        page = alloc_page(GFP_KERNEL | __GFP_ZERO);
        if (!page) {
                r = -ENOMEM;
                goto fail;
        }
        vcpu->run = page_address(page);

        r = kvm_arch_vcpu_init(vcpu);
        if (r < 0)
                goto fail_free_run;
        return 0;

fail_free_run:
        free_page((unsigned long)vcpu->run);
fail:
        return r;
}
EXPORT_SYMBOL_GPL(kvm_vcpu_init);

void kvm_vcpu_uninit(struct kvm_vcpu *vcpu)
{
        kvm_arch_vcpu_uninit(vcpu);
        free_page((unsigned long)vcpu->run);
}
EXPORT_SYMBOL_GPL(kvm_vcpu_uninit);
#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
static inline struct kvm *mmu_notifier_to_kvm(struct mmu_notifier *mn)
{
        return container_of(mn, struct kvm, mmu_notifier);
}

static void kvm_mmu_notifier_invalidate_page(struct mmu_notifier *mn,
                                             struct mm_struct *mm,
                                             unsigned long address)
{
        struct kvm *kvm = mmu_notifier_to_kvm(mn);
        int need_tlb_flush;

        /*
         * When ->invalidate_page runs, the linux pte has been zapped
         * already but the page is still allocated until
         * ->invalidate_page returns. So if we increase the sequence
         * here the kvm page fault will notice if the spte can't be
         * established because the page is going to be freed. If
         * instead the kvm page fault establishes the spte before
         * ->invalidate_page runs, kvm_unmap_hva will release it
         * before returning.
         *
         * The sequence increase only needs to be seen at spin_unlock
         * time, and not at spin_lock time.
         *
         * Increasing the sequence after the spin_unlock would be
         * unsafe because the kvm page fault could then establish the
         * pte after kvm_unmap_hva returned, without noticing the page
         * is going to be freed.
         */
        spin_lock(&kvm->mmu_lock);
        kvm->mmu_notifier_seq++;
        need_tlb_flush = kvm_unmap_hva(kvm, address);
        spin_unlock(&kvm->mmu_lock);

        /* we have to flush the tlb before the pages can be freed */
        if (need_tlb_flush)
                kvm_flush_remote_tlbs(kvm);
}

static void kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
                                                    struct mm_struct *mm,
                                                    unsigned long start,
                                                    unsigned long end)
{
        struct kvm *kvm = mmu_notifier_to_kvm(mn);
        int need_tlb_flush = 0;

        spin_lock(&kvm->mmu_lock);
        /*
         * The count increase must become visible at unlock time as no
         * spte can be established without taking the mmu_lock and
         * count is also read inside the mmu_lock critical section.
         */
        kvm->mmu_notifier_count++;
        for (; start < end; start += PAGE_SIZE)
                need_tlb_flush |= kvm_unmap_hva(kvm, start);
        spin_unlock(&kvm->mmu_lock);

        /* we have to flush the tlb before the pages can be freed */
        if (need_tlb_flush)
                kvm_flush_remote_tlbs(kvm);
}

static void kvm_mmu_notifier_invalidate_range_end(struct mmu_notifier *mn,
                                                  struct mm_struct *mm,
                                                  unsigned long start,
                                                  unsigned long end)
{
        struct kvm *kvm = mmu_notifier_to_kvm(mn);

        spin_lock(&kvm->mmu_lock);
        /*
         * This sequence increase will notify the kvm page fault that
         * the page that is going to be mapped in the spte could have
         * been freed.
         */
        kvm->mmu_notifier_seq++;
        /*
         * The above sequence increase must be visible before the
         * below count decrease but both values are read by the kvm
         * page fault under mmu_lock spinlock so we don't need to add
         * an smp_wmb() here in between the two.
         */
        kvm->mmu_notifier_count--;
        spin_unlock(&kvm->mmu_lock);

        BUG_ON(kvm->mmu_notifier_count < 0);
}

static int kvm_mmu_notifier_clear_flush_young(struct mmu_notifier *mn,
                                              struct mm_struct *mm,
                                              unsigned long address)
{
        struct kvm *kvm = mmu_notifier_to_kvm(mn);
        int young;

        spin_lock(&kvm->mmu_lock);
        young = kvm_age_hva(kvm, address);
        spin_unlock(&kvm->mmu_lock);

        if (young)
                kvm_flush_remote_tlbs(kvm);

        return young;
}

static const struct mmu_notifier_ops kvm_mmu_notifier_ops = {
        .invalidate_page        = kvm_mmu_notifier_invalidate_page,
        .invalidate_range_start = kvm_mmu_notifier_invalidate_range_start,
        .invalidate_range_end   = kvm_mmu_notifier_invalidate_range_end,
        .clear_flush_young      = kvm_mmu_notifier_clear_flush_young,
};
#endif /* CONFIG_MMU_NOTIFIER && KVM_ARCH_WANT_MMU_NOTIFIER */
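
/*
 * Illustrative sketch (not part of the original file): the page fault
 * side pairs with the seq/count updates above roughly like this:
 *
 *      seq = kvm->mmu_notifier_seq;
 *      smp_rmb();
 *      pfn = gfn_to_pfn(kvm, gfn);             // may sleep, no mmu_lock
 *      spin_lock(&kvm->mmu_lock);
 *      if (kvm->mmu_notifier_count || kvm->mmu_notifier_seq != seq)
 *              goto retry;                     // invalidate ran meanwhile
 *      ...install the spte...
 *      spin_unlock(&kvm->mmu_lock);
 *
 * i.e. any invalidation between the host page lookup and the mmu_lock
 * critical section is detected and the fault is retried.
 */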
static struct kvm *kvm_create_vm(void)
{
        struct kvm *kvm = kvm_arch_create_vm();
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
        struct page *page;
#endif

        if (IS_ERR(kvm))
                goto out;

#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
        page = alloc_page(GFP_KERNEL | __GFP_ZERO);
        if (!page) {
                kfree(kvm);
                return ERR_PTR(-ENOMEM);
        }
        kvm->coalesced_mmio_ring =
                        (struct kvm_coalesced_mmio_ring *)page_address(page);
#endif

#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
        {
                int err;
                kvm->mmu_notifier.ops = &kvm_mmu_notifier_ops;
                err = mmu_notifier_register(&kvm->mmu_notifier, current->mm);
                if (err) {
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
                        put_page(page);
#endif
                        kfree(kvm);
                        return ERR_PTR(err);
                }
        }
#endif

        kvm->mm = current->mm;
        atomic_inc(&kvm->mm->mm_count);
        spin_lock_init(&kvm->mmu_lock);
        kvm_io_bus_init(&kvm->pio_bus);
        mutex_init(&kvm->lock);
        kvm_io_bus_init(&kvm->mmio_bus);
        init_rwsem(&kvm->slots_lock);
        atomic_set(&kvm->users_count, 1);
        spin_lock(&kvm_lock);
        list_add(&kvm->vm_list, &vm_list);
        spin_unlock(&kvm_lock);
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
        kvm_coalesced_mmio_init(kvm);
#endif
out:
        return kvm;
}
/*
 * Free any memory in @free but not in @dont.
 */
static void kvm_free_physmem_slot(struct kvm_memory_slot *free,
                                  struct kvm_memory_slot *dont)
{
        if (!dont || free->rmap != dont->rmap)
                vfree(free->rmap);

        if (!dont || free->dirty_bitmap != dont->dirty_bitmap)
                vfree(free->dirty_bitmap);

        if (!dont || free->lpage_info != dont->lpage_info)
                vfree(free->lpage_info);

        free->npages = 0;
        free->dirty_bitmap = NULL;
        free->rmap = NULL;
        free->lpage_info = NULL;
}

void kvm_free_physmem(struct kvm *kvm)
{
        int i;

        for (i = 0; i < kvm->nmemslots; ++i)
                kvm_free_physmem_slot(&kvm->memslots[i], NULL);
}

static void kvm_destroy_vm(struct kvm *kvm)
{
        struct mm_struct *mm = kvm->mm;

        spin_lock(&kvm_lock);
        list_del(&kvm->vm_list);
        spin_unlock(&kvm_lock);
        kvm_io_bus_destroy(&kvm->pio_bus);
        kvm_io_bus_destroy(&kvm->mmio_bus);
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
        if (kvm->coalesced_mmio_ring != NULL)
                free_page((unsigned long)kvm->coalesced_mmio_ring);
#endif
#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
        mmu_notifier_unregister(&kvm->mmu_notifier, kvm->mm);
#endif
        kvm_arch_destroy_vm(kvm);
        mmdrop(mm);
}

void kvm_get_kvm(struct kvm *kvm)
{
        atomic_inc(&kvm->users_count);
}
EXPORT_SYMBOL_GPL(kvm_get_kvm);

void kvm_put_kvm(struct kvm *kvm)
{
        if (atomic_dec_and_test(&kvm->users_count))
                kvm_destroy_vm(kvm);
}
EXPORT_SYMBOL_GPL(kvm_put_kvm);

static int kvm_vm_release(struct inode *inode, struct file *filp)
{
        struct kvm *kvm = filp->private_data;

        kvm_put_kvm(kvm);
        return 0;
}
/*
 * Allocate some memory and give it an address in the guest physical address
 * space.
 *
 * Discontiguous memory is allowed, mostly for framebuffers.
 *
 * Must be called holding mmap_sem for write.
 */
int __kvm_set_memory_region(struct kvm *kvm,
                            struct kvm_userspace_memory_region *mem,
                            int user_alloc)
{
        int r;
        gfn_t base_gfn;
        unsigned long npages;
        unsigned long i;
        struct kvm_memory_slot *memslot;
        struct kvm_memory_slot old, new;

        r = -EINVAL;
        /* General sanity checks */
        if (mem->memory_size & (PAGE_SIZE - 1))
                goto out;
        if (mem->guest_phys_addr & (PAGE_SIZE - 1))
                goto out;
        if (user_alloc && (mem->userspace_addr & (PAGE_SIZE - 1)))
                goto out;
        if (mem->slot >= KVM_MEMORY_SLOTS + KVM_PRIVATE_MEM_SLOTS)
                goto out;
        if (mem->guest_phys_addr + mem->memory_size < mem->guest_phys_addr)
                goto out;

        memslot = &kvm->memslots[mem->slot];
        base_gfn = mem->guest_phys_addr >> PAGE_SHIFT;
        npages = mem->memory_size >> PAGE_SHIFT;

        if (!npages)
                mem->flags &= ~KVM_MEM_LOG_DIRTY_PAGES;

        new = old = *memslot;

        new.base_gfn = base_gfn;
        new.npages = npages;
        new.flags = mem->flags;

        /* Disallow changing a memory slot's size. */
        r = -EINVAL;
        if (npages && old.npages && npages != old.npages)
                goto out_free;

        /* Check for overlaps */
        r = -EEXIST;
        for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
                struct kvm_memory_slot *s = &kvm->memslots[i];

                if (s == memslot)
                        continue;
                if (!((base_gfn + npages <= s->base_gfn) ||
                      (base_gfn >= s->base_gfn + s->npages)))
                        goto out_free;
        }

        /* Free page dirty bitmap if unneeded */
        if (!(new.flags & KVM_MEM_LOG_DIRTY_PAGES))
                new.dirty_bitmap = NULL;

        r = -ENOMEM;

        /* Allocate if a slot is being created */
#ifndef CONFIG_S390
        if (npages && !new.rmap) {
                new.rmap = vmalloc(npages * sizeof(struct page *));

                if (!new.rmap)
                        goto out_free;

                memset(new.rmap, 0, npages * sizeof(*new.rmap));

                new.user_alloc = user_alloc;
                /*
                 * hva_to_rmmap() serializes with the mmu_lock and to be
                 * safe it has to ignore memslots with !user_alloc &&
                 * !userspace_addr.
                 */
                if (user_alloc)
                        new.userspace_addr = mem->userspace_addr;
                else
                        new.userspace_addr = 0;
        }
        if (npages && !new.lpage_info) {
                int largepages = npages / KVM_PAGES_PER_HPAGE;
                if (npages % KVM_PAGES_PER_HPAGE)
                        largepages++;
                if (base_gfn % KVM_PAGES_PER_HPAGE)
                        largepages++;

                new.lpage_info = vmalloc(largepages * sizeof(*new.lpage_info));

                if (!new.lpage_info)
                        goto out_free;

                memset(new.lpage_info, 0, largepages * sizeof(*new.lpage_info));

                if (base_gfn % KVM_PAGES_PER_HPAGE)
                        new.lpage_info[0].write_count = 1;
                if ((base_gfn+npages) % KVM_PAGES_PER_HPAGE)
                        new.lpage_info[largepages-1].write_count = 1;
        }

        /* Allocate page dirty bitmap if needed */
        if ((new.flags & KVM_MEM_LOG_DIRTY_PAGES) && !new.dirty_bitmap) {
                unsigned dirty_bytes = ALIGN(npages, BITS_PER_LONG) / 8;

                new.dirty_bitmap = vmalloc(dirty_bytes);
                if (!new.dirty_bitmap)
                        goto out_free;
                memset(new.dirty_bitmap, 0, dirty_bytes);
        }
#endif /* not defined CONFIG_S390 */

        if (!npages)
                kvm_arch_flush_shadow(kvm);

        spin_lock(&kvm->mmu_lock);
        if (mem->slot >= kvm->nmemslots)
                kvm->nmemslots = mem->slot + 1;

        *memslot = new;
        spin_unlock(&kvm->mmu_lock);

        r = kvm_arch_set_memory_region(kvm, mem, old, user_alloc);
        if (r) {
                spin_lock(&kvm->mmu_lock);
                *memslot = old;
                spin_unlock(&kvm->mmu_lock);
                goto out_free;
        }

        kvm_free_physmem_slot(&old, npages ? &new : NULL);
        /* Slot deletion case: we have to update the current slot */
        if (!npages)
                *memslot = old;
#ifdef CONFIG_DMAR
        /* map the pages in iommu page table */
        r = kvm_iommu_map_pages(kvm, base_gfn, npages);
        if (r)
                goto out;
#endif
        return 0;

out_free:
        kvm_free_physmem_slot(&new, &old);
out:
        return r;
}
EXPORT_SYMBOL_GPL(__kvm_set_memory_region);

int kvm_set_memory_region(struct kvm *kvm,
                          struct kvm_userspace_memory_region *mem,
                          int user_alloc)
{
        int r;

        down_write(&kvm->slots_lock);
        r = __kvm_set_memory_region(kvm, mem, user_alloc);
        up_write(&kvm->slots_lock);
        return r;
}
EXPORT_SYMBOL_GPL(kvm_set_memory_region);
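
/*
 * Illustrative userspace view (not part of the original file): the slot
 * layout above is what QEMU-like callers program through the
 * KVM_SET_USER_MEMORY_REGION vm ioctl, e.g.
 *
 *      struct kvm_userspace_memory_region mem = {
 *              .slot            = 0,
 *              .guest_phys_addr = 0,
 *              .memory_size     = ram_size,        // page-aligned
 *              .userspace_addr  = (__u64)ram_ptr,  // page-aligned mmap
 *      };
 *      ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &mem);
 *
 * with memory_size == 0 deleting the slot, as handled above.
 */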
int kvm_vm_ioctl_set_memory_region(struct kvm *kvm,
                                   struct kvm_userspace_memory_region *mem,
                                   int user_alloc)
{
        if (mem->slot >= KVM_MEMORY_SLOTS)
                return -EINVAL;
        return kvm_set_memory_region(kvm, mem, user_alloc);
}

int kvm_get_dirty_log(struct kvm *kvm,
                      struct kvm_dirty_log *log, int *is_dirty)
{
        struct kvm_memory_slot *memslot;
        int r, i;
        int n;
        unsigned long any = 0;

        r = -EINVAL;
        if (log->slot >= KVM_MEMORY_SLOTS)
                goto out;

        memslot = &kvm->memslots[log->slot];
        r = -ENOENT;
        if (!memslot->dirty_bitmap)
                goto out;

        n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;

        for (i = 0; !any && i < n/sizeof(long); ++i)
                any = memslot->dirty_bitmap[i];

        r = -EFAULT;
        if (copy_to_user(log->dirty_bitmap, memslot->dirty_bitmap, n))
                goto out;

        if (any)
                *is_dirty = 1;

        r = 0;
out:
        return r;
}

int is_error_page(struct page *page)
{
        return page == bad_page;
}
EXPORT_SYMBOL_GPL(is_error_page);

int is_error_pfn(pfn_t pfn)
{
        return pfn == bad_pfn;
}
EXPORT_SYMBOL_GPL(is_error_pfn);

static inline unsigned long bad_hva(void)
{
        return PAGE_OFFSET;
}

int kvm_is_error_hva(unsigned long addr)
{
        return addr == bad_hva();
}
EXPORT_SYMBOL_GPL(kvm_is_error_hva);

struct kvm_memory_slot *gfn_to_memslot_unaliased(struct kvm *kvm, gfn_t gfn)
{
        int i;

        for (i = 0; i < kvm->nmemslots; ++i) {
                struct kvm_memory_slot *memslot = &kvm->memslots[i];

                if (gfn >= memslot->base_gfn
                    && gfn < memslot->base_gfn + memslot->npages)
                        return memslot;
        }
        return NULL;
}
EXPORT_SYMBOL_GPL(gfn_to_memslot_unaliased);

struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn)
{
        gfn = unalias_gfn(kvm, gfn);
        return gfn_to_memslot_unaliased(kvm, gfn);
}

int kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn)
{
        int i;

        gfn = unalias_gfn(kvm, gfn);
        for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
                struct kvm_memory_slot *memslot = &kvm->memslots[i];

                if (gfn >= memslot->base_gfn
                    && gfn < memslot->base_gfn + memslot->npages)
                        return 1;
        }
        return 0;
}
EXPORT_SYMBOL_GPL(kvm_is_visible_gfn);

unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn)
{
        struct kvm_memory_slot *slot;

        gfn = unalias_gfn(kvm, gfn);
        slot = gfn_to_memslot_unaliased(kvm, gfn);
        if (!slot)
                return bad_hva();
        return (slot->userspace_addr + (gfn - slot->base_gfn) * PAGE_SIZE);
}
EXPORT_SYMBOL_GPL(gfn_to_hva);

pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn)
{
        struct page *page[1];
        unsigned long addr;
        int npages;
        pfn_t pfn;

        might_sleep();

        addr = gfn_to_hva(kvm, gfn);
        if (kvm_is_error_hva(addr)) {
                get_page(bad_page);
                return page_to_pfn(bad_page);
        }

        npages = get_user_pages_fast(addr, 1, 1, page);

        if (unlikely(npages != 1)) {
                struct vm_area_struct *vma;

                down_read(&current->mm->mmap_sem);
                vma = find_vma(current->mm, addr);

                if (vma == NULL || addr < vma->vm_start ||
                    !(vma->vm_flags & VM_PFNMAP)) {
                        up_read(&current->mm->mmap_sem);
                        get_page(bad_page);
                        return page_to_pfn(bad_page);
                }

                pfn = ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
                up_read(&current->mm->mmap_sem);
                BUG_ON(!kvm_is_mmio_pfn(pfn));
        } else
                pfn = page_to_pfn(page[0]);

        return pfn;
}
EXPORT_SYMBOL_GPL(gfn_to_pfn);
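
/*
 * Illustrative note (not part of the original file): translation runs
 * gfn -> hva -> pfn. gfn_to_hva() resolves the guest frame against the
 * memslot array, then gfn_to_pfn() pins the backing page with
 * get_user_pages_fast(); VM_PFNMAP mappings (e.g. device BARs mapped
 * into the user address space) have no struct page, so there the pfn is
 * computed directly from the vma instead.
 */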
struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn)
{
        pfn_t pfn;

        pfn = gfn_to_pfn(kvm, gfn);
        if (!kvm_is_mmio_pfn(pfn))
                return pfn_to_page(pfn);

        WARN_ON(kvm_is_mmio_pfn(pfn));

        get_page(bad_page);
        return bad_page;
}
EXPORT_SYMBOL_GPL(gfn_to_page);

void kvm_release_page_clean(struct page *page)
{
        kvm_release_pfn_clean(page_to_pfn(page));
}
EXPORT_SYMBOL_GPL(kvm_release_page_clean);

void kvm_release_pfn_clean(pfn_t pfn)
{
        if (!kvm_is_mmio_pfn(pfn))
                put_page(pfn_to_page(pfn));
}
EXPORT_SYMBOL_GPL(kvm_release_pfn_clean);

void kvm_release_page_dirty(struct page *page)
{
        kvm_release_pfn_dirty(page_to_pfn(page));
}
EXPORT_SYMBOL_GPL(kvm_release_page_dirty);

void kvm_release_pfn_dirty(pfn_t pfn)
{
        kvm_set_pfn_dirty(pfn);
        kvm_release_pfn_clean(pfn);
}
EXPORT_SYMBOL_GPL(kvm_release_pfn_dirty);

void kvm_set_page_dirty(struct page *page)
{
        kvm_set_pfn_dirty(page_to_pfn(page));
}
EXPORT_SYMBOL_GPL(kvm_set_page_dirty);

void kvm_set_pfn_dirty(pfn_t pfn)
{
        if (!kvm_is_mmio_pfn(pfn)) {
                struct page *page = pfn_to_page(pfn);
                if (!PageReserved(page))
                        SetPageDirty(page);
        }
}
EXPORT_SYMBOL_GPL(kvm_set_pfn_dirty);

void kvm_set_pfn_accessed(pfn_t pfn)
{
        if (!kvm_is_mmio_pfn(pfn))
                mark_page_accessed(pfn_to_page(pfn));
}
EXPORT_SYMBOL_GPL(kvm_set_pfn_accessed);

void kvm_get_pfn(pfn_t pfn)
{
        if (!kvm_is_mmio_pfn(pfn))
                get_page(pfn_to_page(pfn));
}
EXPORT_SYMBOL_GPL(kvm_get_pfn);

static int next_segment(unsigned long len, int offset)
{
        if (len > PAGE_SIZE - offset)
                return PAGE_SIZE - offset;
        else
                return len;
}
int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
                        int len)
{
        int r;
        unsigned long addr;

        addr = gfn_to_hva(kvm, gfn);
        if (kvm_is_error_hva(addr))
                return -EFAULT;
        r = copy_from_user(data, (void __user *)addr + offset, len);
        if (r)
                return -EFAULT;
        return 0;
}
EXPORT_SYMBOL_GPL(kvm_read_guest_page);

int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len)
{
        gfn_t gfn = gpa >> PAGE_SHIFT;
        int seg;
        int offset = offset_in_page(gpa);
        int ret;

        while ((seg = next_segment(len, offset)) != 0) {
                ret = kvm_read_guest_page(kvm, gfn, data, offset, seg);
                if (ret < 0)
                        return ret;
                offset = 0;
                len -= seg;
                data += seg;
                ++gfn;
        }
        return 0;
}
EXPORT_SYMBOL_GPL(kvm_read_guest);
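
/*
 * Illustrative worked example (not part of the original file): a read of
 * len = 5000 bytes at gpa = 0x1f00 (4 KiB pages, page offset 0xf00) is
 * split by next_segment() into 4096 - 3840 = 256 bytes from the first
 * page, then a full 4096-byte page, then the remaining 648 bytes, with
 * gfn advancing once per iteration.
 */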
int kvm_read_guest_atomic(struct kvm *kvm, gpa_t gpa, void *data,
                          unsigned long len)
{
        int r;
        unsigned long addr;
        gfn_t gfn = gpa >> PAGE_SHIFT;
        int offset = offset_in_page(gpa);

        addr = gfn_to_hva(kvm, gfn);
        if (kvm_is_error_hva(addr))
                return -EFAULT;
        pagefault_disable();
        r = __copy_from_user_inatomic(data, (void __user *)addr + offset, len);
        pagefault_enable();
        if (r)
                return -EFAULT;
        return 0;
}
EXPORT_SYMBOL(kvm_read_guest_atomic);

int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, const void *data,
                         int offset, int len)
{
        int r;
        unsigned long addr;

        addr = gfn_to_hva(kvm, gfn);
        if (kvm_is_error_hva(addr))
                return -EFAULT;
        r = copy_to_user((void __user *)addr + offset, data, len);
        if (r)
                return -EFAULT;
        mark_page_dirty(kvm, gfn);
        return 0;
}
EXPORT_SYMBOL_GPL(kvm_write_guest_page);

int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
                    unsigned long len)
{
        gfn_t gfn = gpa >> PAGE_SHIFT;
        int seg;
        int offset = offset_in_page(gpa);
        int ret;

        while ((seg = next_segment(len, offset)) != 0) {
                ret = kvm_write_guest_page(kvm, gfn, data, offset, seg);
                if (ret < 0)
                        return ret;
                offset = 0;
                len -= seg;
                data += seg;
                ++gfn;
        }
        return 0;
}

int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len)
{
        return kvm_write_guest_page(kvm, gfn, empty_zero_page, offset, len);
}
EXPORT_SYMBOL_GPL(kvm_clear_guest_page);

int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len)
{
        gfn_t gfn = gpa >> PAGE_SHIFT;
        int seg;
        int offset = offset_in_page(gpa);
        int ret;

        while ((seg = next_segment(len, offset)) != 0) {
                ret = kvm_clear_guest_page(kvm, gfn, offset, seg);
                if (ret < 0)
                        return ret;
                offset = 0;
                len -= seg;
                ++gfn;
        }
        return 0;
}
EXPORT_SYMBOL_GPL(kvm_clear_guest);
void mark_page_dirty(struct kvm *kvm, gfn_t gfn)
{
        struct kvm_memory_slot *memslot;

        gfn = unalias_gfn(kvm, gfn);
        memslot = gfn_to_memslot_unaliased(kvm, gfn);
        if (memslot && memslot->dirty_bitmap) {
                unsigned long rel_gfn = gfn - memslot->base_gfn;

                /* avoid RMW */
                if (!test_bit(rel_gfn, memslot->dirty_bitmap))
                        set_bit(rel_gfn, memslot->dirty_bitmap);
        }
}

/*
 * The vCPU has executed a HLT instruction with in-kernel mode enabled.
 */
void kvm_vcpu_block(struct kvm_vcpu *vcpu)
{
        DEFINE_WAIT(wait);

        for (;;) {
                prepare_to_wait(&vcpu->wq, &wait, TASK_INTERRUPTIBLE);

                if (kvm_cpu_has_interrupt(vcpu) ||
                    kvm_cpu_has_pending_timer(vcpu) ||
                    kvm_arch_vcpu_runnable(vcpu)) {
                        set_bit(KVM_REQ_UNHALT, &vcpu->requests);
                        break;
                }
                if (signal_pending(current))
                        break;

                vcpu_put(vcpu);
                schedule();
                vcpu_load(vcpu);
        }

        finish_wait(&vcpu->wq, &wait);
}
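
/*
 * Illustrative note (not part of the original file): the halted vcpu
 * sleeps on vcpu->wq, so whoever makes it runnable again (e.g. the
 * interrupt-injection path) is expected to wake that waitqueue, roughly:
 *
 *      if (waitqueue_active(&vcpu->wq))
 *              wake_up_interruptible(&vcpu->wq);
 *
 * after which the loop above re-checks the runnable conditions.
 */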
void kvm_resched(struct kvm_vcpu *vcpu)
{
        if (!need_resched())
                return;
        cond_resched();
}
EXPORT_SYMBOL_GPL(kvm_resched);

static int kvm_vcpu_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        struct kvm_vcpu *vcpu = vma->vm_file->private_data;
        struct page *page;

        if (vmf->pgoff == 0)
                page = virt_to_page(vcpu->run);
#ifdef CONFIG_X86
        else if (vmf->pgoff == KVM_PIO_PAGE_OFFSET)
                page = virt_to_page(vcpu->arch.pio_data);
#endif
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
        else if (vmf->pgoff == KVM_COALESCED_MMIO_PAGE_OFFSET)
                page = virt_to_page(vcpu->kvm->coalesced_mmio_ring);
#endif
        else
                return VM_FAULT_SIGBUS;
        get_page(page);
        vmf->page = page;
        return 0;
}

static struct vm_operations_struct kvm_vcpu_vm_ops = {
        .fault = kvm_vcpu_fault,
};

static int kvm_vcpu_mmap(struct file *file, struct vm_area_struct *vma)
{
        vma->vm_ops = &kvm_vcpu_vm_ops;
        return 0;
}

static int kvm_vcpu_release(struct inode *inode, struct file *filp)
{
        struct kvm_vcpu *vcpu = filp->private_data;

        kvm_put_kvm(vcpu->kvm);
        return 0;
}

static struct file_operations kvm_vcpu_fops = {
        .release        = kvm_vcpu_release,
        .unlocked_ioctl = kvm_vcpu_ioctl,
        .compat_ioctl   = kvm_vcpu_ioctl,
        .mmap           = kvm_vcpu_mmap,
};

/*
 * Allocates an inode for the vcpu.
 */
static int create_vcpu_fd(struct kvm_vcpu *vcpu)
{
        int fd = anon_inode_getfd("kvm-vcpu", &kvm_vcpu_fops, vcpu, 0);
        if (fd < 0)
                kvm_put_kvm(vcpu->kvm);
        return fd;
}

/*
 * Creates some virtual cpus. Good luck creating more than one.
 */
static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, int n)
{
        int r;
        struct kvm_vcpu *vcpu;

        if (!valid_vcpu(n))
                return -EINVAL;

        vcpu = kvm_arch_vcpu_create(kvm, n);
        if (IS_ERR(vcpu))
                return PTR_ERR(vcpu);

        preempt_notifier_init(&vcpu->preempt_notifier, &kvm_preempt_ops);

        r = kvm_arch_vcpu_setup(vcpu);
        if (r)
                return r;

        mutex_lock(&kvm->lock);
        if (kvm->vcpus[n]) {
                r = -EEXIST;
                goto vcpu_destroy;
        }
        kvm->vcpus[n] = vcpu;
        mutex_unlock(&kvm->lock);

        /* Now it's all set up, let userspace reach it */
        kvm_get_kvm(kvm);
        r = create_vcpu_fd(vcpu);
        if (r < 0)
                goto unlink;
        return r;

unlink:
        mutex_lock(&kvm->lock);
        kvm->vcpus[n] = NULL;
vcpu_destroy:
        mutex_unlock(&kvm->lock);
        kvm_arch_vcpu_destroy(vcpu);
        return r;
}
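
/*
 * Illustrative userspace view (not part of the original file): the fd
 * returned above is obtained and used roughly like
 *
 *      int vcpu_fd = ioctl(vm_fd, KVM_CREATE_VCPU, 0);
 *      struct kvm_run *run = mmap(NULL, mmap_size, PROT_READ | PROT_WRITE,
 *                                 MAP_SHARED, vcpu_fd, 0);
 *      ioctl(vcpu_fd, KVM_RUN, 0);
 *
 * where mmap_size comes from ioctl(kvm_fd, KVM_GET_VCPU_MMAP_SIZE, 0)
 * and page 0 of the mapping is vcpu->run, served by kvm_vcpu_fault().
 */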
  1324. static int kvm_vcpu_ioctl_set_sigmask(struct kvm_vcpu *vcpu, sigset_t *sigset)
  1325. {
  1326. if (sigset) {
  1327. sigdelsetmask(sigset, sigmask(SIGKILL)|sigmask(SIGSTOP));
  1328. vcpu->sigset_active = 1;
  1329. vcpu->sigset = *sigset;
  1330. } else
  1331. vcpu->sigset_active = 0;
  1332. return 0;
  1333. }
  1334. static long kvm_vcpu_ioctl(struct file *filp,
  1335. unsigned int ioctl, unsigned long arg)
  1336. {
  1337. struct kvm_vcpu *vcpu = filp->private_data;
  1338. void __user *argp = (void __user *)arg;
  1339. int r;
  1340. struct kvm_fpu *fpu = NULL;
  1341. struct kvm_sregs *kvm_sregs = NULL;
  1342. if (vcpu->kvm->mm != current->mm)
  1343. return -EIO;
  1344. switch (ioctl) {
  1345. case KVM_RUN:
  1346. r = -EINVAL;
  1347. if (arg)
  1348. goto out;
  1349. r = kvm_arch_vcpu_ioctl_run(vcpu, vcpu->run);
  1350. break;
  1351. case KVM_GET_REGS: {
  1352. struct kvm_regs *kvm_regs;
  1353. r = -ENOMEM;
  1354. kvm_regs = kzalloc(sizeof(struct kvm_regs), GFP_KERNEL);
  1355. if (!kvm_regs)
  1356. goto out;
  1357. r = kvm_arch_vcpu_ioctl_get_regs(vcpu, kvm_regs);
  1358. if (r)
  1359. goto out_free1;
  1360. r = -EFAULT;
  1361. if (copy_to_user(argp, kvm_regs, sizeof(struct kvm_regs)))
  1362. goto out_free1;
  1363. r = 0;
  1364. out_free1:
  1365. kfree(kvm_regs);
  1366. break;
  1367. }
  1368. case KVM_SET_REGS: {
  1369. struct kvm_regs *kvm_regs;
  1370. r = -ENOMEM;
  1371. kvm_regs = kzalloc(sizeof(struct kvm_regs), GFP_KERNEL);
  1372. if (!kvm_regs)
  1373. goto out;
  1374. r = -EFAULT;
  1375. if (copy_from_user(kvm_regs, argp, sizeof(struct kvm_regs)))
  1376. goto out_free2;
  1377. r = kvm_arch_vcpu_ioctl_set_regs(vcpu, kvm_regs);
  1378. if (r)
  1379. goto out_free2;
  1380. r = 0;
  1381. out_free2:
  1382. kfree(kvm_regs);
  1383. break;
  1384. }
  1385. case KVM_GET_SREGS: {
  1386. kvm_sregs = kzalloc(sizeof(struct kvm_sregs), GFP_KERNEL);
  1387. r = -ENOMEM;
  1388. if (!kvm_sregs)
  1389. goto out;
  1390. r = kvm_arch_vcpu_ioctl_get_sregs(vcpu, kvm_sregs);
  1391. if (r)
  1392. goto out;
  1393. r = -EFAULT;
  1394. if (copy_to_user(argp, kvm_sregs, sizeof(struct kvm_sregs)))
  1395. goto out;
  1396. r = 0;
  1397. break;
  1398. }
  1399. case KVM_SET_SREGS: {
  1400. kvm_sregs = kmalloc(sizeof(struct kvm_sregs), GFP_KERNEL);
  1401. r = -ENOMEM;
  1402. if (!kvm_sregs)
  1403. goto out;
  1404. r = -EFAULT;
  1405. if (copy_from_user(kvm_sregs, argp, sizeof(struct kvm_sregs)))
  1406. goto out;
  1407. r = kvm_arch_vcpu_ioctl_set_sregs(vcpu, kvm_sregs);
  1408. if (r)
  1409. goto out;
  1410. r = 0;
  1411. break;
  1412. }
  1413. case KVM_GET_MP_STATE: {
  1414. struct kvm_mp_state mp_state;
  1415. r = kvm_arch_vcpu_ioctl_get_mpstate(vcpu, &mp_state);
  1416. if (r)
  1417. goto out;
  1418. r = -EFAULT;
  1419. if (copy_to_user(argp, &mp_state, sizeof mp_state))
  1420. goto out;
  1421. r = 0;
  1422. break;
  1423. }
  1424. case KVM_SET_MP_STATE: {
  1425. struct kvm_mp_state mp_state;
  1426. r = -EFAULT;
  1427. if (copy_from_user(&mp_state, argp, sizeof mp_state))
  1428. goto out;
  1429. r = kvm_arch_vcpu_ioctl_set_mpstate(vcpu, &mp_state);
  1430. if (r)
  1431. goto out;
  1432. r = 0;
  1433. break;
  1434. }
  1435. case KVM_TRANSLATE: {
  1436. struct kvm_translation tr;
  1437. r = -EFAULT;
  1438. if (copy_from_user(&tr, argp, sizeof tr))
  1439. goto out;
  1440. r = kvm_arch_vcpu_ioctl_translate(vcpu, &tr);
  1441. if (r)
  1442. goto out;
  1443. r = -EFAULT;
  1444. if (copy_to_user(argp, &tr, sizeof tr))
  1445. goto out;
  1446. r = 0;
  1447. break;
  1448. }
  1449. case KVM_DEBUG_GUEST: {
  1450. struct kvm_debug_guest dbg;
  1451. r = -EFAULT;
  1452. if (copy_from_user(&dbg, argp, sizeof dbg))
  1453. goto out;
  1454. r = kvm_arch_vcpu_ioctl_debug_guest(vcpu, &dbg);
  1455. if (r)
  1456. goto out;
  1457. r = 0;
  1458. break;
  1459. }
	case KVM_SET_SIGNAL_MASK: {
		struct kvm_signal_mask __user *sigmask_arg = argp;
		struct kvm_signal_mask kvm_sigmask;
		sigset_t sigset, *p;

		p = NULL;
		if (argp) {
			r = -EFAULT;
			if (copy_from_user(&kvm_sigmask, argp,
					   sizeof kvm_sigmask))
				goto out;
			r = -EINVAL;
			if (kvm_sigmask.len != sizeof sigset)
				goto out;
			r = -EFAULT;
			if (copy_from_user(&sigset, sigmask_arg->sigset,
					   sizeof sigset))
				goto out;
			p = &sigset;
		}
		/*
		 * Pass p, not &sigset: a NULL argp means "clear the mask",
		 * and &sigset would hand down uninitialized stack data in
		 * that case.
		 */
		r = kvm_vcpu_ioctl_set_sigmask(vcpu, p);
		break;
	}
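	/*
	 * Usage sketch (userspace side): KVM_SET_SIGNAL_MASK takes a
	 * struct kvm_signal_mask header followed by the sigset bytes.
	 * Note that len is checked against the *kernel* sigset_t (8 bytes
	 * on x86-64), which is smaller than glibc's sigset_t, so a raw
	 * word is the simplest carrier:
	 *
	 *	struct {
	 *		struct kvm_signal_mask hdr;
	 *		unsigned long bits;	(kernel sigset_t, x86-64)
	 *	} mask;
	 *	mask.bits = 1UL << (SIGUSR1 - 1);
	 *	mask.hdr.len = sizeof(mask.bits);
	 *	if (ioctl(vcpu_fd, KVM_SET_SIGNAL_MASK, &mask) < 0)
	 *		err(1, "KVM_SET_SIGNAL_MASK");
	 *
	 * The mask is applied temporarily while KVM_RUN is in flight.
	 */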
	case KVM_GET_FPU: {
		fpu = kzalloc(sizeof(struct kvm_fpu), GFP_KERNEL);
		r = -ENOMEM;
		if (!fpu)
			goto out;
		r = kvm_arch_vcpu_ioctl_get_fpu(vcpu, fpu);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, fpu, sizeof(struct kvm_fpu)))
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_FPU: {
		fpu = kmalloc(sizeof(struct kvm_fpu), GFP_KERNEL);
		r = -ENOMEM;
		if (!fpu)
			goto out;
		r = -EFAULT;
		if (copy_from_user(fpu, argp, sizeof(struct kvm_fpu)))
			goto out;
		r = kvm_arch_vcpu_ioctl_set_fpu(vcpu, fpu);
		if (r)
			goto out;
		r = 0;
		break;
	}
	default:
		r = kvm_arch_vcpu_ioctl(filp, ioctl, arg);
	}
out:
	kfree(fpu);
	kfree(kvm_sregs);
	return r;
}

static long kvm_vm_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm = filp->private_data;
	void __user *argp = (void __user *)arg;
	int r;

	if (kvm->mm != current->mm)
		return -EIO;
	switch (ioctl) {
	case KVM_CREATE_VCPU:
		r = kvm_vm_ioctl_create_vcpu(kvm, arg);
		if (r < 0)
			goto out;
		break;
	case KVM_SET_USER_MEMORY_REGION: {
		struct kvm_userspace_memory_region kvm_userspace_mem;

		r = -EFAULT;
		if (copy_from_user(&kvm_userspace_mem, argp,
				   sizeof kvm_userspace_mem))
			goto out;
		r = kvm_vm_ioctl_set_memory_region(kvm, &kvm_userspace_mem, 1);
		if (r)
			goto out;
		break;
	}
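	/*
	 * Usage sketch (userspace side): backing guest physical memory
	 * with an anonymous mmap and registering it as slot 0, assuming
	 * an open vm_fd from KVM_CREATE_VM:
	 *
	 *	size_t sz = 16 << 20;	(16 MiB of guest RAM)
	 *	void *ram = mmap(NULL, sz, PROT_READ | PROT_WRITE,
	 *			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	 *	struct kvm_userspace_memory_region region = {
	 *		.slot = 0,
	 *		.guest_phys_addr = 0,
	 *		.memory_size = sz,
	 *		.userspace_addr = (unsigned long)ram,
	 *	};
	 *	if (ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &region) < 0)
	 *		err(1, "KVM_SET_USER_MEMORY_REGION");
	 */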
	case KVM_GET_DIRTY_LOG: {
		struct kvm_dirty_log log;

		r = -EFAULT;
		if (copy_from_user(&log, argp, sizeof log))
			goto out;
		r = kvm_vm_ioctl_get_dirty_log(kvm, &log);
		if (r)
			goto out;
		break;
	}
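	/*
	 * Usage sketch (userspace side): fetching the dirty bitmap for a
	 * slot that was created with KVM_MEM_LOG_DIRTY_PAGES set.  One bit
	 * per guest page, so the buffer needs memory_size / PAGE_SIZE bits
	 * (sz assumed to be a multiple of a full word of pages):
	 *
	 *	unsigned long bitmap[sz / 4096 / (8 * sizeof(unsigned long))];
	 *	struct kvm_dirty_log log = {
	 *		.slot = 0,
	 *		.dirty_bitmap = bitmap,
	 *	};
	 *	if (ioctl(vm_fd, KVM_GET_DIRTY_LOG, &log) < 0)
	 *		err(1, "KVM_GET_DIRTY_LOG");
	 */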
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
	case KVM_REGISTER_COALESCED_MMIO: {
		struct kvm_coalesced_mmio_zone zone;

		r = -EFAULT;
		if (copy_from_user(&zone, argp, sizeof zone))
			goto out;
		r = kvm_vm_ioctl_register_coalesced_mmio(kvm, &zone);
		if (r)
			goto out;
		r = 0;
		break;
	}
	case KVM_UNREGISTER_COALESCED_MMIO: {
		struct kvm_coalesced_mmio_zone zone;

		r = -EFAULT;
		if (copy_from_user(&zone, argp, sizeof zone))
			goto out;
		r = kvm_vm_ioctl_unregister_coalesced_mmio(kvm, &zone);
		if (r)
			goto out;
		r = 0;
		break;
	}
#endif
#ifdef KVM_CAP_DEVICE_ASSIGNMENT
	case KVM_ASSIGN_PCI_DEVICE: {
		struct kvm_assigned_pci_dev assigned_dev;

		r = -EFAULT;
		if (copy_from_user(&assigned_dev, argp, sizeof assigned_dev))
			goto out;
		r = kvm_vm_ioctl_assign_device(kvm, &assigned_dev);
		if (r)
			goto out;
		break;
	}
	case KVM_ASSIGN_IRQ: {
		struct kvm_assigned_irq assigned_irq;

		r = -EFAULT;
		if (copy_from_user(&assigned_irq, argp, sizeof assigned_irq))
			goto out;
		r = kvm_vm_ioctl_assign_irq(kvm, &assigned_irq);
		if (r)
			goto out;
		break;
	}
#endif
	default:
		r = kvm_arch_vm_ioctl(filp, ioctl, arg);
	}
out:
	return r;
}

static int kvm_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct page *page[1];
	unsigned long addr;
	int npages;
	gfn_t gfn = vmf->pgoff;
	struct kvm *kvm = vma->vm_file->private_data;

	addr = gfn_to_hva(kvm, gfn);
	if (kvm_is_error_hva(addr))
		return VM_FAULT_SIGBUS;
	npages = get_user_pages(current, current->mm, addr, 1, 1, 0, page,
				NULL);
	if (unlikely(npages != 1))
		return VM_FAULT_SIGBUS;
	vmf->page = page[0];
	return 0;
}
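
/*
 * The fault handler above is what makes mmap() on a VM fd work: the file
 * offset is interpreted as a guest frame number, so mapping page N of the
 * fd yields the host page backing gfn N.  Userspace sketch, assuming an
 * open vm_fd with a populated memory slot covering gfn:
 *
 *	void *gpage = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
 *			   MAP_SHARED, vm_fd, gfn * 4096);
 */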

static struct vm_operations_struct kvm_vm_vm_ops = {
	.fault = kvm_vm_fault,
};

static int kvm_vm_mmap(struct file *file, struct vm_area_struct *vma)
{
	vma->vm_ops = &kvm_vm_vm_ops;
	return 0;
}

static struct file_operations kvm_vm_fops = {
	.release = kvm_vm_release,
	.unlocked_ioctl = kvm_vm_ioctl,
	.compat_ioctl = kvm_vm_ioctl,
	.mmap = kvm_vm_mmap,
};

static int kvm_dev_ioctl_create_vm(void)
{
	int fd;
	struct kvm *kvm;

	kvm = kvm_create_vm();
	if (IS_ERR(kvm))
		return PTR_ERR(kvm);
	fd = anon_inode_getfd("kvm-vm", &kvm_vm_fops, kvm, 0);
	if (fd < 0)
		kvm_put_kvm(kvm);
	return fd;
}
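
/*
 * Usage sketch (userspace side): the whole fd hierarchy starts at the
 * character device.  Assuming <fcntl.h>, <sys/ioctl.h> and <linux/kvm.h>:
 *
 *	int kvm_fd = open("/dev/kvm", O_RDWR);
 *	if (kvm_fd < 0)
 *		err(1, "/dev/kvm");
 *	if (ioctl(kvm_fd, KVM_GET_API_VERSION, 0) != KVM_API_VERSION)
 *		errx(1, "kvm API version mismatch");
 *	int vm_fd = ioctl(kvm_fd, KVM_CREATE_VM, 0);
 *	int vcpu_fd = ioctl(vm_fd, KVM_CREATE_VCPU, 0);
 */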

static long kvm_dev_ioctl_check_extension_generic(long arg)
{
	switch (arg) {
	case KVM_CAP_USER_MEMORY:
	case KVM_CAP_DESTROY_MEMORY_REGION_WORKS:
		return 1;
	default:
		break;
	}
	return kvm_dev_ioctl_check_extension(arg);
}

static long kvm_dev_ioctl(struct file *filp,
			  unsigned int ioctl, unsigned long arg)
{
	long r = -EINVAL;

	switch (ioctl) {
	case KVM_GET_API_VERSION:
		r = -EINVAL;
		if (arg)
			goto out;
		r = KVM_API_VERSION;
		break;
	case KVM_CREATE_VM:
		r = -EINVAL;
		if (arg)
			goto out;
		r = kvm_dev_ioctl_create_vm();
		break;
	case KVM_CHECK_EXTENSION:
		r = kvm_dev_ioctl_check_extension_generic(arg);
		break;
	case KVM_GET_VCPU_MMAP_SIZE:
		r = -EINVAL;
		if (arg)
			goto out;
		r = PAGE_SIZE;    /* struct kvm_run */
#ifdef CONFIG_X86
		r += PAGE_SIZE;    /* pio data page */
#endif
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
		r += PAGE_SIZE;    /* coalesced mmio ring page */
#endif
		break;
	case KVM_TRACE_ENABLE:
	case KVM_TRACE_PAUSE:
	case KVM_TRACE_DISABLE:
		r = kvm_trace_ioctl(ioctl, arg);
		break;
	default:
		return kvm_arch_dev_ioctl(filp, ioctl, arg);
	}
out:
	return r;
}
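
/*
 * Usage sketch (userspace side): KVM_GET_VCPU_MMAP_SIZE is asked of the
 * /dev/kvm fd and tells userspace how much of each vcpu fd to mmap; the
 * shared struct kvm_run sits at offset 0 of that mapping:
 *
 *	int sz = ioctl(kvm_fd, KVM_GET_VCPU_MMAP_SIZE, 0);
 *	struct kvm_run *run = mmap(NULL, sz, PROT_READ | PROT_WRITE,
 *				   MAP_SHARED, vcpu_fd, 0);
 *	while (ioctl(vcpu_fd, KVM_RUN, 0) == 0) {
 *		if (run->exit_reason == KVM_EXIT_HLT)
 *			break;
 *		(dispatch on run->exit_reason here: MMIO, PIO, ...)
 *	}
 */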

static struct file_operations kvm_chardev_ops = {
	.unlocked_ioctl = kvm_dev_ioctl,
	.compat_ioctl = kvm_dev_ioctl,
};

static struct miscdevice kvm_dev = {
	KVM_MINOR,
	"kvm",
	&kvm_chardev_ops,
};

static void hardware_enable(void *junk)
{
	int cpu = raw_smp_processor_id();

	if (cpumask_test_cpu(cpu, cpus_hardware_enabled))
		return;
	cpumask_set_cpu(cpu, cpus_hardware_enabled);
	kvm_arch_hardware_enable(NULL);
}

static void hardware_disable(void *junk)
{
	int cpu = raw_smp_processor_id();

	if (!cpumask_test_cpu(cpu, cpus_hardware_enabled))
		return;
	cpumask_clear_cpu(cpu, cpus_hardware_enabled);
	kvm_arch_hardware_disable(NULL);
}

static int kvm_cpu_hotplug(struct notifier_block *notifier, unsigned long val,
			   void *v)
{
	int cpu = (long)v;

	val &= ~CPU_TASKS_FROZEN;
	switch (val) {
	case CPU_DYING:
		printk(KERN_INFO "kvm: disabling virtualization on CPU%d\n",
		       cpu);
		hardware_disable(NULL);
		break;
	case CPU_UP_CANCELED:
		printk(KERN_INFO "kvm: disabling virtualization on CPU%d\n",
		       cpu);
		smp_call_function_single(cpu, hardware_disable, NULL, 1);
		break;
	case CPU_ONLINE:
		printk(KERN_INFO "kvm: enabling virtualization on CPU%d\n",
		       cpu);
		smp_call_function_single(cpu, hardware_enable, NULL, 1);
		break;
	}
	return NOTIFY_OK;
}
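
/*
 * Why CPU_DYING calls hardware_disable() directly while the other cases
 * go through smp_call_function_single(): CPU_DYING notifiers already run
 * on the dying CPU, so no cross-call is needed (or possible) there, while
 * CPU_ONLINE and CPU_UP_CANCELED are delivered on some other CPU and the
 * enable/disable work must be bounced to the target CPU via IPI.
 */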

asmlinkage void kvm_handle_fault_on_reboot(void)
{
	if (kvm_rebooting)
		/* spin while reset goes on */
		while (true)
			;
	/* Fault while not rebooting.  We want the trace. */
	BUG();
}
EXPORT_SYMBOL_GPL(kvm_handle_fault_on_reboot);

static int kvm_reboot(struct notifier_block *notifier, unsigned long val,
		      void *v)
{
	if (val == SYS_RESTART) {
		/*
		 * Some (well, at least mine) BIOSes hang on reboot if
		 * in vmx root mode.
		 */
		printk(KERN_INFO "kvm: exiting hardware virtualization\n");
		kvm_rebooting = true;
		on_each_cpu(hardware_disable, NULL, 1);
	}
	return NOTIFY_OK;
}

static struct notifier_block kvm_reboot_notifier = {
	.notifier_call = kvm_reboot,
	.priority = 0,
};

void kvm_io_bus_init(struct kvm_io_bus *bus)
{
	memset(bus, 0, sizeof(*bus));
}

void kvm_io_bus_destroy(struct kvm_io_bus *bus)
{
	int i;

	for (i = 0; i < bus->dev_count; i++) {
		struct kvm_io_device *pos = bus->devs[i];

		kvm_iodevice_destructor(pos);
	}
}

struct kvm_io_device *kvm_io_bus_find_dev(struct kvm_io_bus *bus,
					  gpa_t addr, int len, int is_write)
{
	int i;

	for (i = 0; i < bus->dev_count; i++) {
		struct kvm_io_device *pos = bus->devs[i];

		if (pos->in_range(pos, addr, len, is_write))
			return pos;
	}
	return NULL;
}

void kvm_io_bus_register_dev(struct kvm_io_bus *bus, struct kvm_io_device *dev)
{
	BUG_ON(bus->dev_count > (NR_IOBUS_DEVS - 1));
	bus->devs[bus->dev_count++] = dev;
}
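
/*
 * Sketch of a bus client (assumes the iodev.h layout of this era, in which
 * struct kvm_io_device carries read/write/in_range/destructor function
 * pointers directly; dbg_dev and the 0xd0000 address are illustrative).
 * A one-byte write-only device on the MMIO bus might look like:
 *
 *	static int dbg_in_range(struct kvm_io_device *this, gpa_t addr,
 *				int len, int is_write)
 *	{
 *		return addr == 0xd0000 && len == 1 && is_write;
 *	}
 *
 *	static void dbg_write(struct kvm_io_device *this, gpa_t addr,
 *			      int len, const void *val)
 *	{
 *		printk(KERN_DEBUG "kvm: guest wrote %02x\n",
 *		       *(const u8 *)val);
 *	}
 *
 * with the remaining callbacks filled in, followed by
 * kvm_io_bus_register_dev(&kvm->mmio_bus, &dbg_dev).
 */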

static struct notifier_block kvm_cpu_notifier = {
	.notifier_call = kvm_cpu_hotplug,
	.priority = 20, /* must be > scheduler priority */
};

static int vm_stat_get(void *_offset, u64 *val)
{
	unsigned offset = (long)_offset;
	struct kvm *kvm;

	*val = 0;
	spin_lock(&kvm_lock);
	list_for_each_entry(kvm, &vm_list, vm_list)
		*val += *(u32 *)((void *)kvm + offset);
	spin_unlock(&kvm_lock);
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(vm_stat_fops, vm_stat_get, NULL, "%llu\n");

static int vcpu_stat_get(void *_offset, u64 *val)
{
	unsigned offset = (long)_offset;
	struct kvm *kvm;
	struct kvm_vcpu *vcpu;
	int i;

	*val = 0;
	spin_lock(&kvm_lock);
	list_for_each_entry(kvm, &vm_list, vm_list)
		for (i = 0; i < KVM_MAX_VCPUS; ++i) {
			vcpu = kvm->vcpus[i];
			if (vcpu)
				*val += *(u32 *)((void *)vcpu + offset);
		}
	spin_unlock(&kvm_lock);
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(vcpu_stat_fops, vcpu_stat_get, NULL, "%llu\n");

static struct file_operations *stat_fops[] = {
	[KVM_STAT_VCPU] = &vcpu_stat_fops,
	[KVM_STAT_VM]   = &vm_stat_fops,
};

static void kvm_init_debug(void)
{
	struct kvm_stats_debugfs_item *p;

	kvm_debugfs_dir = debugfs_create_dir("kvm", NULL);
	for (p = debugfs_entries; p->name; ++p)
		p->dentry = debugfs_create_file(p->name, 0444, kvm_debugfs_dir,
						(void *)(long)p->offset,
						stat_fops[p->kind]);
}

static void kvm_exit_debug(void)
{
	struct kvm_stats_debugfs_item *p;

	for (p = debugfs_entries; p->name; ++p)
		debugfs_remove(p->dentry);
	debugfs_remove(kvm_debugfs_dir);
}

static int kvm_suspend(struct sys_device *dev, pm_message_t state)
{
	hardware_disable(NULL);
	return 0;
}

static int kvm_resume(struct sys_device *dev)
{
	hardware_enable(NULL);
	return 0;
}

static struct sysdev_class kvm_sysdev_class = {
	.name = "kvm",
	.suspend = kvm_suspend,
	.resume = kvm_resume,
};

static struct sys_device kvm_sysdev = {
	.id = 0,
	.cls = &kvm_sysdev_class,
};

struct page *bad_page;
pfn_t bad_pfn;

static inline
struct kvm_vcpu *preempt_notifier_to_vcpu(struct preempt_notifier *pn)
{
	return container_of(pn, struct kvm_vcpu, preempt_notifier);
}

static void kvm_sched_in(struct preempt_notifier *pn, int cpu)
{
	struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);

	kvm_arch_vcpu_load(vcpu, cpu);
}

static void kvm_sched_out(struct preempt_notifier *pn,
			  struct task_struct *next)
{
	struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);

	kvm_arch_vcpu_put(vcpu);
}
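
/*
 * These two hooks are how per-vcpu hardware state follows the vcpu thread
 * around: the scheduler fires sched_out when the thread is preempted
 * (kvm_arch_vcpu_put saves and releases the loaded state) and sched_in
 * when it resumes, possibly on a different CPU (kvm_arch_vcpu_load
 * reloads it there).  The callbacks are wired into kvm_preempt_ops at the
 * end of kvm_init() below.
 */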

int kvm_init(void *opaque, unsigned int vcpu_size,
	     struct module *module)
{
	int r;
	int cpu;

	kvm_init_debug();

	r = kvm_arch_init(opaque);
	if (r)
		goto out_fail;

	bad_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
	if (bad_page == NULL) {
		r = -ENOMEM;
		goto out;
	}

	bad_pfn = page_to_pfn(bad_page);

	if (!alloc_cpumask_var(&cpus_hardware_enabled, GFP_KERNEL)) {
		r = -ENOMEM;
		goto out_free_0;
	}

	r = kvm_arch_hardware_setup();
	if (r < 0)
		goto out_free_0a;

	for_each_online_cpu(cpu) {
		smp_call_function_single(cpu,
				kvm_arch_check_processor_compat,
				&r, 1);
		if (r < 0)
			goto out_free_1;
	}

	on_each_cpu(hardware_enable, NULL, 1);
	r = register_cpu_notifier(&kvm_cpu_notifier);
	if (r)
		goto out_free_2;
	register_reboot_notifier(&kvm_reboot_notifier);

	r = sysdev_class_register(&kvm_sysdev_class);
	if (r)
		goto out_free_3;

	r = sysdev_register(&kvm_sysdev);
	if (r)
		goto out_free_4;

	/* A kmem cache lets us meet the alignment requirements of fx_save. */
	kvm_vcpu_cache = kmem_cache_create("kvm_vcpu", vcpu_size,
					   __alignof__(struct kvm_vcpu),
					   0, NULL);
	if (!kvm_vcpu_cache) {
		r = -ENOMEM;
		goto out_free_5;
	}

	kvm_chardev_ops.owner = module;
	kvm_vm_fops.owner = module;
	kvm_vcpu_fops.owner = module;

	r = misc_register(&kvm_dev);
	if (r) {
		printk(KERN_ERR "kvm: misc device register failed\n");
		goto out_free;
	}

	kvm_preempt_ops.sched_in = kvm_sched_in;
	kvm_preempt_ops.sched_out = kvm_sched_out;
#ifndef CONFIG_X86
	msi2intx = 0;
#endif

	return 0;

out_free:
	kmem_cache_destroy(kvm_vcpu_cache);
out_free_5:
	sysdev_unregister(&kvm_sysdev);
out_free_4:
	sysdev_class_unregister(&kvm_sysdev_class);
out_free_3:
	unregister_reboot_notifier(&kvm_reboot_notifier);
	unregister_cpu_notifier(&kvm_cpu_notifier);
out_free_2:
	on_each_cpu(hardware_disable, NULL, 1);
out_free_1:
	kvm_arch_hardware_unsetup();
out_free_0a:
	free_cpumask_var(cpus_hardware_enabled);
out_free_0:
	__free_page(bad_page);
out:
	kvm_arch_exit();
	kvm_exit_debug();
out_fail:
	return r;
}
EXPORT_SYMBOL_GPL(kvm_init);

void kvm_exit(void)
{
	kvm_trace_cleanup();
	misc_deregister(&kvm_dev);
	kmem_cache_destroy(kvm_vcpu_cache);
	sysdev_unregister(&kvm_sysdev);
	sysdev_class_unregister(&kvm_sysdev_class);
	unregister_reboot_notifier(&kvm_reboot_notifier);
	unregister_cpu_notifier(&kvm_cpu_notifier);
	on_each_cpu(hardware_disable, NULL, 1);
	kvm_arch_hardware_unsetup();
	kvm_arch_exit();
	kvm_exit_debug();
	free_cpumask_var(cpus_hardware_enabled);
	__free_page(bad_page);
}
EXPORT_SYMBOL_GPL(kvm_exit);
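
/*
 * Sketch of the arch-module entry point that drives kvm_init()/kvm_exit()
 * (modelled on how the Intel/AMD modules use this API; vmx_init,
 * vmx_x86_ops and struct vcpu_vmx are illustrative names, not defined in
 * this file):
 *
 *	static int __init vmx_init(void)
 *	{
 *		return kvm_init(&vmx_x86_ops, sizeof(struct vcpu_vmx),
 *				THIS_MODULE);
 *	}
 *
 *	static void __exit vmx_exit(void)
 *	{
 *		kvm_exit();
 *	}
 *
 *	module_init(vmx_init);
 *	module_exit(vmx_exit);
 */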