kvm_main.c

/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This module enables machines with Intel VT-x extensions to run virtual
 * machines without emulation or binary translation.
 *
 * Copyright (C) 2006 Qumranet, Inc.
 *
 * Authors:
 *   Avi Kivity   <avi@qumranet.com>
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#include "kvm.h"
#include <linux/kvm.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/magic.h>
#include <asm/processor.h>
#include <linux/percpu.h>
#include <linux/gfp.h>
#include <asm/msr.h>
#include <linux/mm.h>
#include <linux/miscdevice.h>
#include <linux/vmalloc.h>
#include <asm/uaccess.h>
#include <linux/reboot.h>
#include <asm/io.h>
#include <linux/debugfs.h>
#include <linux/highmem.h>
#include <linux/file.h>
#include <asm/desc.h>
#include <linux/sysdev.h>
#include <linux/cpu.h>
#include <linux/fs.h>
#include <linux/mount.h>

#include "x86_emulate.h"
#include "segment_descriptor.h"

MODULE_AUTHOR("Qumranet");
MODULE_LICENSE("GPL");

static DEFINE_SPINLOCK(kvm_lock);
static LIST_HEAD(vm_list);

struct kvm_arch_ops *kvm_arch_ops;

#define STAT_OFFSET(x) offsetof(struct kvm_vcpu, stat.x)

static struct kvm_stats_debugfs_item {
	const char *name;
	int offset;
	struct dentry *dentry;
} debugfs_entries[] = {
	{ "pf_fixed", STAT_OFFSET(pf_fixed) },
	{ "pf_guest", STAT_OFFSET(pf_guest) },
	{ "tlb_flush", STAT_OFFSET(tlb_flush) },
	{ "invlpg", STAT_OFFSET(invlpg) },
	{ "exits", STAT_OFFSET(exits) },
	{ "io_exits", STAT_OFFSET(io_exits) },
	{ "mmio_exits", STAT_OFFSET(mmio_exits) },
	{ "signal_exits", STAT_OFFSET(signal_exits) },
	{ "irq_window", STAT_OFFSET(irq_window_exits) },
	{ "halt_exits", STAT_OFFSET(halt_exits) },
	{ "request_irq", STAT_OFFSET(request_irq_exits) },
	{ "irq_exits", STAT_OFFSET(irq_exits) },
	{ NULL }
};

static struct dentry *debugfs_dir;

struct vfsmount *kvmfs_mnt;

#define MAX_IO_MSRS 256

#define CR0_RESEVED_BITS 0xffffffff1ffaffc0ULL
#define LMSW_GUEST_MASK 0x0eULL
#define CR4_RESEVED_BITS (~((1ULL << 11) - 1))
#define CR8_RESEVED_BITS (~0x0fULL)
#define EFER_RESERVED_BITS 0xfffffffffffff2fe

#ifdef CONFIG_X86_64
/* LDT or TSS descriptor in the GDT.  16 bytes. */
struct segment_descriptor_64 {
	struct segment_descriptor s;
	u32 base_higher;
	u32 pad_zero;
};
#endif

static long kvm_vcpu_ioctl(struct file *file, unsigned int ioctl,
			   unsigned long arg);

static struct inode *kvmfs_inode(struct file_operations *fops)
{
	int error = -ENOMEM;
	struct inode *inode = new_inode(kvmfs_mnt->mnt_sb);

	if (!inode)
		goto eexit_1;

	inode->i_fop = fops;

	/*
	 * Mark the inode dirty from the very beginning,
	 * that way it will never be moved to the dirty
	 * list because mark_inode_dirty() will think
	 * that it already _is_ on the dirty list.
	 */
	inode->i_state = I_DIRTY;
	inode->i_mode = S_IRUSR | S_IWUSR;
	inode->i_uid = current->fsuid;
	inode->i_gid = current->fsgid;
	inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
	return inode;

eexit_1:
	return ERR_PTR(error);
}

static struct file *kvmfs_file(struct inode *inode, void *private_data)
{
	struct file *file = get_empty_filp();

	if (!file)
		return ERR_PTR(-ENFILE);

	file->f_path.mnt = mntget(kvmfs_mnt);
	file->f_path.dentry = d_alloc_anon(inode);
	if (!file->f_path.dentry)
		return ERR_PTR(-ENOMEM);
	file->f_mapping = inode->i_mapping;

	file->f_pos = 0;
	file->f_flags = O_RDWR;
	file->f_op = inode->i_fop;
	file->f_mode = FMODE_READ | FMODE_WRITE;
	file->f_version = 0;
	file->private_data = private_data;
	return file;
}

unsigned long segment_base(u16 selector)
{
	struct descriptor_table gdt;
	struct segment_descriptor *d;
	unsigned long table_base;
	typedef unsigned long ul;
	unsigned long v;

	if (selector == 0)
		return 0;

	asm ("sgdt %0" : "=m"(gdt));
	table_base = gdt.base;

	if (selector & 4) {		/* from ldt */
		u16 ldt_selector;

		asm ("sldt %0" : "=g"(ldt_selector));
		table_base = segment_base(ldt_selector);
	}
	d = (struct segment_descriptor *)(table_base + (selector & ~7));
	v = d->base_low | ((ul)d->base_mid << 16) | ((ul)d->base_high << 24);
#ifdef CONFIG_X86_64
	if (d->system == 0
	    && (d->type == 2 || d->type == 9 || d->type == 11))
		v |= ((ul)((struct segment_descriptor_64 *)d)->base_higher) << 32;
#endif
	return v;
}
EXPORT_SYMBOL_GPL(segment_base);
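
/*
 * Worked example (illustrative only, not part of the driver): for a
 * GDT descriptor with base_low = 0x8000, base_mid = 0x34 and
 * base_high = 0x12, segment_base() assembles the 32-bit base as
 *
 *	v = 0x8000 | (0x34 << 16) | (0x12 << 24) = 0x12348000
 *
 * On x86-64, 16-byte system descriptors (LDT, type 2; available/busy
 * TSS, types 9/11) additionally supply bits 63:32 via base_higher.
 */
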
static inline int valid_vcpu(int n)
{
	return likely(n >= 0 && n < KVM_MAX_VCPUS);
}

int kvm_read_guest(struct kvm_vcpu *vcpu, gva_t addr, unsigned long size,
		   void *dest)
{
	unsigned char *host_buf = dest;
	unsigned long req_size = size;

	while (size) {
		hpa_t paddr;
		unsigned now;
		unsigned offset;
		hva_t guest_buf;

		paddr = gva_to_hpa(vcpu, addr);

		if (is_error_hpa(paddr))
			break;

		guest_buf = (hva_t)kmap_atomic(
					pfn_to_page(paddr >> PAGE_SHIFT),
					KM_USER0);
		offset = addr & ~PAGE_MASK;
		guest_buf |= offset;
		now = min(size, PAGE_SIZE - offset);
		memcpy(host_buf, (void *)guest_buf, now);
		host_buf += now;
		addr += now;
		size -= now;
		kunmap_atomic((void *)(guest_buf & PAGE_MASK), KM_USER0);
	}
	return req_size - size;
}
EXPORT_SYMBOL_GPL(kvm_read_guest);

int kvm_write_guest(struct kvm_vcpu *vcpu, gva_t addr, unsigned long size,
		    void *data)
{
	unsigned char *host_buf = data;
	unsigned long req_size = size;

	while (size) {
		hpa_t paddr;
		unsigned now;
		unsigned offset;
		hva_t guest_buf;
		gfn_t gfn;

		paddr = gva_to_hpa(vcpu, addr);

		if (is_error_hpa(paddr))
			break;

		gfn = vcpu->mmu.gva_to_gpa(vcpu, addr) >> PAGE_SHIFT;
		mark_page_dirty(vcpu->kvm, gfn);
		guest_buf = (hva_t)kmap_atomic(
				pfn_to_page(paddr >> PAGE_SHIFT), KM_USER0);
		offset = addr & ~PAGE_MASK;
		guest_buf |= offset;
		now = min(size, PAGE_SIZE - offset);
		memcpy((void *)guest_buf, host_buf, now);
		host_buf += now;
		addr += now;
		size -= now;
		kunmap_atomic((void *)(guest_buf & PAGE_MASK), KM_USER0);
	}
	return req_size - size;
}
EXPORT_SYMBOL_GPL(kvm_write_guest);
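
/*
 * Usage sketch (illustrative, assumes a vcpu made current with
 * vcpu_load() and a guest-virtual address gva): both helpers return
 * the number of bytes actually copied, which falls short of the
 * request if a translation fails part way through:
 *
 *	u8 buf[64];
 *
 *	if (kvm_read_guest(vcpu, gva, sizeof(buf), buf) != sizeof(buf))
 *		;	// hit an unmapped page; handle the short copy
 *	kvm_write_guest(vcpu, gva, sizeof(buf), buf);
 */
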
/*
 * Switches to specified vcpu, until a matching vcpu_put()
 */
static void vcpu_load(struct kvm_vcpu *vcpu)
{
	mutex_lock(&vcpu->mutex);
	kvm_arch_ops->vcpu_load(vcpu);
}

/*
 * Switches to specified vcpu, until a matching vcpu_put(). Will return NULL
 * if the slot is not populated.
 */
static struct kvm_vcpu *vcpu_load_slot(struct kvm *kvm, int slot)
{
	struct kvm_vcpu *vcpu = &kvm->vcpus[slot];

	mutex_lock(&vcpu->mutex);
	if (!vcpu->vmcs) {
		mutex_unlock(&vcpu->mutex);
		return NULL;
	}
	kvm_arch_ops->vcpu_load(vcpu);
	return vcpu;
}

static void vcpu_put(struct kvm_vcpu *vcpu)
{
	kvm_arch_ops->vcpu_put(vcpu);
	mutex_unlock(&vcpu->mutex);
}
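
/*
 * Usage pattern (sketch): any code that touches vcpu state brackets
 * the access with vcpu_load()/vcpu_put(), so the per-vcpu mutex is
 * held and the hardware context is bound to the current cpu:
 *
 *	vcpu_load(vcpu);	// or vcpu = vcpu_load_slot(kvm, n)
 *	... read/modify registers, mmu state, msrs ...
 *	vcpu_put(vcpu);
 */
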
static struct kvm *kvm_create_vm(void)
{
	struct kvm *kvm = kzalloc(sizeof(struct kvm), GFP_KERNEL);
	int i;

	if (!kvm)
		return ERR_PTR(-ENOMEM);

	spin_lock_init(&kvm->lock);
	INIT_LIST_HEAD(&kvm->active_mmu_pages);
	for (i = 0; i < KVM_MAX_VCPUS; ++i) {
		struct kvm_vcpu *vcpu = &kvm->vcpus[i];

		mutex_init(&vcpu->mutex);
		vcpu->cpu = -1;
		vcpu->kvm = kvm;
		vcpu->mmu.root_hpa = INVALID_PAGE;
		INIT_LIST_HEAD(&vcpu->free_pages);
	}
	/*
	 * Add the vm to the global list once, outside the vcpu loop;
	 * re-adding the same list_head on every iteration would
	 * corrupt vm_list.
	 */
	spin_lock(&kvm_lock);
	list_add(&kvm->vm_list, &vm_list);
	spin_unlock(&kvm_lock);
	return kvm;
}

static int kvm_dev_open(struct inode *inode, struct file *filp)
{
	return 0;
}

/*
 * Free any memory in @free but not in @dont.
 */
static void kvm_free_physmem_slot(struct kvm_memory_slot *free,
				  struct kvm_memory_slot *dont)
{
	int i;

	if (!dont || free->phys_mem != dont->phys_mem)
		if (free->phys_mem) {
			for (i = 0; i < free->npages; ++i)
				if (free->phys_mem[i])
					__free_page(free->phys_mem[i]);
			vfree(free->phys_mem);
		}

	if (!dont || free->dirty_bitmap != dont->dirty_bitmap)
		vfree(free->dirty_bitmap);

	free->phys_mem = NULL;
	free->npages = 0;
	free->dirty_bitmap = NULL;
}

static void kvm_free_physmem(struct kvm *kvm)
{
	int i;

	for (i = 0; i < kvm->nmemslots; ++i)
		kvm_free_physmem_slot(&kvm->memslots[i], NULL);
}

static void free_pio_guest_pages(struct kvm_vcpu *vcpu)
{
	int i;

	for (i = 0; i < 2; ++i)
		if (vcpu->pio.guest_pages[i]) {
			__free_page(vcpu->pio.guest_pages[i]);
			vcpu->pio.guest_pages[i] = NULL;
		}
}

static void kvm_free_vcpu(struct kvm_vcpu *vcpu)
{
	if (!vcpu->vmcs)
		return;

	vcpu_load(vcpu);
	kvm_mmu_destroy(vcpu);
	vcpu_put(vcpu);
	kvm_arch_ops->vcpu_free(vcpu);
	free_page((unsigned long)vcpu->run);
	vcpu->run = NULL;
	free_page((unsigned long)vcpu->pio_data);
	vcpu->pio_data = NULL;
	free_pio_guest_pages(vcpu);
}

static void kvm_free_vcpus(struct kvm *kvm)
{
	unsigned int i;

	for (i = 0; i < KVM_MAX_VCPUS; ++i)
		kvm_free_vcpu(&kvm->vcpus[i]);
}

static int kvm_dev_release(struct inode *inode, struct file *filp)
{
	return 0;
}

static void kvm_destroy_vm(struct kvm *kvm)
{
	spin_lock(&kvm_lock);
	list_del(&kvm->vm_list);
	spin_unlock(&kvm_lock);
	kvm_free_vcpus(kvm);
	kvm_free_physmem(kvm);
	kfree(kvm);
}

static int kvm_vm_release(struct inode *inode, struct file *filp)
{
	struct kvm *kvm = filp->private_data;

	kvm_destroy_vm(kvm);
	return 0;
}

static void inject_gp(struct kvm_vcpu *vcpu)
{
	kvm_arch_ops->inject_gp(vcpu, 0);
}

/*
 * Load the pae pdptrs.  Return true if they are all valid.
 */
static int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3)
{
	gfn_t pdpt_gfn = cr3 >> PAGE_SHIFT;
	unsigned offset = ((cr3 & (PAGE_SIZE-1)) >> 5) << 2;
	int i;
	u64 pdpte;
	u64 *pdpt;
	int ret;
	struct page *page;

	spin_lock(&vcpu->kvm->lock);
	page = gfn_to_page(vcpu->kvm, pdpt_gfn);
	/* FIXME: !page - emulate? 0xff? */
	pdpt = kmap_atomic(page, KM_USER0);

	ret = 1;
	for (i = 0; i < 4; ++i) {
		pdpte = pdpt[offset + i];
		if ((pdpte & 1) && (pdpte & 0xfffffff0000001e6ull)) {
			ret = 0;
			goto out;
		}
	}

	for (i = 0; i < 4; ++i)
		vcpu->pdptrs[i] = pdpt[offset + i];

out:
	kunmap_atomic(pdpt, KM_USER0);
	spin_unlock(&vcpu->kvm->lock);

	return ret;
}
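
/*
 * Worked example (illustrative): in PAE mode the four pdptes live at
 * the 32-byte-aligned address cr3 & ~0x1f.  For cr3 = 0x12345040:
 *
 *	pdpt_gfn = 0x12345040 >> PAGE_SHIFT	= 0x12345
 *	offset   = ((0x040 >> 5) << 2)		= 8
 *
 * so, viewing the mapped page as a u64 array, the pdptes are
 * pdpt[8] through pdpt[11] (byte offset 0x40).
 */
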
void set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
{
	if (cr0 & CR0_RESEVED_BITS) {
		printk(KERN_DEBUG "set_cr0: 0x%lx #GP, reserved bits 0x%lx\n",
		       cr0, vcpu->cr0);
		inject_gp(vcpu);
		return;
	}

	if ((cr0 & CR0_NW_MASK) && !(cr0 & CR0_CD_MASK)) {
		printk(KERN_DEBUG "set_cr0: #GP, CD == 0 && NW == 1\n");
		inject_gp(vcpu);
		return;
	}

	if ((cr0 & CR0_PG_MASK) && !(cr0 & CR0_PE_MASK)) {
		printk(KERN_DEBUG "set_cr0: #GP, set PG flag "
		       "and a clear PE flag\n");
		inject_gp(vcpu);
		return;
	}

	if (!is_paging(vcpu) && (cr0 & CR0_PG_MASK)) {
#ifdef CONFIG_X86_64
		if ((vcpu->shadow_efer & EFER_LME)) {
			int cs_db, cs_l;

			if (!is_pae(vcpu)) {
				printk(KERN_DEBUG "set_cr0: #GP, start paging "
				       "in long mode while PAE is disabled\n");
				inject_gp(vcpu);
				return;
			}
			kvm_arch_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
			if (cs_l) {
				printk(KERN_DEBUG "set_cr0: #GP, start paging "
				       "in long mode while CS.L == 1\n");
				inject_gp(vcpu);
				return;
			}
		} else
#endif
		if (is_pae(vcpu) && !load_pdptrs(vcpu, vcpu->cr3)) {
			printk(KERN_DEBUG "set_cr0: #GP, pdptrs "
			       "reserved bits\n");
			inject_gp(vcpu);
			return;
		}
	}

	kvm_arch_ops->set_cr0(vcpu, cr0);
	vcpu->cr0 = cr0;

	spin_lock(&vcpu->kvm->lock);
	kvm_mmu_reset_context(vcpu);
	spin_unlock(&vcpu->kvm->lock);
	return;
}
EXPORT_SYMBOL_GPL(set_cr0);

void lmsw(struct kvm_vcpu *vcpu, unsigned long msw)
{
	kvm_arch_ops->decache_cr0_cr4_guest_bits(vcpu);
	set_cr0(vcpu, (vcpu->cr0 & ~0x0ful) | (msw & 0x0f));
}
EXPORT_SYMBOL_GPL(lmsw);

void set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
{
	if (cr4 & CR4_RESEVED_BITS) {
		printk(KERN_DEBUG "set_cr4: #GP, reserved bits\n");
		inject_gp(vcpu);
		return;
	}

	if (is_long_mode(vcpu)) {
		if (!(cr4 & CR4_PAE_MASK)) {
			printk(KERN_DEBUG "set_cr4: #GP, clearing PAE while "
			       "in long mode\n");
			inject_gp(vcpu);
			return;
		}
	} else if (is_paging(vcpu) && !is_pae(vcpu) && (cr4 & CR4_PAE_MASK)
		   && !load_pdptrs(vcpu, vcpu->cr3)) {
		printk(KERN_DEBUG "set_cr4: #GP, pdptrs reserved bits\n");
		inject_gp(vcpu);
		return;	/* don't fall through and load cr4 after a #GP */
	}

	if (cr4 & CR4_VMXE_MASK) {
		printk(KERN_DEBUG "set_cr4: #GP, setting VMXE\n");
		inject_gp(vcpu);
		return;
	}
	kvm_arch_ops->set_cr4(vcpu, cr4);
	spin_lock(&vcpu->kvm->lock);
	kvm_mmu_reset_context(vcpu);
	spin_unlock(&vcpu->kvm->lock);
}
EXPORT_SYMBOL_GPL(set_cr4);

void set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
{
	if (is_long_mode(vcpu)) {
		if (cr3 & CR3_L_MODE_RESEVED_BITS) {
			printk(KERN_DEBUG "set_cr3: #GP, reserved bits\n");
			inject_gp(vcpu);
			return;
		}
	} else {
		if (cr3 & CR3_RESEVED_BITS) {
			printk(KERN_DEBUG "set_cr3: #GP, reserved bits\n");
			inject_gp(vcpu);
			return;
		}
		if (is_paging(vcpu) && is_pae(vcpu) &&
		    !load_pdptrs(vcpu, cr3)) {
			printk(KERN_DEBUG "set_cr3: #GP, pdptrs "
			       "reserved bits\n");
			inject_gp(vcpu);
			return;
		}
	}

	vcpu->cr3 = cr3;
	spin_lock(&vcpu->kvm->lock);
	/*
	 * Does the new cr3 value map to physical memory? (Note, we
	 * catch an invalid cr3 even in real-mode, because it would
	 * cause trouble later on when we turn on paging anyway.)
	 *
	 * A real CPU would silently accept an invalid cr3 and would
	 * attempt to use it - with largely undefined (and often hard
	 * to debug) behavior on the guest side.
	 */
	if (unlikely(!gfn_to_memslot(vcpu->kvm, cr3 >> PAGE_SHIFT)))
		inject_gp(vcpu);
	else
		vcpu->mmu.new_cr3(vcpu);
	spin_unlock(&vcpu->kvm->lock);
}
EXPORT_SYMBOL_GPL(set_cr3);

void set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8)
{
	if (cr8 & CR8_RESEVED_BITS) {
		printk(KERN_DEBUG "set_cr8: #GP, reserved bits 0x%lx\n", cr8);
		inject_gp(vcpu);
		return;
	}
	vcpu->cr8 = cr8;
}
EXPORT_SYMBOL_GPL(set_cr8);

void fx_init(struct kvm_vcpu *vcpu)
{
	struct __attribute__ ((__packed__)) fx_image_s {
		u16 control;	/* fcw */
		u16 status;	/* fsw */
		u16 tag;	/* ftw */
		u16 opcode;	/* fop */
		u64 ip;		/* fpu ip */
		u64 operand;	/* fpu dp */
		u32 mxcsr;
		u32 mxcsr_mask;
	} *fx_image;

	fx_save(vcpu->host_fx_image);
	fpu_init();
	fx_save(vcpu->guest_fx_image);
	fx_restore(vcpu->host_fx_image);

	fx_image = (struct fx_image_s *)vcpu->guest_fx_image;
	fx_image->mxcsr = 0x1f80;
	memset(vcpu->guest_fx_image + sizeof(struct fx_image_s),
	       0, FX_IMAGE_SIZE - sizeof(struct fx_image_s));
}
EXPORT_SYMBOL_GPL(fx_init);

static void do_remove_write_access(struct kvm_vcpu *vcpu, int slot)
{
	spin_lock(&vcpu->kvm->lock);
	kvm_mmu_slot_remove_write_access(vcpu, slot);
	spin_unlock(&vcpu->kvm->lock);
}

/*
 * Allocate some memory and give it an address in the guest physical address
 * space.
 *
 * Discontiguous memory is allowed, mostly for framebuffers.
 */
static int kvm_vm_ioctl_set_memory_region(struct kvm *kvm,
					  struct kvm_memory_region *mem)
{
	int r;
	gfn_t base_gfn;
	unsigned long npages;
	unsigned long i;
	struct kvm_memory_slot *memslot;
	struct kvm_memory_slot old, new;
	int memory_config_version;

	r = -EINVAL;
	/* General sanity checks */
	if (mem->memory_size & (PAGE_SIZE - 1))
		goto out;
	if (mem->guest_phys_addr & (PAGE_SIZE - 1))
		goto out;
	if (mem->slot >= KVM_MEMORY_SLOTS)
		goto out;
	if (mem->guest_phys_addr + mem->memory_size < mem->guest_phys_addr)
		goto out;

	memslot = &kvm->memslots[mem->slot];
	base_gfn = mem->guest_phys_addr >> PAGE_SHIFT;
	npages = mem->memory_size >> PAGE_SHIFT;

	if (!npages)
		mem->flags &= ~KVM_MEM_LOG_DIRTY_PAGES;

raced:
	spin_lock(&kvm->lock);

	memory_config_version = kvm->memory_config_version;
	new = old = *memslot;

	new.base_gfn = base_gfn;
	new.npages = npages;
	new.flags = mem->flags;

	/* Disallow changing a memory slot's size. */
	r = -EINVAL;
	if (npages && old.npages && npages != old.npages)
		goto out_unlock;

	/* Check for overlaps */
	r = -EEXIST;
	for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
		struct kvm_memory_slot *s = &kvm->memslots[i];

		if (s == memslot)
			continue;
		if (!((base_gfn + npages <= s->base_gfn) ||
		      (base_gfn >= s->base_gfn + s->npages)))
			goto out_unlock;
	}
	/*
	 * Do memory allocations outside lock.  memory_config_version will
	 * detect any races.
	 */
	spin_unlock(&kvm->lock);

	/* Deallocate if slot is being removed */
	if (!npages)
		new.phys_mem = NULL;

	/* Free page dirty bitmap if unneeded */
	if (!(new.flags & KVM_MEM_LOG_DIRTY_PAGES))
		new.dirty_bitmap = NULL;

	r = -ENOMEM;

	/* Allocate if a slot is being created */
	if (npages && !new.phys_mem) {
		new.phys_mem = vmalloc(npages * sizeof(struct page *));

		if (!new.phys_mem)
			goto out_free;

		memset(new.phys_mem, 0, npages * sizeof(struct page *));
		for (i = 0; i < npages; ++i) {
			new.phys_mem[i] = alloc_page(GFP_HIGHUSER
						     | __GFP_ZERO);
			if (!new.phys_mem[i])
				goto out_free;
			set_page_private(new.phys_mem[i], 0);
		}
	}

	/* Allocate page dirty bitmap if needed */
	if ((new.flags & KVM_MEM_LOG_DIRTY_PAGES) && !new.dirty_bitmap) {
		unsigned dirty_bytes = ALIGN(npages, BITS_PER_LONG) / 8;

		new.dirty_bitmap = vmalloc(dirty_bytes);
		if (!new.dirty_bitmap)
			goto out_free;
		memset(new.dirty_bitmap, 0, dirty_bytes);
	}

	spin_lock(&kvm->lock);

	if (memory_config_version != kvm->memory_config_version) {
		spin_unlock(&kvm->lock);
		kvm_free_physmem_slot(&new, &old);
		goto raced;
	}

	r = -EAGAIN;
	if (kvm->busy)
		goto out_unlock;

	if (mem->slot >= kvm->nmemslots)
		kvm->nmemslots = mem->slot + 1;

	*memslot = new;
	++kvm->memory_config_version;

	spin_unlock(&kvm->lock);

	for (i = 0; i < KVM_MAX_VCPUS; ++i) {
		struct kvm_vcpu *vcpu;

		vcpu = vcpu_load_slot(kvm, i);
		if (!vcpu)
			continue;
		if (new.flags & KVM_MEM_LOG_DIRTY_PAGES)
			do_remove_write_access(vcpu, mem->slot);
		kvm_mmu_reset_context(vcpu);
		vcpu_put(vcpu);
	}

	kvm_free_physmem_slot(&old, &new);
	return 0;

out_unlock:
	spin_unlock(&kvm->lock);
out_free:
	kvm_free_physmem_slot(&new, &old);
out:
	return r;
}
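
/*
 * Userspace view (sketch of the kvm_memory_region API implemented
 * above; error handling omitted): guest RAM is supplied by filling a
 * struct kvm_memory_region and issuing KVM_SET_MEMORY_REGION on the
 * VM fd:
 *
 *	struct kvm_memory_region mem = {
 *		.slot = 0,
 *		.flags = KVM_MEM_LOG_DIRTY_PAGES,	// optional
 *		.guest_phys_addr = 0,
 *		.memory_size = 64 << 20,		// 64 MB
 *	};
 *
 *	ioctl(vm_fd, KVM_SET_MEMORY_REGION, &mem);
 */
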
/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
static int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
				      struct kvm_dirty_log *log)
{
	struct kvm_memory_slot *memslot;
	int r, i;
	int n;
	int cleared;
	unsigned long any = 0;

	spin_lock(&kvm->lock);

	/*
	 * Prevent changes to guest memory configuration even while the lock
	 * is not taken.
	 */
	++kvm->busy;
	spin_unlock(&kvm->lock);
	r = -EINVAL;
	if (log->slot >= KVM_MEMORY_SLOTS)
		goto out;

	memslot = &kvm->memslots[log->slot];
	r = -ENOENT;
	if (!memslot->dirty_bitmap)
		goto out;

	n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;

	for (i = 0; !any && i < n/sizeof(long); ++i)
		any = memslot->dirty_bitmap[i];

	r = -EFAULT;
	if (copy_to_user(log->dirty_bitmap, memslot->dirty_bitmap, n))
		goto out;

	if (any) {
		cleared = 0;
		for (i = 0; i < KVM_MAX_VCPUS; ++i) {
			struct kvm_vcpu *vcpu;

			vcpu = vcpu_load_slot(kvm, i);
			if (!vcpu)
				continue;
			if (!cleared) {
				do_remove_write_access(vcpu, log->slot);
				memset(memslot->dirty_bitmap, 0, n);
				cleared = 1;
			}
			kvm_arch_ops->tlb_flush(vcpu);
			vcpu_put(vcpu);
		}
	}

	r = 0;

out:
	spin_lock(&kvm->lock);
	--kvm->busy;
	spin_unlock(&kvm->lock);
	return r;
}
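
/*
 * Userspace view (sketch; assumes a bitmap sized for the slot, with
 * NPAGES standing in for the slot's page count): for a slot created
 * with KVM_MEM_LOG_DIRTY_PAGES, KVM_GET_DIRTY_LOG copies out one bit
 * per guest page and clears the kernel copy, so each call reports
 * pages dirtied since the previous one:
 *
 *	unsigned long bitmap[NPAGES / BITS_PER_LONG];
 *	struct kvm_dirty_log log = {
 *		.slot = 0,
 *		.dirty_bitmap = bitmap,
 *	};
 *
 *	ioctl(vm_fd, KVM_GET_DIRTY_LOG, &log);
 */
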
/*
 * Set a new alias region.  Aliases map a portion of physical memory into
 * another portion.  This is useful for memory windows, for example the PC
 * VGA region.
 */
static int kvm_vm_ioctl_set_memory_alias(struct kvm *kvm,
					 struct kvm_memory_alias *alias)
{
	int r, n;
	struct kvm_mem_alias *p;

	r = -EINVAL;
	/* General sanity checks */
	if (alias->memory_size & (PAGE_SIZE - 1))
		goto out;
	if (alias->guest_phys_addr & (PAGE_SIZE - 1))
		goto out;
	if (alias->slot >= KVM_ALIAS_SLOTS)
		goto out;
	if (alias->guest_phys_addr + alias->memory_size
	    < alias->guest_phys_addr)
		goto out;
	if (alias->target_phys_addr + alias->memory_size
	    < alias->target_phys_addr)
		goto out;

	spin_lock(&kvm->lock);

	p = &kvm->aliases[alias->slot];
	p->base_gfn = alias->guest_phys_addr >> PAGE_SHIFT;
	p->npages = alias->memory_size >> PAGE_SHIFT;
	p->target_gfn = alias->target_phys_addr >> PAGE_SHIFT;

	for (n = KVM_ALIAS_SLOTS; n > 0; --n)
		if (kvm->aliases[n - 1].npages)
			break;
	kvm->naliases = n;

	spin_unlock(&kvm->lock);

	vcpu_load(&kvm->vcpus[0]);
	spin_lock(&kvm->lock);
	kvm_mmu_zap_all(&kvm->vcpus[0]);
	spin_unlock(&kvm->lock);
	vcpu_put(&kvm->vcpus[0]);

	return 0;

out:
	return r;
}

static gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn)
{
	int i;
	struct kvm_mem_alias *alias;

	for (i = 0; i < kvm->naliases; ++i) {
		alias = &kvm->aliases[i];
		if (gfn >= alias->base_gfn
		    && gfn < alias->base_gfn + alias->npages)
			return alias->target_gfn + gfn - alias->base_gfn;
	}
	return gfn;
}
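
/*
 * Worked example (illustrative): with an alias slot covering 16 pages
 * where base_gfn = 0xa0 and target_gfn = 0x80, unalias_gfn()
 * translates gfn 0xa3 to 0x80 + (0xa3 - 0xa0) = 0x83.  A gfn outside
 * every alias is returned unchanged.
 */
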
static struct kvm_memory_slot *__gfn_to_memslot(struct kvm *kvm, gfn_t gfn)
{
	int i;

	for (i = 0; i < kvm->nmemslots; ++i) {
		struct kvm_memory_slot *memslot = &kvm->memslots[i];

		if (gfn >= memslot->base_gfn
		    && gfn < memslot->base_gfn + memslot->npages)
			return memslot;
	}
	return NULL;
}

struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn)
{
	gfn = unalias_gfn(kvm, gfn);
	return __gfn_to_memslot(kvm, gfn);
}

struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn)
{
	struct kvm_memory_slot *slot;

	gfn = unalias_gfn(kvm, gfn);
	slot = __gfn_to_memslot(kvm, gfn);
	if (!slot)
		return NULL;
	return slot->phys_mem[gfn - slot->base_gfn];
}
EXPORT_SYMBOL_GPL(gfn_to_page);

void mark_page_dirty(struct kvm *kvm, gfn_t gfn)
{
	int i;
	struct kvm_memory_slot *memslot = NULL;
	unsigned long rel_gfn;

	for (i = 0; i < kvm->nmemslots; ++i) {
		memslot = &kvm->memslots[i];

		if (gfn >= memslot->base_gfn
		    && gfn < memslot->base_gfn + memslot->npages) {

			if (!memslot || !memslot->dirty_bitmap)
				return;

			rel_gfn = gfn - memslot->base_gfn;

			/* avoid RMW */
			if (!test_bit(rel_gfn, memslot->dirty_bitmap))
				set_bit(rel_gfn, memslot->dirty_bitmap);
			return;
		}
	}
}

static int emulator_read_std(unsigned long addr,
			     unsigned long *val,
			     unsigned int bytes,
			     struct x86_emulate_ctxt *ctxt)
{
	struct kvm_vcpu *vcpu = ctxt->vcpu;
	void *data = val;

	while (bytes) {
		gpa_t gpa = vcpu->mmu.gva_to_gpa(vcpu, addr);
		unsigned offset = addr & (PAGE_SIZE-1);
		unsigned tocopy = min(bytes, (unsigned)PAGE_SIZE - offset);
		unsigned long pfn;
		struct page *page;
		void *page_virt;

		if (gpa == UNMAPPED_GVA)
			return X86EMUL_PROPAGATE_FAULT;
		pfn = gpa >> PAGE_SHIFT;
		page = gfn_to_page(vcpu->kvm, pfn);
		if (!page)
			return X86EMUL_UNHANDLEABLE;
		page_virt = kmap_atomic(page, KM_USER0);

		memcpy(data, page_virt + offset, tocopy);

		kunmap_atomic(page_virt, KM_USER0);

		bytes -= tocopy;
		data += tocopy;
		addr += tocopy;
	}

	return X86EMUL_CONTINUE;
}

static int emulator_write_std(unsigned long addr,
			      unsigned long val,
			      unsigned int bytes,
			      struct x86_emulate_ctxt *ctxt)
{
	printk(KERN_ERR "emulator_write_std: addr %lx n %d\n",
	       addr, bytes);
	return X86EMUL_UNHANDLEABLE;
}

static int emulator_read_emulated(unsigned long addr,
				  unsigned long *val,
				  unsigned int bytes,
				  struct x86_emulate_ctxt *ctxt)
{
	struct kvm_vcpu *vcpu = ctxt->vcpu;

	if (vcpu->mmio_read_completed) {
		memcpy(val, vcpu->mmio_data, bytes);
		vcpu->mmio_read_completed = 0;
		return X86EMUL_CONTINUE;
	} else if (emulator_read_std(addr, val, bytes, ctxt)
		   == X86EMUL_CONTINUE)
		return X86EMUL_CONTINUE;
	else {
		gpa_t gpa = vcpu->mmu.gva_to_gpa(vcpu, addr);

		if (gpa == UNMAPPED_GVA)
			return X86EMUL_PROPAGATE_FAULT;
		vcpu->mmio_needed = 1;
		vcpu->mmio_phys_addr = gpa;
		vcpu->mmio_size = bytes;
		vcpu->mmio_is_write = 0;

		return X86EMUL_UNHANDLEABLE;
	}
}

static int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
			       unsigned long val, int bytes)
{
	struct page *page;
	void *virt;

	if (((gpa + bytes - 1) >> PAGE_SHIFT) != (gpa >> PAGE_SHIFT))
		return 0;
	page = gfn_to_page(vcpu->kvm, gpa >> PAGE_SHIFT);
	if (!page)
		return 0;
	kvm_mmu_pre_write(vcpu, gpa, bytes);
	mark_page_dirty(vcpu->kvm, gpa >> PAGE_SHIFT);
	virt = kmap_atomic(page, KM_USER0);
	memcpy(virt + offset_in_page(gpa), &val, bytes);
	kunmap_atomic(virt, KM_USER0);
	kvm_mmu_post_write(vcpu, gpa, bytes);
	return 1;
}

static int emulator_write_emulated(unsigned long addr,
				   unsigned long val,
				   unsigned int bytes,
				   struct x86_emulate_ctxt *ctxt)
{
	struct kvm_vcpu *vcpu = ctxt->vcpu;
	gpa_t gpa = vcpu->mmu.gva_to_gpa(vcpu, addr);

	if (gpa == UNMAPPED_GVA) {
		kvm_arch_ops->inject_page_fault(vcpu, addr, 2);
		return X86EMUL_PROPAGATE_FAULT;
	}

	if (emulator_write_phys(vcpu, gpa, val, bytes))
		return X86EMUL_CONTINUE;

	vcpu->mmio_needed = 1;
	vcpu->mmio_phys_addr = gpa;
	vcpu->mmio_size = bytes;
	vcpu->mmio_is_write = 1;
	memcpy(vcpu->mmio_data, &val, bytes);

	return X86EMUL_CONTINUE;
}

static int emulator_cmpxchg_emulated(unsigned long addr,
				     unsigned long old,
				     unsigned long new,
				     unsigned int bytes,
				     struct x86_emulate_ctxt *ctxt)
{
	static int reported;

	if (!reported) {
		reported = 1;
		printk(KERN_WARNING "kvm: emulating exchange as write\n");
	}
	return emulator_write_emulated(addr, new, bytes, ctxt);
}

#ifdef CONFIG_X86_32
static int emulator_cmpxchg8b_emulated(unsigned long addr,
				       unsigned long old_lo,
				       unsigned long old_hi,
				       unsigned long new_lo,
				       unsigned long new_hi,
				       struct x86_emulate_ctxt *ctxt)
{
	static int reported;
	int r;

	if (!reported) {
		reported = 1;
		printk(KERN_WARNING "kvm: emulating exchange8b as write\n");
	}
	r = emulator_write_emulated(addr, new_lo, 4, ctxt);
	if (r != X86EMUL_CONTINUE)
		return r;
	return emulator_write_emulated(addr+4, new_hi, 4, ctxt);
}
#endif

static unsigned long get_segment_base(struct kvm_vcpu *vcpu, int seg)
{
	return kvm_arch_ops->get_segment_base(vcpu, seg);
}

int emulate_invlpg(struct kvm_vcpu *vcpu, gva_t address)
{
	return X86EMUL_CONTINUE;
}

int emulate_clts(struct kvm_vcpu *vcpu)
{
	unsigned long cr0;

	kvm_arch_ops->decache_cr0_cr4_guest_bits(vcpu);
	cr0 = vcpu->cr0 & ~CR0_TS_MASK;
	kvm_arch_ops->set_cr0(vcpu, cr0);
	return X86EMUL_CONTINUE;
}

int emulator_get_dr(struct x86_emulate_ctxt *ctxt, int dr, unsigned long *dest)
{
	struct kvm_vcpu *vcpu = ctxt->vcpu;

	switch (dr) {
	case 0 ... 3:
		*dest = kvm_arch_ops->get_dr(vcpu, dr);
		return X86EMUL_CONTINUE;
	default:
		printk(KERN_DEBUG "%s: unexpected dr %u\n",
		       __FUNCTION__, dr);
		return X86EMUL_UNHANDLEABLE;
	}
}

int emulator_set_dr(struct x86_emulate_ctxt *ctxt, int dr, unsigned long value)
{
	unsigned long mask = (ctxt->mode == X86EMUL_MODE_PROT64) ? ~0ULL : ~0U;
	int exception;

	kvm_arch_ops->set_dr(ctxt->vcpu, dr, value & mask, &exception);
	if (exception) {
		/* FIXME: better handling */
		return X86EMUL_UNHANDLEABLE;
	}
	return X86EMUL_CONTINUE;
}

static void report_emulation_failure(struct x86_emulate_ctxt *ctxt)
{
	static int reported;
	u8 opcodes[4];
	unsigned long rip = ctxt->vcpu->rip;
	unsigned long rip_linear;

	rip_linear = rip + get_segment_base(ctxt->vcpu, VCPU_SREG_CS);

	if (reported)
		return;

	emulator_read_std(rip_linear, (void *)opcodes, 4, ctxt);

	printk(KERN_ERR "emulation failed but !mmio_needed?"
	       " rip %lx %02x %02x %02x %02x\n",
	       rip, opcodes[0], opcodes[1], opcodes[2], opcodes[3]);
	reported = 1;
}

struct x86_emulate_ops emulate_ops = {
	.read_std            = emulator_read_std,
	.write_std           = emulator_write_std,
	.read_emulated       = emulator_read_emulated,
	.write_emulated      = emulator_write_emulated,
	.cmpxchg_emulated    = emulator_cmpxchg_emulated,
#ifdef CONFIG_X86_32
	.cmpxchg8b_emulated  = emulator_cmpxchg8b_emulated,
#endif
};

int emulate_instruction(struct kvm_vcpu *vcpu,
			struct kvm_run *run,
			unsigned long cr2,
			u16 error_code)
{
	struct x86_emulate_ctxt emulate_ctxt;
	int r;
	int cs_db, cs_l;

	kvm_arch_ops->cache_regs(vcpu);
	kvm_arch_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);

	emulate_ctxt.vcpu = vcpu;
	emulate_ctxt.eflags = kvm_arch_ops->get_rflags(vcpu);
	emulate_ctxt.cr2 = cr2;
	emulate_ctxt.mode = (emulate_ctxt.eflags & X86_EFLAGS_VM)
		? X86EMUL_MODE_REAL : cs_l
		? X86EMUL_MODE_PROT64 : cs_db
		? X86EMUL_MODE_PROT32 : X86EMUL_MODE_PROT16;

	if (emulate_ctxt.mode == X86EMUL_MODE_PROT64) {
		emulate_ctxt.cs_base = 0;
		emulate_ctxt.ds_base = 0;
		emulate_ctxt.es_base = 0;
		emulate_ctxt.ss_base = 0;
	} else {
		emulate_ctxt.cs_base = get_segment_base(vcpu, VCPU_SREG_CS);
		emulate_ctxt.ds_base = get_segment_base(vcpu, VCPU_SREG_DS);
		emulate_ctxt.es_base = get_segment_base(vcpu, VCPU_SREG_ES);
		emulate_ctxt.ss_base = get_segment_base(vcpu, VCPU_SREG_SS);
	}

	emulate_ctxt.gs_base = get_segment_base(vcpu, VCPU_SREG_GS);
	emulate_ctxt.fs_base = get_segment_base(vcpu, VCPU_SREG_FS);

	vcpu->mmio_is_write = 0;
	r = x86_emulate_memop(&emulate_ctxt, &emulate_ops);

	if ((r || vcpu->mmio_is_write) && run) {
		run->mmio.phys_addr = vcpu->mmio_phys_addr;
		memcpy(run->mmio.data, vcpu->mmio_data, 8);
		run->mmio.len = vcpu->mmio_size;
		run->mmio.is_write = vcpu->mmio_is_write;
	}

	if (r) {
		if (kvm_mmu_unprotect_page_virt(vcpu, cr2))
			return EMULATE_DONE;
		if (!vcpu->mmio_needed) {
			report_emulation_failure(&emulate_ctxt);
			return EMULATE_FAIL;
		}
		return EMULATE_DO_MMIO;
	}

	kvm_arch_ops->decache_regs(vcpu);
	kvm_arch_ops->set_rflags(vcpu, emulate_ctxt.eflags);

	if (vcpu->mmio_is_write)
		return EMULATE_DO_MMIO;

	return EMULATE_DONE;
}
EXPORT_SYMBOL_GPL(emulate_instruction);

int kvm_hypercall(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	unsigned long nr, a0, a1, a2, a3, a4, a5, ret;

	kvm_arch_ops->cache_regs(vcpu);
	ret = -KVM_EINVAL;
#ifdef CONFIG_X86_64
	if (is_long_mode(vcpu)) {
		nr = vcpu->regs[VCPU_REGS_RAX];
		a0 = vcpu->regs[VCPU_REGS_RDI];
		a1 = vcpu->regs[VCPU_REGS_RSI];
		a2 = vcpu->regs[VCPU_REGS_RDX];
		a3 = vcpu->regs[VCPU_REGS_RCX];
		a4 = vcpu->regs[VCPU_REGS_R8];
		a5 = vcpu->regs[VCPU_REGS_R9];
	} else
#endif
	{
		nr = vcpu->regs[VCPU_REGS_RBX] & -1u;
		a0 = vcpu->regs[VCPU_REGS_RAX] & -1u;
		a1 = vcpu->regs[VCPU_REGS_RCX] & -1u;
		a2 = vcpu->regs[VCPU_REGS_RDX] & -1u;
		a3 = vcpu->regs[VCPU_REGS_RSI] & -1u;
		a4 = vcpu->regs[VCPU_REGS_RDI] & -1u;
		a5 = vcpu->regs[VCPU_REGS_RBP] & -1u;
	}
	switch (nr) {
	default:
		run->hypercall.args[0] = a0;
		run->hypercall.args[1] = a1;
		run->hypercall.args[2] = a2;
		run->hypercall.args[3] = a3;
		run->hypercall.args[4] = a4;
		run->hypercall.args[5] = a5;
		run->hypercall.ret = ret;
		run->hypercall.longmode = is_long_mode(vcpu);
		kvm_arch_ops->decache_regs(vcpu);
		return 0;
	}
	vcpu->regs[VCPU_REGS_RAX] = ret;
	kvm_arch_ops->decache_regs(vcpu);
	return 1;
}
EXPORT_SYMBOL_GPL(kvm_hypercall);
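
/*
 * Guest-side register convention (sketch, as decoded above): in long
 * mode the hypercall number arrives in RAX with arguments in RDI,
 * RSI, RDX, RCX, R8 and R9; otherwise the number is taken from RBX
 * with arguments in RAX, RCX, RDX, RSI, RDI and RBP.  The result is
 * returned in RAX.  A guest reaches this path by calling the
 * instruction sequence that kvm_arch_ops->patch_hypercall() installs
 * (see vcpu_register_para() below).
 */
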
static u64 mk_cr_64(u64 curr_cr, u32 new_val)
{
	return (curr_cr & ~((1ULL << 32) - 1)) | new_val;
}

void realmode_lgdt(struct kvm_vcpu *vcpu, u16 limit, unsigned long base)
{
	struct descriptor_table dt = { limit, base };

	kvm_arch_ops->set_gdt(vcpu, &dt);
}

void realmode_lidt(struct kvm_vcpu *vcpu, u16 limit, unsigned long base)
{
	struct descriptor_table dt = { limit, base };

	kvm_arch_ops->set_idt(vcpu, &dt);
}

void realmode_lmsw(struct kvm_vcpu *vcpu, unsigned long msw,
		   unsigned long *rflags)
{
	lmsw(vcpu, msw);
	*rflags = kvm_arch_ops->get_rflags(vcpu);
}

unsigned long realmode_get_cr(struct kvm_vcpu *vcpu, int cr)
{
	kvm_arch_ops->decache_cr0_cr4_guest_bits(vcpu);
	switch (cr) {
	case 0:
		return vcpu->cr0;
	case 2:
		return vcpu->cr2;
	case 3:
		return vcpu->cr3;
	case 4:
		return vcpu->cr4;
	default:
		vcpu_printf(vcpu, "%s: unexpected cr %u\n", __FUNCTION__, cr);
		return 0;
	}
}

void realmode_set_cr(struct kvm_vcpu *vcpu, int cr, unsigned long val,
		     unsigned long *rflags)
{
	switch (cr) {
	case 0:
		set_cr0(vcpu, mk_cr_64(vcpu->cr0, val));
		*rflags = kvm_arch_ops->get_rflags(vcpu);
		break;
	case 2:
		vcpu->cr2 = val;
		break;
	case 3:
		set_cr3(vcpu, val);
		break;
	case 4:
		set_cr4(vcpu, mk_cr_64(vcpu->cr4, val));
		break;
	default:
		vcpu_printf(vcpu, "%s: unexpected cr %u\n", __FUNCTION__, cr);
	}
}

/*
 * Register the para guest with the host:
 */
static int vcpu_register_para(struct kvm_vcpu *vcpu, gpa_t para_state_gpa)
{
	struct kvm_vcpu_para_state *para_state;
	hpa_t para_state_hpa, hypercall_hpa;
	struct page *para_state_page;
	unsigned char *hypercall;
	gpa_t hypercall_gpa;

	printk(KERN_DEBUG "kvm: guest trying to enter paravirtual mode\n");
	printk(KERN_DEBUG ".... para_state_gpa: %08Lx\n", para_state_gpa);

	/*
	 * Needs to be page aligned:
	 */
	if (para_state_gpa != PAGE_ALIGN(para_state_gpa))
		goto err_gp;

	para_state_hpa = gpa_to_hpa(vcpu, para_state_gpa);
	printk(KERN_DEBUG ".... para_state_hpa: %08Lx\n", para_state_hpa);
	if (is_error_hpa(para_state_hpa))
		goto err_gp;

	mark_page_dirty(vcpu->kvm, para_state_gpa >> PAGE_SHIFT);
	para_state_page = pfn_to_page(para_state_hpa >> PAGE_SHIFT);
	para_state = kmap_atomic(para_state_page, KM_USER0);

	printk(KERN_DEBUG ".... guest version: %d\n", para_state->guest_version);
	printk(KERN_DEBUG ".... size: %d\n", para_state->size);

	para_state->host_version = KVM_PARA_API_VERSION;
	/*
	 * We cannot support guests that try to register themselves
	 * with a newer API version than the host supports:
	 */
	if (para_state->guest_version > KVM_PARA_API_VERSION) {
		para_state->ret = -KVM_EINVAL;
		goto err_kunmap_skip;
	}

	hypercall_gpa = para_state->hypercall_gpa;
	hypercall_hpa = gpa_to_hpa(vcpu, hypercall_gpa);
	printk(KERN_DEBUG ".... hypercall_hpa: %08Lx\n", hypercall_hpa);
	if (is_error_hpa(hypercall_hpa)) {
		para_state->ret = -KVM_EINVAL;
		goto err_kunmap_skip;
	}

	printk(KERN_DEBUG "kvm: para guest successfully registered.\n");
	vcpu->para_state_page = para_state_page;
	vcpu->para_state_gpa = para_state_gpa;
	vcpu->hypercall_gpa = hypercall_gpa;

	mark_page_dirty(vcpu->kvm, hypercall_gpa >> PAGE_SHIFT);
	hypercall = kmap_atomic(pfn_to_page(hypercall_hpa >> PAGE_SHIFT),
				KM_USER1) + (hypercall_hpa & ~PAGE_MASK);
	kvm_arch_ops->patch_hypercall(vcpu, hypercall);
	kunmap_atomic(hypercall, KM_USER1);

	para_state->ret = 0;
err_kunmap_skip:
	kunmap_atomic(para_state, KM_USER0);
	return 0;
err_gp:
	return 1;
}

int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
{
	u64 data;

	switch (msr) {
	case 0xc0010010: /* SYSCFG */
	case 0xc0010015: /* HWCR */
	case MSR_IA32_PLATFORM_ID:
	case MSR_IA32_P5_MC_ADDR:
	case MSR_IA32_P5_MC_TYPE:
	case MSR_IA32_MC0_CTL:
	case MSR_IA32_MCG_STATUS:
	case MSR_IA32_MCG_CAP:
	case MSR_IA32_MC0_MISC:
	case MSR_IA32_MC0_MISC+4:
	case MSR_IA32_MC0_MISC+8:
	case MSR_IA32_MC0_MISC+12:
	case MSR_IA32_MC0_MISC+16:
	case MSR_IA32_UCODE_REV:
	case MSR_IA32_PERF_STATUS:
	/* MTRR registers */
	case 0xfe:
	case 0x200 ... 0x2ff:
		data = 0;
		break;
	case 0xcd: /* fsb frequency */
		data = 3;
		break;
	case MSR_IA32_APICBASE:
		data = vcpu->apic_base;
		break;
	case MSR_IA32_MISC_ENABLE:
		data = vcpu->ia32_misc_enable_msr;
		break;
#ifdef CONFIG_X86_64
	case MSR_EFER:
		data = vcpu->shadow_efer;
		break;
#endif
	default:
		printk(KERN_ERR "kvm: unhandled rdmsr: 0x%x\n", msr);
		return 1;
	}
	*pdata = data;
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_get_msr_common);

/*
 * Reads an msr value (of 'msr_index') into 'pdata'.
 * Returns 0 on success, non-0 otherwise.
 * Assumes vcpu_load() was already called.
 */
static int get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
{
	return kvm_arch_ops->get_msr(vcpu, msr_index, pdata);
}

#ifdef CONFIG_X86_64

static void set_efer(struct kvm_vcpu *vcpu, u64 efer)
{
	if (efer & EFER_RESERVED_BITS) {
		printk(KERN_DEBUG "set_efer: 0x%llx #GP, reserved bits\n",
		       efer);
		inject_gp(vcpu);
		return;
	}

	if (is_paging(vcpu)
	    && (vcpu->shadow_efer & EFER_LME) != (efer & EFER_LME)) {
		printk(KERN_DEBUG "set_efer: #GP, change LME while paging\n");
		inject_gp(vcpu);
		return;
	}

	kvm_arch_ops->set_efer(vcpu, efer);

	efer &= ~EFER_LMA;
	efer |= vcpu->shadow_efer & EFER_LMA;

	vcpu->shadow_efer = efer;
}

#endif

int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)
{
	switch (msr) {
#ifdef CONFIG_X86_64
	case MSR_EFER:
		set_efer(vcpu, data);
		break;
#endif
	case MSR_IA32_MC0_STATUS:
		printk(KERN_WARNING "%s: MSR_IA32_MC0_STATUS 0x%llx, nop\n",
		       __FUNCTION__, data);
		break;
	case MSR_IA32_MCG_STATUS:
		printk(KERN_WARNING "%s: MSR_IA32_MCG_STATUS 0x%llx, nop\n",
		       __FUNCTION__, data);
		break;
	case MSR_IA32_UCODE_REV:
	case MSR_IA32_UCODE_WRITE:
	case 0x200 ... 0x2ff: /* MTRRs */
		break;
	case MSR_IA32_APICBASE:
		vcpu->apic_base = data;
		break;
	case MSR_IA32_MISC_ENABLE:
		vcpu->ia32_misc_enable_msr = data;
		break;
	/*
	 * This is the 'probe whether the host is KVM' logic:
	 */
	case MSR_KVM_API_MAGIC:
		return vcpu_register_para(vcpu, data);
	default:
		printk(KERN_ERR "kvm: unhandled wrmsr: 0x%x\n", msr);
		return 1;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_set_msr_common);
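
/*
 * Guest probe (sketch): a paravirtual guest detects KVM by writing
 * the guest-physical address of its para_state page to
 * MSR_KVM_API_MAGIC; the wrmsr lands here and is routed to
 * vcpu_register_para() above, which patches the hypercall page and
 * reports the host API version back through para_state.
 */
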
/*
 * Writes msr value into the appropriate "register".
 * Returns 0 on success, non-0 otherwise.
 * Assumes vcpu_load() was already called.
 */
static int set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
{
	return kvm_arch_ops->set_msr(vcpu, msr_index, data);
}

void kvm_resched(struct kvm_vcpu *vcpu)
{
	if (!need_resched())
		return;
	vcpu_put(vcpu);
	cond_resched();
	vcpu_load(vcpu);
}
EXPORT_SYMBOL_GPL(kvm_resched);

void load_msrs(struct vmx_msr_entry *e, int n)
{
	int i;

	for (i = 0; i < n; ++i)
		wrmsrl(e[i].index, e[i].data);
}
EXPORT_SYMBOL_GPL(load_msrs);

void save_msrs(struct vmx_msr_entry *e, int n)
{
	int i;

	for (i = 0; i < n; ++i)
		rdmsrl(e[i].index, e[i].data);
}
EXPORT_SYMBOL_GPL(save_msrs);

  1373. void kvm_emulate_cpuid(struct kvm_vcpu *vcpu)
  1374. {
  1375. int i;
  1376. u32 function;
  1377. struct kvm_cpuid_entry *e, *best;
  1378. kvm_arch_ops->cache_regs(vcpu);
  1379. function = vcpu->regs[VCPU_REGS_RAX];
  1380. vcpu->regs[VCPU_REGS_RAX] = 0;
  1381. vcpu->regs[VCPU_REGS_RBX] = 0;
  1382. vcpu->regs[VCPU_REGS_RCX] = 0;
  1383. vcpu->regs[VCPU_REGS_RDX] = 0;
  1384. best = NULL;
  1385. for (i = 0; i < vcpu->cpuid_nent; ++i) {
  1386. e = &vcpu->cpuid_entries[i];
  1387. if (e->function == function) {
  1388. best = e;
  1389. break;
  1390. }
  1391. /*
  1392. * Both basic or both extended?
  1393. */
  1394. if (((e->function ^ function) & 0x80000000) == 0)
  1395. if (!best || e->function > best->function)
  1396. best = e;
  1397. }
  1398. if (best) {
  1399. vcpu->regs[VCPU_REGS_RAX] = best->eax;
  1400. vcpu->regs[VCPU_REGS_RBX] = best->ebx;
  1401. vcpu->regs[VCPU_REGS_RCX] = best->ecx;
  1402. vcpu->regs[VCPU_REGS_RDX] = best->edx;
  1403. }
  1404. kvm_arch_ops->decache_regs(vcpu);
  1405. kvm_arch_ops->skip_emulated_instruction(vcpu);
  1406. }
  1407. EXPORT_SYMBOL_GPL(kvm_emulate_cpuid);
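
/*
 * Illustration of the fallback rule above (an example, not a normative
 * statement): with a cpuid table holding leaves 0x0..0x2 and
 * 0x80000000..0x80000004, a guest query for 0x80000007 finds no exact
 * match, so the loop settles on 0x80000004, the highest leaf whose top
 * bit agrees with the query.  A basic leaf such as 0x2 is never
 * considered for it, since (e->function ^ function) & 0x80000000 is
 * then non-zero.
 */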
static int pio_copy_data(struct kvm_vcpu *vcpu)
{
	void *p = vcpu->pio_data;
	void *q;
	unsigned bytes;
	int nr_pages = vcpu->pio.guest_pages[1] ? 2 : 1;

	kvm_arch_ops->vcpu_put(vcpu);
	q = vmap(vcpu->pio.guest_pages, nr_pages, VM_READ|VM_WRITE,
		 PAGE_KERNEL);
	if (!q) {
		kvm_arch_ops->vcpu_load(vcpu);
		free_pio_guest_pages(vcpu);
		return -ENOMEM;
	}
	q += vcpu->pio.guest_page_offset;
	bytes = vcpu->pio.size * vcpu->pio.cur_count;
	if (vcpu->pio.in)
		memcpy(q, p, bytes);
	else
		memcpy(p, q, bytes);
	q -= vcpu->pio.guest_page_offset;
	vunmap(q);
	kvm_arch_ops->vcpu_load(vcpu);
	free_pio_guest_pages(vcpu);
	return 0;
}

static int complete_pio(struct kvm_vcpu *vcpu)
{
	struct kvm_pio_request *io = &vcpu->pio;
	long delta;
	int r;

	kvm_arch_ops->cache_regs(vcpu);

	if (!io->string) {
		if (io->in)
			memcpy(&vcpu->regs[VCPU_REGS_RAX], vcpu->pio_data,
			       io->size);
	} else {
		if (io->in) {
			r = pio_copy_data(vcpu);
			if (r) {
				kvm_arch_ops->cache_regs(vcpu);
				return r;
			}
		}

		delta = 1;
		if (io->rep) {
			delta *= io->cur_count;
			/*
			 * The size of the register should really depend on
			 * current address size.
			 */
			vcpu->regs[VCPU_REGS_RCX] -= delta;
		}
		if (io->down)
			delta = -delta;
		delta *= io->size;
		if (io->in)
			vcpu->regs[VCPU_REGS_RDI] += delta;
		else
			vcpu->regs[VCPU_REGS_RSI] += delta;
	}

	vcpu->run->io_completed = 0;

	kvm_arch_ops->decache_regs(vcpu);

	io->count -= io->cur_count;
	io->cur_count = 0;

	if (!io->count)
		kvm_arch_ops->skip_emulated_instruction(vcpu);
	return 0;
}
int kvm_setup_pio(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
		  int size, unsigned long count, int string, int down,
		  gva_t address, int rep, unsigned port)
{
	unsigned now, in_page;
	int i;
	int nr_pages = 1;
	struct page *page;

	vcpu->run->exit_reason = KVM_EXIT_IO;
	vcpu->run->io.direction = in ? KVM_EXIT_IO_IN : KVM_EXIT_IO_OUT;
	vcpu->run->io.size = size;
	vcpu->run->io.data_offset = KVM_PIO_PAGE_OFFSET * PAGE_SIZE;
	vcpu->run->io.count = count;
	vcpu->run->io.port = port;
	vcpu->pio.count = count;
	vcpu->pio.cur_count = count;
	vcpu->pio.size = size;
	vcpu->pio.in = in;
	vcpu->pio.string = string;
	vcpu->pio.down = down;
	vcpu->pio.guest_page_offset = offset_in_page(address);
	vcpu->pio.rep = rep;

	if (!string) {
		kvm_arch_ops->cache_regs(vcpu);
		memcpy(vcpu->pio_data, &vcpu->regs[VCPU_REGS_RAX], 4);
		kvm_arch_ops->decache_regs(vcpu);
		return 0;
	}

	if (!count) {
		kvm_arch_ops->skip_emulated_instruction(vcpu);
		return 1;
	}

	now = min(count, PAGE_SIZE / size);

	if (!down)
		in_page = PAGE_SIZE - offset_in_page(address);
	else
		in_page = offset_in_page(address) + size;
	now = min(count, (unsigned long)in_page / size);
	if (!now) {
		/*
		 * String I/O straddles page boundary.  Pin two guest pages
		 * so that we satisfy atomicity constraints.  Do just one
		 * transaction to avoid complexity.
		 */
		nr_pages = 2;
		now = 1;
	}
	if (down) {
		/*
		 * String I/O in reverse.  Yuck.  Kill the guest, fix later.
		 */
		printk(KERN_ERR "kvm: guest string pio down\n");
		inject_gp(vcpu);
		return 1;
	}
	vcpu->run->io.count = now;
	vcpu->pio.cur_count = now;

	for (i = 0; i < nr_pages; ++i) {
		spin_lock(&vcpu->kvm->lock);
		page = gva_to_page(vcpu, address + i * PAGE_SIZE);
		if (page)
			get_page(page);
		vcpu->pio.guest_pages[i] = page;
		spin_unlock(&vcpu->kvm->lock);
		if (!page) {
			inject_gp(vcpu);
			free_pio_guest_pages(vcpu);
			return 1;
		}
	}

	if (!vcpu->pio.in)
		return pio_copy_data(vcpu);
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_setup_pio);
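
/*
 * User-space side of the contract established above, as a rough sketch
 * (illustration only; the vcpu fd, "run" pointer and the port handlers
 * are assumed to come from the surrounding program).  After KVM_RUN
 * returns with KVM_EXIT_IO, the bytes to transfer live in the pio page,
 * which is the second page of the vcpu mmap:
 *
 *	char *p = (char *)run + run->io.data_offset;
 *	unsigned i;
 *
 *	for (i = 0; i < run->io.count; ++i, p += run->io.size) {
 *		if (run->io.direction == KVM_EXIT_IO_IN)
 *			handle_in(run->io.port, run->io.size, p);
 *		else
 *			handle_out(run->io.port, run->io.size, p);
 *	}
 *	run->io_completed = 1;	(then invoke KVM_RUN again)
 */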
static int kvm_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	int r;
	sigset_t sigsaved;

	vcpu_load(vcpu);

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

	/* re-sync apic's tpr */
	vcpu->cr8 = kvm_run->cr8;

	if (kvm_run->io_completed) {
		if (vcpu->pio.cur_count) {
			r = complete_pio(vcpu);
			if (r)
				goto out;
		} else {
			memcpy(vcpu->mmio_data, kvm_run->mmio.data, 8);
			vcpu->mmio_read_completed = 1;
		}
	}

	vcpu->mmio_needed = 0;

	if (kvm_run->exit_reason == KVM_EXIT_HYPERCALL) {
		kvm_arch_ops->cache_regs(vcpu);
		vcpu->regs[VCPU_REGS_RAX] = kvm_run->hypercall.ret;
		kvm_arch_ops->decache_regs(vcpu);
	}

	r = kvm_arch_ops->run(vcpu, kvm_run);

out:
	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

	vcpu_put(vcpu);
	return r;
}
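
/*
 * A minimal user-space run loop, sketched for illustration (not part of
 * this file; "vcpu_fd" is assumed, and the mmap length should come from
 * KVM_GET_VCPU_MMAP_SIZE, which reports two pages in this version):
 *
 *	struct kvm_run *run = mmap(NULL, 2 * PAGE_SIZE,
 *				   PROT_READ | PROT_WRITE, MAP_SHARED,
 *				   vcpu_fd, 0);
 *
 *	for (;;) {
 *		ioctl(vcpu_fd, KVM_RUN, 0);
 *		switch (run->exit_reason) {
 *		case KVM_EXIT_IO:
 *			(emulate the port access, set run->io_completed)
 *			break;
 *		case KVM_EXIT_MMIO:
 *			(emulate the access, set run->io_completed)
 *			break;
 *		}
 *	}
 */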
static int kvm_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu,
				   struct kvm_regs *regs)
{
	vcpu_load(vcpu);

	kvm_arch_ops->cache_regs(vcpu);

	regs->rax = vcpu->regs[VCPU_REGS_RAX];
	regs->rbx = vcpu->regs[VCPU_REGS_RBX];
	regs->rcx = vcpu->regs[VCPU_REGS_RCX];
	regs->rdx = vcpu->regs[VCPU_REGS_RDX];
	regs->rsi = vcpu->regs[VCPU_REGS_RSI];
	regs->rdi = vcpu->regs[VCPU_REGS_RDI];
	regs->rsp = vcpu->regs[VCPU_REGS_RSP];
	regs->rbp = vcpu->regs[VCPU_REGS_RBP];
#ifdef CONFIG_X86_64
	regs->r8 = vcpu->regs[VCPU_REGS_R8];
	regs->r9 = vcpu->regs[VCPU_REGS_R9];
	regs->r10 = vcpu->regs[VCPU_REGS_R10];
	regs->r11 = vcpu->regs[VCPU_REGS_R11];
	regs->r12 = vcpu->regs[VCPU_REGS_R12];
	regs->r13 = vcpu->regs[VCPU_REGS_R13];
	regs->r14 = vcpu->regs[VCPU_REGS_R14];
	regs->r15 = vcpu->regs[VCPU_REGS_R15];
#endif

	regs->rip = vcpu->rip;
	regs->rflags = kvm_arch_ops->get_rflags(vcpu);

	/*
	 * Don't leak debug flags in case they were set for guest debugging
	 */
	if (vcpu->guest_debug.enabled && vcpu->guest_debug.singlestep)
		regs->rflags &= ~(X86_EFLAGS_TF | X86_EFLAGS_RF);

	vcpu_put(vcpu);

	return 0;
}

static int kvm_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu,
				   struct kvm_regs *regs)
{
	vcpu_load(vcpu);

	vcpu->regs[VCPU_REGS_RAX] = regs->rax;
	vcpu->regs[VCPU_REGS_RBX] = regs->rbx;
	vcpu->regs[VCPU_REGS_RCX] = regs->rcx;
	vcpu->regs[VCPU_REGS_RDX] = regs->rdx;
	vcpu->regs[VCPU_REGS_RSI] = regs->rsi;
	vcpu->regs[VCPU_REGS_RDI] = regs->rdi;
	vcpu->regs[VCPU_REGS_RSP] = regs->rsp;
	vcpu->regs[VCPU_REGS_RBP] = regs->rbp;
#ifdef CONFIG_X86_64
	vcpu->regs[VCPU_REGS_R8] = regs->r8;
	vcpu->regs[VCPU_REGS_R9] = regs->r9;
	vcpu->regs[VCPU_REGS_R10] = regs->r10;
	vcpu->regs[VCPU_REGS_R11] = regs->r11;
	vcpu->regs[VCPU_REGS_R12] = regs->r12;
	vcpu->regs[VCPU_REGS_R13] = regs->r13;
	vcpu->regs[VCPU_REGS_R14] = regs->r14;
	vcpu->regs[VCPU_REGS_R15] = regs->r15;
#endif

	vcpu->rip = regs->rip;
	kvm_arch_ops->set_rflags(vcpu, regs->rflags);

	kvm_arch_ops->decache_regs(vcpu);

	vcpu_put(vcpu);

	return 0;
}
static void get_segment(struct kvm_vcpu *vcpu,
			struct kvm_segment *var, int seg)
{
	return kvm_arch_ops->get_segment(vcpu, var, seg);
}

static int kvm_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				    struct kvm_sregs *sregs)
{
	struct descriptor_table dt;

	vcpu_load(vcpu);

	get_segment(vcpu, &sregs->cs, VCPU_SREG_CS);
	get_segment(vcpu, &sregs->ds, VCPU_SREG_DS);
	get_segment(vcpu, &sregs->es, VCPU_SREG_ES);
	get_segment(vcpu, &sregs->fs, VCPU_SREG_FS);
	get_segment(vcpu, &sregs->gs, VCPU_SREG_GS);
	get_segment(vcpu, &sregs->ss, VCPU_SREG_SS);

	get_segment(vcpu, &sregs->tr, VCPU_SREG_TR);
	get_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR);

	kvm_arch_ops->get_idt(vcpu, &dt);
	sregs->idt.limit = dt.limit;
	sregs->idt.base = dt.base;
	kvm_arch_ops->get_gdt(vcpu, &dt);
	sregs->gdt.limit = dt.limit;
	sregs->gdt.base = dt.base;

	kvm_arch_ops->decache_cr0_cr4_guest_bits(vcpu);
	sregs->cr0 = vcpu->cr0;
	sregs->cr2 = vcpu->cr2;
	sregs->cr3 = vcpu->cr3;
	sregs->cr4 = vcpu->cr4;
	sregs->cr8 = vcpu->cr8;
	sregs->efer = vcpu->shadow_efer;
	sregs->apic_base = vcpu->apic_base;

	memcpy(sregs->interrupt_bitmap, vcpu->irq_pending,
	       sizeof sregs->interrupt_bitmap);

	vcpu_put(vcpu);

	return 0;
}

static void set_segment(struct kvm_vcpu *vcpu,
			struct kvm_segment *var, int seg)
{
	return kvm_arch_ops->set_segment(vcpu, var, seg);
}

static int kvm_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				    struct kvm_sregs *sregs)
{
	int mmu_reset_needed = 0;
	int i;
	struct descriptor_table dt;

	vcpu_load(vcpu);

	dt.limit = sregs->idt.limit;
	dt.base = sregs->idt.base;
	kvm_arch_ops->set_idt(vcpu, &dt);
	dt.limit = sregs->gdt.limit;
	dt.base = sregs->gdt.base;
	kvm_arch_ops->set_gdt(vcpu, &dt);

	vcpu->cr2 = sregs->cr2;
	mmu_reset_needed |= vcpu->cr3 != sregs->cr3;
	vcpu->cr3 = sregs->cr3;

	vcpu->cr8 = sregs->cr8;

	mmu_reset_needed |= vcpu->shadow_efer != sregs->efer;
#ifdef CONFIG_X86_64
	kvm_arch_ops->set_efer(vcpu, sregs->efer);
#endif
	vcpu->apic_base = sregs->apic_base;

	kvm_arch_ops->decache_cr0_cr4_guest_bits(vcpu);

	mmu_reset_needed |= vcpu->cr0 != sregs->cr0;
	kvm_arch_ops->set_cr0(vcpu, sregs->cr0);

	mmu_reset_needed |= vcpu->cr4 != sregs->cr4;
	kvm_arch_ops->set_cr4(vcpu, sregs->cr4);

	if (!is_long_mode(vcpu) && is_pae(vcpu))
		load_pdptrs(vcpu, vcpu->cr3);

	if (mmu_reset_needed)
		kvm_mmu_reset_context(vcpu);

	memcpy(vcpu->irq_pending, sregs->interrupt_bitmap,
	       sizeof vcpu->irq_pending);
	vcpu->irq_summary = 0;
	for (i = 0; i < NR_IRQ_WORDS; ++i)
		if (vcpu->irq_pending[i])
			__set_bit(i, &vcpu->irq_summary);

	set_segment(vcpu, &sregs->cs, VCPU_SREG_CS);
	set_segment(vcpu, &sregs->ds, VCPU_SREG_DS);
	set_segment(vcpu, &sregs->es, VCPU_SREG_ES);
	set_segment(vcpu, &sregs->fs, VCPU_SREG_FS);
	set_segment(vcpu, &sregs->gs, VCPU_SREG_GS);
	set_segment(vcpu, &sregs->ss, VCPU_SREG_SS);

	set_segment(vcpu, &sregs->tr, VCPU_SREG_TR);
	set_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR);

	vcpu_put(vcpu);

	return 0;
}
/*
 * List of msr numbers which we expose to userspace through KVM_GET_MSRS
 * and KVM_SET_MSRS, and which can be enumerated with
 * KVM_GET_MSR_INDEX_LIST.
 *
 * This list is modified at module load time to reflect the
 * capabilities of the host cpu.
 */
static u32 msrs_to_save[] = {
	MSR_IA32_SYSENTER_CS, MSR_IA32_SYSENTER_ESP, MSR_IA32_SYSENTER_EIP,
	MSR_K6_STAR,
#ifdef CONFIG_X86_64
	MSR_CSTAR, MSR_KERNEL_GS_BASE, MSR_SYSCALL_MASK, MSR_LSTAR,
#endif
	MSR_IA32_TIME_STAMP_COUNTER,
};

static unsigned num_msrs_to_save;

static u32 emulated_msrs[] = {
	MSR_IA32_MISC_ENABLE,
};

static __init void kvm_init_msr_list(void)
{
	u32 dummy[2];
	unsigned i, j;

	for (i = j = 0; i < ARRAY_SIZE(msrs_to_save); i++) {
		if (rdmsr_safe(msrs_to_save[i], &dummy[0], &dummy[1]) < 0)
			continue;
		if (j < i)
			msrs_to_save[j] = msrs_to_save[i];
		j++;
	}
	num_msrs_to_save = j;
}
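
/*
 * rdmsr_safe() catches the #GP a missing MSR would raise and returns a
 * negative value instead, so the loop above compacts msrs_to_save in
 * place: on a host lacking, say, MSR_K6_STAR, that entry is skipped,
 * later entries slide down one slot, and num_msrs_to_save ends up
 * smaller than ARRAY_SIZE(msrs_to_save).
 */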
/*
 * Adapt set_msr() to msr_io()'s calling convention
 */
static int do_set_msr(struct kvm_vcpu *vcpu, unsigned index, u64 *data)
{
	return set_msr(vcpu, index, *data);
}

/*
 * Read or write a bunch of msrs. All parameters are kernel addresses.
 *
 * @return number of msrs set successfully.
 */
static int __msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs *msrs,
		    struct kvm_msr_entry *entries,
		    int (*do_msr)(struct kvm_vcpu *vcpu,
				  unsigned index, u64 *data))
{
	int i;

	vcpu_load(vcpu);

	for (i = 0; i < msrs->nmsrs; ++i)
		if (do_msr(vcpu, entries[i].index, &entries[i].data))
			break;

	vcpu_put(vcpu);

	return i;
}

/*
 * Read or write a bunch of msrs. Parameters are user addresses.
 *
 * @return number of msrs set successfully.
 */
static int msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs __user *user_msrs,
		  int (*do_msr)(struct kvm_vcpu *vcpu,
				unsigned index, u64 *data),
		  int writeback)
{
	struct kvm_msrs msrs;
	struct kvm_msr_entry *entries;
	int r, n;
	unsigned size;

	r = -EFAULT;
	if (copy_from_user(&msrs, user_msrs, sizeof msrs))
		goto out;

	r = -E2BIG;
	if (msrs.nmsrs >= MAX_IO_MSRS)
		goto out;

	r = -ENOMEM;
	size = sizeof(struct kvm_msr_entry) * msrs.nmsrs;
	entries = vmalloc(size);
	if (!entries)
		goto out;

	r = -EFAULT;
	if (copy_from_user(entries, user_msrs->entries, size))
		goto out_free;

	r = n = __msr_io(vcpu, &msrs, entries, do_msr);
	if (r < 0)
		goto out_free;

	r = -EFAULT;
	if (writeback && copy_to_user(user_msrs->entries, entries, size))
		goto out_free;

	r = n;

out_free:
	vfree(entries);
out:
	return r;
}
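
/*
 * Example caller, from user space (a sketch; vcpu_fd is assumed).  The
 * variable-length entry array follows the kvm_msrs header directly:
 *
 *	struct {
 *		struct kvm_msrs hdr;
 *		struct kvm_msr_entry entries[1];
 *	} m = {
 *		.hdr.nmsrs = 1,
 *		.entries[0].index = MSR_IA32_SYSENTER_CS,
 *	};
 *	int n = ioctl(vcpu_fd, KVM_GET_MSRS, &m);
 *
 * On success n is 1 and m.entries[0].data holds the value; a short
 * count means one of the accesses failed.
 */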
/*
 * Translate a guest virtual address to a guest physical address.
 */
static int kvm_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				    struct kvm_translation *tr)
{
	unsigned long vaddr = tr->linear_address;
	gpa_t gpa;

	vcpu_load(vcpu);
	spin_lock(&vcpu->kvm->lock);
	gpa = vcpu->mmu.gva_to_gpa(vcpu, vaddr);
	tr->physical_address = gpa;
	tr->valid = gpa != UNMAPPED_GVA;
	tr->writeable = 1;
	tr->usermode = 0;
	spin_unlock(&vcpu->kvm->lock);
	vcpu_put(vcpu);

	return 0;
}

static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
				    struct kvm_interrupt *irq)
{
	if (irq->irq < 0 || irq->irq >= 256)
		return -EINVAL;
	vcpu_load(vcpu);

	set_bit(irq->irq, vcpu->irq_pending);
	set_bit(irq->irq / BITS_PER_LONG, &vcpu->irq_summary);

	vcpu_put(vcpu);

	return 0;
}

static int kvm_vcpu_ioctl_debug_guest(struct kvm_vcpu *vcpu,
				      struct kvm_debug_guest *dbg)
{
	int r;

	vcpu_load(vcpu);

	r = kvm_arch_ops->set_guest_debug(vcpu, dbg);

	vcpu_put(vcpu);

	return r;
}
static struct page *kvm_vcpu_nopage(struct vm_area_struct *vma,
				    unsigned long address,
				    int *type)
{
	struct kvm_vcpu *vcpu = vma->vm_file->private_data;
	unsigned long pgoff;
	struct page *page;

	*type = VM_FAULT_MINOR;
	pgoff = ((address - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
	if (pgoff == 0)
		page = virt_to_page(vcpu->run);
	else if (pgoff == KVM_PIO_PAGE_OFFSET)
		page = virt_to_page(vcpu->pio_data);
	else
		return NOPAGE_SIGBUS;
	get_page(page);
	return page;
}

static struct vm_operations_struct kvm_vcpu_vm_ops = {
	.nopage = kvm_vcpu_nopage,
};

static int kvm_vcpu_mmap(struct file *file, struct vm_area_struct *vma)
{
	vma->vm_ops = &kvm_vcpu_vm_ops;
	return 0;
}

static int kvm_vcpu_release(struct inode *inode, struct file *filp)
{
	struct kvm_vcpu *vcpu = filp->private_data;

	fput(vcpu->kvm->filp);
	return 0;
}

static struct file_operations kvm_vcpu_fops = {
	.release = kvm_vcpu_release,
	.unlocked_ioctl = kvm_vcpu_ioctl,
	.compat_ioctl = kvm_vcpu_ioctl,
	.mmap = kvm_vcpu_mmap,
};

/*
 * Allocates an inode for the vcpu.
 */
static int create_vcpu_fd(struct kvm_vcpu *vcpu)
{
	int fd, r;
	struct inode *inode;
	struct file *file;

	atomic_inc(&vcpu->kvm->filp->f_count);
	inode = kvmfs_inode(&kvm_vcpu_fops);
	if (IS_ERR(inode)) {
		r = PTR_ERR(inode);
		goto out1;
	}

	file = kvmfs_file(inode, vcpu);
	if (IS_ERR(file)) {
		r = PTR_ERR(file);
		goto out2;
	}

	r = get_unused_fd();
	if (r < 0)
		goto out3;
	fd = r;
	fd_install(fd, file);

	return fd;

out3:
	fput(file);
out2:
	iput(inode);
out1:
	fput(vcpu->kvm->filp);
	return r;
}
/*
 * Creates some virtual cpus.  Good luck creating more than one.
 */
static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, int n)
{
	int r;
	struct kvm_vcpu *vcpu;
	struct page *page;

	r = -EINVAL;
	if (!valid_vcpu(n))
		goto out;

	vcpu = &kvm->vcpus[n];

	mutex_lock(&vcpu->mutex);

	if (vcpu->vmcs) {
		mutex_unlock(&vcpu->mutex);
		return -EEXIST;
	}

	page = alloc_page(GFP_KERNEL | __GFP_ZERO);
	r = -ENOMEM;
	if (!page)
		goto out_unlock;
	vcpu->run = page_address(page);

	page = alloc_page(GFP_KERNEL | __GFP_ZERO);
	r = -ENOMEM;
	if (!page)
		goto out_free_run;
	vcpu->pio_data = page_address(page);

	vcpu->host_fx_image = (char *)ALIGN((hva_t)vcpu->fx_buf,
					    FX_IMAGE_ALIGN);
	vcpu->guest_fx_image = vcpu->host_fx_image + FX_IMAGE_SIZE;
	vcpu->cr0 = 0x10;

	r = kvm_arch_ops->vcpu_create(vcpu);
	if (r < 0)
		goto out_free_vcpus;

	r = kvm_mmu_create(vcpu);
	if (r < 0)
		goto out_free_vcpus;

	kvm_arch_ops->vcpu_load(vcpu);
	r = kvm_mmu_setup(vcpu);
	if (r >= 0)
		r = kvm_arch_ops->vcpu_setup(vcpu);
	vcpu_put(vcpu);

	if (r < 0)
		goto out_free_vcpus;

	r = create_vcpu_fd(vcpu);
	if (r < 0)
		goto out_free_vcpus;

	return r;

out_free_vcpus:
	kvm_free_vcpu(vcpu);
out_free_run:
	free_page((unsigned long)vcpu->run);
	vcpu->run = NULL;
out_unlock:
	mutex_unlock(&vcpu->mutex);
out:
	return r;
}
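
/*
 * Typical creation sequence as seen from user space (a sketch; error
 * handling omitted):
 *
 *	kvm_fd  = open("/dev/kvm", O_RDWR);
 *	vm_fd   = ioctl(kvm_fd, KVM_CREATE_VM, 0);
 *	vcpu_fd = ioctl(vm_fd, KVM_CREATE_VCPU, 0);
 *	len     = ioctl(kvm_fd, KVM_GET_VCPU_MMAP_SIZE, 0);
 *	run     = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		       vcpu_fd, 0);
 */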
static int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu,
				    struct kvm_cpuid *cpuid,
				    struct kvm_cpuid_entry __user *entries)
{
	int r;

	r = -E2BIG;
	if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
		goto out;
	r = -EFAULT;
	if (copy_from_user(&vcpu->cpuid_entries, entries,
			   cpuid->nent * sizeof(struct kvm_cpuid_entry)))
		goto out;
	vcpu->cpuid_nent = cpuid->nent;
	return 0;

out:
	return r;
}

static int kvm_vcpu_ioctl_set_sigmask(struct kvm_vcpu *vcpu, sigset_t *sigset)
{
	if (sigset) {
		sigdelsetmask(sigset, sigmask(SIGKILL)|sigmask(SIGSTOP));
		vcpu->sigset_active = 1;
		vcpu->sigset = *sigset;
	} else
		vcpu->sigset_active = 0;
	return 0;
}

/*
 * fxsave fpu state.  Taken from x86_64/processor.h.  To be killed when
 * we have asm/x86/processor.h
 */
struct fxsave {
	u16	cwd;
	u16	swd;
	u16	twd;
	u16	fop;
	u64	rip;
	u64	rdp;
	u32	mxcsr;
	u32	mxcsr_mask;
	u32	st_space[32];	/* 8*16 bytes for each FP-reg = 128 bytes */
#ifdef CONFIG_X86_64
	u32	xmm_space[64];	/* 16*16 bytes for each XMM-reg = 256 bytes */
#else
	u32	xmm_space[32];	/* 8*16 bytes for each XMM-reg = 128 bytes */
#endif
};
static int kvm_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	struct fxsave *fxsave = (struct fxsave *)vcpu->guest_fx_image;

	vcpu_load(vcpu);

	memcpy(fpu->fpr, fxsave->st_space, 128);
	fpu->fcw = fxsave->cwd;
	fpu->fsw = fxsave->swd;
	fpu->ftwx = fxsave->twd;
	fpu->last_opcode = fxsave->fop;
	fpu->last_ip = fxsave->rip;
	fpu->last_dp = fxsave->rdp;
	memcpy(fpu->xmm, fxsave->xmm_space, sizeof fxsave->xmm_space);

	vcpu_put(vcpu);

	return 0;
}

static int kvm_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	struct fxsave *fxsave = (struct fxsave *)vcpu->guest_fx_image;

	vcpu_load(vcpu);

	memcpy(fxsave->st_space, fpu->fpr, 128);
	fxsave->cwd = fpu->fcw;
	fxsave->swd = fpu->fsw;
	fxsave->twd = fpu->ftwx;
	fxsave->fop = fpu->last_opcode;
	fxsave->rip = fpu->last_ip;
	fxsave->rdp = fpu->last_dp;
	memcpy(fxsave->xmm_space, fpu->xmm, sizeof fxsave->xmm_space);

	vcpu_put(vcpu);

	return 0;
}
static long kvm_vcpu_ioctl(struct file *filp,
			   unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	int r = -EINVAL;

	switch (ioctl) {
	case KVM_RUN:
		r = -EINVAL;
		if (arg)
			goto out;
		r = kvm_vcpu_ioctl_run(vcpu, vcpu->run);
		break;
	case KVM_GET_REGS: {
		struct kvm_regs kvm_regs;

		memset(&kvm_regs, 0, sizeof kvm_regs);
		r = kvm_vcpu_ioctl_get_regs(vcpu, &kvm_regs);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, &kvm_regs, sizeof kvm_regs))
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_REGS: {
		struct kvm_regs kvm_regs;

		r = -EFAULT;
		if (copy_from_user(&kvm_regs, argp, sizeof kvm_regs))
			goto out;
		r = kvm_vcpu_ioctl_set_regs(vcpu, &kvm_regs);
		if (r)
			goto out;
		r = 0;
		break;
	}
	case KVM_GET_SREGS: {
		struct kvm_sregs kvm_sregs;

		memset(&kvm_sregs, 0, sizeof kvm_sregs);
		r = kvm_vcpu_ioctl_get_sregs(vcpu, &kvm_sregs);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, &kvm_sregs, sizeof kvm_sregs))
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_SREGS: {
		struct kvm_sregs kvm_sregs;

		r = -EFAULT;
		if (copy_from_user(&kvm_sregs, argp, sizeof kvm_sregs))
			goto out;
		r = kvm_vcpu_ioctl_set_sregs(vcpu, &kvm_sregs);
		if (r)
			goto out;
		r = 0;
		break;
	}
	case KVM_TRANSLATE: {
		struct kvm_translation tr;

		r = -EFAULT;
		if (copy_from_user(&tr, argp, sizeof tr))
			goto out;
		r = kvm_vcpu_ioctl_translate(vcpu, &tr);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, &tr, sizeof tr))
			goto out;
		r = 0;
		break;
	}
	case KVM_INTERRUPT: {
		struct kvm_interrupt irq;

		r = -EFAULT;
		if (copy_from_user(&irq, argp, sizeof irq))
			goto out;
		r = kvm_vcpu_ioctl_interrupt(vcpu, &irq);
		if (r)
			goto out;
		r = 0;
		break;
	}
	case KVM_DEBUG_GUEST: {
		struct kvm_debug_guest dbg;

		r = -EFAULT;
		if (copy_from_user(&dbg, argp, sizeof dbg))
			goto out;
		r = kvm_vcpu_ioctl_debug_guest(vcpu, &dbg);
		if (r)
			goto out;
		r = 0;
		break;
	}
	case KVM_GET_MSRS:
		r = msr_io(vcpu, argp, get_msr, 1);
		break;
	case KVM_SET_MSRS:
		r = msr_io(vcpu, argp, do_set_msr, 0);
		break;
	case KVM_SET_CPUID: {
		struct kvm_cpuid __user *cpuid_arg = argp;
		struct kvm_cpuid cpuid;

		r = -EFAULT;
		if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
			goto out;
		r = kvm_vcpu_ioctl_set_cpuid(vcpu, &cpuid, cpuid_arg->entries);
		if (r)
			goto out;
		break;
	}
	case KVM_SET_SIGNAL_MASK: {
		struct kvm_signal_mask __user *sigmask_arg = argp;
		struct kvm_signal_mask kvm_sigmask;
		sigset_t sigset, *p;

		p = NULL;
		if (argp) {
			r = -EFAULT;
			if (copy_from_user(&kvm_sigmask, argp,
					   sizeof kvm_sigmask))
				goto out;
			r = -EINVAL;
			if (kvm_sigmask.len != sizeof sigset)
				goto out;
			r = -EFAULT;
			if (copy_from_user(&sigset, sigmask_arg->sigset,
					   sizeof sigset))
				goto out;
			p = &sigset;
		}
		r = kvm_vcpu_ioctl_set_sigmask(vcpu, p);
		break;
	}
	case KVM_GET_FPU: {
		struct kvm_fpu fpu;

		memset(&fpu, 0, sizeof fpu);
		r = kvm_vcpu_ioctl_get_fpu(vcpu, &fpu);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, &fpu, sizeof fpu))
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_FPU: {
		struct kvm_fpu fpu;

		r = -EFAULT;
		if (copy_from_user(&fpu, argp, sizeof fpu))
			goto out;
		r = kvm_vcpu_ioctl_set_fpu(vcpu, &fpu);
		if (r)
			goto out;
		r = 0;
		break;
	}
	default:
		;
	}
out:
	return r;
}
static long kvm_vm_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm = filp->private_data;
	void __user *argp = (void __user *)arg;
	int r = -EINVAL;

	switch (ioctl) {
	case KVM_CREATE_VCPU:
		r = kvm_vm_ioctl_create_vcpu(kvm, arg);
		if (r < 0)
			goto out;
		break;
	case KVM_SET_MEMORY_REGION: {
		struct kvm_memory_region kvm_mem;

		r = -EFAULT;
		if (copy_from_user(&kvm_mem, argp, sizeof kvm_mem))
			goto out;
		r = kvm_vm_ioctl_set_memory_region(kvm, &kvm_mem);
		if (r)
			goto out;
		break;
	}
	case KVM_GET_DIRTY_LOG: {
		struct kvm_dirty_log log;

		r = -EFAULT;
		if (copy_from_user(&log, argp, sizeof log))
			goto out;
		r = kvm_vm_ioctl_get_dirty_log(kvm, &log);
		if (r)
			goto out;
		break;
	}
	case KVM_SET_MEMORY_ALIAS: {
		struct kvm_memory_alias alias;

		r = -EFAULT;
		if (copy_from_user(&alias, argp, sizeof alias))
			goto out;
		r = kvm_vm_ioctl_set_memory_alias(kvm, &alias);
		if (r)
			goto out;
		break;
	}
	default:
		;
	}
out:
	return r;
}
static struct page *kvm_vm_nopage(struct vm_area_struct *vma,
				  unsigned long address,
				  int *type)
{
	struct kvm *kvm = vma->vm_file->private_data;
	unsigned long pgoff;
	struct page *page;

	*type = VM_FAULT_MINOR;
	pgoff = ((address - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
	page = gfn_to_page(kvm, pgoff);
	if (!page)
		return NOPAGE_SIGBUS;
	get_page(page);
	return page;
}

static struct vm_operations_struct kvm_vm_vm_ops = {
	.nopage = kvm_vm_nopage,
};

static int kvm_vm_mmap(struct file *file, struct vm_area_struct *vma)
{
	vma->vm_ops = &kvm_vm_vm_ops;
	return 0;
}

static struct file_operations kvm_vm_fops = {
	.release = kvm_vm_release,
	.unlocked_ioctl = kvm_vm_ioctl,
	.compat_ioctl = kvm_vm_ioctl,
	.mmap = kvm_vm_mmap,
};
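
/*
 * Note the asymmetry with kvm_vcpu_nopage() above: mapping a VM fd
 * exposes guest physical memory, with vm_pgoff interpreted as a guest
 * frame number and resolved through gfn_to_page(), while mapping a vcpu
 * fd yields the kvm_run and pio communication pages.
 */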
static int kvm_dev_ioctl_create_vm(void)
{
	int fd, r;
	struct inode *inode;
	struct file *file;
	struct kvm *kvm;

	inode = kvmfs_inode(&kvm_vm_fops);
	if (IS_ERR(inode)) {
		r = PTR_ERR(inode);
		goto out1;
	}

	kvm = kvm_create_vm();
	if (IS_ERR(kvm)) {
		r = PTR_ERR(kvm);
		goto out2;
	}

	file = kvmfs_file(inode, kvm);
	if (IS_ERR(file)) {
		r = PTR_ERR(file);
		goto out3;
	}
	kvm->filp = file;

	r = get_unused_fd();
	if (r < 0)
		goto out4;
	fd = r;
	fd_install(fd, file);

	return fd;

out4:
	fput(file);
out3:
	kvm_destroy_vm(kvm);
out2:
	iput(inode);
out1:
	return r;
}
static long kvm_dev_ioctl(struct file *filp,
			  unsigned int ioctl, unsigned long arg)
{
	void __user *argp = (void __user *)arg;
	long r = -EINVAL;

	switch (ioctl) {
	case KVM_GET_API_VERSION:
		r = -EINVAL;
		if (arg)
			goto out;
		r = KVM_API_VERSION;
		break;
	case KVM_CREATE_VM:
		r = -EINVAL;
		if (arg)
			goto out;
		r = kvm_dev_ioctl_create_vm();
		break;
	case KVM_GET_MSR_INDEX_LIST: {
		struct kvm_msr_list __user *user_msr_list = argp;
		struct kvm_msr_list msr_list;
		unsigned n;

		r = -EFAULT;
		if (copy_from_user(&msr_list, user_msr_list, sizeof msr_list))
			goto out;
		n = msr_list.nmsrs;
		msr_list.nmsrs = num_msrs_to_save + ARRAY_SIZE(emulated_msrs);
		if (copy_to_user(user_msr_list, &msr_list, sizeof msr_list))
			goto out;
		r = -E2BIG;
		if (n < num_msrs_to_save + ARRAY_SIZE(emulated_msrs))
			goto out;
		r = -EFAULT;
		if (copy_to_user(user_msr_list->indices, &msrs_to_save,
				 num_msrs_to_save * sizeof(u32)))
			goto out;
		if (copy_to_user(user_msr_list->indices + num_msrs_to_save,
				 &emulated_msrs,
				 ARRAY_SIZE(emulated_msrs) * sizeof(u32)))
			goto out;
		r = 0;
		break;
	}
	case KVM_CHECK_EXTENSION:
		/*
		 * No extensions defined at present.
		 */
		r = 0;
		break;
	case KVM_GET_VCPU_MMAP_SIZE:
		r = -EINVAL;
		if (arg)
			goto out;
		r = 2 * PAGE_SIZE;
		break;
	default:
		;
	}
out:
	return r;
}
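
/*
 * KVM_GET_MSR_INDEX_LIST is effectively a two-call protocol (user-space
 * sketch; kvm_fd is assumed): probe with nmsrs == 0, which fails with
 * E2BIG but writes the real count back into the header, then retry with
 * a large enough buffer:
 *
 *	struct kvm_msr_list probe = { .nmsrs = 0 };
 *	struct kvm_msr_list *list;
 *
 *	ioctl(kvm_fd, KVM_GET_MSR_INDEX_LIST, &probe);	(fails, E2BIG)
 *	list = malloc(sizeof *list + probe.nmsrs * sizeof(__u32));
 *	list->nmsrs = probe.nmsrs;
 *	ioctl(kvm_fd, KVM_GET_MSR_INDEX_LIST, list);
 */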
static struct file_operations kvm_chardev_ops = {
	.open = kvm_dev_open,
	.release = kvm_dev_release,
	.unlocked_ioctl = kvm_dev_ioctl,
	.compat_ioctl = kvm_dev_ioctl,
};

static struct miscdevice kvm_dev = {
	KVM_MINOR,
	"kvm",
	&kvm_chardev_ops,
};

static int kvm_reboot(struct notifier_block *notifier, unsigned long val,
		      void *v)
{
	if (val == SYS_RESTART) {
		/*
		 * Some (well, at least mine) BIOSes hang on reboot if
		 * in vmx root mode.
		 */
		printk(KERN_INFO "kvm: exiting hardware virtualization\n");
		on_each_cpu(kvm_arch_ops->hardware_disable, NULL, 0, 1);
	}
	return NOTIFY_OK;
}

static struct notifier_block kvm_reboot_notifier = {
	.notifier_call = kvm_reboot,
	.priority = 0,
};

/*
 * Make sure that a cpu that is being hot-unplugged does not have any vcpus
 * cached on it.
 */
static void decache_vcpus_on_cpu(int cpu)
{
	struct kvm *vm;
	struct kvm_vcpu *vcpu;
	int i;

	spin_lock(&kvm_lock);
	list_for_each_entry(vm, &vm_list, vm_list)
		for (i = 0; i < KVM_MAX_VCPUS; ++i) {
			vcpu = &vm->vcpus[i];
			/*
			 * If the vcpu is locked, then it is running on some
			 * other cpu and therefore it is not cached on the
			 * cpu in question.
			 *
			 * If it's not locked, check the last cpu it executed
			 * on.
			 */
			if (mutex_trylock(&vcpu->mutex)) {
				if (vcpu->cpu == cpu) {
					kvm_arch_ops->vcpu_decache(vcpu);
					vcpu->cpu = -1;
				}
				mutex_unlock(&vcpu->mutex);
			}
		}
	spin_unlock(&kvm_lock);
}

static int kvm_cpu_hotplug(struct notifier_block *notifier, unsigned long val,
			   void *v)
{
	int cpu = (long)v;

	switch (val) {
	case CPU_DOWN_PREPARE:
	case CPU_UP_CANCELED:
		printk(KERN_INFO "kvm: disabling virtualization on CPU%d\n",
		       cpu);
		decache_vcpus_on_cpu(cpu);
		smp_call_function_single(cpu, kvm_arch_ops->hardware_disable,
					 NULL, 0, 1);
		break;
	case CPU_ONLINE:
		printk(KERN_INFO "kvm: enabling virtualization on CPU%d\n",
		       cpu);
		smp_call_function_single(cpu, kvm_arch_ops->hardware_enable,
					 NULL, 0, 1);
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block kvm_cpu_notifier = {
	.notifier_call = kvm_cpu_hotplug,
	.priority = 20, /* must be > scheduler priority */
};
static u64 stat_get(void *_offset)
{
	unsigned offset = (long)_offset;
	u64 total = 0;
	struct kvm *kvm;
	struct kvm_vcpu *vcpu;
	int i;

	spin_lock(&kvm_lock);
	list_for_each_entry(kvm, &vm_list, vm_list)
		for (i = 0; i < KVM_MAX_VCPUS; ++i) {
			vcpu = &kvm->vcpus[i];
			total += *(u32 *)((void *)vcpu + offset);
		}
	spin_unlock(&kvm_lock);
	return total;
}

static void stat_set(void *offset, u64 val)
{
}

DEFINE_SIMPLE_ATTRIBUTE(stat_fops, stat_get, stat_set, "%llu\n");

static __init void kvm_init_debug(void)
{
	struct kvm_stats_debugfs_item *p;

	debugfs_dir = debugfs_create_dir("kvm", NULL);
	for (p = debugfs_entries; p->name; ++p)
		p->dentry = debugfs_create_file(p->name, 0444, debugfs_dir,
						(void *)(long)p->offset,
						&stat_fops);
}

static void kvm_exit_debug(void)
{
	struct kvm_stats_debugfs_item *p;

	for (p = debugfs_entries; p->name; ++p)
		debugfs_remove(p->dentry);
	debugfs_remove(debugfs_dir);
}
static int kvm_suspend(struct sys_device *dev, pm_message_t state)
{
	decache_vcpus_on_cpu(raw_smp_processor_id());
	on_each_cpu(kvm_arch_ops->hardware_disable, NULL, 0, 1);
	return 0;
}

static int kvm_resume(struct sys_device *dev)
{
	on_each_cpu(kvm_arch_ops->hardware_enable, NULL, 0, 1);
	return 0;
}

static struct sysdev_class kvm_sysdev_class = {
	set_kset_name("kvm"),
	.suspend = kvm_suspend,
	.resume = kvm_resume,
};

static struct sys_device kvm_sysdev = {
	.id = 0,
	.cls = &kvm_sysdev_class,
};

hpa_t bad_page_address;

static int kvmfs_get_sb(struct file_system_type *fs_type, int flags,
			const char *dev_name, void *data, struct vfsmount *mnt)
{
	return get_sb_pseudo(fs_type, "kvm:", NULL, KVMFS_SUPER_MAGIC, mnt);
}

static struct file_system_type kvm_fs_type = {
	.name = "kvmfs",
	.get_sb = kvmfs_get_sb,
	.kill_sb = kill_anon_super,
};
int kvm_init_arch(struct kvm_arch_ops *ops, struct module *module)
{
	int r;

	if (kvm_arch_ops) {
		printk(KERN_ERR "kvm: already loaded the other module\n");
		return -EEXIST;
	}

	if (!ops->cpu_has_kvm_support()) {
		printk(KERN_ERR "kvm: no hardware support\n");
		return -EOPNOTSUPP;
	}
	if (ops->disabled_by_bios()) {
		printk(KERN_ERR "kvm: disabled by bios\n");
		return -EOPNOTSUPP;
	}

	kvm_arch_ops = ops;

	r = kvm_arch_ops->hardware_setup();
	if (r < 0)
		goto out;

	on_each_cpu(kvm_arch_ops->hardware_enable, NULL, 0, 1);
	r = register_cpu_notifier(&kvm_cpu_notifier);
	if (r)
		goto out_free_1;
	register_reboot_notifier(&kvm_reboot_notifier);

	r = sysdev_class_register(&kvm_sysdev_class);
	if (r)
		goto out_free_2;

	r = sysdev_register(&kvm_sysdev);
	if (r)
		goto out_free_3;

	kvm_chardev_ops.owner = module;

	r = misc_register(&kvm_dev);
	if (r) {
		printk(KERN_ERR "kvm: misc device register failed\n");
		goto out_free;
	}

	return r;

out_free:
	sysdev_unregister(&kvm_sysdev);
out_free_3:
	sysdev_class_unregister(&kvm_sysdev_class);
out_free_2:
	unregister_reboot_notifier(&kvm_reboot_notifier);
	unregister_cpu_notifier(&kvm_cpu_notifier);
out_free_1:
	on_each_cpu(kvm_arch_ops->hardware_disable, NULL, 0, 1);
	kvm_arch_ops->hardware_unsetup();
out:
	kvm_arch_ops = NULL;
	return r;
}

void kvm_exit_arch(void)
{
	misc_deregister(&kvm_dev);
	sysdev_unregister(&kvm_sysdev);
	sysdev_class_unregister(&kvm_sysdev_class);
	unregister_reboot_notifier(&kvm_reboot_notifier);
	unregister_cpu_notifier(&kvm_cpu_notifier);
	on_each_cpu(kvm_arch_ops->hardware_disable, NULL, 0, 1);
	kvm_arch_ops->hardware_unsetup();
	kvm_arch_ops = NULL;
}
static __init int kvm_init(void)
{
	static struct page *bad_page;
	int r;

	r = kvm_mmu_module_init();
	if (r)
		goto out4;

	r = register_filesystem(&kvm_fs_type);
	if (r)
		goto out3;

	kvmfs_mnt = kern_mount(&kvm_fs_type);
	r = PTR_ERR(kvmfs_mnt);
	if (IS_ERR(kvmfs_mnt))
		goto out2;

	kvm_init_debug();

	kvm_init_msr_list();

	if ((bad_page = alloc_page(GFP_KERNEL)) == NULL) {
		r = -ENOMEM;
		goto out;
	}

	bad_page_address = page_to_pfn(bad_page) << PAGE_SHIFT;
	memset(__va(bad_page_address), 0, PAGE_SIZE);

	return 0;

out:
	kvm_exit_debug();
	mntput(kvmfs_mnt);
out2:
	unregister_filesystem(&kvm_fs_type);
out3:
	kvm_mmu_module_exit();
out4:
	return r;
}

static __exit void kvm_exit(void)
{
	kvm_exit_debug();
	__free_page(pfn_to_page(bad_page_address >> PAGE_SHIFT));
	mntput(kvmfs_mnt);
	unregister_filesystem(&kvm_fs_type);
	kvm_mmu_module_exit();
}

module_init(kvm_init)
module_exit(kvm_exit)

EXPORT_SYMBOL_GPL(kvm_init_arch);
EXPORT_SYMBOL_GPL(kvm_exit_arch);