/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This module enables machines with Intel VT-x extensions to run virtual
 * machines without emulation or binary translation.
 *
 * MMU support
 *
 * Copyright (C) 2006 Qumranet, Inc.
 *
 * Authors:
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Avi Kivity   <avi@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#include "vmx.h"
#include "kvm.h"

#include <linux/types.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/module.h>

#include <asm/page.h>
#include <asm/cmpxchg.h>

#undef MMU_DEBUG

#undef AUDIT

#ifdef AUDIT
static void kvm_mmu_audit(struct kvm_vcpu *vcpu, const char *msg);
#else
static void kvm_mmu_audit(struct kvm_vcpu *vcpu, const char *msg) {}
#endif

#ifdef MMU_DEBUG

#define pgprintk(x...) do { if (dbg) printk(x); } while (0)
#define rmap_printk(x...) do { if (dbg) printk(x); } while (0)

#else

#define pgprintk(x...) do { } while (0)
#define rmap_printk(x...) do { } while (0)

#endif

#if defined(MMU_DEBUG) || defined(AUDIT)
static int dbg = 1;
#endif

#ifndef MMU_DEBUG
#define ASSERT(x) do { } while (0)
#else
#define ASSERT(x)							\
	if (!(x)) {							\
		printk(KERN_WARNING "assertion failed %s:%d: %s\n",	\
		       __FILE__, __LINE__, #x);				\
	}
#endif

#define PT64_PT_BITS 9
#define PT64_ENT_PER_PAGE (1 << PT64_PT_BITS)
#define PT32_PT_BITS 10
#define PT32_ENT_PER_PAGE (1 << PT32_PT_BITS)

#define PT_WRITABLE_SHIFT 1

#define PT_PRESENT_MASK (1ULL << 0)
#define PT_WRITABLE_MASK (1ULL << PT_WRITABLE_SHIFT)
#define PT_USER_MASK (1ULL << 2)
#define PT_PWT_MASK (1ULL << 3)
#define PT_PCD_MASK (1ULL << 4)
#define PT_ACCESSED_MASK (1ULL << 5)
#define PT_DIRTY_MASK (1ULL << 6)
#define PT_PAGE_SIZE_MASK (1ULL << 7)
#define PT_PAT_MASK (1ULL << 7)
#define PT_GLOBAL_MASK (1ULL << 8)
#define PT64_NX_MASK (1ULL << 63)

#define PT_PAT_SHIFT 7
#define PT_DIR_PAT_SHIFT 12
#define PT_DIR_PAT_MASK (1ULL << PT_DIR_PAT_SHIFT)

#define PT32_DIR_PSE36_SIZE 4
#define PT32_DIR_PSE36_SHIFT 13
#define PT32_DIR_PSE36_MASK \
	(((1ULL << PT32_DIR_PSE36_SIZE) - 1) << PT32_DIR_PSE36_SHIFT)
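
/*
 * Bits 9-11 of a page table entry are ignored by the hardware walker
 * and are available to software.  The shadow MMU claims the first of
 * them (bit 9) to mark shadow PTEs that map MMIO rather than RAM; see
 * is_io_pte() below.
 */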

#define PT_FIRST_AVAIL_BITS_SHIFT 9
#define PT64_SECOND_AVAIL_BITS_SHIFT 52

#define PT_SHADOW_IO_MARK (1ULL << PT_FIRST_AVAIL_BITS_SHIFT)

#define VALID_PAGE(x) ((x) != INVALID_PAGE)

#define PT64_LEVEL_BITS 9

#define PT64_LEVEL_SHIFT(level) \
	(PAGE_SHIFT + (level - 1) * PT64_LEVEL_BITS)

#define PT64_LEVEL_MASK(level) \
	(((1ULL << PT64_LEVEL_BITS) - 1) << PT64_LEVEL_SHIFT(level))

#define PT64_INDEX(address, level) \
	(((address) >> PT64_LEVEL_SHIFT(level)) & ((1 << PT64_LEVEL_BITS) - 1))

#define PT32_LEVEL_BITS 10

#define PT32_LEVEL_SHIFT(level) \
	(PAGE_SHIFT + (level - 1) * PT32_LEVEL_BITS)

#define PT32_LEVEL_MASK(level) \
	(((1ULL << PT32_LEVEL_BITS) - 1) << PT32_LEVEL_SHIFT(level))

#define PT32_INDEX(address, level) \
	(((address) >> PT32_LEVEL_SHIFT(level)) & ((1 << PT32_LEVEL_BITS) - 1))

#define PT64_BASE_ADDR_MASK (((1ULL << 52) - 1) & ~(u64)(PAGE_SIZE-1))
#define PT64_DIR_BASE_ADDR_MASK \
	(PT64_BASE_ADDR_MASK & ~((1ULL << (PAGE_SHIFT + PT64_LEVEL_BITS)) - 1))

#define PT32_BASE_ADDR_MASK PAGE_MASK
#define PT32_DIR_BASE_ADDR_MASK \
	(PAGE_MASK & ~((1ULL << (PAGE_SHIFT + PT32_LEVEL_BITS)) - 1))

#define PFERR_PRESENT_MASK (1U << 0)
#define PFERR_WRITE_MASK (1U << 1)
#define PFERR_USER_MASK (1U << 2)
#define PFERR_FETCH_MASK (1U << 4)

#define PT64_ROOT_LEVEL 4
#define PT32_ROOT_LEVEL 2
#define PT32E_ROOT_LEVEL 3

#define PT_DIRECTORY_LEVEL 2
#define PT_PAGE_TABLE_LEVEL 1

#define RMAP_EXT 4
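
/*
 * Overflow block for the reverse map: once a guest page is mapped by
 * more than one shadow PTE, the sptes are collected in a chain of
 * these descriptors, RMAP_EXT slots per descriptor, linked by ->more.
 */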

struct kvm_rmap_desc {
	u64 *shadow_ptes[RMAP_EXT];
	struct kvm_rmap_desc *more;
};

static struct kmem_cache *pte_chain_cache;
static struct kmem_cache *rmap_desc_cache;
static struct kmem_cache *mmu_page_header_cache;

static int is_write_protection(struct kvm_vcpu *vcpu)
{
	return vcpu->cr0 & CR0_WP_MASK;
}

static int is_cpuid_PSE36(void)
{
	return 1;
}

static int is_nx(struct kvm_vcpu *vcpu)
{
	return vcpu->shadow_efer & EFER_NX;
}

static int is_present_pte(unsigned long pte)
{
	return pte & PT_PRESENT_MASK;
}

static int is_writeble_pte(unsigned long pte)
{
	return pte & PT_WRITABLE_MASK;
}

static int is_io_pte(unsigned long pte)
{
	return pte & PT_SHADOW_IO_MARK;
}

static int is_rmap_pte(u64 pte)
{
	return (pte & (PT_WRITABLE_MASK | PT_PRESENT_MASK))
		== (PT_WRITABLE_MASK | PT_PRESENT_MASK);
}
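
/*
 * Update a shadow PTE with a single atomic 64-bit store (set_64bit) so
 * that a concurrent hardware page walk never sees a half-written
 * entry.  This matters on 32-bit hosts, where a plain u64 assignment
 * would be split into two 32-bit stores.
 */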

static void set_shadow_pte(u64 *sptep, u64 spte)
{
#ifdef CONFIG_X86_64
	set_64bit((unsigned long *)sptep, spte);
#else
	set_64bit((unsigned long long *)sptep, spte);
#endif
}
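
/*
 * The MMU allocates most of its objects while holding vcpu->kvm->lock,
 * where sleeping is not allowed.  Objects are therefore pre-allocated
 * ("topped up") into small per-vcpu caches beforehand;
 * mmu_topup_memory_caches() first tries an atomic GFP_NOWAIT fill and
 * only drops the lock to retry with GFP_KERNEL if that fails.
 */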

static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache,
				  struct kmem_cache *base_cache, int min,
				  gfp_t gfp_flags)
{
	void *obj;

	if (cache->nobjs >= min)
		return 0;
	while (cache->nobjs < ARRAY_SIZE(cache->objects)) {
		obj = kmem_cache_zalloc(base_cache, gfp_flags);
		if (!obj)
			return -ENOMEM;
		cache->objects[cache->nobjs++] = obj;
	}
	return 0;
}

static void mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc)
{
	while (mc->nobjs)
		kfree(mc->objects[--mc->nobjs]);
}

static int mmu_topup_memory_cache_page(struct kvm_mmu_memory_cache *cache,
				       int min, gfp_t gfp_flags)
{
	struct page *page;

	if (cache->nobjs >= min)
		return 0;
	while (cache->nobjs < ARRAY_SIZE(cache->objects)) {
		page = alloc_page(gfp_flags);
		if (!page)
			return -ENOMEM;
		set_page_private(page, 0);
		cache->objects[cache->nobjs++] = page_address(page);
	}
	return 0;
}

static void mmu_free_memory_cache_page(struct kvm_mmu_memory_cache *mc)
{
	while (mc->nobjs)
		free_page((unsigned long)mc->objects[--mc->nobjs]);
}

static int __mmu_topup_memory_caches(struct kvm_vcpu *vcpu, gfp_t gfp_flags)
{
	int r;

	r = mmu_topup_memory_cache(&vcpu->mmu_pte_chain_cache,
				   pte_chain_cache, 4, gfp_flags);
	if (r)
		goto out;
	r = mmu_topup_memory_cache(&vcpu->mmu_rmap_desc_cache,
				   rmap_desc_cache, 1, gfp_flags);
	if (r)
		goto out;
	r = mmu_topup_memory_cache_page(&vcpu->mmu_page_cache, 4, gfp_flags);
	if (r)
		goto out;
	r = mmu_topup_memory_cache(&vcpu->mmu_page_header_cache,
				   mmu_page_header_cache, 4, gfp_flags);
out:
	return r;
}

static int mmu_topup_memory_caches(struct kvm_vcpu *vcpu)
{
	int r;

	r = __mmu_topup_memory_caches(vcpu, GFP_NOWAIT);
	if (r < 0) {
		spin_unlock(&vcpu->kvm->lock);
		kvm_arch_ops->vcpu_put(vcpu);
		r = __mmu_topup_memory_caches(vcpu, GFP_KERNEL);
		kvm_arch_ops->vcpu_load(vcpu);
		spin_lock(&vcpu->kvm->lock);
	}
	return r;
}

static void mmu_free_memory_caches(struct kvm_vcpu *vcpu)
{
	mmu_free_memory_cache(&vcpu->mmu_pte_chain_cache);
	mmu_free_memory_cache(&vcpu->mmu_rmap_desc_cache);
	mmu_free_memory_cache_page(&vcpu->mmu_page_cache);
	mmu_free_memory_cache(&vcpu->mmu_page_header_cache);
}

static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc,
				    size_t size)
{
	void *p;

	BUG_ON(!mc->nobjs);
	p = mc->objects[--mc->nobjs];
	memset(p, 0, size);
	return p;
}

static struct kvm_pte_chain *mmu_alloc_pte_chain(struct kvm_vcpu *vcpu)
{
	return mmu_memory_cache_alloc(&vcpu->mmu_pte_chain_cache,
				      sizeof(struct kvm_pte_chain));
}

static void mmu_free_pte_chain(struct kvm_pte_chain *pc)
{
	kfree(pc);
}

static struct kvm_rmap_desc *mmu_alloc_rmap_desc(struct kvm_vcpu *vcpu)
{
	return mmu_memory_cache_alloc(&vcpu->mmu_rmap_desc_cache,
				      sizeof(struct kvm_rmap_desc));
}

static void mmu_free_rmap_desc(struct kvm_rmap_desc *rd)
{
	kfree(rd);
}

/*
 * Reverse mapping data structures:
 *
 * If page->private bit zero is zero, then page->private points to the
 * shadow page table entry that points to page_address(page).
 *
 * If page->private bit zero is one, then (page->private & ~1) points
 * to a struct kvm_rmap_desc containing more mappings.
 */
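
/*
 * For example: the first writable mapping of a page stores its spte
 * pointer in page->private directly.  When a second mapping is added,
 * rmap_add() below allocates a descriptor, moves the existing spte
 * into shadow_ptes[0], stores the new one in shadow_ptes[1], and tags
 * the pointer by setting bit zero.
 */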

static void rmap_add(struct kvm_vcpu *vcpu, u64 *spte)
{
	struct page *page;
	struct kvm_rmap_desc *desc;
	int i;

	if (!is_rmap_pte(*spte))
		return;
	page = pfn_to_page((*spte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT);
	if (!page_private(page)) {
		rmap_printk("rmap_add: %p %llx 0->1\n", spte, *spte);
		set_page_private(page, (unsigned long)spte);
	} else if (!(page_private(page) & 1)) {
		rmap_printk("rmap_add: %p %llx 1->many\n", spte, *spte);
		desc = mmu_alloc_rmap_desc(vcpu);
		desc->shadow_ptes[0] = (u64 *)page_private(page);
		desc->shadow_ptes[1] = spte;
		set_page_private(page, (unsigned long)desc | 1);
	} else {
		rmap_printk("rmap_add: %p %llx many->many\n", spte, *spte);
		desc = (struct kvm_rmap_desc *)(page_private(page) & ~1ul);
		while (desc->shadow_ptes[RMAP_EXT-1] && desc->more)
			desc = desc->more;
		if (desc->shadow_ptes[RMAP_EXT-1]) {
			desc->more = mmu_alloc_rmap_desc(vcpu);
			desc = desc->more;
		}
		for (i = 0; desc->shadow_ptes[i]; ++i)
			;
		desc->shadow_ptes[i] = spte;
	}
}
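
/*
 * Remove slot i from a descriptor by moving the descriptor's last used
 * slot into it, keeping the entries packed.  A descriptor that becomes
 * empty is unlinked from the chain and freed; if it was the only one,
 * the page's rmap becomes empty.
 */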

static void rmap_desc_remove_entry(struct page *page,
				   struct kvm_rmap_desc *desc,
				   int i,
				   struct kvm_rmap_desc *prev_desc)
{
	int j;

	for (j = RMAP_EXT - 1; !desc->shadow_ptes[j] && j > i; --j)
		;
	desc->shadow_ptes[i] = desc->shadow_ptes[j];
	desc->shadow_ptes[j] = NULL;
	if (j != 0)
		return;
	if (!prev_desc && !desc->more)
		set_page_private(page, (unsigned long)desc->shadow_ptes[0]);
	else
		if (prev_desc)
			prev_desc->more = desc->more;
		else
			set_page_private(page, (unsigned long)desc->more | 1);
	mmu_free_rmap_desc(desc);
}

static void rmap_remove(u64 *spte)
{
	struct page *page;
	struct kvm_rmap_desc *desc;
	struct kvm_rmap_desc *prev_desc;
	int i;

	if (!is_rmap_pte(*spte))
		return;
	page = pfn_to_page((*spte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT);
	if (!page_private(page)) {
		printk(KERN_ERR "rmap_remove: %p %llx 0->BUG\n", spte, *spte);
		BUG();
	} else if (!(page_private(page) & 1)) {
		rmap_printk("rmap_remove: %p %llx 1->0\n", spte, *spte);
		if ((u64 *)page_private(page) != spte) {
			printk(KERN_ERR "rmap_remove: %p %llx 1->BUG\n",
			       spte, *spte);
			BUG();
		}
		set_page_private(page, 0);
	} else {
		rmap_printk("rmap_remove: %p %llx many->many\n", spte, *spte);
		desc = (struct kvm_rmap_desc *)(page_private(page) & ~1ul);
		prev_desc = NULL;
		while (desc) {
			for (i = 0; i < RMAP_EXT && desc->shadow_ptes[i]; ++i)
				if (desc->shadow_ptes[i] == spte) {
					rmap_desc_remove_entry(page,
							       desc, i,
							       prev_desc);
					return;
				}
			prev_desc = desc;
			desc = desc->more;
		}
		BUG();
	}
}
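
/*
 * Strip write access from every shadow PTE that maps guest frame gfn.
 * Since the rmap only tracks writable mappings (see is_rmap_pte()),
 * each iteration removes the head entry from the rmap and rewrites
 * that spte read-only, so the loop ends once page_private() is zero.
 */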

static void rmap_write_protect(struct kvm_vcpu *vcpu, u64 gfn)
{
	struct kvm *kvm = vcpu->kvm;
	struct page *page;
	struct kvm_rmap_desc *desc;
	u64 *spte;

	page = gfn_to_page(kvm, gfn);
	BUG_ON(!page);

	while (page_private(page)) {
		if (!(page_private(page) & 1))
			spte = (u64 *)page_private(page);
		else {
			desc = (struct kvm_rmap_desc *)(page_private(page) & ~1ul);
			spte = desc->shadow_ptes[0];
		}
		BUG_ON(!spte);
		BUG_ON((*spte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT
		       != page_to_pfn(page));
		BUG_ON(!(*spte & PT_PRESENT_MASK));
		BUG_ON(!(*spte & PT_WRITABLE_MASK));
		rmap_printk("rmap_write_protect: spte %p %llx\n", spte, *spte);
		rmap_remove(spte);
		set_shadow_pte(spte, *spte & ~PT_WRITABLE_MASK);
		kvm_flush_remote_tlbs(vcpu->kvm);
	}
}

#ifdef MMU_DEBUG
static int is_empty_shadow_page(u64 *spt)
{
	u64 *pos;
	u64 *end;

	for (pos = spt, end = pos + PAGE_SIZE / sizeof(u64); pos != end; pos++)
		if (*pos != 0) {
			printk(KERN_ERR "%s: %p %llx\n", __FUNCTION__,
			       pos, *pos);
			return 0;
		}
	return 1;
}
#endif

static void kvm_mmu_free_page(struct kvm *kvm,
			      struct kvm_mmu_page *page_head)
{
	ASSERT(is_empty_shadow_page(page_head->spt));
	list_del(&page_head->link);
	__free_page(virt_to_page(page_head->spt));
	kfree(page_head);
	++kvm->n_free_mmu_pages;
}

static unsigned kvm_page_table_hashfn(gfn_t gfn)
{
	return gfn;
}

static struct kvm_mmu_page *kvm_mmu_alloc_page(struct kvm_vcpu *vcpu,
					       u64 *parent_pte)
{
	struct kvm_mmu_page *page;

	if (!vcpu->kvm->n_free_mmu_pages)
		return NULL;

	page = mmu_memory_cache_alloc(&vcpu->mmu_page_header_cache,
				      sizeof *page);
	page->spt = mmu_memory_cache_alloc(&vcpu->mmu_page_cache, PAGE_SIZE);
	set_page_private(virt_to_page(page->spt), (unsigned long)page);
	list_add(&page->link, &vcpu->kvm->active_mmu_pages);
	ASSERT(is_empty_shadow_page(page->spt));
	page->slot_bitmap = 0;
	page->multimapped = 0;
	page->parent_pte = parent_pte;
	--vcpu->kvm->n_free_mmu_pages;
	return page;
}
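
/*
 * A shadow page may be pointed to by several higher-level shadow PTEs
 * once it is shared.  The common single-parent case is kept inline in
 * page->parent_pte; on the second parent the page becomes multimapped
 * and the parents move into an hlist of kvm_pte_chain blocks, with
 * NR_PTE_CHAIN_ENTRIES slots per block.
 */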

static void mmu_page_add_parent_pte(struct kvm_vcpu *vcpu,
				    struct kvm_mmu_page *page, u64 *parent_pte)
{
	struct kvm_pte_chain *pte_chain;
	struct hlist_node *node;
	int i;

	if (!parent_pte)
		return;
	if (!page->multimapped) {
		u64 *old = page->parent_pte;

		if (!old) {
			page->parent_pte = parent_pte;
			return;
		}
		page->multimapped = 1;
		pte_chain = mmu_alloc_pte_chain(vcpu);
		INIT_HLIST_HEAD(&page->parent_ptes);
		hlist_add_head(&pte_chain->link, &page->parent_ptes);
		pte_chain->parent_ptes[0] = old;
	}
	hlist_for_each_entry(pte_chain, node, &page->parent_ptes, link) {
		if (pte_chain->parent_ptes[NR_PTE_CHAIN_ENTRIES-1])
			continue;
		for (i = 0; i < NR_PTE_CHAIN_ENTRIES; ++i)
			if (!pte_chain->parent_ptes[i]) {
				pte_chain->parent_ptes[i] = parent_pte;
				return;
			}
	}
	pte_chain = mmu_alloc_pte_chain(vcpu);
	BUG_ON(!pte_chain);
	hlist_add_head(&pte_chain->link, &page->parent_ptes);
	pte_chain->parent_ptes[0] = parent_pte;
}

static void mmu_page_remove_parent_pte(struct kvm_mmu_page *page,
				       u64 *parent_pte)
{
	struct kvm_pte_chain *pte_chain;
	struct hlist_node *node;
	int i;

	if (!page->multimapped) {
		BUG_ON(page->parent_pte != parent_pte);
		page->parent_pte = NULL;
		return;
	}
	hlist_for_each_entry(pte_chain, node, &page->parent_ptes, link)
		for (i = 0; i < NR_PTE_CHAIN_ENTRIES; ++i) {
			if (!pte_chain->parent_ptes[i])
				break;
			if (pte_chain->parent_ptes[i] != parent_pte)
				continue;
			while (i + 1 < NR_PTE_CHAIN_ENTRIES
			       && pte_chain->parent_ptes[i + 1]) {
				pte_chain->parent_ptes[i]
					= pte_chain->parent_ptes[i + 1];
				++i;
			}
			pte_chain->parent_ptes[i] = NULL;
			if (i == 0) {
				hlist_del(&pte_chain->link);
				mmu_free_pte_chain(pte_chain);
				if (hlist_empty(&page->parent_ptes)) {
					page->multimapped = 0;
					page->parent_pte = NULL;
				}
			}
			return;
		}
	BUG();
}
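
/*
 * Shadow pages are hashed by guest frame number and matched on their
 * role word, which encodes the guest paging depth (glevels), the
 * shadow level, the hugepage access rights, and whether the page is
 * "metaphysical" (not backed by a guest page table at all).  When a
 * 32-bit guest's 1024-entry tables are shadowed by 512-entry 64-bit
 * format tables, one guest page needs several shadow pages;
 * role.quadrant records which slice of the guest table a particular
 * shadow page covers.
 */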

static struct kvm_mmu_page *kvm_mmu_lookup_page(struct kvm_vcpu *vcpu,
						gfn_t gfn)
{
	unsigned index;
	struct hlist_head *bucket;
	struct kvm_mmu_page *page;
	struct hlist_node *node;

	pgprintk("%s: looking for gfn %lx\n", __FUNCTION__, gfn);
	index = kvm_page_table_hashfn(gfn) % KVM_NUM_MMU_PAGES;
	bucket = &vcpu->kvm->mmu_page_hash[index];
	hlist_for_each_entry(page, node, bucket, hash_link)
		if (page->gfn == gfn && !page->role.metaphysical) {
			pgprintk("%s: found role %x\n",
				 __FUNCTION__, page->role.word);
			return page;
		}
	return NULL;
}

static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
					     gfn_t gfn,
					     gva_t gaddr,
					     unsigned level,
					     int metaphysical,
					     unsigned hugepage_access,
					     u64 *parent_pte)
{
	union kvm_mmu_page_role role;
	unsigned index;
	unsigned quadrant;
	struct hlist_head *bucket;
	struct kvm_mmu_page *page;
	struct hlist_node *node;

	role.word = 0;
	role.glevels = vcpu->mmu.root_level;
	role.level = level;
	role.metaphysical = metaphysical;
	role.hugepage_access = hugepage_access;
	if (vcpu->mmu.root_level <= PT32_ROOT_LEVEL) {
		quadrant = gaddr >> (PAGE_SHIFT + (PT64_PT_BITS * level));
		quadrant &= (1 << ((PT32_PT_BITS - PT64_PT_BITS) * level)) - 1;
		role.quadrant = quadrant;
	}
	pgprintk("%s: looking gfn %lx role %x\n", __FUNCTION__,
		 gfn, role.word);
	index = kvm_page_table_hashfn(gfn) % KVM_NUM_MMU_PAGES;
	bucket = &vcpu->kvm->mmu_page_hash[index];
	hlist_for_each_entry(page, node, bucket, hash_link)
		if (page->gfn == gfn && page->role.word == role.word) {
			mmu_page_add_parent_pte(vcpu, page, parent_pte);
			pgprintk("%s: found\n", __FUNCTION__);
			return page;
		}
	page = kvm_mmu_alloc_page(vcpu, parent_pte);
	if (!page)
		return page;
	pgprintk("%s: adding gfn %lx role %x\n", __FUNCTION__, gfn, role.word);
	page->gfn = gfn;
	page->role = role;
	hlist_add_head(&page->hash_link, bucket);
	if (!metaphysical)
		rmap_write_protect(vcpu, gfn);
	return page;
}
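
/*
 * Disconnect a shadow page from everything below it: a leaf page drops
 * its reverse-map entries, while a directory page detaches itself as a
 * parent from each child shadow page.  The remote TLB flush keeps
 * other vcpus from using the stale translations.
 */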

static void kvm_mmu_page_unlink_children(struct kvm *kvm,
					 struct kvm_mmu_page *page)
{
	unsigned i;
	u64 *pt;
	u64 ent;

	pt = page->spt;

	if (page->role.level == PT_PAGE_TABLE_LEVEL) {
		for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
			if (pt[i] & PT_PRESENT_MASK)
				rmap_remove(&pt[i]);
			pt[i] = 0;
		}
		kvm_flush_remote_tlbs(kvm);
		return;
	}

	for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
		ent = pt[i];

		pt[i] = 0;
		if (!(ent & PT_PRESENT_MASK))
			continue;
		ent &= PT64_BASE_ADDR_MASK;
		mmu_page_remove_parent_pte(page_header(ent), &pt[i]);
	}
	kvm_flush_remote_tlbs(kvm);
}

static void kvm_mmu_put_page(struct kvm_mmu_page *page,
			     u64 *parent_pte)
{
	mmu_page_remove_parent_pte(page, parent_pte);
}

static void kvm_mmu_zap_page(struct kvm *kvm,
			     struct kvm_mmu_page *page)
{
	u64 *parent_pte;

	while (page->multimapped || page->parent_pte) {
		if (!page->multimapped)
			parent_pte = page->parent_pte;
		else {
			struct kvm_pte_chain *chain;

			chain = container_of(page->parent_ptes.first,
					     struct kvm_pte_chain, link);
			parent_pte = chain->parent_ptes[0];
		}
		BUG_ON(!parent_pte);
		kvm_mmu_put_page(page, parent_pte);
		set_shadow_pte(parent_pte, 0);
	}
	kvm_mmu_page_unlink_children(kvm, page);
	if (!page->root_count) {
		hlist_del(&page->hash_link);
		kvm_mmu_free_page(kvm, page);
	} else
		list_move(&page->link, &kvm->active_mmu_pages);
}

static int kvm_mmu_unprotect_page(struct kvm_vcpu *vcpu, gfn_t gfn)
{
	unsigned index;
	struct hlist_head *bucket;
	struct kvm_mmu_page *page;
	struct hlist_node *node, *n;
	int r;

	pgprintk("%s: looking for gfn %lx\n", __FUNCTION__, gfn);
	r = 0;
	index = kvm_page_table_hashfn(gfn) % KVM_NUM_MMU_PAGES;
	bucket = &vcpu->kvm->mmu_page_hash[index];
	hlist_for_each_entry_safe(page, node, n, bucket, hash_link)
		if (page->gfn == gfn && !page->role.metaphysical) {
			pgprintk("%s: gfn %lx role %x\n", __FUNCTION__, gfn,
				 page->role.word);
			kvm_mmu_zap_page(vcpu->kvm, page);
			r = 1;
		}
	return r;
}

static void mmu_unshadow(struct kvm_vcpu *vcpu, gfn_t gfn)
{
	struct kvm_mmu_page *page;

	while ((page = kvm_mmu_lookup_page(vcpu, gfn)) != NULL) {
		pgprintk("%s: zap %lx %x\n",
			 __FUNCTION__, gfn, page->role.word);
		kvm_mmu_zap_page(vcpu->kvm, page);
	}
}

static void page_header_update_slot(struct kvm *kvm, void *pte, gpa_t gpa)
{
	int slot = memslot_id(kvm, gfn_to_memslot(kvm, gpa >> PAGE_SHIFT));
	struct kvm_mmu_page *page_head = page_header(__pa(pte));

	__set_bit(slot, &page_head->slot_bitmap);
}

hpa_t safe_gpa_to_hpa(struct kvm_vcpu *vcpu, gpa_t gpa)
{
	hpa_t hpa = gpa_to_hpa(vcpu, gpa);

	return is_error_hpa(hpa) ? bad_page_address | (gpa & ~PAGE_MASK) : hpa;
}

hpa_t gpa_to_hpa(struct kvm_vcpu *vcpu, gpa_t gpa)
{
	struct page *page;

	ASSERT((gpa & HPA_ERR_MASK) == 0);
	page = gfn_to_page(vcpu->kvm, gpa >> PAGE_SHIFT);
	if (!page)
		return gpa | HPA_ERR_MASK;
	return ((hpa_t)page_to_pfn(page) << PAGE_SHIFT)
		| (gpa & (PAGE_SIZE-1));
}

hpa_t gva_to_hpa(struct kvm_vcpu *vcpu, gva_t gva)
{
	gpa_t gpa = vcpu->mmu.gva_to_gpa(vcpu, gva);

	if (gpa == UNMAPPED_GVA)
		return UNMAPPED_GVA;
	return gpa_to_hpa(vcpu, gpa);
}

struct page *gva_to_page(struct kvm_vcpu *vcpu, gva_t gva)
{
	gpa_t gpa = vcpu->mmu.gva_to_gpa(vcpu, gva);

	if (gpa == UNMAPPED_GVA)
		return NULL;
	return pfn_to_page(gpa_to_hpa(vcpu, gpa) >> PAGE_SHIFT);
}

static void nonpaging_new_cr3(struct kvm_vcpu *vcpu)
{
}
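
/*
 * Build the identity (gva == gpa) shadow mapping used while the guest
 * runs with paging disabled.  The walk descends from the shadow root
 * one level per iteration; missing interior tables are filled in with
 * metaphysical pages keyed by a pseudo-gfn derived from the address,
 * and the level 1 entry finally maps the host page writable.
 */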

static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, hpa_t p)
{
	int level = PT32E_ROOT_LEVEL;
	hpa_t table_addr = vcpu->mmu.root_hpa;

	for (; ; level--) {
		u32 index = PT64_INDEX(v, level);
		u64 *table;
		u64 pte;

		ASSERT(VALID_PAGE(table_addr));
		table = __va(table_addr);

		if (level == 1) {
			pte = table[index];
			if (is_present_pte(pte) && is_writeble_pte(pte))
				return 0;
			mark_page_dirty(vcpu->kvm, v >> PAGE_SHIFT);
			page_header_update_slot(vcpu->kvm, table, v);
			table[index] = p | PT_PRESENT_MASK | PT_WRITABLE_MASK |
				       PT_USER_MASK;
			rmap_add(vcpu, &table[index]);
			return 0;
		}

		if (table[index] == 0) {
			struct kvm_mmu_page *new_table;
			gfn_t pseudo_gfn;

			pseudo_gfn = (v & PT64_DIR_BASE_ADDR_MASK)
				>> PAGE_SHIFT;
			new_table = kvm_mmu_get_page(vcpu, pseudo_gfn,
						     v, level - 1,
						     1, 0, &table[index]);
			if (!new_table) {
				pgprintk("nonpaging_map: ENOMEM\n");
				return -ENOMEM;
			}

			table[index] = __pa(new_table->spt) | PT_PRESENT_MASK
				| PT_WRITABLE_MASK | PT_USER_MASK;
		}
		table_addr = table[index] & PT64_BASE_ADDR_MASK;
	}
}
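
/*
 * Root management: a 64-bit guest uses a single 4-level shadow root,
 * while 32-bit and PAE guests get four PAE page directory roots kept
 * in mmu.pae_root.  Each root pins its shadow page via root_count so
 * that kvm_mmu_zap_page() will not free it while it may be loaded in
 * cr3.
 */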

static void mmu_free_roots(struct kvm_vcpu *vcpu)
{
	int i;
	struct kvm_mmu_page *page;

	if (!VALID_PAGE(vcpu->mmu.root_hpa))
		return;
#ifdef CONFIG_X86_64
	if (vcpu->mmu.shadow_root_level == PT64_ROOT_LEVEL) {
		hpa_t root = vcpu->mmu.root_hpa;

		page = page_header(root);
		--page->root_count;
		vcpu->mmu.root_hpa = INVALID_PAGE;
		return;
	}
#endif
	for (i = 0; i < 4; ++i) {
		hpa_t root = vcpu->mmu.pae_root[i];

		if (root) {
			root &= PT64_BASE_ADDR_MASK;
			page = page_header(root);
			--page->root_count;
		}
		vcpu->mmu.pae_root[i] = INVALID_PAGE;
	}
	vcpu->mmu.root_hpa = INVALID_PAGE;
}

static void mmu_alloc_roots(struct kvm_vcpu *vcpu)
{
	int i;
	gfn_t root_gfn;
	struct kvm_mmu_page *page;

	root_gfn = vcpu->cr3 >> PAGE_SHIFT;

#ifdef CONFIG_X86_64
	if (vcpu->mmu.shadow_root_level == PT64_ROOT_LEVEL) {
		hpa_t root = vcpu->mmu.root_hpa;

		ASSERT(!VALID_PAGE(root));
		page = kvm_mmu_get_page(vcpu, root_gfn, 0,
					PT64_ROOT_LEVEL, 0, 0, NULL);
		root = __pa(page->spt);
		++page->root_count;
		vcpu->mmu.root_hpa = root;
		return;
	}
#endif
	for (i = 0; i < 4; ++i) {
		hpa_t root = vcpu->mmu.pae_root[i];

		ASSERT(!VALID_PAGE(root));
		if (vcpu->mmu.root_level == PT32E_ROOT_LEVEL) {
			if (!is_present_pte(vcpu->pdptrs[i])) {
				vcpu->mmu.pae_root[i] = 0;
				continue;
			}
			root_gfn = vcpu->pdptrs[i] >> PAGE_SHIFT;
		} else if (vcpu->mmu.root_level == 0)
			root_gfn = 0;
		page = kvm_mmu_get_page(vcpu, root_gfn, i << 30,
					PT32_ROOT_LEVEL, !is_paging(vcpu),
					0, NULL);
		root = __pa(page->spt);
		++page->root_count;
		vcpu->mmu.pae_root[i] = root | PT_PRESENT_MASK;
	}
	vcpu->mmu.root_hpa = __pa(vcpu->mmu.pae_root);
}

static gpa_t nonpaging_gva_to_gpa(struct kvm_vcpu *vcpu, gva_t vaddr)
{
	return vaddr;
}

static int nonpaging_page_fault(struct kvm_vcpu *vcpu, gva_t gva,
				u32 error_code)
{
	gpa_t addr = gva;
	hpa_t paddr;
	int r;

	r = mmu_topup_memory_caches(vcpu);
	if (r)
		return r;

	ASSERT(vcpu);
	ASSERT(VALID_PAGE(vcpu->mmu.root_hpa));

	paddr = gpa_to_hpa(vcpu, addr & PT64_BASE_ADDR_MASK);

	if (is_error_hpa(paddr))
		return 1;

	return nonpaging_map(vcpu, addr & PAGE_MASK, paddr);
}

static void nonpaging_free(struct kvm_vcpu *vcpu)
{
	mmu_free_roots(vcpu);
}

static int nonpaging_init_context(struct kvm_vcpu *vcpu)
{
	struct kvm_mmu *context = &vcpu->mmu;

	context->new_cr3 = nonpaging_new_cr3;
	context->page_fault = nonpaging_page_fault;
	context->gva_to_gpa = nonpaging_gva_to_gpa;
	context->free = nonpaging_free;
	context->root_level = 0;
	context->shadow_root_level = PT32E_ROOT_LEVEL;
	context->root_hpa = INVALID_PAGE;
	return 0;
}

static void kvm_mmu_flush_tlb(struct kvm_vcpu *vcpu)
{
	++vcpu->stat.tlb_flush;
	kvm_arch_ops->tlb_flush(vcpu);
}

static void paging_new_cr3(struct kvm_vcpu *vcpu)
{
	pgprintk("%s: cr3 %lx\n", __FUNCTION__, vcpu->cr3);
	mmu_free_roots(vcpu);
}

static void inject_page_fault(struct kvm_vcpu *vcpu,
			      u64 addr,
			      u32 err_code)
{
	kvm_arch_ops->inject_page_fault(vcpu, addr, err_code);
}

static void paging_free(struct kvm_vcpu *vcpu)
{
	nonpaging_free(vcpu);
}
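
/*
 * paging_tmpl.h is a C "template": it is included twice, once with
 * PTTYPE 64 and once with PTTYPE 32, expanding into the paging64_*
 * and paging32_* guest page table walkers used by the contexts below.
 */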

#define PTTYPE 64
#include "paging_tmpl.h"
#undef PTTYPE

#define PTTYPE 32
#include "paging_tmpl.h"
#undef PTTYPE

static int paging64_init_context_common(struct kvm_vcpu *vcpu, int level)
{
	struct kvm_mmu *context = &vcpu->mmu;

	ASSERT(is_pae(vcpu));
	context->new_cr3 = paging_new_cr3;
	context->page_fault = paging64_page_fault;
	context->gva_to_gpa = paging64_gva_to_gpa;
	context->free = paging_free;
	context->root_level = level;
	context->shadow_root_level = level;
	context->root_hpa = INVALID_PAGE;
	return 0;
}

static int paging64_init_context(struct kvm_vcpu *vcpu)
{
	return paging64_init_context_common(vcpu, PT64_ROOT_LEVEL);
}

static int paging32_init_context(struct kvm_vcpu *vcpu)
{
	struct kvm_mmu *context = &vcpu->mmu;

	context->new_cr3 = paging_new_cr3;
	context->page_fault = paging32_page_fault;
	context->gva_to_gpa = paging32_gva_to_gpa;
	context->free = paging_free;
	context->root_level = PT32_ROOT_LEVEL;
	context->shadow_root_level = PT32E_ROOT_LEVEL;
	context->root_hpa = INVALID_PAGE;
	return 0;
}

static int paging32E_init_context(struct kvm_vcpu *vcpu)
{
	return paging64_init_context_common(vcpu, PT32E_ROOT_LEVEL);
}

static int init_kvm_mmu(struct kvm_vcpu *vcpu)
{
	ASSERT(vcpu);
	ASSERT(!VALID_PAGE(vcpu->mmu.root_hpa));

	if (!is_paging(vcpu))
		return nonpaging_init_context(vcpu);
	else if (is_long_mode(vcpu))
		return paging64_init_context(vcpu);
	else if (is_pae(vcpu))
		return paging32E_init_context(vcpu);
	else
		return paging32_init_context(vcpu);
}

static void destroy_kvm_mmu(struct kvm_vcpu *vcpu)
{
	ASSERT(vcpu);
	if (VALID_PAGE(vcpu->mmu.root_hpa)) {
		vcpu->mmu.free(vcpu);
		vcpu->mmu.root_hpa = INVALID_PAGE;
	}
}

int kvm_mmu_reset_context(struct kvm_vcpu *vcpu)
{
	destroy_kvm_mmu(vcpu);
	return init_kvm_mmu(vcpu);
}

int kvm_mmu_load(struct kvm_vcpu *vcpu)
{
	int r;

	spin_lock(&vcpu->kvm->lock);
	r = mmu_topup_memory_caches(vcpu);
	if (r)
		goto out;
	mmu_alloc_roots(vcpu);
	kvm_arch_ops->set_cr3(vcpu, vcpu->mmu.root_hpa);
	kvm_mmu_flush_tlb(vcpu);
out:
	spin_unlock(&vcpu->kvm->lock);
	return r;
}
EXPORT_SYMBOL_GPL(kvm_mmu_load);

void kvm_mmu_unload(struct kvm_vcpu *vcpu)
{
	mmu_free_roots(vcpu);
}

static void mmu_pte_write_zap_pte(struct kvm_vcpu *vcpu,
				  struct kvm_mmu_page *page,
				  u64 *spte)
{
	u64 pte;
	struct kvm_mmu_page *child;

	pte = *spte;
	if (is_present_pte(pte)) {
		if (page->role.level == PT_PAGE_TABLE_LEVEL)
			rmap_remove(spte);
		else {
			child = page_header(pte & PT64_BASE_ADDR_MASK);
			mmu_page_remove_parent_pte(child, spte);
		}
	}
	*spte = 0;
	kvm_flush_remote_tlbs(vcpu->kvm);
}

static void mmu_pte_write_new_pte(struct kvm_vcpu *vcpu,
				  struct kvm_mmu_page *page,
				  u64 *spte,
				  const void *new, int bytes)
{
	if (page->role.level != PT_PAGE_TABLE_LEVEL)
		return;

	if (page->role.glevels == PT32_ROOT_LEVEL)
		paging32_update_pte(vcpu, page, spte, new, bytes);
	else
		paging64_update_pte(vcpu, page, spte, new, bytes);
}
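
/*
 * Called when the guest writes to a page that is shadowed as a page
 * table (such pages are write-protected, so the write traps and is
 * emulated).  Affected shadow PTEs are zapped and, for last-level
 * pages, rebuilt from the new guest entry.  Two heuristics zap the
 * whole shadow page instead: writes misaligned with respect to the
 * guest pte size, and repeated writes to the same gfn ("flooding"),
 * both of which suggest the page is not really a page table any more.
 */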

void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
		       const u8 *old, const u8 *new, int bytes)
{
	gfn_t gfn = gpa >> PAGE_SHIFT;
	struct kvm_mmu_page *page;
	struct hlist_node *node, *n;
	struct hlist_head *bucket;
	unsigned index;
	u64 *spte;
	unsigned offset = offset_in_page(gpa);
	unsigned pte_size;
	unsigned page_offset;
	unsigned misaligned;
	unsigned quadrant;
	int level;
	int flooded = 0;
	int npte;

	pgprintk("%s: gpa %llx bytes %d\n", __FUNCTION__, gpa, bytes);
	if (gfn == vcpu->last_pt_write_gfn) {
		++vcpu->last_pt_write_count;
		if (vcpu->last_pt_write_count >= 3)
			flooded = 1;
	} else {
		vcpu->last_pt_write_gfn = gfn;
		vcpu->last_pt_write_count = 1;
	}
	index = kvm_page_table_hashfn(gfn) % KVM_NUM_MMU_PAGES;
	bucket = &vcpu->kvm->mmu_page_hash[index];
	hlist_for_each_entry_safe(page, node, n, bucket, hash_link) {
		if (page->gfn != gfn || page->role.metaphysical)
			continue;
		pte_size = page->role.glevels == PT32_ROOT_LEVEL ? 4 : 8;
		misaligned = (offset ^ (offset + bytes - 1)) & ~(pte_size - 1);
		misaligned |= bytes < 4;
		if (misaligned || flooded) {
			/*
			 * Misaligned accesses are too much trouble to fix
			 * up; also, they usually indicate a page is not used
			 * as a page table.
			 *
			 * If we're seeing too many writes to a page,
			 * it may no longer be a page table, or we may be
			 * forking, in which case it is better to unmap the
			 * page.
			 */
			pgprintk("misaligned: gpa %llx bytes %d role %x\n",
				 gpa, bytes, page->role.word);
			kvm_mmu_zap_page(vcpu->kvm, page);
			continue;
		}
		page_offset = offset;
		level = page->role.level;
		npte = 1;
		if (page->role.glevels == PT32_ROOT_LEVEL) {
			page_offset <<= 1;	/* 32->64 */
			/*
			 * A 32-bit pde maps 4MB while the shadow pdes map
			 * only 2MB.  So we need to double the offset again
			 * and zap two pdes instead of one.
			 */
			if (level == PT32_ROOT_LEVEL) {
				page_offset &= ~7; /* kill rounding error */
				page_offset <<= 1;
				npte = 2;
			}
			quadrant = page_offset >> PAGE_SHIFT;
			page_offset &= ~PAGE_MASK;
			if (quadrant != page->role.quadrant)
				continue;
		}
		spte = &page->spt[page_offset / sizeof(*spte)];
		while (npte--) {
			mmu_pte_write_zap_pte(vcpu, page, spte);
			mmu_pte_write_new_pte(vcpu, page, spte, new, bytes);
			++spte;
		}
	}
}

int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva)
{
	gpa_t gpa = vcpu->mmu.gva_to_gpa(vcpu, gva);

	return kvm_mmu_unprotect_page(vcpu, gpa >> PAGE_SHIFT);
}
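
/*
 * Evict shadow pages when running low.  active_mmu_pages is kept
 * roughly in allocation order: pages are added at the head, so
 * eviction takes its victims from the tail, the oldest pages.
 */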

void kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu)
{
	while (vcpu->kvm->n_free_mmu_pages < KVM_REFILL_PAGES) {
		struct kvm_mmu_page *page;

		page = container_of(vcpu->kvm->active_mmu_pages.prev,
				    struct kvm_mmu_page, link);
		kvm_mmu_zap_page(vcpu->kvm, page);
	}
}
EXPORT_SYMBOL_GPL(kvm_mmu_free_some_pages);

static void free_mmu_pages(struct kvm_vcpu *vcpu)
{
	struct kvm_mmu_page *page;

	while (!list_empty(&vcpu->kvm->active_mmu_pages)) {
		page = container_of(vcpu->kvm->active_mmu_pages.next,
				    struct kvm_mmu_page, link);
		kvm_mmu_zap_page(vcpu->kvm, page);
	}
	free_page((unsigned long)vcpu->mmu.pae_root);
}

static int alloc_mmu_pages(struct kvm_vcpu *vcpu)
{
	struct page *page;
	int i;

	ASSERT(vcpu);

	vcpu->kvm->n_free_mmu_pages = KVM_NUM_MMU_PAGES;

	/*
	 * When emulating 32-bit mode, cr3 is only 32 bits even on x86_64.
	 * Therefore we need to allocate shadow page tables in the first
	 * 4GB of memory, which happens to fit the DMA32 zone.
	 */
	page = alloc_page(GFP_KERNEL | __GFP_DMA32);
	if (!page)
		goto error_1;
	vcpu->mmu.pae_root = page_address(page);
	for (i = 0; i < 4; ++i)
		vcpu->mmu.pae_root[i] = INVALID_PAGE;

	return 0;

error_1:
	free_mmu_pages(vcpu);
	return -ENOMEM;
}

int kvm_mmu_create(struct kvm_vcpu *vcpu)
{
	ASSERT(vcpu);
	ASSERT(!VALID_PAGE(vcpu->mmu.root_hpa));

	return alloc_mmu_pages(vcpu);
}

int kvm_mmu_setup(struct kvm_vcpu *vcpu)
{
	ASSERT(vcpu);
	ASSERT(!VALID_PAGE(vcpu->mmu.root_hpa));

	return init_kvm_mmu(vcpu);
}

void kvm_mmu_destroy(struct kvm_vcpu *vcpu)
{
	ASSERT(vcpu);

	destroy_kvm_mmu(vcpu);
	free_mmu_pages(vcpu);
	mmu_free_memory_caches(vcpu);
}

void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot)
{
	struct kvm_mmu_page *page;

	list_for_each_entry(page, &kvm->active_mmu_pages, link) {
		int i;
		u64 *pt;

		if (!test_bit(slot, &page->slot_bitmap))
			continue;

		pt = page->spt;
		for (i = 0; i < PT64_ENT_PER_PAGE; ++i)
			/* avoid RMW */
			if (pt[i] & PT_WRITABLE_MASK) {
				rmap_remove(&pt[i]);
				pt[i] &= ~PT_WRITABLE_MASK;
			}
	}
}

void kvm_mmu_zap_all(struct kvm *kvm)
{
	struct kvm_mmu_page *page, *node;

	list_for_each_entry_safe(page, node, &kvm->active_mmu_pages, link)
		kvm_mmu_zap_page(kvm, page);

	kvm_flush_remote_tlbs(kvm);
}

void kvm_mmu_module_exit(void)
{
	if (pte_chain_cache)
		kmem_cache_destroy(pte_chain_cache);
	if (rmap_desc_cache)
		kmem_cache_destroy(rmap_desc_cache);
	if (mmu_page_header_cache)
		kmem_cache_destroy(mmu_page_header_cache);
}

int kvm_mmu_module_init(void)
{
	pte_chain_cache = kmem_cache_create("kvm_pte_chain",
					    sizeof(struct kvm_pte_chain),
					    0, 0, NULL);
	if (!pte_chain_cache)
		goto nomem;
	rmap_desc_cache = kmem_cache_create("kvm_rmap_desc",
					    sizeof(struct kvm_rmap_desc),
					    0, 0, NULL);
	if (!rmap_desc_cache)
		goto nomem;
	mmu_page_header_cache = kmem_cache_create("kvm_mmu_page_header",
						  sizeof(struct kvm_mmu_page),
						  0, 0, NULL);
	if (!mmu_page_header_cache)
		goto nomem;
	return 0;

nomem:
	kvm_mmu_module_exit();
	return -ENOMEM;
}
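
/*
 * Self-audit support, compiled only when AUDIT is defined: after MMU
 * operations it cross-checks the shadow state, e.g. that every shadow
 * translation matches the guest's own page tables, that the number of
 * rmap entries equals the number of writable shadow PTEs, and that
 * write-protected shadow pages have no writable mappings left.
 */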

#ifdef AUDIT

static const char *audit_msg;

static gva_t canonicalize(gva_t gva)
{
#ifdef CONFIG_X86_64
	gva = (long long)(gva << 16) >> 16;
#endif
	return gva;
}

static void audit_mappings_page(struct kvm_vcpu *vcpu, u64 page_pte,
				gva_t va, int level)
{
	u64 *pt = __va(page_pte & PT64_BASE_ADDR_MASK);
	int i;
	gva_t va_delta = 1ul << (PAGE_SHIFT + 9 * (level - 1));

	for (i = 0; i < PT64_ENT_PER_PAGE; ++i, va += va_delta) {
		u64 ent = pt[i];

		if (!(ent & PT_PRESENT_MASK))
			continue;

		va = canonicalize(va);
		if (level > 1)
			audit_mappings_page(vcpu, ent, va, level - 1);
		else {
			gpa_t gpa = vcpu->mmu.gva_to_gpa(vcpu, va);
			hpa_t hpa = gpa_to_hpa(vcpu, gpa);

			if ((ent & PT_PRESENT_MASK)
			    && (ent & PT64_BASE_ADDR_MASK) != hpa)
				printk(KERN_ERR "audit error: (%s) levels %d"
				       " gva %lx gpa %llx hpa %llx ent %llx\n",
				       audit_msg, vcpu->mmu.root_level,
				       va, gpa, hpa, ent);
		}
	}
}

static void audit_mappings(struct kvm_vcpu *vcpu)
{
	unsigned i;

	if (vcpu->mmu.root_level == 4)
		audit_mappings_page(vcpu, vcpu->mmu.root_hpa, 0, 4);
	else
		for (i = 0; i < 4; ++i)
			if (vcpu->mmu.pae_root[i] & PT_PRESENT_MASK)
				audit_mappings_page(vcpu,
						    vcpu->mmu.pae_root[i],
						    i << 30,
						    2);
}

static int count_rmaps(struct kvm_vcpu *vcpu)
{
	int nmaps = 0;
	int i, j, k;

	for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
		struct kvm_memory_slot *m = &vcpu->kvm->memslots[i];
		struct kvm_rmap_desc *d;

		for (j = 0; j < m->npages; ++j) {
			struct page *page = m->phys_mem[j];

			if (!page->private)
				continue;
			if (!(page->private & 1)) {
				++nmaps;
				continue;
			}
			d = (struct kvm_rmap_desc *)(page->private & ~1ul);
			while (d) {
				for (k = 0; k < RMAP_EXT; ++k)
					if (d->shadow_ptes[k])
						++nmaps;
					else
						break;
				d = d->more;
			}
		}
	}
	return nmaps;
}

static int count_writable_mappings(struct kvm_vcpu *vcpu)
{
	int nmaps = 0;
	struct kvm_mmu_page *page;
	int i;

	list_for_each_entry(page, &vcpu->kvm->active_mmu_pages, link) {
		u64 *pt = page->spt;

		if (page->role.level != PT_PAGE_TABLE_LEVEL)
			continue;

		for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
			u64 ent = pt[i];

			if (!(ent & PT_PRESENT_MASK))
				continue;
			if (!(ent & PT_WRITABLE_MASK))
				continue;
			++nmaps;
		}
	}
	return nmaps;
}

static void audit_rmap(struct kvm_vcpu *vcpu)
{
	int n_rmap = count_rmaps(vcpu);
	int n_actual = count_writable_mappings(vcpu);

	if (n_rmap != n_actual)
		printk(KERN_ERR "%s: (%s) rmap %d actual %d\n",
		       __FUNCTION__, audit_msg, n_rmap, n_actual);
}

static void audit_write_protection(struct kvm_vcpu *vcpu)
{
	struct kvm_mmu_page *page;

	list_for_each_entry(page, &vcpu->kvm->active_mmu_pages, link) {
		hfn_t hfn;
		struct page *pg;

		if (page->role.metaphysical)
			continue;

		hfn = gpa_to_hpa(vcpu, (gpa_t)page->gfn << PAGE_SHIFT)
			>> PAGE_SHIFT;
		pg = pfn_to_page(hfn);
		if (pg->private)
			printk(KERN_ERR "%s: (%s) shadow page has writable"
			       " mappings: gfn %lx role %x\n",
			       __FUNCTION__, audit_msg, page->gfn,
			       page->role.word);
	}
}

static void kvm_mmu_audit(struct kvm_vcpu *vcpu, const char *msg)
{
	int olddbg = dbg;

	dbg = 0;
	audit_msg = msg;
	audit_rmap(vcpu);
	audit_write_protection(vcpu);
	audit_mappings(vcpu);
	dbg = olddbg;
}

#endif