mmu.c

  1. /*
  2. * Kernel-based Virtual Machine driver for Linux
  3. *
  4. * This module enables machines with Intel VT-x extensions to run virtual
  5. * machines without emulation or binary translation.
  6. *
  7. * MMU support
  8. *
  9. * Copyright (C) 2006 Qumranet, Inc.
  10. *
  11. * Authors:
  12. * Yaniv Kamay <yaniv@qumranet.com>
  13. * Avi Kivity <avi@qumranet.com>
  14. *
  15. * This work is licensed under the terms of the GNU GPL, version 2. See
  16. * the COPYING file in the top-level directory.
  17. *
  18. */
  19. #include <linux/types.h>
  20. #include <linux/string.h>
  21. #include <asm/page.h>
  22. #include <linux/mm.h>
  23. #include <linux/highmem.h>
  24. #include <linux/module.h>
  25. #include "vmx.h"
  26. #include "kvm.h"
  27. #undef MMU_DEBUG
  28. #undef AUDIT
  29. #ifdef AUDIT
  30. static void kvm_mmu_audit(struct kvm_vcpu *vcpu, const char *msg);
  31. #else
  32. static void kvm_mmu_audit(struct kvm_vcpu *vcpu, const char *msg) {}
  33. #endif
  34. #ifdef MMU_DEBUG
  35. #define pgprintk(x...) do { if (dbg) printk(x); } while (0)
  36. #define rmap_printk(x...) do { if (dbg) printk(x); } while (0)
  37. #else
  38. #define pgprintk(x...) do { } while (0)
  39. #define rmap_printk(x...) do { } while (0)
  40. #endif
  41. #if defined(MMU_DEBUG) || defined(AUDIT)
  42. static int dbg = 1;
  43. #endif
  44. #define ASSERT(x) \
  45. if (!(x)) { \
  46. printk(KERN_WARNING "assertion failed %s:%d: %s\n", \
  47. __FILE__, __LINE__, #x); \
  48. }
  49. #define PT64_PT_BITS 9
  50. #define PT64_ENT_PER_PAGE (1 << PT64_PT_BITS)
  51. #define PT32_PT_BITS 10
  52. #define PT32_ENT_PER_PAGE (1 << PT32_PT_BITS)
  53. #define PT_WRITABLE_SHIFT 1
  54. #define PT_PRESENT_MASK (1ULL << 0)
  55. #define PT_WRITABLE_MASK (1ULL << PT_WRITABLE_SHIFT)
  56. #define PT_USER_MASK (1ULL << 2)
  57. #define PT_PWT_MASK (1ULL << 3)
  58. #define PT_PCD_MASK (1ULL << 4)
  59. #define PT_ACCESSED_MASK (1ULL << 5)
  60. #define PT_DIRTY_MASK (1ULL << 6)
  61. #define PT_PAGE_SIZE_MASK (1ULL << 7)
  62. #define PT_PAT_MASK (1ULL << 7)
  63. #define PT_GLOBAL_MASK (1ULL << 8)
  64. #define PT64_NX_MASK (1ULL << 63)
  65. #define PT_PAT_SHIFT 7
  66. #define PT_DIR_PAT_SHIFT 12
  67. #define PT_DIR_PAT_MASK (1ULL << PT_DIR_PAT_SHIFT)
  68. #define PT32_DIR_PSE36_SIZE 4
  69. #define PT32_DIR_PSE36_SHIFT 13
  70. #define PT32_DIR_PSE36_MASK (((1ULL << PT32_DIR_PSE36_SIZE) - 1) << PT32_DIR_PSE36_SHIFT)
  71. #define PT32_PTE_COPY_MASK \
  72. (PT_PRESENT_MASK | PT_ACCESSED_MASK | PT_DIRTY_MASK | PT_GLOBAL_MASK)
  73. #define PT64_PTE_COPY_MASK (PT64_NX_MASK | PT32_PTE_COPY_MASK)
  74. #define PT_FIRST_AVAIL_BITS_SHIFT 9
  75. #define PT64_SECOND_AVAIL_BITS_SHIFT 52
  76. #define PT_SHADOW_PS_MARK (1ULL << PT_FIRST_AVAIL_BITS_SHIFT)
  77. #define PT_SHADOW_IO_MARK (1ULL << PT_FIRST_AVAIL_BITS_SHIFT)
  78. #define PT_SHADOW_WRITABLE_SHIFT (PT_FIRST_AVAIL_BITS_SHIFT + 1)
  79. #define PT_SHADOW_WRITABLE_MASK (1ULL << PT_SHADOW_WRITABLE_SHIFT)
  80. #define PT_SHADOW_USER_SHIFT (PT_SHADOW_WRITABLE_SHIFT + 1)
  81. #define PT_SHADOW_USER_MASK (1ULL << (PT_SHADOW_USER_SHIFT))
  82. #define PT_SHADOW_BITS_OFFSET (PT_SHADOW_WRITABLE_SHIFT - PT_WRITABLE_SHIFT)
  83. #define VALID_PAGE(x) ((x) != INVALID_PAGE)
  84. #define PT64_LEVEL_BITS 9
  85. #define PT64_LEVEL_SHIFT(level) \
  86. ( PAGE_SHIFT + (level - 1) * PT64_LEVEL_BITS )
  87. #define PT64_LEVEL_MASK(level) \
  88. (((1ULL << PT64_LEVEL_BITS) - 1) << PT64_LEVEL_SHIFT(level))
  89. #define PT64_INDEX(address, level)\
  90. (((address) >> PT64_LEVEL_SHIFT(level)) & ((1 << PT64_LEVEL_BITS) - 1))
  91. #define PT32_LEVEL_BITS 10
  92. #define PT32_LEVEL_SHIFT(level) \
  93. ( PAGE_SHIFT + (level - 1) * PT32_LEVEL_BITS )
  94. #define PT32_LEVEL_MASK(level) \
  95. (((1ULL << PT32_LEVEL_BITS) - 1) << PT32_LEVEL_SHIFT(level))
  96. #define PT32_INDEX(address, level)\
  97. (((address) >> PT32_LEVEL_SHIFT(level)) & ((1 << PT32_LEVEL_BITS) - 1))
  98. #define PT64_BASE_ADDR_MASK (((1ULL << 52) - 1) & PAGE_MASK)
  99. #define PT64_DIR_BASE_ADDR_MASK \
  100. (PT64_BASE_ADDR_MASK & ~((1ULL << (PAGE_SHIFT + PT64_LEVEL_BITS)) - 1))
  101. #define PT32_BASE_ADDR_MASK PAGE_MASK
  102. #define PT32_DIR_BASE_ADDR_MASK \
  103. (PAGE_MASK & ~((1ULL << (PAGE_SHIFT + PT32_LEVEL_BITS)) - 1))
  104. #define PFERR_PRESENT_MASK (1U << 0)
  105. #define PFERR_WRITE_MASK (1U << 1)
  106. #define PFERR_USER_MASK (1U << 2)
  107. #define PT64_ROOT_LEVEL 4
  108. #define PT32_ROOT_LEVEL 2
  109. #define PT32E_ROOT_LEVEL 3
  110. #define PT_DIRECTORY_LEVEL 2
  111. #define PT_PAGE_TABLE_LEVEL 1
  112. #define RMAP_EXT 4
  113. struct kvm_rmap_desc {
  114. u64 *shadow_ptes[RMAP_EXT];
  115. struct kvm_rmap_desc *more;
  116. };
  117. static int is_write_protection(struct kvm_vcpu *vcpu)
  118. {
  119. return vcpu->cr0 & CR0_WP_MASK;
  120. }
  121. static int is_cpuid_PSE36(void)
  122. {
  123. return 1;
  124. }
  125. static int is_present_pte(unsigned long pte)
  126. {
  127. return pte & PT_PRESENT_MASK;
  128. }
  129. static int is_writeble_pte(unsigned long pte)
  130. {
  131. return pte & PT_WRITABLE_MASK;
  132. }
  133. static int is_io_pte(unsigned long pte)
  134. {
  135. return pte & PT_SHADOW_IO_MARK;
  136. }
  137. static int is_rmap_pte(u64 pte)
  138. {
  139. return (pte & (PT_WRITABLE_MASK | PT_PRESENT_MASK))
  140. == (PT_WRITABLE_MASK | PT_PRESENT_MASK);
  141. }
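/*
 * Per-vcpu caches of preallocated pte_chain and rmap_desc objects.
 * They are topped up (with GFP_NOWAIT) before the shadow fault path
 * runs, so the allocation helpers below can hand out objects without
 * having to allocate memory at fault time.
 */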
  142. static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache,
  143. size_t objsize, int min)
  144. {
  145. void *obj;
  146. if (cache->nobjs >= min)
  147. return 0;
  148. while (cache->nobjs < ARRAY_SIZE(cache->objects)) {
  149. obj = kzalloc(objsize, GFP_NOWAIT);
  150. if (!obj)
  151. return -ENOMEM;
  152. cache->objects[cache->nobjs++] = obj;
  153. }
  154. return 0;
  155. }
  156. static void mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc)
  157. {
  158. while (mc->nobjs)
  159. kfree(mc->objects[--mc->nobjs]);
  160. }
  161. static int mmu_topup_memory_caches(struct kvm_vcpu *vcpu)
  162. {
  163. int r;
  164. r = mmu_topup_memory_cache(&vcpu->mmu_pte_chain_cache,
  165. sizeof(struct kvm_pte_chain), 4);
  166. if (r)
  167. goto out;
  168. r = mmu_topup_memory_cache(&vcpu->mmu_rmap_desc_cache,
  169. sizeof(struct kvm_rmap_desc), 1);
  170. out:
  171. return r;
  172. }
  173. static void mmu_free_memory_caches(struct kvm_vcpu *vcpu)
  174. {
  175. mmu_free_memory_cache(&vcpu->mmu_pte_chain_cache);
  176. mmu_free_memory_cache(&vcpu->mmu_rmap_desc_cache);
  177. }
  178. static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc,
  179. size_t size)
  180. {
  181. void *p;
  182. BUG_ON(!mc->nobjs);
  183. p = mc->objects[--mc->nobjs];
  184. memset(p, 0, size);
  185. return p;
  186. }
  187. static void mmu_memory_cache_free(struct kvm_mmu_memory_cache *mc, void *obj)
  188. {
  189. if (mc->nobjs < KVM_NR_MEM_OBJS)
  190. mc->objects[mc->nobjs++] = obj;
  191. else
  192. kfree(obj);
  193. }
  194. static struct kvm_pte_chain *mmu_alloc_pte_chain(struct kvm_vcpu *vcpu)
  195. {
  196. return mmu_memory_cache_alloc(&vcpu->mmu_pte_chain_cache,
  197. sizeof(struct kvm_pte_chain));
  198. }
  199. static void mmu_free_pte_chain(struct kvm_vcpu *vcpu,
  200. struct kvm_pte_chain *pc)
  201. {
  202. mmu_memory_cache_free(&vcpu->mmu_pte_chain_cache, pc);
  203. }
  204. static struct kvm_rmap_desc *mmu_alloc_rmap_desc(struct kvm_vcpu *vcpu)
  205. {
  206. return mmu_memory_cache_alloc(&vcpu->mmu_rmap_desc_cache,
  207. sizeof(struct kvm_rmap_desc));
  208. }
  209. static void mmu_free_rmap_desc(struct kvm_vcpu *vcpu,
  210. struct kvm_rmap_desc *rd)
  211. {
  212. mmu_memory_cache_free(&vcpu->mmu_rmap_desc_cache, rd);
  213. }
  214. /*
  215. * Reverse mapping data structures:
  216. *
  217. * If page->private bit zero is zero, then page->private points to the
  218. * shadow page table entry that points to page_address(page).
  219. *
  220. * If page->private bit zero is one, (then page->private & ~1) points
  221. * to a struct kvm_rmap_desc containing more mappings.
  222. */
  223. static void rmap_add(struct kvm_vcpu *vcpu, u64 *spte)
  224. {
  225. struct page *page;
  226. struct kvm_rmap_desc *desc;
  227. int i;
  228. if (!is_rmap_pte(*spte))
  229. return;
  230. page = pfn_to_page((*spte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT);
  231. if (!page->private) {
  232. rmap_printk("rmap_add: %p %llx 0->1\n", spte, *spte);
  233. page->private = (unsigned long)spte;
  234. } else if (!(page->private & 1)) {
  235. rmap_printk("rmap_add: %p %llx 1->many\n", spte, *spte);
  236. desc = mmu_alloc_rmap_desc(vcpu);
  237. desc->shadow_ptes[0] = (u64 *)page->private;
  238. desc->shadow_ptes[1] = spte;
  239. page->private = (unsigned long)desc | 1;
  240. } else {
  241. rmap_printk("rmap_add: %p %llx many->many\n", spte, *spte);
  242. desc = (struct kvm_rmap_desc *)(page->private & ~1ul);
  243. while (desc->shadow_ptes[RMAP_EXT-1] && desc->more)
  244. desc = desc->more;
  245. if (desc->shadow_ptes[RMAP_EXT-1]) {
  246. desc->more = mmu_alloc_rmap_desc(vcpu);
  247. desc = desc->more;
  248. }
  249. for (i = 0; desc->shadow_ptes[i]; ++i)
  250. ;
  251. desc->shadow_ptes[i] = spte;
  252. }
  253. }
  254. static void rmap_desc_remove_entry(struct kvm_vcpu *vcpu,
  255. struct page *page,
  256. struct kvm_rmap_desc *desc,
  257. int i,
  258. struct kvm_rmap_desc *prev_desc)
  259. {
  260. int j;
  261. for (j = RMAP_EXT - 1; !desc->shadow_ptes[j] && j > i; --j)
  262. ;
  263. desc->shadow_ptes[i] = desc->shadow_ptes[j];
  264. desc->shadow_ptes[j] = 0;
  265. if (j != 0)
  266. return;
  267. if (!prev_desc && !desc->more)
  268. page->private = (unsigned long)desc->shadow_ptes[0];
  269. else
  270. if (prev_desc)
  271. prev_desc->more = desc->more;
  272. else
  273. page->private = (unsigned long)desc->more | 1;
  274. mmu_free_rmap_desc(vcpu, desc);
  275. }
  276. static void rmap_remove(struct kvm_vcpu *vcpu, u64 *spte)
  277. {
  278. struct page *page;
  279. struct kvm_rmap_desc *desc;
  280. struct kvm_rmap_desc *prev_desc;
  281. int i;
  282. if (!is_rmap_pte(*spte))
  283. return;
  284. page = pfn_to_page((*spte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT);
  285. if (!page->private) {
  286. printk(KERN_ERR "rmap_remove: %p %llx 0->BUG\n", spte, *spte);
  287. BUG();
  288. } else if (!(page->private & 1)) {
  289. rmap_printk("rmap_remove: %p %llx 1->0\n", spte, *spte);
  290. if ((u64 *)page->private != spte) {
  291. printk(KERN_ERR "rmap_remove: %p %llx 1->BUG\n",
  292. spte, *spte);
  293. BUG();
  294. }
  295. page->private = 0;
  296. } else {
  297. rmap_printk("rmap_remove: %p %llx many->many\n", spte, *spte);
  298. desc = (struct kvm_rmap_desc *)(page->private & ~1ul);
  299. prev_desc = NULL;
  300. while (desc) {
  301. for (i = 0; i < RMAP_EXT && desc->shadow_ptes[i]; ++i)
  302. if (desc->shadow_ptes[i] == spte) {
  303. rmap_desc_remove_entry(vcpu, page,
  304. desc, i,
  305. prev_desc);
  306. return;
  307. }
  308. prev_desc = desc;
  309. desc = desc->more;
  310. }
  311. BUG();
  312. }
  313. }
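/*
 * Remove write access from every shadow pte that maps the page at gfn.
 * Only writable ptes are kept in the rmap, so each spte found here is
 * dropped from the rmap and has its writable bit cleared.
 */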
  314. static void rmap_write_protect(struct kvm_vcpu *vcpu, u64 gfn)
  315. {
  316. struct kvm *kvm = vcpu->kvm;
  317. struct page *page;
  318. struct kvm_memory_slot *slot;
  319. struct kvm_rmap_desc *desc;
  320. u64 *spte;
  321. slot = gfn_to_memslot(kvm, gfn);
  322. BUG_ON(!slot);
  323. page = gfn_to_page(slot, gfn);
  324. while (page->private) {
  325. if (!(page->private & 1))
  326. spte = (u64 *)page->private;
  327. else {
  328. desc = (struct kvm_rmap_desc *)(page->private & ~1ul);
  329. spte = desc->shadow_ptes[0];
  330. }
  331. BUG_ON(!spte);
  332. BUG_ON((*spte & PT64_BASE_ADDR_MASK) !=
  333. page_to_pfn(page) << PAGE_SHIFT);
  334. BUG_ON(!(*spte & PT_PRESENT_MASK));
  335. BUG_ON(!(*spte & PT_WRITABLE_MASK));
  336. rmap_printk("rmap_write_protect: spte %p %llx\n", spte, *spte);
  337. rmap_remove(vcpu, spte);
  338. kvm_arch_ops->tlb_flush(vcpu);
  339. *spte &= ~(u64)PT_WRITABLE_MASK;
  340. }
  341. }
  342. static int is_empty_shadow_page(hpa_t page_hpa)
  343. {
  344. u64 *pos;
  345. u64 *end;
  346. for (pos = __va(page_hpa), end = pos + PAGE_SIZE / sizeof(u64);
  347. pos != end; pos++)
  348. if (*pos != 0) {
  349. printk(KERN_ERR "%s: %p %llx\n", __FUNCTION__,
  350. pos, *pos);
  351. return 0;
  352. }
  353. return 1;
  354. }
  355. static void kvm_mmu_free_page(struct kvm_vcpu *vcpu, hpa_t page_hpa)
  356. {
  357. struct kvm_mmu_page *page_head = page_header(page_hpa);
  358. ASSERT(is_empty_shadow_page(page_hpa));
  359. list_del(&page_head->link);
  360. page_head->page_hpa = page_hpa;
  361. list_add(&page_head->link, &vcpu->free_pages);
  362. ++vcpu->kvm->n_free_mmu_pages;
  363. }
  364. static unsigned kvm_page_table_hashfn(gfn_t gfn)
  365. {
  366. return gfn;
  367. }
  368. static struct kvm_mmu_page *kvm_mmu_alloc_page(struct kvm_vcpu *vcpu,
  369. u64 *parent_pte)
  370. {
  371. struct kvm_mmu_page *page;
  372. if (list_empty(&vcpu->free_pages))
  373. return NULL;
  374. page = list_entry(vcpu->free_pages.next, struct kvm_mmu_page, link);
  375. list_del(&page->link);
  376. list_add(&page->link, &vcpu->kvm->active_mmu_pages);
  377. ASSERT(is_empty_shadow_page(page->page_hpa));
  378. page->slot_bitmap = 0;
  379. page->global = 1;
  380. page->multimapped = 0;
  381. page->parent_pte = parent_pte;
  382. --vcpu->kvm->n_free_mmu_pages;
  383. return page;
  384. }
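/*
 * Record a back-pointer from a shadow page to a parent pte that maps it.
 * A single parent is kept inline in page->parent_pte; once a second
 * parent appears the page becomes "multimapped" and the pointers move
 * into a list of kvm_pte_chain structures.
 */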
  385. static void mmu_page_add_parent_pte(struct kvm_vcpu *vcpu,
  386. struct kvm_mmu_page *page, u64 *parent_pte)
  387. {
  388. struct kvm_pte_chain *pte_chain;
  389. struct hlist_node *node;
  390. int i;
  391. if (!parent_pte)
  392. return;
  393. if (!page->multimapped) {
  394. u64 *old = page->parent_pte;
  395. if (!old) {
  396. page->parent_pte = parent_pte;
  397. return;
  398. }
  399. page->multimapped = 1;
  400. pte_chain = mmu_alloc_pte_chain(vcpu);
  401. INIT_HLIST_HEAD(&page->parent_ptes);
  402. hlist_add_head(&pte_chain->link, &page->parent_ptes);
  403. pte_chain->parent_ptes[0] = old;
  404. }
  405. hlist_for_each_entry(pte_chain, node, &page->parent_ptes, link) {
  406. if (pte_chain->parent_ptes[NR_PTE_CHAIN_ENTRIES-1])
  407. continue;
  408. for (i = 0; i < NR_PTE_CHAIN_ENTRIES; ++i)
  409. if (!pte_chain->parent_ptes[i]) {
  410. pte_chain->parent_ptes[i] = parent_pte;
  411. return;
  412. }
  413. }
  414. pte_chain = mmu_alloc_pte_chain(vcpu);
  415. BUG_ON(!pte_chain);
  416. hlist_add_head(&pte_chain->link, &page->parent_ptes);
  417. pte_chain->parent_ptes[0] = parent_pte;
  418. }
  419. static void mmu_page_remove_parent_pte(struct kvm_vcpu *vcpu,
  420. struct kvm_mmu_page *page,
  421. u64 *parent_pte)
  422. {
  423. struct kvm_pte_chain *pte_chain;
  424. struct hlist_node *node;
  425. int i;
  426. if (!page->multimapped) {
  427. BUG_ON(page->parent_pte != parent_pte);
  428. page->parent_pte = NULL;
  429. return;
  430. }
  431. hlist_for_each_entry(pte_chain, node, &page->parent_ptes, link)
  432. for (i = 0; i < NR_PTE_CHAIN_ENTRIES; ++i) {
  433. if (!pte_chain->parent_ptes[i])
  434. break;
  435. if (pte_chain->parent_ptes[i] != parent_pte)
  436. continue;
  437. while (i + 1 < NR_PTE_CHAIN_ENTRIES
  438. && pte_chain->parent_ptes[i + 1]) {
  439. pte_chain->parent_ptes[i]
  440. = pte_chain->parent_ptes[i + 1];
  441. ++i;
  442. }
  443. pte_chain->parent_ptes[i] = NULL;
  444. if (i == 0) {
  445. hlist_del(&pte_chain->link);
  446. mmu_free_pte_chain(vcpu, pte_chain);
  447. if (hlist_empty(&page->parent_ptes)) {
  448. page->multimapped = 0;
  449. page->parent_pte = NULL;
  450. }
  451. }
  452. return;
  453. }
  454. BUG();
  455. }
  456. static struct kvm_mmu_page *kvm_mmu_lookup_page(struct kvm_vcpu *vcpu,
  457. gfn_t gfn)
  458. {
  459. unsigned index;
  460. struct hlist_head *bucket;
  461. struct kvm_mmu_page *page;
  462. struct hlist_node *node;
  463. pgprintk("%s: looking for gfn %lx\n", __FUNCTION__, gfn);
  464. index = kvm_page_table_hashfn(gfn) % KVM_NUM_MMU_PAGES;
  465. bucket = &vcpu->kvm->mmu_page_hash[index];
  466. hlist_for_each_entry(page, node, bucket, hash_link)
  467. if (page->gfn == gfn && !page->role.metaphysical) {
  468. pgprintk("%s: found role %x\n",
  469. __FUNCTION__, page->role.word);
  470. return page;
  471. }
  472. return NULL;
  473. }
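/*
 * Find a shadow page for the given gfn and role in the hash table, or
 * allocate a fresh one.  A newly created non-metaphysical page shadows a
 * guest page table, so the guest page is write-protected to keep the
 * shadow in sync.
 */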
  474. static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
  475. gfn_t gfn,
  476. gva_t gaddr,
  477. unsigned level,
  478. int metaphysical,
  479. u64 *parent_pte)
  480. {
  481. union kvm_mmu_page_role role;
  482. unsigned index;
  483. unsigned quadrant;
  484. struct hlist_head *bucket;
  485. struct kvm_mmu_page *page;
  486. struct hlist_node *node;
  487. role.word = 0;
  488. role.glevels = vcpu->mmu.root_level;
  489. role.level = level;
  490. role.metaphysical = metaphysical;
  491. if (vcpu->mmu.root_level <= PT32_ROOT_LEVEL) {
  492. quadrant = gaddr >> (PAGE_SHIFT + (PT64_PT_BITS * level));
  493. quadrant &= (1 << ((PT32_PT_BITS - PT64_PT_BITS) * level)) - 1;
  494. role.quadrant = quadrant;
  495. }
  496. pgprintk("%s: looking gfn %lx role %x\n", __FUNCTION__,
  497. gfn, role.word);
  498. index = kvm_page_table_hashfn(gfn) % KVM_NUM_MMU_PAGES;
  499. bucket = &vcpu->kvm->mmu_page_hash[index];
  500. hlist_for_each_entry(page, node, bucket, hash_link)
  501. if (page->gfn == gfn && page->role.word == role.word) {
  502. mmu_page_add_parent_pte(vcpu, page, parent_pte);
  503. pgprintk("%s: found\n", __FUNCTION__);
  504. return page;
  505. }
  506. page = kvm_mmu_alloc_page(vcpu, parent_pte);
  507. if (!page)
  508. return page;
  509. pgprintk("%s: adding gfn %lx role %x\n", __FUNCTION__, gfn, role.word);
  510. page->gfn = gfn;
  511. page->role = role;
  512. hlist_add_head(&page->hash_link, bucket);
  513. if (!metaphysical)
  514. rmap_write_protect(vcpu, gfn);
  515. return page;
  516. }
  517. static void kvm_mmu_page_unlink_children(struct kvm_vcpu *vcpu,
  518. struct kvm_mmu_page *page)
  519. {
  520. unsigned i;
  521. u64 *pt;
  522. u64 ent;
  523. pt = __va(page->page_hpa);
  524. if (page->role.level == PT_PAGE_TABLE_LEVEL) {
  525. for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
  526. if (pt[i] & PT_PRESENT_MASK)
  527. rmap_remove(vcpu, &pt[i]);
  528. pt[i] = 0;
  529. }
  530. kvm_arch_ops->tlb_flush(vcpu);
  531. return;
  532. }
  533. for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
  534. ent = pt[i];
  535. pt[i] = 0;
  536. if (!(ent & PT_PRESENT_MASK))
  537. continue;
  538. ent &= PT64_BASE_ADDR_MASK;
  539. mmu_page_remove_parent_pte(vcpu, page_header(ent), &pt[i]);
  540. }
  541. }
  542. static void kvm_mmu_put_page(struct kvm_vcpu *vcpu,
  543. struct kvm_mmu_page *page,
  544. u64 *parent_pte)
  545. {
  546. mmu_page_remove_parent_pte(vcpu, page, parent_pte);
  547. }
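/*
 * Tear down a shadow page: clear every parent pte pointing at it, unlink
 * its children, and either return it to the free list or, if it is still
 * in use as a root, leave it on the active list.
 */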
  548. static void kvm_mmu_zap_page(struct kvm_vcpu *vcpu,
  549. struct kvm_mmu_page *page)
  550. {
  551. u64 *parent_pte;
  552. while (page->multimapped || page->parent_pte) {
  553. if (!page->multimapped)
  554. parent_pte = page->parent_pte;
  555. else {
  556. struct kvm_pte_chain *chain;
  557. chain = container_of(page->parent_ptes.first,
  558. struct kvm_pte_chain, link);
  559. parent_pte = chain->parent_ptes[0];
  560. }
  561. BUG_ON(!parent_pte);
  562. kvm_mmu_put_page(vcpu, page, parent_pte);
  563. *parent_pte = 0;
  564. }
  565. kvm_mmu_page_unlink_children(vcpu, page);
  566. if (!page->root_count) {
  567. hlist_del(&page->hash_link);
  568. kvm_mmu_free_page(vcpu, page->page_hpa);
  569. } else {
  570. list_del(&page->link);
  571. list_add(&page->link, &vcpu->kvm->active_mmu_pages);
  572. }
  573. }
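/*
 * Unshadow the guest page table at gfn by zapping every shadow page that
 * maps it.
 */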
  574. static int kvm_mmu_unprotect_page(struct kvm_vcpu *vcpu, gfn_t gfn)
  575. {
  576. unsigned index;
  577. struct hlist_head *bucket;
  578. struct kvm_mmu_page *page;
  579. struct hlist_node *node, *n;
  580. int r;
  581. pgprintk("%s: looking for gfn %lx\n", __FUNCTION__, gfn);
  582. r = 0;
  583. index = kvm_page_table_hashfn(gfn) % KVM_NUM_MMU_PAGES;
  584. bucket = &vcpu->kvm->mmu_page_hash[index];
  585. hlist_for_each_entry_safe(page, node, n, bucket, hash_link)
  586. if (page->gfn == gfn && !page->role.metaphysical) {
  587. pgprintk("%s: gfn %lx role %x\n", __FUNCTION__, gfn,
  588. page->role.word);
  589. kvm_mmu_zap_page(vcpu, page);
  590. r = 1;
  591. }
  592. return r;
  593. }
  594. static void page_header_update_slot(struct kvm *kvm, void *pte, gpa_t gpa)
  595. {
  596. int slot = memslot_id(kvm, gfn_to_memslot(kvm, gpa >> PAGE_SHIFT));
  597. struct kvm_mmu_page *page_head = page_header(__pa(pte));
  598. __set_bit(slot, &page_head->slot_bitmap);
  599. }
  600. hpa_t safe_gpa_to_hpa(struct kvm_vcpu *vcpu, gpa_t gpa)
  601. {
  602. hpa_t hpa = gpa_to_hpa(vcpu, gpa);
  603. return is_error_hpa(hpa) ? bad_page_address | (gpa & ~PAGE_MASK): hpa;
  604. }
  605. hpa_t gpa_to_hpa(struct kvm_vcpu *vcpu, gpa_t gpa)
  606. {
  607. struct kvm_memory_slot *slot;
  608. struct page *page;
  609. ASSERT((gpa & HPA_ERR_MASK) == 0);
  610. slot = gfn_to_memslot(vcpu->kvm, gpa >> PAGE_SHIFT);
  611. if (!slot)
  612. return gpa | HPA_ERR_MASK;
  613. page = gfn_to_page(slot, gpa >> PAGE_SHIFT);
  614. return ((hpa_t)page_to_pfn(page) << PAGE_SHIFT)
  615. | (gpa & (PAGE_SIZE-1));
  616. }
  617. hpa_t gva_to_hpa(struct kvm_vcpu *vcpu, gva_t gva)
  618. {
  619. gpa_t gpa = vcpu->mmu.gva_to_gpa(vcpu, gva);
  620. if (gpa == UNMAPPED_GVA)
  621. return UNMAPPED_GVA;
  622. return gpa_to_hpa(vcpu, gpa);
  623. }
  624. static void nonpaging_new_cr3(struct kvm_vcpu *vcpu)
  625. {
  626. }
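/*
 * Install a shadow mapping for a guest running without paging: walk the
 * shadow table from the root, allocating intermediate metaphysical pages
 * as needed, and write a present, writable pte for the host page at the
 * last level.
 */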
  627. static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, hpa_t p)
  628. {
  629. int level = PT32E_ROOT_LEVEL;
  630. hpa_t table_addr = vcpu->mmu.root_hpa;
  631. for (; ; level--) {
  632. u32 index = PT64_INDEX(v, level);
  633. u64 *table;
  634. u64 pte;
  635. ASSERT(VALID_PAGE(table_addr));
  636. table = __va(table_addr);
  637. if (level == 1) {
  638. pte = table[index];
  639. if (is_present_pte(pte) && is_writeble_pte(pte))
  640. return 0;
  641. mark_page_dirty(vcpu->kvm, v >> PAGE_SHIFT);
  642. page_header_update_slot(vcpu->kvm, table, v);
  643. table[index] = p | PT_PRESENT_MASK | PT_WRITABLE_MASK |
  644. PT_USER_MASK;
  645. rmap_add(vcpu, &table[index]);
  646. return 0;
  647. }
  648. if (table[index] == 0) {
  649. struct kvm_mmu_page *new_table;
  650. gfn_t pseudo_gfn;
  651. pseudo_gfn = (v & PT64_DIR_BASE_ADDR_MASK)
  652. >> PAGE_SHIFT;
  653. new_table = kvm_mmu_get_page(vcpu, pseudo_gfn,
  654. v, level - 1,
  655. 1, &table[index]);
  656. if (!new_table) {
  657. pgprintk("nonpaging_map: ENOMEM\n");
  658. return -ENOMEM;
  659. }
  660. table[index] = new_table->page_hpa | PT_PRESENT_MASK
  661. | PT_WRITABLE_MASK | PT_USER_MASK;
  662. }
  663. table_addr = table[index] & PT64_BASE_ADDR_MASK;
  664. }
  665. }
  666. static void mmu_free_roots(struct kvm_vcpu *vcpu)
  667. {
  668. int i;
  669. struct kvm_mmu_page *page;
  670. #ifdef CONFIG_X86_64
  671. if (vcpu->mmu.shadow_root_level == PT64_ROOT_LEVEL) {
  672. hpa_t root = vcpu->mmu.root_hpa;
  673. ASSERT(VALID_PAGE(root));
  674. page = page_header(root);
  675. --page->root_count;
  676. vcpu->mmu.root_hpa = INVALID_PAGE;
  677. return;
  678. }
  679. #endif
  680. for (i = 0; i < 4; ++i) {
  681. hpa_t root = vcpu->mmu.pae_root[i];
  682. ASSERT(VALID_PAGE(root));
  683. root &= PT64_BASE_ADDR_MASK;
  684. page = page_header(root);
  685. --page->root_count;
  686. vcpu->mmu.pae_root[i] = INVALID_PAGE;
  687. }
  688. vcpu->mmu.root_hpa = INVALID_PAGE;
  689. }
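/*
 * Allocate the shadow root(s) for the current mode: a single 4-level
 * root on x86_64, otherwise four PAE roots (one per gigabyte of guest
 * address space) gathered under mmu.pae_root.
 */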
  690. static void mmu_alloc_roots(struct kvm_vcpu *vcpu)
  691. {
  692. int i;
  693. gfn_t root_gfn;
  694. struct kvm_mmu_page *page;
  695. root_gfn = vcpu->cr3 >> PAGE_SHIFT;
  696. #ifdef CONFIG_X86_64
  697. if (vcpu->mmu.shadow_root_level == PT64_ROOT_LEVEL) {
  698. hpa_t root = vcpu->mmu.root_hpa;
  699. ASSERT(!VALID_PAGE(root));
  700. page = kvm_mmu_get_page(vcpu, root_gfn, 0,
  701. PT64_ROOT_LEVEL, 0, NULL);
  702. root = page->page_hpa;
  703. ++page->root_count;
  704. vcpu->mmu.root_hpa = root;
  705. return;
  706. }
  707. #endif
  708. for (i = 0; i < 4; ++i) {
  709. hpa_t root = vcpu->mmu.pae_root[i];
  710. ASSERT(!VALID_PAGE(root));
  711. if (vcpu->mmu.root_level == PT32E_ROOT_LEVEL)
  712. root_gfn = vcpu->pdptrs[i] >> PAGE_SHIFT;
  713. else if (vcpu->mmu.root_level == 0)
  714. root_gfn = 0;
  715. page = kvm_mmu_get_page(vcpu, root_gfn, i << 30,
  716. PT32_ROOT_LEVEL, !is_paging(vcpu),
  717. NULL);
  718. root = page->page_hpa;
  719. ++page->root_count;
  720. vcpu->mmu.pae_root[i] = root | PT_PRESENT_MASK;
  721. }
  722. vcpu->mmu.root_hpa = __pa(vcpu->mmu.pae_root);
  723. }
  724. static gpa_t nonpaging_gva_to_gpa(struct kvm_vcpu *vcpu, gva_t vaddr)
  725. {
  726. return vaddr;
  727. }
  728. static int nonpaging_page_fault(struct kvm_vcpu *vcpu, gva_t gva,
  729. u32 error_code)
  730. {
  731. gpa_t addr = gva;
  732. hpa_t paddr;
  733. int r;
  734. r = mmu_topup_memory_caches(vcpu);
  735. if (r)
  736. return r;
  737. ASSERT(vcpu);
  738. ASSERT(VALID_PAGE(vcpu->mmu.root_hpa));
  739. paddr = gpa_to_hpa(vcpu , addr & PT64_BASE_ADDR_MASK);
  740. if (is_error_hpa(paddr))
  741. return 1;
  742. return nonpaging_map(vcpu, addr & PAGE_MASK, paddr);
  743. }
  744. static void nonpaging_free(struct kvm_vcpu *vcpu)
  745. {
  746. mmu_free_roots(vcpu);
  747. }
  748. static int nonpaging_init_context(struct kvm_vcpu *vcpu)
  749. {
  750. struct kvm_mmu *context = &vcpu->mmu;
  751. context->new_cr3 = nonpaging_new_cr3;
  752. context->page_fault = nonpaging_page_fault;
  753. context->gva_to_gpa = nonpaging_gva_to_gpa;
  754. context->free = nonpaging_free;
  755. context->root_level = 0;
  756. context->shadow_root_level = PT32E_ROOT_LEVEL;
  757. mmu_alloc_roots(vcpu);
  758. ASSERT(VALID_PAGE(context->root_hpa));
  759. kvm_arch_ops->set_cr3(vcpu, context->root_hpa);
  760. return 0;
  761. }
  762. static void kvm_mmu_flush_tlb(struct kvm_vcpu *vcpu)
  763. {
  764. ++kvm_stat.tlb_flush;
  765. kvm_arch_ops->tlb_flush(vcpu);
  766. }
  767. static void paging_new_cr3(struct kvm_vcpu *vcpu)
  768. {
  769. pgprintk("%s: cr3 %lx\n", __FUNCTION__, vcpu->cr3);
  770. mmu_free_roots(vcpu);
  771. if (unlikely(vcpu->kvm->n_free_mmu_pages < KVM_MIN_FREE_MMU_PAGES))
  772. kvm_mmu_free_some_pages(vcpu);
  773. mmu_alloc_roots(vcpu);
  774. kvm_mmu_flush_tlb(vcpu);
  775. kvm_arch_ops->set_cr3(vcpu, vcpu->mmu.root_hpa);
  776. }
  777. static void mark_pagetable_nonglobal(void *shadow_pte)
  778. {
  779. page_header(__pa(shadow_pte))->global = 0;
  780. }
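/*
 * Common spte setup: copy the access bits into the shadow-only bit range,
 * translate the guest address to a host physical address, and flag MMIO
 * addresses with PT_SHADOW_IO_MARK.  Writable mappings of pages that are
 * themselves shadowed page tables are downgraded to read-only.
 */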
  781. static inline void set_pte_common(struct kvm_vcpu *vcpu,
  782. u64 *shadow_pte,
  783. gpa_t gaddr,
  784. int dirty,
  785. u64 access_bits,
  786. gfn_t gfn)
  787. {
  788. hpa_t paddr;
  789. *shadow_pte |= access_bits << PT_SHADOW_BITS_OFFSET;
  790. if (!dirty)
  791. access_bits &= ~PT_WRITABLE_MASK;
  792. paddr = gpa_to_hpa(vcpu, gaddr & PT64_BASE_ADDR_MASK);
  793. *shadow_pte |= access_bits;
  794. if (!(*shadow_pte & PT_GLOBAL_MASK))
  795. mark_pagetable_nonglobal(shadow_pte);
  796. if (is_error_hpa(paddr)) {
  797. *shadow_pte |= gaddr;
  798. *shadow_pte |= PT_SHADOW_IO_MARK;
  799. *shadow_pte &= ~PT_PRESENT_MASK;
  800. return;
  801. }
  802. *shadow_pte |= paddr;
  803. if (access_bits & PT_WRITABLE_MASK) {
  804. struct kvm_mmu_page *shadow;
  805. shadow = kvm_mmu_lookup_page(vcpu, gfn);
  806. if (shadow) {
  807. pgprintk("%s: found shadow page for %lx, marking ro\n",
  808. __FUNCTION__, gfn);
  809. access_bits &= ~PT_WRITABLE_MASK;
  810. if (is_writeble_pte(*shadow_pte)) {
  811. *shadow_pte &= ~PT_WRITABLE_MASK;
  812. kvm_arch_ops->tlb_flush(vcpu);
  813. }
  814. }
  815. }
  816. if (access_bits & PT_WRITABLE_MASK)
  817. mark_page_dirty(vcpu->kvm, gaddr >> PAGE_SHIFT);
  818. page_header_update_slot(vcpu->kvm, shadow_pte, gaddr);
  819. rmap_add(vcpu, shadow_pte);
  820. }
  821. static void inject_page_fault(struct kvm_vcpu *vcpu,
  822. u64 addr,
  823. u32 err_code)
  824. {
  825. kvm_arch_ops->inject_page_fault(vcpu, addr, err_code);
  826. }
  827. static inline int fix_read_pf(u64 *shadow_ent)
  828. {
  829. if ((*shadow_ent & PT_SHADOW_USER_MASK) &&
  830. !(*shadow_ent & PT_USER_MASK)) {
  831. /*
  832. * If supervisor write protect is disabled, we shadow kernel
  833. * pages as user pages so we can trap the write access.
  834. */
  835. *shadow_ent |= PT_USER_MASK;
  836. *shadow_ent &= ~PT_WRITABLE_MASK;
  837. return 1;
  838. }
  839. return 0;
  840. }
  841. static int may_access(u64 pte, int write, int user)
  842. {
  843. if (user && !(pte & PT_USER_MASK))
  844. return 0;
  845. if (write && !(pte & PT_WRITABLE_MASK))
  846. return 0;
  847. return 1;
  848. }
  849. static void paging_free(struct kvm_vcpu *vcpu)
  850. {
  851. nonpaging_free(vcpu);
  852. }
  853. #define PTTYPE 64
  854. #include "paging_tmpl.h"
  855. #undef PTTYPE
  856. #define PTTYPE 32
  857. #include "paging_tmpl.h"
  858. #undef PTTYPE
  859. static int paging64_init_context_common(struct kvm_vcpu *vcpu, int level)
  860. {
  861. struct kvm_mmu *context = &vcpu->mmu;
  862. ASSERT(is_pae(vcpu));
  863. context->new_cr3 = paging_new_cr3;
  864. context->page_fault = paging64_page_fault;
  865. context->gva_to_gpa = paging64_gva_to_gpa;
  866. context->free = paging_free;
  867. context->root_level = level;
  868. context->shadow_root_level = level;
  869. mmu_alloc_roots(vcpu);
  870. ASSERT(VALID_PAGE(context->root_hpa));
  871. kvm_arch_ops->set_cr3(vcpu, context->root_hpa |
  872. (vcpu->cr3 & (CR3_PCD_MASK | CR3_WPT_MASK)));
  873. return 0;
  874. }
  875. static int paging64_init_context(struct kvm_vcpu *vcpu)
  876. {
  877. return paging64_init_context_common(vcpu, PT64_ROOT_LEVEL);
  878. }
  879. static int paging32_init_context(struct kvm_vcpu *vcpu)
  880. {
  881. struct kvm_mmu *context = &vcpu->mmu;
  882. context->new_cr3 = paging_new_cr3;
  883. context->page_fault = paging32_page_fault;
  884. context->gva_to_gpa = paging32_gva_to_gpa;
  885. context->free = paging_free;
  886. context->root_level = PT32_ROOT_LEVEL;
  887. context->shadow_root_level = PT32E_ROOT_LEVEL;
  888. mmu_alloc_roots(vcpu);
  889. ASSERT(VALID_PAGE(context->root_hpa));
  890. kvm_arch_ops->set_cr3(vcpu, context->root_hpa |
  891. (vcpu->cr3 & (CR3_PCD_MASK | CR3_WPT_MASK)));
  892. return 0;
  893. }
  894. static int paging32E_init_context(struct kvm_vcpu *vcpu)
  895. {
  896. return paging64_init_context_common(vcpu, PT32E_ROOT_LEVEL);
  897. }
  898. static int init_kvm_mmu(struct kvm_vcpu *vcpu)
  899. {
  900. ASSERT(vcpu);
  901. ASSERT(!VALID_PAGE(vcpu->mmu.root_hpa));
  902. if (!is_paging(vcpu))
  903. return nonpaging_init_context(vcpu);
  904. else if (is_long_mode(vcpu))
  905. return paging64_init_context(vcpu);
  906. else if (is_pae(vcpu))
  907. return paging32E_init_context(vcpu);
  908. else
  909. return paging32_init_context(vcpu);
  910. }
  911. static void destroy_kvm_mmu(struct kvm_vcpu *vcpu)
  912. {
  913. ASSERT(vcpu);
  914. if (VALID_PAGE(vcpu->mmu.root_hpa)) {
  915. vcpu->mmu.free(vcpu);
  916. vcpu->mmu.root_hpa = INVALID_PAGE;
  917. }
  918. }
  919. int kvm_mmu_reset_context(struct kvm_vcpu *vcpu)
  920. {
  921. int r;
  922. destroy_kvm_mmu(vcpu);
  923. r = init_kvm_mmu(vcpu);
  924. if (r < 0)
  925. goto out;
  926. r = mmu_topup_memory_caches(vcpu);
  927. out:
  928. return r;
  929. }
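/*
 * Called before emulating a write to a guest page table.  Shadow pages
 * for the written gfn are either zapped outright (misaligned writes and
 * write floods suggest the page is no longer a page table) or have the
 * affected spte cleared so the next fault rebuilds it.
 */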
  930. void kvm_mmu_pre_write(struct kvm_vcpu *vcpu, gpa_t gpa, int bytes)
  931. {
  932. gfn_t gfn = gpa >> PAGE_SHIFT;
  933. struct kvm_mmu_page *page;
  934. struct kvm_mmu_page *child;
  935. struct hlist_node *node, *n;
  936. struct hlist_head *bucket;
  937. unsigned index;
  938. u64 *spte;
  939. u64 pte;
  940. unsigned offset = offset_in_page(gpa);
  941. unsigned pte_size;
  942. unsigned page_offset;
  943. unsigned misaligned;
  944. int level;
  945. int flooded = 0;
  946. pgprintk("%s: gpa %llx bytes %d\n", __FUNCTION__, gpa, bytes);
  947. if (gfn == vcpu->last_pt_write_gfn) {
  948. ++vcpu->last_pt_write_count;
  949. if (vcpu->last_pt_write_count >= 3)
  950. flooded = 1;
  951. } else {
  952. vcpu->last_pt_write_gfn = gfn;
  953. vcpu->last_pt_write_count = 1;
  954. }
  955. index = kvm_page_table_hashfn(gfn) % KVM_NUM_MMU_PAGES;
  956. bucket = &vcpu->kvm->mmu_page_hash[index];
  957. hlist_for_each_entry_safe(page, node, n, bucket, hash_link) {
  958. if (page->gfn != gfn || page->role.metaphysical)
  959. continue;
  960. pte_size = page->role.glevels == PT32_ROOT_LEVEL ? 4 : 8;
  961. misaligned = (offset ^ (offset + bytes - 1)) & ~(pte_size - 1);
  962. if (misaligned || flooded) {
  963. /*
  964. * Misaligned accesses are too much trouble to fix
  965. * up; also, they usually indicate a page is not used
  966. * as a page table.
  967. *
  968. * If we're seeing too many writes to a page,
  969. * it may no longer be a page table, or we may be
  970. * forking, in which case it is better to unmap the
  971. * page.
  972. */
  973. pgprintk("misaligned: gpa %llx bytes %d role %x\n",
  974. gpa, bytes, page->role.word);
  975. kvm_mmu_zap_page(vcpu, page);
  976. continue;
  977. }
  978. page_offset = offset;
  979. level = page->role.level;
  980. if (page->role.glevels == PT32_ROOT_LEVEL) {
  981. page_offset <<= 1; /* 32->64 */
  982. page_offset &= ~PAGE_MASK;
  983. }
  984. spte = __va(page->page_hpa);
  985. spte += page_offset / sizeof(*spte);
  986. pte = *spte;
  987. if (is_present_pte(pte)) {
  988. if (level == PT_PAGE_TABLE_LEVEL)
  989. rmap_remove(vcpu, spte);
  990. else {
  991. child = page_header(pte & PT64_BASE_ADDR_MASK);
  992. mmu_page_remove_parent_pte(vcpu, child, spte);
  993. }
  994. }
  995. *spte = 0;
  996. }
  997. }
  998. void kvm_mmu_post_write(struct kvm_vcpu *vcpu, gpa_t gpa, int bytes)
  999. {
  1000. }
  1001. int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva)
  1002. {
  1003. gpa_t gpa = vcpu->mmu.gva_to_gpa(vcpu, gva);
  1004. return kvm_mmu_unprotect_page(vcpu, gpa >> PAGE_SHIFT);
  1005. }
  1006. void kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu)
  1007. {
  1008. while (vcpu->kvm->n_free_mmu_pages < KVM_REFILL_PAGES) {
  1009. struct kvm_mmu_page *page;
  1010. page = container_of(vcpu->kvm->active_mmu_pages.prev,
  1011. struct kvm_mmu_page, link);
  1012. kvm_mmu_zap_page(vcpu, page);
  1013. }
  1014. }
  1015. EXPORT_SYMBOL_GPL(kvm_mmu_free_some_pages);
  1016. static void free_mmu_pages(struct kvm_vcpu *vcpu)
  1017. {
  1018. struct kvm_mmu_page *page;
  1019. while (!list_empty(&vcpu->kvm->active_mmu_pages)) {
  1020. page = container_of(vcpu->kvm->active_mmu_pages.next,
  1021. struct kvm_mmu_page, link);
  1022. kvm_mmu_zap_page(vcpu, page);
  1023. }
  1024. while (!list_empty(&vcpu->free_pages)) {
  1025. page = list_entry(vcpu->free_pages.next,
  1026. struct kvm_mmu_page, link);
  1027. list_del(&page->link);
  1028. __free_page(pfn_to_page(page->page_hpa >> PAGE_SHIFT));
  1029. page->page_hpa = INVALID_PAGE;
  1030. }
  1031. free_page((unsigned long)vcpu->mmu.pae_root);
  1032. }
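/*
 * Preallocate the fixed pool of shadow pages for this vcpu, plus the page
 * holding the four PAE root entries (which, as noted below, must come
 * from the first 4GB).
 */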
  1033. static int alloc_mmu_pages(struct kvm_vcpu *vcpu)
  1034. {
  1035. struct page *page;
  1036. int i;
  1037. ASSERT(vcpu);
  1038. for (i = 0; i < KVM_NUM_MMU_PAGES; i++) {
  1039. struct kvm_mmu_page *page_header = &vcpu->page_header_buf[i];
  1040. INIT_LIST_HEAD(&page_header->link);
  1041. if ((page = alloc_page(GFP_KERNEL)) == NULL)
  1042. goto error_1;
  1043. page->private = (unsigned long)page_header;
  1044. page_header->page_hpa = (hpa_t)page_to_pfn(page) << PAGE_SHIFT;
  1045. memset(__va(page_header->page_hpa), 0, PAGE_SIZE);
  1046. list_add(&page_header->link, &vcpu->free_pages);
  1047. ++vcpu->kvm->n_free_mmu_pages;
  1048. }
  1049. /*
  1050. * When emulating 32-bit mode, cr3 is only 32 bits even on x86_64.
  1051. * Therefore we need to allocate shadow page tables in the first
  1052. * 4GB of memory, which happens to fit the DMA32 zone.
  1053. */
  1054. page = alloc_page(GFP_KERNEL | __GFP_DMA32);
  1055. if (!page)
  1056. goto error_1;
  1057. vcpu->mmu.pae_root = page_address(page);
  1058. for (i = 0; i < 4; ++i)
  1059. vcpu->mmu.pae_root[i] = INVALID_PAGE;
  1060. return 0;
  1061. error_1:
  1062. free_mmu_pages(vcpu);
  1063. return -ENOMEM;
  1064. }
  1065. int kvm_mmu_create(struct kvm_vcpu *vcpu)
  1066. {
  1067. ASSERT(vcpu);
  1068. ASSERT(!VALID_PAGE(vcpu->mmu.root_hpa));
  1069. ASSERT(list_empty(&vcpu->free_pages));
  1070. return alloc_mmu_pages(vcpu);
  1071. }
  1072. int kvm_mmu_setup(struct kvm_vcpu *vcpu)
  1073. {
  1074. ASSERT(vcpu);
  1075. ASSERT(!VALID_PAGE(vcpu->mmu.root_hpa));
  1076. ASSERT(!list_empty(&vcpu->free_pages));
  1077. return init_kvm_mmu(vcpu);
  1078. }
  1079. void kvm_mmu_destroy(struct kvm_vcpu *vcpu)
  1080. {
  1081. ASSERT(vcpu);
  1082. destroy_kvm_mmu(vcpu);
  1083. free_mmu_pages(vcpu);
  1084. mmu_free_memory_caches(vcpu);
  1085. }
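/*
 * Strip write access from every shadow pte that maps memory in the given
 * slot, walking the slot_bitmap of each active shadow page.
 */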
  1086. void kvm_mmu_slot_remove_write_access(struct kvm_vcpu *vcpu, int slot)
  1087. {
  1088. struct kvm *kvm = vcpu->kvm;
  1089. struct kvm_mmu_page *page;
  1090. list_for_each_entry(page, &kvm->active_mmu_pages, link) {
  1091. int i;
  1092. u64 *pt;
  1093. if (!test_bit(slot, &page->slot_bitmap))
  1094. continue;
  1095. pt = __va(page->page_hpa);
  1096. for (i = 0; i < PT64_ENT_PER_PAGE; ++i)
  1097. /* avoid RMW */
  1098. if (pt[i] & PT_WRITABLE_MASK) {
  1099. rmap_remove(vcpu, &pt[i]);
  1100. pt[i] &= ~PT_WRITABLE_MASK;
  1101. }
  1102. }
  1103. }
  1104. #ifdef AUDIT
  1105. static const char *audit_msg;
  1106. static gva_t canonicalize(gva_t gva)
  1107. {
  1108. #ifdef CONFIG_X86_64
  1109. gva = (long long)(gva << 16) >> 16;
  1110. #endif
  1111. return gva;
  1112. }
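/*
 * Audit helper: recursively walk a shadow page table and compare each
 * leaf mapping against the translation the guest page tables would give.
 */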
  1113. static void audit_mappings_page(struct kvm_vcpu *vcpu, u64 page_pte,
  1114. gva_t va, int level)
  1115. {
  1116. u64 *pt = __va(page_pte & PT64_BASE_ADDR_MASK);
  1117. int i;
  1118. gva_t va_delta = 1ul << (PAGE_SHIFT + 9 * (level - 1));
  1119. for (i = 0; i < PT64_ENT_PER_PAGE; ++i, va += va_delta) {
  1120. u64 ent = pt[i];
  1121. if (!(ent & PT_PRESENT_MASK))
  1122. continue;
  1123. va = canonicalize(va);
  1124. if (level > 1)
  1125. audit_mappings_page(vcpu, ent, va, level - 1);
  1126. else {
  1127. gpa_t gpa = vcpu->mmu.gva_to_gpa(vcpu, va);
  1128. hpa_t hpa = gpa_to_hpa(vcpu, gpa);
  1129. if ((ent & PT_PRESENT_MASK)
  1130. && (ent & PT64_BASE_ADDR_MASK) != hpa)
  1131. printk(KERN_ERR "audit error: (%s) levels %d"
  1132. " gva %lx gpa %llx hpa %llx ent %llx\n",
  1133. audit_msg, vcpu->mmu.root_level,
  1134. va, gpa, hpa, ent);
  1135. }
  1136. }
  1137. }
  1138. static void audit_mappings(struct kvm_vcpu *vcpu)
  1139. {
  1140. int i;
  1141. if (vcpu->mmu.root_level == 4)
  1142. audit_mappings_page(vcpu, vcpu->mmu.root_hpa, 0, 4);
  1143. else
  1144. for (i = 0; i < 4; ++i)
  1145. if (vcpu->mmu.pae_root[i] & PT_PRESENT_MASK)
  1146. audit_mappings_page(vcpu,
  1147. vcpu->mmu.pae_root[i],
  1148. i << 30,
  1149. 2);
  1150. }
  1151. static int count_rmaps(struct kvm_vcpu *vcpu)
  1152. {
  1153. int nmaps = 0;
  1154. int i, j, k;
  1155. for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
  1156. struct kvm_memory_slot *m = &vcpu->kvm->memslots[i];
  1157. struct kvm_rmap_desc *d;
  1158. for (j = 0; j < m->npages; ++j) {
  1159. struct page *page = m->phys_mem[j];
  1160. if (!page->private)
  1161. continue;
  1162. if (!(page->private & 1)) {
  1163. ++nmaps;
  1164. continue;
  1165. }
  1166. d = (struct kvm_rmap_desc *)(page->private & ~1ul);
  1167. while (d) {
  1168. for (k = 0; k < RMAP_EXT; ++k)
  1169. if (d->shadow_ptes[k])
  1170. ++nmaps;
  1171. else
  1172. break;
  1173. d = d->more;
  1174. }
  1175. }
  1176. }
  1177. return nmaps;
  1178. }
  1179. static int count_writable_mappings(struct kvm_vcpu *vcpu)
  1180. {
  1181. int nmaps = 0;
  1182. struct kvm_mmu_page *page;
  1183. int i;
  1184. list_for_each_entry(page, &vcpu->kvm->active_mmu_pages, link) {
  1185. u64 *pt = __va(page->page_hpa);
  1186. if (page->role.level != PT_PAGE_TABLE_LEVEL)
  1187. continue;
  1188. for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
  1189. u64 ent = pt[i];
  1190. if (!(ent & PT_PRESENT_MASK))
  1191. continue;
  1192. if (!(ent & PT_WRITABLE_MASK))
  1193. continue;
  1194. ++nmaps;
  1195. }
  1196. }
  1197. return nmaps;
  1198. }
  1199. static void audit_rmap(struct kvm_vcpu *vcpu)
  1200. {
  1201. int n_rmap = count_rmaps(vcpu);
  1202. int n_actual = count_writable_mappings(vcpu);
  1203. if (n_rmap != n_actual)
  1204. printk(KERN_ERR "%s: (%s) rmap %d actual %d\n",
  1205. __FUNCTION__, audit_msg, n_rmap, n_actual);
  1206. }
  1207. static void audit_write_protection(struct kvm_vcpu *vcpu)
  1208. {
  1209. struct kvm_mmu_page *page;
  1210. list_for_each_entry(page, &vcpu->kvm->active_mmu_pages, link) {
  1211. hfn_t hfn;
  1212. struct page *pg;
  1213. if (page->role.metaphysical)
  1214. continue;
  1215. hfn = gpa_to_hpa(vcpu, (gpa_t)page->gfn << PAGE_SHIFT)
  1216. >> PAGE_SHIFT;
  1217. pg = pfn_to_page(hfn);
  1218. if (pg->private)
  1219. printk(KERN_ERR "%s: (%s) shadow page has writable"
  1220. " mappings: gfn %lx role %x\n",
  1221. __FUNCTION__, audit_msg, page->gfn,
  1222. page->role.word);
  1223. }
  1224. }
  1225. static void kvm_mmu_audit(struct kvm_vcpu *vcpu, const char *msg)
  1226. {
  1227. int olddbg = dbg;
  1228. dbg = 0;
  1229. audit_msg = msg;
  1230. audit_rmap(vcpu);
  1231. audit_write_protection(vcpu);
  1232. audit_mappings(vcpu);
  1233. dbg = olddbg;
  1234. }
  1235. #endif