mmu.c

  1. /*
  2. * Kernel-based Virtual Machine driver for Linux
  3. *
  4. * This module enables machines with Intel VT-x extensions to run virtual
  5. * machines without emulation or binary translation.
  6. *
  7. * MMU support
  8. *
  9. * Copyright (C) 2006 Qumranet, Inc.
  10. *
  11. * Authors:
  12. * Yaniv Kamay <yaniv@qumranet.com>
  13. * Avi Kivity <avi@qumranet.com>
  14. *
  15. * This work is licensed under the terms of the GNU GPL, version 2. See
  16. * the COPYING file in the top-level directory.
  17. *
  18. */
  19. #include "vmx.h"
  20. #include "mmu.h"
  21. #include <linux/kvm_host.h>
  22. #include <linux/types.h>
  23. #include <linux/string.h>
  24. #include <linux/mm.h>
  25. #include <linux/highmem.h>
  26. #include <linux/module.h>
  27. #include <linux/swap.h>
  28. #include <asm/page.h>
  29. #include <asm/cmpxchg.h>
  30. #include <asm/io.h>
  31. #undef MMU_DEBUG
  32. #undef AUDIT
  33. #ifdef AUDIT
  34. static void kvm_mmu_audit(struct kvm_vcpu *vcpu, const char *msg);
  35. #else
  36. static void kvm_mmu_audit(struct kvm_vcpu *vcpu, const char *msg) {}
  37. #endif
  38. #ifdef MMU_DEBUG
  39. #define pgprintk(x...) do { if (dbg) printk(x); } while (0)
  40. #define rmap_printk(x...) do { if (dbg) printk(x); } while (0)
  41. #else
  42. #define pgprintk(x...) do { } while (0)
  43. #define rmap_printk(x...) do { } while (0)
  44. #endif
  45. #if defined(MMU_DEBUG) || defined(AUDIT)
  46. static int dbg = 1;
  47. #endif
  48. #ifndef MMU_DEBUG
  49. #define ASSERT(x) do { } while (0)
  50. #else
  51. #define ASSERT(x) \
  52. if (!(x)) { \
  53. printk(KERN_WARNING "assertion failed %s:%d: %s\n", \
  54. __FILE__, __LINE__, #x); \
  55. }
  56. #endif
  57. #define PT64_PT_BITS 9
  58. #define PT64_ENT_PER_PAGE (1 << PT64_PT_BITS)
  59. #define PT32_PT_BITS 10
  60. #define PT32_ENT_PER_PAGE (1 << PT32_PT_BITS)
  61. #define PT_WRITABLE_SHIFT 1
  62. #define PT_PRESENT_MASK (1ULL << 0)
  63. #define PT_WRITABLE_MASK (1ULL << PT_WRITABLE_SHIFT)
  64. #define PT_USER_MASK (1ULL << 2)
  65. #define PT_PWT_MASK (1ULL << 3)
  66. #define PT_PCD_MASK (1ULL << 4)
  67. #define PT_ACCESSED_MASK (1ULL << 5)
  68. #define PT_DIRTY_MASK (1ULL << 6)
  69. #define PT_PAGE_SIZE_MASK (1ULL << 7)
  70. #define PT_PAT_MASK (1ULL << 7)
  71. #define PT_GLOBAL_MASK (1ULL << 8)
  72. #define PT64_NX_SHIFT 63
  73. #define PT64_NX_MASK (1ULL << PT64_NX_SHIFT)
  74. #define PT_PAT_SHIFT 7
  75. #define PT_DIR_PAT_SHIFT 12
  76. #define PT_DIR_PAT_MASK (1ULL << PT_DIR_PAT_SHIFT)
  77. #define PT32_DIR_PSE36_SIZE 4
  78. #define PT32_DIR_PSE36_SHIFT 13
  79. #define PT32_DIR_PSE36_MASK \
  80. (((1ULL << PT32_DIR_PSE36_SIZE) - 1) << PT32_DIR_PSE36_SHIFT)
  81. #define PT_FIRST_AVAIL_BITS_SHIFT 9
  82. #define PT64_SECOND_AVAIL_BITS_SHIFT 52
  83. #define PT_SHADOW_IO_MARK (1ULL << PT_FIRST_AVAIL_BITS_SHIFT)
  84. #define VALID_PAGE(x) ((x) != INVALID_PAGE)
  85. #define PT64_LEVEL_BITS 9
  86. #define PT64_LEVEL_SHIFT(level) \
  87. (PAGE_SHIFT + (level - 1) * PT64_LEVEL_BITS)
  88. #define PT64_LEVEL_MASK(level) \
  89. (((1ULL << PT64_LEVEL_BITS) - 1) << PT64_LEVEL_SHIFT(level))
  90. #define PT64_INDEX(address, level)\
  91. (((address) >> PT64_LEVEL_SHIFT(level)) & ((1 << PT64_LEVEL_BITS) - 1))
  92. #define PT32_LEVEL_BITS 10
  93. #define PT32_LEVEL_SHIFT(level) \
  94. (PAGE_SHIFT + (level - 1) * PT32_LEVEL_BITS)
  95. #define PT32_LEVEL_MASK(level) \
  96. (((1ULL << PT32_LEVEL_BITS) - 1) << PT32_LEVEL_SHIFT(level))
  97. #define PT32_INDEX(address, level)\
  98. (((address) >> PT32_LEVEL_SHIFT(level)) & ((1 << PT32_LEVEL_BITS) - 1))
  99. #define PT64_BASE_ADDR_MASK (((1ULL << 52) - 1) & ~(u64)(PAGE_SIZE-1))
  100. #define PT64_DIR_BASE_ADDR_MASK \
  101. (PT64_BASE_ADDR_MASK & ~((1ULL << (PAGE_SHIFT + PT64_LEVEL_BITS)) - 1))
  102. #define PT32_BASE_ADDR_MASK PAGE_MASK
  103. #define PT32_DIR_BASE_ADDR_MASK \
  104. (PAGE_MASK & ~((1ULL << (PAGE_SHIFT + PT32_LEVEL_BITS)) - 1))
  105. #define PT64_PERM_MASK (PT_PRESENT_MASK | PT_WRITABLE_MASK | PT_USER_MASK \
  106. | PT64_NX_MASK)
  107. #define PFERR_PRESENT_MASK (1U << 0)
  108. #define PFERR_WRITE_MASK (1U << 1)
  109. #define PFERR_USER_MASK (1U << 2)
  110. #define PFERR_FETCH_MASK (1U << 4)
  111. #define PT64_ROOT_LEVEL 4
  112. #define PT32_ROOT_LEVEL 2
  113. #define PT32E_ROOT_LEVEL 3
  114. #define PT_DIRECTORY_LEVEL 2
  115. #define PT_PAGE_TABLE_LEVEL 1
  116. #define RMAP_EXT 4
  117. #define ACC_EXEC_MASK 1
  118. #define ACC_WRITE_MASK PT_WRITABLE_MASK
  119. #define ACC_USER_MASK PT_USER_MASK
  120. #define ACC_ALL (ACC_EXEC_MASK | ACC_WRITE_MASK | ACC_USER_MASK)
  121. struct kvm_rmap_desc {
  122. u64 *shadow_ptes[RMAP_EXT];
  123. struct kvm_rmap_desc *more;
  124. };
  125. static struct kmem_cache *pte_chain_cache;
  126. static struct kmem_cache *rmap_desc_cache;
  127. static struct kmem_cache *mmu_page_header_cache;
  128. static u64 __read_mostly shadow_trap_nonpresent_pte;
  129. static u64 __read_mostly shadow_notrap_nonpresent_pte;
  130. void kvm_mmu_set_nonpresent_ptes(u64 trap_pte, u64 notrap_pte)
  131. {
  132. shadow_trap_nonpresent_pte = trap_pte;
  133. shadow_notrap_nonpresent_pte = notrap_pte;
  134. }
  135. EXPORT_SYMBOL_GPL(kvm_mmu_set_nonpresent_ptes);
  136. static int is_write_protection(struct kvm_vcpu *vcpu)
  137. {
  138. return vcpu->arch.cr0 & X86_CR0_WP;
  139. }
  140. static int is_cpuid_PSE36(void)
  141. {
  142. return 1;
  143. }
  144. static int is_nx(struct kvm_vcpu *vcpu)
  145. {
  146. return vcpu->arch.shadow_efer & EFER_NX;
  147. }
  148. static int is_present_pte(unsigned long pte)
  149. {
  150. return pte & PT_PRESENT_MASK;
  151. }
  152. static int is_shadow_present_pte(u64 pte)
  153. {
  154. pte &= ~PT_SHADOW_IO_MARK;
  155. return pte != shadow_trap_nonpresent_pte
  156. && pte != shadow_notrap_nonpresent_pte;
  157. }
  158. static int is_writeble_pte(unsigned long pte)
  159. {
  160. return pte & PT_WRITABLE_MASK;
  161. }
  162. static int is_dirty_pte(unsigned long pte)
  163. {
  164. return pte & PT_DIRTY_MASK;
  165. }
  166. static int is_io_pte(unsigned long pte)
  167. {
  168. return pte & PT_SHADOW_IO_MARK;
  169. }
  170. static int is_rmap_pte(u64 pte)
  171. {
  172. return pte != shadow_trap_nonpresent_pte
  173. && pte != shadow_notrap_nonpresent_pte;
  174. }
  175. static gfn_t pse36_gfn_delta(u32 gpte)
  176. {
  177. int shift = 32 - PT32_DIR_PSE36_SHIFT - PAGE_SHIFT;
  178. return (gpte & PT32_DIR_PSE36_MASK) << shift;
  179. }
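/*
* Update a shadow pte with a single atomic 64-bit store (set_64bit),
* so a concurrent hardware page walk never sees a half-written entry,
* even on 32-bit PAE hosts where a plain store would take two words.
*/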
  180. static void set_shadow_pte(u64 *sptep, u64 spte)
  181. {
  182. #ifdef CONFIG_X86_64
  183. set_64bit((unsigned long *)sptep, spte);
  184. #else
  185. set_64bit((unsigned long long *)sptep, spte);
  186. #endif
  187. }
  188. static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache,
  189. struct kmem_cache *base_cache, int min)
  190. {
  191. void *obj;
  192. if (cache->nobjs >= min)
  193. return 0;
  194. while (cache->nobjs < ARRAY_SIZE(cache->objects)) {
  195. obj = kmem_cache_zalloc(base_cache, GFP_KERNEL);
  196. if (!obj)
  197. return -ENOMEM;
  198. cache->objects[cache->nobjs++] = obj;
  199. }
  200. return 0;
  201. }
  202. static void mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc)
  203. {
  204. while (mc->nobjs)
  205. kfree(mc->objects[--mc->nobjs]);
  206. }
  207. static int mmu_topup_memory_cache_page(struct kvm_mmu_memory_cache *cache,
  208. int min)
  209. {
  210. struct page *page;
  211. if (cache->nobjs >= min)
  212. return 0;
  213. while (cache->nobjs < ARRAY_SIZE(cache->objects)) {
  214. page = alloc_page(GFP_KERNEL);
  215. if (!page)
  216. return -ENOMEM;
  217. set_page_private(page, 0);
  218. cache->objects[cache->nobjs++] = page_address(page);
  219. }
  220. return 0;
  221. }
  222. static void mmu_free_memory_cache_page(struct kvm_mmu_memory_cache *mc)
  223. {
  224. while (mc->nobjs)
  225. free_page((unsigned long)mc->objects[--mc->nobjs]);
  226. }
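/*
* Pre-fill the per-vcpu object caches (pte chains, rmap descriptors,
* shadow page headers and pages) so the fault paths that run under
* mmu_lock can allocate from them without sleeping.
*/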
  227. static int mmu_topup_memory_caches(struct kvm_vcpu *vcpu)
  228. {
  229. int r;
  230. r = mmu_topup_memory_cache(&vcpu->arch.mmu_pte_chain_cache,
  231. pte_chain_cache, 4);
  232. if (r)
  233. goto out;
  234. r = mmu_topup_memory_cache(&vcpu->arch.mmu_rmap_desc_cache,
  235. rmap_desc_cache, 1);
  236. if (r)
  237. goto out;
  238. r = mmu_topup_memory_cache_page(&vcpu->arch.mmu_page_cache, 8);
  239. if (r)
  240. goto out;
  241. r = mmu_topup_memory_cache(&vcpu->arch.mmu_page_header_cache,
  242. mmu_page_header_cache, 4);
  243. out:
  244. return r;
  245. }
  246. static void mmu_free_memory_caches(struct kvm_vcpu *vcpu)
  247. {
  248. mmu_free_memory_cache(&vcpu->arch.mmu_pte_chain_cache);
  249. mmu_free_memory_cache(&vcpu->arch.mmu_rmap_desc_cache);
  250. mmu_free_memory_cache_page(&vcpu->arch.mmu_page_cache);
  251. mmu_free_memory_cache(&vcpu->arch.mmu_page_header_cache);
  252. }
  253. static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc,
  254. size_t size)
  255. {
  256. void *p;
  257. BUG_ON(!mc->nobjs);
  258. p = mc->objects[--mc->nobjs];
  259. memset(p, 0, size);
  260. return p;
  261. }
  262. static struct kvm_pte_chain *mmu_alloc_pte_chain(struct kvm_vcpu *vcpu)
  263. {
  264. return mmu_memory_cache_alloc(&vcpu->arch.mmu_pte_chain_cache,
  265. sizeof(struct kvm_pte_chain));
  266. }
  267. static void mmu_free_pte_chain(struct kvm_pte_chain *pc)
  268. {
  269. kfree(pc);
  270. }
  271. static struct kvm_rmap_desc *mmu_alloc_rmap_desc(struct kvm_vcpu *vcpu)
  272. {
  273. return mmu_memory_cache_alloc(&vcpu->arch.mmu_rmap_desc_cache,
  274. sizeof(struct kvm_rmap_desc));
  275. }
  276. static void mmu_free_rmap_desc(struct kvm_rmap_desc *rd)
  277. {
  278. kfree(rd);
  279. }
  280. /*
  281. * Take gfn and return the reverse mapping to it.
   282. * Note: gfn must be unaliased before this function gets called
  283. */
  284. static unsigned long *gfn_to_rmap(struct kvm *kvm, gfn_t gfn)
  285. {
  286. struct kvm_memory_slot *slot;
  287. slot = gfn_to_memslot(kvm, gfn);
  288. return &slot->rmap[gfn - slot->base_gfn];
  289. }
  290. /*
  291. * Reverse mapping data structures:
  292. *
   293. * If rmapp bit zero is zero, then rmapp points to the shadow page table entry
  294. * that points to page_address(page).
  295. *
   296. * If rmapp bit zero is one, then (rmapp & ~1) points to a struct kvm_rmap_desc
  297. * containing more mappings.
  298. */
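/*
* Example: the first spte mapping a gfn is stored directly in *rmapp.
* When a second spte maps the same gfn, rmap_add allocates a
* kvm_rmap_desc, moves both sptes into shadow_ptes[], and sets bit
* zero of *rmapp to mark it as a descriptor pointer; further sptes
* fill the array and then chain new descriptors through ->more.
*/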
  299. static void rmap_add(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn)
  300. {
  301. struct kvm_mmu_page *sp;
  302. struct kvm_rmap_desc *desc;
  303. unsigned long *rmapp;
  304. int i;
  305. if (!is_rmap_pte(*spte))
  306. return;
  307. gfn = unalias_gfn(vcpu->kvm, gfn);
  308. sp = page_header(__pa(spte));
  309. sp->gfns[spte - sp->spt] = gfn;
  310. rmapp = gfn_to_rmap(vcpu->kvm, gfn);
  311. if (!*rmapp) {
  312. rmap_printk("rmap_add: %p %llx 0->1\n", spte, *spte);
  313. *rmapp = (unsigned long)spte;
  314. } else if (!(*rmapp & 1)) {
  315. rmap_printk("rmap_add: %p %llx 1->many\n", spte, *spte);
  316. desc = mmu_alloc_rmap_desc(vcpu);
  317. desc->shadow_ptes[0] = (u64 *)*rmapp;
  318. desc->shadow_ptes[1] = spte;
  319. *rmapp = (unsigned long)desc | 1;
  320. } else {
  321. rmap_printk("rmap_add: %p %llx many->many\n", spte, *spte);
  322. desc = (struct kvm_rmap_desc *)(*rmapp & ~1ul);
  323. while (desc->shadow_ptes[RMAP_EXT-1] && desc->more)
  324. desc = desc->more;
  325. if (desc->shadow_ptes[RMAP_EXT-1]) {
  326. desc->more = mmu_alloc_rmap_desc(vcpu);
  327. desc = desc->more;
  328. }
  329. for (i = 0; desc->shadow_ptes[i]; ++i)
  330. ;
  331. desc->shadow_ptes[i] = spte;
  332. }
  333. }
  334. static void rmap_desc_remove_entry(unsigned long *rmapp,
  335. struct kvm_rmap_desc *desc,
  336. int i,
  337. struct kvm_rmap_desc *prev_desc)
  338. {
  339. int j;
  340. for (j = RMAP_EXT - 1; !desc->shadow_ptes[j] && j > i; --j)
  341. ;
  342. desc->shadow_ptes[i] = desc->shadow_ptes[j];
  343. desc->shadow_ptes[j] = NULL;
  344. if (j != 0)
  345. return;
  346. if (!prev_desc && !desc->more)
  347. *rmapp = (unsigned long)desc->shadow_ptes[0];
  348. else
  349. if (prev_desc)
  350. prev_desc->more = desc->more;
  351. else
  352. *rmapp = (unsigned long)desc->more | 1;
  353. mmu_free_rmap_desc(desc);
  354. }
  355. static void rmap_remove(struct kvm *kvm, u64 *spte)
  356. {
  357. struct kvm_rmap_desc *desc;
  358. struct kvm_rmap_desc *prev_desc;
  359. struct kvm_mmu_page *sp;
  360. struct page *page;
  361. unsigned long *rmapp;
  362. int i;
  363. if (!is_rmap_pte(*spte))
  364. return;
  365. sp = page_header(__pa(spte));
  366. page = pfn_to_page((*spte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT);
  367. mark_page_accessed(page);
  368. if (is_writeble_pte(*spte))
  369. kvm_release_page_dirty(page);
  370. else
  371. kvm_release_page_clean(page);
  372. rmapp = gfn_to_rmap(kvm, sp->gfns[spte - sp->spt]);
  373. if (!*rmapp) {
  374. printk(KERN_ERR "rmap_remove: %p %llx 0->BUG\n", spte, *spte);
  375. BUG();
  376. } else if (!(*rmapp & 1)) {
  377. rmap_printk("rmap_remove: %p %llx 1->0\n", spte, *spte);
  378. if ((u64 *)*rmapp != spte) {
  379. printk(KERN_ERR "rmap_remove: %p %llx 1->BUG\n",
  380. spte, *spte);
  381. BUG();
  382. }
  383. *rmapp = 0;
  384. } else {
  385. rmap_printk("rmap_remove: %p %llx many->many\n", spte, *spte);
  386. desc = (struct kvm_rmap_desc *)(*rmapp & ~1ul);
  387. prev_desc = NULL;
  388. while (desc) {
  389. for (i = 0; i < RMAP_EXT && desc->shadow_ptes[i]; ++i)
  390. if (desc->shadow_ptes[i] == spte) {
  391. rmap_desc_remove_entry(rmapp,
  392. desc, i,
  393. prev_desc);
  394. return;
  395. }
  396. prev_desc = desc;
  397. desc = desc->more;
  398. }
  399. BUG();
  400. }
  401. }
  402. static u64 *rmap_next(struct kvm *kvm, unsigned long *rmapp, u64 *spte)
  403. {
  404. struct kvm_rmap_desc *desc;
  405. struct kvm_rmap_desc *prev_desc;
  406. u64 *prev_spte;
  407. int i;
  408. if (!*rmapp)
  409. return NULL;
  410. else if (!(*rmapp & 1)) {
  411. if (!spte)
  412. return (u64 *)*rmapp;
  413. return NULL;
  414. }
  415. desc = (struct kvm_rmap_desc *)(*rmapp & ~1ul);
  416. prev_desc = NULL;
  417. prev_spte = NULL;
  418. while (desc) {
  419. for (i = 0; i < RMAP_EXT && desc->shadow_ptes[i]; ++i) {
  420. if (prev_spte == spte)
  421. return desc->shadow_ptes[i];
  422. prev_spte = desc->shadow_ptes[i];
  423. }
  424. desc = desc->more;
  425. }
  426. return NULL;
  427. }
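/*
* Clear the writable bit from every spte that maps @gfn and flush
* remote TLBs if anything changed, so subsequent guest writes to the
* page fault into the MMU.
*/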
  428. static void rmap_write_protect(struct kvm *kvm, u64 gfn)
  429. {
  430. unsigned long *rmapp;
  431. u64 *spte;
  432. int write_protected = 0;
  433. gfn = unalias_gfn(kvm, gfn);
  434. rmapp = gfn_to_rmap(kvm, gfn);
  435. spte = rmap_next(kvm, rmapp, NULL);
  436. while (spte) {
  437. BUG_ON(!spte);
  438. BUG_ON(!(*spte & PT_PRESENT_MASK));
  439. rmap_printk("rmap_write_protect: spte %p %llx\n", spte, *spte);
  440. if (is_writeble_pte(*spte)) {
  441. set_shadow_pte(spte, *spte & ~PT_WRITABLE_MASK);
  442. write_protected = 1;
  443. }
  444. spte = rmap_next(kvm, rmapp, spte);
  445. }
  446. if (write_protected)
  447. kvm_flush_remote_tlbs(kvm);
  448. }
  449. #ifdef MMU_DEBUG
  450. static int is_empty_shadow_page(u64 *spt)
  451. {
  452. u64 *pos;
  453. u64 *end;
  454. for (pos = spt, end = pos + PAGE_SIZE / sizeof(u64); pos != end; pos++)
  455. if ((*pos & ~PT_SHADOW_IO_MARK) != shadow_trap_nonpresent_pte) {
  456. printk(KERN_ERR "%s: %p %llx\n", __FUNCTION__,
  457. pos, *pos);
  458. return 0;
  459. }
  460. return 1;
  461. }
  462. #endif
  463. static void kvm_mmu_free_page(struct kvm *kvm, struct kvm_mmu_page *sp)
  464. {
  465. ASSERT(is_empty_shadow_page(sp->spt));
  466. list_del(&sp->link);
  467. __free_page(virt_to_page(sp->spt));
  468. __free_page(virt_to_page(sp->gfns));
  469. kfree(sp);
  470. ++kvm->arch.n_free_mmu_pages;
  471. }
  472. static unsigned kvm_page_table_hashfn(gfn_t gfn)
  473. {
  474. return gfn;
  475. }
  476. static struct kvm_mmu_page *kvm_mmu_alloc_page(struct kvm_vcpu *vcpu,
  477. u64 *parent_pte)
  478. {
  479. struct kvm_mmu_page *sp;
  480. sp = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_header_cache, sizeof *sp);
  481. sp->spt = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_cache, PAGE_SIZE);
  482. sp->gfns = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_cache, PAGE_SIZE);
  483. set_page_private(virt_to_page(sp->spt), (unsigned long)sp);
  484. list_add(&sp->link, &vcpu->kvm->arch.active_mmu_pages);
  485. ASSERT(is_empty_shadow_page(sp->spt));
  486. sp->slot_bitmap = 0;
  487. sp->multimapped = 0;
  488. sp->parent_pte = parent_pte;
  489. --vcpu->kvm->arch.n_free_mmu_pages;
  490. return sp;
  491. }
  492. static void mmu_page_add_parent_pte(struct kvm_vcpu *vcpu,
  493. struct kvm_mmu_page *sp, u64 *parent_pte)
  494. {
  495. struct kvm_pte_chain *pte_chain;
  496. struct hlist_node *node;
  497. int i;
  498. if (!parent_pte)
  499. return;
  500. if (!sp->multimapped) {
  501. u64 *old = sp->parent_pte;
  502. if (!old) {
  503. sp->parent_pte = parent_pte;
  504. return;
  505. }
  506. sp->multimapped = 1;
  507. pte_chain = mmu_alloc_pte_chain(vcpu);
  508. INIT_HLIST_HEAD(&sp->parent_ptes);
  509. hlist_add_head(&pte_chain->link, &sp->parent_ptes);
  510. pte_chain->parent_ptes[0] = old;
  511. }
  512. hlist_for_each_entry(pte_chain, node, &sp->parent_ptes, link) {
  513. if (pte_chain->parent_ptes[NR_PTE_CHAIN_ENTRIES-1])
  514. continue;
  515. for (i = 0; i < NR_PTE_CHAIN_ENTRIES; ++i)
  516. if (!pte_chain->parent_ptes[i]) {
  517. pte_chain->parent_ptes[i] = parent_pte;
  518. return;
  519. }
  520. }
  521. pte_chain = mmu_alloc_pte_chain(vcpu);
  522. BUG_ON(!pte_chain);
  523. hlist_add_head(&pte_chain->link, &sp->parent_ptes);
  524. pte_chain->parent_ptes[0] = parent_pte;
  525. }
  526. static void mmu_page_remove_parent_pte(struct kvm_mmu_page *sp,
  527. u64 *parent_pte)
  528. {
  529. struct kvm_pte_chain *pte_chain;
  530. struct hlist_node *node;
  531. int i;
  532. if (!sp->multimapped) {
  533. BUG_ON(sp->parent_pte != parent_pte);
  534. sp->parent_pte = NULL;
  535. return;
  536. }
  537. hlist_for_each_entry(pte_chain, node, &sp->parent_ptes, link)
  538. for (i = 0; i < NR_PTE_CHAIN_ENTRIES; ++i) {
  539. if (!pte_chain->parent_ptes[i])
  540. break;
  541. if (pte_chain->parent_ptes[i] != parent_pte)
  542. continue;
  543. while (i + 1 < NR_PTE_CHAIN_ENTRIES
  544. && pte_chain->parent_ptes[i + 1]) {
  545. pte_chain->parent_ptes[i]
  546. = pte_chain->parent_ptes[i + 1];
  547. ++i;
  548. }
  549. pte_chain->parent_ptes[i] = NULL;
  550. if (i == 0) {
  551. hlist_del(&pte_chain->link);
  552. mmu_free_pte_chain(pte_chain);
  553. if (hlist_empty(&sp->parent_ptes)) {
  554. sp->multimapped = 0;
  555. sp->parent_pte = NULL;
  556. }
  557. }
  558. return;
  559. }
  560. BUG();
  561. }
  562. static struct kvm_mmu_page *kvm_mmu_lookup_page(struct kvm *kvm, gfn_t gfn)
  563. {
  564. unsigned index;
  565. struct hlist_head *bucket;
  566. struct kvm_mmu_page *sp;
  567. struct hlist_node *node;
  568. pgprintk("%s: looking for gfn %lx\n", __FUNCTION__, gfn);
  569. index = kvm_page_table_hashfn(gfn) % KVM_NUM_MMU_PAGES;
  570. bucket = &kvm->arch.mmu_page_hash[index];
  571. hlist_for_each_entry(sp, node, bucket, hash_link)
  572. if (sp->gfn == gfn && !sp->role.metaphysical) {
  573. pgprintk("%s: found role %x\n",
  574. __FUNCTION__, sp->role.word);
  575. return sp;
  576. }
  577. return NULL;
  578. }
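/*
* Look up the shadow page for (gfn, role) in the hash table, creating
* it if necessary. role.quadrant matters for 32-bit guests: a guest
* table holds 1024 32-bit entries while a shadow page holds 512
* 64-bit ones, so several shadow pages back one guest table and the
* quadrant records which part this one covers.
*/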
  579. static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
  580. gfn_t gfn,
  581. gva_t gaddr,
  582. unsigned level,
  583. int metaphysical,
  584. unsigned access,
  585. u64 *parent_pte,
  586. bool *new_page)
  587. {
  588. union kvm_mmu_page_role role;
  589. unsigned index;
  590. unsigned quadrant;
  591. struct hlist_head *bucket;
  592. struct kvm_mmu_page *sp;
  593. struct hlist_node *node;
  594. role.word = 0;
  595. role.glevels = vcpu->arch.mmu.root_level;
  596. role.level = level;
  597. role.metaphysical = metaphysical;
  598. role.access = access;
  599. if (vcpu->arch.mmu.root_level <= PT32_ROOT_LEVEL) {
  600. quadrant = gaddr >> (PAGE_SHIFT + (PT64_PT_BITS * level));
  601. quadrant &= (1 << ((PT32_PT_BITS - PT64_PT_BITS) * level)) - 1;
  602. role.quadrant = quadrant;
  603. }
  604. pgprintk("%s: looking gfn %lx role %x\n", __FUNCTION__,
  605. gfn, role.word);
  606. index = kvm_page_table_hashfn(gfn) % KVM_NUM_MMU_PAGES;
  607. bucket = &vcpu->kvm->arch.mmu_page_hash[index];
  608. hlist_for_each_entry(sp, node, bucket, hash_link)
  609. if (sp->gfn == gfn && sp->role.word == role.word) {
  610. mmu_page_add_parent_pte(vcpu, sp, parent_pte);
  611. pgprintk("%s: found\n", __FUNCTION__);
  612. return sp;
  613. }
  614. ++vcpu->kvm->stat.mmu_cache_miss;
  615. sp = kvm_mmu_alloc_page(vcpu, parent_pte);
  616. if (!sp)
  617. return sp;
  618. pgprintk("%s: adding gfn %lx role %x\n", __FUNCTION__, gfn, role.word);
  619. sp->gfn = gfn;
  620. sp->role = role;
  621. hlist_add_head(&sp->hash_link, bucket);
  622. vcpu->arch.mmu.prefetch_page(vcpu, sp);
  623. if (!metaphysical)
  624. rmap_write_protect(vcpu->kvm, gfn);
  625. if (new_page)
  626. *new_page = 1;
  627. return sp;
  628. }
  629. static void kvm_mmu_page_unlink_children(struct kvm *kvm,
  630. struct kvm_mmu_page *sp)
  631. {
  632. unsigned i;
  633. u64 *pt;
  634. u64 ent;
  635. pt = sp->spt;
  636. if (sp->role.level == PT_PAGE_TABLE_LEVEL) {
  637. for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
  638. if (is_shadow_present_pte(pt[i]))
  639. rmap_remove(kvm, &pt[i]);
  640. pt[i] = shadow_trap_nonpresent_pte;
  641. }
  642. kvm_flush_remote_tlbs(kvm);
  643. return;
  644. }
  645. for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
  646. ent = pt[i];
  647. pt[i] = shadow_trap_nonpresent_pte;
  648. if (!is_shadow_present_pte(ent))
  649. continue;
  650. ent &= PT64_BASE_ADDR_MASK;
  651. mmu_page_remove_parent_pte(page_header(ent), &pt[i]);
  652. }
  653. kvm_flush_remote_tlbs(kvm);
  654. }
  655. static void kvm_mmu_put_page(struct kvm_mmu_page *sp, u64 *parent_pte)
  656. {
  657. mmu_page_remove_parent_pte(sp, parent_pte);
  658. }
  659. static void kvm_mmu_reset_last_pte_updated(struct kvm *kvm)
  660. {
  661. int i;
  662. for (i = 0; i < KVM_MAX_VCPUS; ++i)
  663. if (kvm->vcpus[i])
  664. kvm->vcpus[i]->arch.last_pte_updated = NULL;
  665. }
  666. static void kvm_mmu_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp)
  667. {
  668. u64 *parent_pte;
  669. ++kvm->stat.mmu_shadow_zapped;
  670. while (sp->multimapped || sp->parent_pte) {
  671. if (!sp->multimapped)
  672. parent_pte = sp->parent_pte;
  673. else {
  674. struct kvm_pte_chain *chain;
  675. chain = container_of(sp->parent_ptes.first,
  676. struct kvm_pte_chain, link);
  677. parent_pte = chain->parent_ptes[0];
  678. }
  679. BUG_ON(!parent_pte);
  680. kvm_mmu_put_page(sp, parent_pte);
  681. set_shadow_pte(parent_pte, shadow_trap_nonpresent_pte);
  682. }
  683. kvm_mmu_page_unlink_children(kvm, sp);
  684. if (!sp->root_count) {
  685. hlist_del(&sp->hash_link);
  686. kvm_mmu_free_page(kvm, sp);
  687. } else
  688. list_move(&sp->link, &kvm->arch.active_mmu_pages);
  689. kvm_mmu_reset_last_pte_updated(kvm);
  690. }
  691. /*
   692. * Changing the number of mmu pages allocated to the vm.
   693. * Note: if kvm_nr_mmu_pages is too small, you will get a deadlock.
  694. */
  695. void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages)
  696. {
  697. /*
   698. * If we set the number of mmu pages to be smaller than the
   699. * number of active pages, we must free some mmu pages before we
   700. * change the value.
  701. */
  702. if ((kvm->arch.n_alloc_mmu_pages - kvm->arch.n_free_mmu_pages) >
  703. kvm_nr_mmu_pages) {
  704. int n_used_mmu_pages = kvm->arch.n_alloc_mmu_pages
  705. - kvm->arch.n_free_mmu_pages;
  706. while (n_used_mmu_pages > kvm_nr_mmu_pages) {
  707. struct kvm_mmu_page *page;
  708. page = container_of(kvm->arch.active_mmu_pages.prev,
  709. struct kvm_mmu_page, link);
  710. kvm_mmu_zap_page(kvm, page);
  711. n_used_mmu_pages--;
  712. }
  713. kvm->arch.n_free_mmu_pages = 0;
  714. }
  715. else
  716. kvm->arch.n_free_mmu_pages += kvm_nr_mmu_pages
  717. - kvm->arch.n_alloc_mmu_pages;
  718. kvm->arch.n_alloc_mmu_pages = kvm_nr_mmu_pages;
  719. }
  720. static int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn)
  721. {
  722. unsigned index;
  723. struct hlist_head *bucket;
  724. struct kvm_mmu_page *sp;
  725. struct hlist_node *node, *n;
  726. int r;
  727. pgprintk("%s: looking for gfn %lx\n", __FUNCTION__, gfn);
  728. r = 0;
  729. index = kvm_page_table_hashfn(gfn) % KVM_NUM_MMU_PAGES;
  730. bucket = &kvm->arch.mmu_page_hash[index];
  731. hlist_for_each_entry_safe(sp, node, n, bucket, hash_link)
  732. if (sp->gfn == gfn && !sp->role.metaphysical) {
  733. pgprintk("%s: gfn %lx role %x\n", __FUNCTION__, gfn,
  734. sp->role.word);
  735. kvm_mmu_zap_page(kvm, sp);
  736. r = 1;
  737. }
  738. return r;
  739. }
  740. static void mmu_unshadow(struct kvm *kvm, gfn_t gfn)
  741. {
  742. struct kvm_mmu_page *sp;
  743. while ((sp = kvm_mmu_lookup_page(kvm, gfn)) != NULL) {
  744. pgprintk("%s: zap %lx %x\n", __FUNCTION__, gfn, sp->role.word);
  745. kvm_mmu_zap_page(kvm, sp);
  746. }
  747. }
  748. static void page_header_update_slot(struct kvm *kvm, void *pte, gfn_t gfn)
  749. {
  750. int slot = memslot_id(kvm, gfn_to_memslot(kvm, gfn));
  751. struct kvm_mmu_page *sp = page_header(__pa(pte));
  752. __set_bit(slot, &sp->slot_bitmap);
  753. }
  754. struct page *gva_to_page(struct kvm_vcpu *vcpu, gva_t gva)
  755. {
  756. gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, gva);
  757. if (gpa == UNMAPPED_GVA)
  758. return NULL;
  759. return gfn_to_page(vcpu->kvm, gpa >> PAGE_SHIFT);
  760. }
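/*
* Build and install a shadow pte: propagate access rights from the
* guest pte, mark frames with no backing memory (mmio) with
* PT_SHADOW_IO_MARK, drop write access when the target frame is
* itself shadowed as a page table, and keep the rmap and dirty log
* up to date.
*/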
  761. static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte,
  762. unsigned pt_access, unsigned pte_access,
  763. int user_fault, int write_fault, int dirty,
  764. int *ptwrite, gfn_t gfn, struct page *page)
  765. {
  766. u64 spte;
  767. int was_rmapped = is_rmap_pte(*shadow_pte);
  768. pgprintk("%s: spte %llx access %x write_fault %d"
  769. " user_fault %d gfn %lx\n",
  770. __FUNCTION__, *shadow_pte, pt_access,
  771. write_fault, user_fault, gfn);
  772. /*
  773. * We don't set the accessed bit, since we sometimes want to see
  774. * whether the guest actually used the pte (in order to detect
  775. * demand paging).
  776. */
  777. spte = PT_PRESENT_MASK | PT_DIRTY_MASK;
  778. if (!dirty)
  779. pte_access &= ~ACC_WRITE_MASK;
  780. if (!(pte_access & ACC_EXEC_MASK))
  781. spte |= PT64_NX_MASK;
  782. spte |= PT_PRESENT_MASK;
  783. if (pte_access & ACC_USER_MASK)
  784. spte |= PT_USER_MASK;
  785. if (is_error_page(page)) {
  786. set_shadow_pte(shadow_pte,
  787. shadow_trap_nonpresent_pte | PT_SHADOW_IO_MARK);
  788. kvm_release_page_clean(page);
  789. return;
  790. }
  791. spte |= page_to_phys(page);
  792. if ((pte_access & ACC_WRITE_MASK)
  793. || (write_fault && !is_write_protection(vcpu) && !user_fault)) {
  794. struct kvm_mmu_page *shadow;
  795. spte |= PT_WRITABLE_MASK;
  796. if (user_fault) {
  797. mmu_unshadow(vcpu->kvm, gfn);
  798. goto unshadowed;
  799. }
  800. shadow = kvm_mmu_lookup_page(vcpu->kvm, gfn);
  801. if (shadow) {
  802. pgprintk("%s: found shadow page for %lx, marking ro\n",
  803. __FUNCTION__, gfn);
  804. pte_access &= ~ACC_WRITE_MASK;
  805. if (is_writeble_pte(spte)) {
  806. spte &= ~PT_WRITABLE_MASK;
  807. kvm_x86_ops->tlb_flush(vcpu);
  808. }
  809. if (write_fault)
  810. *ptwrite = 1;
  811. }
  812. }
  813. unshadowed:
  814. if (pte_access & ACC_WRITE_MASK)
  815. mark_page_dirty(vcpu->kvm, gfn);
  816. pgprintk("%s: setting spte %llx\n", __FUNCTION__, spte);
  817. set_shadow_pte(shadow_pte, spte);
  818. page_header_update_slot(vcpu->kvm, shadow_pte, gfn);
  819. if (!was_rmapped) {
  820. rmap_add(vcpu, shadow_pte, gfn);
  821. if (!is_rmap_pte(*shadow_pte))
  822. kvm_release_page_clean(page);
  823. }
  824. else
  825. kvm_release_page_clean(page);
  826. if (!ptwrite || !*ptwrite)
  827. vcpu->arch.last_pte_updated = shadow_pte;
  828. }
  829. static void nonpaging_new_cr3(struct kvm_vcpu *vcpu)
  830. {
  831. }
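/*
* Walk the shadow page table for @v, allocating missing intermediate
* levels as metaphysical pages, and install the final pte at level 1.
* Returns nonzero if the access needs to be emulated (a pte write or
* mmio).
*/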
  832. static int __nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, int write,
  833. gfn_t gfn, struct page *page)
  834. {
  835. int level = PT32E_ROOT_LEVEL;
  836. hpa_t table_addr = vcpu->arch.mmu.root_hpa;
  837. int pt_write = 0;
  838. for (; ; level--) {
  839. u32 index = PT64_INDEX(v, level);
  840. u64 *table;
  841. ASSERT(VALID_PAGE(table_addr));
  842. table = __va(table_addr);
  843. if (level == 1) {
  844. mmu_set_spte(vcpu, &table[index], ACC_ALL, ACC_ALL,
  845. 0, write, 1, &pt_write, gfn, page);
  846. return pt_write || is_io_pte(table[index]);
  847. }
  848. if (table[index] == shadow_trap_nonpresent_pte) {
  849. struct kvm_mmu_page *new_table;
  850. gfn_t pseudo_gfn;
  851. pseudo_gfn = (v & PT64_DIR_BASE_ADDR_MASK)
  852. >> PAGE_SHIFT;
  853. new_table = kvm_mmu_get_page(vcpu, pseudo_gfn,
  854. v, level - 1,
  855. 1, ACC_ALL, &table[index],
  856. NULL);
  857. if (!new_table) {
  858. pgprintk("nonpaging_map: ENOMEM\n");
  859. kvm_release_page_clean(page);
  860. return -ENOMEM;
  861. }
  862. table[index] = __pa(new_table->spt) | PT_PRESENT_MASK
  863. | PT_WRITABLE_MASK | PT_USER_MASK;
  864. }
  865. table_addr = table[index] & PT64_BASE_ADDR_MASK;
  866. }
  867. }
  868. static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, int write, gfn_t gfn)
  869. {
  870. int r;
  871. struct page *page;
  872. down_read(&current->mm->mmap_sem);
  873. page = gfn_to_page(vcpu->kvm, gfn);
  874. spin_lock(&vcpu->kvm->mmu_lock);
  875. kvm_mmu_free_some_pages(vcpu);
  876. r = __nonpaging_map(vcpu, v, write, gfn, page);
  877. spin_unlock(&vcpu->kvm->mmu_lock);
  878. up_read(&current->mm->mmap_sem);
  879. return r;
  880. }
  881. static void nonpaging_prefetch_page(struct kvm_vcpu *vcpu,
  882. struct kvm_mmu_page *sp)
  883. {
  884. int i;
  885. for (i = 0; i < PT64_ENT_PER_PAGE; ++i)
  886. sp->spt[i] = shadow_trap_nonpresent_pte;
  887. }
  888. static void mmu_free_roots(struct kvm_vcpu *vcpu)
  889. {
  890. int i;
  891. struct kvm_mmu_page *sp;
  892. if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
  893. return;
  894. spin_lock(&vcpu->kvm->mmu_lock);
  895. #ifdef CONFIG_X86_64
  896. if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL) {
  897. hpa_t root = vcpu->arch.mmu.root_hpa;
  898. sp = page_header(root);
  899. --sp->root_count;
  900. vcpu->arch.mmu.root_hpa = INVALID_PAGE;
  901. spin_unlock(&vcpu->kvm->mmu_lock);
  902. return;
  903. }
  904. #endif
  905. for (i = 0; i < 4; ++i) {
  906. hpa_t root = vcpu->arch.mmu.pae_root[i];
  907. if (root) {
  908. root &= PT64_BASE_ADDR_MASK;
  909. sp = page_header(root);
  910. --sp->root_count;
  911. }
  912. vcpu->arch.mmu.pae_root[i] = INVALID_PAGE;
  913. }
  914. spin_unlock(&vcpu->kvm->mmu_lock);
  915. vcpu->arch.mmu.root_hpa = INVALID_PAGE;
  916. }
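/*
* Allocate shadow roots for the current mode: a single 4-level root
* when the shadow uses PT64_ROOT_LEVEL, otherwise four PAE roots, one
* per guest pdptr (or per 1GB region when the guest uses 32-bit or no
* paging).
*/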
  917. static void mmu_alloc_roots(struct kvm_vcpu *vcpu)
  918. {
  919. int i;
  920. gfn_t root_gfn;
  921. struct kvm_mmu_page *sp;
  922. root_gfn = vcpu->arch.cr3 >> PAGE_SHIFT;
  923. #ifdef CONFIG_X86_64
  924. if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL) {
  925. hpa_t root = vcpu->arch.mmu.root_hpa;
  926. ASSERT(!VALID_PAGE(root));
  927. sp = kvm_mmu_get_page(vcpu, root_gfn, 0,
  928. PT64_ROOT_LEVEL, 0, ACC_ALL, NULL, NULL);
  929. root = __pa(sp->spt);
  930. ++sp->root_count;
  931. vcpu->arch.mmu.root_hpa = root;
  932. return;
  933. }
  934. #endif
  935. for (i = 0; i < 4; ++i) {
  936. hpa_t root = vcpu->arch.mmu.pae_root[i];
  937. ASSERT(!VALID_PAGE(root));
  938. if (vcpu->arch.mmu.root_level == PT32E_ROOT_LEVEL) {
  939. if (!is_present_pte(vcpu->arch.pdptrs[i])) {
  940. vcpu->arch.mmu.pae_root[i] = 0;
  941. continue;
  942. }
  943. root_gfn = vcpu->arch.pdptrs[i] >> PAGE_SHIFT;
  944. } else if (vcpu->arch.mmu.root_level == 0)
  945. root_gfn = 0;
  946. sp = kvm_mmu_get_page(vcpu, root_gfn, i << 30,
  947. PT32_ROOT_LEVEL, !is_paging(vcpu),
  948. ACC_ALL, NULL, NULL);
  949. root = __pa(sp->spt);
  950. ++sp->root_count;
  951. vcpu->arch.mmu.pae_root[i] = root | PT_PRESENT_MASK;
  952. }
  953. vcpu->arch.mmu.root_hpa = __pa(vcpu->arch.mmu.pae_root);
  954. }
  955. static gpa_t nonpaging_gva_to_gpa(struct kvm_vcpu *vcpu, gva_t vaddr)
  956. {
  957. return vaddr;
  958. }
  959. static int nonpaging_page_fault(struct kvm_vcpu *vcpu, gva_t gva,
  960. u32 error_code)
  961. {
  962. gfn_t gfn;
  963. int r;
  964. pgprintk("%s: gva %lx error %x\n", __FUNCTION__, gva, error_code);
  965. r = mmu_topup_memory_caches(vcpu);
  966. if (r)
  967. return r;
  968. ASSERT(vcpu);
  969. ASSERT(VALID_PAGE(vcpu->arch.mmu.root_hpa));
  970. gfn = gva >> PAGE_SHIFT;
  971. return nonpaging_map(vcpu, gva & PAGE_MASK,
  972. error_code & PFERR_WRITE_MASK, gfn);
  973. }
  974. static void nonpaging_free(struct kvm_vcpu *vcpu)
  975. {
  976. mmu_free_roots(vcpu);
  977. }
  978. static int nonpaging_init_context(struct kvm_vcpu *vcpu)
  979. {
  980. struct kvm_mmu *context = &vcpu->arch.mmu;
  981. context->new_cr3 = nonpaging_new_cr3;
  982. context->page_fault = nonpaging_page_fault;
  983. context->gva_to_gpa = nonpaging_gva_to_gpa;
  984. context->free = nonpaging_free;
  985. context->prefetch_page = nonpaging_prefetch_page;
  986. context->root_level = 0;
  987. context->shadow_root_level = PT32E_ROOT_LEVEL;
  988. context->root_hpa = INVALID_PAGE;
  989. return 0;
  990. }
  991. void kvm_mmu_flush_tlb(struct kvm_vcpu *vcpu)
  992. {
  993. ++vcpu->stat.tlb_flush;
  994. kvm_x86_ops->tlb_flush(vcpu);
  995. }
  996. static void paging_new_cr3(struct kvm_vcpu *vcpu)
  997. {
  998. pgprintk("%s: cr3 %lx\n", __FUNCTION__, vcpu->cr3);
  999. mmu_free_roots(vcpu);
  1000. }
  1001. static void inject_page_fault(struct kvm_vcpu *vcpu,
  1002. u64 addr,
  1003. u32 err_code)
  1004. {
  1005. kvm_inject_page_fault(vcpu, addr, err_code);
  1006. }
  1007. static void paging_free(struct kvm_vcpu *vcpu)
  1008. {
  1009. nonpaging_free(vcpu);
  1010. }
  1011. #define PTTYPE 64
  1012. #include "paging_tmpl.h"
  1013. #undef PTTYPE
  1014. #define PTTYPE 32
  1015. #include "paging_tmpl.h"
  1016. #undef PTTYPE
  1017. static int paging64_init_context_common(struct kvm_vcpu *vcpu, int level)
  1018. {
  1019. struct kvm_mmu *context = &vcpu->arch.mmu;
  1020. ASSERT(is_pae(vcpu));
  1021. context->new_cr3 = paging_new_cr3;
  1022. context->page_fault = paging64_page_fault;
  1023. context->gva_to_gpa = paging64_gva_to_gpa;
  1024. context->prefetch_page = paging64_prefetch_page;
  1025. context->free = paging_free;
  1026. context->root_level = level;
  1027. context->shadow_root_level = level;
  1028. context->root_hpa = INVALID_PAGE;
  1029. return 0;
  1030. }
  1031. static int paging64_init_context(struct kvm_vcpu *vcpu)
  1032. {
  1033. return paging64_init_context_common(vcpu, PT64_ROOT_LEVEL);
  1034. }
  1035. static int paging32_init_context(struct kvm_vcpu *vcpu)
  1036. {
  1037. struct kvm_mmu *context = &vcpu->arch.mmu;
  1038. context->new_cr3 = paging_new_cr3;
  1039. context->page_fault = paging32_page_fault;
  1040. context->gva_to_gpa = paging32_gva_to_gpa;
  1041. context->free = paging_free;
  1042. context->prefetch_page = paging32_prefetch_page;
  1043. context->root_level = PT32_ROOT_LEVEL;
  1044. context->shadow_root_level = PT32E_ROOT_LEVEL;
  1045. context->root_hpa = INVALID_PAGE;
  1046. return 0;
  1047. }
  1048. static int paging32E_init_context(struct kvm_vcpu *vcpu)
  1049. {
  1050. return paging64_init_context_common(vcpu, PT32E_ROOT_LEVEL);
  1051. }
  1052. static int init_kvm_mmu(struct kvm_vcpu *vcpu)
  1053. {
  1054. ASSERT(vcpu);
  1055. ASSERT(!VALID_PAGE(vcpu->arch.mmu.root_hpa));
  1056. if (!is_paging(vcpu))
  1057. return nonpaging_init_context(vcpu);
  1058. else if (is_long_mode(vcpu))
  1059. return paging64_init_context(vcpu);
  1060. else if (is_pae(vcpu))
  1061. return paging32E_init_context(vcpu);
  1062. else
  1063. return paging32_init_context(vcpu);
  1064. }
  1065. static void destroy_kvm_mmu(struct kvm_vcpu *vcpu)
  1066. {
  1067. ASSERT(vcpu);
  1068. if (VALID_PAGE(vcpu->arch.mmu.root_hpa)) {
  1069. vcpu->arch.mmu.free(vcpu);
  1070. vcpu->arch.mmu.root_hpa = INVALID_PAGE;
  1071. }
  1072. }
  1073. int kvm_mmu_reset_context(struct kvm_vcpu *vcpu)
  1074. {
  1075. destroy_kvm_mmu(vcpu);
  1076. return init_kvm_mmu(vcpu);
  1077. }
  1078. EXPORT_SYMBOL_GPL(kvm_mmu_reset_context);
  1079. int kvm_mmu_load(struct kvm_vcpu *vcpu)
  1080. {
  1081. int r;
  1082. r = mmu_topup_memory_caches(vcpu);
  1083. if (r)
  1084. goto out;
  1085. spin_lock(&vcpu->kvm->mmu_lock);
  1086. kvm_mmu_free_some_pages(vcpu);
  1087. mmu_alloc_roots(vcpu);
  1088. spin_unlock(&vcpu->kvm->mmu_lock);
  1089. kvm_x86_ops->set_cr3(vcpu, vcpu->arch.mmu.root_hpa);
  1090. kvm_mmu_flush_tlb(vcpu);
  1091. out:
  1092. return r;
  1093. }
  1094. EXPORT_SYMBOL_GPL(kvm_mmu_load);
  1095. void kvm_mmu_unload(struct kvm_vcpu *vcpu)
  1096. {
  1097. mmu_free_roots(vcpu);
  1098. }
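/*
* Remove a shadow pte that shadows a guest pte being overwritten:
* drop the rmap entry for a leaf, or detach the child shadow page for
* a directory entry, then reset the slot to the nonpresent trap value.
*/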
  1099. static void mmu_pte_write_zap_pte(struct kvm_vcpu *vcpu,
  1100. struct kvm_mmu_page *sp,
  1101. u64 *spte)
  1102. {
  1103. u64 pte;
  1104. struct kvm_mmu_page *child;
  1105. pte = *spte;
  1106. if (is_shadow_present_pte(pte)) {
  1107. if (sp->role.level == PT_PAGE_TABLE_LEVEL)
  1108. rmap_remove(vcpu->kvm, spte);
  1109. else {
  1110. child = page_header(pte & PT64_BASE_ADDR_MASK);
  1111. mmu_page_remove_parent_pte(child, spte);
  1112. }
  1113. }
  1114. set_shadow_pte(spte, shadow_trap_nonpresent_pte);
  1115. }
  1116. static void mmu_pte_write_new_pte(struct kvm_vcpu *vcpu,
  1117. struct kvm_mmu_page *sp,
  1118. u64 *spte,
  1119. const void *new, int bytes,
  1120. int offset_in_pte)
  1121. {
  1122. if (sp->role.level != PT_PAGE_TABLE_LEVEL) {
  1123. ++vcpu->kvm->stat.mmu_pde_zapped;
  1124. return;
  1125. }
  1126. ++vcpu->kvm->stat.mmu_pte_updated;
  1127. if (sp->role.glevels == PT32_ROOT_LEVEL)
  1128. paging32_update_pte(vcpu, sp, spte, new, bytes, offset_in_pte);
  1129. else
  1130. paging64_update_pte(vcpu, sp, spte, new, bytes, offset_in_pte);
  1131. }
  1132. static bool need_remote_flush(u64 old, u64 new)
  1133. {
  1134. if (!is_shadow_present_pte(old))
  1135. return false;
  1136. if (!is_shadow_present_pte(new))
  1137. return true;
  1138. if ((old ^ new) & PT64_BASE_ADDR_MASK)
  1139. return true;
  1140. old ^= PT64_NX_MASK;
  1141. new ^= PT64_NX_MASK;
  1142. return (old & ~new & PT64_PERM_MASK) != 0;
  1143. }
  1144. static void mmu_pte_write_flush_tlb(struct kvm_vcpu *vcpu, u64 old, u64 new)
  1145. {
  1146. if (need_remote_flush(old, new))
  1147. kvm_flush_remote_tlbs(vcpu->kvm);
  1148. else
  1149. kvm_mmu_flush_tlb(vcpu);
  1150. }
  1151. static bool last_updated_pte_accessed(struct kvm_vcpu *vcpu)
  1152. {
  1153. u64 *spte = vcpu->arch.last_pte_updated;
  1154. return !!(spte && (*spte & PT_ACCESSED_MASK));
  1155. }
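/*
* Peek at the guest pte being written; if it looks like a present pte,
* resolve and pin the page it points to in vcpu->arch.update_pte so
* the pte-write path can install the new mapping without calling
* gfn_to_page() under mmu_lock.
*/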
  1156. static void mmu_guess_page_from_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
  1157. const u8 *new, int bytes)
  1158. {
  1159. gfn_t gfn;
  1160. int r;
  1161. u64 gpte = 0;
  1162. if (bytes != 4 && bytes != 8)
  1163. return;
  1164. /*
   1165. * Assume that the pte write is on a page table of the same type
  1166. * as the current vcpu paging mode. This is nearly always true
  1167. * (might be false while changing modes). Note it is verified later
  1168. * by update_pte().
  1169. */
  1170. if (is_pae(vcpu)) {
  1171. /* Handle a 32-bit guest writing two halves of a 64-bit gpte */
  1172. if ((bytes == 4) && (gpa % 4 == 0)) {
  1173. r = kvm_read_guest(vcpu->kvm, gpa & ~(u64)7, &gpte, 8);
  1174. if (r)
  1175. return;
  1176. memcpy((void *)&gpte + (gpa % 8), new, 4);
  1177. } else if ((bytes == 8) && (gpa % 8 == 0)) {
  1178. memcpy((void *)&gpte, new, 8);
  1179. }
  1180. } else {
  1181. if ((bytes == 4) && (gpa % 4 == 0))
  1182. memcpy((void *)&gpte, new, 4);
  1183. }
  1184. if (!is_present_pte(gpte))
  1185. return;
  1186. gfn = (gpte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT;
  1187. vcpu->arch.update_pte.gfn = gfn;
  1188. vcpu->arch.update_pte.page = gfn_to_page(vcpu->kvm, gfn);
  1189. }
  1190. void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
  1191. const u8 *new, int bytes)
  1192. {
  1193. gfn_t gfn = gpa >> PAGE_SHIFT;
  1194. struct kvm_mmu_page *sp;
  1195. struct hlist_node *node, *n;
  1196. struct hlist_head *bucket;
  1197. unsigned index;
  1198. u64 entry;
  1199. u64 *spte;
  1200. unsigned offset = offset_in_page(gpa);
  1201. unsigned pte_size;
  1202. unsigned page_offset;
  1203. unsigned misaligned;
  1204. unsigned quadrant;
  1205. int level;
  1206. int flooded = 0;
  1207. int npte;
  1208. pgprintk("%s: gpa %llx bytes %d\n", __FUNCTION__, gpa, bytes);
  1209. mmu_guess_page_from_pte_write(vcpu, gpa, new, bytes);
  1210. spin_lock(&vcpu->kvm->mmu_lock);
  1211. kvm_mmu_free_some_pages(vcpu);
  1212. ++vcpu->kvm->stat.mmu_pte_write;
  1213. kvm_mmu_audit(vcpu, "pre pte write");
  1214. if (gfn == vcpu->arch.last_pt_write_gfn
  1215. && !last_updated_pte_accessed(vcpu)) {
  1216. ++vcpu->arch.last_pt_write_count;
  1217. if (vcpu->arch.last_pt_write_count >= 3)
  1218. flooded = 1;
  1219. } else {
  1220. vcpu->arch.last_pt_write_gfn = gfn;
  1221. vcpu->arch.last_pt_write_count = 1;
  1222. vcpu->arch.last_pte_updated = NULL;
  1223. }
  1224. index = kvm_page_table_hashfn(gfn) % KVM_NUM_MMU_PAGES;
  1225. bucket = &vcpu->kvm->arch.mmu_page_hash[index];
  1226. hlist_for_each_entry_safe(sp, node, n, bucket, hash_link) {
  1227. if (sp->gfn != gfn || sp->role.metaphysical)
  1228. continue;
  1229. pte_size = sp->role.glevels == PT32_ROOT_LEVEL ? 4 : 8;
  1230. misaligned = (offset ^ (offset + bytes - 1)) & ~(pte_size - 1);
  1231. misaligned |= bytes < 4;
  1232. if (misaligned || flooded) {
  1233. /*
  1234. * Misaligned accesses are too much trouble to fix
  1235. * up; also, they usually indicate a page is not used
  1236. * as a page table.
  1237. *
  1238. * If we're seeing too many writes to a page,
  1239. * it may no longer be a page table, or we may be
  1240. * forking, in which case it is better to unmap the
  1241. * page.
  1242. */
  1243. pgprintk("misaligned: gpa %llx bytes %d role %x\n",
  1244. gpa, bytes, sp->role.word);
  1245. kvm_mmu_zap_page(vcpu->kvm, sp);
  1246. ++vcpu->kvm->stat.mmu_flooded;
  1247. continue;
  1248. }
  1249. page_offset = offset;
  1250. level = sp->role.level;
  1251. npte = 1;
  1252. if (sp->role.glevels == PT32_ROOT_LEVEL) {
  1253. page_offset <<= 1; /* 32->64 */
  1254. /*
  1255. * A 32-bit pde maps 4MB while the shadow pdes map
  1256. * only 2MB. So we need to double the offset again
  1257. * and zap two pdes instead of one.
  1258. */
  1259. if (level == PT32_ROOT_LEVEL) {
  1260. page_offset &= ~7; /* kill rounding error */
  1261. page_offset <<= 1;
  1262. npte = 2;
  1263. }
  1264. quadrant = page_offset >> PAGE_SHIFT;
  1265. page_offset &= ~PAGE_MASK;
  1266. if (quadrant != sp->role.quadrant)
  1267. continue;
  1268. }
  1269. spte = &sp->spt[page_offset / sizeof(*spte)];
  1270. while (npte--) {
  1271. entry = *spte;
  1272. mmu_pte_write_zap_pte(vcpu, sp, spte);
  1273. mmu_pte_write_new_pte(vcpu, sp, spte, new, bytes,
  1274. page_offset & (pte_size - 1));
  1275. mmu_pte_write_flush_tlb(vcpu, entry, *spte);
  1276. ++spte;
  1277. }
  1278. }
  1279. kvm_mmu_audit(vcpu, "post pte write");
  1280. spin_unlock(&vcpu->kvm->mmu_lock);
  1281. if (vcpu->arch.update_pte.page) {
  1282. kvm_release_page_clean(vcpu->arch.update_pte.page);
  1283. vcpu->arch.update_pte.page = NULL;
  1284. }
  1285. }
  1286. int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva)
  1287. {
  1288. gpa_t gpa;
  1289. int r;
  1290. down_read(&current->mm->mmap_sem);
  1291. gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, gva);
  1292. up_read(&current->mm->mmap_sem);
  1293. spin_lock(&vcpu->kvm->mmu_lock);
  1294. r = kvm_mmu_unprotect_page(vcpu->kvm, gpa >> PAGE_SHIFT);
  1295. spin_unlock(&vcpu->kvm->mmu_lock);
  1296. return r;
  1297. }
  1298. void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu)
  1299. {
  1300. while (vcpu->kvm->arch.n_free_mmu_pages < KVM_REFILL_PAGES) {
  1301. struct kvm_mmu_page *sp;
  1302. sp = container_of(vcpu->kvm->arch.active_mmu_pages.prev,
  1303. struct kvm_mmu_page, link);
  1304. kvm_mmu_zap_page(vcpu->kvm, sp);
  1305. ++vcpu->kvm->stat.mmu_recycled;
  1306. }
  1307. }
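/*
* Top-level shadow MMU fault handler: let the active mmu context try
* to fix the fault; if it reports that the access must be emulated
* (a write to a shadowed page table, or mmio), emulate the faulting
* instruction instead of re-entering the guest.
*/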
  1308. int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u32 error_code)
  1309. {
  1310. int r;
  1311. enum emulation_result er;
  1312. r = vcpu->arch.mmu.page_fault(vcpu, cr2, error_code);
  1313. if (r < 0)
  1314. goto out;
  1315. if (!r) {
  1316. r = 1;
  1317. goto out;
  1318. }
  1319. r = mmu_topup_memory_caches(vcpu);
  1320. if (r)
  1321. goto out;
  1322. er = emulate_instruction(vcpu, vcpu->run, cr2, error_code, 0);
  1323. switch (er) {
  1324. case EMULATE_DONE:
  1325. return 1;
  1326. case EMULATE_DO_MMIO:
  1327. ++vcpu->stat.mmio_exits;
  1328. return 0;
  1329. case EMULATE_FAIL:
  1330. kvm_report_emulation_failure(vcpu, "pagetable");
  1331. return 1;
  1332. default:
  1333. BUG();
  1334. }
  1335. out:
  1336. return r;
  1337. }
  1338. EXPORT_SYMBOL_GPL(kvm_mmu_page_fault);
  1339. static void free_mmu_pages(struct kvm_vcpu *vcpu)
  1340. {
  1341. struct kvm_mmu_page *sp;
  1342. while (!list_empty(&vcpu->kvm->arch.active_mmu_pages)) {
  1343. sp = container_of(vcpu->kvm->arch.active_mmu_pages.next,
  1344. struct kvm_mmu_page, link);
  1345. kvm_mmu_zap_page(vcpu->kvm, sp);
  1346. }
  1347. free_page((unsigned long)vcpu->arch.mmu.pae_root);
  1348. }
  1349. static int alloc_mmu_pages(struct kvm_vcpu *vcpu)
  1350. {
  1351. struct page *page;
  1352. int i;
  1353. ASSERT(vcpu);
  1354. if (vcpu->kvm->arch.n_requested_mmu_pages)
  1355. vcpu->kvm->arch.n_free_mmu_pages =
  1356. vcpu->kvm->arch.n_requested_mmu_pages;
  1357. else
  1358. vcpu->kvm->arch.n_free_mmu_pages =
  1359. vcpu->kvm->arch.n_alloc_mmu_pages;
  1360. /*
  1361. * When emulating 32-bit mode, cr3 is only 32 bits even on x86_64.
  1362. * Therefore we need to allocate shadow page tables in the first
  1363. * 4GB of memory, which happens to fit the DMA32 zone.
  1364. */
  1365. page = alloc_page(GFP_KERNEL | __GFP_DMA32);
  1366. if (!page)
  1367. goto error_1;
  1368. vcpu->arch.mmu.pae_root = page_address(page);
  1369. for (i = 0; i < 4; ++i)
  1370. vcpu->arch.mmu.pae_root[i] = INVALID_PAGE;
  1371. return 0;
  1372. error_1:
  1373. free_mmu_pages(vcpu);
  1374. return -ENOMEM;
  1375. }
  1376. int kvm_mmu_create(struct kvm_vcpu *vcpu)
  1377. {
  1378. ASSERT(vcpu);
  1379. ASSERT(!VALID_PAGE(vcpu->arch.mmu.root_hpa));
  1380. return alloc_mmu_pages(vcpu);
  1381. }
  1382. int kvm_mmu_setup(struct kvm_vcpu *vcpu)
  1383. {
  1384. ASSERT(vcpu);
  1385. ASSERT(!VALID_PAGE(vcpu->arch.mmu.root_hpa));
  1386. return init_kvm_mmu(vcpu);
  1387. }
  1388. void kvm_mmu_destroy(struct kvm_vcpu *vcpu)
  1389. {
  1390. ASSERT(vcpu);
  1391. destroy_kvm_mmu(vcpu);
  1392. free_mmu_pages(vcpu);
  1393. mmu_free_memory_caches(vcpu);
  1394. }
  1395. void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot)
  1396. {
  1397. struct kvm_mmu_page *sp;
  1398. list_for_each_entry(sp, &kvm->arch.active_mmu_pages, link) {
  1399. int i;
  1400. u64 *pt;
  1401. if (!test_bit(slot, &sp->slot_bitmap))
  1402. continue;
  1403. pt = sp->spt;
  1404. for (i = 0; i < PT64_ENT_PER_PAGE; ++i)
  1405. /* avoid RMW */
  1406. if (pt[i] & PT_WRITABLE_MASK)
  1407. pt[i] &= ~PT_WRITABLE_MASK;
  1408. }
  1409. }
  1410. void kvm_mmu_zap_all(struct kvm *kvm)
  1411. {
  1412. struct kvm_mmu_page *sp, *node;
  1413. spin_lock(&kvm->mmu_lock);
  1414. list_for_each_entry_safe(sp, node, &kvm->arch.active_mmu_pages, link)
  1415. kvm_mmu_zap_page(kvm, sp);
  1416. spin_unlock(&kvm->mmu_lock);
  1417. kvm_flush_remote_tlbs(kvm);
  1418. }
  1419. void kvm_mmu_module_exit(void)
  1420. {
  1421. if (pte_chain_cache)
  1422. kmem_cache_destroy(pte_chain_cache);
  1423. if (rmap_desc_cache)
  1424. kmem_cache_destroy(rmap_desc_cache);
  1425. if (mmu_page_header_cache)
  1426. kmem_cache_destroy(mmu_page_header_cache);
  1427. }
  1428. int kvm_mmu_module_init(void)
  1429. {
  1430. pte_chain_cache = kmem_cache_create("kvm_pte_chain",
  1431. sizeof(struct kvm_pte_chain),
  1432. 0, 0, NULL);
  1433. if (!pte_chain_cache)
  1434. goto nomem;
  1435. rmap_desc_cache = kmem_cache_create("kvm_rmap_desc",
  1436. sizeof(struct kvm_rmap_desc),
  1437. 0, 0, NULL);
  1438. if (!rmap_desc_cache)
  1439. goto nomem;
  1440. mmu_page_header_cache = kmem_cache_create("kvm_mmu_page_header",
  1441. sizeof(struct kvm_mmu_page),
  1442. 0, 0, NULL);
  1443. if (!mmu_page_header_cache)
  1444. goto nomem;
  1445. return 0;
  1446. nomem:
  1447. kvm_mmu_module_exit();
  1448. return -ENOMEM;
  1449. }
  1450. /*
  1451. * Caculate mmu pages needed for kvm.
  1452. */
  1453. unsigned int kvm_mmu_calculate_mmu_pages(struct kvm *kvm)
  1454. {
  1455. int i;
  1456. unsigned int nr_mmu_pages;
  1457. unsigned int nr_pages = 0;
  1458. for (i = 0; i < kvm->nmemslots; i++)
  1459. nr_pages += kvm->memslots[i].npages;
  1460. nr_mmu_pages = nr_pages * KVM_PERMILLE_MMU_PAGES / 1000;
  1461. nr_mmu_pages = max(nr_mmu_pages,
  1462. (unsigned int) KVM_MIN_ALLOC_MMU_PAGES);
  1463. return nr_mmu_pages;
  1464. }
  1465. #ifdef AUDIT
  1466. static const char *audit_msg;
  1467. static gva_t canonicalize(gva_t gva)
  1468. {
  1469. #ifdef CONFIG_X86_64
  1470. gva = (long long)(gva << 16) >> 16;
  1471. #endif
  1472. return gva;
  1473. }
  1474. static void audit_mappings_page(struct kvm_vcpu *vcpu, u64 page_pte,
  1475. gva_t va, int level)
  1476. {
  1477. u64 *pt = __va(page_pte & PT64_BASE_ADDR_MASK);
  1478. int i;
  1479. gva_t va_delta = 1ul << (PAGE_SHIFT + 9 * (level - 1));
  1480. for (i = 0; i < PT64_ENT_PER_PAGE; ++i, va += va_delta) {
  1481. u64 ent = pt[i];
  1482. if (ent == shadow_trap_nonpresent_pte)
  1483. continue;
  1484. va = canonicalize(va);
  1485. if (level > 1) {
  1486. if (ent == shadow_notrap_nonpresent_pte)
  1487. printk(KERN_ERR "audit: (%s) nontrapping pte"
  1488. " in nonleaf level: levels %d gva %lx"
  1489. " level %d pte %llx\n", audit_msg,
  1490. vcpu->arch.mmu.root_level, va, level, ent);
  1491. audit_mappings_page(vcpu, ent, va, level - 1);
  1492. } else {
  1493. gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, va);
  1494. struct page *page = gpa_to_page(vcpu, gpa);
  1495. hpa_t hpa = page_to_phys(page);
  1496. if (is_shadow_present_pte(ent)
  1497. && (ent & PT64_BASE_ADDR_MASK) != hpa)
  1498. printk(KERN_ERR "xx audit error: (%s) levels %d"
  1499. " gva %lx gpa %llx hpa %llx ent %llx %d\n",
  1500. audit_msg, vcpu->arch.mmu.root_level,
  1501. va, gpa, hpa, ent,
  1502. is_shadow_present_pte(ent));
  1503. else if (ent == shadow_notrap_nonpresent_pte
  1504. && !is_error_hpa(hpa))
  1505. printk(KERN_ERR "audit: (%s) notrap shadow,"
  1506. " valid guest gva %lx\n", audit_msg, va);
  1507. kvm_release_page_clean(page);
  1508. }
  1509. }
  1510. }
  1511. static void audit_mappings(struct kvm_vcpu *vcpu)
  1512. {
  1513. unsigned i;
  1514. if (vcpu->arch.mmu.root_level == 4)
  1515. audit_mappings_page(vcpu, vcpu->arch.mmu.root_hpa, 0, 4);
  1516. else
  1517. for (i = 0; i < 4; ++i)
  1518. if (vcpu->arch.mmu.pae_root[i] & PT_PRESENT_MASK)
  1519. audit_mappings_page(vcpu,
  1520. vcpu->arch.mmu.pae_root[i],
  1521. i << 30,
  1522. 2);
  1523. }
  1524. static int count_rmaps(struct kvm_vcpu *vcpu)
  1525. {
  1526. int nmaps = 0;
  1527. int i, j, k;
  1528. for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
  1529. struct kvm_memory_slot *m = &vcpu->kvm->memslots[i];
  1530. struct kvm_rmap_desc *d;
  1531. for (j = 0; j < m->npages; ++j) {
  1532. unsigned long *rmapp = &m->rmap[j];
  1533. if (!*rmapp)
  1534. continue;
  1535. if (!(*rmapp & 1)) {
  1536. ++nmaps;
  1537. continue;
  1538. }
  1539. d = (struct kvm_rmap_desc *)(*rmapp & ~1ul);
  1540. while (d) {
  1541. for (k = 0; k < RMAP_EXT; ++k)
  1542. if (d->shadow_ptes[k])
  1543. ++nmaps;
  1544. else
  1545. break;
  1546. d = d->more;
  1547. }
  1548. }
  1549. }
  1550. return nmaps;
  1551. }
  1552. static int count_writable_mappings(struct kvm_vcpu *vcpu)
  1553. {
  1554. int nmaps = 0;
  1555. struct kvm_mmu_page *sp;
  1556. int i;
  1557. list_for_each_entry(sp, &vcpu->kvm->arch.active_mmu_pages, link) {
  1558. u64 *pt = sp->spt;
  1559. if (sp->role.level != PT_PAGE_TABLE_LEVEL)
  1560. continue;
  1561. for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
  1562. u64 ent = pt[i];
  1563. if (!(ent & PT_PRESENT_MASK))
  1564. continue;
  1565. if (!(ent & PT_WRITABLE_MASK))
  1566. continue;
  1567. ++nmaps;
  1568. }
  1569. }
  1570. return nmaps;
  1571. }
  1572. static void audit_rmap(struct kvm_vcpu *vcpu)
  1573. {
  1574. int n_rmap = count_rmaps(vcpu);
  1575. int n_actual = count_writable_mappings(vcpu);
  1576. if (n_rmap != n_actual)
  1577. printk(KERN_ERR "%s: (%s) rmap %d actual %d\n",
  1578. __FUNCTION__, audit_msg, n_rmap, n_actual);
  1579. }
  1580. static void audit_write_protection(struct kvm_vcpu *vcpu)
  1581. {
  1582. struct kvm_mmu_page *sp;
  1583. struct kvm_memory_slot *slot;
  1584. unsigned long *rmapp;
  1585. gfn_t gfn;
  1586. list_for_each_entry(sp, &vcpu->kvm->arch.active_mmu_pages, link) {
  1587. if (sp->role.metaphysical)
  1588. continue;
  1589. slot = gfn_to_memslot(vcpu->kvm, sp->gfn);
  1590. gfn = unalias_gfn(vcpu->kvm, sp->gfn);
  1591. rmapp = &slot->rmap[gfn - slot->base_gfn];
  1592. if (*rmapp)
  1593. printk(KERN_ERR "%s: (%s) shadow page has writable"
  1594. " mappings: gfn %lx role %x\n",
  1595. __FUNCTION__, audit_msg, sp->gfn,
  1596. sp->role.word);
  1597. }
  1598. }
  1599. static void kvm_mmu_audit(struct kvm_vcpu *vcpu, const char *msg)
  1600. {
  1601. int olddbg = dbg;
  1602. dbg = 0;
  1603. audit_msg = msg;
  1604. audit_rmap(vcpu);
  1605. audit_write_protection(vcpu);
  1606. audit_mappings(vcpu);
  1607. dbg = olddbg;
  1608. }
  1609. #endif